aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Hellstrom <thellstrom@vmware.com>2009-06-10 09:20:19 -0400
committerDave Airlie <airlied@redhat.com>2009-06-14 19:37:57 -0400
commitba4e7d973dd09b66912ac4c0856add8b0703a997 (patch)
tree32a87edb83a427ffd22645c5f77e6cec8be4e719
parente6c03c5b40314d787f7053f631594d6b1bd609e8 (diff)
drm: Add the TTM GPU memory manager subsystem.
TTM is a GPU memory manager subsystem designed for use with GPU devices with various memory types (On-card VRAM, AGP, PCI apertures etc.). It's essentially a helper library that assists the DRM driver in creating and managing persistent buffer objects. TTM manages placement of data and CPU map setup and teardown on data movement. It can also optionally manage synchronization of data on a per-buffer-object level. TTM takes care to provide an always valid virtual user-space address to a buffer object which makes user-space sub-allocation of big buffer objects feasible. TTM uses a fine-grained per buffer-object locking scheme, taking care to release all relevant locks when waiting for the GPU. Although this implies some locking overhead, it's probably a big win for devices with multiple command submission mechanisms, since the lock contention will be minimal. TTM can be used with whatever user-space interface the driver chooses, including GEM. It's used by the upcoming Radeon KMS DRM driver and is also the GPU memory management core of various new experimental DRM drivers. Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com> Signed-off-by: Jerome Glisse <jglisse@redhat.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/ttm/Makefile8
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c150
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c1698
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c561
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c454
-rw-r--r--drivers/gpu/drm/ttm/ttm_global.c114
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c234
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c50
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c635
-rw-r--r--include/drm/ttm/ttm_bo_api.h618
-rw-r--r--include/drm/ttm/ttm_bo_driver.h867
-rw-r--r--include/drm/ttm/ttm_memory.h153
-rw-r--r--include/drm/ttm/ttm_module.h58
-rw-r--r--include/drm/ttm/ttm_placement.h92
16 files changed, 5701 insertions, 1 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f5d46e7199d4..64a57adc0a2c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -18,6 +18,14 @@ menuconfig DRM
18 details. You should also select and configure AGP 18 details. You should also select and configure AGP
19 (/dev/agpgart) support. 19 (/dev/agpgart) support.
20 20
21config DRM_TTM
22 tristate "TTM memory manager"
23 depends on DRM
24 help
25 GPU memory management subsystem for devices with multiple
26 GPU memory types. Will be enabled automatically if a device driver
27 uses it.
28
21config DRM_TDFX 29config DRM_TDFX
22 tristate "3dfx Banshee/Voodoo3+" 30 tristate "3dfx Banshee/Voodoo3+"
23 depends on DRM && PCI 31 depends on DRM && PCI
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4ec5061fa584..4e89ab08b7b8 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -26,4 +26,4 @@ obj-$(CONFIG_DRM_I915) += i915/
26obj-$(CONFIG_DRM_SIS) += sis/ 26obj-$(CONFIG_DRM_SIS) += sis/
27obj-$(CONFIG_DRM_SAVAGE)+= savage/ 27obj-$(CONFIG_DRM_SAVAGE)+= savage/
28obj-$(CONFIG_DRM_VIA) +=via/ 28obj-$(CONFIG_DRM_VIA) +=via/
29 29obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
new file mode 100644
index 000000000000..b0a9de7a57c2
--- /dev/null
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the TTM memory manager, part of the drm device driver. This
3# driver provides GPU memory management support for DRM drivers.
4ccflags-y := -Iinclude/drm
5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
7
8obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
new file mode 100644
index 000000000000..e8f6d2229d8c
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -0,0 +1,150 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 * Keith Packard.
30 */
31
32#include "ttm/ttm_module.h"
33#include "ttm/ttm_bo_driver.h"
34#ifdef TTM_HAS_AGP
35#include "ttm/ttm_placement.h"
36#include <linux/agp_backend.h>
37#include <linux/module.h>
38#include <linux/io.h>
39#include <asm/agp.h>
40
/*
 * struct ttm_agp_backend - per-object TTM backend state for an AGP aperture.
 * @backend: Generic TTM backend; embedded so container_of() can recover
 *           this struct from the struct ttm_backend pointer TTM hands back.
 * @mem:     AGP memory handle currently populated, or NULL.
 * @bridge:  AGP bridge used for allocate/bind/unbind operations.
 */
struct ttm_agp_backend {
	struct ttm_backend backend;
	struct agp_memory *mem;
	struct agp_bridge_data *bridge;
};
46
47static int ttm_agp_populate(struct ttm_backend *backend,
48 unsigned long num_pages, struct page **pages,
49 struct page *dummy_read_page)
50{
51 struct ttm_agp_backend *agp_be =
52 container_of(backend, struct ttm_agp_backend, backend);
53 struct page **cur_page, **last_page = pages + num_pages;
54 struct agp_memory *mem;
55
56 mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
57 if (unlikely(mem == NULL))
58 return -ENOMEM;
59
60 mem->page_count = 0;
61 for (cur_page = pages; cur_page < last_page; ++cur_page) {
62 struct page *page = *cur_page;
63 if (!page)
64 page = dummy_read_page;
65
66 mem->memory[mem->page_count++] =
67 phys_to_gart(page_to_phys(page));
68 }
69 agp_be->mem = mem;
70 return 0;
71}
72
73static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
74{
75 struct ttm_agp_backend *agp_be =
76 container_of(backend, struct ttm_agp_backend, backend);
77 struct agp_memory *mem = agp_be->mem;
78 int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
79 int ret;
80
81 mem->is_flushed = 1;
82 mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
83
84 ret = agp_bind_memory(mem, bo_mem->mm_node->start);
85 if (ret)
86 printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
87
88 return ret;
89}
90
91static int ttm_agp_unbind(struct ttm_backend *backend)
92{
93 struct ttm_agp_backend *agp_be =
94 container_of(backend, struct ttm_agp_backend, backend);
95
96 if (agp_be->mem->is_bound)
97 return agp_unbind_memory(agp_be->mem);
98 else
99 return 0;
100}
101
102static void ttm_agp_clear(struct ttm_backend *backend)
103{
104 struct ttm_agp_backend *agp_be =
105 container_of(backend, struct ttm_agp_backend, backend);
106 struct agp_memory *mem = agp_be->mem;
107
108 if (mem) {
109 ttm_agp_unbind(backend);
110 agp_free_memory(mem);
111 }
112 agp_be->mem = NULL;
113}
114
115static void ttm_agp_destroy(struct ttm_backend *backend)
116{
117 struct ttm_agp_backend *agp_be =
118 container_of(backend, struct ttm_agp_backend, backend);
119
120 if (agp_be->mem)
121 ttm_agp_clear(backend);
122 kfree(agp_be);
123}
124
/* Backend vtable wiring the AGP implementation into the generic TTM core. */
static struct ttm_backend_func ttm_agp_func = {
	.populate = ttm_agp_populate,
	.clear = ttm_agp_clear,
	.bind = ttm_agp_bind,
	.unbind = ttm_agp_unbind,
	.destroy = ttm_agp_destroy,
};
132
133struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
134 struct agp_bridge_data *bridge)
135{
136 struct ttm_agp_backend *agp_be;
137
138 agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
139 if (!agp_be)
140 return NULL;
141
142 agp_be->mem = NULL;
143 agp_be->bridge = bridge;
144 agp_be->backend.func = &ttm_agp_func;
145 agp_be->backend.bdev = bdev;
146 return &agp_be->backend;
147}
148EXPORT_SYMBOL(ttm_agp_backend_init);
149
150#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
new file mode 100644
index 000000000000..1587aeca7bea
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -0,0 +1,1698 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h"
33#include "ttm/ttm_placement.h"
34#include <linux/jiffies.h>
35#include <linux/slab.h>
36#include <linux/sched.h>
37#include <linux/mm.h>
38#include <linux/file.h>
39#include <linux/module.h>
40
41#define TTM_ASSERT_LOCKED(param)
42#define TTM_DEBUG(fmt, arg...)
43#define TTM_BO_HASH_ORDER 13
44
45static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
46static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
47static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
48
/*
 * Convert a TTM memory type index (e.g. TTM_PL_TT) into its single-bit
 * placement flag. The shifted constant is unsigned so producing bit 31
 * does not rely on signed-overflow behaviour (UB for `1 << 31`).
 */
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1u << type;
}
53
/*
 * Final release function for a buffer object's list reference count,
 * invoked via kref_put() when the last list reference drops. The
 * BUG_ON()s assert the object has already been removed from every list
 * and holds no fence, memory node or CPU writers.
 */
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	if (bo->destroy)
		/* Driver-provided destructor owns the final free. */
		bo->destroy(bo);
	else {
		/* TTM allocated the object: return the accounted size
		 * to the global memory accounting and free it here. */
		ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
		kfree(bo);
	}
}
77
78int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
79{
80
81 if (interruptible) {
82 int ret = 0;
83
84 ret = wait_event_interruptible(bo->event_queue,
85 atomic_read(&bo->reserved) == 0);
86 if (unlikely(ret != 0))
87 return -ERESTART;
88 } else {
89 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
90 }
91 return 0;
92}
93
/*
 * Put @bo on its memory type's LRU list — and on the global swap LRU
 * when it has ttm backing pages — taking one list reference per list.
 * Caller must hold bdev->lru_lock and have the object reserved.
 * Pinned (TTM_PL_FLAG_NO_EVICT) objects stay off the LRU lists.
 */
static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			/* Only objects with system pages are swappable. */
			list_add_tail(&bo->swap, &bdev->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
115
116/**
117 * Call with the lru_lock held.
118 */
119
120static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
121{
122 int put_count = 0;
123
124 if (!list_empty(&bo->swap)) {
125 list_del_init(&bo->swap);
126 ++put_count;
127 }
128 if (!list_empty(&bo->lru)) {
129 list_del_init(&bo->lru);
130 ++put_count;
131 }
132
133 /*
134 * TODO: Add a driver hook to delete from
135 * driver-specific LRU's here.
136 */
137
138 return put_count;
139}
140
/*
 * Reserve @bo with bdev->lru_lock held, sleeping until the reservation
 * succeeds. The lru_lock is dropped and retaken around the wait.
 *
 * If @use_sequence is set, a wrap-safe deadlock-avoidance scheme is
 * used: when this reserver's @sequence is ahead of the sequence that
 * currently holds the object, -EAGAIN tells the caller to back off and
 * retry in sequence order. Returns -EBUSY if @no_wait and the object
 * is already reserved, or the error from ttm_bo_wait_unreserved().
 */
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		/* Wrap-around-safe "sequence is newer" comparison. */
		if (use_sequence && bo->seq_valid &&
		    (sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&bdev->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
/* NOTE(review): this exports ttm_bo_reserve (defined below), not the
 * _locked variant above — the placement looks odd but is harmless. */
EXPORT_SYMBOL(ttm_bo_reserve);
175
/*
 * kref release callback that must never run: reaching it means a list
 * reference was dropped that the lists did not actually hold.
 */
static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}
180
/*
 * Reserve @bo and take it off the LRU lists.
 * Wrapper around ttm_bo_reserve_locked() that acquires the lru_lock
 * and, on success, removes the object from the LRU/swap lists,
 * dropping the corresponding list references after the lock is
 * released. See ttm_bo_reserve_locked() for the return values.
 */
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;
	int ret;

	spin_lock(&bdev->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bdev->lru_lock);

	/* The list references can never be the last ones here, hence
	 * the ttm_bo_ref_bug release function. */
	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}
201
/*
 * Release a reservation taken with ttm_bo_reserve(): put the object
 * back on its LRU lists, clear the reserved flag and wake any waiters,
 * all under the lru_lock so LRU state and the flag change atomically.
 */
void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	spin_lock(&bdev->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
213
214/*
215 * Call bo->mutex locked.
216 */
217
218static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
219{
220 struct ttm_bo_device *bdev = bo->bdev;
221 int ret = 0;
222 uint32_t page_flags = 0;
223
224 TTM_ASSERT_LOCKED(&bo->mutex);
225 bo->ttm = NULL;
226
227 switch (bo->type) {
228 case ttm_bo_type_device:
229 if (zero_alloc)
230 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
231 case ttm_bo_type_kernel:
232 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
233 page_flags, bdev->dummy_read_page);
234 if (unlikely(bo->ttm == NULL))
235 ret = -ENOMEM;
236 break;
237 case ttm_bo_type_user:
238 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
239 page_flags | TTM_PAGE_FLAG_USER,
240 bdev->dummy_read_page);
241 if (unlikely(bo->ttm == NULL))
242 ret = -ENOMEM;
243 break;
244
245 ret = ttm_tt_set_user(bo->ttm, current,
246 bo->buffer_start, bo->num_pages);
247 if (unlikely(ret != 0))
248 ttm_tt_destroy(bo->ttm);
249 break;
250 default:
251 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
252 ret = -EINVAL;
253 break;
254 }
255
256 return ret;
257}
258
/*
 * Move @bo's backing storage to the placement described by @mem.
 * Tears down CPU mappings when the move crosses a PCI aperture or
 * changes caching, creates/binds a ttm for non-fixed destinations, and
 * dispatches to the cheapest move path: a ttm rebind, the driver's
 * accelerated move hook, or a CPU memcpy fallback. On success @mem's
 * mm_node ownership passes to the bo. On error any ttm created for a
 * fixed destination is destroyed again.
 */
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		/* NOTE(review): returns directly instead of goto out_err,
		 * skipping the error-path ttm cleanup — confirm intended. */
		if (ret)
			return ret;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		/* system -> TT move: just adopt the new placement, no
		 * data copy is needed. */
		if (bo->mem.mem_type == TTM_PL_SYSTEM) {

			struct ttm_mem_reg *old_mem = &bo->mem;
			uint32_t save_flags = old_mem->placement;

			*old_mem = *mem;
			mem->mm_node = NULL;
			/* NOTE(review): save_flags is computed but never
			 * read afterwards — looks vestigial. */
			ttm_flag_masked(&save_flags, mem->placement,
					TTM_PL_MASK_MEMTYPE);
			goto moved;
		}

	}

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		/* Recompute the GPU-visible offset under bo->lock. */
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	}

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
347
/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 *
 * Returns 0 when the bo was reaped or (re)queued, -EBUSY when it is
 * still busy and already waiting on the delayed-destroy list.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	/* Poll (or, for remove_all, block on) the bo's fence; a NULL
	 * sync_obj afterwards means the bo is idle. */
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&bdev->lru_lock);
		/* The bo is unreachable, so reserving it cannot contend. */
		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			kref_put(&bo->list_kref, ttm_bo_ref_bug);
		}
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&bdev->lru_lock);

		atomic_set(&bo->reserved, 0);

		/* These puts may free the bo, hence after unlocking. */
		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_release_list);

		return 0;
	}

	spin_lock(&bdev->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		/* Queue for delayed destruction; one list reference is
		 * held by the ddestroy list. */
		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		/* Re-check in ~10 ms, but at least one jiffy. */
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}
418
/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 *
 * Returns non-zero when entries remain on the ddestroy list (so the
 * caller knows to re-arm the delayed work), zero when it is empty.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_buffer_object *entry, *nentry;
	struct list_head *list, *next;
	int ret;

	spin_lock(&bdev->lru_lock);
	list_for_each_safe(list, next, &bdev->ddestroy) {
		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
		nentry = NULL;

		/*
		 * Protect the next list entry from destruction while we
		 * unlock the lru_lock.
		 */

		if (next != &bdev->ddestroy) {
			nentry = list_entry(next, struct ttm_buffer_object,
					    ddestroy);
			kref_get(&nentry->list_kref);
		}
		kref_get(&entry->list_kref);

		/* cleanup_refs takes bo->lock and may sleep; it must run
		 * without the lru_lock held. */
		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);

		spin_lock(&bdev->lru_lock);
		if (nentry) {
			bool next_onlist = !list_empty(next);
			/* Drop the protective reference outside the lock:
			 * the release path may sleep. */
			spin_unlock(&bdev->lru_lock);
			kref_put(&nentry->list_kref, ttm_bo_release_list);
			spin_lock(&bdev->lru_lock);
			/*
			 * Someone might have raced us and removed the
			 * next entry from the list. We don't bother restarting
			 * list traversal.
			 */

			if (!next_onlist)
				break;
		}
		if (ret)
			break;
	}
	ret = !list_empty(&bdev->ddestroy);
	spin_unlock(&bdev->lru_lock);

	return ret;
}
474
475static void ttm_bo_delayed_workqueue(struct work_struct *work)
476{
477 struct ttm_bo_device *bdev =
478 container_of(work, struct ttm_bo_device, wq.work);
479
480 if (ttm_bo_delayed_delete(bdev, false)) {
481 schedule_delayed_work(&bdev->wq,
482 ((HZ / 100) < 1) ? 1 : HZ / 100);
483 }
484}
485
/*
 * Final kref release for a buffer object. Entered with bdev->vm_lock
 * held for write (see ttm_bo_unref()): removes the bo's address-space
 * node from the rb-tree, then drops the lock to run the (potentially
 * sleeping) cleanup, retaking it before returning to kref_put().
 */
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}
502
/*
 * Drop the caller's reference to *@p_bo and NULL the caller's pointer.
 * vm_lock is held for write around the kref_put() because the release
 * function manipulates the device's address-space rb-tree.
 */
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);
514
/*
 * Evict a reserved buffer object out of memory type @mem_type: wait for
 * it to idle, ask the driver where it may go via evict_flags(), find
 * space there (falling back to system memory), then move the data.
 * Returns 0 on success, -ERESTART when interrupted by a signal, or a
 * hard error.
 */
static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
			bool interruptible, bool no_wait)
{
	int ret = 0;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	uint32_t proposed_placement;

	/* Already moved out of this type (e.g. by a racing eviction). */
	if (bo->mem.mem_type != mem_type)
		goto out;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (ret && ret != -ERESTART) {
		printk(KERN_ERR TTM_PFX "Failed to expire sync object before "
		       "buffer eviction.\n");
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	proposed_placement = bdev->driver->evict_flags(bo);

	ret = ttm_bo_mem_space(bo, proposed_placement,
			       &evict_mem, interruptible, no_wait);
	/* Hard failure: retry with system memory as a last resort. */
	if (unlikely(ret != 0 && ret != -ERESTART))
		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
				       &evict_mem, interruptible, no_wait);

	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait);
	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		goto out;
	}

	/* A successful move normally consumes the node; free a leftover
	 * one if the bo ended up without it. */
	spin_lock(&bdev->lru_lock);
	if (evict_mem.mm_node) {
		drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	spin_unlock(&bdev->lru_lock);
	bo->evicted = true;
out:
	return ret;
}
575
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 *
 * On success fills in @mem->mm_node and @mem->mem_type and returns 0;
 * returns -ENOMEM when the LRU is exhausted without finding space, or
 * an eviction/reservation error.
 */
static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem,
				  uint32_t mem_type,
				  bool interruptible, bool no_wait)
{
	struct drm_mm_node *node;
	struct ttm_buffer_object *entry;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int put_count = 0;
	int ret;

retry_pre_get:
	/* Pre-allocate drm_mm node memory so search/get can run
	 * atomically under the lru_lock below. */
	ret = drm_mm_pre_get(&man->manager);
	if (unlikely(ret != 0))
		return ret;

	spin_lock(&bdev->lru_lock);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (list_empty(lru))
			break;

		/* Evict the least-recently-used bo of this memory type. */
		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);

		ret =
		    ttm_bo_reserve_locked(entry, interruptible, no_wait,
					  false, 0);

		if (likely(ret == 0))
			put_count = ttm_bo_del_from_lru(entry);

		spin_unlock(&bdev->lru_lock);

		if (unlikely(ret != 0))
			return ret;

		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);

		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);

		ttm_bo_unreserve(entry);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		if (ret)
			return ret;

		spin_lock(&bdev->lru_lock);
	} while (1);

	if (!node) {
		spin_unlock(&bdev->lru_lock);
		return -ENOMEM;
	}

	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
	if (unlikely(!node)) {
		/* A race consumed the pre-gotten node memory; retry. */
		spin_unlock(&bdev->lru_lock);
		goto retry_pre_get;
	}

	spin_unlock(&bdev->lru_lock);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}
654
655static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
656 bool disallow_fixed,
657 uint32_t mem_type,
658 uint32_t mask, uint32_t *res_mask)
659{
660 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
661
662 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
663 return false;
664
665 if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
666 return false;
667
668 if ((mask & man->available_caching) == 0)
669 return false;
670 if (mask & man->default_caching)
671 cur_flags |= man->default_caching;
672 else if (mask & TTM_PL_FLAG_CACHED)
673 cur_flags |= TTM_PL_FLAG_CACHED;
674 else if (mask & TTM_PL_FLAG_WC)
675 cur_flags |= TTM_PL_FLAG_WC;
676 else
677 cur_flags |= TTM_PL_FLAG_UNCACHED;
678
679 *res_mask = cur_flags;
680 return true;
681}
682
/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 *
 * Returns 0 with @mem filled in, -EINVAL when no compatible, enabled
 * memory type exists, -ERESTART when interrupted by a signal during
 * eviction, or -ENOMEM when even eviction could not create space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     uint32_t proposed_placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	uint32_t num_prios = bdev->driver->num_mem_type_prio;
	const uint32_t *prios = bdev->driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_eagain = false;
	struct drm_mm_node *node = NULL;
	int ret;

	mem->mm_node = NULL;
	/* First pass: try to allocate without evicting anything. */
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
					       bo->type == ttm_bo_type_user,
					       mem_type, proposed_placement,
					       &cur_flags);

		if (!type_ok)
			continue;

		/* System memory needs no mm node at all. */
		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			do {
				ret = drm_mm_pre_get(&man->manager);
				if (unlikely(ret))
					return ret;

				spin_lock(&bdev->lru_lock);
				node = drm_mm_search_free(&man->manager,
							  mem->num_pages,
							  mem->page_alignment,
							  1);
				if (unlikely(!node)) {
					spin_unlock(&bdev->lru_lock);
					break;
				}
				/* NULL result here means a race consumed
				 * the pre-gotten node memory; loop and
				 * pre-get again. */
				node = drm_mm_get_block_atomic(node,
							       mem->num_pages,
							       mem->
							       page_alignment);
				spin_unlock(&bdev->lru_lock);
			} while (!node);
		}
		if (node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	/* Second pass: evict in the driver's "busy" priority order. */
	num_prios = bdev->driver->num_mem_busy_prio;
	prios = bdev->driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		if (!man->has_type)
			continue;

		if (!ttm_bo_mt_compatible(man,
					  bo->type == ttm_bo_type_user,
					  mem_type,
					  proposed_placement, &cur_flags))
			continue;

		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
					     interruptible, no_wait);

		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}

		if (ret == -ERESTART)
			has_eagain = true;
	}

	ret = (has_eagain) ? -ERESTART : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
795
796int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
797{
798 int ret = 0;
799
800 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
801 return -EBUSY;
802
803 ret = wait_event_interruptible(bo->event_queue,
804 atomic_read(&bo->cpu_writers) == 0);
805
806 if (ret == -ERESTARTSYS)
807 ret = -ERESTART;
808
809 return ret;
810}
811
/*
 * Move a reserved buffer object so that it satisfies
 * @proposed_placement: wait for idle, find target space via
 * ttm_bo_mem_space(), then perform the move. On failure any memory
 * node acquired for the move is returned to its manager.
 */
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       uint32_t proposed_placement,
		       bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (ret)
		return ret;

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;

	/*
	 * Determine where to move the buffer.
	 */

	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
			       interruptible, no_wait);
	if (ret)
		goto out_unlock;

	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

out_unlock:
	/* On failure, return the unused node to the manager. */
	if (ret && mem.mm_node) {
		spin_lock(&bdev->lru_lock);
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&bdev->lru_lock);
	}
	return ret;
}
858
859static int ttm_bo_mem_compat(uint32_t proposed_placement,
860 struct ttm_mem_reg *mem)
861{
862 if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
863 return 0;
864 if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
865 return 0;
866
867 return 1;
868}
869
/*
 * Make a reserved buffer object match @proposed_placement: move it if
 * the current placement is incompatible, attach a ttm if it ended up
 * in system memory without one, and fold the non-memtype flag bits
 * into the active placement. Returns 0, -ERESTART on signal, or a
 * hard error (e.g. -ENOMEM when no space could be found).
 */
int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
			       uint32_t proposed_placement,
			       bool interruptible, bool no_wait)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	bo->proposed_placement = proposed_placement;

	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
		  (unsigned long)proposed_placement,
		  (unsigned long)bo->mem.placement);

	/*
	 * Check whether we need to move buffer.
	 */

	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
					 interruptible, no_wait);
		if (ret) {
			if (ret != -ERESTART)
				printk(KERN_ERR TTM_PFX
				       "Failed moving buffer. "
				       "Proposed placement 0x%08x\n",
				       bo->proposed_placement);
			if (ret == -ENOMEM)
				printk(KERN_ERR TTM_PFX
				       "Out of aperture space or "
				       "DRM memory quota.\n");
			return ret;
		}
	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	/*
	 * Validation has succeeded, move the access and other
	 * non-mapping-related flag bits from the proposed flags to
	 * the active flags
	 */

	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
			~TTM_PL_MASK_MEMTYPE);

	return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);
925
926int
927ttm_bo_check_placement(struct ttm_buffer_object *bo,
928 uint32_t set_flags, uint32_t clr_flags)
929{
930 uint32_t new_mask = set_flags | clr_flags;
931
932 if ((bo->type == ttm_bo_type_user) &&
933 (clr_flags & TTM_PL_FLAG_CACHED)) {
934 printk(KERN_ERR TTM_PFX
935 "User buffers require cache-coherent memory.\n");
936 return -EINVAL;
937 }
938
939 if (!capable(CAP_SYS_ADMIN)) {
940 if (new_mask & TTM_PL_FLAG_NO_EVICT) {
941 printk(KERN_ERR TTM_PFX "Need to be root to modify"
942 " NO_EVICT status.\n");
943 return -EINVAL;
944 }
945
946 if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
947 (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
948 printk(KERN_ERR TTM_PFX
949 "Incompatible memory specification"
950 " for NO_EVICT buffer.\n");
951 return -EINVAL;
952 }
953 }
954 return 0;
955}
956
/*
 * Initialize a (caller-allocated) buffer object: set up all list heads,
 * locks and refcounts, place the object in cached system memory, check
 * the requested placement flags, optionally allocate device address
 * space (ttm_bo_type_device only) and finally validate into the
 * requested placement. The object is created reserved and is unreserved
 * before returning. On error the object is unreferenced, which triggers
 * @destroy (or kfree) via the normal release path.
 * Returns 0 on success or a negative error code.
 */
int ttm_buffer_object_init(struct ttm_bo_device *bdev,
			   struct ttm_buffer_object *bo,
			   unsigned long size,
			   enum ttm_bo_type type,
			   uint32_t flags,
			   uint32_t page_alignment,
			   unsigned long buffer_start,
			   bool interruptible,
			   struct file *persistant_swap_storage,
			   size_t acc_size,
			   void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	/* Account for a user start address that is not page aligned. */
	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	/* Created reserved; unreserved just before successful return. */
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->type = type;
	bo->num_pages = num_pages;
	/* Start out in cached system memory; validate moves it later. */
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;

	ret = ttm_bo_check_placement(bo, flags, 0ULL);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * If no caching attributes are set, accept any form of caching.
	 */

	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |= TTM_PL_MASK_CACHING;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */

	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	/* Drop the initial reference; release path runs bo->destroy. */
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_buffer_object_init);
1039
1040static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
1041 unsigned long num_pages)
1042{
1043 size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1044 PAGE_MASK;
1045
1046 return bdev->ttm_bo_size + 2 * page_array_size;
1047}
1048
1049int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1050 unsigned long size,
1051 enum ttm_bo_type type,
1052 uint32_t flags,
1053 uint32_t page_alignment,
1054 unsigned long buffer_start,
1055 bool interruptible,
1056 struct file *persistant_swap_storage,
1057 struct ttm_buffer_object **p_bo)
1058{
1059 struct ttm_buffer_object *bo;
1060 int ret;
1061 struct ttm_mem_global *mem_glob = bdev->mem_glob;
1062
1063 size_t acc_size =
1064 ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1065 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
1066 if (unlikely(ret != 0))
1067 return ret;
1068
1069 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1070
1071 if (unlikely(bo == NULL)) {
1072 ttm_mem_global_free(mem_glob, acc_size, false);
1073 return -ENOMEM;
1074 }
1075
1076 ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
1077 page_alignment, buffer_start,
1078 interruptible,
1079 persistant_swap_storage, acc_size, NULL);
1080 if (likely(ret == 0))
1081 *p_bo = bo;
1082
1083 return ret;
1084}
1085
/*
 * Make one buffer leave the @mem_type domain during manager shutdown
 * or forced eviction: wait for it to idle, then evict it if it still
 * resides in @mem_type. With @allow_errors, failures are returned to
 * the caller; without it they are logged and swallowed so that cleanup
 * can make progress.
 */
static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
			     uint32_t mem_type, bool allow_errors)
{
	int ret;

	/* bo->lock protects the sync object state during the wait. */
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (ret && allow_errors)
		goto out;

	if (bo->mem.mem_type == mem_type)
		ret = ttm_bo_evict(bo, mem_type, false, false);

	if (ret) {
		if (allow_errors) {
			goto out;
		} else {
			/* Best-effort mode: report and carry on. */
			ret = 0;
			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
		}
	}

out:
	return ret;
}
1113
/*
 * Force every buffer off @head (an LRU list of @mem_type) by reserving,
 * delisting and evicting each entry in turn. The lru_lock is dropped
 * around the actual eviction, so the list is re-read from the front on
 * every iteration rather than traversed with a cursor.
 * Always returns 0; per-buffer errors are handled by ttm_bo_leave_list.
 */
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   struct list_head *head,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_buffer_object *entry;
	int ret;
	int put_count;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&bdev->lru_lock);

	while (!list_empty(head)) {
		entry = list_first_entry(head, struct ttm_buffer_object, lru);
		/* Hold a list reference across the unlocked region. */
		kref_get(&entry->list_kref);
		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
		put_count = ttm_bo_del_from_lru(entry);
		spin_unlock(&bdev->lru_lock);
		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);
		/* Reservation cannot fail here: no_wait is false. */
		BUG_ON(ret);
		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
		ttm_bo_unreserve(entry);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		spin_lock(&bdev->lru_lock);
	}

	spin_unlock(&bdev->lru_lock);

	return 0;
}
1147
1148int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1149{
1150 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1151 int ret = -EINVAL;
1152
1153 if (mem_type >= TTM_NUM_MEM_TYPES) {
1154 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1155 return ret;
1156 }
1157
1158 if (!man->has_type) {
1159 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1160 "memory manager type %u\n", mem_type);
1161 return ret;
1162 }
1163
1164 man->use_type = false;
1165 man->has_type = false;
1166
1167 ret = 0;
1168 if (mem_type > 0) {
1169 ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
1170
1171 spin_lock(&bdev->lru_lock);
1172 if (drm_mm_clean(&man->manager))
1173 drm_mm_takedown(&man->manager);
1174 else
1175 ret = -EBUSY;
1176
1177 spin_unlock(&bdev->lru_lock);
1178 }
1179
1180 return ret;
1181}
1182EXPORT_SYMBOL(ttm_bo_clean_mm);
1183
1184int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1185{
1186 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1187
1188 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1189 printk(KERN_ERR TTM_PFX
1190 "Illegal memory manager memory type %u.\n",
1191 mem_type);
1192 return -EINVAL;
1193 }
1194
1195 if (!man->has_type) {
1196 printk(KERN_ERR TTM_PFX
1197 "Memory type %u has not been initialized.\n",
1198 mem_type);
1199 return 0;
1200 }
1201
1202 return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
1203}
1204EXPORT_SYMBOL(ttm_bo_evict_mm);
1205
1206int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1207 unsigned long p_offset, unsigned long p_size)
1208{
1209 int ret = -EINVAL;
1210 struct ttm_mem_type_manager *man;
1211
1212 if (type >= TTM_NUM_MEM_TYPES) {
1213 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1214 return ret;
1215 }
1216
1217 man = &bdev->man[type];
1218 if (man->has_type) {
1219 printk(KERN_ERR TTM_PFX
1220 "Memory manager already initialized for type %d\n",
1221 type);
1222 return ret;
1223 }
1224
1225 ret = bdev->driver->init_mem_type(bdev, type, man);
1226 if (ret)
1227 return ret;
1228
1229 ret = 0;
1230 if (type != TTM_PL_SYSTEM) {
1231 if (!p_size) {
1232 printk(KERN_ERR TTM_PFX
1233 "Zero size memory manager type %d\n",
1234 type);
1235 return ret;
1236 }
1237 ret = drm_mm_init(&man->manager, p_offset, p_size);
1238 if (ret)
1239 return ret;
1240 }
1241 man->has_type = true;
1242 man->use_type = true;
1243 man->size = p_size;
1244
1245 INIT_LIST_HEAD(&man->lru);
1246
1247 return 0;
1248}
1249EXPORT_SYMBOL(ttm_bo_init_mm);
1250
/*
 * Tear down a buffer object device: shut down every initialized memory
 * manager, flush the delayed-destroy machinery, unregister the swapout
 * shrinker and release the address-space range manager and the dummy
 * read page.
 * Returns 0, or -EBUSY if some memory manager could not be cleaned.
 */
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;

	/* Walk managers in reverse order; system (0) is skipped by
	 * ttm_bo_clean_mm below but still gets its flags cleared. */
	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	/* Stop the delayed-destroy worker, then drain its queue. */
	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&bdev->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&bdev->lru_lock);

	ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
	/* All address-space nodes must be gone by now. */
	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	__free_page(bdev->dummy_read_page);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);
1295
1296/*
1297 * This function is intended to be called on drm driver load.
1298 * If you decide to call it from firstopen, you must protect the call
1299 * from a potentially racing ttm_bo_driver_finish in lastclose.
1300 * (This may happen on X server restart).
1301 */
1302
/*
 * Initialize a buffer object device: locks, the dummy read page, the
 * system memory manager, the mmap address-space range manager, the
 * delayed-destroy work and the swapout shrinker.
 * @file_page_offset is the start of the mmap offset space handed to
 * the address-space manager.
 * Returns 0 on success or a negative error code, with everything
 * already-initialized unwound via the out_err labels.
 */
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_mem_global *mem_glob,
		       struct ttm_bo_driver *driver, uint64_t file_page_offset)
{
	int ret = -EINVAL;

	bdev->dummy_read_page = NULL;
	rwlock_init(&bdev->vm_lock);
	spin_lock_init(&bdev->lru_lock);

	bdev->driver = driver;
	bdev->mem_glob = mem_glob;

	memset(bdev->man, 0, sizeof(bdev->man));

	/* Zeroed page returned for reads of never-written buffer pages. */
	bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
	if (unlikely(bdev->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
	if (unlikely(ret != 0))
		goto out_err1;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_err2;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	INIT_LIST_HEAD(&bdev->swap_lru);
	bdev->dev_mapping = NULL;
	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_err2;
	}

	/* Pre-computed accounting sizes used by ttm_bo_size(). */
	bdev->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

	return 0;
out_err2:
	ttm_bo_clean_mm(bdev, 0);
out_err1:
	__free_page(bdev->dummy_read_page);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
1366
1367/*
1368 * buffer object vm functions.
1369 */
1370
1371bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1372{
1373 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1374
1375 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1376 if (mem->mem_type == TTM_PL_SYSTEM)
1377 return false;
1378
1379 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1380 return false;
1381
1382 if (mem->placement & TTM_PL_FLAG_CACHED)
1383 return false;
1384 }
1385 return true;
1386}
1387
1388int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
1389 struct ttm_mem_reg *mem,
1390 unsigned long *bus_base,
1391 unsigned long *bus_offset, unsigned long *bus_size)
1392{
1393 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1394
1395 *bus_size = 0;
1396 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1397 return -EINVAL;
1398
1399 if (ttm_mem_reg_is_pci(bdev, mem)) {
1400 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1401 *bus_size = mem->num_pages << PAGE_SHIFT;
1402 *bus_base = man->io_offset;
1403 }
1404
1405 return 0;
1406}
1407
1408void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1409{
1410 struct ttm_bo_device *bdev = bo->bdev;
1411 loff_t offset = (loff_t) bo->addr_space_offset;
1412 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1413
1414 if (!bdev->dev_mapping)
1415 return;
1416
1417 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1418}
1419
1420static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1421{
1422 struct ttm_bo_device *bdev = bo->bdev;
1423 struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1424 struct rb_node *parent = NULL;
1425 struct ttm_buffer_object *cur_bo;
1426 unsigned long offset = bo->vm_node->start;
1427 unsigned long cur_offset;
1428
1429 while (*cur) {
1430 parent = *cur;
1431 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1432 cur_offset = cur_bo->vm_node->start;
1433 if (offset < cur_offset)
1434 cur = &parent->rb_left;
1435 else if (offset > cur_offset)
1436 cur = &parent->rb_right;
1437 else
1438 BUG();
1439 }
1440
1441 rb_link_node(&bo->vm_rb, parent, cur);
1442 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1443}
1444
1445/**
1446 * ttm_bo_setup_vm:
1447 *
1448 * @bo: the buffer to allocate address space for
1449 *
1450 * Allocate address space in the drm device so that applications
1451 * can mmap the buffer and access the contents. This only
1452 * applies to ttm_bo_type_device objects as others are not
1453 * placed in the drm device address space.
1454 */
1455
1456static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1457{
1458 struct ttm_bo_device *bdev = bo->bdev;
1459 int ret;
1460
1461retry_pre_get:
1462 ret = drm_mm_pre_get(&bdev->addr_space_mm);
1463 if (unlikely(ret != 0))
1464 return ret;
1465
1466 write_lock(&bdev->vm_lock);
1467 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1468 bo->mem.num_pages, 0, 0);
1469
1470 if (unlikely(bo->vm_node == NULL)) {
1471 ret = -ENOMEM;
1472 goto out_unlock;
1473 }
1474
1475 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1476 bo->mem.num_pages, 0);
1477
1478 if (unlikely(bo->vm_node == NULL)) {
1479 write_unlock(&bdev->vm_lock);
1480 goto retry_pre_get;
1481 }
1482
1483 ttm_bo_vm_insert_rb(bo);
1484 write_unlock(&bdev->vm_lock);
1485 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1486
1487 return 0;
1488out_unlock:
1489 write_unlock(&bdev->vm_lock);
1490 return ret;
1491}
1492
/*
 * Wait for @bo to become idle with respect to its sync object.
 * Called with bo->lock held; the lock is dropped around driver waits
 * and unrefs, so the sync object is re-checked after every re-lock.
 * Returns 0 when idle, -EBUSY if @no_wait and the buffer is busy, or
 * the driver wait error.
 */
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			/* Already signaled: detach and unref outside the
			 * spinlock, then re-check from the top. */
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		/* Take our own reference so the object survives the
		 * unlocked wait. */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
		/* Only detach if nobody replaced the sync object while
		 * we were waiting unlocked. */
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
1545
/*
 * Release a reservation taken with ttm_bo_block_reservation() and wake
 * everyone sleeping on the event queue. The store must precede the
 * wakeup so waiters observe reserved == 0.
 */
void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}
1551
1552int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1553 bool no_wait)
1554{
1555 int ret;
1556
1557 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
1558 if (no_wait)
1559 return -EBUSY;
1560 else if (interruptible) {
1561 ret = wait_event_interruptible
1562 (bo->event_queue, atomic_read(&bo->reserved) == 0);
1563 if (unlikely(ret != 0))
1564 return -ERESTART;
1565 } else {
1566 wait_event(bo->event_queue,
1567 atomic_read(&bo->reserved) == 0);
1568 }
1569 }
1570 return 0;
1571}
1572
/*
 * Grab the buffer for CPU writes: reserve it, wait for the GPU to go
 * idle and bump the cpu_writers count that blocks GPU-side reservation.
 * Paired with ttm_bo_synccpu_write_release().
 * Returns 0 on success or a negative error code.
 */
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
	 * makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	/* Only count ourselves as a writer if the wait succeeded. */
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
1593
/*
 * Drop a CPU-writer reference taken by ttm_bo_synccpu_write_grab();
 * wake waiters when the last writer goes away.
 */
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}
1599
1600/**
1601 * A buffer object shrink method that tries to swap out the first
1602 * buffer object on the bo_global::swap_lru list.
1603 */
1604
1605static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1606{
1607 struct ttm_bo_device *bdev =
1608 container_of(shrink, struct ttm_bo_device, shrink);
1609 struct ttm_buffer_object *bo;
1610 int ret = -EBUSY;
1611 int put_count;
1612 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1613
1614 spin_lock(&bdev->lru_lock);
1615 while (ret == -EBUSY) {
1616 if (unlikely(list_empty(&bdev->swap_lru))) {
1617 spin_unlock(&bdev->lru_lock);
1618 return -EBUSY;
1619 }
1620
1621 bo = list_first_entry(&bdev->swap_lru,
1622 struct ttm_buffer_object, swap);
1623 kref_get(&bo->list_kref);
1624
1625 /**
1626 * Reserve buffer. Since we unlock while sleeping, we need
1627 * to re-check that nobody removed us from the swap-list while
1628 * we slept.
1629 */
1630
1631 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1632 if (unlikely(ret == -EBUSY)) {
1633 spin_unlock(&bdev->lru_lock);
1634 ttm_bo_wait_unreserved(bo, false);
1635 kref_put(&bo->list_kref, ttm_bo_release_list);
1636 spin_lock(&bdev->lru_lock);
1637 }
1638 }
1639
1640 BUG_ON(ret != 0);
1641 put_count = ttm_bo_del_from_lru(bo);
1642 spin_unlock(&bdev->lru_lock);
1643
1644 while (put_count--)
1645 kref_put(&bo->list_kref, ttm_bo_ref_bug);
1646
1647 /**
1648 * Wait for GPU, then move to system cached.
1649 */
1650
1651 spin_lock(&bo->lock);
1652 ret = ttm_bo_wait(bo, false, false, false);
1653 spin_unlock(&bo->lock);
1654
1655 if (unlikely(ret != 0))
1656 goto out;
1657
1658 if ((bo->mem.placement & swap_placement) != swap_placement) {
1659 struct ttm_mem_reg evict_mem;
1660
1661 evict_mem = bo->mem;
1662 evict_mem.mm_node = NULL;
1663 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1664 evict_mem.mem_type = TTM_PL_SYSTEM;
1665
1666 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1667 false, false);
1668 if (unlikely(ret != 0))
1669 goto out;
1670 }
1671
1672 ttm_bo_unmap_virtual(bo);
1673
1674 /**
1675 * Swap out. Buffer will be swapped in again as soon as
1676 * anyone tries to access a ttm page.
1677 */
1678
1679 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1680out:
1681
1682 /**
1683 *
1684 * Unreserve without putting on LRU to avoid swapping out an
1685 * already swapped buffer.
1686 */
1687
1688 atomic_set(&bo->reserved, 0);
1689 wake_up_all(&bo->event_queue);
1690 kref_put(&bo->list_kref, ttm_bo_release_list);
1691 return ret;
1692}
1693
1694void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1695{
1696 while (ttm_bo_swapout(&bdev->shrink) == 0)
1697 ;
1698}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
new file mode 100644
index 000000000000..517c84559633
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -0,0 +1,561 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_bo_driver.h"
32#include "ttm/ttm_placement.h"
33#include <linux/io.h>
34#include <linux/highmem.h>
35#include <linux/wait.h>
36#include <linux/vmalloc.h>
37#include <linux/version.h>
38#include <linux/module.h>
39
40void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
41{
42 struct ttm_mem_reg *old_mem = &bo->mem;
43
44 if (old_mem->mm_node) {
45 spin_lock(&bo->bdev->lru_lock);
46 drm_mm_put_block(old_mem->mm_node);
47 spin_unlock(&bo->bdev->lru_lock);
48 }
49 old_mem->mm_node = NULL;
50}
51
/*
 * Move a buffer by rebinding its TTM: unbind from the old (non-system)
 * placement, switch the page caching attributes, and bind to the new
 * placement unless it is system memory. Finally adopt @new_mem as the
 * current placement, preserving the non-memtype bits of the old flags.
 * Returns 0 on success or a negative error code.
 */
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	uint32_t save_flags = old_mem->placement;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		/* Detach from the old placement and fall back to system
		 * memory before rebinding. */
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
		save_flags = old_mem->placement;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	/* Take over the new placement; ownership of its mm_node moves
	 * to bo->mem, so clear it in the caller's copy. */
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
85
86int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
87 void **virtual)
88{
89 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
90 unsigned long bus_offset;
91 unsigned long bus_size;
92 unsigned long bus_base;
93 int ret;
94 void *addr;
95
96 *virtual = NULL;
97 ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
98 if (ret || bus_size == 0)
99 return ret;
100
101 if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
102 addr = (void *)(((u8 *) man->io_addr) + bus_offset);
103 else {
104 if (mem->placement & TTM_PL_FLAG_WC)
105 addr = ioremap_wc(bus_base + bus_offset, bus_size);
106 else
107 addr = ioremap_nocache(bus_base + bus_offset, bus_size);
108 if (!addr)
109 return -ENOMEM;
110 }
111 *virtual = addr;
112 return 0;
113}
114
115void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
116 void *virtual)
117{
118 struct ttm_mem_type_manager *man;
119
120 man = &bdev->man[mem->mem_type];
121
122 if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
123 iounmap(virtual);
124}
125
126static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
127{
128 uint32_t *dstP =
129 (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
130 uint32_t *srcP =
131 (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
132
133 int i;
134 for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
135 iowrite32(ioread32(srcP++), dstP++);
136 return 0;
137}
138
139static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
140 unsigned long page)
141{
142 struct page *d = ttm_tt_get_page(ttm, page);
143 void *dst;
144
145 if (!d)
146 return -ENOMEM;
147
148 src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
149 dst = kmap(d);
150 if (!dst)
151 return -ENOMEM;
152
153 memcpy_fromio(dst, src, PAGE_SIZE);
154 kunmap(d);
155 return 0;
156}
157
158static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
159 unsigned long page)
160{
161 struct page *s = ttm_tt_get_page(ttm, page);
162 void *src;
163
164 if (!s)
165 return -ENOMEM;
166
167 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
168 src = kmap(s);
169 if (!src)
170 return -ENOMEM;
171
172 memcpy_toio(dst, src, PAGE_SIZE);
173 kunmap(s);
174 return 0;
175}
176
/*
 * Fallback move implementation: copy the buffer contents page by page
 * between the old and new placements using the CPU, choosing the copy
 * direction so that a same-type overlapping move does not corrupt data.
 * On success the buffer adopts @new_mem; for fixed memory types the TTM
 * is destroyed since the data no longer lives in it.
 * Returns 0 on success or a negative error code.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	/* Keep a copy for the final iounmap: *old_mem is overwritten
	 * with *new_mem before the out: label runs. */
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	uint32_t save_flags = old_mem->placement;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/* Both placements in system memory: nothing to copy, the TTM
	 * pages already hold the data. */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	/* For an overlapping move within the same memory type, copy
	 * backwards (high to low) when the destination starts below
	 * the end of the source. */
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL)
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
		else if (new_iomap == NULL)
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
		else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	/* Make sure all writes have reached memory before freeing the
	 * old placement. */
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);

	/* Data now lives in fixed memory; the TTM is no longer needed. */
	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
248
/*
 * Destroy callback for the placeholder objects created by
 * ttm_buffer_object_transfer(): they are plain kzalloc'ed, so kfree.
 */
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}
253
254/**
255 * ttm_buffer_object_transfer
256 *
257 * @bo: A pointer to a struct ttm_buffer_object.
258 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
259 * holding the data of @bo with the old placement.
260 *
261 * This is a utility function that may be called after an accelerated move
262 * has been scheduled. A new buffer object is created as a placeholder for
263 * the old data while it's being copied. When that buffer object is idle,
264 * it can be destroyed, releasing the space of the old placement.
265 * Returns:
266 * !0: Failure.
267 */
268
269static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
270 struct ttm_buffer_object **new_obj)
271{
272 struct ttm_buffer_object *fbo;
273 struct ttm_bo_device *bdev = bo->bdev;
274 struct ttm_bo_driver *driver = bdev->driver;
275
276 fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
277 if (!fbo)
278 return -ENOMEM;
279
280 *fbo = *bo;
281
282 /**
283 * Fix up members that we shouldn't copy directly:
284 * TODO: Explicit member copy would probably be better here.
285 */
286
287 spin_lock_init(&fbo->lock);
288 init_waitqueue_head(&fbo->event_queue);
289 INIT_LIST_HEAD(&fbo->ddestroy);
290 INIT_LIST_HEAD(&fbo->lru);
291 INIT_LIST_HEAD(&fbo->swap);
292 fbo->vm_node = NULL;
293
294 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
295 if (fbo->mem.mm_node)
296 fbo->mem.mm_node->private = (void *)fbo;
297 kref_init(&fbo->list_kref);
298 kref_init(&fbo->kref);
299 fbo->destroy = &ttm_transfered_destroy;
300
301 *new_obj = fbo;
302 return 0;
303}
304
/*
 * Derive the kernel page protection to use for mapping pages with the
 * given TTM caching flags, starting from @tmp (usually PAGE_KERNEL).
 * Architecture-specific: write-combine or uncached on x86/ia64,
 * no-cache (plus guarded for fully uncached) on powerpc, uncached on
 * sparc; other architectures get @tmp back unchanged.
 */
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		/* 486 and later support marking pages uncacheable. */
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
332
333static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
334 unsigned long bus_base,
335 unsigned long bus_offset,
336 unsigned long bus_size,
337 struct ttm_bo_kmap_obj *map)
338{
339 struct ttm_bo_device *bdev = bo->bdev;
340 struct ttm_mem_reg *mem = &bo->mem;
341 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
342
343 if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
344 map->bo_kmap_type = ttm_bo_map_premapped;
345 map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
346 } else {
347 map->bo_kmap_type = ttm_bo_map_iomap;
348 if (mem->placement & TTM_PL_FLAG_WC)
349 map->virtual = ioremap_wc(bus_base + bus_offset,
350 bus_size);
351 else
352 map->virtual = ioremap_nocache(bus_base + bus_offset,
353 bus_size);
354 }
355 return (!map->virtual) ? -ENOMEM : 0;
356}
357
358static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
359 unsigned long start_page,
360 unsigned long num_pages,
361 struct ttm_bo_kmap_obj *map)
362{
363 struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
364 struct ttm_tt *ttm = bo->ttm;
365 struct page *d;
366 int i;
367
368 BUG_ON(!ttm);
369 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
370 /*
371 * We're mapping a single page, and the desired
372 * page protection is consistent with the bo.
373 */
374
375 map->bo_kmap_type = ttm_bo_map_kmap;
376 map->page = ttm_tt_get_page(ttm, start_page);
377 map->virtual = kmap(map->page);
378 } else {
379 /*
380 * Populate the part we're mapping;
381 */
382 for (i = start_page; i < start_page + num_pages; ++i) {
383 d = ttm_tt_get_page(ttm, i);
384 if (!d)
385 return -ENOMEM;
386 }
387
388 /*
389 * We need to use vmap to get the desired page protection
390 * or to make the buffer object look contigous.
391 */
392 prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
393 PAGE_KERNEL :
394 ttm_io_prot(mem->placement, PAGE_KERNEL);
395 map->bo_kmap_type = ttm_bo_map_vmap;
396 map->virtual = vmap(ttm->pages + start_page, num_pages,
397 0, prot);
398 }
399 return (!map->virtual) ? -ENOMEM : 0;
400}
401
402int ttm_bo_kmap(struct ttm_buffer_object *bo,
403 unsigned long start_page, unsigned long num_pages,
404 struct ttm_bo_kmap_obj *map)
405{
406 int ret;
407 unsigned long bus_base;
408 unsigned long bus_offset;
409 unsigned long bus_size;
410
411 BUG_ON(!list_empty(&bo->swap));
412 map->virtual = NULL;
413 if (num_pages > bo->num_pages)
414 return -EINVAL;
415 if (start_page > bo->num_pages)
416 return -EINVAL;
417#if 0
418 if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
419 return -EPERM;
420#endif
421 ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
422 &bus_offset, &bus_size);
423 if (ret)
424 return ret;
425 if (bus_size == 0) {
426 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
427 } else {
428 bus_offset += start_page << PAGE_SHIFT;
429 bus_size = num_pages << PAGE_SHIFT;
430 return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
431 }
432}
433EXPORT_SYMBOL(ttm_bo_kmap);
434
435void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
436{
437 if (!map->virtual)
438 return;
439 switch (map->bo_kmap_type) {
440 case ttm_bo_map_iomap:
441 iounmap(map->virtual);
442 break;
443 case ttm_bo_map_vmap:
444 vunmap(map->virtual);
445 break;
446 case ttm_bo_map_kmap:
447 kunmap(map->page);
448 break;
449 case ttm_bo_map_premapped:
450 break;
451 default:
452 BUG();
453 }
454 map->virtual = NULL;
455 map->page = NULL;
456}
457EXPORT_SYMBOL(ttm_bo_kunmap);
458
459int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
460 unsigned long dst_offset,
461 unsigned long *pfn, pgprot_t *prot)
462{
463 struct ttm_mem_reg *mem = &bo->mem;
464 struct ttm_bo_device *bdev = bo->bdev;
465 unsigned long bus_offset;
466 unsigned long bus_size;
467 unsigned long bus_base;
468 int ret;
469 ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
470 &bus_size);
471 if (ret)
472 return -EINVAL;
473 if (bus_size != 0)
474 *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
475 else
476 if (!bo->ttm)
477 return -EINVAL;
478 else
479 *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
480 dst_offset >>
481 PAGE_SHIFT));
482 *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
483 PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
484
485 return 0;
486}
487
/**
 * ttm_bo_move_accel_cleanup - Finish an accelerated (GPU-copy) buffer move.
 * @bo: the buffer object that was moved.
 * @sync_obj: sync object signaled when the copy operation completes.
 * @sync_obj_arg: driver-private argument stored alongside the sync object.
 * @evict: true if this move is an eviction (completed synchronously here).
 * @no_wait: unused in this function; part of the common move signature.
 * @new_mem: the memory region the object was moved to.
 *
 * On eviction, waits for the copy to finish, frees the old node and
 * (for fixed memory types) destroys the now-unneeded ttm. Otherwise
 * hands the old resources to a throw-away "ghost" buffer object so
 * their release can be pipelined behind the copy fence.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	uint32_t save_flags = old_mem->placement;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	/* Detach any previous sync object before installing the new one. */
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	/* NOTE(review): tmp_obj's reference is never dropped anywhere in
	 * this function — looks like a sync-object leak; confirm against
	 * the driver's sync_obj_ref/unref contract. */
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		driver->sync_obj_unref(&bo->sync_obj);

		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		/* Fixed memory is not ttm-backed: once evicted from it,
		 * the old ttm can be torn down. */
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	/* NOTE(review): save_flags is a local copy and the masked result
	 * is discarded, so this statement appears to have no effect. */
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
new file mode 100644
index 000000000000..27b146c54fbc
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -0,0 +1,454 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include <ttm/ttm_module.h>
32#include <ttm/ttm_bo_driver.h>
33#include <ttm/ttm_placement.h>
34#include <linux/mm.h>
35#include <linux/version.h>
36#include <linux/rbtree.h>
37#include <linux/module.h>
38#include <linux/uaccess.h>
39
40#define TTM_BO_VM_NUM_PREFAULT 16
41
42static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
43 unsigned long page_start,
44 unsigned long num_pages)
45{
46 struct rb_node *cur = bdev->addr_space_rb.rb_node;
47 unsigned long cur_offset;
48 struct ttm_buffer_object *bo;
49 struct ttm_buffer_object *best_bo = NULL;
50
51 while (likely(cur != NULL)) {
52 bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
53 cur_offset = bo->vm_node->start;
54 if (page_start >= cur_offset) {
55 cur = cur->rb_right;
56 best_bo = bo;
57 if (page_start == cur_offset)
58 break;
59 } else
60 cur = cur->rb_left;
61 }
62
63 if (unlikely(best_bo == NULL))
64 return NULL;
65
66 if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
67 (page_start + num_pages)))
68 return NULL;
69
70 return best_bo;
71}
72
/*
 * Page-fault handler for TTM buffer-object mappings. Trylocks the bo
 * (retrying the fault on contention), waits out any pipelined move,
 * then inserts the faulting pfn plus up to TTM_BO_VM_NUM_PREFAULT
 * speculative pages into the vma.
 */
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	bool is_iomem;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bo->lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bo->lock);
		if (unlikely(ret != 0)) {
			/* -ERESTART means a signal: retry the fault. */
			retval = (ret != -ERESTART) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bo->lock);


	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* Non-zero bus size means the bo lives in an I/O aperture. */
	is_iomem = (bus_size != 0);

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */

	if (is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {

		if (is_iomem)
			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
			    page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_unlock;

		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}

out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
211
212static void ttm_bo_vm_open(struct vm_area_struct *vma)
213{
214 struct ttm_buffer_object *bo =
215 (struct ttm_buffer_object *)vma->vm_private_data;
216
217 (void)ttm_bo_reference(bo);
218}
219
220static void ttm_bo_vm_close(struct vm_area_struct *vma)
221{
222 struct ttm_buffer_object *bo =
223 (struct ttm_buffer_object *)vma->vm_private_data;
224
225 ttm_bo_unref(&bo);
226 vma->vm_private_data = NULL;
227}
228
/* VM operations used by both ttm_bo_mmap() and ttm_fbdev_mmap() mappings. */
static struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
234
235int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
236 struct ttm_bo_device *bdev)
237{
238 struct ttm_bo_driver *driver;
239 struct ttm_buffer_object *bo;
240 int ret;
241
242 read_lock(&bdev->vm_lock);
243 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
244 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
245 if (likely(bo != NULL))
246 ttm_bo_reference(bo);
247 read_unlock(&bdev->vm_lock);
248
249 if (unlikely(bo == NULL)) {
250 printk(KERN_ERR TTM_PFX
251 "Could not find buffer object to map.\n");
252 return -EINVAL;
253 }
254
255 driver = bo->bdev->driver;
256 if (unlikely(!driver->verify_access)) {
257 ret = -EPERM;
258 goto out_unref;
259 }
260 ret = driver->verify_access(bo, filp);
261 if (unlikely(ret != 0))
262 goto out_unref;
263
264 vma->vm_ops = &ttm_bo_vm_ops;
265
266 /*
267 * Note: We're transferring the bo reference to
268 * vma->vm_private_data here.
269 */
270
271 vma->vm_private_data = bo;
272 vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
273 return 0;
274out_unref:
275 ttm_bo_unref(&bo);
276 return ret;
277}
278EXPORT_SYMBOL(ttm_bo_mmap);
279
280int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
281{
282 if (vma->vm_pgoff != 0)
283 return -EACCES;
284
285 vma->vm_ops = &ttm_bo_vm_ops;
286 vma->vm_private_data = ttm_bo_reference(bo);
287 vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
288 return 0;
289}
290EXPORT_SYMBOL(ttm_fbdev_mmap);
291
292
293ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
294 const char __user *wbuf, char __user *rbuf, size_t count,
295 loff_t *f_pos, bool write)
296{
297 struct ttm_buffer_object *bo;
298 struct ttm_bo_driver *driver;
299 struct ttm_bo_kmap_obj map;
300 unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
301 unsigned long kmap_offset;
302 unsigned long kmap_end;
303 unsigned long kmap_num;
304 size_t io_size;
305 unsigned int page_offset;
306 char *virtual;
307 int ret;
308 bool no_wait = false;
309 bool dummy;
310
311 read_lock(&bdev->vm_lock);
312 bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
313 if (likely(bo != NULL))
314 ttm_bo_reference(bo);
315 read_unlock(&bdev->vm_lock);
316
317 if (unlikely(bo == NULL))
318 return -EFAULT;
319
320 driver = bo->bdev->driver;
321 if (unlikely(driver->verify_access)) {
322 ret = -EPERM;
323 goto out_unref;
324 }
325
326 ret = driver->verify_access(bo, filp);
327 if (unlikely(ret != 0))
328 goto out_unref;
329
330 kmap_offset = dev_offset - bo->vm_node->start;
331 if (unlikely(kmap_offset) >= bo->num_pages) {
332 ret = -EFBIG;
333 goto out_unref;
334 }
335
336 page_offset = *f_pos & ~PAGE_MASK;
337 io_size = bo->num_pages - kmap_offset;
338 io_size = (io_size << PAGE_SHIFT) - page_offset;
339 if (count < io_size)
340 io_size = count;
341
342 kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
343 kmap_num = kmap_end - kmap_offset + 1;
344
345 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
346
347 switch (ret) {
348 case 0:
349 break;
350 case -ERESTART:
351 ret = -EINTR;
352 goto out_unref;
353 case -EBUSY:
354 ret = -EAGAIN;
355 goto out_unref;
356 default:
357 goto out_unref;
358 }
359
360 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
361 if (unlikely(ret != 0)) {
362 ttm_bo_unreserve(bo);
363 goto out_unref;
364 }
365
366 virtual = ttm_kmap_obj_virtual(&map, &dummy);
367 virtual += page_offset;
368
369 if (write)
370 ret = copy_from_user(virtual, wbuf, io_size);
371 else
372 ret = copy_to_user(rbuf, virtual, io_size);
373
374 ttm_bo_kunmap(&map);
375 ttm_bo_unreserve(bo);
376 ttm_bo_unref(&bo);
377
378 if (unlikely(ret != 0))
379 return -EFBIG;
380
381 *f_pos += io_size;
382
383 return io_size;
384out_unref:
385 ttm_bo_unref(&bo);
386 return ret;
387}
388
389ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
390 char __user *rbuf, size_t count, loff_t *f_pos,
391 bool write)
392{
393 struct ttm_bo_kmap_obj map;
394 unsigned long kmap_offset;
395 unsigned long kmap_end;
396 unsigned long kmap_num;
397 size_t io_size;
398 unsigned int page_offset;
399 char *virtual;
400 int ret;
401 bool no_wait = false;
402 bool dummy;
403
404 kmap_offset = (*f_pos >> PAGE_SHIFT);
405 if (unlikely(kmap_offset) >= bo->num_pages)
406 return -EFBIG;
407
408 page_offset = *f_pos & ~PAGE_MASK;
409 io_size = bo->num_pages - kmap_offset;
410 io_size = (io_size << PAGE_SHIFT) - page_offset;
411 if (count < io_size)
412 io_size = count;
413
414 kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
415 kmap_num = kmap_end - kmap_offset + 1;
416
417 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
418
419 switch (ret) {
420 case 0:
421 break;
422 case -ERESTART:
423 return -EINTR;
424 case -EBUSY:
425 return -EAGAIN;
426 default:
427 return ret;
428 }
429
430 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
431 if (unlikely(ret != 0)) {
432 ttm_bo_unreserve(bo);
433 return ret;
434 }
435
436 virtual = ttm_kmap_obj_virtual(&map, &dummy);
437 virtual += page_offset;
438
439 if (write)
440 ret = copy_from_user(virtual, wbuf, io_size);
441 else
442 ret = copy_to_user(rbuf, virtual, io_size);
443
444 ttm_bo_kunmap(&map);
445 ttm_bo_unreserve(bo);
446 ttm_bo_unref(&bo);
447
448 if (unlikely(ret != 0))
449 return ret;
450
451 *f_pos += io_size;
452
453 return io_size;
454}
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
new file mode 100644
index 000000000000..0b14eb1972b8
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_global.c
@@ -0,0 +1,114 @@
1/**************************************************************************
2 *
3 * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_module.h"
32#include <linux/mutex.h>
33#include <linux/slab.h>
34#include <linux/module.h>
35
/*
 * One process-wide shared object, lazily created on first reference
 * and destroyed when the last reference is dropped.
 */
struct ttm_global_item {
	struct mutex mutex;	/* Protects object and refcount. */
	void *object;		/* The shared object; NULL when unreferenced. */
	int refcount;		/* Outstanding ttm_global_references. */
};

/* One slot per global object type. */
static struct ttm_global_item glob[TTM_GLOBAL_NUM];
43
44void ttm_global_init(void)
45{
46 int i;
47
48 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
49 struct ttm_global_item *item = &glob[i];
50 mutex_init(&item->mutex);
51 item->object = NULL;
52 item->refcount = 0;
53 }
54}
55
56void ttm_global_release(void)
57{
58 int i;
59 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
60 struct ttm_global_item *item = &glob[i];
61 BUG_ON(item->object != NULL);
62 BUG_ON(item->refcount != 0);
63 }
64}
65
66int ttm_global_item_ref(struct ttm_global_reference *ref)
67{
68 int ret;
69 struct ttm_global_item *item = &glob[ref->global_type];
70 void *object;
71
72 mutex_lock(&item->mutex);
73 if (item->refcount == 0) {
74 item->object = kmalloc(ref->size, GFP_KERNEL);
75 if (unlikely(item->object == NULL)) {
76 ret = -ENOMEM;
77 goto out_err;
78 }
79
80 ref->object = item->object;
81 ret = ref->init(ref);
82 if (unlikely(ret != 0))
83 goto out_err;
84
85 ++item->refcount;
86 }
87 ref->object = item->object;
88 object = item->object;
89 mutex_unlock(&item->mutex);
90 return 0;
91out_err:
92 kfree(item->object);
93 mutex_unlock(&item->mutex);
94 item->object = NULL;
95 return ret;
96}
97EXPORT_SYMBOL(ttm_global_item_ref);
98
/**
 * ttm_global_item_unref - Drop a reference on a global object.
 * @ref: the reference previously set up by ttm_global_item_ref().
 *
 * The last unref invokes the release hook and frees the object.
 */
void ttm_global_item_unref(struct ttm_global_reference *ref)
{
	struct ttm_global_item *item = &glob[ref->global_type];

	mutex_lock(&item->mutex);
	BUG_ON(item->refcount == 0);
	BUG_ON(ref->object != item->object);
	if (--item->refcount == 0) {
		ref->release(ref);
		kfree(item->object);
		item->object = NULL;
	}
	mutex_unlock(&item->mutex);
}
EXPORT_SYMBOL(ttm_global_item_unref);
114
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
new file mode 100644
index 000000000000..87323d4ff68d
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -0,0 +1,234 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "ttm/ttm_memory.h"
29#include <linux/spinlock.h>
30#include <linux/sched.h>
31#include <linux/wait.h>
32#include <linux/mm.h>
33#include <linux/module.h>
34
35#define TTM_PFX "[TTM] "
36#define TTM_MEMORY_ALLOC_RETRIES 4
37
38/**
39 * At this point we only support a single shrink callback.
40 * Extend this if needed, perhaps using a linked list of callbacks.
41 * Note that this function is reentrant:
42 * many threads may try to swap out at any given time.
43 */
44
/*
 * Swap out buffer memory until accounted usage drops below the
 * applicable limits, leaving @extra bytes of headroom. The targets
 * depend on who is asking: the background worker aims at the swap
 * limits, privileged callers at the emergency limits, others at the
 * normal maximums.
 */
static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;
	uint64_t target;
	uint64_t total_target;

	spin_lock(&glob->lock);
	if (glob->shrink == NULL)
		goto out;

	if (from_workqueue) {
		target = glob->swap_limit;
		total_target = glob->total_memory_swap_limit;
	} else if (capable(CAP_SYS_ADMIN)) {
		total_target = glob->emer_total_memory;
		target = glob->emer_memory;
	} else {
		total_target = glob->max_total_memory;
		target = glob->max_memory;
	}

	/* Reserve @extra bytes of headroom, clamping the targets at 0. */
	total_target = (extra >= total_target) ? 0 : total_target - extra;
	target = (extra >= target) ? 0 : target - extra;

	while (glob->used_memory > target ||
	       glob->used_total_memory > total_target) {
		shrink = glob->shrink;
		/* Drop the lock across the callback; the shrink hook
		 * may block and re-enter the accounting. */
		spin_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			goto out;
	}
out:
	spin_unlock(&glob->lock);
}
83
/* Workqueue callback: swap out down to the background swap limits. */
static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_mem_global *glob =
	    container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL);
}
91
92int ttm_mem_global_init(struct ttm_mem_global *glob)
93{
94 struct sysinfo si;
95 uint64_t mem;
96
97 spin_lock_init(&glob->lock);
98 glob->swap_queue = create_singlethread_workqueue("ttm_swap");
99 INIT_WORK(&glob->work, ttm_shrink_work);
100 init_waitqueue_head(&glob->queue);
101
102 si_meminfo(&si);
103
104 mem = si.totalram - si.totalhigh;
105 mem *= si.mem_unit;
106
107 glob->max_memory = mem >> 1;
108 glob->emer_memory = (mem >> 1) + (mem >> 2);
109 glob->swap_limit = glob->max_memory - (mem >> 3);
110 glob->used_memory = 0;
111 glob->used_total_memory = 0;
112 glob->shrink = NULL;
113
114 mem = si.totalram;
115 mem *= si.mem_unit;
116
117 glob->max_total_memory = mem >> 1;
118 glob->emer_total_memory = (mem >> 1) + (mem >> 2);
119
120 glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3);
121
122 printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n",
123 glob->max_total_memory >> 20);
124 printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n",
125 glob->max_memory >> 20);
126
127 return 0;
128}
129EXPORT_SYMBOL(ttm_mem_global_init);
130
/**
 * ttm_mem_global_release - Tear down the global TTM memory accounting.
 * @glob: the accounting object.
 *
 * Drains and destroys the swap workqueue. Logs the memory still
 * accounted as used; a non-zero value indicates a leak.
 */
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n",
	       (unsigned long long)glob->used_total_memory);
	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
}
EXPORT_SYMBOL(ttm_mem_global_release);
140
141static inline void ttm_check_swapping(struct ttm_mem_global *glob)
142{
143 bool needs_swapping;
144
145 spin_lock(&glob->lock);
146 needs_swapping = (glob->used_memory > glob->swap_limit ||
147 glob->used_total_memory >
148 glob->total_memory_swap_limit);
149 spin_unlock(&glob->lock);
150
151 if (unlikely(needs_swapping))
152 (void)queue_work(glob->swap_queue, &glob->work);
153
154}
155
/**
 * ttm_mem_global_free - Return accounted memory.
 * @glob: the accounting object.
 * @amount: number of bytes to return.
 * @himem: true if the memory was highmem (not counted against the
 * lowmem/object limit).
 *
 * Wakes up anyone waiting on the accounting queue.
 */
void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount, bool himem)
{
	spin_lock(&glob->lock);
	glob->used_total_memory -= amount;
	if (!himem)
		glob->used_memory -= amount;
	wake_up_all(&glob->queue);
	spin_unlock(&glob->lock);
}
166
167static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
168 uint64_t amount, bool himem, bool reserve)
169{
170 uint64_t limit;
171 uint64_t lomem_limit;
172 int ret = -ENOMEM;
173
174 spin_lock(&glob->lock);
175
176 if (capable(CAP_SYS_ADMIN)) {
177 limit = glob->emer_total_memory;
178 lomem_limit = glob->emer_memory;
179 } else {
180 limit = glob->max_total_memory;
181 lomem_limit = glob->max_memory;
182 }
183
184 if (unlikely(glob->used_total_memory + amount > limit))
185 goto out_unlock;
186 if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
187 goto out_unlock;
188
189 if (reserve) {
190 glob->used_total_memory += amount;
191 if (!himem)
192 glob->used_memory += amount;
193 }
194 ret = 0;
195out_unlock:
196 spin_unlock(&glob->lock);
197 ttm_check_swapping(glob);
198
199 return ret;
200}
201
202int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
203 bool no_wait, bool interruptible, bool himem)
204{
205 int count = TTM_MEMORY_ALLOC_RETRIES;
206
207 while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true)
208 != 0)) {
209 if (no_wait)
210 return -ENOMEM;
211 if (unlikely(count-- == 0))
212 return -ENOMEM;
213 ttm_shrink(glob, false, memory + (memory >> 2) + 16);
214 }
215
216 return 0;
217}
218
219size_t ttm_round_pot(size_t size)
220{
221 if ((size & (size - 1)) == 0)
222 return size;
223 else if (size > PAGE_SIZE)
224 return PAGE_ALIGN(size);
225 else {
226 size_t tmp_size = 4;
227
228 while (tmp_size < size)
229 tmp_size <<= 1;
230
231 return tmp_size;
232 }
233 return 0;
234}
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
new file mode 100644
index 000000000000..59ce8191d584
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -0,0 +1,50 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 * Jerome Glisse
30 */
31#include <linux/module.h>
32#include <ttm/ttm_module.h>
33
/* Module load: initialize the global-object bookkeeping. */
static int __init ttm_init(void)
{
	ttm_global_init();
	return 0;
}
39
/* Module unload: verify all global objects have been released. */
static void __exit ttm_exit(void)
{
	ttm_global_release();
}
44
45module_init(ttm_init);
46module_exit(ttm_exit);
47
48MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
49MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
50MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
new file mode 100644
index 000000000000..c27ab3a877ad
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -0,0 +1,635 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include <linux/version.h>
32#include <linux/vmalloc.h>
33#include <linux/sched.h>
34#include <linux/highmem.h>
35#include <linux/pagemap.h>
36#include <linux/file.h>
37#include <linux/swap.h>
38#include "ttm/ttm_module.h"
39#include "ttm/ttm_bo_driver.h"
40#include "ttm/ttm_placement.h"
41
42static int ttm_tt_swapin(struct ttm_tt *ttm);
43
44#if defined(CONFIG_X86)
/*
 * Flush a single page out of the CPU caches with clflush.
 *
 * The page is temporarily mapped with kmap_atomic() so highmem pages can
 * be flushed too; clflush is issued once per CPU cache line.
 */
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);

	kunmap_atomic(page_virtual, KM_USER0);
}
60
/*
 * Flush an array of pages from the CPU caches, bracketed by memory
 * barriers so the flush is ordered against surrounding accesses.
 */
static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long idx;

	mb();
	for (idx = 0; idx < num_pages; ++idx)
		ttm_tt_clflush_page(pages[idx]);
	mb();
}
71#else
/* Intentionally empty cross-CPU callback used by ttm_tt_cache_flush(). */
static void ttm_tt_ipi_handler(void *null)
{
}
76#endif
77
/*
 * Flush the CPU caches for an array of pages, typically before a
 * caching-state change.
 *
 * On x86 with clflush support the pages are flushed line by line.
 * NOTE(review): on non-x86 the IPI handler body is empty, so this relies
 * entirely on side effects of the cross-call; on x86 without clflush
 * nothing is flushed at all — confirm this is intentional.
 */
void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		ttm_tt_cache_flush_clflush(pages, num_pages);
		return;
	}
#else
	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR TTM_PFX
		       "Timed out waiting for drm cache flush.\n");
#endif
}
92
93/**
94 * Allocates storage for pointers to the pages that back the ttm.
95 *
96 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
97 */
98static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
99{
100 unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
101 ttm->pages = NULL;
102
103 if (size <= PAGE_SIZE)
104 ttm->pages = kzalloc(size, GFP_KERNEL);
105
106 if (!ttm->pages) {
107 ttm->pages = vmalloc_user(size);
108 if (ttm->pages)
109 ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
110 }
111}
112
113static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
114{
115 if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
116 vfree(ttm->pages);
117 ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
118 } else {
119 kfree(ttm->pages);
120 }
121 ttm->pages = NULL;
122}
123
124static struct page *ttm_tt_alloc_page(unsigned page_flags)
125{
126 if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
127 return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
128
129 return alloc_page(GFP_HIGHUSER);
130}
131
/*
 * Release the user-space pages pinned into this ttm by get_user_pages().
 *
 * Dirties writable pages, returns the global-memory accounting per page
 * and drops each page reference.  Leaves the ttm in tt_unpopulated with
 * the himem/lomem directory bookkeeping reset.
 */
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	/* Detach the backend first so it no longer references the pages. */
	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		/* The shared dummy read page is never a pinned user page. */
		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		/* User pages were accounted as lowmem in ttm_tt_set_user(). */
		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
168
/*
 * Return the page at @index, allocating until that slot is populated.
 *
 * Non-obvious loop: a freshly allocated highmem page is stored at the top
 * of the directory (--first_himem_page) and a lowmem page at the bottom
 * (++last_lomem_page), so a single allocation does not necessarily fill
 * slot @index.  The loop keeps allocating (and accounting each page with
 * the global memory limit) until @index itself is non-NULL.
 * Returns NULL on allocation or accounting failure.
 */
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_bo_device *bdev = ttm->bdev;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		if (PageHighMem(p)) {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, true);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[--ttm->first_himem_page] = p;
		} else {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, false);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[++ttm->last_lomem_page] = p;
		}
	}
	return p;
out_err:
	/* The page was never entered into the directory; just drop it. */
	put_page(p);
	return NULL;
}
203
/*
 * Return (allocating if necessary) the page at @index, swapping the ttm
 * contents back in first if it was previously swapped out.
 * Returns NULL on allocation or swap-in failure.
 */
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}
215
/*
 * Ensure all pages of the ttm are allocated and hand them to the backend.
 *
 * Swaps the ttm back in first if it was swapped out.  Returns 0 on
 * success, -ENOMEM on page allocation failure, or the swap-in error.
 * The backend is assumed non-NULL: ttm_tt_create() fails when no backend
 * can be created.
 */
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}
245
#ifdef CONFIG_X86
/*
 * Change the caching attribute of one page in the linear kernel map.
 * Highmem pages have no permanent kernel mapping and are skipped.
 *
 * NOTE(review): tt_wc uses set_memory_wc() on the page's linear address
 * while tt_cached/uncached use the set_pages_*() helpers — confirm that
 * set_pages_wb() correctly undoes the WC attribute.
 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
#else /* CONFIG_X86 */
/* Non-x86: kernel-map caching attributes are not changed; always succeeds. */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	return 0;
}
#endif /* CONFIG_X86 */
269
/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 *
 * The ttm must be populated first unless we are returning to the cached
 * state.  If the ttm was cached, the CPU caches are flushed before the
 * attribute change.  On failure, the pages already converted are rolled
 * back to the previous caching state.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	/* Flush dirty cache lines before making pages WC/uncached. */
	if (ttm->caching_state == tt_cached)
		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Best-effort rollback of pages [0, i) to the old caching state. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}
318
319int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
320{
321 enum ttm_caching_state state;
322
323 if (placement & TTM_PL_FLAG_WC)
324 state = tt_wc;
325 else if (placement & TTM_PL_FLAG_UNCACHED)
326 state = tt_uncached;
327 else
328 state = tt_cached;
329
330 return ttm_tt_set_caching(ttm, state);
331}
332
/*
 * Free all pages the ttm allocated itself (non-user ttms).
 *
 * Clears the backend and restores the pages to the cached state before
 * returning them to the allocator.  A page with an elevated refcount is
 * deliberately leaked (with an error message) rather than freed while
 * still referenced.
 */
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			/* Return the accounting taken in __ttm_tt_get_page(). */
			ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
					    PageHighMem(cur_page));
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
359
/*
 * Final teardown of a ttm: destroy the backend, release the backing
 * pages (user-pinned or self-allocated), free the page directory, drop
 * any non-persistant swap storage, and free the ttm structure itself.
 * NULL is tolerated.
 */
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	/* Persistant swap storage is owned by the caller; don't drop it. */
	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}
388
/*
 * Pin the user-space pages backing a user ttm with get_user_pages().
 *
 * The pages are accounted as lowmem against the global memory limit.
 * For a writable ttm a partial pin is an error and everything is
 * released again.
 * NOTE(review): for a read-only ttm a partial pin is accepted and the
 * missing slots stay NULL — presumably satisfied by dummy_read_page
 * later; confirm against the fault path.
 */
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		/* Partial pin: drop what we got and return the accounting. */
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
		return -ENOMEM;
	}

	/* Remember who to re-pin from if the ttm is swapped out/in. */
	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}
427
/*
 * Allocate and initialize a struct ttm_tt covering @size bytes.
 *
 * Creates the page-pointer directory and the driver backend; the pages
 * themselves are allocated lazily on first use.  Returns NULL on any
 * failure (partial construction is undone via ttm_tt_destroy()).
 */
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->bdev = bdev;

	/* Round the byte size up to whole pages. */
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}
466
467void ttm_tt_unbind(struct ttm_tt *ttm)
468{
469 int ret;
470 struct ttm_backend *be = ttm->be;
471
472 if (ttm->state == tt_bound) {
473 ret = be->func->unbind(be);
474 BUG_ON(ret);
475 ttm->state = tt_unbound;
476 }
477}
478
/*
 * Bind the ttm's pages into the GPU aperture described by @bo_mem.
 *
 * Populates the ttm first if needed.  A user ttm is marked dirty once
 * bound, since the GPU may write to it.  Binding an already-bound ttm is
 * a no-op.  Returns 0 on success or a negative errno.
 */
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
509
510static int ttm_tt_swapin(struct ttm_tt *ttm)
511{
512 struct address_space *swap_space;
513 struct file *swap_storage;
514 struct page *from_page;
515 struct page *to_page;
516 void *from_virtual;
517 void *to_virtual;
518 int i;
519 int ret;
520
521 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
522 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
523 ttm->num_pages);
524 if (unlikely(ret != 0))
525 return ret;
526
527 ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
528 return 0;
529 }
530
531 swap_storage = ttm->swap_storage;
532 BUG_ON(swap_storage == NULL);
533
534 swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
535
536 for (i = 0; i < ttm->num_pages; ++i) {
537 from_page = read_mapping_page(swap_space, i, NULL);
538 if (IS_ERR(from_page))
539 goto out_err;
540 to_page = __ttm_tt_get_page(ttm, i);
541 if (unlikely(to_page == NULL))
542 goto out_err;
543
544 preempt_disable();
545 from_virtual = kmap_atomic(from_page, KM_USER0);
546 to_virtual = kmap_atomic(to_page, KM_USER1);
547 memcpy(to_virtual, from_virtual, PAGE_SIZE);
548 kunmap_atomic(to_virtual, KM_USER1);
549 kunmap_atomic(from_virtual, KM_USER0);
550 preempt_enable();
551 page_cache_release(from_page);
552 }
553
554 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
555 fput(swap_storage);
556 ttm->swap_storage = NULL;
557 ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
558
559 return 0;
560out_err:
561 ttm_tt_free_alloced_pages(ttm);
562 return -ENOMEM;
563}
564
565int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
566{
567 struct address_space *swap_space;
568 struct file *swap_storage;
569 struct page *from_page;
570 struct page *to_page;
571 void *from_virtual;
572 void *to_virtual;
573 int i;
574
575 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
576 BUG_ON(ttm->caching_state != tt_cached);
577
578 /*
579 * For user buffers, just unpin the pages, as there should be
580 * vma references.
581 */
582
583 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
584 ttm_tt_free_user_pages(ttm);
585 ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
586 ttm->swap_storage = NULL;
587 return 0;
588 }
589
590 if (!persistant_swap_storage) {
591 swap_storage = shmem_file_setup("ttm swap",
592 ttm->num_pages << PAGE_SHIFT,
593 0);
594 if (unlikely(IS_ERR(swap_storage))) {
595 printk(KERN_ERR "Failed allocating swap storage.\n");
596 return -ENOMEM;
597 }
598 } else
599 swap_storage = persistant_swap_storage;
600
601 swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
602
603 for (i = 0; i < ttm->num_pages; ++i) {
604 from_page = ttm->pages[i];
605 if (unlikely(from_page == NULL))
606 continue;
607 to_page = read_mapping_page(swap_space, i, NULL);
608 if (unlikely(to_page == NULL))
609 goto out_err;
610
611 preempt_disable();
612 from_virtual = kmap_atomic(from_page, KM_USER0);
613 to_virtual = kmap_atomic(to_page, KM_USER1);
614 memcpy(to_virtual, from_virtual, PAGE_SIZE);
615 kunmap_atomic(to_virtual, KM_USER1);
616 kunmap_atomic(from_virtual, KM_USER0);
617 preempt_enable();
618 set_page_dirty(to_page);
619 mark_page_accessed(to_page);
620 page_cache_release(to_page);
621 }
622
623 ttm_tt_free_alloced_pages(ttm);
624 ttm->swap_storage = swap_storage;
625 ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
626 if (persistant_swap_storage)
627 ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
628
629 return 0;
630out_err:
631 if (!persistant_swap_storage)
632 fput(swap_storage);
633
634 return -ENOMEM;
635}
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
new file mode 100644
index 000000000000..cd22ab4b495c
--- /dev/null
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -0,0 +1,618 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_BO_API_H_
32#define _TTM_BO_API_H_
33
34#include "drm_hashtab.h"
35#include <linux/kref.h>
36#include <linux/list.h>
37#include <linux/wait.h>
38#include <linux/mutex.h>
39#include <linux/mm.h>
40#include <linux/rbtree.h>
41#include <linux/bitmap.h>
42
43struct ttm_bo_device;
44
45struct drm_mm_node;
46
/**
 * struct ttm_mem_reg
 *
 * @mm_node: Memory manager node.
 * @size: Requested size of memory region.
 * @num_pages: Actual size of memory region in pages.
 * @page_alignment: Page alignment.
 * @mem_type: The memory type index the region lives in.
 * @placement: Placement flags.
 *
 * Structure indicating the placement and space resources used by a
 * buffer object.
 */

struct ttm_mem_reg {
	struct drm_mm_node *mm_node;
	unsigned long size;
	unsigned long num_pages;
	uint32_t page_alignment;
	uint32_t mem_type;
	uint32_t placement;
};
68
69/**
70 * enum ttm_bo_type
71 *
72 * @ttm_bo_type_device: These are 'normal' buffers that can
73 * be mmapped by user space. Each of these bos occupy a slot in the
74 * device address space, that can be used for normal vm operations.
75 *
76 * @ttm_bo_type_user: These are user-space memory areas that are made
77 * available to the GPU by mapping the buffer pages into the GPU aperture
78 * space. These buffers cannot be mmaped from the device address space.
79 *
80 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
81 * but they cannot be accessed from user-space. For kernel-only use.
82 */
83
enum ttm_bo_type {
	ttm_bo_type_device,	/* user-mmappable; occupies a device address-space slot */
	ttm_bo_type_user,	/* backed by user-space memory; not device-mmappable */
	ttm_bo_type_kernel	/* kernel-only; never accessible from user space */
};
89
90struct ttm_tt;
91
92/**
93 * struct ttm_buffer_object
94 *
95 * @bdev: Pointer to the buffer object device structure.
96 * @buffer_start: The virtual user-space start address of ttm_bo_type_user
97 * buffers.
98 * @type: The bo type.
99 * @destroy: Destruction function. If NULL, kfree is used.
100 * @num_pages: Actual number of pages.
101 * @addr_space_offset: Address space offset.
102 * @acc_size: Accounted size for this object.
103 * @kref: Reference count of this buffer object. When this refcount reaches
104 * zero, the object is put on the delayed delete list.
105 * @list_kref: List reference count of this buffer object. This member is
106 * used to avoid destruction while the buffer object is still on a list.
107 * Lru lists may keep one refcount, the delayed delete list, and kref != 0
108 * keeps one refcount. When this refcount reaches zero,
109 * the object is destroyed.
110 * @event_queue: Queue for processes waiting on buffer object status change.
111 * @lock: spinlock protecting mostly synchronization members.
112 * @proposed_placement: Proposed placement for the buffer. Changed only by the
113 * creator prior to validation as opposed to bo->mem.proposed_flags which is
114 * changed by the implementation prior to a buffer move if it wants to outsmart
115 * the buffer creator / user. This latter happens, for example, at eviction.
116 * @mem: structure describing current placement.
117 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
118 * pinned in physical memory. If this behaviour is not desired, this member
119 * holds a pointer to a persistant shmem object.
120 * @ttm: TTM structure holding system pages.
121 * @evicted: Whether the object was evicted without user-space knowing.
 * @cpu_writers: For synchronization. Number of cpu writers.
123 * @lru: List head for the lru list.
124 * @ddestroy: List head for the delayed destroy list.
125 * @swap: List head for swap LRU list.
126 * @val_seq: Sequence of the validation holding the @reserved lock.
127 * Used to avoid starvation when many processes compete to validate the
128 * buffer. This member is protected by the bo_device::lru_lock.
129 * @seq_valid: The value of @val_seq is valid. This value is protected by
130 * the bo_device::lru_lock.
131 * @reserved: Deadlock-free lock used for synchronization state transitions.
132 * @sync_obj_arg: Opaque argument to synchronization object function.
133 * @sync_obj: Pointer to a synchronization object.
134 * @priv_flags: Flags describing buffer object internal state.
135 * @vm_rb: Rb node for the vm rb tree.
136 * @vm_node: Address space manager node.
137 * @offset: The current GPU offset, which can have different meanings
138 * depending on the memory type. For SYSTEM type memory, it should be 0.
139 * @cur_placement: Hint of current placement.
140 *
141 * Base class for TTM buffer object, that deals with data placement and CPU
142 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
143 * the driver can usually use the placement offset @offset directly as the
144 * GPU virtual address. For drivers implementing multiple
145 * GPU memory manager contexts, the driver should manage the address space
146 * in these contexts separately and use these objects to get the correct
147 * placement and caching for these GPU maps. This makes it possible to use
148 * these objects for even quite elaborate memory management schemes.
149 * The destroy member, the API visibility of this object makes it possible
150 * to derive driver specific types.
151 */
152
struct ttm_buffer_object {
	/**
	 * Members constant at init.
	 */

	struct ttm_bo_device *bdev;
	unsigned long buffer_start;
	enum ttm_bo_type type;
	void (*destroy) (struct ttm_buffer_object *);
	unsigned long num_pages;
	uint64_t addr_space_offset;
	size_t acc_size;	/* size charged to the global memory accounting */

	/**
	 * Members not needing protection.
	 */

	struct kref kref;
	struct kref list_kref;
	wait_queue_head_t event_queue;
	spinlock_t lock;

	/**
	 * Members protected by the bo::reserved lock.
	 */

	uint32_t proposed_placement;
	struct ttm_mem_reg mem;
	struct file *persistant_swap_storage;
	struct ttm_tt *ttm;
	bool evicted;

	/**
	 * Members protected by the bo::reserved lock only when written to.
	 */

	atomic_t cpu_writers;

	/**
	 * Members protected by the bdev::lru_lock.
	 */

	struct list_head lru;
	struct list_head ddestroy;
	struct list_head swap;
	uint32_t val_seq;	/* anti-starvation sequence for validation */
	bool seq_valid;

	/**
	 * Members protected by the bdev::lru_lock
	 * only when written to.
	 */

	atomic_t reserved;


	/**
	 * Members protected by the bo::lock
	 */

	void *sync_obj_arg;
	void *sync_obj;
	unsigned long priv_flags;	/* TTM-internal state flags */

	/**
	 * Members protected by the bdev::vm_lock
	 */

	struct rb_node vm_rb;
	struct drm_mm_node *vm_node;


	/**
	 * Special members that are protected by the reserve lock
	 * and the bo::lock when written to. Can be read with
	 * either of these locks held.
	 */

	unsigned long offset;	/* current GPU offset; 0 for SYSTEM memory */
	uint32_t cur_placement;
};
234
/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */

struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap,	/* ioremap of an I/O memory region */
		ttm_bo_map_vmap,	/* vmap of multiple system pages */
		ttm_bo_map_kmap,	/* kmap of a single page */
		ttm_bo_map_premapped,	/* part of an already-mapped region */
	} bo_kmap_type;
};
258
/**
 * ttm_bo_reference - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 *
 * Returns a refcounted pointer to a buffer object.
 * The caller must already hold a valid reference; kref_get() requires a
 * non-zero refcount.
 */

static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
	kref_get(&bo->kref);
	return bo;
}
273
/**
 * ttm_bo_wait - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @lazy: Use a sleeping rather than polling wait.
 *        NOTE(review): exact semantics are defined by the implementation
 *        in ttm_bo.c — confirm.
 * @interruptible: Use interruptible wait.
 * @no_wait: Return immediately if buffer is busy.
 *
 * This function must be called with the bo::mutex held, and makes
 * sure any previous rendering to the buffer is completed.
 * Note: It might be necessary to block validations before the
 * wait by reserving the buffer.
 * Returns -EBUSY if no_wait is true and the buffer is busy.
 * Returns -ERESTART if interrupted by a signal.
 */
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
		       bool interruptible, bool no_wait);
/**
 * ttm_buffer_object_validate
 *
 * @bo: The buffer object.
 * @proposed_placement: Proposed_placement for the buffer object.
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait: Return immediately if the buffer is busy.
 *
 * Changes placement and caching policy of the buffer object
 * according to @proposed_placement.
 * Returns
 * -EINVAL on invalid proposed placement flags.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTART if interrupted by a signal.
 */
extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
				      uint32_t proposed_placement,
				      bool interruptible, bool no_wait);
/**
 * ttm_bo_unref
 *
 * @bo: The buffer object.
 *
 * Unreference and clear a pointer to a buffer object.
 */
extern void ttm_bo_unref(struct ttm_buffer_object **bo);

/**
 * ttm_bo_synccpu_write_grab
 *
 * @bo: The buffer object.
 * @no_wait: Return immediately if buffer is busy.
 *
 * Synchronizes a buffer object for CPU RW access. This means
 * blocking command submission that affects the buffer and
 * waiting for buffer idle. This lock is recursive.
 * Returns
 * -EBUSY if the buffer is busy and no_wait is true.
 * -ERESTART if interrupted by a signal.
 */

extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
/**
 * ttm_bo_synccpu_write_release:
 *
 * @bo: The buffer object.
 *
 * Releases a synccpu lock.
 */
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
342
343/**
344 * ttm_buffer_object_init
345 *
346 * @bdev: Pointer to a ttm_bo_device struct.
347 * @bo: Pointer to a ttm_buffer_object to be initialized.
348 * @size: Requested size of buffer object.
349 * @type: Requested type of buffer object.
350 * @flags: Initial placement flags.
351 * @page_alignment: Data alignment in pages.
352 * @buffer_start: Virtual address of user space data backing a
353 * user buffer object.
354 * @interruptible: If needing to sleep to wait for GPU resources,
355 * sleep interruptible.
356 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
357 * pinned in physical memory. If this behaviour is not desired, this member
358 * holds a pointer to a persistant shmem object. Typically, this would
359 * point to the shmem object backing a GEM object if TTM is used to back a
360 * GEM user interface.
361 * @acc_size: Accounted size for this object.
362 * @destroy: Destroy function. Use NULL for kfree().
363 *
364 * This function initializes a pre-allocated struct ttm_buffer_object.
365 * As this object may be part of a larger structure, this function,
366 * together with the @destroy function,
367 * enables driver-specific objects derived from a ttm_buffer_object.
368 * On successful return, the object kref and list_kref are set to 1.
369 * Returns
370 * -ENOMEM: Out of memory.
371 * -EINVAL: Invalid placement flags.
372 * -ERESTART: Interrupted by signal while sleeping waiting for resources.
373 */
374
375extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
376 struct ttm_buffer_object *bo,
377 unsigned long size,
378 enum ttm_bo_type type,
379 uint32_t flags,
380 uint32_t page_alignment,
381 unsigned long buffer_start,
382 bool interrubtible,
383 struct file *persistant_swap_storage,
384 size_t acc_size,
385 void (*destroy) (struct ttm_buffer_object *));
/**
 * ttm_buffer_object_create
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @flags: Initial placement flags.
 * @page_alignment: Data alignment in pages.
 * @buffer_start: Virtual address of user space data backing a
 * user buffer object.
 * @interruptible: If needing to sleep while waiting for GPU resources,
 * sleep interruptible.
 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistant shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @p_bo: On successful completion *p_bo points to the created object.
 *
 * This function allocates a ttm_buffer_object, and then calls
 * ttm_buffer_object_init on that object.
 * The destroy function is set to kfree().
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTART: Interrupted by signal while waiting for resources.
 */

extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
				    unsigned long size,
				    enum ttm_bo_type type,
				    uint32_t flags,
				    uint32_t page_alignment,
				    unsigned long buffer_start,
				    bool interruptible,
				    struct file *persistant_swap_storage,
				    struct ttm_buffer_object **p_bo);
424
425/**
426 * ttm_bo_check_placement
427 *
428 * @bo: the buffer object.
429 * @set_flags: placement flags to set.
430 * @clr_flags: placement flags to clear.
431 *
432 * Performs minimal validity checking on an intended change of
433 * placement flags.
434 * Returns
435 * -EINVAL: Intended change is invalid or not allowed.
436 */
437
438extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
439 uint32_t set_flags, uint32_t clr_flags);
440
441/**
442 * ttm_bo_init_mm
443 *
444 * @bdev: Pointer to a ttm_bo_device struct.
445 * @mem_type: The memory type.
446 * @p_offset: offset for managed area in pages.
447 * @p_size: size managed area in pages.
448 *
449 * Initialize a manager for a given memory type.
450 * Note: if part of driver firstopen, it must be protected from a
451 * potentially racing lastclose.
452 * Returns:
453 * -EINVAL: invalid size or memory type.
454 * -ENOMEM: Not enough memory.
455 * May also return driver-specified errors.
456 */
457
458extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
459 unsigned long p_offset, unsigned long p_size);
460/**
461 * ttm_bo_clean_mm
462 *
463 * @bdev: Pointer to a ttm_bo_device struct.
464 * @mem_type: The memory type.
465 *
466 * Take down a manager for a given memory type after first walking
467 * the LRU list to evict any buffers left alive.
468 *
469 * Normally, this function is part of lastclose() or unload(), and at that
470 * point there shouldn't be any buffers left created by user-space, since
 471 * they should have been removed by the file descriptor release() method.
472 * However, before this function is run, make sure to signal all sync objects,
473 * and verify that the delayed delete queue is empty. The driver must also
474 * make sure that there are no NO_EVICT buffers present in this memory type
475 * when the call is made.
476 *
477 * If this function is part of a VT switch, the caller must make sure that
 478 * there are no applications currently validating buffers before this
479 * function is called. The caller can do that by first taking the
480 * struct ttm_bo_device::ttm_lock in write mode.
481 *
482 * Returns:
483 * -EINVAL: invalid or uninitialized memory type.
484 * -EBUSY: There are still buffers left in this memory type.
485 */
486
487extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
488
489/**
490 * ttm_bo_evict_mm
491 *
492 * @bdev: Pointer to a ttm_bo_device struct.
493 * @mem_type: The memory type.
494 *
495 * Evicts all buffers on the lru list of the memory type.
496 * This is normally part of a VT switch or an
497 * out-of-memory-space-due-to-fragmentation handler.
498 * The caller must make sure that there are no other processes
499 * currently validating buffers, and can do that by taking the
500 * struct ttm_bo_device::ttm_lock in write mode.
501 *
502 * Returns:
503 * -EINVAL: Invalid or uninitialized memory type.
504 * -ERESTART: The call was interrupted by a signal while waiting to
505 * evict a buffer.
506 */
507
508extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
509
510/**
511 * ttm_kmap_obj_virtual
512 *
513 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 514 * @is_iomem: Pointer to a boolean that on return indicates true if the
 515 * virtual map is io memory, false if normal memory.
516 *
517 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
518 * If *is_iomem is 1 on return, the virtual address points to an io memory area,
519 * that should strictly be accessed by the iowriteXX() and similar functions.
520 */
521
522static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
523 bool *is_iomem)
524{
525 *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
526 map->bo_kmap_type == ttm_bo_map_premapped);
527 return map->virtual;
528}
529
530/**
531 * ttm_bo_kmap
532 *
533 * @bo: The buffer object.
534 * @start_page: The first page to map.
535 * @num_pages: Number of pages to map.
536 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
537 *
538 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
539 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
540 * used to obtain a virtual address to the data.
541 *
542 * Returns
543 * -ENOMEM: Out of memory.
544 * -EINVAL: Invalid range.
545 */
546
547extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
548 unsigned long num_pages, struct ttm_bo_kmap_obj *map);
549
550/**
551 * ttm_bo_kunmap
552 *
553 * @map: Object describing the map to unmap.
554 *
555 * Unmaps a kernel map set up by ttm_bo_kmap.
556 */
557
558extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
559
560#if 0
561#endif
562
563/**
564 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
565 *
566 * @vma: vma as input from the fbdev mmap method.
567 * @bo: The bo backing the address space. The address space will
568 * have the same size as the bo, and start at offset 0.
569 *
570 * This function is intended to be called by the fbdev mmap method
571 * if the fbdev address space is to be backed by a bo.
572 */
573
574extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
575 struct ttm_buffer_object *bo);
576
577/**
578 * ttm_bo_mmap - mmap out of the ttm device address space.
579 *
580 * @filp: filp as input from the mmap method.
581 * @vma: vma as input from the mmap method.
582 * @bdev: Pointer to the ttm_bo_device with the address space manager.
583 *
584 * This function is intended to be called by the device mmap method.
585 * if the device address space is to be backed by the bo manager.
586 */
587
588extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
589 struct ttm_bo_device *bdev);
590
591/**
592 * ttm_bo_io
593 *
594 * @bdev: Pointer to the struct ttm_bo_device.
595 * @filp: Pointer to the struct file attempting to read / write.
596 * @wbuf: User-space pointer to address of buffer to write. NULL on read.
597 * @rbuf: User-space pointer to address of buffer to read into.
598 * Null on write.
599 * @count: Number of bytes to read / write.
600 * @f_pos: Pointer to current file position.
 601 * @write: 1 for write, 0 for read.
602 *
603 * This function implements read / write into ttm buffer objects, and is
604 * intended to
605 * be called from the fops::read and fops::write method.
606 * Returns:
607 * See man (2) write, man(2) read. In particular,
608 * the function may return -EINTR if
609 * interrupted by a signal.
610 */
611
612extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
613 const char __user *wbuf, char __user *rbuf,
614 size_t count, loff_t *f_pos, bool write);
615
616extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
617
618#endif
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
new file mode 100644
index 000000000000..62ed733c52a2
--- /dev/null
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -0,0 +1,867 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30#ifndef _TTM_BO_DRIVER_H_
31#define _TTM_BO_DRIVER_H_
32
33#include "ttm/ttm_bo_api.h"
34#include "ttm/ttm_memory.h"
35#include "drm_mm.h"
36#include "linux/workqueue.h"
37#include "linux/fs.h"
38#include "linux/spinlock.h"
39
40struct ttm_backend;
41
/**
 * struct ttm_backend_func - driver-supplied methods operating on a
 * struct ttm_backend. All members are set up by the driver.
 */
struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member populate
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 * @num_pages: Number of pages to populate.
	 * @pages: Array of pointers to ttm pages.
	 * @dummy_read_page: Page to be used instead of NULL pages in the
	 * array @pages.
	 *
	 * Populate the backend with ttm pages. Depending on the backend,
	 * it may or may not copy the @pages array.
	 */
	int (*populate) (struct ttm_backend *backend,
			 unsigned long num_pages, struct page **pages,
			 struct page *dummy_read_page);
	/**
	 * struct ttm_backend_func member clear
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * This is an "unpopulate" function. Release all resources
	 * allocated with populate.
	 */
	void (*clear) (struct ttm_backend *backend);

	/**
	 * struct ttm_backend_func member bind
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture- and system page sizes.
	 */
	int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture- and system page sizes.
	 */
	int (*unbind) (struct ttm_backend *backend);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * Destroy the backend.
	 */
	void (*destroy) (struct ttm_backend *backend);
};
100
101/**
102 * struct ttm_backend
103 *
104 * @bdev: Pointer to a struct ttm_bo_device.
105 * @flags: For driver use.
106 * @func: Pointer to a struct ttm_backend_func that describes
107 * the backend methods.
108 *
109 */
110
struct ttm_backend {
	struct ttm_bo_device *bdev;	/* device this backend belongs to */
	uint32_t flags;			/* for driver use */
	struct ttm_backend_func *func;	/* driver-supplied backend methods */
};
116
117#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
118#define TTM_PAGE_FLAG_USER (1 << 1)
119#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
120#define TTM_PAGE_FLAG_WRITE (1 << 3)
121#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
122#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
123#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
124
/* Caching policy currently applied to the pages of a struct ttm_tt. */
enum ttm_caching_state {
	tt_uncached,	/* uncached */
	tt_wc,		/* write-combined */
	tt_cached	/* cache-coherent */
};
130
131/**
132 * struct ttm_tt
133 *
134 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
135 * pointer.
136 * @pages: Array of pages backing the data.
137 * @first_himem_page: Himem pages are put last in the page array, which
138 * enables us to run caching attribute changes on only the first part
139 * of the page array containing lomem pages. This is the index of the
140 * first himem page.
141 * @last_lomem_page: Index of the last lomem page in the page array.
142 * @num_pages: Number of pages in the page array.
143 * @bdev: Pointer to the current struct ttm_bo_device.
144 * @be: Pointer to the ttm backend.
145 * @tsk: The task for user ttm.
146 * @start: virtual address for user ttm.
147 * @swap_storage: Pointer to shmem struct file for swap storage.
148 * @caching_state: The current caching state of the pages.
149 * @state: The current binding state of the pages.
150 *
151 * This is a structure holding the pages, caching- and aperture binding
152 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
153 * memory.
154 */
155
struct ttm_tt {
	struct page *dummy_read_page;
	struct page **pages;
	long first_himem_page;
	long last_lomem_page;
	uint32_t page_flags;		/* TTM_PAGE_FLAG_* bits */
	unsigned long num_pages;
	struct ttm_bo_device *bdev;
	struct ttm_backend *be;
	struct task_struct *tsk;
	unsigned long start;
	struct file *swap_storage;
	enum ttm_caching_state caching_state;
	/* Current binding state of the pages. */
	enum {
		tt_bound,		/* pages bound to the aperture */
		tt_unbound,		/* pages populated but not bound */
		tt_unpopulated,		/* no pages allocated yet */
	} state;
};
175
176#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
177#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
178#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
179 before kernel access. */
180#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
181
182/**
183 * struct ttm_mem_type_manager
184 *
185 * @has_type: The memory type has been initialized.
186 * @use_type: The memory type is enabled.
187 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
188 * managed by this memory type.
189 * @gpu_offset: If used, the GPU offset of the first managed page of
190 * fixed memory or the first managed location in an aperture.
191 * @io_offset: The io_offset of the first managed page of IO memory or
192 * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
193 * memory, this should be set to NULL.
194 * @io_size: The size of a managed IO region (fixed memory or aperture).
195 * @io_addr: Virtual kernel address if the io region is pre-mapped. For
196 * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
197 * @io_addr should be set to NULL.
198 * @size: Size of the managed region.
199 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
200 * as defined in ttm_placement_common.h
201 * @default_caching: The default caching policy used for a buffer object
202 * placed in this memory type if the user doesn't provide one.
203 * @manager: The range manager used for this memory type. FIXME: If the aperture
204 * has a page size different from the underlying system, the granularity
205 * of this manager should take care of this. But the range allocating code
206 * in ttm_bo.c needs to be modified for this.
207 * @lru: The lru list for this memory type.
208 *
209 * This structure is used to identify and manage memory types for a device.
210 * It's set up by the ttm_bo_driver::init_mem_type method.
211 */
212
struct ttm_mem_type_manager {

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;			/* memory type has been initialized */
	bool use_type;			/* memory type is enabled */
	uint32_t flags;			/* TTM_MEMTYPE_FLAG_* traits */
	unsigned long gpu_offset;
	unsigned long io_offset;
	unsigned long io_size;
	void *io_addr;			/* NULL unless the io region is pre-mapped */
	uint64_t size;
	uint32_t available_caching;	/* mask of TTM_PL_FLAG_* caching types */
	uint32_t default_caching;

	/*
	 * Protected by the bdev->lru_lock.
	 * TODO: Consider one lru_lock per ttm_mem_type_manager.
	 * Plays ill with list removal, though.
	 */

	struct drm_mm manager;		/* range manager for this memory type */
	struct list_head lru;		/* lru list for this memory type */
};
239
240/**
241 * struct ttm_bo_driver
242 *
243 * @mem_type_prio: Priority array of memory types to place a buffer object in
244 * if it fits without evicting buffers from any of these memory types.
245 * @mem_busy_prio: Priority array of memory types to place a buffer object in
246 * if it needs to evict buffers to make room.
247 * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
248 * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
249 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
250 * @invalidate_caches: Callback to invalidate read caches when a buffer object
251 * has been evicted.
252 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
253 * structure.
254 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
255 * @move: Callback for a driver to hook in accelerated functions to
256 * move a buffer.
257 * If set to NULL, a potentially slow memcpy() move is used.
258 * @sync_obj_signaled: See ttm_fence_api.h
259 * @sync_obj_wait: See ttm_fence_api.h
260 * @sync_obj_flush: See ttm_fence_api.h
261 * @sync_obj_unref: See ttm_fence_api.h
262 * @sync_obj_ref: See ttm_fence_api.h
263 */
264
struct ttm_bo_driver {
	const uint32_t *mem_type_prio;
	const uint32_t *mem_busy_prio;
	uint32_t num_mem_type_prio;
	uint32_t num_mem_busy_prio;

	/**
	 * struct ttm_bo_driver member create_ttm_backend_entry
	 *
	 * @bdev: The buffer object device.
	 *
	 * Create a driver specific struct ttm_backend.
	 */

	struct ttm_backend *(*create_ttm_backend_entry)
	 (struct ttm_bo_device *bdev);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);

	/**
	 * struct ttm_bo_driver member init_mem_type
	 *
	 * @bdev: the buffer object device.
	 * @type: index of the memory type, < TTM_NUM_MEM_TYPES.
	 * @man: the struct ttm_mem_type_manager to set up.
	 *
	 * Initialize the memory type manager @man for memory type @type.
	 */
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait: whether this should give up and return -EBUSY
	 * if this move would require sleeping
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 * If set to NULL, a potentially slow memcpy() move is used instead.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait, struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver_member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo,
			      struct file *filp);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these. I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */

	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);
};
357
358#define TTM_NUM_MEM_TYPES 8
359
360#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
361 idling before CPU mapping */
362#define TTM_BO_PRIV_FLAG_MAX 1
363/**
364 * struct ttm_bo_device - Buffer object driver device-specific data.
365 *
366 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
367 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
368 * @count: Current number of buffer object.
369 * @pages: Current number of pinned pages.
370 * @dummy_read_page: Pointer to a dummy page used for mapping requests
371 * of unpopulated pages.
 372 * @shrink: A shrink callback object used for buffer object swap.
373 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
374 * used by a buffer object. This is excluding page arrays and backing pages.
375 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
376 * @man: An array of mem_type_managers.
377 * @addr_space_mm: Range manager for the device address space.
 378 * @lru_lock: Spinlock that protects the buffer+device lru lists and
379 * ddestroy lists.
380 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
381 * If a GPU lockup has been detected, this is forced to 0.
382 * @dev_mapping: A pointer to the struct address_space representing the
383 * device address space.
384 * @wq: Work queue structure for the delayed delete workqueue.
385 *
386 */
387
struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */

	struct ttm_mem_global *mem_glob;
	struct ttm_bo_driver *driver;
	struct page *dummy_read_page;	/* mapped for unpopulated pages */
	struct ttm_mem_shrink shrink;	/* buffer object swap callback */

	size_t ttm_bo_extra_size;	/* per-bo overhead, bo struct excluded */
	size_t ttm_bo_size;		/* ttm_bo_extra_size + sizeof(struct ttm_buffer_object) */

	rwlock_t vm_lock;
	/*
	 * Protected by the vm lock.
	 */
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	/* rb tree over the device address space (see @addr_space_mm);
	 * presumably indexes bos by device offset -- not documented here. */
	struct rb_root addr_space_rb;
	struct drm_mm addr_space_mm;	/* range manager for the device address space */

	/*
	 * Might want to change this to one lock per manager.
	 */
	spinlock_t lru_lock;
	/*
	 * Protected by the lru lock.
	 */
	struct list_head ddestroy;	/* delayed-destroy list (see @wq) */
	struct list_head swap_lru;

	/*
	 * Protected by load / firstopen / lastclose /unload sync.
	 */

	bool nice_mode;			/* forced to 0 on GPU lockup */
	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;		/* delayed delete workqueue */
};
433
434/**
435 * ttm_flag_masked
436 *
437 * @old: Pointer to the result and original value.
438 * @new: New value of bits.
439 * @mask: Mask of bits to change.
440 *
441 * Convenience function to change a number of bits identified by a mask.
442 */
443
static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	/* Replace exactly the @mask bits of *@old with those of @new. */
	*old = (*old & ~mask) | (new & mask);
	return *old;
}
450
451/**
452 * ttm_tt_create
453 *
454 * @bdev: pointer to a struct ttm_bo_device:
455 * @size: Size of the data needed backing.
456 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
457 * @dummy_read_page: See struct ttm_bo_device.
458 *
459 * Create a struct ttm_tt to back data with system memory pages.
460 * No pages are actually allocated.
461 * Returns:
462 * NULL: Out of memory.
463 */
464extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
465 unsigned long size,
466 uint32_t page_flags,
467 struct page *dummy_read_page);
468
469/**
470 * ttm_tt_set_user:
471 *
472 * @ttm: The struct ttm_tt to populate.
473 * @tsk: A struct task_struct for which @start is a valid user-space address.
474 * @start: A valid user-space address.
475 * @num_pages: Size in pages of the user memory area.
476 *
477 * Populate a struct ttm_tt with a user-space memory area after first pinning
478 * the pages backing it.
479 * Returns:
480 * !0: Error.
481 */
482
483extern int ttm_tt_set_user(struct ttm_tt *ttm,
484 struct task_struct *tsk,
485 unsigned long start, unsigned long num_pages);
486
487/**
 488 * ttm_tt_bind:
489 *
490 * @ttm: The struct ttm_tt containing backing pages.
491 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
492 *
493 * Bind the pages of @ttm to an aperture location identified by @bo_mem
494 */
495extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
496
497/**
 498 * ttm_tt_destroy:
499 *
500 * @ttm: The struct ttm_tt.
501 *
502 * Unbind, unpopulate and destroy a struct ttm_tt.
503 */
504extern void ttm_tt_destroy(struct ttm_tt *ttm);
505
506/**
 507 * ttm_tt_unbind:
508 *
509 * @ttm: The struct ttm_tt.
510 *
511 * Unbind a struct ttm_tt.
512 */
513extern void ttm_tt_unbind(struct ttm_tt *ttm);
514
515/**
 516 * ttm_tt_get_page:
517 *
518 * @ttm: The struct ttm_tt.
519 * @index: Index of the desired page.
520 *
521 * Return a pointer to the struct page backing @ttm at page
522 * index @index. If the page is unpopulated, one will be allocated to
523 * populate that index.
524 *
525 * Returns:
526 * NULL on OOM.
527 */
528extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
529
530/**
531 * ttm_tt_cache_flush:
532 *
533 * @pages: An array of pointers to struct page:s to flush.
534 * @num_pages: Number of pages to flush.
535 *
536 * Flush the data of the indicated pages from the cpu caches.
537 * This is used when changing caching attributes of the pages from
538 * cache-coherent.
539 */
540extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
541
542/**
543 * ttm_tt_set_placement_caching:
544 *
545 * @ttm A struct ttm_tt the backing pages of which will change caching policy.
546 * @placement: Flag indicating the desired caching policy.
547 *
548 * This function will change caching policy of any default kernel mappings of
549 * the pages backing @ttm. If changing from cached to uncached or
550 * write-combined,
551 * all CPU caches will first be flushed to make sure the data of the pages
552 * hit RAM. This function may be very costly as it involves global TLB
553 * and cache flushes and potential page splitting / combining.
554 */
555extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
556extern int ttm_tt_swapout(struct ttm_tt *ttm,
557 struct file *persistant_swap_storage);
558
559/*
560 * ttm_bo.c
561 */
562
563/**
564 * ttm_mem_reg_is_pci
565 *
566 * @bdev: Pointer to a struct ttm_bo_device.
567 * @mem: A valid struct ttm_mem_reg.
568 *
569 * Returns true if the memory described by @mem is PCI memory,
570 * false otherwise.
571 */
572extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
573 struct ttm_mem_reg *mem);
574
575/**
576 * ttm_bo_mem_space
577 *
578 * @bo: Pointer to a struct ttm_buffer_object. the data of which
579 * we want to allocate space for.
580 * @proposed_placement: Proposed new placement for the buffer object.
581 * @mem: A struct ttm_mem_reg.
 582 * @interruptible: Sleep interruptible when waiting for space.
583 * @no_wait: Don't sleep waiting for space to become available.
584 *
585 * Allocate memory space for the buffer object pointed to by @bo, using
586 * the placement flags in @mem, potentially evicting other idle buffer objects.
587 * This function may sleep while waiting for space to become available.
588 * Returns:
589 * -EBUSY: No space available (only if no_wait == 1).
590 * -ENOMEM: Could not allocate memory for the buffer object, either due to
591 * fragmentation or concurrent allocators.
592 * -ERESTART: An interruptible sleep was interrupted by a signal.
593 */
594extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
595 uint32_t proposed_placement,
596 struct ttm_mem_reg *mem,
597 bool interruptible, bool no_wait);
598/**
599 * ttm_bo_wait_for_cpu
600 *
601 * @bo: Pointer to a struct ttm_buffer_object.
602 * @no_wait: Don't sleep while waiting.
603 *
604 * Wait until a buffer object is no longer sync'ed for CPU access.
605 * Returns:
606 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
607 * -ERESTART: An interruptible sleep was interrupted by a signal.
608 */
609
610extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
611
612/**
613 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
614 *
615 * @bo Pointer to a struct ttm_buffer_object.
616 * @bus_base On return the base of the PCI region
617 * @bus_offset On return the byte offset into the PCI region
618 * @bus_size On return the byte size of the buffer object or zero if
619 * the buffer object memory is not accessible through a PCI region.
620 *
621 * Returns:
622 * -EINVAL if the buffer object is currently not mappable.
623 * 0 otherwise.
624 */
625
626extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
627 struct ttm_mem_reg *mem,
628 unsigned long *bus_base,
629 unsigned long *bus_offset,
630 unsigned long *bus_size);
631
632extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
633
634/**
635 * ttm_bo_device_init
636 *
637 * @bdev: A pointer to a struct ttm_bo_device to initialize.
638 * @mem_global: A pointer to an initialized struct ttm_mem_global.
639 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
640 * @file_page_offset: Offset into the device address space that is available
641 * for buffer data. This ensures compatibility with other users of the
642 * address space.
643 *
644 * Initializes a struct ttm_bo_device:
645 * Returns:
646 * !0: Failure.
647 */
648extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
649 struct ttm_mem_global *mem_glob,
650 struct ttm_bo_driver *driver,
651 uint64_t file_page_offset);
652
653/**
654 * ttm_bo_reserve:
655 *
656 * @bo: A pointer to a struct ttm_buffer_object.
657 * @interruptible: Sleep interruptible if waiting.
658 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
659 * @use_sequence: If @bo is already reserved, Only sleep waiting for
660 * it to become unreserved if @sequence < (@bo)->sequence.
661 *
662 * Locks a buffer object for validation. (Or prevents other processes from
663 * locking it for validation) and removes it from lru lists, while taking
664 * a number of measures to prevent deadlocks.
665 *
666 * Deadlocks may occur when two processes try to reserve multiple buffers in
667 * different order, either by will or as a result of a buffer being evicted
668 * to make room for a buffer already reserved. (Buffers are reserved before
669 * they are evicted). The following algorithm prevents such deadlocks from
 670 * occurring:
671 * 1) Buffers are reserved with the lru spinlock held. Upon successful
672 * reservation they are removed from the lru list. This stops a reserved buffer
673 * from being evicted. However the lru spinlock is released between the time
674 * a buffer is selected for eviction and the time it is reserved.
675 * Therefore a check is made when a buffer is reserved for eviction, that it
676 * is still the first buffer in the lru list, before it is removed from the
677 * list. @check_lru == 1 forces this check. If it fails, the function returns
678 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
679 * the procedure.
680 * 2) Processes attempting to reserve multiple buffers other than for eviction,
681 * (typically execbuf), should first obtain a unique 32-bit
682 * validation sequence number,
683 * and call this function with @use_sequence == 1 and @sequence == the unique
684 * sequence number. If upon call of this function, the buffer object is already
685 * reserved, the validation sequence is checked against the validation
686 * sequence of the process currently reserving the buffer,
687 * and if the current validation sequence is greater than that of the process
688 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
689 * waiting for the buffer to become unreserved, after which it retries
690 * reserving.
691 * The caller should, when receiving an -EAGAIN error
692 * release all its buffer reservations, wait for @bo to become unreserved, and
693 * then rerun the validation with the same validation sequence. This procedure
694 * will always guarantee that the process with the lowest validation sequence
695 * will eventually succeed, preventing both deadlocks and starvation.
696 *
697 * Returns:
698 * -EAGAIN: The reservation may cause a deadlock.
699 * Release all buffer reservations, wait for @bo to become unreserved and
700 * try again. (only if use_sequence == 1).
701 * -ERESTART: A wait for the buffer to become unreserved was interrupted by
702 * a signal. Release all buffer reservations and return to user-space.
703 */
704extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
705 bool interruptible,
706 bool no_wait, bool use_sequence, uint32_t sequence);
707
708/**
709 * ttm_bo_unreserve
710 *
711 * @bo: A pointer to a struct ttm_buffer_object.
712 *
713 * Unreserve a previous reservation of @bo.
714 */
715extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
716
717/**
718 * ttm_bo_wait_unreserved
719 *
720 * @bo: A pointer to a struct ttm_buffer_object.
721 *
722 * Wait for a struct ttm_buffer_object to become unreserved.
723 * This is typically used in the execbuf code to relax cpu-usage when
724 * backing off from a potential deadlock condition.
725 */
726extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
727 bool interruptible);
728
729/**
730 * ttm_bo_block_reservation
731 *
732 * @bo: A pointer to a struct ttm_buffer_object.
733 * @interruptible: Use interruptible sleep when waiting.
734 * @no_wait: Don't sleep, but rather return -EBUSY.
735 *
736 * Block reservation for validation by simply reserving the buffer.
737 * This is intended for single buffer use only without eviction,
738 * and thus needs no deadlock protection.
739 *
740 * Returns:
741 * -EBUSY: If no_wait == 1 and the buffer is already reserved.
742 * -ERESTART: If interruptible == 1 and the process received a signal
743 * while sleeping.
744 */
745extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
746 bool interruptible, bool no_wait);
747
748/**
749 * ttm_bo_unblock_reservation
750 *
751 * @bo: A pointer to a struct ttm_buffer_object.
752 *
753 * Unblocks reservation leaving lru lists untouched.
754 */
755extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
756
757/*
758 * ttm_bo_util.c
759 */
760
761/**
762 * ttm_bo_move_ttm
763 *
764 * @bo: A pointer to a struct ttm_buffer_object.
765 * @evict: 1: This is an eviction. Don't try to pipeline.
766 * @no_wait: Never sleep, but rather return with -EBUSY.
767 * @new_mem: struct ttm_mem_reg indicating where to move.
768 *
769 * Optimized move function for a buffer object with both old and
770 * new placement backed by a TTM. The function will, if successful,
771 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
772 * and update the (@bo)->mem placement flags. If unsuccessful, the old
773 * data remains untouched, and it's up to the caller to free the
774 * memory space indicated by @new_mem.
775 * Returns:
776 * !0: Failure.
777 */
778
779extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
780 bool evict, bool no_wait,
781 struct ttm_mem_reg *new_mem);
782
783/**
784 * ttm_bo_move_memcpy
785 *
786 * @bo: A pointer to a struct ttm_buffer_object.
787 * @evict: 1: This is an eviction. Don't try to pipeline.
788 * @no_wait: Never sleep, but rather return with -EBUSY.
789 * @new_mem: struct ttm_mem_reg indicating where to move.
790 *
791 * Fallback move function for a mappable buffer object in mappable memory.
792 * The function will, if successful,
793 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
794 * and update the (@bo)->mem placement flags. If unsuccessful, the old
795 * data remains untouched, and it's up to the caller to free the
796 * memory space indicated by @new_mem.
797 * Returns:
798 * !0: Failure.
799 */
800
801extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
802 bool evict,
803 bool no_wait, struct ttm_mem_reg *new_mem);
804
805/**
806 * ttm_bo_free_old_node
807 *
808 * @bo: A pointer to a struct ttm_buffer_object.
809 *
810 * Utility function to free an old placement after a successful move.
811 */
812extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
813
814/**
815 * ttm_bo_move_accel_cleanup.
816 *
817 * @bo: A pointer to a struct ttm_buffer_object.
818 * @sync_obj: A sync object that signals when moving is complete.
819 * @sync_obj_arg: An argument to pass to the sync object idle / wait
820 * functions.
821 * @evict: This is an evict move. Don't return until the buffer is idle.
822 * @no_wait: Never sleep, but rather return with -EBUSY.
823 * @new_mem: struct ttm_mem_reg indicating where to move.
824 *
825 * Accelerated move function to be called when an accelerated move
826 * has been scheduled. The function will create a new temporary buffer object
827 * representing the old placement, and put the sync object on both buffer
828 * objects. After that the newly created buffer object is unref'd to be
829 * destroyed when the move is complete. This will help pipeline
830 * buffer moves.
831 */
832
833extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
834 void *sync_obj,
835 void *sync_obj_arg,
836 bool evict, bool no_wait,
837 struct ttm_mem_reg *new_mem);
838/**
839 * ttm_io_prot
840 *
841 * @c_state: Caching state.
842 * @tmp: Page protection flag for a normal, cached mapping.
843 *
844 * Utility function that returns the pgprot_t that should be used for
845 * setting up a PTE with the caching model indicated by @c_state.
846 */
847extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
848
849#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
850#define TTM_HAS_AGP
851#include <linux/agp_backend.h>
852
853/**
854 * ttm_agp_backend_init
855 *
856 * @bdev: Pointer to a struct ttm_bo_device.
857 * @bridge: The agp bridge this device is sitting on.
858 *
859 * Create a TTM backend that uses the indicated AGP bridge as an aperture
860 * for TT memory. This function uses the linux agpgart interface to
861 * bind and unbind memory backing a ttm_tt.
862 */
863extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
864 struct agp_bridge_data *bridge);
865#endif
866
867#endif
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
new file mode 100644
index 000000000000..d8b8f042c4f1
--- /dev/null
+++ b/include/drm/ttm/ttm_memory.h
@@ -0,0 +1,153 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef TTM_MEMORY_H
29#define TTM_MEMORY_H
30
31#include <linux/workqueue.h>
32#include <linux/spinlock.h>
33#include <linux/wait.h>
34#include <linux/errno.h>
35
36/**
37 * struct ttm_mem_shrink - callback to shrink TTM memory usage.
38 *
39 * @do_shrink: The callback function.
40 *
41 * Arguments to the do_shrink functions are intended to be passed using
42 * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
43 * and can be accessed using container_of().
44 */
45
46struct ttm_mem_shrink {
47	int (*do_shrink) (struct ttm_mem_shrink *);	/* shrink callback; int status return — NOTE(review): error convention not shown here, see ttm_memory.c */
48};
49
50/**
51 * struct ttm_mem_global - Global memory accounting structure.
52 *
53 * @shrink: A single callback to shrink TTM memory usage. Extend this
54 * to a linked list to be able to handle multiple callbacks when needed.
55 * @swap_queue: A workqueue to handle shrinking in low memory situations. We
56 * need a separate workqueue since it will spend a lot of time waiting
57 * for the GPU, and this will otherwise block other workqueue tasks(?)
58 * At this point we use only a single-threaded workqueue.
59 * @work: The workqueue callback for the shrink queue.
60 * @queue: Wait queue for processes suspended waiting for memory.
61 * @lock: Lock to protect the @shrink - and the memory accounting members,
62 * that is, essentially the whole structure with some exceptions.
63 * @emer_memory: Lowmem memory limit available for root.
64 * @max_memory: Lowmem memory limit available for non-root.
65 * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
66 * @used_memory: Currently used lowmem memory.
67 * @used_total_memory: Currently used total (lowmem + highmem) memory.
68 * @total_memory_swap_limit: Total memory limit where the shrink workqueue
69 * kicks in.
70 * @max_total_memory: Total memory available to non-root processes.
71 * @emer_total_memory: Total memory available to root processes.
72 *
73 * Note that this structure is not per device. It should be global for all
74 * graphics devices.
75 */
76
77struct ttm_mem_global {
78	struct ttm_mem_shrink *shrink;	/* single registered shrink callback; protected by @lock */
79	struct workqueue_struct *swap_queue;	/* single-threaded wq running the shrink work */
80	struct work_struct work;	/* work item queued on @swap_queue */
81	wait_queue_head_t queue;	/* processes sleeping here wait for memory */
82	spinlock_t lock;	/* protects @shrink and the accounting fields below */
83	uint64_t emer_memory;	/* lowmem limit available to root */
84	uint64_t max_memory;	/* lowmem limit available to non-root */
85	uint64_t swap_limit;	/* lowmem level at which the shrink wq kicks in */
86	uint64_t used_memory;	/* currently used lowmem */
87	uint64_t used_total_memory;	/* currently used lowmem + highmem */
88	uint64_t total_memory_swap_limit;	/* total-memory level at which the shrink wq kicks in */
89	uint64_t max_total_memory;	/* total memory available to non-root */
90	uint64_t emer_total_memory;	/* total memory available to root */
91};
92
93/**
94 * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
95 *
96 * @shrink: The object to initialize.
97 * @func: The callback function.
98 */
99
100static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
101 int (*func) (struct ttm_mem_shrink *))
102{
103 shrink->do_shrink = func;
104}
105
106/**
107 * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
108 *
109 * @glob: The struct ttm_mem_global object to register with.
110 * @shrink: An initialized struct ttm_mem_shrink object to register.
111 *
112 * Returns:
113 * -EBUSY: There's already a callback registered. (May change).
114 */
115
116static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
117 struct ttm_mem_shrink *shrink)
118{
119 spin_lock(&glob->lock);
120 if (glob->shrink != NULL) {
121 spin_unlock(&glob->lock);
122 return -EBUSY;
123 }
124 glob->shrink = shrink;
125 spin_unlock(&glob->lock);
126 return 0;
127}
128
129/**
130 * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
131 *
132 * @glob: The struct ttm_mem_global object to unregister from.
133 * @shrink: A previously registered struct ttm_mem_shrink object.
134 *
135 */
136
137static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
138 struct ttm_mem_shrink *shrink)
139{
140 spin_lock(&glob->lock);
141 BUG_ON(glob->shrink != shrink);
142 glob->shrink = NULL;
143 spin_unlock(&glob->lock);
144}
145
146extern int ttm_mem_global_init(struct ttm_mem_global *glob);
147extern void ttm_mem_global_release(struct ttm_mem_global *glob);
148extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
149 bool no_wait, bool interruptible, bool himem);
150extern void ttm_mem_global_free(struct ttm_mem_global *glob,
151 uint64_t amount, bool himem);
152extern size_t ttm_round_pot(size_t size);
153#endif
diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h
new file mode 100644
index 000000000000..889a4c7958ae
--- /dev/null
+++ b/include/drm/ttm/ttm_module.h
@@ -0,0 +1,58 @@
1/**************************************************************************
2 *
3 * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_MODULE_H_
32#define _TTM_MODULE_H_
33
34#include <linux/kernel.h>
35
36#define TTM_PFX "[TTM]"
37
38enum ttm_global_types {
39	TTM_GLOBAL_TTM_MEM = 0,	/* presumably the struct ttm_mem_global singleton — confirm in ttm_global.c */
40	TTM_GLOBAL_TTM_BO,	/* global buffer-object state */
41	TTM_GLOBAL_TTM_OBJECT,	/* global object state */
42	TTM_GLOBAL_NUM	/* number of global types; must stay last */
43};
44
45struct ttm_global_reference {
46	enum ttm_global_types global_type;	/* which global object this reference names */
47	size_t size;	/* size of the global object — NOTE(review): allocation semantics live in ttm_global.c */
48	void *object;	/* the shared global object itself */
49	int (*init) (struct ttm_global_reference *);	/* presumably run on first reference — confirm against ttm_global_item_ref() */
50	void (*release) (struct ttm_global_reference *);	/* presumably run on last unreference — confirm against ttm_global_item_unref() */
51};
52
53extern void ttm_global_init(void);
54extern void ttm_global_release(void);
55extern int ttm_global_item_ref(struct ttm_global_reference *ref);
56extern void ttm_global_item_unref(struct ttm_global_reference *ref);
57
58#endif /* _TTM_MODULE_H_ */
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
new file mode 100644
index 000000000000..c84ff153a564
--- /dev/null
+++ b/include/drm/ttm/ttm_placement.h
@@ -0,0 +1,92 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_PLACEMENT_H_
32#define _TTM_PLACEMENT_H_
33/*
34 * Memory regions for data placement.
35 */
36
37#define TTM_PL_SYSTEM 0
38#define TTM_PL_TT 1
39#define TTM_PL_VRAM 2
40#define TTM_PL_PRIV0 3
41#define TTM_PL_PRIV1 4
42#define TTM_PL_PRIV2 5
43#define TTM_PL_PRIV3 6
44#define TTM_PL_PRIV4 7
45#define TTM_PL_PRIV5 8
46#define TTM_PL_SWAPPED 15
47
48#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
49#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
50#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
51#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
52#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
53#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
54#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
55#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
56#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
57#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
58#define TTM_PL_MASK_MEM 0x0000FFFF
59
60/*
61 * Other flags that affects data placement.
62 * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
63 * if available.
64 * TTM_PL_FLAG_SHARED means that another application may
65 * reference the buffer.
66 * TTM_PL_FLAG_NO_EVICT means that the buffer may never
67 * be evicted to make room for other buffers.
68 */
69
70#define TTM_PL_FLAG_CACHED (1 << 16)
71#define TTM_PL_FLAG_UNCACHED (1 << 17)
72#define TTM_PL_FLAG_WC (1 << 18)
73#define TTM_PL_FLAG_SHARED (1 << 20)
74#define TTM_PL_FLAG_NO_EVICT (1 << 21)
75
76#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
77 TTM_PL_FLAG_UNCACHED | \
78 TTM_PL_FLAG_WC)
79
80#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
81
82/*
83 * Access flags to be used for CPU- and GPU- mappings.
84 * The idea is that the TTM synchronization mechanism will
85 * allow concurrent READ access and exclusive write access.
86 * Currently GPU- and CPU accesses are exclusive.
87 */
88
89#define TTM_ACCESS_READ (1 << 0)
90#define TTM_ACCESS_WRITE (1 << 1)
91
92#endif