author    Linus Torvalds <torvalds@linux-foundation.org>  2009-06-17 00:20:39 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-06-17 00:20:39 -0400
commit    8d15b0ec32f20a57881dc073b2e8d11dea0ccceb (patch)
tree      8aa3d4dfd0660807c3ce2893b35292b3b0e3bf08 /include/drm
parent    005411c3e9147bc3b78215390e847d688dbbc163 (diff)
parent    cc8da5263fa743c33d6503f85113bcb70048e633 (diff)
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/radeon: switch to using late_initcall
  radeon legacy chips: tv dac bg/dac adj updates
  drm/radeon: introduce kernel modesetting for radeon hardware
  drm: Add the TTM GPU memory manager subsystem.
  drm: Memory fragmentation from lost alignment blocks
  drm/radeon: fix mobility flags on new PCI IDs.
Diffstat (limited to 'include/drm')
-rw-r--r--  include/drm/drm_pciids.h        |   8
-rw-r--r--  include/drm/radeon_drm.h        | 130
-rw-r--r--  include/drm/ttm/ttm_bo_api.h    | 618
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 867
-rw-r--r--  include/drm/ttm/ttm_memory.h    | 153
-rw-r--r--  include/drm/ttm/ttm_module.h    |  58
-rw-r--r--  include/drm/ttm/ttm_placement.h |  92
7 files changed, 1922 insertions(+), 4 deletions(-)
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index f8634ab53b8f..45c18672b093 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -254,8 +254,8 @@
 {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
@@ -273,8 +273,8 @@
 {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
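
For orientation: each of these rows is an entry in the radeon_PCI_IDS table macro that drm_pciids.h exposes, with the CHIP_xx | RADEON_xx flags carried in the driver_data field. A minimal sketch of how a driver consumes the macro, assuming the usual expansion into a pci_device_id array (the variable name pciidlist is illustrative):

	#include <linux/pci.h>
	#include "drm_pciids.h"

	/* Expands the macro into an ordinary PCI ID table. Adding
	 * RADEON_IS_MOBILITY to an entry (as above) therefore changes
	 * driver behaviour for that device without any code change. */
	static struct pci_device_id pciidlist[] = {
		radeon_PCI_IDS
	};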
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index fe3e3a4b4aed..41862e9a4c20 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -496,6 +496,16 @@ typedef struct {
 #define DRM_RADEON_SETPARAM   0x19
 #define DRM_RADEON_SURF_ALLOC 0x1a
 #define DRM_RADEON_SURF_FREE  0x1b
+/* KMS ioctl */
+#define DRM_RADEON_GEM_INFO       0x1c
+#define DRM_RADEON_GEM_CREATE     0x1d
+#define DRM_RADEON_GEM_MMAP       0x1e
+#define DRM_RADEON_GEM_PREAD      0x21
+#define DRM_RADEON_GEM_PWRITE     0x22
+#define DRM_RADEON_GEM_SET_DOMAIN 0x23
+#define DRM_RADEON_GEM_WAIT_IDLE  0x24
+#define DRM_RADEON_CS             0x26
+#define DRM_RADEON_INFO           0x27
 
 #define DRM_IOCTL_RADEON_CP_INIT  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
 #define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -524,6 +534,17 @@ typedef struct {
 #define DRM_IOCTL_RADEON_SETPARAM   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
 #define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
 #define DRM_IOCTL_RADEON_SURF_FREE  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
+/* KMS */
+#define DRM_IOCTL_RADEON_GEM_INFO       DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
+#define DRM_IOCTL_RADEON_GEM_CREATE     DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
+#define DRM_IOCTL_RADEON_GEM_MMAP       DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
+#define DRM_IOCTL_RADEON_GEM_PREAD      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
+#define DRM_IOCTL_RADEON_GEM_PWRITE     DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
+#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
+#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE  DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
+#define DRM_IOCTL_RADEON_CS             DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
+#define DRM_IOCTL_RADEON_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
+
 
 typedef struct drm_radeon_init {
 	enum {
@@ -682,6 +703,7 @@ typedef struct drm_radeon_indirect {
 #define RADEON_PARAM_VBLANK_CRTC        13   /* VBLANK CRTC */
 #define RADEON_PARAM_FB_LOCATION        14   /* FB location */
 #define RADEON_PARAM_NUM_GB_PIPES       15   /* num GB pipes */
+#define RADEON_PARAM_DEVICE_ID          16
 
 typedef struct drm_radeon_getparam {
 	int param;
@@ -751,4 +773,112 @@ typedef struct drm_radeon_surface_free {
 #define DRM_RADEON_VBLANK_CRTC1        1
 #define DRM_RADEON_VBLANK_CRTC2        2
 
+/*
+ * Kernel modesetting world below.
+ */
+#define RADEON_GEM_DOMAIN_CPU  0x1
+#define RADEON_GEM_DOMAIN_GTT  0x2
+#define RADEON_GEM_DOMAIN_VRAM 0x4
+
+struct drm_radeon_gem_info {
+	uint64_t gart_size;
+	uint64_t vram_size;
+	uint64_t vram_visible;
+};
+
+#define RADEON_GEM_NO_BACKING_STORE 1
+
+struct drm_radeon_gem_create {
+	uint64_t size;
+	uint64_t alignment;
+	uint32_t handle;
+	uint32_t initial_domain;
+	uint32_t flags;
+};
+
+struct drm_radeon_gem_mmap {
+	uint32_t handle;
+	uint32_t pad;
+	uint64_t offset;
+	uint64_t size;
+	uint64_t addr_ptr;
+};
+
+struct drm_radeon_gem_set_domain {
+	uint32_t handle;
+	uint32_t read_domains;
+	uint32_t write_domain;
+};
+
+struct drm_radeon_gem_wait_idle {
+	uint32_t handle;
+	uint32_t pad;
+};
+
+struct drm_radeon_gem_busy {
+	uint32_t handle;
+	uint32_t busy;
+};
+
+struct drm_radeon_gem_pread {
+	/** Handle for the object being read. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset into the object to read from */
+	uint64_t offset;
+	/** Length of data to read */
+	uint64_t size;
+	/** Pointer to write the data into. */
+	/* void *, but pointers are not 32/64 compatible */
+	uint64_t data_ptr;
+};
+
+struct drm_radeon_gem_pwrite {
+	/** Handle for the object being written to. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset into the object to write to */
+	uint64_t offset;
+	/** Length of data to write */
+	uint64_t size;
+	/** Pointer to read the data from. */
+	/* void *, but pointers are not 32/64 compatible */
+	uint64_t data_ptr;
+};
+
+#define RADEON_CHUNK_ID_RELOCS 0x01
+#define RADEON_CHUNK_ID_IB     0x02
+
+struct drm_radeon_cs_chunk {
+	uint32_t chunk_id;
+	uint32_t length_dw;
+	uint64_t chunk_data;
+};
+
+struct drm_radeon_cs_reloc {
+	uint32_t handle;
+	uint32_t read_domains;
+	uint32_t write_domain;
+	uint32_t flags;
+};
+
+struct drm_radeon_cs {
+	uint32_t num_chunks;
+	uint32_t cs_id;
+	/* this points to uint64_t * which point to cs chunks */
+	uint64_t chunks;
+	/* updates to the limits after this CS ioctl */
+	uint64_t gart_limit;
+	uint64_t vram_limit;
+};
+
+#define RADEON_INFO_DEVICE_ID    0x00
+#define RADEON_INFO_NUM_GB_PIPES 0x01
+
+struct drm_radeon_info {
+	uint32_t request;
+	uint32_t pad;
+	uint64_t value;
+};
+
 #endif
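
The GEM ioctls above are driven from user space through the generic DRM ioctl path. A minimal, hedged user-space sketch, assuming a KMS-enabled radeon kernel, the usual /dev/dri/card0 node, and that drm.h / radeon_drm.h are on the include path (exact paths vary by installation); error handling is abbreviated:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "drm.h"
	#include "radeon_drm.h"

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);
		if (fd < 0)
			return 1;

		/* Query the sizes exported by the new GEM_INFO ioctl. */
		struct drm_radeon_gem_info info;
		memset(&info, 0, sizeof(info));
		if (ioctl(fd, DRM_IOCTL_RADEON_GEM_INFO, &info) == 0)
			printf("gart %llu vram %llu (visible %llu)\n",
			       (unsigned long long)info.gart_size,
			       (unsigned long long)info.vram_size,
			       (unsigned long long)info.vram_visible);

		/* Allocate a 64 KiB buffer object in the GTT domain. */
		struct drm_radeon_gem_create create;
		memset(&create, 0, sizeof(create));
		create.size = 64 * 1024;
		create.alignment = 4096;
		create.initial_domain = RADEON_GEM_DOMAIN_GTT;
		if (ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &create) == 0)
			printf("bo handle %u\n", create.handle);

		close(fd);
		return 0;
	}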
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
new file mode 100644
index 000000000000..cd22ab4b495c
--- /dev/null
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -0,0 +1,618 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include "drm_hashtab.h"
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/bitmap.h>

struct ttm_bo_device;

struct drm_mm_node;

/**
 * struct ttm_mem_reg
 *
 * @mm_node: Memory manager node.
 * @size: Requested size of memory region.
 * @num_pages: Actual size of memory region in pages.
 * @page_alignment: Page alignment.
 * @placement: Placement flags.
 *
 * Structure indicating the placement and space resources used by a
 * buffer object.
 */

struct ttm_mem_reg {
	struct drm_mm_node *mm_node;
	unsigned long size;
	unsigned long num_pages;
	uint32_t page_alignment;
	uint32_t mem_type;
	uint32_t placement;
};

/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device: These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space that can be used for normal vm operations.
 *
 * @ttm_bo_type_user: These are user-space memory areas that are made
 * available to the GPU by mapping the buffer pages into the GPU aperture
 * space. These buffers cannot be mmapped from the device address space.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 */

enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_user,
	ttm_bo_type_kernel
};

struct ttm_tt;

/**
 * struct ttm_buffer_object
 *
 * @bdev: Pointer to the buffer object device structure.
 * @buffer_start: The virtual user-space start address of ttm_bo_type_user
 * buffers.
 * @type: The bo type.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @num_pages: Actual number of pages.
 * @addr_space_offset: Address space offset.
 * @acc_size: Accounted size for this object.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is put on the delayed delete list.
 * @list_kref: List reference count of this buffer object. This member is
 * used to avoid destruction while the buffer object is still on a list.
 * LRU lists may keep one refcount each, the delayed delete list keeps one,
 * and kref != 0 keeps one refcount. When this refcount reaches zero,
 * the object is destroyed.
 * @event_queue: Queue for processes waiting on buffer object status change.
 * @lock: spinlock protecting mostly synchronization members.
 * @proposed_placement: Proposed placement for the buffer. Changed only by the
 * creator prior to validation as opposed to bo->mem.proposed_flags which is
 * changed by the implementation prior to a buffer move if it wants to outsmart
 * the buffer creator / user. The latter happens, for example, at eviction.
 * @mem: structure describing current placement.
 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistant shmem object.
 * @ttm: TTM structure holding system pages.
 * @evicted: Whether the object was evicted without user-space knowing.
 * @cpu_writes: For synchronization. Number of cpu writers.
 * @lru: List head for the lru list.
 * @ddestroy: List head for the delayed destroy list.
 * @swap: List head for swap LRU list.
 * @val_seq: Sequence of the validation holding the @reserved lock.
 * Used to avoid starvation when many processes compete to validate the
 * buffer. This member is protected by the bo_device::lru_lock.
 * @seq_valid: The value of @val_seq is valid. This value is protected by
 * the bo_device::lru_lock.
 * @reserved: Deadlock-free lock used for synchronization state transitions.
 * @sync_obj_arg: Opaque argument to synchronization object function.
 * @sync_obj: Pointer to a synchronization object.
 * @priv_flags: Flags describing buffer object internal state.
 * @vm_rb: Rb node for the vm rb tree.
 * @vm_node: Address space manager node.
 * @offset: The current GPU offset, which can have different meanings
 * depending on the memory type. For SYSTEM type memory, it should be 0.
 * @cur_placement: Hint of current placement.
 *
 * Base class for TTM buffer object, that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset @offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member and the API visibility of this object make it possible
 * to derive driver-specific types.
 */

struct ttm_buffer_object {
	/**
	 * Members constant at init.
	 */

	struct ttm_bo_device *bdev;
	unsigned long buffer_start;
	enum ttm_bo_type type;
	void (*destroy) (struct ttm_buffer_object *);
	unsigned long num_pages;
	uint64_t addr_space_offset;
	size_t acc_size;

	/**
	 * Members not needing protection.
	 */

	struct kref kref;
	struct kref list_kref;
	wait_queue_head_t event_queue;
	spinlock_t lock;

	/**
	 * Members protected by the bo::reserved lock.
	 */

	uint32_t proposed_placement;
	struct ttm_mem_reg mem;
	struct file *persistant_swap_storage;
	struct ttm_tt *ttm;
	bool evicted;

	/**
	 * Members protected by the bo::reserved lock only when written to.
	 */

	atomic_t cpu_writers;

	/**
	 * Members protected by the bdev::lru_lock.
	 */

	struct list_head lru;
	struct list_head ddestroy;
	struct list_head swap;
	uint32_t val_seq;
	bool seq_valid;

	/**
	 * Members protected by the bdev::lru_lock
	 * only when written to.
	 */

	atomic_t reserved;


	/**
	 * Members protected by the bo::lock
	 */

	void *sync_obj_arg;
	void *sync_obj;
	unsigned long priv_flags;

	/**
	 * Members protected by the bdev::vm_lock
	 */

	struct rb_node vm_rb;
	struct drm_mm_node *vm_node;


	/**
	 * Special members that are protected by the reserve lock
	 * and the bo::lock when written to. Can be read with
	 * either of these locks held.
	 */

	unsigned long offset;
	uint32_t cur_placement;
};
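
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the @destroy hook together with struct embedding is what enables the
 * driver-specific bo types mentioned above, e.g. a hypothetical wrapper:
 *
 *	struct my_driver_bo {
 *		struct ttm_buffer_object tbo;	// must be embedded
 *		// ... driver-private fields ...
 *	};
 *
 *	static void my_driver_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		struct my_driver_bo *dbo =
 *			container_of(tbo, struct my_driver_bo, tbo);
 *		kfree(dbo);
 *	}
 *
 * ttm_buffer_object_init() is then called with my_driver_bo_destroy as the
 * @destroy argument, so TTM frees the enclosing structure.
 */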

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */

struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap,
		ttm_bo_map_vmap,
		ttm_bo_map_kmap,
		ttm_bo_map_premapped,
	} bo_kmap_type;
};

/**
 * ttm_bo_reference - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 *
 * Returns a refcounted pointer to a buffer object.
 */

static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
	kref_get(&bo->kref);
	return bo;
}

/**
 * ttm_bo_wait - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @interruptible: Use interruptible wait.
 * @no_wait: Return immediately if buffer is busy.
 *
 * This function must be called with the bo::mutex held, and makes
 * sure any previous rendering to the buffer is completed.
 * Note: It might be necessary to block validations before the
 * wait by reserving the buffer.
 * Returns -EBUSY if no_wait is true and the buffer is busy.
 * Returns -ERESTART if interrupted by a signal.
 */
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
		       bool interruptible, bool no_wait);
/**
 * ttm_buffer_object_validate
 *
 * @bo: The buffer object.
 * @proposed_placement: Proposed placement for the buffer object.
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait: Return immediately if the buffer is busy.
 *
 * Changes placement and caching policy of the buffer object
 * according to bo::proposed_flags.
 * Returns
 * -EINVAL on invalid proposed_flags.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTART if interrupted by a signal.
 */
extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
				      uint32_t proposed_placement,
				      bool interruptible, bool no_wait);
/**
 * ttm_bo_unref
 *
 * @bo: The buffer object.
 *
 * Unreference and clear a pointer to a buffer object.
 */
extern void ttm_bo_unref(struct ttm_buffer_object **bo);

/**
 * ttm_bo_synccpu_write_grab
 *
 * @bo: The buffer object.
 * @no_wait: Return immediately if buffer is busy.
 *
 * Synchronizes a buffer object for CPU RW access. This means
 * blocking command submission that affects the buffer and
 * waiting for buffer idle. This lock is recursive.
 * Returns
 * -EBUSY if the buffer is busy and no_wait is true.
 * -ERESTART if interrupted by a signal.
 */

extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
/**
 * ttm_bo_synccpu_write_release:
 *
 * @bo: The buffer object.
 *
 * Releases a synccpu lock.
 */
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);

/**
 * ttm_buffer_object_init
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @flags: Initial placement flags.
 * @page_alignment: Data alignment in pages.
 * @buffer_start: Virtual address of user space data backing a
 * user buffer object.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistant shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @acc_size: Accounted size for this object.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 * On successful return, the object kref and list_kref are set to 1.
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTART: Interrupted by signal while sleeping waiting for resources.
 */

extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
				  struct ttm_buffer_object *bo,
				  unsigned long size,
				  enum ttm_bo_type type,
				  uint32_t flags,
				  uint32_t page_alignment,
				  unsigned long buffer_start,
				  bool interruptible,
				  struct file *persistant_swap_storage,
				  size_t acc_size,
				  void (*destroy) (struct ttm_buffer_object *));
/**
 * ttm_buffer_object_create
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @flags: Initial placement flags.
 * @page_alignment: Data alignment in pages.
 * @buffer_start: Virtual address of user space data backing a
 * user buffer object.
 * @interruptible: If needing to sleep while waiting for GPU resources,
 * sleep interruptible.
 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistant shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @p_bo: On successful completion *p_bo points to the created object.
 *
 * This function allocates a ttm_buffer_object, and then calls
 * ttm_buffer_object_init on that object.
 * The destroy function is set to kfree().
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTART: Interrupted by signal while waiting for resources.
 */

extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
				    unsigned long size,
				    enum ttm_bo_type type,
				    uint32_t flags,
				    uint32_t page_alignment,
				    unsigned long buffer_start,
				    bool interruptible,
				    struct file *persistant_swap_storage,
				    struct ttm_buffer_object **p_bo);

/**
 * ttm_bo_check_placement
 *
 * @bo: the buffer object.
 * @set_flags: placement flags to set.
 * @clr_flags: placement flags to clear.
 *
 * Performs minimal validity checking on an intended change of
 * placement flags.
 * Returns
 * -EINVAL: Intended change is invalid or not allowed.
 */

extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				  uint32_t set_flags, uint32_t clr_flags);

/**
 * ttm_bo_init_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 * @p_offset: offset of the managed area in pages.
 * @p_size: size of the managed area in pages.
 *
 * Initialize a manager for a given memory type.
 * Note: if part of driver firstopen, it must be protected from a
 * potentially racing lastclose.
 * Returns:
 * -EINVAL: invalid size or memory type.
 * -ENOMEM: Not enough memory.
 * May also return driver-specified errors.
 */

extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			  unsigned long p_offset, unsigned long p_size);
/**
 * ttm_bo_clean_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Take down a manager for a given memory type after first walking
 * the LRU list to evict any buffers left alive.
 *
 * Normally, this function is part of lastclose() or unload(), and at that
 * point there shouldn't be any buffers left created by user-space, since
 * they should have been removed by the file descriptor release() method.
 * However, before this function is run, make sure to signal all sync objects,
 * and verify that the delayed delete queue is empty. The driver must also
 * make sure that there are no NO_EVICT buffers present in this memory type
 * when the call is made.
 *
 * If this function is part of a VT switch, the caller must make sure that
 * there are no applications currently validating buffers before this
 * function is called. The caller can do that by first taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: invalid or uninitialized memory type.
 * -EBUSY: There are still buffers left in this memory type.
 */

extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_bo_evict_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Evicts all buffers on the lru list of the memory type.
 * This is normally part of a VT switch or an
 * out-of-memory-space-due-to-fragmentation handler.
 * The caller must make sure that there are no other processes
 * currently validating buffers, and can do that by taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: Invalid or uninitialized memory type.
 * -ERESTART: The call was interrupted by a signal while waiting to
 * evict a buffer.
 */

extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to an integer that on return indicates 1 if the
 * virtual map is io memory, 0 if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is 1 on return, the virtual address points to an io memory area
 * that should strictly be accessed by the iowriteXX() and similar functions.
 */

static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	*is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
		     map->bo_kmap_type == ttm_bo_map_premapped);
	return map->virtual;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */

extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		       unsigned long num_pages, struct ttm_bo_kmap_obj *map);

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */

extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);

#if 0
#endif


/**
 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
 *
 * @vma: vma as input from the fbdev mmap method.
 * @bo: The bo backing the address space. The address space will
 * have the same size as the bo, and start at offset 0.
 *
 * This function is intended to be called by the fbdev mmap method
 * if the fbdev address space is to be backed by a bo.
 */

extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
			  struct ttm_buffer_object *bo);

/**
 * ttm_bo_mmap - mmap out of the ttm device address space.
 *
 * @filp: filp as input from the mmap method.
 * @vma: vma as input from the mmap method.
 * @bdev: Pointer to the ttm_bo_device with the address space manager.
 *
 * This function is intended to be called by the device mmap method
 * if the device address space is to be backed by the bo manager.
 */

extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		       struct ttm_bo_device *bdev);

/**
 * ttm_bo_io
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @filp: Pointer to the struct file attempting to read / write.
 * @wbuf: User-space pointer to address of buffer to write. NULL on read.
 * @rbuf: User-space pointer to address of buffer to read into.
 * NULL on write.
 * @count: Number of bytes to read / write.
 * @f_pos: Pointer to current file position.
 * @write: 1 for write, 0 for read.
 *
 * This function implements read / write into ttm buffer objects, and is
 * intended to be called from the fops::read and fops::write method.
 * Returns:
 * See man (2) write, man(2) read. In particular,
 * the function may return -EINTR if
 * interrupted by a signal.
 */

extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
			 const char __user *wbuf, char __user *rbuf,
			 size_t count, loff_t *f_pos, bool write);

extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);

#endif
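
To show how the API above fits together, a hedged kernel-side sketch follows: it creates a kernel-only bo, maps it, clears it and drops the reference. TTM_PL_FLAG_SYSTEM and TTM_PL_FLAG_CACHED are assumed from ttm_placement.h (added by the same merge); bdev setup via ttm_bo_device_init() is assumed done elsewhere, and error handling is abbreviated:

	#include <linux/string.h>
	#include "ttm/ttm_bo_api.h"
	#include "ttm/ttm_placement.h"

	static int example_bo_roundtrip(struct ttm_bo_device *bdev)
	{
		struct ttm_buffer_object *bo = NULL;
		struct ttm_bo_kmap_obj map;
		bool is_iomem;
		int ret;

		/* One page, kernel-only, cached system memory. */
		ret = ttm_buffer_object_create(bdev, PAGE_SIZE, ttm_bo_type_kernel,
					       TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
					       0, 0, false, NULL, &bo);
		if (ret)
			return ret;

		ret = ttm_bo_kmap(bo, 0, 1, &map);	/* map the single page */
		if (ret == 0) {
			memset(ttm_kmap_obj_virtual(&map, &is_iomem), 0, PAGE_SIZE);
			ttm_bo_kunmap(&map);
		}

		ttm_bo_unref(&bo);	/* drops the creation reference */
		return ret;
	}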
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
new file mode 100644
index 000000000000..62ed733c52a2
--- /dev/null
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -0,0 +1,867 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_memory.h"
#include "drm_mm.h"
#include "linux/workqueue.h"
#include "linux/fs.h"
#include "linux/spinlock.h"

struct ttm_backend;

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member populate
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 * @num_pages: Number of pages to populate.
	 * @pages: Array of pointers to ttm pages.
	 * @dummy_read_page: Page to be used instead of NULL pages in the
	 * array @pages.
	 *
	 * Populate the backend with ttm pages. Depending on the backend,
	 * it may or may not copy the @pages array.
	 */
	int (*populate) (struct ttm_backend *backend,
			 unsigned long num_pages, struct page **pages,
			 struct page *dummy_read_page);
	/**
	 * struct ttm_backend_func member clear
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * This is an "unpopulate" function. Release all resources
	 * allocated with populate.
	 */
	void (*clear) (struct ttm_backend *backend);

	/**
	 * struct ttm_backend_func member bind
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture- and system page sizes.
	 */
	int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture- and system page sizes.
	 */
	int (*unbind) (struct ttm_backend *backend);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * Destroy the backend.
	 */
	void (*destroy) (struct ttm_backend *backend);
};

/**
 * struct ttm_backend
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @flags: For driver use.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 *
 */

struct ttm_backend {
	struct ttm_bo_device *bdev;
	uint32_t flags;
	struct ttm_backend_func *func;
};

#define TTM_PAGE_FLAG_VMALLOC         (1 << 0)
#define TTM_PAGE_FLAG_USER            (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};

/**
 * struct ttm_tt
 *
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @first_himem_page: Himem pages are put last in the page array, which
 * enables us to run caching attribute changes on only the first part
 * of the page array containing lomem pages. This is the index of the
 * first himem page.
 * @last_lomem_page: Index of the last lomem page in the page array.
 * @num_pages: Number of pages in the page array.
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @be: Pointer to the ttm backend.
 * @tsk: The task for user ttm.
 * @start: virtual address for user ttm.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct page *dummy_read_page;
	struct page **pages;
	long first_himem_page;
	long last_lomem_page;
	uint32_t page_flags;
	unsigned long num_pages;
	struct ttm_bo_device *bdev;
	struct ttm_backend *be;
	struct task_struct *tsk;
	unsigned long start;
	struct file *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
						   before kernel access. */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3) /* Can't map aperture */

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @io_offset: The io_offset of the first managed page of IO memory or
 * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
 * memory, this should be set to NULL.
 * @io_size: The size of a managed IO region (fixed memory or aperture).
 * @io_addr: Virtual kernel address if the io region is pre-mapped. For
 * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
 * @io_addr should be set to NULL.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @manager: The range manager used for this memory type. FIXME: If the aperture
 * has a page size different from the underlying system, the granularity
 * of this manager should take care of this. But the range allocating code
 * in ttm_bo.c needs to be modified for this.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	unsigned long io_offset;
	unsigned long io_size;
	void *io_addr;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;

	/*
	 * Protected by the bdev->lru_lock.
	 * TODO: Consider one lru_lock per ttm_mem_type_manager.
	 * Plays ill with list removal, though.
	 */

	struct drm_mm manager;
	struct list_head lru;
};

/**
 * struct ttm_bo_driver
 *
 * @mem_type_prio: Priority array of memory types to place a buffer object in
 * if it fits without evicting buffers from any of these memory types.
 * @mem_busy_prio: Priority array of memory types to place a buffer object in
 * if it needs to evict buffers to make room.
 * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
 * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
	const uint32_t *mem_type_prio;
	const uint32_t *mem_busy_prio;
	uint32_t num_mem_type_prio;
	uint32_t num_mem_busy_prio;

	/**
	 * struct ttm_bo_driver member create_ttm_backend_entry
	 *
	 * @bdev: The buffer object device.
	 *
	 * Create a driver specific struct ttm_backend.
	 */

	struct ttm_backend *(*create_ttm_backend_entry)
	 (struct ttm_bo_device *bdev);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	uint32_t (*evict_flags) (struct ttm_buffer_object *bo);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait: whether this should give up and return -EBUSY
	 * if this move would require sleeping
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait, struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo,
			      struct file *filp);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these, I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */

	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);
};

#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
				     idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @count: Current number of buffer objects.
 * @pages: Current number of pinned pages.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
 * used by a buffer object. This is excluding page arrays and backing pages.
 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
 * @man: An array of mem_type_managers.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
 * If a GPU lockup has been detected, this is forced to 0.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */

	struct ttm_mem_global *mem_glob;
	struct ttm_bo_driver *driver;
	struct page *dummy_read_page;
	struct ttm_mem_shrink shrink;

	size_t ttm_bo_extra_size;
	size_t ttm_bo_size;

	rwlock_t vm_lock;
	/*
	 * Protected by the vm lock.
	 */
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	struct rb_root addr_space_rb;
	struct drm_mm addr_space_mm;

	/*
	 * Might want to change this to one lock per manager.
	 */
	spinlock_t lru_lock;
	/*
	 * Protected by the lru lock.
	 */
	struct list_head ddestroy;
	struct list_head swap_lru;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

	bool nice_mode;
	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
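
/*
 * Illustrative use (editor's example, not part of the original file),
 * assuming the TTM_PL_* definitions from ttm_placement.h: replace only
 * the caching bits of a placement word while leaving the memory-type
 * bits untouched:
 *
 *	uint32_t placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
 *	ttm_flag_masked(&placement, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *	// now placement == TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC
 */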

/**
 * ttm_tt_create
 *
 * @bdev: pointer to a struct ttm_bo_device.
 * @size: Size of the data needing backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * NULL: Out of memory.
 */
extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size,
				    uint32_t page_flags,
				    struct page *dummy_read_page);

/**
 * ttm_tt_set_user:
 *
 * @ttm: The struct ttm_tt to populate.
 * @tsk: A struct task_struct for which @start is a valid user-space address.
 * @start: A valid user-space address.
 * @num_pages: Size in pages of the user memory area.
 *
 * Populate a struct ttm_tt with a user-space memory area after first pinning
 * the pages backing it.
 * Returns:
 * !0: Error.
 */

extern int ttm_tt_set_user(struct ttm_tt *ttm,
			   struct task_struct *tsk,
			   unsigned long start, unsigned long num_pages);
487/**
488 * ttm_ttm_bind:
489 *
490 * @ttm: The struct ttm_tt containing backing pages.
491 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
492 *
493 * Bind the pages of @ttm to an aperture location identified by @bo_mem
494 */
495extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
496
497/**
498 * ttm_ttm_destroy:
499 *
500 * @ttm: The struct ttm_tt.
501 *
502 * Unbind, unpopulate and destroy a struct ttm_tt.
503 */
504extern void ttm_tt_destroy(struct ttm_tt *ttm);
505
506/**
507 * ttm_ttm_unbind:
508 *
509 * @ttm: The struct ttm_tt.
510 *
511 * Unbind a struct ttm_tt.
512 */
513extern void ttm_tt_unbind(struct ttm_tt *ttm);
514
515/**
516 * ttm_ttm_destroy:
517 *
518 * @ttm: The struct ttm_tt.
519 * @index: Index of the desired page.
520 *
521 * Return a pointer to the struct page backing @ttm at page
522 * index @index. If the page is unpopulated, one will be allocated to
523 * populate that index.
524 *
525 * Returns:
526 * NULL on OOM.
527 */
528extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
529
/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct page to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cache-coherent.
 */
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change caching policy of any default kernel mappings of
 * the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct file *persistant_swap_storage);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @proposed_placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sleeping.
 * @no_wait: Don't sleep waiting for space to become available.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @mem, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTART: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			    uint32_t proposed_placement,
			    struct ttm_mem_reg *mem,
			    bool interruptible, bool no_wait);
/**
 * ttm_bo_wait_cpu
 *
 * @bo: Pointer to a struct ttm_buffer_object.
 * @no_wait: Don't sleep while waiting.
 *
 * Wait until a buffer object is no longer sync'ed for CPU access.
 * Returns:
 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
 * -ERESTART: An interruptible sleep was interrupted by a signal.
 */

extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 * @bus_base: On return the base of the PCI region.
 * @bus_offset: On return the byte offset into the PCI region.
 * @bus_size: On return the byte size of the buffer object or zero if
 * the buffer object memory is not accessible through a PCI region.
 *
 * Returns:
 * -EINVAL if the buffer object is currently not mappable.
 * 0 otherwise.
 */

extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
			     struct ttm_mem_reg *mem,
			     unsigned long *bus_base,
			     unsigned long *bus_offset,
			     unsigned long *bus_size);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @mem_glob: A pointer to an initialized struct ttm_mem_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device.
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_mem_global *mem_glob,
			      struct ttm_bo_driver *driver,
			      uint64_t file_page_offset);
652
653/**
654 * ttm_bo_reserve:
655 *
656 * @bo: A pointer to a struct ttm_buffer_object.
657 * @interruptible: Sleep interruptible if waiting.
658 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
659 * @use_sequence: If @bo is already reserved, Only sleep waiting for
660 * it to become unreserved if @sequence < (@bo)->sequence.
661 *
662 * Locks a buffer object for validation. (Or prevents other processes from
663 * locking it for validation) and removes it from lru lists, while taking
664 * a number of measures to prevent deadlocks.
665 *
666 * Deadlocks may occur when two processes try to reserve multiple buffers in
667 * different order, either by will or as a result of a buffer being evicted
668 * to make room for a buffer already reserved. (Buffers are reserved before
669 * they are evicted). The following algorithm prevents such deadlocks from
670 * occuring:
671 * 1) Buffers are reserved with the lru spinlock held. Upon successful
672 * reservation they are removed from the lru list. This stops a reserved buffer
673 * from being evicted. However the lru spinlock is released between the time
674 * a buffer is selected for eviction and the time it is reserved.
675 * Therefore a check is made when a buffer is reserved for eviction, that it
676 * is still the first buffer in the lru list, before it is removed from the
677 * list. @check_lru == 1 forces this check. If it fails, the function returns
678 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
679 * the procedure.
680 * 2) Processes attempting to reserve multiple buffers other than for eviction,
681 * (typically execbuf), should first obtain a unique 32-bit
682 * validation sequence number,
683 * and call this function with @use_sequence == 1 and @sequence == the unique
684 * sequence number. If upon call of this function, the buffer object is already
685 * reserved, the validation sequence is checked against the validation
686 * sequence of the process currently reserving the buffer,
687 * and if the current validation sequence is greater than that of the process
688 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
689 * waiting for the buffer to become unreserved, after which it retries
690 * reserving.
 691 * The caller should, when receiving an -EAGAIN error,
 692 * release all its buffer reservations, wait for @bo to become unreserved, and
693 * then rerun the validation with the same validation sequence. This procedure
694 * will always guarantee that the process with the lowest validation sequence
695 * will eventually succeed, preventing both deadlocks and starvation.
696 *
697 * Returns:
698 * -EAGAIN: The reservation may cause a deadlock.
699 * Release all buffer reservations, wait for @bo to become unreserved and
700 * try again. (only if use_sequence == 1).
701 * -ERESTART: A wait for the buffer to become unreserved was interrupted by
702 * a signal. Release all buffer reservations and return to user-space.
703 */
704extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
705 bool interruptible,
706 bool no_wait, bool use_sequence, uint32_t sequence);
707
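/*
 * A simplified execbuf-style reservation loop illustrating the -EAGAIN
 * back-off protocol described above. The list entry type and function
 * names are hypothetical driver code, not part of this patch.
 */
struct example_validate_entry {
	struct list_head head;
	struct ttm_buffer_object *bo;
	bool reserved;
};

static void example_backoff(struct list_head *list)
{
	struct example_validate_entry *entry;

	list_for_each_entry(entry, list, head) {
		if (entry->reserved) {
			ttm_bo_unreserve(entry->bo);
			entry->reserved = false;
		}
	}
}

static int example_reserve_all(struct list_head *list, uint32_t val_seq)
{
	struct example_validate_entry *entry;
	int ret;

retry:
	list_for_each_entry(entry, list, head) {
		ret = ttm_bo_reserve(entry->bo, true, false, true, val_seq);
		if (ret == 0) {
			entry->reserved = true;
			continue;
		}
		/* Release everything reserved so far before handling. */
		example_backoff(list);
		if (ret == -EAGAIN) {
			/* Deadlock risk: wait for the contended buffer,
			 * then rerun with the same validation sequence. */
			ret = ttm_bo_wait_unreserved(entry->bo, true);
			if (ret == 0)
				goto retry;
		}
		return ret;	/* -ERESTART or wait failure */
	}
	return 0;
}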
708/**
709 * ttm_bo_unreserve
710 *
711 * @bo: A pointer to a struct ttm_buffer_object.
712 *
713 * Unreserve a previous reservation of @bo.
714 */
715extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
716
717/**
718 * ttm_bo_wait_unreserved
719 *
720 * @bo: A pointer to a struct ttm_buffer_object.
721 *
722 * Wait for a struct ttm_buffer_object to become unreserved.
 723 * This is typically used in the execbuf code to relax cpu usage while
 724 * backing off from a potential deadlock condition.
725 */
726extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
727 bool interruptible);
728
729/**
730 * ttm_bo_block_reservation
731 *
732 * @bo: A pointer to a struct ttm_buffer_object.
733 * @interruptible: Use interruptible sleep when waiting.
734 * @no_wait: Don't sleep, but rather return -EBUSY.
735 *
736 * Block reservation for validation by simply reserving the buffer.
 737 * This is intended for single-buffer use only, without eviction,
738 * and thus needs no deadlock protection.
739 *
740 * Returns:
741 * -EBUSY: If no_wait == 1 and the buffer is already reserved.
742 * -ERESTART: If interruptible == 1 and the process received a signal
743 * while sleeping.
744 */
745extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
746 bool interruptible, bool no_wait);
747
748/**
749 * ttm_bo_unblock_reservation
750 *
751 * @bo: A pointer to a struct ttm_buffer_object.
752 *
753 * Unblocks reservation leaving lru lists untouched.
754 */
755extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
756
757/*
758 * ttm_bo_util.c
759 */
760
761/**
762 * ttm_bo_move_ttm
763 *
764 * @bo: A pointer to a struct ttm_buffer_object.
765 * @evict: 1: This is an eviction. Don't try to pipeline.
766 * @no_wait: Never sleep, but rather return with -EBUSY.
767 * @new_mem: struct ttm_mem_reg indicating where to move.
768 *
769 * Optimized move function for a buffer object with both old and
770 * new placement backed by a TTM. The function will, if successful,
 771 * free any old aperture space, set (@new_mem)->mm_node to NULL,
772 * and update the (@bo)->mem placement flags. If unsuccessful, the old
773 * data remains untouched, and it's up to the caller to free the
774 * memory space indicated by @new_mem.
775 * Returns:
776 * !0: Failure.
777 */
778
779extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
780 bool evict, bool no_wait,
781 struct ttm_mem_reg *new_mem);
782
783/**
784 * ttm_bo_move_memcpy
785 *
786 * @bo: A pointer to a struct ttm_buffer_object.
787 * @evict: 1: This is an eviction. Don't try to pipeline.
788 * @no_wait: Never sleep, but rather return with -EBUSY.
789 * @new_mem: struct ttm_mem_reg indicating where to move.
790 *
791 * Fallback move function for a mappable buffer object in mappable memory.
792 * The function will, if successful,
 793 * free any old aperture space, set (@new_mem)->mm_node to NULL,
794 * and update the (@bo)->mem placement flags. If unsuccessful, the old
795 * data remains untouched, and it's up to the caller to free the
796 * memory space indicated by @new_mem.
797 * Returns:
798 * !0: Failure.
799 */
800
801extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
802 bool evict,
803 bool no_wait, struct ttm_mem_reg *new_mem);
804
805/**
806 * ttm_bo_free_old_node
807 *
808 * @bo: A pointer to a struct ttm_buffer_object.
809 *
810 * Utility function to free an old placement after a successful move.
811 */
812extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
813
814/**
815 * ttm_bo_move_accel_cleanup.
816 *
817 * @bo: A pointer to a struct ttm_buffer_object.
818 * @sync_obj: A sync object that signals when moving is complete.
819 * @sync_obj_arg: An argument to pass to the sync object idle / wait
820 * functions.
821 * @evict: This is an evict move. Don't return until the buffer is idle.
822 * @no_wait: Never sleep, but rather return with -EBUSY.
823 * @new_mem: struct ttm_mem_reg indicating where to move.
824 *
825 * Accelerated move function to be called when an accelerated move
826 * has been scheduled. The function will create a new temporary buffer object
827 * representing the old placement, and put the sync object on both buffer
 828 * objects. After that, the newly created buffer object is unref'd so that
 829 * it is destroyed when the move completes. This helps pipeline
 830 * buffer moves.
831 */
832
833extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
834 void *sync_obj,
835 void *sync_obj_arg,
836 bool evict, bool no_wait,
837 struct ttm_mem_reg *new_mem);
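/*
 * A hedged sketch of a driver move hook combining the helpers above:
 * schedule a hardware copy and pipeline it with
 * ttm_bo_move_accel_cleanup(), or fall back to ttm_bo_move_memcpy().
 * example_hw_copy() and its fence object are hypothetical driver code.
 */
static int example_hw_copy(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem, void **fence);

static int example_bo_move(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait,
			   struct ttm_mem_reg *new_mem)
{
	void *fence;

	if (example_hw_copy(bo, new_mem, &fence) == 0)
		/* Pipelined: cleanup happens when the fence signals. */
		return ttm_bo_move_accel_cleanup(bo, fence, NULL,
						 evict, no_wait, new_mem);

	/* No accelerated path: plain CPU copy. */
	return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}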
838/**
839 * ttm_io_prot
840 *
841 * @c_state: Caching state.
842 * @tmp: Page protection flag for a normal, cached mapping.
843 *
844 * Utility function that returns the pgprot_t that should be used for
845 * setting up a PTE with the caching model indicated by @c_state.
846 */
847extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
848
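/*
 * Sketch: deriving a PTE protection for a write-combined user-space
 * mapping, assuming the tt_wc value from enum ttm_caching_state.
 */
static pgprot_t example_wc_prot(struct vm_area_struct *vma)
{
	return ttm_io_prot(tt_wc, vma->vm_page_prot);
}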
849#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
850#define TTM_HAS_AGP
851#include <linux/agp_backend.h>
852
853/**
854 * ttm_agp_backend_init
855 *
856 * @bdev: Pointer to a struct ttm_bo_device.
857 * @bridge: The agp bridge this device is sitting on.
858 *
859 * Create a TTM backend that uses the indicated AGP bridge as an aperture
860 * for TT memory. This function uses the linux agpgart interface to
861 * bind and unbind memory backing a ttm_tt.
862 */
863extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
864 struct agp_bridge_data *bridge);
865#endif
866
867#endif
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
new file mode 100644
index 000000000000..d8b8f042c4f1
--- /dev/null
+++ b/include/drm/ttm/ttm_memory.h
@@ -0,0 +1,153 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef TTM_MEMORY_H
29#define TTM_MEMORY_H
30
31#include <linux/workqueue.h>
32#include <linux/spinlock.h>
33#include <linux/wait.h>
34#include <linux/errno.h>
35
36/**
37 * struct ttm_mem_shrink - callback to shrink TTM memory usage.
38 *
39 * @do_shrink: The callback function.
40 *
41 * Arguments to the do_shrink functions are intended to be passed using
 42 * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
43 * and can be accessed using container_of().
44 */
45
46struct ttm_mem_shrink {
47 int (*do_shrink) (struct ttm_mem_shrink *);
48};
49
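/*
 * Illustration of the inheritance scheme described above: a
 * hypothetical driver embeds struct ttm_mem_shrink and recovers its
 * enclosing object with container_of() inside the callback.
 */
struct example_driver_shrink {
	struct ttm_mem_shrink shrink;
	void *dev_priv;		/* driver-private state */
};

static int example_do_shrink(struct ttm_mem_shrink *shrink)
{
	struct example_driver_shrink *ds =
	    container_of(shrink, struct example_driver_shrink, shrink);

	/* ... swap out or release memory tracked by ds->dev_priv ... */
	return 0;
}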
50/**
51 * struct ttm_mem_global - Global memory accounting structure.
52 *
53 * @shrink: A single callback to shrink TTM memory usage. Extend this
54 * to a linked list to be able to handle multiple callbacks when needed.
55 * @swap_queue: A workqueue to handle shrinking in low memory situations. We
56 * need a separate workqueue since it will spend a lot of time waiting
 57 * for the GPU, and this would otherwise block other workqueue tasks.
 58 * At this point we use only a single-threaded workqueue.
59 * @work: The workqueue callback for the shrink queue.
60 * @queue: Wait queue for processes suspended waiting for memory.
 61 * @lock: Lock to protect the @shrink pointer and the memory accounting
 62 * members, that is, essentially the whole structure with some exceptions.
63 * @emer_memory: Lowmem memory limit available for root.
64 * @max_memory: Lowmem memory limit available for non-root.
65 * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
66 * @used_memory: Currently used lowmem memory.
67 * @used_total_memory: Currently used total (lowmem + highmem) memory.
68 * @total_memory_swap_limit: Total memory limit where the shrink workqueue
69 * kicks in.
70 * @max_total_memory: Total memory available to non-root processes.
71 * @emer_total_memory: Total memory available to root processes.
72 *
73 * Note that this structure is not per device. It should be global for all
74 * graphics devices.
75 */
76
77struct ttm_mem_global {
78 struct ttm_mem_shrink *shrink;
79 struct workqueue_struct *swap_queue;
80 struct work_struct work;
81 wait_queue_head_t queue;
82 spinlock_t lock;
83 uint64_t emer_memory;
84 uint64_t max_memory;
85 uint64_t swap_limit;
86 uint64_t used_memory;
87 uint64_t used_total_memory;
88 uint64_t total_memory_swap_limit;
89 uint64_t max_total_memory;
90 uint64_t emer_total_memory;
91};
92
93/**
94 * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
95 *
96 * @shrink: The object to initialize.
97 * @func: The callback function.
98 */
99
100static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
101 int (*func) (struct ttm_mem_shrink *))
102{
103 shrink->do_shrink = func;
104}
105
106/**
107 * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
108 *
109 * @glob: The struct ttm_mem_global object to register with.
110 * @shrink: An initialized struct ttm_mem_shrink object to register.
111 *
112 * Returns:
113 * -EBUSY: There's already a callback registered. (May change).
114 */
115
116static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
117 struct ttm_mem_shrink *shrink)
118{
119 spin_lock(&glob->lock);
120 if (glob->shrink != NULL) {
121 spin_unlock(&glob->lock);
122 return -EBUSY;
123 }
124 glob->shrink = shrink;
125 spin_unlock(&glob->lock);
126 return 0;
127}
128
129/**
130 * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
131 *
132 * @glob: The struct ttm_mem_global object to unregister from.
 133 * @shrink: A previously registered struct ttm_mem_shrink object.
134 *
135 */
136
137static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
138 struct ttm_mem_shrink *shrink)
139{
140 spin_lock(&glob->lock);
141 BUG_ON(glob->shrink != shrink);
142 glob->shrink = NULL;
143 spin_unlock(&glob->lock);
144}
145
146extern int ttm_mem_global_init(struct ttm_mem_global *glob);
147extern void ttm_mem_global_release(struct ttm_mem_global *glob);
148extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
149 bool no_wait, bool interruptible, bool himem);
150extern void ttm_mem_global_free(struct ttm_mem_global *glob,
151 uint64_t amount, bool himem);
152extern size_t ttm_round_pot(size_t size);
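/*
 * A usage sketch for the accounting helpers above. Allocations must be
 * balanced by a free of the same amount and himem setting; the function
 * name is illustrative.
 */
static inline int example_account(struct ttm_mem_global *glob, size_t size)
{
	size_t acc_size = ttm_round_pot(size);
	int ret;

	ret = ttm_mem_global_alloc(glob, acc_size, false, true, false);
	if (ret)
		return ret;	/* -ENOMEM or -ERESTART */
	/* ... create the object; on failure undo the accounting: */
	/* ttm_mem_global_free(glob, acc_size, false); */
	return 0;
}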
153#endif
diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h
new file mode 100644
index 000000000000..889a4c7958ae
--- /dev/null
+++ b/include/drm/ttm/ttm_module.h
@@ -0,0 +1,58 @@
1/**************************************************************************
2 *
3 * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_MODULE_H_
32#define _TTM_MODULE_H_
33
34#include <linux/kernel.h>
35
36#define TTM_PFX "[TTM]"
37
38enum ttm_global_types {
39 TTM_GLOBAL_TTM_MEM = 0,
40 TTM_GLOBAL_TTM_BO,
41 TTM_GLOBAL_TTM_OBJECT,
42 TTM_GLOBAL_NUM
43};
44
45struct ttm_global_reference {
46 enum ttm_global_types global_type;
47 size_t size;
48 void *object;
49 int (*init) (struct ttm_global_reference *);
50 void (*release) (struct ttm_global_reference *);
51};
52
53extern void ttm_global_init(void);
54extern void ttm_global_release(void);
55extern int ttm_global_item_ref(struct ttm_global_reference *ref);
56extern void ttm_global_item_unref(struct ttm_global_reference *ref);
57
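/*
 * Sketch of how a driver takes a reference on the global TTM memory
 * object (assumes ttm_memory.h). The init/release hooks and function
 * names are hypothetical driver code.
 */
static int example_mem_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void example_mem_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int example_get_mem_global(struct ttm_global_reference *ref)
{
	ref->global_type = TTM_GLOBAL_TTM_MEM;
	ref->size = sizeof(struct ttm_mem_global);
	ref->init = &example_mem_init;
	ref->release = &example_mem_release;
	return ttm_global_item_ref(ref);	/* allocates and inits ref->object */
}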
58#endif /* _TTM_MODULE_H_ */
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
new file mode 100644
index 000000000000..c84ff153a564
--- /dev/null
+++ b/include/drm/ttm/ttm_placement.h
@@ -0,0 +1,92 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_PLACEMENT_H_
32#define _TTM_PLACEMENT_H_
33/*
34 * Memory regions for data placement.
35 */
36
37#define TTM_PL_SYSTEM 0
38#define TTM_PL_TT 1
39#define TTM_PL_VRAM 2
40#define TTM_PL_PRIV0 3
41#define TTM_PL_PRIV1 4
42#define TTM_PL_PRIV2 5
43#define TTM_PL_PRIV3 6
44#define TTM_PL_PRIV4 7
45#define TTM_PL_PRIV5 8
46#define TTM_PL_SWAPPED 15
47
48#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
49#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
50#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
51#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
52#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
53#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
54#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
55#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
56#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
57#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
58#define TTM_PL_MASK_MEM 0x0000FFFF
59
60/*
 61 * Other flags that affect data placement.
62 * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
63 * if available.
64 * TTM_PL_FLAG_SHARED means that another application may
65 * reference the buffer.
66 * TTM_PL_FLAG_NO_EVICT means that the buffer may never
67 * be evicted to make room for other buffers.
68 */
69
70#define TTM_PL_FLAG_CACHED (1 << 16)
71#define TTM_PL_FLAG_UNCACHED (1 << 17)
72#define TTM_PL_FLAG_WC (1 << 18)
73#define TTM_PL_FLAG_SHARED (1 << 20)
74#define TTM_PL_FLAG_NO_EVICT (1 << 21)
75
76#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
77 TTM_PL_FLAG_UNCACHED | \
78 TTM_PL_FLAG_WC)
79
80#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
81
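/*
 * Example composition (not part of this patch): a pinned,
 * write-combined scanout buffer placed in VRAM.
 */
#define EXAMPLE_PL_FLAG_SCANOUT	(TTM_PL_FLAG_VRAM |	\
				 TTM_PL_FLAG_WC |	\
				 TTM_PL_FLAG_NO_EVICT)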
82/*
 83 * Access flags to be used for CPU and GPU mappings.
 84 * The idea is that the TTM synchronization mechanism will
 85 * allow concurrent READ access and exclusive WRITE access.
 86 * Currently, GPU and CPU accesses are mutually exclusive.
87 */
88
89#define TTM_ACCESS_READ (1 << 0)
90#define TTM_ACCESS_WRITE (1 << 1)
91
92#endif