author    Thomas Hellstrom <thellstrom@vmware.com>  2010-10-26 15:21:47 -0400
committer Dave Airlie <airlied@redhat.com>          2010-10-26 21:07:46 -0400
commit    135cba0dc399fdd47bd3ae305c1db75fcd77243f (patch)
tree      3eedcd7c5701dfe05246aca3479ab7396169f2e7 /drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
parent    8f895da57da80b307efa2f94b5d4caf801e959a5 (diff)
vmwgfx: Implement a proper GMR eviction mechanism
Use Ben's new range manager hooks to implement a manager for
GMRs that manages ids rather than ranges.
This means we can use the standard TTM code for binding, unbinding and
eviction.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
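
[Editor's note] The manager referenced above plugs into TTM through the new per-memory-type hooks (struct ttm_mem_type_manager_func) and, instead of carving address ranges out of an aperture, hands out integer ids; the id is returned in mem->start, which is why vmw_ttm_bind() in the diff below can treat bo_mem->start as the GMR id. The real implementation lives in vmwgfx_gmrid_manager.c and is not part of this file; what follows is a minimal, hypothetical sketch of the idea using a bitmap allocator. All names other than the TTM hook signatures are made up, and the real GMR limits and error handling are simplified:

    /* Hypothetical id-based TTM memory-type manager (sketch only). */
    struct example_id_manager {
    	spinlock_t lock;
    	unsigned long *used_ids;	/* bitmap, one bit per GMR slot */
    	int max_ids;			/* device limit on GMR slots */
    };

    static int example_man_get_node(struct ttm_mem_type_manager *man,
    				struct ttm_buffer_object *bo,
    				struct ttm_placement *placement,
    				struct ttm_mem_reg *mem)
    {
    	struct example_id_manager *eman = man->priv;
    	int id;

    	spin_lock(&eman->lock);
    	id = find_first_zero_bit(eman->used_ids, eman->max_ids);
    	if (id < eman->max_ids) {
    		set_bit(id, eman->used_ids);
    		mem->mm_node = eman->used_ids + id; /* any non-NULL cookie */
    		mem->start = id;		    /* consumed by bind() */
    	}
    	spin_unlock(&eman->lock);

    	return 0;	/* mem->mm_node left NULL tells TTM to try eviction */
    }

    static void example_man_put_node(struct ttm_mem_type_manager *man,
    				 struct ttm_mem_reg *mem)
    {
    	struct example_id_manager *eman = man->priv;

    	if (mem->mm_node) {
    		spin_lock(&eman->lock);
    		clear_bit(mem->start, eman->used_ids);
    		spin_unlock(&eman->lock);
    		mem->mm_node = NULL;
    	}
    }

Because get_node can report "no id free" through a NULL mm_node, the standard TTM eviction loop takes over and evicts another buffer's GMR binding, which is exactly what makes the eviction mechanism in this patch "proper".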
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c  81
1 file changed, 62 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 1b3bd8c6c67e..80bc37b274e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -39,6 +39,9 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
 	TTM_PL_FLAG_CACHED;
 
+static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
+	TTM_PL_FLAG_CACHED;
+
 struct ttm_placement vmw_vram_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -48,6 +51,20 @@ struct ttm_placement vmw_vram_placement = {
 	.busy_placement = &vram_placement_flags
 };
 
+static uint32_t vram_gmr_placement_flags[] = {
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_vram_gmr_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 2,
+	.placement = vram_gmr_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &gmr_placement_flags
+};
+
 struct ttm_placement vmw_vram_sys_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
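
[Editor's note] The new placement accepts either VRAM or a GMR slot, and under memory pressure falls back to the busy_placement list (GMR only), at which point TTM's standard eviction can reclaim ids from other buffers. Such a placement is consumed by ttm_bo_validate(); a caller elsewhere in the driver (not part of this patch) might look roughly like the sketch below. The boolean arguments match the ttm_bo_validate() interface of this kernel era as an assumption; the signature has varied across versions:

    /* Hypothetical caller: prefer VRAM or GMR; TTM may evict other
     * buffers' GMR ids via busy_placement when nothing is free.
     */
    static int example_validate_for_gpu(struct ttm_buffer_object *bo)
    {
    	return ttm_bo_validate(bo, &vmw_vram_gmr_placement,
    			       true,   /* interruptible */
    			       false,  /* no_wait_reserve */
    			       false); /* no_wait_gpu */
    }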
@@ -77,27 +94,52 @@ struct ttm_placement vmw_sys_placement = {
 
 struct vmw_ttm_backend {
 	struct ttm_backend backend;
+	struct page **pages;
+	unsigned long num_pages;
+	struct vmw_private *dev_priv;
+	int gmr_id;
 };
 
 static int vmw_ttm_populate(struct ttm_backend *backend,
 			    unsigned long num_pages, struct page **pages,
 			    struct page *dummy_read_page)
 {
+	struct vmw_ttm_backend *vmw_be =
+		container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_be->pages = pages;
+	vmw_be->num_pages = num_pages;
+
 	return 0;
 }
 
 static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 {
-	return 0;
+	struct vmw_ttm_backend *vmw_be =
+		container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_be->gmr_id = bo_mem->start;
+
+	return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
+			    vmw_be->num_pages, vmw_be->gmr_id);
 }
 
 static int vmw_ttm_unbind(struct ttm_backend *backend)
 {
+	struct vmw_ttm_backend *vmw_be =
+		container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
 	return 0;
 }
 
 static void vmw_ttm_clear(struct ttm_backend *backend)
 {
+	struct vmw_ttm_backend *vmw_be =
+		container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_be->pages = NULL;
+	vmw_be->num_pages = 0;
 }
 
 static void vmw_ttm_destroy(struct ttm_backend *backend)
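
[Editor's note] The split between populate() and bind() follows TTM's backend contract: populate() only caches the page list, while bind() later receives the placement decision and, because the manager allocates ids, finds the GMR id in bo_mem->start. A simplified sketch of the order in which TTM core invokes these hooks (TTM interleaves them with its own bookkeeping; the driver never calls them directly):

    /* Assuming the ttm_backend_func contract of this era. */
    be->func->populate(be, num_pages, pages, dummy_read_page); /* cache page list */
    be->func->bind(be, bo_mem);  /* bo_mem->start is the id from the manager */
    /* ... buffer is reachable through the GMR until moved or evicted ... */
    be->func->unbind(be);        /* tear down the GMR binding */
    be->func->clear(be);         /* drop the cached page list */
    be->func->destroy(be);       /* free the backend object */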
@@ -125,6 +167,7 @@ struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
 		return NULL;
 
 	vmw_be->backend.func = &vmw_ttm_func;
+	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
 	return &vmw_be->backend;
 }
@@ -142,7 +185,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		/* System memory */
 
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_MASK_CACHING;
+		man->available_caching = TTM_PL_FLAG_CACHED;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
@@ -150,8 +193,20 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = 0;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_MASK_CACHING;
-		man->default_caching = TTM_PL_FLAG_WC;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case VMW_PL_GMR:
+		/*
+		 * "Guest Memory Regions" is an aperture-like feature with
+		 * one slot per bo. There is an upper limit on the number of
+		 * slots as well as the bo size.
+		 */
+		man->func = &vmw_gmrid_manager_func;
+		man->gpu_offset = 0;
+		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	default:
 		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
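
[Editor's note] VMW_PL_GMR in the case label above is a driver-private TTM memory type; TTM reserves a few such types for drivers. The definitions live in vmwgfx_drv.h, outside this diff, so the following is an assumption based on the TTM conventions of this era, but they map approximately like so:

    /* Approximate; the real definitions are in vmwgfx_drv.h. */
    #define VMW_PL_GMR	TTM_PL_PRIV0
    #define VMW_PL_FLAG_GMR	TTM_PL_FLAG_PRIV0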
@@ -175,18 +230,6 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 	return 0;
 }
 
-static void vmw_move_notify(struct ttm_buffer_object *bo,
-			    struct ttm_mem_reg *new_mem)
-{
-	if (new_mem->mem_type != TTM_PL_SYSTEM)
-		vmw_dmabuf_gmr_unbind(bo);
-}
-
-static void vmw_swap_notify(struct ttm_buffer_object *bo)
-{
-	vmw_dmabuf_gmr_unbind(bo);
-}
-
 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -201,7 +244,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		return -EINVAL;
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
-		/* System memory */
+	case VMW_PL_GMR:
 		return 0;
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -277,8 +320,8 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.sync_obj_flush = vmw_sync_obj_flush,
 	.sync_obj_unref = vmw_sync_obj_unref,
 	.sync_obj_ref = vmw_sync_obj_ref,
-	.move_notify = vmw_move_notify,
-	.swap_notify = vmw_swap_notify,
+	.move_notify = NULL,
+	.swap_notify = NULL,
 	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
 	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
 	.io_mem_free = &vmw_ttm_io_mem_free,
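
[Editor's note] Setting move_notify and swap_notify to NULL is safe because TTM core treats them as optional and checks before calling, along the lines of the core-side pattern sketched below (not vmwgfx code). The ad-hoc unbind-on-move logic those hooks used to perform is now subsumed by the standard bind/unbind path driven by the id manager:

    /* Sketch of how TTM core typically guards an optional driver hook. */
    if (bdev->driver->move_notify)
    	bdev->driver->move_notify(bo, new_mem);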