diff options
author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-02-09 11:15:47 -0500 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-02-09 15:25:23 -0500 |
commit | 7bddb01fb9697afd5d39bb69dd9f782a28063101 (patch) | |
tree | cf8d8b67d4bac35a611073a6723228d074960036 /drivers/gpu/drm/i915/i915_gem_gtt.c | |
parent | 1d2a314c97ceaf383de8e23cdde46729927d433c (diff) |
drm/i915: ppgtt binding/unbinding support
This adds support to bind/unbind objects and wires it up. Objects are
only put into the ppgtt when necessary, i.e. at execbuf time.
Objects are still unconditionally put into the global gtt.
v2: Kill the quick hack and explicitly pass cache_level to ppgtt_bind
like for the global gtt function. Noticed by Chris Wilson.
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Tested-by: Chris Wilson <chris@chris-wilson.co.uk>
Tested-by: Eugeni Dodonov <eugeni.dodonov@intel.com>
Reviewed-by: Eugeni Dodonov <eugeni.dodonov@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_gtt.c | 146 |
1 file changed, 140 insertions, 6 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f408f8c710d..2eacd78bb93 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -34,22 +34,31 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, | |||
34 | unsigned first_entry, | 34 | unsigned first_entry, |
35 | unsigned num_entries) | 35 | unsigned num_entries) |
36 | { | 36 | { |
37 | int i, j; | ||
38 | uint32_t *pt_vaddr; | 37 | uint32_t *pt_vaddr; |
39 | uint32_t scratch_pte; | 38 | uint32_t scratch_pte; |
39 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; | ||
40 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | ||
41 | unsigned last_pte, i; | ||
40 | 42 | ||
41 | scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr); | 43 | scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr); |
42 | scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC; | 44 | scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC; |
43 | 45 | ||
44 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | 46 | while (num_entries) { |
45 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[i]); | 47 | last_pte = first_pte + num_entries; |
48 | if (last_pte > I915_PPGTT_PT_ENTRIES) | ||
49 | last_pte = I915_PPGTT_PT_ENTRIES; | ||
50 | |||
51 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); | ||
46 | 52 | ||
47 | for (j = 0; j < I915_PPGTT_PT_ENTRIES; j++) | 53 | for (i = first_pte; i < last_pte; i++) |
48 | pt_vaddr[j] = scratch_pte; | 54 | pt_vaddr[i] = scratch_pte; |
49 | 55 | ||
50 | kunmap_atomic(pt_vaddr); | 56 | kunmap_atomic(pt_vaddr); |
51 | } | ||
52 | 57 | ||
58 | num_entries -= last_pte - first_pte; | ||
59 | first_pte = 0; | ||
60 | act_pd++; | ||
61 | } | ||
53 | } | 62 | } |
54 | 63 | ||
55 | int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | 64 | int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) |
@@ -168,6 +177,131 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) | |||
168 | kfree(ppgtt); | 177 | kfree(ppgtt); |
169 | } | 178 | } |
170 | 179 | ||
180 | static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, | ||
181 | struct scatterlist *sg_list, | ||
182 | unsigned sg_len, | ||
183 | unsigned first_entry, | ||
184 | uint32_t pte_flags) | ||
185 | { | ||
186 | uint32_t *pt_vaddr, pte; | ||
187 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; | ||
188 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | ||
189 | unsigned i, j, m, segment_len; | ||
190 | dma_addr_t page_addr; | ||
191 | struct scatterlist *sg; | ||
192 | |||
193 | /* init sg walking */ | ||
194 | sg = sg_list; | ||
195 | i = 0; | ||
196 | segment_len = sg_dma_len(sg) >> PAGE_SHIFT; | ||
197 | m = 0; | ||
198 | |||
199 | while (i < sg_len) { | ||
200 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); | ||
201 | |||
202 | for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { | ||
203 | page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); | ||
204 | pte = GEN6_PTE_ADDR_ENCODE(page_addr); | ||
205 | pt_vaddr[j] = pte | pte_flags; | ||
206 | |||
207 | /* grab the next page */ | ||
208 | m++; | ||
209 | if (m == segment_len) { | ||
210 | sg = sg_next(sg); | ||
211 | i++; | ||
212 | if (i == sg_len) | ||
213 | break; | ||
214 | |||
215 | segment_len = sg_dma_len(sg) >> PAGE_SHIFT; | ||
216 | m = 0; | ||
217 | } | ||
218 | } | ||
219 | |||
220 | kunmap_atomic(pt_vaddr); | ||
221 | |||
222 | first_pte = 0; | ||
223 | act_pd++; | ||
224 | } | ||
225 | } | ||
226 | |||
227 | static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, | ||
228 | unsigned first_entry, unsigned num_entries, | ||
229 | struct page **pages, uint32_t pte_flags) | ||
230 | { | ||
231 | uint32_t *pt_vaddr, pte; | ||
232 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; | ||
233 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | ||
234 | unsigned last_pte, i; | ||
235 | dma_addr_t page_addr; | ||
236 | |||
237 | while (num_entries) { | ||
238 | last_pte = first_pte + num_entries; | ||
239 | last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES); | ||
240 | |||
241 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); | ||
242 | |||
243 | for (i = first_pte; i < last_pte; i++) { | ||
244 | page_addr = page_to_phys(*pages); | ||
245 | pte = GEN6_PTE_ADDR_ENCODE(page_addr); | ||
246 | pt_vaddr[i] = pte | pte_flags; | ||
247 | |||
248 | pages++; | ||
249 | } | ||
250 | |||
251 | kunmap_atomic(pt_vaddr); | ||
252 | |||
253 | num_entries -= last_pte - first_pte; | ||
254 | first_pte = 0; | ||
255 | act_pd++; | ||
256 | } | ||
257 | } | ||
258 | |||
259 | void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, | ||
260 | struct drm_i915_gem_object *obj, | ||
261 | enum i915_cache_level cache_level) | ||
262 | { | ||
263 | struct drm_device *dev = obj->base.dev; | ||
264 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
265 | uint32_t pte_flags = GEN6_PTE_VALID; | ||
266 | |||
267 | switch (cache_level) { | ||
268 | case I915_CACHE_LLC_MLC: | ||
269 | pte_flags |= GEN6_PTE_CACHE_LLC_MLC; | ||
270 | break; | ||
271 | case I915_CACHE_LLC: | ||
272 | pte_flags |= GEN6_PTE_CACHE_LLC; | ||
273 | break; | ||
274 | case I915_CACHE_NONE: | ||
275 | pte_flags |= GEN6_PTE_UNCACHED; | ||
276 | break; | ||
277 | default: | ||
278 | BUG(); | ||
279 | } | ||
280 | |||
281 | if (dev_priv->mm.gtt->needs_dmar) { | ||
282 | BUG_ON(!obj->sg_list); | ||
283 | |||
284 | i915_ppgtt_insert_sg_entries(ppgtt, | ||
285 | obj->sg_list, | ||
286 | obj->num_sg, | ||
287 | obj->gtt_space->start >> PAGE_SHIFT, | ||
288 | pte_flags); | ||
289 | } else | ||
290 | i915_ppgtt_insert_pages(ppgtt, | ||
291 | obj->gtt_space->start >> PAGE_SHIFT, | ||
292 | obj->base.size >> PAGE_SHIFT, | ||
293 | obj->pages, | ||
294 | pte_flags); | ||
295 | } | ||
296 | |||
297 | void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, | ||
298 | struct drm_i915_gem_object *obj) | ||
299 | { | ||
300 | i915_ppgtt_clear_range(ppgtt, | ||
301 | obj->gtt_space->start >> PAGE_SHIFT, | ||
302 | obj->base.size >> PAGE_SHIFT); | ||
303 | } | ||
304 | |||
171 | /* XXX kill agp_type! */ | 305 | /* XXX kill agp_type! */ |
172 | static unsigned int cache_level_to_agp_type(struct drm_device *dev, | 306 | static unsigned int cache_level_to_agp_type(struct drm_device *dev, |
173 | enum i915_cache_level cache_level) | 307 | enum i915_cache_level cache_level) |