author		Imre Deak <imre.deak@intel.com>		2013-02-18 12:28:04 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-03-23 07:17:31 -0400
commit		6e995e231a90ce7c5ce2a9eae23c8e22f4388db1
tree		980777b86f465ae8035415d67b59d03aa8320f43
parent		90797e6d1ec0dfde6ba62a48b9ee3803887d6ed4
drm/i915: use for_each_sg_page for setting up the gtt ptes
The existing gtt setup code is correct, and so, unlike the code touched by the
previous patches, it doesn't need to be fixed to handle compact dma scatter
lists. Still, take the for_each_sg_page macro into use to get somewhat simpler
code.
Signed-off-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c	67
1 file changed, 24 insertions(+), 43 deletions(-)
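For reference, both hunks below replace hand-rolled scatterlist walking
(sg_next()/sg_dma_len() bookkeeping plus a per-entry page counter) with the
for_each_sg_page() iterator from <linux/scatterlist.h>. The sketch below shows
that iteration pattern in isolation; the walk_sg_dma_pages() helper and its
emit_pte callback are illustrative only and not part of the patch, which
open-codes the loop directly in gen6_ppgtt_insert_entries() and
gen6_ggtt_insert_entries().

    /*
     * Minimal sketch, under the assumptions above, of the page-by-page walk
     * that for_each_sg_page() provides.  The helper name and callback are
     * hypothetical; the patch open-codes this loop in the i915 GTT paths.
     */
    #include <linux/scatterlist.h>

    static void walk_sg_dma_pages(struct sg_table *st,
                                  void (*emit_pte)(dma_addr_t addr, void *ctx),
                                  void *ctx)
    {
            struct sg_page_iter sg_iter;

            /*
             * The iterator advances one PAGE_SIZE page at a time and steps
             * across scatterlist entries internally, so the caller no longer
             * tracks sg_next(), sg_dma_len() or a per-entry page offset.
             */
            for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
                    dma_addr_t addr = sg_dma_address(sg_iter.sg) +
                            (sg_iter.sg_pgoffset << PAGE_SHIFT);

                    emit_pte(addr, ctx);    /* e.g. encode and write one PTE */
            }
    }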
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2d7d3a94257b..c0ccbe7b61bc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -116,41 +116,26 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
 {
         gtt_pte_t *pt_vaddr;
         unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
-        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
-        unsigned i, j, m, segment_len;
-        dma_addr_t page_addr;
-        struct scatterlist *sg;
-
-        /* init sg walking */
-        sg = pages->sgl;
-        i = 0;
-        segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
-        m = 0;
-
-        while (i < pages->nents) {
-                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
-                for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
-                        page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-                        pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
-                                                      cache_level);
-
-                        /* grab the next page */
-                        if (++m == segment_len) {
-                                if (++i == pages->nents)
-                                        break;
+        unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+        struct sg_page_iter sg_iter;
+
+        pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+        for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+                dma_addr_t page_addr;
+
+                page_addr = sg_dma_address(sg_iter.sg) +
+                        (sg_iter.sg_pgoffset << PAGE_SHIFT);
+                pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
+                                                    cache_level);
+                if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+                        kunmap_atomic(pt_vaddr);
+                        act_pd++;
+                        pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+                        act_pte = 0;
 
-                                sg = sg_next(sg);
-                                segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
-                                m = 0;
-                        }
                 }
-
-                kunmap_atomic(pt_vaddr);
-
-                first_pte = 0;
-                act_pd++;
         }
+        kunmap_atomic(pt_vaddr);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
@@ -432,21 +417,17 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
                                      enum i915_cache_level level)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
-        struct scatterlist *sg = st->sgl;
         gtt_pte_t __iomem *gtt_entries =
                 (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
-        int unused, i = 0;
-        unsigned int len, m = 0;
+        int i = 0;
+        struct sg_page_iter sg_iter;
         dma_addr_t addr;
 
-        for_each_sg(st->sgl, sg, st->nents, unused) {
-                len = sg_dma_len(sg) >> PAGE_SHIFT;
-                for (m = 0; m < len; m++) {
-                        addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-                        iowrite32(gen6_pte_encode(dev, addr, level),
-                                  &gtt_entries[i]);
-                        i++;
-                }
+        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+                addr = sg_dma_address(sg_iter.sg) +
+                        (sg_iter.sg_pgoffset << PAGE_SHIFT);
+                iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
+                i++;
         }
 
         /* XXX: This serves as a posting read to make sure that the PTE has