author	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-01-24 16:49:56 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-01-31 05:50:06 -0500
commit	3440d265857b411a13ed8f67814a29fa2011cdb3
tree	8cd1a4bcc23f87debe91fc90613445a7a0d9e684
parent	960e3e429f0c7d9e27e60cf8fa2f51ada71e717e
drm/i915: extract hw ppgtt setup/cleanup code
At the moment this is only cosmetics, but being able to initialize and clean up arbitrary ppgtt address spaces paves the way to having more than one of them ... just in case we ever get around to implementing real per-process address spaces. Note that in that case another vfunc for ppgtt would be beneficial, though. But that can wait until the code grows a second place which initializes ppgtts.

Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
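The point of the split shows up in the new gen6_ppgtt_init()/->cleanup pairing: a caller only has to allocate the struct, set ->dev and call the init function, and teardown goes through the vfunc without knowing it is gen6-specific. As a purely hypothetical sketch (none of this is in the patch; hypothetical_ppgtt_create/destroy are made-up names for the anticipated "second place which initializes ppgtts"), such a caller could look like:

/* Hypothetical illustration only -- not part of this patch. Shows how a
 * future second user of ppgtts (e.g. real per-process address spaces)
 * could reuse gen6_ppgtt_init() and tear the address space down through
 * the new ->cleanup vfunc. */
static struct i915_hw_ppgtt *hypothetical_ppgtt_create(struct drm_device *dev)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt->dev = dev;
	ret = gen6_ppgtt_init(ppgtt);	/* would become a per-gen dispatch */
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}
	return ppgtt;
}

static void hypothetical_ppgtt_destroy(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		ppgtt->cleanup(ppgtt);	/* unmaps/frees page tables and the ppgtt itself */
}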
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c	68
1 file changed, 44 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d17198210568..55020676474c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -153,10 +153,28 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
 	}
 }
 
-static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
 {
+	int i;
+
+	if (ppgtt->pt_dma_addr) {
+		for (i = 0; i < ppgtt->num_pd_entries; i++)
+			pci_unmap_page(ppgtt->dev->pdev,
+				       ppgtt->pt_dma_addr[i],
+				       4096, PCI_DMA_BIDIRECTIONAL);
+	}
+
+	kfree(ppgtt->pt_dma_addr);
+	for (i = 0; i < ppgtt->num_pd_entries; i++)
+		__free_page(ppgtt->pt_pages[i]);
+	kfree(ppgtt->pt_pages);
+	kfree(ppgtt);
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+	struct drm_device *dev = ppgtt->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_hw_ppgtt *ppgtt;
 	unsigned first_pd_entry_in_global_pt;
 	int i;
 	int ret = -ENOMEM;
@@ -166,18 +184,14 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 	 * now. */
 	first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
 
-	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-	if (!ppgtt)
-		return ret;
-
-	ppgtt->dev = dev;
 	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
 	ppgtt->clear_range = gen6_ppgtt_clear_range;
 	ppgtt->insert_entries = gen6_ppgtt_insert_entries;
+	ppgtt->cleanup = gen6_ppgtt_cleanup;
 	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
 				  GFP_KERNEL);
 	if (!ppgtt->pt_pages)
-		goto err_ppgtt;
+		return -ENOMEM;
 
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
 		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
@@ -211,8 +225,6 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 
 	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
 
-	dev_priv->mm.aliasing_ppgtt = ppgtt;
-
 	return 0;
 
 err_pd_pin:
@@ -228,8 +240,27 @@ err_pt_alloc:
 		__free_page(ppgtt->pt_pages[i]);
 	}
 	kfree(ppgtt->pt_pages);
-err_ppgtt:
-	kfree(ppgtt);
+
+	return ret;
+}
+
+static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt;
+	int ret;
+
+	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+	if (!ppgtt)
+		return -ENOMEM;
+
+	ppgtt->dev = dev;
+
+	ret = gen6_ppgtt_init(ppgtt);
+	if (ret)
+		kfree(ppgtt);
+	else
+		dev_priv->mm.aliasing_ppgtt = ppgtt;
 
 	return ret;
 }
@@ -238,22 +269,11 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	int i;
 
 	if (!ppgtt)
 		return;
 
-	if (ppgtt->pt_dma_addr) {
-		for (i = 0; i < ppgtt->num_pd_entries; i++)
-			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
-				       4096, PCI_DMA_BIDIRECTIONAL);
-	}
-
-	kfree(ppgtt->pt_dma_addr);
-	for (i = 0; i < ppgtt->num_pd_entries; i++)
-		__free_page(ppgtt->pt_pages[i]);
-	kfree(ppgtt->pt_pages);
-	kfree(ppgtt);
+	ppgtt->cleanup(ppgtt);
 }
 
 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,