diff options
author | Jerome Glisse <jglisse@redhat.com> | 2011-11-09 17:15:26 -0500 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2011-12-06 05:40:02 -0500 |
commit | 8e7e70522d760c4ccd4cd370ebfa0ba69e006c6e (patch) | |
tree | a2b0f931e513f3aeba174b974bd5e869685fe288 /drivers/gpu/drm/nouveau/nouveau_sgdma.c | |
parent | 3230cfc34fca9d17c1628cf0e4ac25199592a69a (diff) |
drm/ttm: isolate dma data from ttm_tt V4
Move DMA data to a superset ttm_dma_tt structure which inherits
from ttm_tt. This allows drivers that don't use DMA functionality
to avoid wasting memory for it.
V2 Rebase on top of the no-memory-accounting changes (where/when is my
DeLorean when I need it?)
V3 Make sure page list is initialized empty
V4 typo/syntax fixes
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_sgdma.c | 22 |
1 file changed, 14 insertions, 8 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index ee1eb7cba798..47f245edf538 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -8,7 +8,10 @@ | |||
8 | #define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) | 8 | #define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) |
9 | 9 | ||
10 | struct nouveau_sgdma_be { | 10 | struct nouveau_sgdma_be { |
11 | struct ttm_tt ttm; | 11 | /* this has to be the first field so populate/unpopulated in |
12 | * nouve_bo.c works properly, otherwise have to move them here | ||
13 | */ | ||
14 | struct ttm_dma_tt ttm; | ||
12 | struct drm_device *dev; | 15 | struct drm_device *dev; |
13 | u64 offset; | 16 | u64 offset; |
14 | }; | 17 | }; |
@@ -20,6 +23,7 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm) | |||
20 | 23 | ||
21 | if (ttm) { | 24 | if (ttm) { |
22 | NV_DEBUG(nvbe->dev, "\n"); | 25 | NV_DEBUG(nvbe->dev, "\n"); |
26 | ttm_dma_tt_fini(&nvbe->ttm); | ||
23 | kfree(nvbe); | 27 | kfree(nvbe); |
24 | } | 28 | } |
25 | } | 29 | } |
@@ -38,7 +42,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) | |||
38 | nvbe->offset = mem->start << PAGE_SHIFT; | 42 | nvbe->offset = mem->start << PAGE_SHIFT; |
39 | pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; | 43 | pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; |
40 | for (i = 0; i < ttm->num_pages; i++) { | 44 | for (i = 0; i < ttm->num_pages; i++) { |
41 | dma_addr_t dma_offset = ttm->dma_address[i]; | 45 | dma_addr_t dma_offset = nvbe->ttm.dma_address[i]; |
42 | uint32_t offset_l = lower_32_bits(dma_offset); | 46 | uint32_t offset_l = lower_32_bits(dma_offset); |
43 | 47 | ||
44 | for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { | 48 | for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { |
@@ -97,7 +101,7 @@ nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) | |||
97 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; | 101 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; |
98 | struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; | 102 | struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; |
99 | struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; | 103 | struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; |
100 | dma_addr_t *list = ttm->dma_address; | 104 | dma_addr_t *list = nvbe->ttm.dma_address; |
101 | u32 pte = mem->start << 2; | 105 | u32 pte = mem->start << 2; |
102 | u32 cnt = ttm->num_pages; | 106 | u32 cnt = ttm->num_pages; |
103 | 107 | ||
@@ -206,7 +210,7 @@ nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) | |||
206 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; | 210 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; |
207 | struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; | 211 | struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; |
208 | struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; | 212 | struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; |
209 | dma_addr_t *list = ttm->dma_address; | 213 | dma_addr_t *list = nvbe->ttm.dma_address; |
210 | u32 pte = mem->start << 2, tmp[4]; | 214 | u32 pte = mem->start << 2, tmp[4]; |
211 | u32 cnt = ttm->num_pages; | 215 | u32 cnt = ttm->num_pages; |
212 | int i; | 216 | int i; |
@@ -282,10 +286,11 @@ static struct ttm_backend_func nv44_sgdma_backend = { | |||
282 | static int | 286 | static int |
283 | nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) | 287 | nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) |
284 | { | 288 | { |
289 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; | ||
285 | struct nouveau_mem *node = mem->mm_node; | 290 | struct nouveau_mem *node = mem->mm_node; |
286 | 291 | ||
287 | /* noop: bound in move_notify() */ | 292 | /* noop: bound in move_notify() */ |
288 | node->pages = ttm->dma_address; | 293 | node->pages = nvbe->ttm.dma_address; |
289 | return 0; | 294 | return 0; |
290 | } | 295 | } |
291 | 296 | ||
@@ -316,12 +321,13 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev, | |||
316 | return NULL; | 321 | return NULL; |
317 | 322 | ||
318 | nvbe->dev = dev; | 323 | nvbe->dev = dev; |
319 | nvbe->ttm.func = dev_priv->gart_info.func; | 324 | nvbe->ttm.ttm.func = dev_priv->gart_info.func; |
320 | 325 | ||
321 | if (ttm_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) { | 326 | if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) { |
327 | kfree(nvbe); | ||
322 | return NULL; | 328 | return NULL; |
323 | } | 329 | } |
324 | return &nvbe->ttm; | 330 | return &nvbe->ttm.ttm; |
325 | } | 331 | } |
326 | 332 | ||
327 | int | 333 | int |