author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2011-10-17 17:14:26 -0400
committer  Dave Airlie <airlied@redhat.com>                 2011-12-06 05:39:51 -0500
commit     3230cfc34fca9d17c1628cf0e4ac25199592a69a (patch)
tree       40685914703f0a709b2180d7cdf01e770fa5a4dc /drivers
parent     c52494f69538f6fe1a234972f024011b17a48329 (diff)
drm/nouveau: enable the ttm dma pool when swiotlb is active V3
If the card is capable of more than 32-bit addressing, use the default
TTM page pool code, which allocates from anywhere in memory.
Note: if the 'ttm.no_dma' parameter is set, the override is ignored
and the default TTM pool is used.
V2: use pci_set_consistent_dma_mask.
V3: rebase on top of the no-memory-accounting changes (where/when is my
DeLorean when I need it?)
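
(For illustration only: the pool selection this patch implements, reduced
to a minimal sketch. The function name example_populate is hypothetical;
the calls mirror the nouveau_ttm_tt_populate() added below.)

	/* Sketch: choose the TTM backing pool at populate time. With
	 * SWIOTLB active, pages must come from the DMA-aware pool;
	 * otherwise the default page pool is used. */
	static int example_populate(struct ttm_tt *ttm, struct device *dev)
	{
	#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl())
			return ttm_dma_populate(ttm, dev);
	#endif
		return ttm_pool_populate(ttm);
	}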
CC: Ben Skeggs <bskeggs@redhat.com>
CC: Francisco Jerez <currojerez@riseup.net>
CC: Dave Airlie <airlied@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_bo.c        73
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_debugfs.c    1
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_mem.c        6
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_sgdma.c      60
4 files changed, 79 insertions, 61 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f19ac42578bb..2dc0d8303cb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1049,10 +1049,79 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 	nouveau_fence_unref(&old_fence);
 }
 
+static int
+nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	struct drm_nouveau_private *dev_priv;
+	struct drm_device *dev;
+	unsigned i;
+	int r;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	dev_priv = nouveau_bdev(ttm->bdev);
+	dev = dev_priv->dev;
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		return ttm_dma_populate(ttm, dev->dev);
+	}
+#endif
+
+	r = ttm_pool_populate(ttm);
+	if (r) {
+		return r;
+	}
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+						   0, PAGE_SIZE,
+						   PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
+			while (--i) {
+				pci_unmap_page(dev->pdev, ttm->dma_address[i],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+				ttm->dma_address[i] = 0;
+			}
+			ttm_pool_unpopulate(ttm);
+			return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+static void
+nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	struct drm_nouveau_private *dev_priv;
+	struct drm_device *dev;
+	unsigned i;
+
+	dev_priv = nouveau_bdev(ttm->bdev);
+	dev = dev_priv->dev;
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		ttm_dma_unpopulate(ttm, dev->dev);
+		return;
+	}
+#endif
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		if (ttm->dma_address[i]) {
+			pci_unmap_page(dev->pdev, ttm->dma_address[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+	}
+
+	ttm_pool_unpopulate(ttm);
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
-	.ttm_tt_populate = &ttm_pool_populate,
-	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
+	.ttm_tt_populate = &nouveau_ttm_tt_populate,
+	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
 	.init_mem_type = nouveau_bo_init_mem_type,
 	.evict_flags = nouveau_bo_evict_flags,
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 8e1592368cce..f52c2db3529e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
 	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
 	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
 	{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
+	{ "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
 };
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 36bec4807701..37fcaa260e98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -407,6 +407,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
 	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
 	if (ret)
 		return ret;
+	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+	if (ret) {
+		/* Reset to default value. */
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+	}
+
 
 	ret = nouveau_ttm_global_init(dev_priv);
 	if (ret)
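
(For illustration only: the consistent-mask hunk above follows the common
widen-then-fall-back pattern, sketched here as a hypothetical helper named
example_set_dma_masks; only calls that appear in the hunk are used.)

	/* Sketch: widen the streaming and coherent DMA masks together,
	 * dropping the coherent mask back to 32 bits if the platform
	 * rejects the wider value, as nouveau_mem_vram_init() now does. */
	static int example_set_dma_masks(struct pci_dev *pdev, int dma_bits)
	{
		int ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_bits));
		if (ret)
			return ret;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_bits)))
			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		return 0;
	}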
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index bc2ab900b24c..ee1eb7cba798 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -13,41 +13,6 @@ struct nouveau_sgdma_be {
 	u64 offset;
 };
 
-static int
-nouveau_sgdma_dma_map(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	int i;
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
-						   0, PAGE_SIZE,
-						   PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void
-nouveau_sgdma_dma_unmap(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	int i;
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		if (ttm->dma_address[i]) {
-			pci_unmap_page(dev->pdev, ttm->dma_address[i],
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		}
-		ttm->dma_address[i] = 0;
-	}
-}
-
 static void
 nouveau_sgdma_destroy(struct ttm_tt *ttm)
 {
@@ -67,13 +32,8 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
 	unsigned i, j, pte;
-	int r;
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
@@ -110,7 +70,6 @@ nv04_sgdma_unbind(struct ttm_tt *ttm)
 		nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
@@ -141,13 +100,8 @@ nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2;
 	u32 cnt = ttm->num_pages;
-	int r;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	while (cnt--) {
 		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
@@ -173,7 +127,6 @@ nv41_sgdma_unbind(struct ttm_tt *ttm)
 	}
 
 	nv41_sgdma_flush(nvbe);
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
@@ -256,13 +209,9 @@ nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2, tmp[4];
 	u32 cnt = ttm->num_pages;
-	int i, r;
+	int i;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	if (pte & 0x0000000c) {
 		u32 max = 4 - ((pte >> 2) & 0x3);
@@ -321,7 +270,6 @@ nv44_sgdma_unbind(struct ttm_tt *ttm)
 	nv44_sgdma_fill(pgt, NULL, pte, cnt);
 
 	nv44_sgdma_flush(ttm);
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
@@ -335,13 +283,8 @@ static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
 	struct nouveau_mem *node = mem->mm_node;
-	int r;
 
 	/* noop: bound in move_notify() */
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 	node->pages = ttm->dma_address;
 	return 0;
 }
@@ -350,7 +293,6 @@ static int
 nv50_sgdma_unbind(struct ttm_tt *ttm)
 {
 	/* noop: unbound in move_notify() */
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 