author    Ben Skeggs <bskeggs@redhat.com>	2009-12-11 04:24:15 -0500
committer Dave Airlie <airlied@redhat.com>	2009-12-11 06:29:34 -0500
commit    6ee738610f41b59733f63718f0bdbcba7d3a3f12 (patch)
tree      eccb9f07671998c50a1bc606a54cd6f82ba43e0a /drivers/gpu/drm/nouveau/nouveau_sgdma.c
parent    d1ede145cea25c5b6d2ebb19b167af14e374bb45 (diff)
drm/nouveau: Add DRM driver for NVIDIA GPUs
This adds a drm/kms staging, non-API-stable driver for GPUs from NVIDIA. The driver is KMS-based and requires a compatible nouveau userspace libdrm and nouveau X.org driver.

This driver requires firmware files that are not available in this kernel tree; interested parties can find them via the nouveau project git archive.

This driver is reverse engineered, and is in no way supported by NVIDIA.

Support for nearly the complete range of NVIDIA hardware from nv04 to g80 (nv50) is available, and the KMS driver should support driving nearly all output types (DisplayPort is still under development) along with supporting suspend/resume.

This work is all from the upstream nouveau project found at nouveau.freedesktop.org.

The original authors list from the nouveau git tree is:

Anssi Hannula <anssi.hannula@iki.fi>
Ben Skeggs <bskeggs@redhat.com>
Francisco Jerez <currojerez@riseup.net>
Maarten Maathuis <madman2003@gmail.com>
Marcin Kościelnicki <koriakin@0x04.net>
Matthew Garrett <mjg@redhat.com>
Matt Parnell <mparnell@gmail.com>
Patrice Mandin <patmandin@gmail.com>
Pekka Paalanen <pq@iki.fi>
Xavier Chantry <shiningxc@gmail.com>

along with project founder Stephane Marchesin <marchesin@icps.u-strasbg.fr>

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_sgdma.c	321
1 file changed, 321 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
new file mode 100644
index 000000000000..4c7f1e403e80
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -0,0 +1,321 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>

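/*
 * The GPU's context DMA (GART) always operates on 4KiB pages,
 * independent of the CPU's PAGE_SIZE.
 */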
#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

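/*
 * Per-backend state: the DMA addresses of the currently populated
 * pages, and the first PTE used by the current binding.
 */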
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;

	unsigned pte_start;
	bool bound;
};

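/*
 * populate() - DMA-map the CPU pages TTM hands us, recording the bus
 * addresses so bind() can later write them into GART PTEs.
 */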
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			be->func->clear(be);
			return -EFAULT;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

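/*
 * clear() - the inverse of populate(): unbind if still bound, then
 * unmap and free the page array.
 */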
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "\n");

	if (nvbe->pages) {
		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}

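/*
 * Convert a GART byte offset into a PTE index within the page-table
 * object.  Pre-NV50 the first two 32-bit words are the ctxdma header,
 * so PTEs start at word 2; on NV50 each PTE is two words wide.
 */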
static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

	if (dev_priv->card_type < NV_50)
		return pte + 2;

	return pte << 1;
}

static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
		uint32_t offset_h = upper_32_bits(dma_offset);

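		/* A single CPU page may span several 4KiB GPU pages;
		 * write one PTE per GPU-sized chunk. */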
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

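	/* On NV50 the PTEs live in the GPU's VM page table, so kick the
	 * VM engine to flush its TLBs before the new mappings are used
	 * (register 0x100c80 semantics come from reverse engineering). */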
	if (dev_priv->card_type == NV_50) {
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}

		nv_wr32(dev, 0x100c80, 0x00000001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}
	}

	nvbe->bound = true;
	return 0;
}

static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

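	/* Rather than leave stale translations behind, point every PTE
	 * back at the dummy page allocated in nouveau_sgdma_init(). */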
	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
				nv_wo32(dev, gpuobj, pte++, 0x00000000);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	nvbe->bound = false;
	return 0;
}

static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (!nvbe)
		return;

	NV_DEBUG(nvbe->dev, "\n");

	if (nvbe->pages)
		be->func->clear(be);
	kfree(nvbe);
}

static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nouveau_sgdma_bind,
	.unbind = nouveau_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

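/*
 * Allocate a TTM backend instance.  Fails if the shared page-table
 * object has not yet been created by nouveau_sgdma_init().
 */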
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func = &nouveau_sgdma_backend;

	return &nvbe->backend;
}

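/*
 * Build the global GART page table: a ctxdma object covering a 64MiB
 * aperture on pre-NV50, or a single 512MiB VM page table on NV50.
 */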
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				 NVOBJ_FLAG_ALLOW_NO_REFS |
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
		return ret;
	}

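	/* Allocate a scratch page for unbound PTEs to target.  It must
	 * sit below 4GiB (__GFP_DMA32) since a pre-NV50 PTE only holds
	 * a 32-bit bus address. */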
	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!dev_priv->gart_info.sg_dummy_page) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}
	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (dev_priv->card_type < NV_50) {
		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA does this,
		 * and it's confirmed to work on c51.  Perhaps that means
		 * NV_DMA_TARGET_PCIE on those cards? */
		nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				(1 << 12) /* PT present */ |
				(0 << 13) /* PT *not* linear */ |
				(NV_DMA_ACCESS_RW << 14) |
				(NV_DMA_TARGET_PCI << 16));
		nv_wo32(dev, gpuobj, 1, aper_size - 1); /* ctxdma limit */
		for (i = 2; i < 2 + (aper_size >> 12); i++) {
			nv_wo32(dev, gpuobj, i,
				dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			nv_wo32(dev, gpuobj, (i+0)/4,
				dev_priv->gart_info.sg_dummy_bus | 0x21);
			nv_wo32(dev, gpuobj, (i+4)/4, 0);
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

278
279 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
280 dev_priv->gart_info.aper_base = 0;
281 dev_priv->gart_info.aper_size = aper_size;
282 dev_priv->gart_info.sg_ctxdma = gpuobj;
283 return 0;
284}
285
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}

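/*
 * Look up the bus address backing a given GART offset by reading the
 * PTE straight out of the page-table object (pre-NV50 only).
 */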
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50) {
		instmem->prepare_access(dev, false);
		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
		instmem->finish_access(dev);
		return 0;
	}

	NV_ERROR(dev, "Unimplemented on NV50\n");
	return -EINVAL;
}