Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_prime.c')
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_prime.c   163
1 file changed, 163 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
new file mode 100644
index 000000000000..ed6b8465260a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -0,0 +1,163 @@
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#include <linux/dma-buf.h>
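
/*
 * dma-buf map callback: build an sg_table from the BO's TTM backing pages
 * and DMA-map it for the importer's device.  struct_mutex is held while
 * the table is built from the page list.
 */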
static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction dir)
{
	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int npages = nvbo->bo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}
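
/*
 * Called when the last reference to the exported dma-buf is dropped:
 * clear the export link and release the GEM reference held for it.
 */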
static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;

	if (nvbo->gem->export_dma_buf == dma_buf) {
		nvbo->gem->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}
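
/* CPU access via kmap/kmap_atomic is not supported; these hooks are stubs. */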
static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}
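
/* Operation table handed to dma_buf_export() in nouveau_gem_prime_export(). */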
struct dma_buf_ops nouveau_dmabuf_ops = {
	.map_dma_buf = nouveau_gem_map_dma_buf,
	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
	.release = nouveau_gem_dmabuf_release,
	.kmap = nouveau_gem_kmap,
	.kmap_atomic = nouveau_gem_kmap_atomic,
	.kunmap = nouveau_gem_kunmap,
	.kunmap_atomic = nouveau_gem_kunmap_atomic,
};
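
/*
 * Wrap an imported sg_table in a GART-placed nouveau_bo and a GEM object;
 * used by the import path below.
 */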
static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}
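
/*
 * Export: pin the buffer into GART so its pages stay resident, then wrap
 * it in a dma-buf using the ops table above.
 */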
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
					 struct drm_gem_object *obj, int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret = 0;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		return ERR_PTR(-EINVAL);

	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}
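
/*
 * Import: if the dma-buf was exported by nouveau on this device, just take
 * a reference on the existing GEM object.  Otherwise attach to the foreign
 * buffer, map it, and build a new BO around the resulting sg_table.
 */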
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct nouveau_bo *nvbo;
	int ret;

	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		nvbo = dma_buf->priv;
		if (nvbo->gem) {
			if (nvbo->gem->dev == dev) {
				drm_gem_object_reference(nvbo->gem);
				return nvbo->gem;
			}
		}
	}
	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
	if (ret)
		goto fail_unmap;

	nvbo->gem->import_attach = attach;

	return nvbo->gem;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
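
/*
 * Wiring sketch (not part of this hunk): these entry points are expected to
 * be hooked up in nouveau's struct drm_driver, roughly as
 *
 *	.gem_prime_export = nouveau_gem_prime_export,
 *	.gem_prime_import = nouveau_gem_prime_import,
 *
 * alongside the DRIVER_PRIME feature flag, so the PRIME ioctls can reach
 * them.  The exact hookup lives outside this file.
 */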