Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c  160
1 file changed, 3 insertions(+), 157 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 6ef0b035becb..61d8d803199f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -125,181 +125,27 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
 }
 
 
-static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
-				     struct list_head *desc_pages)
-{
-	struct page *page, *next;
-	struct svga_guest_mem_descriptor *page_virtual;
-	unsigned int desc_per_page = PAGE_SIZE /
-		sizeof(struct svga_guest_mem_descriptor) - 1;
-
-	if (list_empty(desc_pages))
-		return;
-
-	list_for_each_entry_safe(page, next, desc_pages, lru) {
-		list_del_init(&page->lru);
-
-		if (likely(desc_dma != DMA_ADDR_INVALID)) {
-			dma_unmap_page(dev, desc_dma, PAGE_SIZE,
-				       DMA_TO_DEVICE);
-		}
-
-		page_virtual = kmap_atomic(page);
-		desc_dma = (dma_addr_t)
-			le32_to_cpu(page_virtual[desc_per_page].ppn) <<
-			PAGE_SHIFT;
-		kunmap_atomic(page_virtual);
-
-		__free_page(page);
-	}
-}
-
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- *
- */
-
-static int vmw_gmr_build_descriptors(struct device *dev,
-				     struct list_head *desc_pages,
-				     struct vmw_piter *iter,
-				     unsigned long num_pages,
-				     dma_addr_t *first_dma)
-{
-	struct page *page;
-	struct svga_guest_mem_descriptor *page_virtual = NULL;
-	struct svga_guest_mem_descriptor *desc_virtual = NULL;
-	unsigned int desc_per_page;
-	unsigned long prev_pfn;
-	unsigned long pfn;
-	int ret;
-	dma_addr_t desc_dma;
-
-	desc_per_page = PAGE_SIZE /
-		sizeof(struct svga_guest_mem_descriptor) - 1;
-
-	while (likely(num_pages != 0)) {
-		page = alloc_page(__GFP_HIGHMEM);
-		if (unlikely(page == NULL)) {
-			ret = -ENOMEM;
-			goto out_err;
-		}
-
-		list_add_tail(&page->lru, desc_pages);
-		page_virtual = kmap_atomic(page);
-		desc_virtual = page_virtual - 1;
-		prev_pfn = ~(0UL);
-
-		while (likely(num_pages != 0)) {
-			pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
-
-			if (pfn != prev_pfn + 1) {
-
-				if (desc_virtual - page_virtual ==
-				    desc_per_page - 1)
-					break;
-
-				(++desc_virtual)->ppn = cpu_to_le32(pfn);
-				desc_virtual->num_pages = cpu_to_le32(1);
-			} else {
-				uint32_t tmp =
-					le32_to_cpu(desc_virtual->num_pages);
-				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
-			}
-			prev_pfn = pfn;
-			--num_pages;
-			vmw_piter_next(iter);
-		}
-
-		(++desc_virtual)->ppn = DMA_PAGE_INVALID;
-		desc_virtual->num_pages = cpu_to_le32(0);
-		kunmap_atomic(page_virtual);
-	}
-
-	desc_dma = 0;
-	list_for_each_entry_reverse(page, desc_pages, lru) {
-		page_virtual = kmap_atomic(page);
-		page_virtual[desc_per_page].ppn = cpu_to_le32
-			(desc_dma >> PAGE_SHIFT);
-		kunmap_atomic(page_virtual);
-		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
-					DMA_TO_DEVICE);
-
-		if (unlikely(dma_mapping_error(dev, desc_dma)))
-			goto out_err;
-	}
-	*first_dma = desc_dma;
-
-	return 0;
-out_err:
-	vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
-	return ret;
-}
-
-static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
-				     int gmr_id, dma_addr_t desc_dma)
-{
-	mutex_lock(&dev_priv->hw_mutex);
-
-	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
-	wmb();
-	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
-	mb();
-
-	mutex_unlock(&dev_priv->hw_mutex);
-
-}
-
 int vmw_gmr_bind(struct vmw_private *dev_priv,
 		 const struct vmw_sg_table *vsgt,
 		 unsigned long num_pages,
 		 int gmr_id)
 {
-	struct list_head desc_pages;
-	dma_addr_t desc_dma = 0;
-	struct device *dev = dev_priv->dev->dev;
 	struct vmw_piter data_iter;
-	int ret;
 
 	vmw_piter_start(&data_iter, vsgt, 0);
 
 	if (unlikely(!vmw_piter_next(&data_iter)))
 		return 0;
 
-	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
-		return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
-
-	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
-		return -EINVAL;
-
-	if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
+	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
 		return -EINVAL;
 
-	INIT_LIST_HEAD(&desc_pages);
-
-	ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
-					num_pages, &desc_dma);
-	if (unlikely(ret != 0))
-		return ret;
-
-	vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
-	vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
-
-	return 0;
+	return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
 }
 
 
 void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
 {
-	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
+	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
 		vmw_gmr2_unbind(dev_priv, gmr_id);
-		return;
-	}
-
-	mutex_lock(&dev_priv->hw_mutex);
-	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
-	wmb();
-	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
-	mb();
-	mutex_unlock(&dev_priv->hw_mutex);
 }
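
For reference, a sketch of the two touched functions as they read after this patch, assembled from the post-patch side of the diff above. All types and helpers used here (struct vmw_private, struct vmw_sg_table, struct vmw_piter, vmw_piter_start/next, vmw_gmr2_bind/unbind, SVGA_CAP_GMR2) are defined elsewhere in the vmwgfx driver and are assumed, not shown; only the GMR2 path remains once the legacy descriptor code is removed.

/* Post-patch state of vmwgfx_gmr.c's bind/unbind entry points (sketch). */
int vmw_gmr_bind(struct vmw_private *dev_priv,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_pages,
		 int gmr_id)
{
	struct vmw_piter data_iter;

	vmw_piter_start(&data_iter, vsgt, 0);

	/* Nothing to bind if the page iterator is empty. */
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	/* The GMR1 descriptor path is gone; GMR2 support is now required. */
	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
		return -EINVAL;

	return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
}

void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	/* Only the GMR2 unbind remains; the register-poking fallback was removed. */
	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
		vmw_gmr2_unbind(dev_priv, gmr_id);
}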