Diffstat (limited to 'drivers/base')
 drivers/base/dma-buf.c | 124 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 123 insertions(+), 1 deletion(-)
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 5641b9c8d50c..07cbbc6fddb4 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -80,7 +80,9 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
 	if (WARN_ON(!priv || !ops
 		  || !ops->map_dma_buf
 		  || !ops->unmap_dma_buf
-		  || !ops->release)) {
+		  || !ops->release
+		  || !ops->kmap_atomic
+		  || !ops->kmap)) {
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -284,3 +286,123 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 					direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+
+
+/**
+ * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
+ * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
+ * preparations. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:	[in]	buffer to prepare cpu access for.
+ * @start:	[in]	start of range for cpu access.
+ * @len:	[in]	length of range for cpu access.
+ * @direction:	[in]	direction of access for the cpu.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+			     enum dma_data_direction direction)
+{
+	int ret = 0;
+
+	if (WARN_ON(!dmabuf))
+		return -EINVAL;
+
+	if (dmabuf->ops->begin_cpu_access)
+		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+
+/**
+ * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
+ * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
+ * actions. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:	[in]	buffer to complete cpu access for.
+ * @start:	[in]	start of range for cpu access.
+ * @len:	[in]	length of range for cpu access.
+ * @direction:	[in]	direction of access for the cpu.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+			    enum dma_data_direction direction)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->end_cpu_access)
+		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+
+/**
+ * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
+ * space. The same restrictions as for kmap_atomic and friends apply.
+ * @dmabuf:	[in]	buffer to map page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed; any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
+{
+	WARN_ON(!dmabuf);
+
+	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
+
+/**
+ * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
+ * @dmabuf:	[in]	buffer to unmap page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to unmap.
+ * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
+			   void *vaddr)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->kunmap_atomic)
+		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
+
+/**
+ * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
+ * same restrictions as for kmap and friends apply.
+ * @dmabuf:	[in]	buffer to map page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed; any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
+{
+	WARN_ON(!dmabuf);
+
+	return dmabuf->ops->kmap(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap);
+
+/**
+ * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
+ * @dmabuf:	[in]	buffer to unmap page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to unmap.
+ * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
+		    void *vaddr)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->kunmap)
+		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap);
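
The new interfaces are meant to bracket CPU access: dma_buf_begin_cpu_access prepares the range (and may fail), the kmap variants map one page at a time, and dma_buf_end_cpu_access signals completion. Below is a minimal importer-side sketch of that calling pattern, using only the functions exported by this patch; example_read_first_page and its surroundings (a dmabuf obtained elsewhere, e.g. via dma_buf_get()) are hypothetical illustrations, not part of the patch.

/*
 * Hypothetical importer-side sketch (not part of this patch): read the
 * first page of a dma_buf from the CPU using the interfaces added above.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int example_read_first_page(struct dma_buf *dmabuf)
{
	void *vaddr;
	int ret;

	/* Prepare the range [0, PAGE_SIZE) for CPU reads; may fail. */
	ret = dma_buf_begin_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* Map page 0; per the contract above this cannot fail. */
	vaddr = dma_buf_kmap(dmabuf, 0);

	/* ... read through vaddr here ... */

	dma_buf_kunmap(dmabuf, 0, vaddr);

	/* Always succeeds; balances begin_cpu_access. */
	dma_buf_end_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	return 0;
}

The atomic variants follow the same pattern but are subject to the usual kmap_atomic restrictions, i.e. no sleeping between dma_buf_kmap_atomic and dma_buf_kunmap_atomic.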
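
On the exporter side, the first hunk makes kmap_atomic and kmap mandatory: dma_buf_export() now rejects an ops table that leaves them NULL. A rough sketch of what an exporter backed by an array of pages might provide follows; struct my_buffer and the my_* functions are hypothetical placeholders, and the other mandatory ops (map_dma_buf, unmap_dma_buf, release) are omitted for brevity.

/*
 * Hypothetical exporter-side sketch (not part of this patch): fill in the
 * kmap ops that dma_buf_export() now requires. Assumes a backing store
 * that is simply an array of struct page pointers kept in dmabuf->priv.
 */
#include <linux/dma-buf.h>
#include <linux/highmem.h>

struct my_buffer {
	struct page **pages;	/* hypothetical backing pages */
};

static void *my_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	struct my_buffer *buf = dmabuf->priv;

	return kmap_atomic(buf->pages[page_num]);
}

static void my_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			     void *vaddr)
{
	kunmap_atomic(vaddr);
}

static void *my_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	struct my_buffer *buf = dmabuf->priv;

	return kmap(buf->pages[page_num]);
}

static void my_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		      void *vaddr)
{
	struct my_buffer *buf = dmabuf->priv;

	kunmap(buf->pages[page_num]);
}

static const struct dma_buf_ops my_dmabuf_ops = {
	/* .map_dma_buf, .unmap_dma_buf and .release omitted for brevity;
	 * dma_buf_export() still requires them as before. */
	.kmap_atomic	= my_kmap_atomic,
	.kunmap_atomic	= my_kunmap_atomic,
	.kmap		= my_kmap,
	.kunmap		= my_kunmap,
};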