author    Laurent Pinchart <laurent.pinchart@ideasonboard.com>	2017-04-20 17:33:56 -0400
committer Tomi Valkeinen <tomi.valkeinen@ti.com>	2017-06-02 03:57:07 -0400
commit    24fbaca0e20acebfcdf7468f03465d5b1966c314
tree      3dc3b9a3a7ff8b6ca3a0b3559b15b711ceb7f811 /drivers/gpu/drm/omapdrm/omap_gem.c
parent    d61ce7da02a3c52317474f2a15dd610ec652d513
drm: omapdrm: Fix incorrect usage of the term 'cache coherency'
The is_cached_coherent() function currently returns true when the mapping
is not cache-coherent. This isn't a bug as such, as the callers interpret
cache-coherent as meaning that the driver has to handle the coherency
manually, but it is nonetheless very confusing. Fix it and add a bit
more documentation to explain how cached buffers are handled.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
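
To see what the inverted predicate means for callers, here is a minimal
standalone model (illustrative only: the flag values below are stand-ins,
not the driver's real definitions; only the boolean expression mirrors the
patched is_cached_coherent()):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in flag values for illustration; the real definitions live in
 * omap_gem.c and the omapdrm UAPI headers. */
#define OMAP_BO_MEM_SHMEM	(1 << 0)
#define OMAP_BO_CACHED		(0 << 1)
#define OMAP_BO_WC		(1 << 1)
#define OMAP_BO_CACHE_MASK	(3 << 1)

/* After the patch: true means "coherent, no manual cache maintenance
 * needed". Only cached shmem buffers need manual maintenance. */
static bool is_cached_coherent(unsigned int flags)
{
	return !((flags & OMAP_BO_MEM_SHMEM) &&
		 ((flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}

int main(void)
{
	/* Cached shmem buffer: not coherent, prints 0. */
	printf("%d\n", is_cached_coherent(OMAP_BO_MEM_SHMEM | OMAP_BO_CACHED));
	/* Write-combined shmem buffer: coherent, prints 1. */
	printf("%d\n", is_cached_coherent(OMAP_BO_MEM_SHMEM | OMAP_BO_WC));
	return 0;
}

With the old return value the helpers had to test for "not coherent" in one
place and the opposite in another; after the fix both sync paths use the
same early return, as the hunks below show.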
Diffstat (limited to 'drivers/gpu/drm/omapdrm/omap_gem.c')
 drivers/gpu/drm/omapdrm/omap_gem.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 86567e591f4e..eb02a1399a10 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -719,16 +719,21 @@ fail:
  * Memory Management & DMA Sync
  */
 
-/**
- * shmem buffers that are mapped cached can simulate coherency via using
- * page faulting to keep track of dirty pages
+/*
+ * shmem buffers that are mapped cached are not coherent.
+ *
+ * We keep track of dirty pages using page faulting to perform cache management.
+ * When a page is mapped to the CPU in read/write mode the device can't access
+ * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
+ * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
+ * unmapped from the CPU.
  */
 static inline bool is_cached_coherent(struct drm_gem_object *obj)
 {
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 
-	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
-		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
+		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
 }
 
 /* Sync the buffer for CPU access.. note pages should already be
@@ -739,7 +744,10 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
 	struct drm_device *dev = obj->dev;
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 
-	if (is_cached_coherent(obj) && omap_obj->dma_addrs[pgoff]) {
+	if (is_cached_coherent(obj))
+		return;
+
+	if (omap_obj->dma_addrs[pgoff]) {
 		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
 				PAGE_SIZE, DMA_BIDIRECTIONAL);
 		omap_obj->dma_addrs[pgoff] = 0;
@@ -756,7 +764,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
 	struct page **pages = omap_obj->pages;
 	bool dirty = false;
 
-	if (!is_cached_coherent(obj))
+	if (is_cached_coherent(obj))
 		return;
 
 	for (i = 0; i < npages; i++) {
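
The tracking scheme described by the comment block added above can be made
concrete with a small userspace model (entirely illustrative; the helper
names and fake addresses are assumptions, mirroring the roles of
omap_gem_cpu_sync_page() and omap_gem_dma_sync_buffer() with
dma_map_page()/dma_unmap_page() stubbed out):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Model of omap_obj->dma_addrs[]: 0 means the page is CPU-owned
 * (unmapped from the device); non-zero stands in for a DMA address
 * returned by dma_map_page(). */
typedef uintptr_t dma_addr_t;

#define NPAGES 4
static dma_addr_t dma_addrs[NPAGES];

/* CPU access faults the page back: unmap it from the device. */
static void cpu_sync_page(size_t pgoff)
{
	if (dma_addrs[pgoff])
		dma_addrs[pgoff] = 0;	/* real driver: dma_unmap_page() */
}

/* Device access: map every page the CPU currently owns. */
static void dma_sync_buffer(void)
{
	for (size_t i = 0; i < NPAGES; i++)
		if (!dma_addrs[i])
			dma_addrs[i] = 0x1000 * (i + 1);	/* fake dma_map_page() */
}

int main(void)
{
	dma_sync_buffer();	/* hand all pages to the device */
	cpu_sync_page(2);	/* page 2 faults back to the CPU */

	for (size_t i = 0; i < NPAGES; i++)
		printf("page %zu: %s\n", i,
		       dma_addrs[i] ? "device-owned" : "CPU-owned");
	return 0;
}

At any instant each page is owned by exactly one side, which is why the
real helpers can use dma_addrs[pgoff] alone to decide whether a map or
unmap is needed.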