aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/drm/drm_bufs.c35
1 file changed, 31 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index cddea1a2472c..6d80d17f1e96 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -34,6 +34,8 @@
  */
 
 #include <linux/vmalloc.h>
+#include <linux/log2.h>
+#include <asm/shmparam.h>
 #include "drmP.h"
 
 resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
@@ -83,9 +85,11 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
 }
 
 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
-			  unsigned long user_token, int hashed_handle)
+			  unsigned long user_token, int hashed_handle, int shm)
 {
-	int use_hashed_handle;
+	int use_hashed_handle, shift;
+	unsigned long add;
+
 #if (BITS_PER_LONG == 64)
 	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
 #elif (BITS_PER_LONG == 32)
@@ -101,9 +105,31 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
 		if (ret != -EINVAL)
 			return ret;
 	}
+
+	shift = 0;
+	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
+	if (shm && (SHMLBA > PAGE_SIZE)) {
+		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
+
+		/* For shared memory, we have to preserve the SHMLBA
+		 * bits of the eventual vma->vm_pgoff value during
+		 * mmap(). Otherwise we run into cache aliasing problems
+		 * on some platforms. On these platforms, the pgoff of
+		 * a mmap() request is used to pick a suitable virtual
+		 * address for the mmap() region such that it will not
+		 * cause cache aliasing problems.
+		 *
+		 * Therefore, make sure the SHMLBA relevant bits of the
+		 * hash value we use are equal to those in the original
+		 * kernel virtual address.
+		 */
+		shift = bits;
+		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
+	}
+
 	return drm_ht_just_insert_please(&dev->map_hash, hash,
 					 user_token, 32 - PAGE_SHIFT - 3,
-					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
+					 shift, add);
 }
 
 /**
@@ -323,7 +349,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 	/* We do it here so that dev->struct_mutex protects the increment */
 	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
 		map->offset;
-	ret = drm_map_handle(dev, &list->hash, user_token, 0);
+	ret = drm_map_handle(dev, &list->hash, user_token, 0,
+			     (map->type == _DRM_SHM));
 	if (ret) {
 		if (map->type == _DRM_REGISTERS)
 			iounmap(map->handle);