Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c            12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h             2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c   46
3 files changed, 49 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 3d6a073e130b..77e77b2b9d80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -322,6 +322,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		dev_priv->max_gmr_ids =
 			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
 	}
+	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+		dev_priv->max_gmr_pages =
+			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
+		dev_priv->memory_size =
+			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
+	}
 
 	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
 	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
@@ -338,6 +344,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		DRM_INFO("Max GMR descriptors is %u\n",
 			 (unsigned)dev_priv->max_gmr_descriptors);
 	}
+	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+		DRM_INFO("Max number of GMR pages is %u\n",
+			 (unsigned)dev_priv->max_gmr_pages);
+		DRM_INFO("Max dedicated hypervisor graphics memory is %u\n",
+			 (unsigned)dev_priv->memory_size);
+	}
 	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
 		 dev_priv->vram_start, dev_priv->vram_size / 1024);
 	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
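
In short, vmw_driver_load() now probes for SVGA_CAP_GMR2 and, only when the device advertises it, reads the GMR page limit and the dedicated graphics memory size and reports both via DRM_INFO. A minimal user-space sketch of that capability-gated read pattern follows; fake_device, svga_read() and the CAP_GMR2/register constants are stand-ins invented for illustration, not the driver's vmw_read()/SVGA_REG_* interface.

#include <stdint.h>
#include <stdio.h>

/* Stand-in constants and register file; the real driver reads
 * SVGA_REG_GMRS_MAX_PAGES and SVGA_REG_MEMORY_SIZE through vmw_read()
 * and tests SVGA_CAP_GMR2 in dev_priv->capabilities. */
#define CAP_GMR2 (1u << 30)                       /* hypothetical bit */
enum { REG_GMRS_MAX_PAGES, REG_MEMORY_SIZE, REG_COUNT };

struct fake_device {
	uint32_t capabilities;
	uint32_t regs[REG_COUNT];
};

static uint32_t svga_read(const struct fake_device *dev, unsigned reg)
{
	return dev->regs[reg];                    /* models vmw_read() */
}

int main(void)
{
	struct fake_device dev = {
		.capabilities = CAP_GMR2,
		.regs = { [REG_GMRS_MAX_PAGES] = 65536,
			  [REG_MEMORY_SIZE]    = 256u * 1024 * 1024 },
	};
	uint32_t max_gmr_pages = 0, memory_size = 0;

	/* Only a GMR2-capable device exposes the page and memory limits. */
	if (dev.capabilities & CAP_GMR2) {
		max_gmr_pages = svga_read(&dev, REG_GMRS_MAX_PAGES);
		memory_size   = svga_read(&dev, REG_MEMORY_SIZE);
		printf("Max number of GMR pages is %u\n",
		       (unsigned)max_gmr_pages);
		printf("Max dedicated hypervisor graphics memory is %u\n",
		       (unsigned)memory_size);
	}
	return 0;
}

The point is simply that the two limits stay at their zero defaults on pre-GMR2 devices, which is what the gmrid manager later interprets as "no device limit".
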
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 95b75000f0c9..323fc10de2d6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -190,6 +190,8 @@ struct vmw_private {
 	uint32_t capabilities;
 	uint32_t max_gmr_descriptors;
 	uint32_t max_gmr_ids;
+	uint32_t max_gmr_pages;
+	uint32_t memory_size;
 	bool has_gmr;
 	struct mutex hw_mutex;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index ac6e0d1bd629..5f717152cff5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -40,6 +40,8 @@ struct vmwgfx_gmrid_man {
 	spinlock_t lock;
 	struct ida gmr_ida;
 	uint32_t max_gmr_ids;
+	uint32_t max_gmr_pages;
+	uint32_t used_gmr_pages;
 };
 
 static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
@@ -49,33 +51,50 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 {
 	struct vmwgfx_gmrid_man *gman =
 		(struct vmwgfx_gmrid_man *)man->priv;
-	int ret;
+	int ret = 0;
 	int id;
 
 	mem->mm_node = NULL;
 
-	do {
-		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
-			return -ENOMEM;
+	spin_lock(&gman->lock);
+
+	if (gman->max_gmr_pages > 0) {
+		gman->used_gmr_pages += bo->num_pages;
+		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
+			goto out_err_locked;
+	}
 
+	do {
+		spin_unlock(&gman->lock);
+		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
 		spin_lock(&gman->lock);
-		ret = ida_get_new(&gman->gmr_ida, &id);
 
+		ret = ida_get_new(&gman->gmr_ida, &id);
 		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
 			ida_remove(&gman->gmr_ida, id);
-			spin_unlock(&gman->lock);
-			return 0;
+			ret = 0;
+			goto out_err_locked;
 		}
-
-		spin_unlock(&gman->lock);
-
 	} while (ret == -EAGAIN);
 
 	if (likely(ret == 0)) {
 		mem->mm_node = gman;
 		mem->start = id;
-	}
+		mem->num_pages = bo->num_pages;
+	} else
+		goto out_err_locked;
+
+	spin_unlock(&gman->lock);
+	return 0;
 
+out_err:
+	spin_lock(&gman->lock);
+out_err_locked:
+	gman->used_gmr_pages -= bo->num_pages;
+	spin_unlock(&gman->lock);
 	return ret;
 }
 
@@ -88,6 +107,7 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
 	if (mem->mm_node) {
 		spin_lock(&gman->lock);
 		ida_remove(&gman->gmr_ida, mem->start);
+		gman->used_gmr_pages -= mem->num_pages;
 		spin_unlock(&gman->lock);
 		mem->mm_node = NULL;
 	}
@@ -96,6 +116,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
 static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
 			      unsigned long p_size)
 {
+	struct vmw_private *dev_priv =
+		container_of(man->bdev, struct vmw_private, bdev);
 	struct vmwgfx_gmrid_man *gman =
 		kzalloc(sizeof(*gman), GFP_KERNEL);
 
@@ -103,6 +125,8 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
 		return -ENOMEM;
 
 	spin_lock_init(&gman->lock);
+	gman->max_gmr_pages = dev_priv->max_gmr_pages;
+	gman->used_gmr_pages = 0;
 	ida_init(&gman->gmr_ida);
 	gman->max_gmr_ids = p_size;
 	man->priv = (void *) gman;
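
Taken together, the gmrid manager changes add page accounting around the GMR id allocation: used_gmr_pages is charged up front in vmw_gmrid_man_get_node(), the request is refused once the running total would exceed max_gmr_pages (a limit of 0 means no device limit), every failure path rolls the charge back under the spinlock, and vmw_gmrid_man_put_node() releases the pages recorded in mem->num_pages. The standalone C model below captures just that reserve/release invariant; gmr_pages_reserve() and gmr_pages_release() are invented names, and the ida id allocation and spinlock are deliberately left out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal model of the accounting added to vmwgfx_gmrid_man:
 * a running page total that may never exceed the device limit.
 * Locking and the id allocation (ida) are omitted for clarity. */
struct gmrid_man {
	uint32_t max_gmr_pages;   /* 0 means "no device limit" */
	uint32_t used_gmr_pages;
};

static bool gmr_pages_reserve(struct gmrid_man *man, uint32_t num_pages)
{
	if (man->max_gmr_pages > 0) {
		man->used_gmr_pages += num_pages;
		if (man->used_gmr_pages > man->max_gmr_pages) {
			man->used_gmr_pages -= num_pages;  /* roll back */
			return false;
		}
	}
	return true;
}

static void gmr_pages_release(struct gmrid_man *man, uint32_t num_pages)
{
	man->used_gmr_pages -= num_pages;
}

int main(void)
{
	struct gmrid_man man = { .max_gmr_pages = 1024, .used_gmr_pages = 0 };

	printf("reserve 800: %s\n",
	       gmr_pages_reserve(&man, 800) ? "ok" : "refused");
	printf("reserve 400: %s\n",
	       gmr_pages_reserve(&man, 400) ? "ok" : "refused");
	gmr_pages_release(&man, 800);
	printf("reserve 400: %s\n",
	       gmr_pages_reserve(&man, 400) ? "ok" : "refused");
	return 0;
}

Run as-is, the model refuses the second reservation (800 + 400 > 1024) and accepts it again once the first 800 pages are released, which is the per-device behaviour the patch enforces.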