aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2018-04-22 18:53:41 -0400
committerDave Airlie <airlied@redhat.com>2018-04-22 18:53:41 -0400
commit2e1d6eab503d179dbca51f54c65c4e0b70775fbf (patch)
tree6f51958c6bdf40077419cdc064877a484f0dfb6c
parentbc9ebca2daeb132a6375700f41bd65d87794e9c7 (diff)
parentff059fcbeed9cbed7421f82d1463dd74c472636e (diff)
Merge tag 'exynos-drm-fixes-for-v4.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next
Remove Exynos specific framebuffer structure and relevant functions. - it removes exynos_drm_fb structure which is a wrapper of drm_framebuffer and unnecessary two exynos specific callback functions, exynos_drm_destroy() and exynos_drm_fb_create_handle() because we can reuse existing drm common callback ones instead. * tag 'exynos-drm-fixes-for-v4.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos: drm/exynos: exynos_drm_fb -> drm_framebuffer drm/exynos: Move dma_addr out of exynos_drm_fb drm/exynos: Move GEM BOs to drm_framebuffer drm/amdkfd: Deallocate SDMA queues correctly drm/amdkfd: Fix scratch memory with HWS enabled
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c73
3 files changed, 29 insertions, 67 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c18e048f23c6..d55d29d31da4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1058,13 +1058,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1058 pr_warn("Can't create new usermode queue because %d queues were already created\n", 1058 pr_warn("Can't create new usermode queue because %d queues were already created\n",
1059 dqm->total_queue_count); 1059 dqm->total_queue_count);
1060 retval = -EPERM; 1060 retval = -EPERM;
1061 goto out; 1061 goto out_unlock;
1062 } 1062 }
1063 1063
1064 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { 1064 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1065 retval = allocate_sdma_queue(dqm, &q->sdma_id); 1065 retval = allocate_sdma_queue(dqm, &q->sdma_id);
1066 if (retval) 1066 if (retval)
1067 goto out; 1067 goto out_unlock;
1068 q->properties.sdma_queue_id = 1068 q->properties.sdma_queue_id =
1069 q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE; 1069 q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
1070 q->properties.sdma_engine_id = 1070 q->properties.sdma_engine_id =
@@ -1075,7 +1075,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1075 1075
1076 if (!mqd) { 1076 if (!mqd) {
1077 retval = -ENOMEM; 1077 retval = -ENOMEM;
1078 goto out; 1078 goto out_deallocate_sdma_queue;
1079 } 1079 }
1080 /* 1080 /*
1081 * Eviction state logic: we only mark active queues as evicted 1081 * Eviction state logic: we only mark active queues as evicted
@@ -1093,7 +1093,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1093 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, 1093 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
1094 &q->gart_mqd_addr, &q->properties); 1094 &q->gart_mqd_addr, &q->properties);
1095 if (retval) 1095 if (retval)
1096 goto out; 1096 goto out_deallocate_sdma_queue;
1097 1097
1098 list_add(&q->list, &qpd->queues_list); 1098 list_add(&q->list, &qpd->queues_list);
1099 qpd->queue_count++; 1099 qpd->queue_count++;
@@ -1114,7 +1114,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1114 pr_debug("Total of %d queues are accountable so far\n", 1114 pr_debug("Total of %d queues are accountable so far\n",
1115 dqm->total_queue_count); 1115 dqm->total_queue_count);
1116 1116
1117out: 1117 mutex_unlock(&dqm->lock);
1118 return retval;
1119
1120out_deallocate_sdma_queue:
1121 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1122 deallocate_sdma_queue(dqm, q->sdma_id);
1123out_unlock:
1118 mutex_unlock(&dqm->lock); 1124 mutex_unlock(&dqm->lock);
1119 return retval; 1125 return retval;
1120} 1126}
@@ -1433,8 +1439,10 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
1433 1439
1434 /* Clear all user mode queues */ 1440 /* Clear all user mode queues */
1435 list_for_each_entry(q, &qpd->queues_list, list) { 1441 list_for_each_entry(q, &qpd->queues_list, list) {
1436 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) 1442 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1437 dqm->sdma_queue_count--; 1443 dqm->sdma_queue_count--;
1444 deallocate_sdma_queue(dqm, q->sdma_id);
1445 }
1438 1446
1439 if (q->properties.is_active) 1447 if (q->properties.is_active)
1440 dqm->queue_count--; 1448 dqm->queue_count--;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 7614375489a4..89ba4c670ec5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -188,8 +188,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
188 packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base; 188 packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
189 packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit; 189 packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
190 190
191 /* TODO: scratch support */ 191 packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
192 packet->sh_hidden_private_base_vmid = 0;
193 192
194 packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); 193 packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
195 packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); 194 packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 0faaf829f5bf..f0e79178bde6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -18,6 +18,7 @@
18#include <drm/drm_fb_helper.h> 18#include <drm/drm_fb_helper.h>
19#include <drm/drm_atomic.h> 19#include <drm/drm_atomic.h>
20#include <drm/drm_atomic_helper.h> 20#include <drm/drm_atomic_helper.h>
21#include <drm/drm_gem_framebuffer_helper.h>
21#include <uapi/drm/exynos_drm.h> 22#include <uapi/drm/exynos_drm.h>
22 23
23#include "exynos_drm_drv.h" 24#include "exynos_drm_drv.h"
@@ -26,20 +27,6 @@
26#include "exynos_drm_iommu.h" 27#include "exynos_drm_iommu.h"
27#include "exynos_drm_crtc.h" 28#include "exynos_drm_crtc.h"
28 29
29#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
30
31/*
32 * exynos specific framebuffer structure.
33 *
34 * @fb: drm framebuffer obejct.
35 * @exynos_gem: array of exynos specific gem object containing a gem object.
36 */
37struct exynos_drm_fb {
38 struct drm_framebuffer fb;
39 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
40 dma_addr_t dma_addr[MAX_FB_BUFFER];
41};
42
43static int check_fb_gem_memory_type(struct drm_device *drm_dev, 30static int check_fb_gem_memory_type(struct drm_device *drm_dev,
44 struct exynos_drm_gem *exynos_gem) 31 struct exynos_drm_gem *exynos_gem)
45{ 32{
@@ -66,40 +53,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
66 return 0; 53 return 0;
67} 54}
68 55
69static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
70{
71 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
72 unsigned int i;
73
74 drm_framebuffer_cleanup(fb);
75
76 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) {
77 struct drm_gem_object *obj;
78
79 if (exynos_fb->exynos_gem[i] == NULL)
80 continue;
81
82 obj = &exynos_fb->exynos_gem[i]->base;
83 drm_gem_object_unreference_unlocked(obj);
84 }
85
86 kfree(exynos_fb);
87 exynos_fb = NULL;
88}
89
90static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
91 struct drm_file *file_priv,
92 unsigned int *handle)
93{
94 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
95
96 return drm_gem_handle_create(file_priv,
97 &exynos_fb->exynos_gem[0]->base, handle);
98}
99
100static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { 56static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
101 .destroy = exynos_drm_fb_destroy, 57 .destroy = drm_gem_fb_destroy,
102 .create_handle = exynos_drm_fb_create_handle, 58 .create_handle = drm_gem_fb_create_handle,
103}; 59};
104 60
105struct drm_framebuffer * 61struct drm_framebuffer *
@@ -108,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
108 struct exynos_drm_gem **exynos_gem, 64 struct exynos_drm_gem **exynos_gem,
109 int count) 65 int count)
110{ 66{
111 struct exynos_drm_fb *exynos_fb; 67 struct drm_framebuffer *fb;
112 int i; 68 int i;
113 int ret; 69 int ret;
114 70
115 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 71 fb = kzalloc(sizeof(*fb), GFP_KERNEL);
116 if (!exynos_fb) 72 if (!fb)
117 return ERR_PTR(-ENOMEM); 73 return ERR_PTR(-ENOMEM);
118 74
119 for (i = 0; i < count; i++) { 75 for (i = 0; i < count; i++) {
@@ -121,23 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
121 if (ret < 0) 77 if (ret < 0)
122 goto err; 78 goto err;
123 79
124 exynos_fb->exynos_gem[i] = exynos_gem[i]; 80 fb->obj[i] = &exynos_gem[i]->base;
125 exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr
126 + mode_cmd->offsets[i];
127 } 81 }
128 82
129 drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); 83 drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
130 84
131 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); 85 ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
132 if (ret < 0) { 86 if (ret < 0) {
133 DRM_ERROR("failed to initialize framebuffer\n"); 87 DRM_ERROR("failed to initialize framebuffer\n");
134 goto err; 88 goto err;
135 } 89 }
136 90
137 return &exynos_fb->fb; 91 return fb;
138 92
139err: 93err:
140 kfree(exynos_fb); 94 kfree(fb);
141 return ERR_PTR(ret); 95 return ERR_PTR(ret);
142} 96}
143 97
@@ -191,12 +145,13 @@ err:
191 145
192dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) 146dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
193{ 147{
194 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 148 struct exynos_drm_gem *exynos_gem;
195 149
196 if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) 150 if (WARN_ON_ONCE(index >= MAX_FB_BUFFER))
197 return 0; 151 return 0;
198 152
199 return exynos_fb->dma_addr[index]; 153 exynos_gem = to_exynos_gem(fb->obj[index]);
154 return exynos_gem->dma_addr + fb->offsets[index];
200} 155}
201 156
202static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = { 157static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {