diff options
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 83 |
1 file changed, 59 insertions, 24 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 41bc7fc0ebf6..a5d8242ace95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
@@ -48,33 +48,53 @@ static void amdgpu_ctx_do_release(struct kref *ref) | |||
48 | kfree(ctx); | 48 | kfree(ctx); |
49 | } | 49 | } |
50 | 50 | ||
51 | static void amdgpu_ctx_init(struct amdgpu_device *adev, | ||
52 | struct amdgpu_fpriv *fpriv, | ||
53 | struct amdgpu_ctx *ctx, | ||
54 | uint32_t id) | ||
55 | { | ||
56 | int i; | ||
57 | memset(ctx, 0, sizeof(*ctx)); | ||
58 | ctx->adev = adev; | ||
59 | kref_init(&ctx->refcount); | ||
60 | spin_lock_init(&ctx->ring_lock); | ||
61 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | ||
62 | ctx->rings[i].sequence = 1; | ||
63 | } | ||
64 | |||
51 | int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, | 65 | int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, |
52 | uint32_t *id) | 66 | uint32_t *id) |
53 | { | 67 | { |
54 | struct amdgpu_ctx *ctx; | 68 | struct amdgpu_ctx *ctx; |
55 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; | ||
56 | int i, j, r; | 69 | int i, j, r; |
57 | 70 | ||
58 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); | 71 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); |
59 | if (!ctx) | 72 | if (!ctx) |
60 | return -ENOMEM; | 73 | return -ENOMEM; |
61 | 74 | if (fpriv) { | |
62 | mutex_lock(&mgr->lock); | 75 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; |
63 | r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL); | 76 | mutex_lock(&mgr->lock); |
64 | if (r < 0) { | 77 | r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL); |
78 | if (r < 0) { | ||
79 | mutex_unlock(&mgr->lock); | ||
80 | kfree(ctx); | ||
81 | return r; | ||
82 | } | ||
83 | *id = (uint32_t)r; | ||
84 | amdgpu_ctx_init(adev, fpriv, ctx, *id); | ||
65 | mutex_unlock(&mgr->lock); | 85 | mutex_unlock(&mgr->lock); |
66 | kfree(ctx); | 86 | } else { |
67 | return r; | 87 | if (adev->kernel_ctx) { |
88 | DRM_ERROR("kernel context has been created.\n"); ||
89 | kfree(ctx); | ||
90 | return 0; | ||
91 | } | ||
92 | *id = AMD_KERNEL_CONTEXT_ID; | ||
93 | amdgpu_ctx_init(adev, fpriv, ctx, *id); | ||
94 | |||
95 | adev->kernel_ctx = ctx; | ||
68 | } | 96 | } |
69 | *id = (uint32_t)r; | ||
70 | 97 | ||
71 | memset(ctx, 0, sizeof(*ctx)); | ||
72 | ctx->adev = adev; | ||
73 | kref_init(&ctx->refcount); | ||
74 | spin_lock_init(&ctx->ring_lock); | ||
75 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | ||
76 | ctx->rings[i].sequence = 1; | ||
77 | mutex_unlock(&mgr->lock); | ||
78 | if (amdgpu_enable_scheduler) { | 98 | if (amdgpu_enable_scheduler) { |
79 | /* create context entity for each ring */ | 99 | /* create context entity for each ring */ |
80 | for (i = 0; i < adev->num_rings; i++) { | 100 | for (i = 0; i < adev->num_rings; i++) { |
@@ -105,17 +125,23 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, | |||
105 | int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id) | 125 | int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id) |
106 | { | 126 | { |
107 | struct amdgpu_ctx *ctx; | 127 | struct amdgpu_ctx *ctx; |
108 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; | ||
109 | 128 | ||
110 | mutex_lock(&mgr->lock); | 129 | if (fpriv) { |
111 | ctx = idr_find(&mgr->ctx_handles, id); | 130 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; |
112 | if (ctx) { | 131 | mutex_lock(&mgr->lock); |
113 | idr_remove(&mgr->ctx_handles, id); | 132 | ctx = idr_find(&mgr->ctx_handles, id); |
114 | kref_put(&ctx->refcount, amdgpu_ctx_do_release); | 133 | if (ctx) { |
134 | idr_remove(&mgr->ctx_handles, id); | ||
135 | kref_put(&ctx->refcount, amdgpu_ctx_do_release); | ||
136 | mutex_unlock(&mgr->lock); | ||
137 | return 0; | ||
138 | } | ||
115 | mutex_unlock(&mgr->lock); | 139 | mutex_unlock(&mgr->lock); |
140 | } else { | ||
141 | ctx = adev->kernel_ctx; | ||
142 | kref_put(&ctx->refcount, amdgpu_ctx_do_release); | ||
116 | return 0; | 143 | return 0; |
117 | } | 144 | } |
118 | mutex_unlock(&mgr->lock); | ||
119 | return -EINVAL; | 145 | return -EINVAL; |
120 | } | 146 | } |
121 | 147 | ||
@@ -124,9 +150,13 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev, | |||
124 | union drm_amdgpu_ctx_out *out) | 150 | union drm_amdgpu_ctx_out *out) |
125 | { | 151 | { |
126 | struct amdgpu_ctx *ctx; | 152 | struct amdgpu_ctx *ctx; |
127 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; | 153 | struct amdgpu_ctx_mgr *mgr; |
128 | unsigned reset_counter; | 154 | unsigned reset_counter; |
129 | 155 | ||
156 | if (!fpriv) | ||
157 | return -EINVAL; | ||
158 | |||
159 | mgr = &fpriv->ctx_mgr; | ||
130 | mutex_lock(&mgr->lock); | 160 | mutex_lock(&mgr->lock); |
131 | ctx = idr_find(&mgr->ctx_handles, id); | 161 | ctx = idr_find(&mgr->ctx_handles, id); |
132 | if (!ctx) { | 162 | if (!ctx) { |
@@ -202,7 +232,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, | |||
202 | struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id) | 232 | struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id) |
203 | { | 233 | { |
204 | struct amdgpu_ctx *ctx; | 234 | struct amdgpu_ctx *ctx; |
205 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; | 235 | struct amdgpu_ctx_mgr *mgr; |
236 | |||
237 | if (!fpriv) | ||
238 | return NULL; | ||
239 | |||
240 | mgr = &fpriv->ctx_mgr; | ||
206 | 241 | ||
207 | mutex_lock(&mgr->lock); | 242 | mutex_lock(&mgr->lock); |
208 | ctx = idr_find(&mgr->ctx_handles, id); | 243 | ctx = idr_find(&mgr->ctx_handles, id); |