
Commit 1d26347

Dan Carpenter authored, alexdeucher committed
drm/amdgpu: unwind properly in amdgpu_cs_parser_init()
The amdgpu_cs_parser_init() function doesn't clean up after itself; instead the caller uses a free-everything function, amdgpu_cs_parser_fini(), on failure. This style of error handling is often buggy. In this example, we call "drm_free_large(parser->chunks[i].kdata);" when it is an uninitialized pointer or when "parser->chunks" is NULL.

I fixed this bug by adding unwind code so that the function frees everything that it allocates. I also made some other very minor changes:

1) Renamed "r" to "ret".
2) Moved the chunk_array allocation to the start of the function.
3) Removed some initializers which are no longer needed.

Reviewed-by: Christian König <[email protected]>
Reported-by: Ilja Van Sprundel <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
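For readers less familiar with the pattern the commit message describes, here is a minimal, self-contained sketch of the unwind style the patch adopts. It is plain userspace C with a hypothetical two-buffer struct (ctx, buf_a, buf_b and ctx_init are made-up names, not the amdgpu code): every failure path jumps to a label that frees only what the function has already allocated, so the caller never has to guess which fields are valid.

#include <stdlib.h>

/* Hypothetical example structure -- not the amdgpu parser. */
struct ctx {
        void *buf_a;
        void *buf_b;
};

/* Each failure path frees exactly what this function has already
 * allocated, in reverse order, via labels at the bottom. */
static int ctx_init(struct ctx *c)
{
        int ret;

        c->buf_a = malloc(64);
        if (!c->buf_a)
                return -1;              /* nothing to unwind yet */

        c->buf_b = malloc(128);
        if (!c->buf_b) {
                ret = -1;
                goto free_a;            /* only buf_a exists at this point */
        }

        return 0;

free_a:
        free(c->buf_a);
        c->buf_a = NULL;
        return ret;
}

int main(void)
{
        struct ctx c;

        /* On failure, ctx_init() has already cleaned up after itself,
         * so the caller needs no separate teardown call. */
        return ctx_init(&c) ? 1 : 0;
}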
1 parent 5a6adfa commit 1d26347

File tree

1 file changed: +51 -34 lines


drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

Lines changed: 51 additions & 34 deletions

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -154,42 +154,41 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
         union drm_amdgpu_cs *cs = data;
         uint64_t *chunk_array_user;
-        uint64_t *chunk_array = NULL;
+        uint64_t *chunk_array;
         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
         unsigned size, i;
-        int r = 0;
+        int ret;
 
-        if (!cs->in.num_chunks)
-                goto out;
+        if (cs->in.num_chunks == 0)
+                return 0;
+
+        chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+        if (!chunk_array)
+                return -ENOMEM;
 
         p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
         if (!p->ctx) {
-                r = -EINVAL;
-                goto out;
+                ret = -EINVAL;
+                goto free_chunk;
         }
+
         p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 
         /* get chunks */
         INIT_LIST_HEAD(&p->validated);
-        chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
-        if (chunk_array == NULL) {
-                r = -ENOMEM;
-                goto out;
-        }
-
         chunk_array_user = (uint64_t __user *)(cs->in.chunks);
         if (copy_from_user(chunk_array, chunk_array_user,
                            sizeof(uint64_t)*cs->in.num_chunks)) {
-                r = -EFAULT;
-                goto out;
+                ret = -EFAULT;
+                goto put_bo_list;
         }
 
         p->nchunks = cs->in.num_chunks;
         p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
                                   GFP_KERNEL);
-        if (p->chunks == NULL) {
-                r = -ENOMEM;
-                goto out;
+        if (!p->chunks) {
+                ret = -ENOMEM;
+                goto put_bo_list;
         }
 
         for (i = 0; i < p->nchunks; i++) {
@@ -200,8 +199,9 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                 chunk_ptr = (void __user *)chunk_array[i];
                 if (copy_from_user(&user_chunk, chunk_ptr,
                                        sizeof(struct drm_amdgpu_cs_chunk))) {
-                        r = -EFAULT;
-                        goto out;
+                        ret = -EFAULT;
+                        i--;
+                        goto free_partial_kdata;
                 }
                 p->chunks[i].chunk_id = user_chunk.chunk_id;
                 p->chunks[i].length_dw = user_chunk.length_dw;
@@ -212,13 +212,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
                 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
                 if (p->chunks[i].kdata == NULL) {
-                        r = -ENOMEM;
-                        goto out;
+                        ret = -ENOMEM;
+                        i--;
+                        goto free_partial_kdata;
                 }
                 size *= sizeof(uint32_t);
                 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
-                        r = -EFAULT;
-                        goto out;
+                        ret = -EFAULT;
+                        goto free_partial_kdata;
                 }
 
                 switch (p->chunks[i].chunk_id) {
@@ -238,35 +239,51 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                                 gobj = drm_gem_object_lookup(p->adev->ddev,
                                                              p->filp, handle);
                                 if (gobj == NULL) {
-                                        r = -EINVAL;
-                                        goto out;
+                                        ret = -EINVAL;
+                                        goto free_partial_kdata;
                                 }
 
                                 p->uf.bo = gem_to_amdgpu_bo(gobj);
                                 p->uf.offset = fence_data->offset;
                         } else {
-                                r = -EINVAL;
-                                goto out;
+                                ret = -EINVAL;
+                                goto free_partial_kdata;
                         }
                         break;
 
                 case AMDGPU_CHUNK_ID_DEPENDENCIES:
                         break;
 
                 default:
-                        r = -EINVAL;
-                        goto out;
+                        ret = -EINVAL;
+                        goto free_partial_kdata;
                 }
         }
 
 
         p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-        if (!p->ibs)
-                r = -ENOMEM;
+        if (!p->ibs) {
+                ret = -ENOMEM;
+                goto free_all_kdata;
+        }
 
-out:
         kfree(chunk_array);
-        return r;
+        return 0;
+
+free_all_kdata:
+        i = p->nchunks - 1;
+free_partial_kdata:
+        for (; i >= 0; i--)
+                drm_free_large(p->chunks[i].kdata);
+        kfree(p->chunks);
+put_bo_list:
+        if (p->bo_list)
+                amdgpu_bo_list_put(p->bo_list);
+        amdgpu_ctx_put(p->ctx);
+free_chunk:
+        kfree(chunk_array);
+
+        return ret;
 }
 
 /* Returns how many bytes TTM can move per IB.
@@ -810,7 +827,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         r = amdgpu_cs_parser_init(parser, data);
         if (r) {
                 DRM_ERROR("Failed to initialize parser !\n");
-                amdgpu_cs_parser_fini(parser, r, false);
+                kfree(parser);
                 up_read(&adev->exclusive_lock);
                 r = amdgpu_cs_handle_lockup(adev, r);
                 return r;
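
One detail of the new unwind path worth spelling out: free_all_kdata sets i = p->nchunks - 1 and deliberately falls through into free_partial_kdata, which frees kdata for chunks 0..i, while the two i-- statements before jumping account for the failure points where the current chunk's kdata was never successfully allocated. A stripped-down, runnable sketch of the same idiom follows, with hypothetical names (alloc_slots, slots); note that in this sketch the index must be a signed int for the "i >= 0" loop to terminate.

#include <stdlib.h>

/* Allocate slots[0..n-1]; on failure, free only what was allocated. */
static int alloc_slots(void **slots, int n)
{
        int i;                          /* signed so "i >= 0" can become false */

        for (i = 0; i < n; i++) {
                slots[i] = malloc(32);
                if (!slots[i]) {
                        i--;            /* slots[i] was not allocated; skip it */
                        goto free_partial;
                }
        }
        return 0;

free_partial:
        for (; i >= 0; i--)             /* frees slots[i] down to slots[0] */
                free(slots[i]);
        return -1;
}

int main(void)
{
        void *slots[4];

        return alloc_slots(slots, 4) ? 1 : 0;
}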
