| author    | Arto Merilainen <amerilainen@nvidia.com>  | 2013-05-29 06:26:05 -0400 |
|-----------|--------------------------------------------|---------------------------|
| committer | Thierry Reding <thierry.reding@gmail.com> | 2013-06-22 06:43:53 -0400 |
| commit    | 3364cd28906d87f0c77754998679bb66639d4112 (patch) | |
| tree      | fdd2d07872db8694df523fb64bc8779fe9f55f23 /drivers/gpu/host1x/job.c | |
| parent    | afac0e43c6c98473cce18fdeb5f7dda86dcf244f (diff) | |
gpu: host1x: Copy gathers before verification
The firewall verified gather buffers before copying them. This
allowed a malicious application to rewrite the buffer contents in the
window between verification and the copy by carefully timing the
rewrite.
This patch makes the buffer validation occur after copying the
buffers.
Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Signed-off-by: Thierry Reding <thierry.reding@gmail.com>
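For context, the bug being fixed is a time-of-check/time-of-use (TOCTOU) race: as long as the firewall validated a gather buffer that user space could still write to, the contents could change between the check and the moment the copy (and ultimately the hardware) consumed them. The sketch below is illustrative only and is not host1x code; the helper names (`words_are_safe`, `submit_in_place`, `submit_copy`) are made up. It contrasts the racy check-then-use ordering with the copy-then-validate ordering this patch adopts.

```c
/*
 * Illustrative TOCTOU sketch -- not host1x code. All names here are
 * hypothetical; only the ordering (copy first, validate the copy)
 * mirrors what the patch does for gather buffers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical firewall rule: every command word must stay below 0x100. */
static bool words_are_safe(const uint32_t *words, size_t count)
{
        for (size_t i = 0; i < count; i++)
                if (words[i] >= 0x100)
                        return false;
        return true;
}

/* Racy: the shared buffer can be rewritten after the check passes. */
static int submit_in_place(uint32_t *shared, size_t count)
{
        if (!words_are_safe(shared, count))
                return -1;
        /* window: another thread may rewrite shared[] right here */
        /* ... hand shared[] to the hardware ... */
        return 0;
}

/* Safe ordering: snapshot first, then validate and use only the snapshot. */
static int submit_copy(const uint32_t *shared, size_t count)
{
        uint32_t *copy = malloc(count * sizeof(*copy));
        int ret = -1;

        if (!copy)
                return -1;
        memcpy(copy, shared, count * sizeof(*copy));
        if (words_are_safe(copy, count)) {
                /* ... hand copy[] to the hardware ... */
                ret = 0;
        }
        free(copy);
        return ret;
}

int main(void)
{
        uint32_t cmds[] = { 0x10, 0x20, 0x30, 0x40 };
        size_t n = sizeof(cmds) / sizeof(cmds[0]);

        /* Both succeed on this input, but only submit_copy() is immune
         * to a concurrent rewrite of cmds[] after its check. */
        return (submit_in_place(cmds, n) == 0 &&
                submit_copy(cmds, n) == 0) ? 0 : 1;
}
```

In the patch itself this ordering shows up as copy_gathers() memcpy()ing each gather into the kernel-owned gather_copy_mapped buffer and only then running validate() against that copy, while the mmap of the original user buffer in validate() is removed.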
Diffstat (limited to 'drivers/gpu/host1x/job.c')
-rw-r--r-- | drivers/gpu/host1x/job.c | 51
1 file changed, 20 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 5b9548f610f1..cc807667d8f1 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -228,17 +228,15 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
         void *cmdbuf_page_addr = NULL;
 
         /* pin & patch the relocs for one gather */
-        while (i < job->num_relocs) {
+        for (i = 0; i < job->num_relocs; i++) {
                 struct host1x_reloc *reloc = &job->relocarray[i];
                 u32 reloc_addr = (job->reloc_addr_phys[i] +
                                   reloc->target_offset) >> reloc->shift;
                 u32 *target;
 
                 /* skip all other gathers */
-                if (!(reloc->cmdbuf && cmdbuf == reloc->cmdbuf)) {
-                        i++;
+                if (cmdbuf != reloc->cmdbuf)
                         continue;
-                }
 
                 if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
                         if (cmdbuf_page_addr)
@@ -257,9 +255,6 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
 
                 target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK);
                 *target = reloc_addr;
-
-                /* mark this gather as handled */
-                reloc->cmdbuf = 0;
         }
 
         if (cmdbuf_page_addr)
@@ -378,15 +373,13 @@ static int check_nonincr(struct host1x_firewall *fw)
 
 static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
 {
-        u32 *cmdbuf_base;
+        u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
+                (g->offset / sizeof(u32));
         int err = 0;
 
         if (!fw->job->is_addr_reg)
                 return 0;
 
-        cmdbuf_base = host1x_bo_mmap(g->bo);
-        if (!cmdbuf_base)
-                return -ENOMEM;
         fw->words = g->words;
         fw->cmdbuf_id = g->bo;
         fw->offset = 0;
@@ -453,10 +446,17 @@ out:
 
 static inline int copy_gathers(struct host1x_job *job, struct device *dev)
 {
+        struct host1x_firewall fw;
         size_t size = 0;
         size_t offset = 0;
         int i;
 
+        fw.job = job;
+        fw.dev = dev;
+        fw.reloc = job->relocarray;
+        fw.num_relocs = job->num_relocs;
+        fw.class = 0;
+
         for (i = 0; i < job->num_gathers; i++) {
                 struct host1x_job_gather *g = &job->gathers[i];
                 size += g->words * sizeof(u32);
@@ -477,14 +477,19 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
                 struct host1x_job_gather *g = &job->gathers[i];
                 void *gather;
 
+                /* Copy the gather */
                 gather = host1x_bo_mmap(g->bo);
                 memcpy(job->gather_copy_mapped + offset, gather + g->offset,
                        g->words * sizeof(u32));
                 host1x_bo_munmap(g->bo, gather);
 
+                /* Store the location in the buffer */
                 g->base = job->gather_copy;
                 g->offset = offset;
-                g->bo = NULL;
+
+                /* Validate the job */
+                if (validate(&fw, g))
+                        return -EINVAL;
 
                 offset += g->words * sizeof(u32);
         }
@@ -497,15 +502,8 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
         int err;
         unsigned int i, j;
         struct host1x *host = dev_get_drvdata(dev->parent);
-        struct host1x_firewall fw;
         DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));
 
-        fw.job = job;
-        fw.dev = dev;
-        fw.reloc = job->relocarray;
-        fw.num_relocs = job->num_relocs;
-        fw.class = 0;
-
         bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
         for (i = 0; i < job->num_waitchk; i++) {
                 u32 syncpt_id = job->waitchk[i].syncpt_id;
@@ -536,20 +534,11 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
                         if (job->gathers[j].bo == g->bo)
                                 job->gathers[j].handled = true;
 
-                err = 0;
-
-                if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
-                        err = validate(&fw, g);
-
+                err = do_relocs(job, g->bo);
                 if (err)
-                        dev_err(dev, "Job invalid (err=%d)\n", err);
-
-                if (!err)
-                        err = do_relocs(job, g->bo);
-
-                if (!err)
-                        err = do_waitchks(job, host, g->bo);
+                        break;
 
+                err = do_waitchks(job, host, g->bo);
                 if (err)
                         break;
         }