author    Aingara Paramakuru <aparamakuru@nvidia.com>    2015-11-03 11:44:14 -0500
committer Deepak Nibade <dnibade@nvidia.com>             2016-12-27 04:52:09 -0500
commit    9ab9436268ae2121d3dc57c98d16890953f6cd35 (patch)
tree      c499c00b1e563396e0dc577f359cc242016b7e3f /drivers/gpu/nvgpu/gp10b/gr_gp10b.c
parent    f4b2a02b68d79d30a1292f9b3551d08c71fb899f (diff)
gpu: nvgpu: gp10b: map GfxP buffers as GPU cacheable
Some of the allocated buffers are used during normal graphics
processing. Mark them as GPU cacheable to improve performance.

Bug 1695718

Change-Id: I71d5d1538516e966526abe5e38a557776321597f
Signed-off-by: Aingara Paramakuru <aparamakuru@nvidia.com>
Reviewed-on: http://git-master/r/827087
(cherry picked from commit 60b40ac144c94e24a2c449c8be937edf8865e1ed)
Reviewed-on: http://git-master/r/828493
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
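In effect, each GfxP context buffer allocation changes from the combined
alloc-and-map helper to a two-step sequence: allocate the backing memory,
then map it into the GPU VA space with the cacheable flag. A simplified
sketch of the before/after at one call site (buf is a placeholder
struct mem_desc; the real buffer names and error handling are as in the
diff below):

	/* Before: allocate and map in one call; the mapping uses the
	 * default flags, i.e. not GPU cacheable. */
	err = gk20a_gmmu_alloc_map(vm, size, &buf);

	/* After: allocate backing memory, then map it GPU cacheable. */
	err = gk20a_gmmu_alloc_attr(vm->mm->g, 0, size, &buf);
	if (!err)
		buf.gpu_va = gk20a_gmmu_map(vm, &buf.sgt, size,
				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
				gk20a_mem_flag_none, false);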
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/gr_gp10b.c')
-rw-r--r--    drivers/gpu/nvgpu/gp10b/gr_gp10b.c    51
1 file changed, 43 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index de6023b5..c801a2b8 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -18,6 +18,7 @@
 #include <linux/tegra-fuse.h>
 
 #include "gk20a/gr_gk20a.h"
+#include "gk20a/semaphore_gk20a.h"
 
 #include "gm20b/gr_gm20b.h"	/* for MAXWELL classes */
 #include "gp10b/gr_gp10b.h"
@@ -492,6 +493,36 @@ static int gr_gp10b_init_ctx_state(struct gk20a *g)
 	return 0;
 }
 
+int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
+		struct mem_desc *mem)
+{
+	int err;
+
+	gk20a_dbg_fn("");
+
+	err = gk20a_gmmu_alloc_attr(vm->mm->g, 0, size, mem);
+	if (err)
+		return err;
+
+	mem->gpu_va = gk20a_gmmu_map(vm,
+			&mem->sgt,
+			size,
+			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			gk20a_mem_flag_none,
+			false);
+
+	if (!mem->gpu_va) {
+		err = -ENOMEM;
+		goto fail_free;
+	}
+
+	return 0;
+
+fail_free:
+	gk20a_gmmu_free(vm->mm->g, mem);
+	return err;
+}
+
 static int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		struct gr_ctx_desc **gr_ctx, struct vm_gk20a *vm,
 		u32 class,
@@ -530,32 +561,36 @@ static int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size);
 		gk20a_dbg_info("gfxp context attrib_cb_size=%d",
 				attrib_cb_size);
-		err = gk20a_gmmu_alloc_map(vm, g->gr.t18x.ctx_vars.preempt_image_size,
-				&(*gr_ctx)->t18x.preempt_ctxsw_buffer);
+		err = gr_gp10b_alloc_buffer(vm,
+				g->gr.t18x.ctx_vars.preempt_image_size,
+				&(*gr_ctx)->t18x.preempt_ctxsw_buffer);
 		if (err) {
 			gk20a_err(dev_from_gk20a(vm->mm->g),
 				  "cannot allocate preempt buffer");
 			goto fail_free_gk20a_ctx;
 		}
 
-		err = gk20a_gmmu_alloc_map(vm, spill_size,
-				&(*gr_ctx)->t18x.spill_ctxsw_buffer);
+		err = gr_gp10b_alloc_buffer(vm,
+				spill_size,
+				&(*gr_ctx)->t18x.spill_ctxsw_buffer);
 		if (err) {
 			gk20a_err(dev_from_gk20a(vm->mm->g),
 				  "cannot allocate spill buffer");
 			goto fail_free_preempt;
 		}
 
-		err = gk20a_gmmu_alloc_map(vm, attrib_cb_size,
-				&(*gr_ctx)->t18x.betacb_ctxsw_buffer);
+		err = gr_gp10b_alloc_buffer(vm,
+				attrib_cb_size,
+				&(*gr_ctx)->t18x.betacb_ctxsw_buffer);
 		if (err) {
 			gk20a_err(dev_from_gk20a(vm->mm->g),
 				  "cannot allocate beta buffer");
 			goto fail_free_spill;
 		}
 
-		err = gk20a_gmmu_alloc_map(vm, pagepool_size,
-				&(*gr_ctx)->t18x.pagepool_ctxsw_buffer);
+		err = gr_gp10b_alloc_buffer(vm,
+				pagepool_size,
+				&(*gr_ctx)->t18x.pagepool_ctxsw_buffer);
 		if (err) {
 			gk20a_err(dev_from_gk20a(vm->mm->g),
 				  "cannot allocate page pool");