Diffstat (limited to 'drivers/gpu/nvgpu/common/pramin.c')
-rw-r--r--  drivers/gpu/nvgpu/common/pramin.c  129
1 file changed, 129 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
new file mode 100644
index 00000000..b9216309
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -0,0 +1,129 @@
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <nvgpu/pramin.h>
#include <nvgpu/page_allocator.h>

#include "gk20a/gk20a.h"

/*
 * Flip this to force all gk20a_mem* accesses through PRAMIN from the start
 * of boot, even for buffers that could be accessed via cpu_va. At runtime,
 * the flag is exposed in debugfs as "force_pramin".
 */
#define GK20A_FORCE_PRAMIN_DEFAULT false

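/*
 * Batch read: copy "words" 32-bit words from the mapped PRAMIN window,
 * starting at register offset "start", into the buffer at *arg. *arg is
 * advanced past the words read, so consecutive batches continue where the
 * previous one left off.
 */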
void pramin_access_batch_rd_n(struct gk20a *g, u32 start, u32 words, u32 **arg)
{
        u32 r = start, *dest_u32 = *arg;

        if (!g->regs) {
                __gk20a_warn_on_no_regs();
                return;
        }

        while (words--) {
                *dest_u32++ = gk20a_readl(g, r);
                r += sizeof(u32);
        }

        *arg = dest_u32;
}

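/*
 * Batch write: copy "words" 32-bit words from the buffer at *arg into the
 * mapped PRAMIN window, starting at register offset "start". *arg is
 * advanced past the words written.
 */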
void pramin_access_batch_wr_n(struct gk20a *g, u32 start, u32 words, u32 **arg)
{
        u32 r = start, *src_u32 = *arg;

        if (!g->regs) {
                __gk20a_warn_on_no_regs();
                return;
        }

        while (words--) {
                writel_relaxed(*src_u32++, g->regs + r);
                r += sizeof(u32);
        }

        *arg = src_u32;
}

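/*
 * Batch memset: store the 32-bit pattern **arg into "words" consecutive
 * words of the mapped PRAMIN window. The pattern pointer is not advanced,
 * so the same value is repeated across batches.
 */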
void pramin_access_batch_set(struct gk20a *g, u32 start, u32 words, u32 **arg)
{
        u32 r = start, repeat = **arg;

        if (!g->regs) {
                __gk20a_warn_on_no_regs();
                return;
        }

        while (words--) {
                writel_relaxed(repeat, g->regs + r);
                r += sizeof(u32);
        }
}

/*
 * The PRAMIN window is 1 MB, so the window base address must be moved
 * whenever an access crosses a 1 MB boundary. This same loop is used for
 * read, write and memset. Offset and size are in bytes. "loop" is called
 * once per contiguous range, with "arg" passed through.
 */
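/*
 * For example, a 12 KiB access that starts 4 KiB below a 1 MB boundary is
 * split into two batches: 4 KiB up to the boundary, then, after the window
 * has been moved, the remaining 8 KiB (assuming the chunk is contiguous).
 */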
void nvgpu_pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
                u32 offset, u32 size, pramin_access_batch_fn loop, u32 **arg)
{
        struct nvgpu_page_alloc *alloc = NULL;
        struct page_alloc_chunk *chunk = NULL;
        u32 byteoff, start_reg, until_end, n;

        alloc = get_vidmem_page_alloc(mem->sgt->sgl);

        /* Skip ahead to the chunk that contains the starting offset. */
        list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) {
                if (offset >= chunk->length)
                        offset -= chunk->length;
                else
                        break;
        }

        /*
         * "offset" stays in bytes throughout so it can be compared against
         * chunk->length; enter() expects a word offset into the chunk.
         */
        while (size) {
                byteoff = g->ops.pramin.enter(g, mem, chunk,
                                              offset / sizeof(u32));
                start_reg = g->ops.pramin.data032_r(byteoff / sizeof(u32));
                until_end = SZ_1M - (byteoff & (SZ_1M - 1));

                /* Clip the batch to the window end and to the chunk end. */
                n = min3(size, until_end, (u32)(chunk->length - offset));

                loop(g, start_reg, n / sizeof(u32), arg);

                /* read back to synchronize accesses */
                gk20a_readl(g, start_reg);
                g->ops.pramin.exit(g, mem, chunk);

                size -= n;

                if (n == (chunk->length - offset)) {
                        chunk = list_next_entry(chunk, list_entry);
                        offset = 0;
                } else {
                        offset += n;
                }
        }
}
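
/*
 * A minimal usage sketch (not part of this file): how a vidmem read helper
 * drives the batched loop. The helper name here is hypothetical; the
 * in-tree gk20a_mem* accessors follow the same pattern.
 */
#if 0
static void example_vidmem_rd_n(struct gk20a *g, struct mem_desc *mem,
                                u32 offset, void *dest, u32 size)
{
        u32 *dest_u32 = dest;

        /* Read "size" bytes at "offset" from vidmem into dest via PRAMIN. */
        nvgpu_pramin_access_batched(g, mem, offset, size,
                                    pramin_access_batch_rd_n, &dest_u32);
}
#endif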
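
/*
 * Initialize PRAMIN bookkeeping: no window is mapped yet, and forced
 * PRAMIN access starts at its compile-time default.
 */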
void nvgpu_init_pramin(struct mm_gk20a *mm)
{
        mm->pramin_window = 0;
        nvgpu_spinlock_init(&mm->pramin_window_lock);
        mm->force_pramin = GK20A_FORCE_PRAMIN_DEFAULT;
}