summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c')
-rw-r--r--drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c160
1 files changed, 160 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
new file mode 100644
index 00000000..3bbfde0a
--- /dev/null
+++ b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
@@ -0,0 +1,160 @@
1/*
2 * GP10B RPFB
3 *
4 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <nvgpu/dma.h>
26
27#include "gk20a/gk20a.h"
28
29#include "rpfb_gp10b.h"
30
31#include <nvgpu/hw/gp10b/hw_fifo_gp10b.h>
32#include <nvgpu/hw/gp10b/hw_fb_gp10b.h>
33#include <nvgpu/hw/gp10b/hw_bus_gp10b.h>
34#include <nvgpu/hw/gp10b/hw_gmmu_gp10b.h>
35
36int gp10b_replayable_pagefault_buffer_init(struct gk20a *g)
37{
38 u32 addr_lo;
39 u32 addr_hi;
40 struct vm_gk20a *vm = g->mm.bar2.vm;
41 int err;
42 size_t rbfb_size = NV_UVM_FAULT_BUF_SIZE *
43 fifo_replay_fault_buffer_size_hw_entries_v();
44
45 gk20a_dbg_fn("");
46
47 if (!g->mm.bar2_desc.gpu_va) {
48 err = nvgpu_dma_alloc_map_sys(vm, rbfb_size,
49 &g->mm.bar2_desc);
50 if (err) {
51 nvgpu_err(g, "Error in replayable fault buffer");
52 return err;
53 }
54 }
55 addr_lo = u64_lo32(g->mm.bar2_desc.gpu_va >> 12);
56 addr_hi = u64_hi32(g->mm.bar2_desc.gpu_va);
57 gk20a_writel(g, fifo_replay_fault_buffer_hi_r(),
58 fifo_replay_fault_buffer_hi_base_f(addr_hi));
59
60 gk20a_writel(g, fifo_replay_fault_buffer_lo_r(),
61 fifo_replay_fault_buffer_lo_base_f(addr_lo) |
62 fifo_replay_fault_buffer_lo_enable_true_v());
63 gk20a_dbg_fn("done");
64 return 0;
65}
66
67void gp10b_replayable_pagefault_buffer_deinit(struct gk20a *g)
68{
69 struct vm_gk20a *vm = g->mm.bar2.vm;
70
71 nvgpu_dma_unmap_free(vm, &g->mm.bar2_desc);
72}
73
74u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g)
75{
76 u32 get_idx = 0;
77
78 gk20a_dbg_fn("");
79
80 get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r());
81
82 if (get_idx >= fifo_replay_fault_buffer_size_hw_entries_v())
83 nvgpu_err(g, "Error in replayable fault buffer");
84
85 gk20a_dbg_fn("done");
86 return get_idx;
87}
88
89u32 gp10b_replayable_pagefault_buffer_put_index(struct gk20a *g)
90{
91 u32 put_idx = 0;
92
93 gk20a_dbg_fn("");
94 put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r());
95
96 if (put_idx >= fifo_replay_fault_buffer_size_hw_entries_v())
97 nvgpu_err(g, "Error in UVM");
98
99 gk20a_dbg_fn("done");
100 return put_idx;
101}
102
103bool gp10b_replayable_pagefault_buffer_is_empty(struct gk20a *g)
104{
105 u32 get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r());
106 u32 put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r());
107
108 return (get_idx == put_idx ? true : false);
109}
110
111bool gp10b_replayable_pagefault_buffer_is_full(struct gk20a *g)
112{
113 u32 get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r());
114 u32 put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r());
115 u32 hw_entries = gk20a_readl(g, fifo_replay_fault_buffer_size_r());
116
117 return (get_idx == ((put_idx + 1) % hw_entries) ? true : false);
118}
119
120bool gp10b_replayable_pagefault_buffer_is_overflow(struct gk20a *g)
121{
122 u32 info = gk20a_readl(g, fifo_replay_fault_buffer_info_r());
123
124 return fifo_replay_fault_buffer_info_overflow_f(info);
125}
126
127void gp10b_replayable_pagefault_buffer_clear_overflow(struct gk20a *g)
128{
129 u32 info = gk20a_readl(g, fifo_replay_fault_buffer_info_r());
130
131 info |= fifo_replay_fault_buffer_info_overflow_clear_v();
132 gk20a_writel(g, fifo_replay_fault_buffer_info_r(), info);
133
134}
135
/*
 * Dump the replayable fault buffer registers and derived state to the
 * kernel log, for debugging.
 */
void gp10b_replayable_pagefault_buffer_info(struct gk20a *g)
{
	gk20a_dbg_fn("");

	pr_info("rpfb low: 0x%x\n",
		(gk20a_readl(g, fifo_replay_fault_buffer_lo_r()) >> 12));
	pr_info("rpfb hi: 0x%x\n",
		gk20a_readl(g, fifo_replay_fault_buffer_hi_r()));
	pr_info("rpfb enabled: 0x%x\n",
		(gk20a_readl(g, fifo_replay_fault_buffer_lo_r()) & 0x1));
	/* u32 values must be printed with %u, not %d (-Wformat) */
	pr_info("rpfb size: %u\n",
		gk20a_readl(g, fifo_replay_fault_buffer_size_r()));
	pr_info("rpfb get index: %u\n",
		gp10b_replayable_pagefault_buffer_get_index(g));
	pr_info("rpfb put index: %u\n",
		gp10b_replayable_pagefault_buffer_put_index(g));
	/* bool promotes to int, so %d is correct for these three */
	pr_info("rpfb empty: %d\n",
		gp10b_replayable_pagefault_buffer_is_empty(g));
	pr_info("rpfb full %d\n",
		gp10b_replayable_pagefault_buffer_is_full(g));
	pr_info("rpfb overflow %d\n",
		gp10b_replayable_pagefault_buffer_is_overflow(g));

	gk20a_dbg_fn("done");
}