path: root/drivers/gpu/nvgpu/common/linux/sim_pci.c
author    Antony Clince Alex <aalex@nvidia.com>    2018-04-25 00:49:01 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-05-09 21:25:32 -0400
commit    2a23cd249451c952aaab71eaad6cf5f77d2bc6e2 (patch)
tree      c9804d669872cc78ef2eb7d941c443ebec42b674 /drivers/gpu/nvgpu/common/linux/sim_pci.c
parent    703d00d730d230f9ac9970e7d2d22a7d8f0cd2d1 (diff)
gpu: nvgpu: refactored sim_readl/sim_writel
Refactored sim_readl and sim_writel to use OS-agnostic structures. Converted
all sim buffers to the type nvgpu_mem and replaced all alloc_page and
free_page calls with the corresponding nvgpu_dma_alloc/nvgpu_dma_free calls.

JIRA VQRM-2368

Change-Id: Ia9d29119d31f239ed16be932cfd16c334002c727
Signed-off-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1702050
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Sourab Gupta <sourabg@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
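For readers skimming the hunks below, the core of the change is the new nvgpu_mem-based
buffer lifecycle. The following is a condensed sketch of the two helpers this commit adds
(reproduced from the diff itself, not additional code); the comments are editorial:

    /* Each sim ring/msg buffer becomes one PAGE_SIZE nvgpu_mem allocation. */
    static int gk20a_alloc_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
    {
            int err = nvgpu_dma_alloc(g, PAGE_SIZE, mem);

            if (err)
                    return err;

            /* Establish the cpu_va mapping used by sim_msg_bfr() and friends. */
            nvgpu_mem_begin(g, mem);
            return 0;
    }

    static void gk20a_free_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
    {
            if (nvgpu_mem_is_valid(mem)) {
                    nvgpu_mem_end(g, mem);   /* drop the cpu_va mapping */
                    nvgpu_dma_free(g, mem);
            }
            memset(mem, 0, sizeof(*mem));
    }

With this in place, sim_readl/sim_writel take the OS-agnostic struct sim_gk20a and recover
the Linux-specific wrapper via container_of(), and physical addresses are obtained through
nvgpu_mem_get_addr() instead of the old page/phys bookkeeping.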
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/sim_pci.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/sim_pci.c  170
1 file changed, 81 insertions(+), 89 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/sim_pci.c b/drivers/gpu/nvgpu/common/linux/sim_pci.c
index 889eeb24..ea417615 100644
--- a/drivers/gpu/nvgpu/common/linux/sim_pci.c
+++ b/drivers/gpu/nvgpu/common/linux/sim_pci.c
@@ -20,47 +20,67 @@
 
 #include <nvgpu/log.h>
 #include <nvgpu/linux/vm.h>
-
+#include <nvgpu/bitops.h>
+#include <nvgpu/nvgpu_mem.h>
+#include <nvgpu/dma.h>
 #include "gk20a/gk20a.h"
 #include "os_linux.h"
 #include "sim.h"
 #include "hw_sim_pci.h"
 
-static inline void sim_writel(struct sim_gk20a_linux *sim_linux, u32 r, u32 v)
+static inline void sim_writel(struct sim_gk20a *sim, u32 r, u32 v)
 {
+	struct sim_gk20a_linux *sim_linux =
+		container_of(sim, struct sim_gk20a_linux, sim);
+
 	writel(v, sim_linux->regs + r);
 }
 
-static inline u32 sim_readl(struct sim_gk20a_linux *sim_linux, u32 r)
+static inline u32 sim_readl(struct sim_gk20a *sim, u32 r)
 {
+	struct sim_gk20a_linux *sim_linux =
+		container_of(sim, struct sim_gk20a_linux, sim);
+
 	return readl(sim_linux->regs + r);
 }
 
-static void kunmap_and_free_iopage(void **kvaddr, struct page **page)
+static int gk20a_alloc_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
 {
-	if (*kvaddr) {
-		kunmap(*kvaddr);
-		*kvaddr = NULL;
-	}
-	if (*page) {
-		__free_page(*page);
-		*page = NULL;
+	int err;
+
+	err = nvgpu_dma_alloc(g, PAGE_SIZE, mem);
+
+	if (err)
+		return err;
+	/*
+	 * create a valid cpu_va mapping
+	 */
+	nvgpu_mem_begin(g, mem);
+
+	return 0;
+}
+
+static void gk20a_free_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
+{
+	if (nvgpu_mem_is_valid(mem)) {
+		/*
+		 * invalidate the cpu_va mapping
+		 */
+		nvgpu_mem_end(g, mem);
+		nvgpu_dma_free(g, mem);
 	}
+
+	memset(mem, 0, sizeof(*mem));
 }
 
 static void gk20a_free_sim_support(struct gk20a *g)
 {
 	struct sim_gk20a_linux *sim_linux =
 		container_of(g->sim, struct sim_gk20a_linux, sim);
-	/* free sim mappings, bfrs */
-	kunmap_and_free_iopage(&sim_linux->send_bfr.kvaddr,
-			       &sim_linux->send_bfr.page);
 
-	kunmap_and_free_iopage(&sim_linux->recv_bfr.kvaddr,
-			       &sim_linux->recv_bfr.page);
-
-	kunmap_and_free_iopage(&sim_linux->msg_bfr.kvaddr,
-			       &sim_linux->msg_bfr.page);
+	gk20a_free_sim_buffer(g, &sim_linux->send_bfr);
+	gk20a_free_sim_buffer(g, &sim_linux->recv_bfr);
+	gk20a_free_sim_buffer(g, &sim_linux->msg_bfr);
 }
 
 static void gk20a_remove_sim_support(struct sim_gk20a *s)
@@ -70,7 +90,7 @@ static void gk20a_remove_sim_support(struct sim_gk20a *s)
 		container_of(g->sim, struct sim_gk20a_linux, sim);
 
 	if (sim_linux->regs)
-		sim_writel(sim_linux, sim_config_r(), sim_config_mode_disabled_v());
+		sim_writel(s, sim_config_r(), sim_config_mode_disabled_v());
 	gk20a_free_sim_support(g);
 
 	if (sim_linux->regs) {
@@ -82,35 +102,6 @@ static void gk20a_remove_sim_support(struct sim_gk20a *s)
 	g->sim = NULL;
 }
 
-static int alloc_and_kmap_iopage(struct gk20a *g,
-				 void **kvaddr,
-				 u64 *phys,
-				 struct page **page)
-{
-	int err = 0;
-	*page = alloc_page(GFP_KERNEL);
-
-	if (!*page) {
-		err = -ENOMEM;
-		nvgpu_err(g, "couldn't allocate io page");
-		goto fail;
-	}
-
-	*kvaddr = kmap(*page);
-	if (!*kvaddr) {
-		err = -ENOMEM;
-		nvgpu_err(g, "couldn't kmap io page");
-		goto fail;
-	}
-	*phys = page_to_phys(*page);
-	return 0;
-
- fail:
-	kunmap_and_free_iopage(kvaddr, page);
-	return err;
-
-}
-
 static inline u32 sim_msg_header_size(void)
 {
 	return 32U;
@@ -120,7 +111,11 @@ static inline u32 *sim_msg_bfr(struct gk20a *g, u32 byte_offset)
 {
 	struct sim_gk20a_linux *sim_linux =
 		container_of(g->sim, struct sim_gk20a_linux, sim);
-	return (u32 *)(sim_linux->msg_bfr.kvaddr + byte_offset);
+	u8 *cpu_va;
+
+	cpu_va = (u8 *)sim_linux->msg_bfr.cpu_va;
+
+	return (u32 *)(cpu_va + byte_offset);
 }
 
 static inline u32 *sim_msg_hdr(struct gk20a *g, u32 byte_offset)
@@ -155,7 +150,11 @@ static u32 *sim_send_ring_bfr(struct gk20a *g, u32 byte_offset)
 {
 	struct sim_gk20a_linux *sim_linux =
 		container_of(g->sim, struct sim_gk20a_linux, sim);
-	return (u32 *)(sim_linux->send_bfr.kvaddr + byte_offset);
+	u8 *cpu_va;
+
+	cpu_va = (u8 *)sim_linux->send_bfr.cpu_va;
+
+	return (u32 *)(cpu_va + byte_offset);
 }
 
 static int rpc_send_message(struct gk20a *g)
@@ -171,10 +170,10 @@ static int rpc_send_message(struct gk20a *g)
 		sim_dma_target_phys_pci_coherent_f() |
 		sim_dma_status_valid_f() |
 		sim_dma_size_4kb_f() |
-		sim_dma_addr_lo_f(sim_linux->msg_bfr.phys >> PAGE_SHIFT);
+		sim_dma_addr_lo_f(nvgpu_mem_get_addr(g, &sim_linux->msg_bfr) >> PAGE_SHIFT);
 
 	*sim_send_ring_bfr(g, dma_hi_offset*sizeof(u32)) =
-		u64_hi32(sim_linux->msg_bfr.phys);
+		u64_hi32(nvgpu_mem_get_addr(g, &sim_linux->msg_bfr));
 
 	*sim_msg_hdr(g, sim_msg_sequence_r()) = g->sim->sequence_base++;
 
@@ -182,7 +181,7 @@ static int rpc_send_message(struct gk20a *g)
 		PAGE_SIZE;
 
 	/* Update the put pointer. This will trap into the host. */
-	sim_writel(sim_linux, sim_send_put_r(), g->sim->send_ring_put);
+	sim_writel(g->sim, sim_send_put_r(), g->sim->send_ring_put);
 
 	return 0;
 }
@@ -191,7 +190,11 @@ static inline u32 *sim_recv_ring_bfr(struct gk20a *g, u32 byte_offset)
 {
 	struct sim_gk20a_linux *sim_linux =
 		container_of(g->sim, struct sim_gk20a_linux, sim);
-	return (u32 *)(sim_linux->recv_bfr.kvaddr + byte_offset);
+	u8 *cpu_va;
+
+	cpu_va = (u8 *)sim_linux->recv_bfr.cpu_va;
+
+	return (u32 *)(cpu_va + byte_offset);
 }
 
 static int rpc_recv_poll(struct gk20a *g)
@@ -202,7 +205,7 @@ static int rpc_recv_poll(struct gk20a *g)
 
 	/* Poll the recv ring get pointer in an infinite loop */
 	do {
-		g->sim->recv_ring_put = sim_readl(sim_linux, sim_recv_put_r());
+		g->sim->recv_ring_put = sim_readl(g->sim, sim_recv_put_r());
 	} while (g->sim->recv_ring_put == g->sim->recv_ring_get);
 
 	/* process all replies */
@@ -219,18 +222,19 @@ static int rpc_recv_poll(struct gk20a *g)
 		recv_phys_addr = (u64)recv_phys_addr_hi << 32 |
 				 (u64)recv_phys_addr_lo << PAGE_SHIFT;
 
-		if (recv_phys_addr != sim_linux->msg_bfr.phys) {
+		if (recv_phys_addr !=
+				nvgpu_mem_get_addr(g, &sim_linux->msg_bfr)) {
 			nvgpu_err(g, "Error in RPC reply");
 			return -EINVAL;
 		}
 
 		/* Update GET pointer */
-		g->sim->recv_ring_get = (g->sim->recv_ring_get + 2*sizeof(u32)) %
-			PAGE_SIZE;
+		g->sim->recv_ring_get = (g->sim->recv_ring_get + 2*sizeof(u32))
+			% PAGE_SIZE;
 
-		sim_writel(sim_linux, sim_recv_get_r(), g->sim->recv_ring_get);
+		sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);
 
-		g->sim->recv_ring_put = sim_readl(sim_linux, sim_recv_put_r());
+		g->sim->recv_ring_put = sim_readl(g->sim, sim_recv_put_r());
 	}
 
 	return 0;
@@ -316,53 +320,41 @@ int nvgpu_pci_init_sim_support(struct gk20a *g)
 	sim_linux->regs = l->regs + sim_r();
 
 	/* allocate sim event/msg buffers */
-	err = alloc_and_kmap_iopage(g, &sim_linux->send_bfr.kvaddr,
-				    &sim_linux->send_bfr.phys,
-				    &sim_linux->send_bfr.page);
-
-	err = err || alloc_and_kmap_iopage(g, &sim_linux->recv_bfr.kvaddr,
-					   &sim_linux->recv_bfr.phys,
-					   &sim_linux->recv_bfr.page);
-
-	err = err || alloc_and_kmap_iopage(g, &sim_linux->msg_bfr.kvaddr,
-					   &sim_linux->msg_bfr.phys,
-					   &sim_linux->msg_bfr.page);
+	err = gk20a_alloc_sim_buffer(g, &sim_linux->send_bfr);
+	err = err || gk20a_alloc_sim_buffer(g, &sim_linux->recv_bfr);
+	err = err || gk20a_alloc_sim_buffer(g, &sim_linux->msg_bfr);
 
-	if (!(sim_linux->send_bfr.kvaddr && sim_linux->recv_bfr.kvaddr &&
-	      sim_linux->msg_bfr.kvaddr)) {
-		nvgpu_err(g, "couldn't allocate all sim buffers");
+	if (err)
 		goto fail;
-	}
-
 	/* mark send ring invalid */
-	sim_writel(sim_linux, sim_send_ring_r(), sim_send_ring_status_invalid_f());
+	sim_writel(g->sim, sim_send_ring_r(), sim_send_ring_status_invalid_f());
 
 	/* read get pointer and make equal to put */
-	g->sim->send_ring_put = sim_readl(sim_linux, sim_send_get_r());
-	sim_writel(sim_linux, sim_send_put_r(), g->sim->send_ring_put);
+	g->sim->send_ring_put = sim_readl(g->sim, sim_send_get_r());
+	sim_writel(g->sim, sim_send_put_r(), g->sim->send_ring_put);
 
 	/* write send ring address and make it valid */
-	phys = sim_linux->send_bfr.phys;
-	sim_writel(sim_linux, sim_send_ring_hi_r(),
+	phys = nvgpu_mem_get_addr(g, &sim_linux->send_bfr);
+	sim_writel(g->sim, sim_send_ring_hi_r(),
 		   sim_send_ring_hi_addr_f(u64_hi32(phys)));
-	sim_writel(sim_linux, sim_send_ring_r(),
+	sim_writel(g->sim, sim_send_ring_r(),
 		   sim_send_ring_status_valid_f() |
 		   sim_send_ring_target_phys_pci_coherent_f() |
 		   sim_send_ring_size_4kb_f() |
 		   sim_send_ring_addr_lo_f(phys >> PAGE_SHIFT));
 
 	/* repeat for recv ring (but swap put,get as roles are opposite) */
-	sim_writel(sim_linux, sim_recv_ring_r(), sim_recv_ring_status_invalid_f());
+	sim_writel(g->sim, sim_recv_ring_r(), sim_recv_ring_status_invalid_f());
 
 	/* read put pointer and make equal to get */
-	g->sim->recv_ring_get = sim_readl(sim_linux, sim_recv_put_r());
-	sim_writel(sim_linux, sim_recv_get_r(), g->sim->recv_ring_get);
+	g->sim->recv_ring_get = sim_readl(g->sim, sim_recv_put_r());
+	sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);
 
 	/* write send ring address and make it valid */
-	phys = sim_linux->recv_bfr.phys;
-	sim_writel(sim_linux, sim_recv_ring_hi_r(),
+	phys = nvgpu_mem_get_addr(g, &sim_linux->recv_bfr);
+	sim_writel(g->sim, sim_recv_ring_hi_r(),
 		   sim_recv_ring_hi_addr_f(u64_hi32(phys)));
-	sim_writel(sim_linux, sim_recv_ring_r(),
+	sim_writel(g->sim, sim_recv_ring_r(),
 		   sim_recv_ring_status_valid_f() |
 		   sim_recv_ring_target_phys_pci_coherent_f() |
 		   sim_recv_ring_size_4kb_f() |