path: root/drivers/gpu/nvgpu/gk20a/sim_gk20a.c
author     Terje Bergstrom <tbergstrom@nvidia.com>    2017-03-30 10:44:03 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2017-04-10 22:04:19 -0400
commit     3ba374a5d94f8c2067731155afaf79f03e6c390c (patch)
tree       d8a2bd0d52b1e8862510aedeb7529944c0b7e28e /drivers/gpu/nvgpu/gk20a/sim_gk20a.c
parent     2be51206af88aba6662cdd9de5bd6c18989bbcbd (diff)
gpu: nvgpu: gk20a: Use new error macro
gk20a_err() and gk20a_warn() require a struct device pointer, which is not portable across operating systems. The new nvgpu_err() and nvgpu_warn() macros take a struct gk20a pointer. Convert code to use the more portable macros.

JIRA NVGPU-16

Change-Id: Ia51f36d94c5ce57a5a0ab83b3c83a6bce09e2d5c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1331694
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
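To make the change concrete, the conversion applied throughout the diff below is a straight swap of the device-pointer logging call for the gk20a-pointer one. A minimal sketch, assuming only the calls visible in this patch; the wrapper function and its name are hypothetical:

/*
 * Hypothetical illustration of the logging conversion; only the two
 * logging calls and the struct gk20a parameter come from the patch.
 */
#include <linux/device.h>	/* dev_err() */
#include <nvgpu/log.h>		/* nvgpu_err()/nvgpu_warn() */
#include "gk20a.h"		/* struct gk20a */

static void report_io_page_failure(struct gk20a *g, struct device *dev)
{
	/* Before: tied to the Linux struct device, not portable */
	dev_err(dev, "couldn't allocate io page\n");

	/* After: takes the OS-agnostic struct gk20a pointer instead */
	nvgpu_err(g, "couldn't allocate io page\n");
}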
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/sim_gk20a.c')
-rw-r--r--    drivers/gpu/nvgpu/gk20a/sim_gk20a.c    26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/sim_gk20a.c b/drivers/gpu/nvgpu/gk20a/sim_gk20a.c
index 76d29ee5..8951d5a4 100644
--- a/drivers/gpu/nvgpu/gk20a/sim_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sim_gk20a.c
@@ -20,6 +20,8 @@
 
 #include "gk20a.h"
 
+#include <nvgpu/log.h>
+
 #include <nvgpu/hw/gk20a/hw_sim_gk20a.h>
 
 static inline void sim_writel(struct gk20a *g, u32 r, u32 v)
@@ -65,7 +67,7 @@ static void gk20a_remove_sim_support(struct sim_gk20a *s)
 	gk20a_free_sim_support(g);
 }
 
-static int alloc_and_kmap_iopage(struct device *d,
+static int alloc_and_kmap_iopage(struct gk20a *g,
 				 void **kvaddr,
 				 u64 *phys,
 				 struct page **page)
@@ -75,14 +77,14 @@ static int alloc_and_kmap_iopage(struct device *d,
 
 	if (!*page) {
 		err = -ENOMEM;
-		dev_err(d, "couldn't allocate io page\n");
+		nvgpu_err(g, "couldn't allocate io page\n");
 		goto fail;
 	}
 
 	*kvaddr = kmap(*page);
 	if (!*kvaddr) {
 		err = -ENOMEM;
-		dev_err(d, "couldn't kmap io page\n");
+		nvgpu_err(g, "couldn't kmap io page\n");
 		goto fail;
 	}
 	*phys = page_to_phys(*page);
@@ -105,27 +107,27 @@ int gk20a_init_sim_support(struct platform_device *pdev)
 	g->sim.regs = gk20a_ioremap_resource(pdev, GK20A_SIM_IORESOURCE_MEM,
 					     &g->sim.reg_mem);
 	if (IS_ERR(g->sim.regs)) {
-		dev_err(dev, "failed to remap gk20a sim regs\n");
+		nvgpu_err(g, "failed to remap gk20a sim regs\n");
 		err = PTR_ERR(g->sim.regs);
 		goto fail;
 	}
 
 	/* allocate sim event/msg buffers */
-	err = alloc_and_kmap_iopage(dev, &g->sim.send_bfr.kvaddr,
+	err = alloc_and_kmap_iopage(g, &g->sim.send_bfr.kvaddr,
 				    &g->sim.send_bfr.phys,
 				    &g->sim.send_bfr.page);
 
-	err = err || alloc_and_kmap_iopage(dev, &g->sim.recv_bfr.kvaddr,
+	err = err || alloc_and_kmap_iopage(g, &g->sim.recv_bfr.kvaddr,
 					   &g->sim.recv_bfr.phys,
 					   &g->sim.recv_bfr.page);
 
-	err = err || alloc_and_kmap_iopage(dev, &g->sim.msg_bfr.kvaddr,
+	err = err || alloc_and_kmap_iopage(g, &g->sim.msg_bfr.kvaddr,
 					   &g->sim.msg_bfr.phys,
 					   &g->sim.msg_bfr.page);
 
 	if (!(g->sim.send_bfr.kvaddr && g->sim.recv_bfr.kvaddr &&
 	      g->sim.msg_bfr.kvaddr)) {
-		dev_err(dev, "couldn't allocate all sim buffers\n");
+		nvgpu_err(g, "couldn't allocate all sim buffers\n");
 		goto fail;
 	}
 
@@ -275,7 +277,7 @@ static int rpc_recv_poll(struct gk20a *g)
 		(u64)recv_phys_addr_lo << PAGE_SHIFT;
 
 	if (recv_phys_addr != g->sim.msg_bfr.phys) {
-		dev_err(dev_from_gk20a(g), "%s Error in RPC reply\n",
+		nvgpu_err(g, "%s Error in RPC reply\n",
 			__func__);
 		return -1;
 	}
@@ -302,21 +304,21 @@ static int issue_rpc_and_wait(struct gk20a *g)
 
 	err = rpc_send_message(g);
 	if (err) {
-		dev_err(dev_from_gk20a(g), "%s failed rpc_send_message\n",
+		nvgpu_err(g, "%s failed rpc_send_message\n",
 			__func__);
 		return err;
 	}
 
 	err = rpc_recv_poll(g);
 	if (err) {
-		dev_err(dev_from_gk20a(g), "%s failed rpc_recv_poll\n",
+		nvgpu_err(g, "%s failed rpc_recv_poll\n",
 			__func__);
 		return err;
 	}
 
 	/* Now check if RPC really succeeded */
 	if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) {
-		dev_err(dev_from_gk20a(g), "%s received failed status!\n",
+		nvgpu_err(g, "%s received failed status!\n",
 			__func__);
 		return -(*sim_msg_hdr(g, sim_msg_result_r()));
 	}