Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c | 52
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
index 705eccaa..ffd15a37 100644
--- a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
@@ -47,7 +47,7 @@ struct gk20a_ctxsw_dev {
 
 	atomic_t vma_ref;
 
-	struct mutex write_lock;
+	struct nvgpu_mutex write_lock;
 };
 
 
@@ -83,16 +83,16 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
 	gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw,
 		"filp=%p buf=%p size=%zu", filp, buf, size);
 
-	mutex_lock(&dev->write_lock);
+	nvgpu_mutex_acquire(&dev->write_lock);
 	while (ring_is_empty(hdr)) {
-		mutex_unlock(&dev->write_lock);
+		nvgpu_mutex_release(&dev->write_lock);
 		if (filp->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 		err = wait_event_interruptible(dev->readout_wq,
 			!ring_is_empty(hdr));
 		if (err)
 			return err;
-		mutex_lock(&dev->write_lock);
+		nvgpu_mutex_acquire(&dev->write_lock);
 	}
 
 	while (size >= sizeof(struct nvgpu_ctxsw_trace_entry)) {
@@ -101,7 +101,7 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
 
 		if (copy_to_user(entry, &dev->ents[hdr->read_idx],
 			sizeof(*entry))) {
-			mutex_unlock(&dev->write_lock);
+			nvgpu_mutex_release(&dev->write_lock);
 			return -EFAULT;
 		}
 
@@ -118,7 +118,7 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
 		hdr->read_idx);
 
 	*off = hdr->read_idx;
-	mutex_unlock(&dev->write_lock);
+	nvgpu_mutex_release(&dev->write_lock);
 
 	return copied;
 }
@@ -126,9 +126,9 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
 static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev)
 {
 	gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace enabled");
-	mutex_lock(&dev->write_lock);
+	nvgpu_mutex_acquire(&dev->write_lock);
 	dev->write_enabled = true;
-	mutex_unlock(&dev->write_lock);
+	nvgpu_mutex_release(&dev->write_lock);
 	dev->g->ops.fecs_trace.enable(dev->g);
 	return 0;
 }
@@ -137,9 +137,9 @@ static int gk20a_ctxsw_dev_ioctl_trace_disable(struct gk20a_ctxsw_dev *dev)
 {
 	gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace disabled");
 	dev->g->ops.fecs_trace.disable(dev->g);
-	mutex_lock(&dev->write_lock);
+	nvgpu_mutex_acquire(&dev->write_lock);
 	dev->write_enabled = false;
-	mutex_unlock(&dev->write_lock);
+	nvgpu_mutex_release(&dev->write_lock);
 	return 0;
 }
 
@@ -211,9 +211,9 @@ static int gk20a_ctxsw_dev_ioctl_ring_setup(struct gk20a_ctxsw_dev *dev,
 	if (size > GK20A_CTXSW_TRACE_MAX_VM_RING_SIZE)
 		return -EINVAL;
 
-	mutex_lock(&dev->write_lock);
+	nvgpu_mutex_acquire(&dev->write_lock);
 	ret = gk20a_ctxsw_dev_alloc_buffer(dev, size);
-	mutex_unlock(&dev->write_lock);
+	nvgpu_mutex_release(&dev->write_lock);
 
 	return ret;
 }
@@ -223,9 +223,9 @@ static int gk20a_ctxsw_dev_ioctl_set_filter(struct gk20a_ctxsw_dev *dev,
 {
 	struct gk20a *g = dev->g;
 
-	mutex_lock(&dev->write_lock);
+	nvgpu_mutex_acquire(&dev->write_lock);
 	dev->filter = args->filter;
-	mutex_unlock(&dev->write_lock);
+	nvgpu_mutex_release(&dev->write_lock);
 
 	if (g->ops.fecs_trace.set_filter)
 		g->ops.fecs_trace.set_filter(g, &dev->filter);
@@ -235,9 +235,9 @@ static int gk20a_ctxsw_dev_ioctl_set_filter(struct gk20a_ctxsw_dev *dev,
 static int gk20a_ctxsw_dev_ioctl_get_filter(struct gk20a_ctxsw_dev *dev,
 	struct nvgpu_ctxsw_trace_filter_args *args)
 {
-	mutex_lock(&dev->write_lock);
+	nvgpu_mutex_acquire(&dev->write_lock);
 	args->filter = dev->filter;
-	mutex_unlock(&dev->write_lock);
+	nvgpu_mutex_release(&dev->write_lock);
 
 	return 0;
 }
@@ -293,7 +293,7 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp)
 
 	/* Allow only one user for this device */
 	dev = &trace->devs[vmid];
-	mutex_lock(&dev->write_lock);
+	nvgpu_mutex_acquire(&dev->write_lock);
 	if (dev->hdr) {
 		err = -EBUSY;
 		goto done;
@@ -321,7 +321,7 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp)
 	}
 
 done:
-	mutex_unlock(&dev->write_lock);
+	nvgpu_mutex_release(&dev->write_lock);
 
 idle:
 	gk20a_idle(g->dev);
@@ -338,9 +338,9 @@ int gk20a_ctxsw_dev_release(struct inode *inode, struct file *filp)
 
 	g->ops.fecs_trace.disable(g);
 
-	mutex_lock(&dev->write_lock);
+	nvgpu_mutex_acquire(&dev->write_lock);
 	dev->write_enabled = false;
-	mutex_unlock(&dev->write_lock);
+	nvgpu_mutex_release(&dev->write_lock);
 
 	if (dev->hdr) {
 		dev->g->ops.fecs_trace.free_user_buffer(dev->g);
@@ -414,11 +414,11 @@ unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait)
 
 	gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "");
 
-	mutex_lock(&dev->write_lock);
+	nvgpu_mutex_acquire(&dev->write_lock);
 	poll_wait(filp, &dev->readout_wq, wait);
 	if (!ring_is_empty(hdr))
 		mask |= POLLIN | POLLRDNORM;
-	mutex_unlock(&dev->write_lock);
+	nvgpu_mutex_release(&dev->write_lock);
 
 	return mask;
 }
@@ -482,7 +482,7 @@ static int gk20a_ctxsw_init_devs(struct gk20a *g)
 		dev->hdr = NULL;
 		dev->write_enabled = false;
 		init_waitqueue_head(&dev->readout_wq);
-		mutex_init(&dev->write_lock);
+		nvgpu_mutex_init(&dev->write_lock);
 		atomic_set(&dev->vma_ref, 0);
 		dev++;
 	}
@@ -567,7 +567,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw,
 		"dev=%p hdr=%p", dev, hdr);
 
-	mutex_lock(&dev->write_lock);
+	nvgpu_mutex_acquire(&dev->write_lock);
 
 	if (unlikely(!hdr)) {
 		/* device has been released */
@@ -621,7 +621,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
 	gk20a_dbg(gpu_dbg_ctxsw, "added: read=%d write=%d len=%d",
 		hdr->read_idx, hdr->write_idx, ring_len(hdr));
 
-	mutex_unlock(&dev->write_lock);
+	nvgpu_mutex_release(&dev->write_lock);
 	return ret;
 
 disable:
@@ -638,7 +638,7 @@ filter:
 		entry->tag, entry->timestamp, reason);
 
 done:
-	mutex_unlock(&dev->write_lock);
+	nvgpu_mutex_release(&dev->write_lock);
 	return ret;
 }
 
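Note: the diff only swaps the locking API; the calling pattern is unchanged. For reference, a minimal sketch of what the wrapper this file now uses could look like on a Linux build is shown below. The names nvgpu_mutex, nvgpu_mutex_init, nvgpu_mutex_acquire and nvgpu_mutex_release match the calls in the diff, but the struct layout and the int return from init are assumptions for illustration; the real definitions live in the nvgpu OS-abstraction headers, not in this file.

/* Hypothetical sketch (assumption: on Linux, nvgpu_mutex is a thin
 * wrapper over the kernel's struct mutex). */
#include <linux/mutex.h>

struct nvgpu_mutex {
	struct mutex mutex;	/* assumed backing primitive on Linux */
};

static inline int nvgpu_mutex_init(struct nvgpu_mutex *mutex)
{
	mutex_init(&mutex->mutex);
	return 0;	/* callers such as gk20a_ctxsw_init_devs() ignore the result */
}

static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
{
	mutex_lock(&mutex->mutex);
}

static inline void nvgpu_mutex_release(struct nvgpu_mutex *mutex)
{
	mutex_unlock(&mutex->mutex);
}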