author    Lauri Peltonen <lpeltonen@nvidia.com>    2014-07-17 19:21:34 -0400
committer Dan Willemsen <dwillemsen@nvidia.com>    2015-03-18 15:10:40 -0400
commit    bcf60a22c3e8671468517d34aa37548272455c1f (patch)
tree      c3544f6714c291e611e33a8d0e39c5cb2c795821 /drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
parent    55295c6087ed975be12e92f9be799269aef94678 (diff)
gpu: nvgpu: Add gk20a_fence type
When moving compression state tracking and compbit management ops to
kernel, we need to attach a fence to dma-buf metadata, along with the
compbit state.

To make in-kernel fence management easier, introduce a new gk20a_fence
abstraction. A gk20a_fence may be backed by a semaphore or a syncpoint
(id, value) pair. If the kernel is configured with CONFIG_SYNC, it will
also contain a sync_fence. The gk20a_fence can easily be converted back
to a syncpoint (id, value) pair or sync FD when we need to return it to
user space.

Change gk20a_submit_channel_gpfifo to return a gk20a_fence instead of
nvhost_fence. This is to facilitate work submission initiated from
kernel.

Bug 1509620

Change-Id: I6154764a279dba83f5e91ba9e0cb5e227ca08e1b
Signed-off-by: Lauri Peltonen <lpeltonen@nvidia.com>
Reviewed-on: http://git-master/r/439846
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
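The gk20a_fence definition itself lands in other files of this change, not in the header below. As orientation only, a minimal sketch of the shape the commit message describes might look like this; the field names and layout are assumptions drawn from the message, not the actual definition:

	#include <linux/types.h>

	struct gk20a_semaphore;
	struct sync_fence;

	/* Hedged sketch: a fence backed by either a semaphore or a
	 * syncpoint (id, value) pair, optionally wrapping a sync_fence
	 * when the kernel is configured with CONFIG_SYNC.  Names are
	 * illustrative assumptions, not the real layout. */
	struct gk20a_fence {
		/* Valid for semaphore-backed fences: */
		struct gk20a_semaphore *semaphore;

		/* Valid for syncpoint-backed fences: */
		u32 syncpt_id;
		u32 syncpt_value;

	#ifdef CONFIG_SYNC
		/* Convertible to a sync FD for user space: */
		struct sync_fence *sync_fence;
	#endif
	};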
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h  74
1 file changed, 28 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
index baa4a151..a3cd8208 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
@@ -24,34 +24,28 @@ struct gk20a_channel_sync;
 struct priv_cmd_entry;
 struct channel_gk20a;
 struct gk20a_semaphore;
-
-struct gk20a_channel_fence {
-	bool valid;
-	bool wfi; /* was issued with preceding wfi */
-	u32 thresh; /* syncpoint fences only */
-	struct gk20a_semaphore *semaphore; /* semaphore fences only */
-};
+struct gk20a_fence;
 
 struct gk20a_channel_sync {
-	/* CPU wait for a fence returned by incr_syncpt() or incr_fd(). */
-	int (*wait_cpu)(struct gk20a_channel_sync *s,
-			struct gk20a_channel_fence *fence,
-			int timeout);
-
-	/* Test whether a fence returned by incr_syncpt() or incr_fd() is
-	 * expired. */
-	bool (*is_expired)(struct gk20a_channel_sync *s,
-			   struct gk20a_channel_fence *fence);
-
-	/* Generate a gpu wait cmdbuf from syncpoint. */
+	/* Generate a gpu wait cmdbuf from syncpoint.
+	 * Returns
+	 *  - a gpu cmdbuf that performs the wait when executed,
+	 *  - possibly a helper fence that the caller must hold until the
+	 *    cmdbuf is executed.
+	 */
 	int (*wait_syncpt)(struct gk20a_channel_sync *s, u32 id, u32 thresh,
 			   struct priv_cmd_entry **entry,
-			   struct gk20a_channel_fence *fence);
+			   struct gk20a_fence **fence);
 
-	/* Generate a gpu wait cmdbuf from sync fd. */
+	/* Generate a gpu wait cmdbuf from sync fd.
+	 * Returns
+	 *  - a gpu cmdbuf that performs the wait when executed,
+	 *  - possibly a helper fence that the caller must hold until the
+	 *    cmdbuf is executed.
+	 */
 	int (*wait_fd)(struct gk20a_channel_sync *s, int fd,
 		       struct priv_cmd_entry **entry,
-		       struct gk20a_channel_fence *fence);
+		       struct gk20a_fence **fence);
 
 	/* Increment syncpoint/semaphore.
 	 * Returns
@@ -60,7 +54,7 @@ struct gk20a_channel_sync {
 	 */
 	int (*incr)(struct gk20a_channel_sync *s,
 		    struct priv_cmd_entry **entry,
-		    struct gk20a_channel_fence *fence);
+		    struct gk20a_fence **fence);
 
 	/* Increment syncpoint/semaphore, preceded by a wfi.
 	 * Returns
@@ -69,38 +63,29 @@ struct gk20a_channel_sync {
 	 */
 	int (*incr_wfi)(struct gk20a_channel_sync *s,
 			struct priv_cmd_entry **entry,
-			struct gk20a_channel_fence *fence);
-
-	/* Increment syncpoint, so that the returned fence represents
-	 * work completion (may need wfi) and can be returned to user space.
-	 * Returns
-	 *  - a gpu cmdbuf that performs the increment when executed,
-	 *  - a fence that can be passed to wait_cpu() and is_expired(),
-	 *  - a syncpoint id/value pair that can be returned to user space.
-	 */
-	int (*incr_user_syncpt)(struct gk20a_channel_sync *s,
-				struct priv_cmd_entry **entry,
-				struct gk20a_channel_fence *fence,
-				bool wfi,
-				u32 *id, u32 *thresh);
+			struct gk20a_fence **fence);
 
 	/* Increment syncpoint/semaphore, so that the returned fence represents
 	 * work completion (may need wfi) and can be returned to user space.
 	 * Returns
 	 *  - a gpu cmdbuf that performs the increment when executed,
 	 *  - a fence that can be passed to wait_cpu() and is_expired(),
-	 *  - a sync fd that can be returned to user space.
+	 *  - a gk20a_fence that signals when the incr has happened.
 	 */
-	int (*incr_user_fd)(struct gk20a_channel_sync *s,
+	int (*incr_user)(struct gk20a_channel_sync *s,
 			 int wait_fence_fd,
 			 struct priv_cmd_entry **entry,
-			 struct gk20a_channel_fence *fence,
-			 bool wfi,
-			 int *fd);
+			 struct gk20a_fence **fence,
+			 bool wfi);
 
 	/* Reset the channel syncpoint/semaphore. */
 	void (*set_min_eq_max)(struct gk20a_channel_sync *s);
 
+	/* Signals the sync timeline (if owned by the gk20a_channel_sync layer).
+	 * This should be called when we notice that a gk20a_fence is
+	 * expired. */
+	void (*signal_timeline)(struct gk20a_channel_sync *s);
+
 	/* flag to set sync destroy aggressiveness */
 	bool aggressive_destroy;
 
@@ -110,7 +95,4 @@ struct gk20a_channel_sync {
 
 struct gk20a_channel_sync *gk20a_channel_sync_create(struct channel_gk20a *c);
 
-void gk20a_channel_fence_close(struct gk20a_channel_fence *f);
-void gk20a_channel_fence_dup(struct gk20a_channel_fence *from,
-			     struct gk20a_channel_fence *to);
 #endif
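As a usage illustration of the reworked ops table, here is a hedged sketch of how a kernel submit path might obtain a post-fence; the gk20a_fence_put() helper, the -1 "no pre-fence" convention, and the error handling are assumptions based on the commit message, not code from this diff:

	/* Illustrative only: exercises the incr_user() op declared above.
	 * Anything not declared in channel_sync_gk20a.h is an assumed name. */
	static int example_submit_incr(struct gk20a_channel_sync *sync)
	{
		struct priv_cmd_entry *incr_cmd = NULL;
		struct gk20a_fence *post_fence = NULL;
		int err;

		/* Ask the backend (syncpoint- or semaphore-based) for an
		 * increment cmdbuf plus a fence that signals when the
		 * increment has executed on the GPU. */
		err = sync->incr_user(sync, -1 /* assumed: no pre-fence fd */,
				      &incr_cmd, &post_fence, false /* no wfi */);
		if (err)
			return err;

		/* ... append incr_cmd to the channel gpfifo and submit ... */

		/* post_fence can now be converted back to a syncpoint
		 * (id, value) pair or a sync FD for user space.  Drop our
		 * reference via an assumed refcounting helper. */
		gk20a_fence_put(post_fence);
		return 0;
	}

The design point this illustrates: callers now receive one opaque gk20a_fence regardless of whether the channel syncs via syncpoints or semaphores, instead of choosing between the old incr_user_syncpt and incr_user_fd variants.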