author     Chris Wilson <chris@chris-wilson.co.uk>	2018-10-01 10:47:54 -0400
committer  Chris Wilson <chris@chris-wilson.co.uk>	2018-10-01 15:34:21 -0400
commit     e2f3496e93be3238de2e2e6bfc83b3a83c084ce5 (patch)
tree       d231482d8b3f4a2343484673692754d2ecd2c7d8 /drivers/gpu/drm/i915/i915_request.c
parent     b16c765122f987056e1dc9ef6c214571bb5bd694 (diff)
drm/i915: Pull scheduling under standalone lock
Currently, the backend scheduling code abuses struct_mutex in order to
have a global lock to manipulate a temporary list (without widespread
allocation) and to protect against list modifications. This is an
extraneous coupling to struct_mutex and, further, cannot extend beyond
the local device.
Pull all the code that needs to be under the one true lock into
i915_scheduler.c, and make it so.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181001144755.7978-2-chris@chris-wilson.co.uk
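As context for the change described above, the pattern is to serialise the
signalers/waiters list manipulation with a single scheduler-private lock
rather than the device-global struct_mutex. The fragment below is a minimal
sketch of that shape only, not the code this patch adds to i915_scheduler.c;
the lock name sketch_schedule_lock and the helper name are invented for
illustration.

/*
 * Illustrative sketch only -- not the code added by this patch. It shows
 * the general shape of guarding the dependency lists with a
 * scheduler-private lock instead of the device-global struct_mutex.
 * The lock and helper names here are assumptions for the example.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(sketch_schedule_lock);

static void
sketch_sched_node_add_dependency(struct i915_sched_node *node,
				 struct i915_sched_node *signal,
				 struct i915_dependency *dep,
				 unsigned long flags)
{
	/* Serialise the list surgery with the scheduler's own lock. */
	spin_lock(&sketch_schedule_lock);

	INIT_LIST_HEAD(&dep->dfs_link);
	list_add(&dep->wait_link, &signal->waiters_list);
	list_add(&dep->signal_link, &node->signalers_list);
	dep->signaler = signal;
	dep->flags = flags;

	spin_unlock(&sketch_schedule_lock);
}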
Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_request.c	85
1 file changed, 0 insertions, 85 deletions
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 56140ca054e8..d73ad490a261 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request)
 	spin_unlock(&file_priv->mm.lock);
 }
 
-static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
-{
-	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
-}
-
-static void
-i915_dependency_free(struct drm_i915_private *i915,
-		     struct i915_dependency *dep)
-{
-	kmem_cache_free(i915->dependencies, dep);
-}
-
-static void
-__i915_sched_node_add_dependency(struct i915_sched_node *node,
-				 struct i915_sched_node *signal,
-				 struct i915_dependency *dep,
-				 unsigned long flags)
-{
-	INIT_LIST_HEAD(&dep->dfs_link);
-	list_add(&dep->wait_link, &signal->waiters_list);
-	list_add(&dep->signal_link, &node->signalers_list);
-	dep->signaler = signal;
-	dep->flags = flags;
-}
-
-static int
-i915_sched_node_add_dependency(struct drm_i915_private *i915,
-			       struct i915_sched_node *node,
-			       struct i915_sched_node *signal)
-{
-	struct i915_dependency *dep;
-
-	dep = i915_dependency_alloc(i915);
-	if (!dep)
-		return -ENOMEM;
-
-	__i915_sched_node_add_dependency(node, signal, dep,
-					 I915_DEPENDENCY_ALLOC);
-	return 0;
-}
-
-static void
-i915_sched_node_fini(struct drm_i915_private *i915,
-		     struct i915_sched_node *node)
-{
-	struct i915_dependency *dep, *tmp;
-
-	GEM_BUG_ON(!list_empty(&node->link));
-
-	/*
-	 * Everyone we depended upon (the fences we wait to be signaled)
-	 * should retire before us and remove themselves from our list.
-	 * However, retirement is run independently on each timeline and
-	 * so we may be called out-of-order.
-	 */
-	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
-		GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
-		GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-		list_del(&dep->wait_link);
-		if (dep->flags & I915_DEPENDENCY_ALLOC)
-			i915_dependency_free(i915, dep);
-	}
-
-	/* Remove ourselves from everyone who depends upon us */
-	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
-		GEM_BUG_ON(dep->signaler != node);
-		GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-		list_del(&dep->signal_link);
-		if (dep->flags & I915_DEPENDENCY_ALLOC)
-			i915_dependency_free(i915, dep);
-	}
-}
-
-static void
-i915_sched_node_init(struct i915_sched_node *node)
-{
-	INIT_LIST_HEAD(&node->signalers_list);
-	INIT_LIST_HEAD(&node->waiters_list);
-	INIT_LIST_HEAD(&node->link);
-	node->attr.priority = I915_PRIORITY_INVALID;
-}
-
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 {
 	struct intel_engine_cs *engine;