author     Christian König <christian.koenig@amd.com>   2019-08-11 04:06:32 -0400
committer  Christian König <christian.koenig@amd.com>   2019-08-13 03:09:30 -0400
commit     52791eeec1d9f4a7e7fe08aaba0b1553149d93bc (patch)
tree       ea1ff9cb7040e20e2cedad4ca8f7d2994ce3d0a8
parent     5d344f58da760b226562e7d5199fb73294eb93fa (diff)
dma-buf: rename reservation_object to dma_resv
Be more consistent with the naming of the other DMA-buf objects.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/323401/
104 files changed, 523 insertions(+), 550 deletions(-)
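
For orientation, the rename is purely mechanical: struct reservation_object becomes struct dma_resv, <linux/reservation.h> becomes <linux/dma-resv.h>, and each reservation_object_* helper becomes the matching dma_resv_* helper. The short sketch below is not part of this patch (the helper name example_add_shared_fence is invented for illustration); it only shows how a typical driver-side fence update reads with the new names, using functions that appear in this diff:

/* Hypothetical example only; illustrates the renamed API, not code from this patch. */
#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

static int example_add_shared_fence(struct dma_resv *resv, struct dma_fence *fence)
{
	int r;

	/* was reservation_object_lock() */
	r = dma_resv_lock(resv, NULL);
	if (r)
		return r;

	/* was reservation_object_reserve_shared() */
	r = dma_resv_reserve_shared(resv, 1);
	if (!r)
		/* was reservation_object_add_shared_fence() */
		dma_resv_add_shared_fence(resv, fence);

	/* was reservation_object_unlock() */
	dma_resv_unlock(resv);
	return r;
}

Locking rules and fence semantics are unchanged; only the identifiers move.
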
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index e8c7310cb800..dcfb01e7c6f4 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,6 @@
1 | # SPDX-License-Identifier: GPL-2.0-only | 1 | # SPDX-License-Identifier: GPL-2.0-only |
2 | obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ | 2 | obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ |
3 | reservation.o seqno-fence.o | 3 | dma-resv.o seqno-fence.o |
4 | obj-$(CONFIG_SYNC_FILE) += sync_file.o | 4 | obj-$(CONFIG_SYNC_FILE) += sync_file.o |
5 | obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o | 5 | obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o |
6 | obj-$(CONFIG_UDMABUF) += udmabuf.o | 6 | obj-$(CONFIG_UDMABUF) += udmabuf.o |
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 67510f2be8bc..b3400d6524ab 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -21,7 +21,7 @@
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
23 | #include <linux/poll.h> | 23 | #include <linux/poll.h> |
24 | #include <linux/reservation.h> | 24 | #include <linux/dma-resv.h> |
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/mount.h> | 26 | #include <linux/mount.h> |
27 | #include <linux/pseudo_fs.h> | 27 | #include <linux/pseudo_fs.h> |
@@ -104,8 +104,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
104 | list_del(&dmabuf->list_node); | 104 | list_del(&dmabuf->list_node); |
105 | mutex_unlock(&db_list.lock); | 105 | mutex_unlock(&db_list.lock); |
106 | 106 | ||
107 | if (dmabuf->resv == (struct reservation_object *)&dmabuf[1]) | 107 | if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) |
108 | reservation_object_fini(dmabuf->resv); | 108 | dma_resv_fini(dmabuf->resv); |
109 | 109 | ||
110 | module_put(dmabuf->owner); | 110 | module_put(dmabuf->owner); |
111 | kfree(dmabuf); | 111 | kfree(dmabuf); |
@@ -165,7 +165,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
165 | * To support cross-device and cross-driver synchronization of buffer access | 165 | * To support cross-device and cross-driver synchronization of buffer access |
166 | * implicit fences (represented internally in the kernel with &struct fence) can | 166 | * implicit fences (represented internally in the kernel with &struct fence) can |
167 | * be attached to a &dma_buf. The glue for that and a few related things are | 167 | * be attached to a &dma_buf. The glue for that and a few related things are |
168 | * provided in the &reservation_object structure. | 168 | * provided in the &dma_resv structure. |
169 | * | 169 | * |
170 | * Userspace can query the state of these implicitly tracked fences using poll() | 170 | * Userspace can query the state of these implicitly tracked fences using poll() |
171 | * and related system calls: | 171 | * and related system calls: |
@@ -195,8 +195,8 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
195 | static __poll_t dma_buf_poll(struct file *file, poll_table *poll) | 195 | static __poll_t dma_buf_poll(struct file *file, poll_table *poll) |
196 | { | 196 | { |
197 | struct dma_buf *dmabuf; | 197 | struct dma_buf *dmabuf; |
198 | struct reservation_object *resv; | 198 | struct dma_resv *resv; |
199 | struct reservation_object_list *fobj; | 199 | struct dma_resv_list *fobj; |
200 | struct dma_fence *fence_excl; | 200 | struct dma_fence *fence_excl; |
201 | __poll_t events; | 201 | __poll_t events; |
202 | unsigned shared_count; | 202 | unsigned shared_count; |
@@ -214,7 +214,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
214 | return 0; | 214 | return 0; |
215 | 215 | ||
216 | rcu_read_lock(); | 216 | rcu_read_lock(); |
217 | reservation_object_fences(resv, &fence_excl, &fobj, &shared_count); | 217 | dma_resv_fences(resv, &fence_excl, &fobj, &shared_count); |
218 | if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) { | 218 | if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) { |
219 | struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; | 219 | struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; |
220 | __poll_t pevents = EPOLLIN; | 220 | __poll_t pevents = EPOLLIN; |
@@ -493,13 +493,13 @@ err_alloc_file:
493 | struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) | 493 | struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) |
494 | { | 494 | { |
495 | struct dma_buf *dmabuf; | 495 | struct dma_buf *dmabuf; |
496 | struct reservation_object *resv = exp_info->resv; | 496 | struct dma_resv *resv = exp_info->resv; |
497 | struct file *file; | 497 | struct file *file; |
498 | size_t alloc_size = sizeof(struct dma_buf); | 498 | size_t alloc_size = sizeof(struct dma_buf); |
499 | int ret; | 499 | int ret; |
500 | 500 | ||
501 | if (!exp_info->resv) | 501 | if (!exp_info->resv) |
502 | alloc_size += sizeof(struct reservation_object); | 502 | alloc_size += sizeof(struct dma_resv); |
503 | else | 503 | else |
504 | /* prevent &dma_buf[1] == dma_buf->resv */ | 504 | /* prevent &dma_buf[1] == dma_buf->resv */ |
505 | alloc_size += 1; | 505 | alloc_size += 1; |
@@ -531,8 +531,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
531 | dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; | 531 | dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; |
532 | 532 | ||
533 | if (!resv) { | 533 | if (!resv) { |
534 | resv = (struct reservation_object *)&dmabuf[1]; | 534 | resv = (struct dma_resv *)&dmabuf[1]; |
535 | reservation_object_init(resv); | 535 | dma_resv_init(resv); |
536 | } | 536 | } |
537 | dmabuf->resv = resv; | 537 | dmabuf->resv = resv; |
538 | 538 | ||
@@ -896,11 +896,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
896 | { | 896 | { |
897 | bool write = (direction == DMA_BIDIRECTIONAL || | 897 | bool write = (direction == DMA_BIDIRECTIONAL || |
898 | direction == DMA_TO_DEVICE); | 898 | direction == DMA_TO_DEVICE); |
899 | struct reservation_object *resv = dmabuf->resv; | 899 | struct dma_resv *resv = dmabuf->resv; |
900 | long ret; | 900 | long ret; |
901 | 901 | ||
902 | /* Wait on any implicit rendering fences */ | 902 | /* Wait on any implicit rendering fences */ |
903 | ret = reservation_object_wait_timeout_rcu(resv, write, true, | 903 | ret = dma_resv_wait_timeout_rcu(resv, write, true, |
904 | MAX_SCHEDULE_TIMEOUT); | 904 | MAX_SCHEDULE_TIMEOUT); |
905 | if (ret < 0) | 905 | if (ret < 0) |
906 | return ret; | 906 | return ret; |
@@ -1141,8 +1141,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
1141 | int ret; | 1141 | int ret; |
1142 | struct dma_buf *buf_obj; | 1142 | struct dma_buf *buf_obj; |
1143 | struct dma_buf_attachment *attach_obj; | 1143 | struct dma_buf_attachment *attach_obj; |
1144 | struct reservation_object *robj; | 1144 | struct dma_resv *robj; |
1145 | struct reservation_object_list *fobj; | 1145 | struct dma_resv_list *fobj; |
1146 | struct dma_fence *fence; | 1146 | struct dma_fence *fence; |
1147 | int count = 0, attach_count, shared_count, i; | 1147 | int count = 0, attach_count, shared_count, i; |
1148 | size_t size = 0; | 1148 | size_t size = 0; |
@@ -1175,7 +1175,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
1175 | 1175 | ||
1176 | robj = buf_obj->resv; | 1176 | robj = buf_obj->resv; |
1177 | rcu_read_lock(); | 1177 | rcu_read_lock(); |
1178 | reservation_object_fences(robj, &fence, &fobj, &shared_count); | 1178 | dma_resv_fences(robj, &fence, &fobj, &shared_count); |
1179 | rcu_read_unlock(); | 1179 | rcu_read_unlock(); |
1180 | 1180 | ||
1181 | if (fence) | 1181 | if (fence) |
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 59ac96ec7ba8..8025a891d3e9 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -60,7 +60,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
60 | * | 60 | * |
61 | * - Then there's also implicit fencing, where the synchronization points are | 61 | * - Then there's also implicit fencing, where the synchronization points are |
62 | * implicitly passed around as part of shared &dma_buf instances. Such | 62 | * implicitly passed around as part of shared &dma_buf instances. Such |
63 | * implicit fences are stored in &struct reservation_object through the | 63 | * implicit fences are stored in &struct dma_resv through the |
64 | * &dma_buf.resv pointer. | 64 | * &dma_buf.resv pointer. |
65 | */ | 65 | */ |
66 | 66 | ||
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/dma-resv.c
index f7f4a0858c2a..f5142683c851 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -32,7 +32,7 @@
32 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 32 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/reservation.h> | 35 | #include <linux/dma-resv.h> |
36 | #include <linux/export.h> | 36 | #include <linux/export.h> |
37 | 37 | ||
38 | /** | 38 | /** |
@@ -50,16 +50,15 @@ DEFINE_WD_CLASS(reservation_ww_class);
50 | EXPORT_SYMBOL(reservation_ww_class); | 50 | EXPORT_SYMBOL(reservation_ww_class); |
51 | 51 | ||
52 | /** | 52 | /** |
53 | * reservation_object_list_alloc - allocate fence list | 53 | * dma_resv_list_alloc - allocate fence list |
54 | * @shared_max: number of fences we need space for | 54 | * @shared_max: number of fences we need space for |
55 | * | 55 | * |
56 | * Allocate a new reservation_object_list and make sure to correctly initialize | 56 | * Allocate a new dma_resv_list and make sure to correctly initialize |
57 | * shared_max. | 57 | * shared_max. |
58 | */ | 58 | */ |
59 | static struct reservation_object_list * | 59 | static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max) |
60 | reservation_object_list_alloc(unsigned int shared_max) | ||
61 | { | 60 | { |
62 | struct reservation_object_list *list; | 61 | struct dma_resv_list *list; |
63 | 62 | ||
64 | list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL); | 63 | list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL); |
65 | if (!list) | 64 | if (!list) |
@@ -72,12 +72,12 @@ reservation_object_list_alloc(unsigned int shared_max)
72 | } | 71 | } |
73 | 72 | ||
74 | /** | 73 | /** |
75 | * reservation_object_list_free - free fence list | 74 | * dma_resv_list_free - free fence list |
76 | * @list: list to free | 75 | * @list: list to free |
77 | * | 76 | * |
78 | * Free a reservation_object_list and make sure to drop all references. | 77 | * Free a dma_resv_list and make sure to drop all references. |
79 | */ | 78 | */ |
80 | static void reservation_object_list_free(struct reservation_object_list *list) | 79 | static void dma_resv_list_free(struct dma_resv_list *list) |
81 | { | 80 | { |
82 | unsigned int i; | 81 | unsigned int i; |
83 | 82 | ||
@@ -91,24 +90,24 @@ static void reservation_object_list_free(struct reservation_object_list *list)
91 | } | 90 | } |
92 | 91 | ||
93 | /** | 92 | /** |
94 | * reservation_object_init - initialize a reservation object | 93 | * dma_resv_init - initialize a reservation object |
95 | * @obj: the reservation object | 94 | * @obj: the reservation object |
96 | */ | 95 | */ |
97 | void reservation_object_init(struct reservation_object *obj) | 96 | void dma_resv_init(struct dma_resv *obj) |
98 | { | 97 | { |
99 | ww_mutex_init(&obj->lock, &reservation_ww_class); | 98 | ww_mutex_init(&obj->lock, &reservation_ww_class); |
100 | RCU_INIT_POINTER(obj->fence, NULL); | 99 | RCU_INIT_POINTER(obj->fence, NULL); |
101 | RCU_INIT_POINTER(obj->fence_excl, NULL); | 100 | RCU_INIT_POINTER(obj->fence_excl, NULL); |
102 | } | 101 | } |
103 | EXPORT_SYMBOL(reservation_object_init); | 102 | EXPORT_SYMBOL(dma_resv_init); |
104 | 103 | ||
105 | /** | 104 | /** |
106 | * reservation_object_fini - destroys a reservation object | 105 | * dma_resv_fini - destroys a reservation object |
107 | * @obj: the reservation object | 106 | * @obj: the reservation object |
108 | */ | 107 | */ |
109 | void reservation_object_fini(struct reservation_object *obj) | 108 | void dma_resv_fini(struct dma_resv *obj) |
110 | { | 109 | { |
111 | struct reservation_object_list *fobj; | 110 | struct dma_resv_list *fobj; |
112 | struct dma_fence *excl; | 111 | struct dma_fence *excl; |
113 | 112 | ||
114 | /* | 113 | /* |
@@ -120,32 +119,31 @@ void reservation_object_fini(struct reservation_object *obj)
120 | dma_fence_put(excl); | 119 | dma_fence_put(excl); |
121 | 120 | ||
122 | fobj = rcu_dereference_protected(obj->fence, 1); | 121 | fobj = rcu_dereference_protected(obj->fence, 1); |
123 | reservation_object_list_free(fobj); | 122 | dma_resv_list_free(fobj); |
124 | ww_mutex_destroy(&obj->lock); | 123 | ww_mutex_destroy(&obj->lock); |
125 | } | 124 | } |
126 | EXPORT_SYMBOL(reservation_object_fini); | 125 | EXPORT_SYMBOL(dma_resv_fini); |
127 | 126 | ||
128 | /** | 127 | /** |
129 | * reservation_object_reserve_shared - Reserve space to add shared fences to | 128 | * dma_resv_reserve_shared - Reserve space to add shared fences to |
130 | * a reservation_object. | 129 | * a dma_resv. |
131 | * @obj: reservation object | 130 | * @obj: reservation object |
132 | * @num_fences: number of fences we want to add | 131 | * @num_fences: number of fences we want to add |
133 | * | 132 | * |
134 | * Should be called before reservation_object_add_shared_fence(). Must | 133 | * Should be called before dma_resv_add_shared_fence(). Must |
135 | * be called with obj->lock held. | 134 | * be called with obj->lock held. |
136 | * | 135 | * |
137 | * RETURNS | 136 | * RETURNS |
138 | * Zero for success, or -errno | 137 | * Zero for success, or -errno |
139 | */ | 138 | */ |
140 | int reservation_object_reserve_shared(struct reservation_object *obj, | 139 | int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) |
141 | unsigned int num_fences) | ||
142 | { | 140 | { |
143 | struct reservation_object_list *old, *new; | 141 | struct dma_resv_list *old, *new; |
144 | unsigned int i, j, k, max; | 142 | unsigned int i, j, k, max; |
145 | 143 | ||
146 | reservation_object_assert_held(obj); | 144 | dma_resv_assert_held(obj); |
147 | 145 | ||
148 | old = reservation_object_get_list(obj); | 146 | old = dma_resv_get_list(obj); |
149 | 147 | ||
150 | if (old && old->shared_max) { | 148 | if (old && old->shared_max) { |
151 | if ((old->shared_count + num_fences) <= old->shared_max) | 149 | if ((old->shared_count + num_fences) <= old->shared_max) |
@@ -157,7 +155,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
157 | max = 4; | 155 | max = 4; |
158 | } | 156 | } |
159 | 157 | ||
160 | new = reservation_object_list_alloc(max); | 158 | new = dma_resv_list_alloc(max); |
161 | if (!new) | 159 | if (!new) |
162 | return -ENOMEM; | 160 | return -ENOMEM; |
163 | 161 | ||
@@ -171,7 +169,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
171 | struct dma_fence *fence; | 169 | struct dma_fence *fence; |
172 | 170 | ||
173 | fence = rcu_dereference_protected(old->shared[i], | 171 | fence = rcu_dereference_protected(old->shared[i], |
174 | reservation_object_held(obj)); | 172 | dma_resv_held(obj)); |
175 | if (dma_fence_is_signaled(fence)) | 173 | if (dma_fence_is_signaled(fence)) |
176 | RCU_INIT_POINTER(new->shared[--k], fence); | 174 | RCU_INIT_POINTER(new->shared[--k], fence); |
177 | else | 175 | else |
@@ -197,41 +195,40 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
197 | struct dma_fence *fence; | 195 | struct dma_fence *fence; |
198 | 196 | ||
199 | fence = rcu_dereference_protected(new->shared[i], | 197 | fence = rcu_dereference_protected(new->shared[i], |
200 | reservation_object_held(obj)); | 198 | dma_resv_held(obj)); |
201 | dma_fence_put(fence); | 199 | dma_fence_put(fence); |
202 | } | 200 | } |
203 | kfree_rcu(old, rcu); | 201 | kfree_rcu(old, rcu); |
204 | 202 | ||
205 | return 0; | 203 | return 0; |
206 | } | 204 | } |
207 | EXPORT_SYMBOL(reservation_object_reserve_shared); | 205 | EXPORT_SYMBOL(dma_resv_reserve_shared); |
208 | 206 | ||
209 | /** | 207 | /** |
210 | * reservation_object_add_shared_fence - Add a fence to a shared slot | 208 | * dma_resv_add_shared_fence - Add a fence to a shared slot |
211 | * @obj: the reservation object | 209 | * @obj: the reservation object |
212 | * @fence: the shared fence to add | 210 | * @fence: the shared fence to add |
213 | * | 211 | * |
214 | * Add a fence to a shared slot, obj->lock must be held, and | 212 | * Add a fence to a shared slot, obj->lock must be held, and |
215 | * reservation_object_reserve_shared() has been called. | 213 | * dma_resv_reserve_shared() has been called. |
216 | */ | 214 | */ |
217 | void reservation_object_add_shared_fence(struct reservation_object *obj, | 215 | void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) |
218 | struct dma_fence *fence) | ||
219 | { | 216 | { |
220 | struct reservation_object_list *fobj; | 217 | struct dma_resv_list *fobj; |
221 | struct dma_fence *old; | 218 | struct dma_fence *old; |
222 | unsigned int i, count; | 219 | unsigned int i, count; |
223 | 220 | ||
224 | dma_fence_get(fence); | 221 | dma_fence_get(fence); |
225 | 222 | ||
226 | reservation_object_assert_held(obj); | 223 | dma_resv_assert_held(obj); |
227 | 224 | ||
228 | fobj = reservation_object_get_list(obj); | 225 | fobj = dma_resv_get_list(obj); |
229 | count = fobj->shared_count; | 226 | count = fobj->shared_count; |
230 | 227 | ||
231 | for (i = 0; i < count; ++i) { | 228 | for (i = 0; i < count; ++i) { |
232 | 229 | ||
233 | old = rcu_dereference_protected(fobj->shared[i], | 230 | old = rcu_dereference_protected(fobj->shared[i], |
234 | reservation_object_held(obj)); | 231 | dma_resv_held(obj)); |
235 | if (old->context == fence->context || | 232 | if (old->context == fence->context || |
236 | dma_fence_is_signaled(old)) | 233 | dma_fence_is_signaled(old)) |
237 | goto replace; | 234 | goto replace; |
@@ -247,25 +244,24 @@ replace:
247 | smp_store_mb(fobj->shared_count, count); | 244 | smp_store_mb(fobj->shared_count, count); |
248 | dma_fence_put(old); | 245 | dma_fence_put(old); |
249 | } | 246 | } |
250 | EXPORT_SYMBOL(reservation_object_add_shared_fence); | 247 | EXPORT_SYMBOL(dma_resv_add_shared_fence); |
251 | 248 | ||
252 | /** | 249 | /** |
253 | * reservation_object_add_excl_fence - Add an exclusive fence. | 250 | * dma_resv_add_excl_fence - Add an exclusive fence. |
254 | * @obj: the reservation object | 251 | * @obj: the reservation object |
255 | * @fence: the shared fence to add | 252 | * @fence: the shared fence to add |
256 | * | 253 | * |
257 | * Add a fence to the exclusive slot. The obj->lock must be held. | 254 | * Add a fence to the exclusive slot. The obj->lock must be held. |
258 | */ | 255 | */ |
259 | void reservation_object_add_excl_fence(struct reservation_object *obj, | 256 | void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) |
260 | struct dma_fence *fence) | ||
261 | { | 257 | { |
262 | struct dma_fence *old_fence = reservation_object_get_excl(obj); | 258 | struct dma_fence *old_fence = dma_resv_get_excl(obj); |
263 | struct reservation_object_list *old; | 259 | struct dma_resv_list *old; |
264 | u32 i = 0; | 260 | u32 i = 0; |
265 | 261 | ||
266 | reservation_object_assert_held(obj); | 262 | dma_resv_assert_held(obj); |
267 | 263 | ||
268 | old = reservation_object_get_list(obj); | 264 | old = dma_resv_get_list(obj); |
269 | if (old) | 265 | if (old) |
270 | i = old->shared_count; | 266 | i = old->shared_count; |
271 | 267 | ||
@@ -282,41 +278,40 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
282 | /* inplace update, no shared fences */ | 278 | /* inplace update, no shared fences */ |
283 | while (i--) | 279 | while (i--) |
284 | dma_fence_put(rcu_dereference_protected(old->shared[i], | 280 | dma_fence_put(rcu_dereference_protected(old->shared[i], |
285 | reservation_object_held(obj))); | 281 | dma_resv_held(obj))); |
286 | 282 | ||
287 | dma_fence_put(old_fence); | 283 | dma_fence_put(old_fence); |
288 | } | 284 | } |
289 | EXPORT_SYMBOL(reservation_object_add_excl_fence); | 285 | EXPORT_SYMBOL(dma_resv_add_excl_fence); |
290 | 286 | ||
291 | /** | 287 | /** |
292 | * reservation_object_copy_fences - Copy all fences from src to dst. | 288 | * dma_resv_copy_fences - Copy all fences from src to dst. |
293 | * @dst: the destination reservation object | 289 | * @dst: the destination reservation object |
294 | * @src: the source reservation object | 290 | * @src: the source reservation object |
295 | * | 291 | * |
296 | * Copy all fences from src to dst. dst-lock must be held. | 292 | * Copy all fences from src to dst. dst-lock must be held. |
297 | */ | 293 | */ |
298 | int reservation_object_copy_fences(struct reservation_object *dst, | 294 | int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) |
299 | struct reservation_object *src) | ||
300 | { | 295 | { |
301 | struct reservation_object_list *src_list, *dst_list; | 296 | struct dma_resv_list *src_list, *dst_list; |
302 | struct dma_fence *old, *new; | 297 | struct dma_fence *old, *new; |
303 | unsigned int i, shared_count; | 298 | unsigned int i, shared_count; |
304 | 299 | ||
305 | reservation_object_assert_held(dst); | 300 | dma_resv_assert_held(dst); |
306 | 301 | ||
307 | rcu_read_lock(); | 302 | rcu_read_lock(); |
308 | 303 | ||
309 | retry: | 304 | retry: |
310 | reservation_object_fences(src, &new, &src_list, &shared_count); | 305 | dma_resv_fences(src, &new, &src_list, &shared_count); |
311 | if (shared_count) { | 306 | if (shared_count) { |
312 | rcu_read_unlock(); | 307 | rcu_read_unlock(); |
313 | 308 | ||
314 | dst_list = reservation_object_list_alloc(shared_count); | 309 | dst_list = dma_resv_list_alloc(shared_count); |
315 | if (!dst_list) | 310 | if (!dst_list) |
316 | return -ENOMEM; | 311 | return -ENOMEM; |
317 | 312 | ||
318 | rcu_read_lock(); | 313 | rcu_read_lock(); |
319 | reservation_object_fences(src, &new, &src_list, &shared_count); | 314 | dma_resv_fences(src, &new, &src_list, &shared_count); |
320 | if (!src_list || shared_count > dst_list->shared_max) { | 315 | if (!src_list || shared_count > dst_list->shared_max) { |
321 | kfree(dst_list); | 316 | kfree(dst_list); |
322 | goto retry; | 317 | goto retry; |
@@ -332,7 +327,7 @@ retry:
332 | continue; | 327 | continue; |
333 | 328 | ||
334 | if (!dma_fence_get_rcu(fence)) { | 329 | if (!dma_fence_get_rcu(fence)) { |
335 | reservation_object_list_free(dst_list); | 330 | dma_resv_list_free(dst_list); |
336 | goto retry; | 331 | goto retry; |
337 | } | 332 | } |
338 | 333 | ||
@@ -348,28 +343,28 @@ retry:
348 | } | 343 | } |
349 | 344 | ||
350 | if (new && !dma_fence_get_rcu(new)) { | 345 | if (new && !dma_fence_get_rcu(new)) { |
351 | reservation_object_list_free(dst_list); | 346 | dma_resv_list_free(dst_list); |
352 | goto retry; | 347 | goto retry; |
353 | } | 348 | } |
354 | rcu_read_unlock(); | 349 | rcu_read_unlock(); |
355 | 350 | ||
356 | src_list = reservation_object_get_list(dst); | 351 | src_list = dma_resv_get_list(dst); |
357 | old = reservation_object_get_excl(dst); | 352 | old = dma_resv_get_excl(dst); |
358 | 353 | ||
359 | preempt_disable(); | 354 | preempt_disable(); |
360 | rcu_assign_pointer(dst->fence_excl, new); | 355 | rcu_assign_pointer(dst->fence_excl, new); |
361 | rcu_assign_pointer(dst->fence, dst_list); | 356 | rcu_assign_pointer(dst->fence, dst_list); |
362 | preempt_enable(); | 357 | preempt_enable(); |
363 | 358 | ||
364 | reservation_object_list_free(src_list); | 359 | dma_resv_list_free(src_list); |
365 | dma_fence_put(old); | 360 | dma_fence_put(old); |
366 | 361 | ||
367 | return 0; | 362 | return 0; |
368 | } | 363 | } |
369 | EXPORT_SYMBOL(reservation_object_copy_fences); | 364 | EXPORT_SYMBOL(dma_resv_copy_fences); |
370 | 365 | ||
371 | /** | 366 | /** |
372 | * reservation_object_get_fences_rcu - Get an object's shared and exclusive | 367 | * dma_resv_get_fences_rcu - Get an object's shared and exclusive |
373 | * fences without update side lock held | 368 | * fences without update side lock held |
374 | * @obj: the reservation object | 369 | * @obj: the reservation object |
375 | * @pfence_excl: the returned exclusive fence (or NULL) | 370 | * @pfence_excl: the returned exclusive fence (or NULL) |
@@ -381,10 +376,10 @@ EXPORT_SYMBOL(reservation_object_copy_fences);
381 | * exclusive fence is not specified the fence is put into the array of the | 376 | * exclusive fence is not specified the fence is put into the array of the |
382 | * shared fences as well. Returns either zero or -ENOMEM. | 377 | * shared fences as well. Returns either zero or -ENOMEM. |
383 | */ | 378 | */ |
384 | int reservation_object_get_fences_rcu(struct reservation_object *obj, | 379 | int dma_resv_get_fences_rcu(struct dma_resv *obj, |
385 | struct dma_fence **pfence_excl, | 380 | struct dma_fence **pfence_excl, |
386 | unsigned *pshared_count, | 381 | unsigned *pshared_count, |
387 | struct dma_fence ***pshared) | 382 | struct dma_fence ***pshared) |
388 | { | 383 | { |
389 | struct dma_fence **shared = NULL; | 384 | struct dma_fence **shared = NULL; |
390 | struct dma_fence *fence_excl; | 385 | struct dma_fence *fence_excl; |
@@ -392,14 +387,14 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
392 | int ret = 1; | 387 | int ret = 1; |
393 | 388 | ||
394 | do { | 389 | do { |
395 | struct reservation_object_list *fobj; | 390 | struct dma_resv_list *fobj; |
396 | unsigned int i; | 391 | unsigned int i; |
397 | size_t sz = 0; | 392 | size_t sz = 0; |
398 | 393 | ||
399 | i = 0; | 394 | i = 0; |
400 | 395 | ||
401 | rcu_read_lock(); | 396 | rcu_read_lock(); |
402 | reservation_object_fences(obj, &fence_excl, &fobj, | 397 | dma_resv_fences(obj, &fence_excl, &fobj, |
403 | &shared_count); | 398 | &shared_count); |
404 | 399 | ||
405 | if (fence_excl && !dma_fence_get_rcu(fence_excl)) | 400 | if (fence_excl && !dma_fence_get_rcu(fence_excl)) |
@@ -465,10 +460,10 @@ unlock:
465 | *pshared = shared; | 460 | *pshared = shared; |
466 | return ret; | 461 | return ret; |
467 | } | 462 | } |
468 | EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); | 463 | EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu); |
469 | 464 | ||
470 | /** | 465 | /** |
471 | * reservation_object_wait_timeout_rcu - Wait on reservation's objects | 466 | * dma_resv_wait_timeout_rcu - Wait on reservation's objects |
472 | * shared and/or exclusive fences. | 467 | * shared and/or exclusive fences. |
473 | * @obj: the reservation object | 468 | * @obj: the reservation object |
474 | * @wait_all: if true, wait on all fences, else wait on just exclusive fence | 469 | * @wait_all: if true, wait on all fences, else wait on just exclusive fence |
@@ -479,11 +474,11 @@ EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
479 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or | 474 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or |
480 | * greater than zer on success. | 475 | * greater than zer on success. |
481 | */ | 476 | */ |
482 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, | 477 | long dma_resv_wait_timeout_rcu(struct dma_resv *obj, |
483 | bool wait_all, bool intr, | 478 | bool wait_all, bool intr, |
484 | unsigned long timeout) | 479 | unsigned long timeout) |
485 | { | 480 | { |
486 | struct reservation_object_list *fobj; | 481 | struct dma_resv_list *fobj; |
487 | struct dma_fence *fence; | 482 | struct dma_fence *fence; |
488 | unsigned shared_count; | 483 | unsigned shared_count; |
489 | long ret = timeout ? timeout : 1; | 484 | long ret = timeout ? timeout : 1; |
@@ -493,7 +488,7 @@ retry:
493 | rcu_read_lock(); | 488 | rcu_read_lock(); |
494 | i = -1; | 489 | i = -1; |
495 | 490 | ||
496 | reservation_object_fences(obj, &fence, &fobj, &shared_count); | 491 | dma_resv_fences(obj, &fence, &fobj, &shared_count); |
497 | if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { | 492 | if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { |
498 | if (!dma_fence_get_rcu(fence)) | 493 | if (!dma_fence_get_rcu(fence)) |
499 | goto unlock_retry; | 494 | goto unlock_retry; |
@@ -541,11 +536,10 @@ unlock_retry:
541 | rcu_read_unlock(); | 536 | rcu_read_unlock(); |
542 | goto retry; | 537 | goto retry; |
543 | } | 538 | } |
544 | EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); | 539 | EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu); |
545 | 540 | ||
546 | 541 | ||
547 | static inline int | 542 | static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) |
548 | reservation_object_test_signaled_single(struct dma_fence *passed_fence) | ||
549 | { | 543 | { |
550 | struct dma_fence *fence, *lfence = passed_fence; | 544 | struct dma_fence *fence, *lfence = passed_fence; |
551 | int ret = 1; | 545 | int ret = 1; |
@@ -562,7 +556,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
562 | } | 556 | } |
563 | 557 | ||
564 | /** | 558 | /** |
565 | * reservation_object_test_signaled_rcu - Test if a reservation object's | 559 | * dma_resv_test_signaled_rcu - Test if a reservation object's |
566 | * fences have been signaled. | 560 | * fences have been signaled. |
567 | * @obj: the reservation object | 561 | * @obj: the reservation object |
568 | * @test_all: if true, test all fences, otherwise only test the exclusive | 562 | * @test_all: if true, test all fences, otherwise only test the exclusive |
@@ -571,10 +565,9 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
571 | * RETURNS | 565 | * RETURNS |
572 | * true if all fences signaled, else false | 566 | * true if all fences signaled, else false |
573 | */ | 567 | */ |
574 | bool reservation_object_test_signaled_rcu(struct reservation_object *obj, | 568 | bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all) |
575 | bool test_all) | ||
576 | { | 569 | { |
577 | struct reservation_object_list *fobj; | 570 | struct dma_resv_list *fobj; |
578 | struct dma_fence *fence_excl; | 571 | struct dma_fence *fence_excl; |
579 | unsigned shared_count; | 572 | unsigned shared_count; |
580 | int ret; | 573 | int ret; |
@@ -583,14 +576,14 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
583 | retry: | 576 | retry: |
584 | ret = true; | 577 | ret = true; |
585 | 578 | ||
586 | reservation_object_fences(obj, &fence_excl, &fobj, &shared_count); | 579 | dma_resv_fences(obj, &fence_excl, &fobj, &shared_count); |
587 | if (test_all) { | 580 | if (test_all) { |
588 | unsigned i; | 581 | unsigned i; |
589 | 582 | ||
590 | for (i = 0; i < shared_count; ++i) { | 583 | for (i = 0; i < shared_count; ++i) { |
591 | struct dma_fence *fence = rcu_dereference(fobj->shared[i]); | 584 | struct dma_fence *fence = rcu_dereference(fobj->shared[i]); |
592 | 585 | ||
593 | ret = reservation_object_test_signaled_single(fence); | 586 | ret = dma_resv_test_signaled_single(fence); |
594 | if (ret < 0) | 587 | if (ret < 0) |
595 | goto retry; | 588 | goto retry; |
596 | else if (!ret) | 589 | else if (!ret) |
@@ -599,7 +592,7 @@ retry:
599 | } | 592 | } |
600 | 593 | ||
601 | if (!shared_count && fence_excl) { | 594 | if (!shared_count && fence_excl) { |
602 | ret = reservation_object_test_signaled_single(fence_excl); | 595 | ret = dma_resv_test_signaled_single(fence_excl); |
603 | if (ret < 0) | 596 | if (ret < 0) |
604 | goto retry; | 597 | goto retry; |
605 | } | 598 | } |
@@ -607,4 +600,4 @@ retry:
607 | rcu_read_unlock(); | 600 | rcu_read_unlock(); |
608 | return ret; | 601 | return ret; |
609 | } | 602 | } |
610 | EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu); | 603 | EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a4640ddc24d1..bc4ec6b20a87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -218,14 +218,14 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
218 | static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, | 218 | static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, |
219 | struct amdgpu_amdkfd_fence *ef) | 219 | struct amdgpu_amdkfd_fence *ef) |
220 | { | 220 | { |
221 | struct reservation_object *resv = bo->tbo.base.resv; | 221 | struct dma_resv *resv = bo->tbo.base.resv; |
222 | struct reservation_object_list *old, *new; | 222 | struct dma_resv_list *old, *new; |
223 | unsigned int i, j, k; | 223 | unsigned int i, j, k; |
224 | 224 | ||
225 | if (!ef) | 225 | if (!ef) |
226 | return -EINVAL; | 226 | return -EINVAL; |
227 | 227 | ||
228 | old = reservation_object_get_list(resv); | 228 | old = dma_resv_get_list(resv); |
229 | if (!old) | 229 | if (!old) |
230 | return 0; | 230 | return 0; |
231 | 231 | ||
@@ -241,7 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
241 | struct dma_fence *f; | 241 | struct dma_fence *f; |
242 | 242 | ||
243 | f = rcu_dereference_protected(old->shared[i], | 243 | f = rcu_dereference_protected(old->shared[i], |
244 | reservation_object_held(resv)); | 244 | dma_resv_held(resv)); |
245 | 245 | ||
246 | if (f->context == ef->base.context) | 246 | if (f->context == ef->base.context) |
247 | RCU_INIT_POINTER(new->shared[--j], f); | 247 | RCU_INIT_POINTER(new->shared[--j], f); |
@@ -258,7 +258,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
258 | struct dma_fence *f; | 258 | struct dma_fence *f; |
259 | 259 | ||
260 | f = rcu_dereference_protected(new->shared[i], | 260 | f = rcu_dereference_protected(new->shared[i], |
261 | reservation_object_held(resv)); | 261 | dma_resv_held(resv)); |
262 | dma_fence_put(f); | 262 | dma_fence_put(f); |
263 | } | 263 | } |
264 | kfree_rcu(old, rcu); | 264 | kfree_rcu(old, rcu); |
@@ -882,7 +882,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
882 | AMDGPU_FENCE_OWNER_KFD, false); | 882 | AMDGPU_FENCE_OWNER_KFD, false); |
883 | if (ret) | 883 | if (ret) |
884 | goto wait_pd_fail; | 884 | goto wait_pd_fail; |
885 | ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); | 885 | ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); |
886 | if (ret) | 886 | if (ret) |
887 | goto reserve_shared_fail; | 887 | goto reserve_shared_fail; |
888 | amdgpu_bo_fence(vm->root.base.bo, | 888 | amdgpu_bo_fence(vm->root.base.bo, |
@@ -2127,7 +2127,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
2127 | * Add process eviction fence to bo so they can | 2127 | * Add process eviction fence to bo so they can |
2128 | * evict each other. | 2128 | * evict each other. |
2129 | */ | 2129 | */ |
2130 | ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1); | 2130 | ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1); |
2131 | if (ret) | 2131 | if (ret) |
2132 | goto reserve_shared_fail; | 2132 | goto reserve_shared_fail; |
2133 | amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true); | 2133 | amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e748cd284780..22236d367e26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -730,7 +730,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
730 | 730 | ||
731 | list_for_each_entry(e, &p->validated, tv.head) { | 731 | list_for_each_entry(e, &p->validated, tv.head) { |
732 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); | 732 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); |
733 | struct reservation_object *resv = bo->tbo.base.resv; | 733 | struct dma_resv *resv = bo->tbo.base.resv; |
734 | 734 | ||
735 | r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp, | 735 | r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp, |
736 | amdgpu_bo_explicit_sync(bo)); | 736 | amdgpu_bo_explicit_sync(bo)); |
@@ -1729,7 +1729,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1729 | *map = mapping; | 1729 | *map = mapping; |
1730 | 1730 | ||
1731 | /* Double check that the BO is reserved by this CS */ | 1731 | /* Double check that the BO is reserved by this CS */ |
1732 | if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) | 1732 | if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) |
1733 | return -EINVAL; | 1733 | return -EINVAL; |
1734 | 1734 | ||
1735 | if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { | 1735 | if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b5d020e15c35..8a48cb5b5875 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -204,7 +204,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
204 | goto unpin; | 204 | goto unpin; |
205 | } | 205 | } |
206 | 206 | ||
207 | r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, | 207 | r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, |
208 | &work->shared_count, | 208 | &work->shared_count, |
209 | &work->shared); | 209 | &work->shared); |
210 | if (unlikely(r != 0)) { | 210 | if (unlikely(r != 0)) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index b88e27da7c28..bf0f00508987 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -137,23 +137,23 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
137 | } | 137 | } |
138 | 138 | ||
139 | static int | 139 | static int |
140 | __reservation_object_make_exclusive(struct reservation_object *obj) | 140 | __dma_resv_make_exclusive(struct dma_resv *obj) |
141 | { | 141 | { |
142 | struct dma_fence **fences; | 142 | struct dma_fence **fences; |
143 | unsigned int count; | 143 | unsigned int count; |
144 | int r; | 144 | int r; |
145 | 145 | ||
146 | if (!reservation_object_get_list(obj)) /* no shared fences to convert */ | 146 | if (!dma_resv_get_list(obj)) /* no shared fences to convert */ |
147 | return 0; | 147 | return 0; |
148 | 148 | ||
149 | r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); | 149 | r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences); |
150 | if (r) | 150 | if (r) |
151 | return r; | 151 | return r; |
152 | 152 | ||
153 | if (count == 0) { | 153 | if (count == 0) { |
154 | /* Now that was unexpected. */ | 154 | /* Now that was unexpected. */ |
155 | } else if (count == 1) { | 155 | } else if (count == 1) { |
156 | reservation_object_add_excl_fence(obj, fences[0]); | 156 | dma_resv_add_excl_fence(obj, fences[0]); |
157 | dma_fence_put(fences[0]); | 157 | dma_fence_put(fences[0]); |
158 | kfree(fences); | 158 | kfree(fences); |
159 | } else { | 159 | } else { |
@@ -165,7 +165,7 @@ __reservation_object_make_exclusive(struct reservation_object *obj)
165 | if (!array) | 165 | if (!array) |
166 | goto err_fences_put; | 166 | goto err_fences_put; |
167 | 167 | ||
168 | reservation_object_add_excl_fence(obj, &array->base); | 168 | dma_resv_add_excl_fence(obj, &array->base); |
169 | dma_fence_put(&array->base); | 169 | dma_fence_put(&array->base); |
170 | } | 170 | } |
171 | 171 | ||
@@ -216,7 +216,7 @@ static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
216 | * fences on the reservation object into a single exclusive | 216 | * fences on the reservation object into a single exclusive |
217 | * fence. | 217 | * fence. |
218 | */ | 218 | */ |
219 | r = __reservation_object_make_exclusive(bo->tbo.base.resv); | 219 | r = __dma_resv_make_exclusive(bo->tbo.base.resv); |
220 | if (r) | 220 | if (r) |
221 | goto error_unreserve; | 221 | goto error_unreserve; |
222 | } | 222 | } |
@@ -367,7 +367,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
367 | struct dma_buf_attachment *attach, | 367 | struct dma_buf_attachment *attach, |
368 | struct sg_table *sg) | 368 | struct sg_table *sg) |
369 | { | 369 | { |
370 | struct reservation_object *resv = attach->dmabuf->resv; | 370 | struct dma_resv *resv = attach->dmabuf->resv; |
371 | struct amdgpu_device *adev = dev->dev_private; | 371 | struct amdgpu_device *adev = dev->dev_private; |
372 | struct amdgpu_bo *bo; | 372 | struct amdgpu_bo *bo; |
373 | struct amdgpu_bo_param bp; | 373 | struct amdgpu_bo_param bp; |
@@ -380,7 +380,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
380 | bp.flags = 0; | 380 | bp.flags = 0; |
381 | bp.type = ttm_bo_type_sg; | 381 | bp.type = ttm_bo_type_sg; |
382 | bp.resv = resv; | 382 | bp.resv = resv; |
383 | reservation_object_lock(resv, NULL); | 383 | dma_resv_lock(resv, NULL); |
384 | ret = amdgpu_bo_create(adev, &bp, &bo); | 384 | ret = amdgpu_bo_create(adev, &bp, &bo); |
385 | if (ret) | 385 | if (ret) |
386 | goto error; | 386 | goto error; |
@@ -392,11 +392,11 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
392 | if (attach->dmabuf->ops != &amdgpu_dmabuf_ops) | 392 | if (attach->dmabuf->ops != &amdgpu_dmabuf_ops) |
393 | bo->prime_shared_count = 1; | 393 | bo->prime_shared_count = 1; |
394 | 394 | ||
395 | reservation_object_unlock(resv); | 395 | dma_resv_unlock(resv); |
396 | return &bo->tbo.base; | 396 | return &bo->tbo.base; |
397 | 397 | ||
398 | error: | 398 | error: |
399 | reservation_object_unlock(resv); | 399 | dma_resv_unlock(resv); |
400 | return ERR_PTR(ret); | 400 | return ERR_PTR(ret); |
401 | } | 401 | } |
402 | 402 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index bff9173a1a94..40f673cfbbfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -50,7 +50,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
50 | int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, | 50 | int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, |
51 | int alignment, u32 initial_domain, | 51 | int alignment, u32 initial_domain, |
52 | u64 flags, enum ttm_bo_type type, | 52 | u64 flags, enum ttm_bo_type type, |
53 | struct reservation_object *resv, | 53 | struct dma_resv *resv, |
54 | struct drm_gem_object **obj) | 54 | struct drm_gem_object **obj) |
55 | { | 55 | { |
56 | struct amdgpu_bo *bo; | 56 | struct amdgpu_bo *bo; |
@@ -215,7 +215,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
215 | union drm_amdgpu_gem_create *args = data; | 215 | union drm_amdgpu_gem_create *args = data; |
216 | uint64_t flags = args->in.domain_flags; | 216 | uint64_t flags = args->in.domain_flags; |
217 | uint64_t size = args->in.bo_size; | 217 | uint64_t size = args->in.bo_size; |
218 | struct reservation_object *resv = NULL; | 218 | struct dma_resv *resv = NULL; |
219 | struct drm_gem_object *gobj; | 219 | struct drm_gem_object *gobj; |
220 | uint32_t handle; | 220 | uint32_t handle; |
221 | int r; | 221 | int r; |
@@ -433,7 +433,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
433 | return -ENOENT; | 433 | return -ENOENT; |
434 | } | 434 | } |
435 | robj = gem_to_amdgpu_bo(gobj); | 435 | robj = gem_to_amdgpu_bo(gobj); |
436 | ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, | 436 | ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, |
437 | timeout); | 437 | timeout); |
438 | 438 | ||
439 | /* ret == 0 means not signaled, | 439 | /* ret == 0 means not signaled, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 2f17150e26e1..0b66d2e6b5d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -47,7 +47,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev);
47 | int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, | 47 | int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, |
48 | int alignment, u32 initial_domain, | 48 | int alignment, u32 initial_domain, |
49 | u64 flags, enum ttm_bo_type type, | 49 | u64 flags, enum ttm_bo_type type, |
50 | struct reservation_object *resv, | 50 | struct dma_resv *resv, |
51 | struct drm_gem_object **obj); | 51 | struct drm_gem_object **obj); |
52 | 52 | ||
53 | int amdgpu_mode_dumb_create(struct drm_file *file_priv, | 53 | int amdgpu_mode_dumb_create(struct drm_file *file_priv, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 57b3d8a9bef3..b3823f657bdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -104,7 +104,7 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
104 | * | 104 | * |
105 | * Free the pasid only after all the fences in resv are signaled. | 105 | * Free the pasid only after all the fences in resv are signaled. |
106 | */ | 106 | */ |
107 | void amdgpu_pasid_free_delayed(struct reservation_object *resv, | 107 | void amdgpu_pasid_free_delayed(struct dma_resv *resv, |
108 | unsigned int pasid) | 108 | unsigned int pasid) |
109 | { | 109 | { |
110 | struct dma_fence *fence, **fences; | 110 | struct dma_fence *fence, **fences; |
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct reservation_object *resv,
112 | unsigned count; | 112 | unsigned count; |
113 | int r; | 113 | int r; |
114 | 114 | ||
115 | r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences); | 115 | r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences); |
116 | if (r) | 116 | if (r) |
117 | goto fallback; | 117 | goto fallback; |
118 | 118 | ||
@@ -156,7 +156,7 @@ fallback:
156 | /* Not enough memory for the delayed delete, as last resort | 156 | /* Not enough memory for the delayed delete, as last resort |
157 | * block for all the fences to complete. | 157 | * block for all the fences to complete. |
158 | */ | 158 | */ |
159 | reservation_object_wait_timeout_rcu(resv, true, false, | 159 | dma_resv_wait_timeout_rcu(resv, true, false, |
160 | MAX_SCHEDULE_TIMEOUT); | 160 | MAX_SCHEDULE_TIMEOUT); |
161 | amdgpu_pasid_free(pasid); | 161 | amdgpu_pasid_free(pasid); |
162 | } | 162 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 7625419f0fc2..8e58325bbca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -72,7 +72,7 @@ struct amdgpu_vmid_mgr {
72 | 72 | ||
73 | int amdgpu_pasid_alloc(unsigned int bits); | 73 | int amdgpu_pasid_alloc(unsigned int bits); |
74 | void amdgpu_pasid_free(unsigned int pasid); | 74 | void amdgpu_pasid_free(unsigned int pasid); |
75 | void amdgpu_pasid_free_delayed(struct reservation_object *resv, | 75 | void amdgpu_pasid_free_delayed(struct dma_resv *resv, |
76 | unsigned int pasid); | 76 | unsigned int pasid); |
77 | 77 | ||
78 | bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, | 78 | bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 50022acc8a81..f1f8cdd695d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -179,7 +179,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
179 | if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) | 179 | if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) |
180 | continue; | 180 | continue; |
181 | 181 | ||
182 | r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, | 182 | r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, |
183 | true, false, MAX_SCHEDULE_TIMEOUT); | 183 | true, false, MAX_SCHEDULE_TIMEOUT); |
184 | if (r <= 0) | 184 | if (r <= 0) |
185 | DRM_ERROR("(%ld) failed to wait for user bo\n", r); | 185 | DRM_ERROR("(%ld) failed to wait for user bo\n", r); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8ae44d383a13..2f11ebd95528 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -544,7 +544,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
544 | 544 | ||
545 | fail_unreserve: | 545 | fail_unreserve: |
546 | if (!bp->resv) | 546 | if (!bp->resv) |
547 | reservation_object_unlock(bo->tbo.base.resv); | 547 | dma_resv_unlock(bo->tbo.base.resv); |
548 | amdgpu_bo_unref(&bo); | 548 | amdgpu_bo_unref(&bo); |
549 | return r; | 549 | return r; |
550 | } | 550 | } |
@@ -606,13 +606,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
606 | 606 | ||
607 | if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) { | 607 | if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) { |
608 | if (!bp->resv) | 608 | if (!bp->resv) |
609 | WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv, | 609 | WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv, |
610 | NULL)); | 610 | NULL)); |
611 | 611 | ||
612 | r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr); | 612 | r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr); |
613 | 613 | ||
614 | if (!bp->resv) | 614 | if (!bp->resv) |
615 | reservation_object_unlock((*bo_ptr)->tbo.base.resv); | 615 | dma_resv_unlock((*bo_ptr)->tbo.base.resv); |
616 | 616 | ||
617 | if (r) | 617 | if (r) |
618 | amdgpu_bo_unref(bo_ptr); | 618 | amdgpu_bo_unref(bo_ptr); |
@@ -709,7 +709,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
709 | return 0; | 709 | return 0; |
710 | } | 710 | } |
711 | 711 | ||
712 | r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false, | 712 | r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false, |
713 | MAX_SCHEDULE_TIMEOUT); | 713 | MAX_SCHEDULE_TIMEOUT); |
714 | if (r < 0) | 714 | if (r < 0) |
715 | return r; | 715 | return r; |
@@ -1087,7 +1087,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
1087 | */ | 1087 | */ |
1088 | void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags) | 1088 | void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags) |
1089 | { | 1089 | { |
1090 | reservation_object_assert_held(bo->tbo.base.resv); | 1090 | dma_resv_assert_held(bo->tbo.base.resv); |
1091 | 1091 | ||
1092 | if (tiling_flags) | 1092 | if (tiling_flags) |
1093 | *tiling_flags = bo->tiling_flags; | 1093 | *tiling_flags = bo->tiling_flags; |
@@ -1283,12 +1283,12 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
1283 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, | 1283 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, |
1284 | bool shared) | 1284 | bool shared) |
1285 | { | 1285 | { |
1286 | struct reservation_object *resv = bo->tbo.base.resv; | 1286 | struct dma_resv *resv = bo->tbo.base.resv; |
1287 | 1287 | ||
1288 | if (shared) | 1288 | if (shared) |
1289 | reservation_object_add_shared_fence(resv, fence); | 1289 | dma_resv_add_shared_fence(resv, fence); |
1290 | else | 1290 | else |
1291 | reservation_object_add_excl_fence(resv, fence); | 1291 | dma_resv_add_excl_fence(resv, fence); |
1292 | } | 1292 | } |
1293 | 1293 | ||
1294 | /** | 1294 | /** |
@@ -1328,7 +1328,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
1328 | u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) | 1328 | u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) |
1329 | { | 1329 | { |
1330 | WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); | 1330 | WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); |
1331 | WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) && | 1331 | WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) && |
1332 | !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel); | 1332 | !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel); |
1333 | WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); | 1333 | WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); |
1334 | WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM && | 1334 | WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM && |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 113fb2feb437..1a555b0fd3b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -41,7 +41,7 @@ struct amdgpu_bo_param {
41 | u32 preferred_domain; | 41 | u32 preferred_domain; |
42 | u64 flags; | 42 | u64 flags; |
43 | enum ttm_bo_type type; | 43 | enum ttm_bo_type type; |
44 | struct reservation_object *resv; | 44 | struct dma_resv *resv; |
45 | }; | 45 | }; |
46 | 46 | ||
47 | /* bo virtual addresses in a vm */ | 47 | /* bo virtual addresses in a vm */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 9828f3c7c655..95e5e93edd18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -190,10 +190,10 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
190 | */ | 190 | */ |
191 | int amdgpu_sync_resv(struct amdgpu_device *adev, | 191 | int amdgpu_sync_resv(struct amdgpu_device *adev, |
192 | struct amdgpu_sync *sync, | 192 | struct amdgpu_sync *sync, |
193 | struct reservation_object *resv, | 193 | struct dma_resv *resv, |
194 | void *owner, bool explicit_sync) | 194 | void *owner, bool explicit_sync) |
195 | { | 195 | { |
196 | struct reservation_object_list *flist; | 196 | struct dma_resv_list *flist; |
197 | struct dma_fence *f; | 197 | struct dma_fence *f; |
198 | void *fence_owner; | 198 | void *fence_owner; |
199 | unsigned i; | 199 | unsigned i; |
@@ -203,16 +203,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
203 | return -EINVAL; | 203 | return -EINVAL; |
204 | 204 | ||
205 | /* always sync to the exclusive fence */ | 205 | /* always sync to the exclusive fence */ |
206 | f = reservation_object_get_excl(resv); | 206 | f = dma_resv_get_excl(resv); |
207 | r = amdgpu_sync_fence(adev, sync, f, false); | 207 | r = amdgpu_sync_fence(adev, sync, f, false); |
208 | 208 | ||
209 | flist = reservation_object_get_list(resv); | 209 | flist = dma_resv_get_list(resv); |
210 | if (!flist || r) | 210 | if (!flist || r) |
211 | return r; | 211 | return r; |
212 | 212 | ||
213 | for (i = 0; i < flist->shared_count; ++i) { | 213 | for (i = 0; i < flist->shared_count; ++i) { |
214 | f = rcu_dereference_protected(flist->shared[i], | 214 | f = rcu_dereference_protected(flist->shared[i], |
215 | reservation_object_held(resv)); | 215 | dma_resv_held(resv)); |
216 | /* We only want to trigger KFD eviction fences on | 216 | /* We only want to trigger KFD eviction fences on |
217 | * evict or move jobs. Skip KFD fences otherwise. | 217 | * evict or move jobs. Skip KFD fences otherwise. |
218 | */ | 218 | */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 10cf23a57f17..b5f1778a2319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -27,7 +27,7 @@
27 | #include <linux/hashtable.h> | 27 | #include <linux/hashtable.h> |
28 | 28 | ||
29 | struct dma_fence; | 29 | struct dma_fence; |
30 | struct reservation_object; | 30 | struct dma_resv; |
31 | struct amdgpu_device; | 31 | struct amdgpu_device; |
32 | struct amdgpu_ring; | 32 | struct amdgpu_ring; |
33 | 33 | ||
@@ -44,7 +44,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | |||
44 | struct dma_fence *f, bool explicit); | 44 | struct dma_fence *f, bool explicit); |
45 | int amdgpu_sync_resv(struct amdgpu_device *adev, | 45 | int amdgpu_sync_resv(struct amdgpu_device *adev, |
46 | struct amdgpu_sync *sync, | 46 | struct amdgpu_sync *sync, |
47 | struct reservation_object *resv, | 47 | struct dma_resv *resv, |
48 | void *owner, | 48 | void *owner, |
49 | bool explicit_sync); | 49 | bool explicit_sync); |
50 | struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | 50 | struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 63e7d1e01b76..fb09314bcfd4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -303,7 +303,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, | |||
303 | struct amdgpu_copy_mem *src, | 303 | struct amdgpu_copy_mem *src, |
304 | struct amdgpu_copy_mem *dst, | 304 | struct amdgpu_copy_mem *dst, |
305 | uint64_t size, | 305 | uint64_t size, |
306 | struct reservation_object *resv, | 306 | struct dma_resv *resv, |
307 | struct dma_fence **f) | 307 | struct dma_fence **f) |
308 | { | 308 | { |
309 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; | 309 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
@@ -1470,7 +1470,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, | |||
1470 | { | 1470 | { |
1471 | unsigned long num_pages = bo->mem.num_pages; | 1471 | unsigned long num_pages = bo->mem.num_pages; |
1472 | struct drm_mm_node *node = bo->mem.mm_node; | 1472 | struct drm_mm_node *node = bo->mem.mm_node; |
1473 | struct reservation_object_list *flist; | 1473 | struct dma_resv_list *flist; |
1474 | struct dma_fence *f; | 1474 | struct dma_fence *f; |
1475 | int i; | 1475 | int i; |
1476 | 1476 | ||
@@ -1478,18 +1478,18 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, | |||
1478 | * cleanly handle page faults. | 1478 | * cleanly handle page faults. |
1479 | */ | 1479 | */ |
1480 | if (bo->type == ttm_bo_type_kernel && | 1480 | if (bo->type == ttm_bo_type_kernel && |
1481 | !reservation_object_test_signaled_rcu(bo->base.resv, true)) | 1481 | !dma_resv_test_signaled_rcu(bo->base.resv, true)) |
1482 | return false; | 1482 | return false; |
1483 | 1483 | ||
1484 | /* If bo is a KFD BO, check if the bo belongs to the current process. | 1484 | /* If bo is a KFD BO, check if the bo belongs to the current process. |
1485 | * If true, then return false as any KFD process needs all its BOs to | 1485 | * If true, then return false as any KFD process needs all its BOs to |
1486 | * be resident to run successfully | 1486 | * be resident to run successfully |
1487 | */ | 1487 | */ |
1488 | flist = reservation_object_get_list(bo->base.resv); | 1488 | flist = dma_resv_get_list(bo->base.resv); |
1489 | if (flist) { | 1489 | if (flist) { |
1490 | for (i = 0; i < flist->shared_count; ++i) { | 1490 | for (i = 0; i < flist->shared_count; ++i) { |
1491 | f = rcu_dereference_protected(flist->shared[i], | 1491 | f = rcu_dereference_protected(flist->shared[i], |
1492 | reservation_object_held(bo->base.resv)); | 1492 | dma_resv_held(bo->base.resv)); |
1493 | if (amdkfd_fence_check_mm(f, current->mm)) | 1493 | if (amdkfd_fence_check_mm(f, current->mm)) |
1494 | return false; | 1494 | return false; |
1495 | } | 1495 | } |
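amdgpu_ttm_bo_eviction_valuable() uses the lock-free query side of the API: dma_resv_test_signaled_rcu() answers "is this BO idle?" without taking the reservation lock. A small sketch of the same non-blocking idle check (the eviction policy shown is illustrative):

	#include <linux/dma-resv.h>

	/* Illustrative sketch: decide whether a BO may be evicted without
	 * blocking. No reservation lock is needed; the _rcu variant handles
	 * consistency against concurrent fence updates.
	 */
	static bool example_eviction_valuable(struct dma_resv *resv,
					      bool is_kernel_bo)
	{
		/* Kernel BOs cannot handle page faults, so only consider
		 * them once all fences (shared and exclusive) have signaled.
		 */
		if (is_kernel_bo && !dma_resv_test_signaled_rcu(resv, true))
			return false;

		return true;
	}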
@@ -1992,7 +1992,7 @@ error_free: | |||
1992 | 1992 | ||
1993 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, | 1993 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, |
1994 | uint64_t dst_offset, uint32_t byte_count, | 1994 | uint64_t dst_offset, uint32_t byte_count, |
1995 | struct reservation_object *resv, | 1995 | struct dma_resv *resv, |
1996 | struct dma_fence **fence, bool direct_submit, | 1996 | struct dma_fence **fence, bool direct_submit, |
1997 | bool vm_needs_flush) | 1997 | bool vm_needs_flush) |
1998 | { | 1998 | { |
@@ -2066,7 +2066,7 @@ error_free: | |||
2066 | 2066 | ||
2067 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, | 2067 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
2068 | uint32_t src_data, | 2068 | uint32_t src_data, |
2069 | struct reservation_object *resv, | 2069 | struct dma_resv *resv, |
2070 | struct dma_fence **fence) | 2070 | struct dma_fence **fence) |
2071 | { | 2071 | { |
2072 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | 2072 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index caa76c693700..80896bd6b972 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | |||
@@ -83,18 +83,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, | |||
83 | 83 | ||
84 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, | 84 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, |
85 | uint64_t dst_offset, uint32_t byte_count, | 85 | uint64_t dst_offset, uint32_t byte_count, |
86 | struct reservation_object *resv, | 86 | struct dma_resv *resv, |
87 | struct dma_fence **fence, bool direct_submit, | 87 | struct dma_fence **fence, bool direct_submit, |
88 | bool vm_needs_flush); | 88 | bool vm_needs_flush); |
89 | int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, | 89 | int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, |
90 | struct amdgpu_copy_mem *src, | 90 | struct amdgpu_copy_mem *src, |
91 | struct amdgpu_copy_mem *dst, | 91 | struct amdgpu_copy_mem *dst, |
92 | uint64_t size, | 92 | uint64_t size, |
93 | struct reservation_object *resv, | 93 | struct dma_resv *resv, |
94 | struct dma_fence **f); | 94 | struct dma_fence **f); |
95 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, | 95 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
96 | uint32_t src_data, | 96 | uint32_t src_data, |
97 | struct reservation_object *resv, | 97 | struct dma_resv *resv, |
98 | struct dma_fence **fence); | 98 | struct dma_fence **fence); |
99 | 99 | ||
100 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); | 100 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index f858607b17a5..b2c364b8695f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -1073,7 +1073,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, | |||
1073 | ib->length_dw = 16; | 1073 | ib->length_dw = 16; |
1074 | 1074 | ||
1075 | if (direct) { | 1075 | if (direct) { |
1076 | r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, | 1076 | r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, |
1077 | true, false, | 1077 | true, false, |
1078 | msecs_to_jiffies(10)); | 1078 | msecs_to_jiffies(10)); |
1079 | if (r == 0) | 1079 | if (r == 0) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 95eef0ac2829..07dcad7ecb26 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -1702,7 +1702,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |||
1702 | ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); | 1702 | ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); |
1703 | pages_addr = ttm->dma_address; | 1703 | pages_addr = ttm->dma_address; |
1704 | } | 1704 | } |
1705 | exclusive = reservation_object_get_excl(bo->tbo.base.resv); | 1705 | exclusive = dma_resv_get_excl(bo->tbo.base.resv); |
1706 | } | 1706 | } |
1707 | 1707 | ||
1708 | if (bo) { | 1708 | if (bo) { |
@@ -1879,18 +1879,18 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, | |||
1879 | */ | 1879 | */ |
1880 | static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | 1880 | static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) |
1881 | { | 1881 | { |
1882 | struct reservation_object *resv = vm->root.base.bo->tbo.base.resv; | 1882 | struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; |
1883 | struct dma_fence *excl, **shared; | 1883 | struct dma_fence *excl, **shared; |
1884 | unsigned i, shared_count; | 1884 | unsigned i, shared_count; |
1885 | int r; | 1885 | int r; |
1886 | 1886 | ||
1887 | r = reservation_object_get_fences_rcu(resv, &excl, | 1887 | r = dma_resv_get_fences_rcu(resv, &excl, |
1888 | &shared_count, &shared); | 1888 | &shared_count, &shared); |
1889 | if (r) { | 1889 | if (r) { |
1890 | /* Not enough memory to grab the fence list, as last resort | 1890 | /* Not enough memory to grab the fence list, as last resort |
1891 | * block for all the fences to complete. | 1891 | * block for all the fences to complete. |
1892 | */ | 1892 | */ |
1893 | reservation_object_wait_timeout_rcu(resv, true, false, | 1893 | dma_resv_wait_timeout_rcu(resv, true, false, |
1894 | MAX_SCHEDULE_TIMEOUT); | 1894 | MAX_SCHEDULE_TIMEOUT); |
1895 | return; | 1895 | return; |
1896 | } | 1896 | } |
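amdgpu_vm_prt_fini() shows the lock-free snapshot helper: dma_resv_get_fences_rcu() hands back a kmalloc'ed copy of the exclusive and shared fences, and when that allocation fails the caller simply blocks via dma_resv_wait_timeout_rcu(). A hedged sketch of that snapshot-or-block pattern:

	#include <linux/dma-resv.h>
	#include <linux/dma-fence.h>
	#include <linux/sched.h>
	#include <linux/slab.h>

	/* Illustrative sketch: grab a snapshot of all fences on @resv, or,
	 * if memory is too tight for the snapshot, block until they signal.
	 */
	static void example_snapshot_or_block(struct dma_resv *resv)
	{
		struct dma_fence *excl, **shared;
		unsigned int i, shared_count;
		int r;

		r = dma_resv_get_fences_rcu(resv, &excl, &shared_count, &shared);
		if (r) {
			/* Last resort: wait for everything instead. */
			dma_resv_wait_timeout_rcu(resv, true, false,
						  MAX_SCHEDULE_TIMEOUT);
			return;
		}

		/* ... hand the snapshot over to a worker, callback, etc. ... */

		dma_fence_put(excl);
		for (i = 0; i < shared_count; ++i)
			dma_fence_put(shared[i]);
		kfree(shared);
	}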
@@ -1978,7 +1978,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, | |||
1978 | struct amdgpu_vm *vm) | 1978 | struct amdgpu_vm *vm) |
1979 | { | 1979 | { |
1980 | struct amdgpu_bo_va *bo_va, *tmp; | 1980 | struct amdgpu_bo_va *bo_va, *tmp; |
1981 | struct reservation_object *resv; | 1981 | struct dma_resv *resv; |
1982 | bool clear; | 1982 | bool clear; |
1983 | int r; | 1983 | int r; |
1984 | 1984 | ||
@@ -1997,7 +1997,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, | |||
1997 | spin_unlock(&vm->invalidated_lock); | 1997 | spin_unlock(&vm->invalidated_lock); |
1998 | 1998 | ||
1999 | /* Try to reserve the BO to avoid clearing its ptes */ | 1999 | /* Try to reserve the BO to avoid clearing its ptes */ |
2000 | if (!amdgpu_vm_debug && reservation_object_trylock(resv)) | 2000 | if (!amdgpu_vm_debug && dma_resv_trylock(resv)) |
2001 | clear = false; | 2001 | clear = false; |
2002 | /* Somebody else is using the BO right now */ | 2002 | /* Somebody else is using the BO right now */ |
2003 | else | 2003 | else |
@@ -2008,7 +2008,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, | |||
2008 | return r; | 2008 | return r; |
2009 | 2009 | ||
2010 | if (!clear) | 2010 | if (!clear) |
2011 | reservation_object_unlock(resv); | 2011 | dma_resv_unlock(resv); |
2012 | spin_lock(&vm->invalidated_lock); | 2012 | spin_lock(&vm->invalidated_lock); |
2013 | } | 2013 | } |
2014 | spin_unlock(&vm->invalidated_lock); | 2014 | spin_unlock(&vm->invalidated_lock); |
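amdgpu_vm_handle_moved() only takes the reservation lock opportunistically: if dma_resv_trylock() fails, somebody else owns the BO and the code falls back to the conservative path. A minimal sketch of that trylock-or-fallback idiom (the fast/slow path bodies are placeholders):

	#include <linux/dma-resv.h>

	/* Illustrative sketch: do the precise update when the lock is free,
	 * fall back to a conservative update when it is contended.
	 */
	static void example_opportunistic_update(struct dma_resv *resv)
	{
		bool locked = dma_resv_trylock(resv);

		if (locked) {
			/* fast path: we own the reservation */
		} else {
			/* slow path: somebody else is using the BO right now */
		}

		if (locked)
			dma_resv_unlock(resv);
	}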
@@ -2416,7 +2416,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) | |||
2416 | struct amdgpu_bo *bo; | 2416 | struct amdgpu_bo *bo; |
2417 | 2417 | ||
2418 | bo = mapping->bo_va->base.bo; | 2418 | bo = mapping->bo_va->base.bo; |
2419 | if (reservation_object_locking_ctx(bo->tbo.base.resv) != | 2419 | if (dma_resv_locking_ctx(bo->tbo.base.resv) != |
2420 | ticket) | 2420 | ticket) |
2421 | continue; | 2421 | continue; |
2422 | } | 2422 | } |
@@ -2649,7 +2649,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, | |||
2649 | */ | 2649 | */ |
2650 | long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) | 2650 | long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) |
2651 | { | 2651 | { |
2652 | return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, | 2652 | return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, |
2653 | true, true, timeout); | 2653 | true, true, timeout); |
2654 | } | 2654 | } |
2655 | 2655 | ||
@@ -2724,7 +2724,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, | |||
2724 | if (r) | 2724 | if (r) |
2725 | goto error_free_root; | 2725 | goto error_free_root; |
2726 | 2726 | ||
2727 | r = reservation_object_reserve_shared(root->tbo.base.resv, 1); | 2727 | r = dma_resv_reserve_shared(root->tbo.base.resv, 1); |
2728 | if (r) | 2728 | if (r) |
2729 | goto error_unreserve; | 2729 | goto error_unreserve; |
2730 | 2730 | ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 381a5345f195..cb7cfa9b34f2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -5693,7 +5693,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, | |||
5693 | * deadlock during GPU reset when this fence will not signal | 5693 | * deadlock during GPU reset when this fence will not signal |
5694 | * but we hold reservation lock for the BO. | 5694 | * but we hold reservation lock for the BO. |
5695 | */ | 5695 | */ |
5696 | r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true, | 5696 | r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true, |
5697 | false, | 5697 | false, |
5698 | msecs_to_jiffies(5000)); | 5698 | msecs_to_jiffies(5000)); |
5699 | if (unlikely(r <= 0)) | 5699 | if (unlikely(r <= 0)) |
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 19ae119f1a5d..5a5b42db6f2a 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c | |||
@@ -1037,7 +1037,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state, | |||
1037 | * As a contrast, with implicit fencing the kernel keeps track of any | 1037 | * As a contrast, with implicit fencing the kernel keeps track of any |
1038 | * ongoing rendering, and automatically ensures that the atomic update waits | 1038 | * ongoing rendering, and automatically ensures that the atomic update waits |
1039 | * for any pending rendering to complete. For shared buffers represented with | 1039 | * for any pending rendering to complete. For shared buffers represented with |
1040 | * a &struct dma_buf this is tracked in &struct reservation_object. | 1040 | * a &struct dma_buf this is tracked in &struct dma_resv. |
1041 | * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), | 1041 | * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), |
1042 | * whereas explicit fencing is what Android wants. | 1042 | * whereas explicit fencing is what Android wants. |
1043 | * | 1043 | * |
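On the consumer side, the implicit fencing this documentation describes boils down to fishing the exclusive fence out of the buffer's dma_resv and handing it to the atomic commit machinery. A hedged sketch, essentially what drm_gem_fb_prepare_fb() does further down in this patch:

	#include <linux/dma-resv.h>
	#include <drm/drm_atomic_uapi.h>
	#include <drm/drm_gem.h>
	#include <drm/drm_plane.h>

	/* Illustrative sketch: make an atomic plane update wait for any
	 * pending implicit (write) fence on the backing GEM object.
	 */
	static int example_prepare_plane_fb(struct drm_plane_state *state,
					    struct drm_gem_object *obj)
	{
		struct dma_fence *fence;

		if (!obj || state->fence)
			return 0;

		fence = dma_resv_get_excl_rcu(obj->resv);
		drm_atomic_set_fence_for_plane(state, fence);

		return 0;
	}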
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index a2dd198177f2..6854f5867d51 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -159,7 +159,7 @@ void drm_gem_private_object_init(struct drm_device *dev, | |||
159 | kref_init(&obj->refcount); | 159 | kref_init(&obj->refcount); |
160 | obj->handle_count = 0; | 160 | obj->handle_count = 0; |
161 | obj->size = size; | 161 | obj->size = size; |
162 | reservation_object_init(&obj->_resv); | 162 | dma_resv_init(&obj->_resv); |
163 | if (!obj->resv) | 163 | if (!obj->resv) |
164 | obj->resv = &obj->_resv; | 164 | obj->resv = &obj->_resv; |
165 | 165 | ||
@@ -755,7 +755,7 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle) | |||
755 | EXPORT_SYMBOL(drm_gem_object_lookup); | 755 | EXPORT_SYMBOL(drm_gem_object_lookup); |
756 | 756 | ||
757 | /** | 757 | /** |
758 | * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects | 758 | * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects |
759 | * shared and/or exclusive fences. | 759 | * shared and/or exclusive fences. |
760 | * @filep: DRM file private data | 760 | * @filep: DRM file private data |
761 | * @handle: userspace handle | 761 | * @handle: userspace handle |
@@ -767,7 +767,7 @@ EXPORT_SYMBOL(drm_gem_object_lookup); | |||
767 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or | 767 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or |
768 | * greater than 0 on success. | 768 | * greater than 0 on success. |
769 | */ | 769 | */ |
770 | long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, | 770 | long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, |
771 | bool wait_all, unsigned long timeout) | 771 | bool wait_all, unsigned long timeout) |
772 | { | 772 | { |
773 | long ret; | 773 | long ret; |
@@ -779,7 +779,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, | |||
779 | return -EINVAL; | 779 | return -EINVAL; |
780 | } | 780 | } |
781 | 781 | ||
782 | ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all, | 782 | ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all, |
783 | true, timeout); | 783 | true, timeout); |
784 | if (ret == 0) | 784 | if (ret == 0) |
785 | ret = -ETIME; | 785 | ret = -ETIME; |
@@ -790,7 +790,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, | |||
790 | 790 | ||
791 | return ret; | 791 | return ret; |
792 | } | 792 | } |
793 | EXPORT_SYMBOL(drm_gem_reservation_object_wait); | 793 | EXPORT_SYMBOL(drm_gem_dma_resv_wait); |
794 | 794 | ||
795 | /** | 795 | /** |
796 | * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl | 796 | * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl |
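The renamed helper keeps its semantics: -ERESTARTSYS on signal, 0 on timeout, greater than 0 on success. A hedged sketch of how a driver's wait ioctl can sit on top of it (lima does exactly this later in the patch); the ioctl payload and field names here are made up for illustration:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <drm/drm_drv.h>
	#include <drm/drm_gem.h>
	#include <drm/drm_utils.h>

	/* Hypothetical ioctl payload, for illustration only. */
	struct example_wait_args {
		u32 handle;
		u32 write;	/* true: wait for all fences, not just writers */
		s64 timeout_ns;
	};

	static int example_wait_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file)
	{
		struct example_wait_args *args = data;
		unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
		long ret;

		ret = drm_gem_dma_resv_wait(file, args->handle, args->write,
					    timeout);
		if (ret == 0)
			ret = timeout ? -ETIMEDOUT : -EBUSY;

		return ret < 0 ? ret : 0;
	}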
@@ -956,7 +956,7 @@ drm_gem_object_release(struct drm_gem_object *obj) | |||
956 | if (obj->filp) | 956 | if (obj->filp) |
957 | fput(obj->filp); | 957 | fput(obj->filp); |
958 | 958 | ||
959 | reservation_object_fini(&obj->_resv); | 959 | dma_resv_fini(&obj->_resv); |
960 | drm_gem_free_mmap_offset(obj); | 960 | drm_gem_free_mmap_offset(obj); |
961 | } | 961 | } |
962 | EXPORT_SYMBOL(drm_gem_object_release); | 962 | EXPORT_SYMBOL(drm_gem_object_release); |
@@ -1291,7 +1291,7 @@ retry: | |||
1291 | if (contended != -1) { | 1291 | if (contended != -1) { |
1292 | struct drm_gem_object *obj = objs[contended]; | 1292 | struct drm_gem_object *obj = objs[contended]; |
1293 | 1293 | ||
1294 | ret = reservation_object_lock_slow_interruptible(obj->resv, | 1294 | ret = dma_resv_lock_slow_interruptible(obj->resv, |
1295 | acquire_ctx); | 1295 | acquire_ctx); |
1296 | if (ret) { | 1296 | if (ret) { |
1297 | ww_acquire_done(acquire_ctx); | 1297 | ww_acquire_done(acquire_ctx); |
@@ -1303,16 +1303,16 @@ retry: | |||
1303 | if (i == contended) | 1303 | if (i == contended) |
1304 | continue; | 1304 | continue; |
1305 | 1305 | ||
1306 | ret = reservation_object_lock_interruptible(objs[i]->resv, | 1306 | ret = dma_resv_lock_interruptible(objs[i]->resv, |
1307 | acquire_ctx); | 1307 | acquire_ctx); |
1308 | if (ret) { | 1308 | if (ret) { |
1309 | int j; | 1309 | int j; |
1310 | 1310 | ||
1311 | for (j = 0; j < i; j++) | 1311 | for (j = 0; j < i; j++) |
1312 | reservation_object_unlock(objs[j]->resv); | 1312 | dma_resv_unlock(objs[j]->resv); |
1313 | 1313 | ||
1314 | if (contended != -1 && contended >= i) | 1314 | if (contended != -1 && contended >= i) |
1315 | reservation_object_unlock(objs[contended]->resv); | 1315 | dma_resv_unlock(objs[contended]->resv); |
1316 | 1316 | ||
1317 | if (ret == -EDEADLK) { | 1317 | if (ret == -EDEADLK) { |
1318 | contended = i; | 1318 | contended = i; |
@@ -1337,7 +1337,7 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, | |||
1337 | int i; | 1337 | int i; |
1338 | 1338 | ||
1339 | for (i = 0; i < count; i++) | 1339 | for (i = 0; i < count; i++) |
1340 | reservation_object_unlock(objs[i]->resv); | 1340 | dma_resv_unlock(objs[i]->resv); |
1341 | 1341 | ||
1342 | ww_acquire_fini(acquire_ctx); | 1342 | ww_acquire_fini(acquire_ctx); |
1343 | } | 1343 | } |
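drm_gem_lock_reservations()/drm_gem_unlock_reservations() implement the generic deadlock-avoiding multi-lock loop: try dma_resv_lock_interruptible() on each object and, on -EDEADLK, drop everything, lock the contended object with the _slow variant, then retry. A compact sketch of the same loop for an arbitrary reservation array (the caller later unlocks each entry with dma_resv_unlock() and calls ww_acquire_fini()):

	#include <linux/dma-resv.h>
	#include <linux/ww_mutex.h>

	/* Illustrative sketch of the wound/wait acquire loop: lock @count
	 * reservations without deadlocking against other tasks doing the
	 * same in a different order.
	 */
	static int example_lock_many(struct dma_resv **resvs, int count,
				     struct ww_acquire_ctx *ctx)
	{
		int contended = -1;
		int i, ret;

		ww_acquire_init(ctx, &reservation_ww_class);

	retry:
		if (contended != -1) {
			ret = dma_resv_lock_slow_interruptible(resvs[contended],
							       ctx);
			if (ret) {
				ww_acquire_done(ctx);
				return ret;
			}
		}

		for (i = 0; i < count; i++) {
			if (i == contended)
				continue;

			ret = dma_resv_lock_interruptible(resvs[i], ctx);
			if (ret) {
				int j;

				for (j = 0; j < i; j++)
					dma_resv_unlock(resvs[j]);

				if (contended != -1 && contended >= i)
					dma_resv_unlock(resvs[contended]);

				if (ret == -EDEADLK) {
					contended = i;
					goto retry;
				}

				ww_acquire_done(ctx);
				return ret;
			}
		}

		ww_acquire_done(ctx);
		return 0;
	}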
@@ -1413,12 +1413,12 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array, | |||
1413 | 1413 | ||
1414 | if (!write) { | 1414 | if (!write) { |
1415 | struct dma_fence *fence = | 1415 | struct dma_fence *fence = |
1416 | reservation_object_get_excl_rcu(obj->resv); | 1416 | dma_resv_get_excl_rcu(obj->resv); |
1417 | 1417 | ||
1418 | return drm_gem_fence_array_add(fence_array, fence); | 1418 | return drm_gem_fence_array_add(fence_array, fence); |
1419 | } | 1419 | } |
1420 | 1420 | ||
1421 | ret = reservation_object_get_fences_rcu(obj->resv, NULL, | 1421 | ret = dma_resv_get_fences_rcu(obj->resv, NULL, |
1422 | &fence_count, &fences); | 1422 | &fence_count, &fences); |
1423 | if (ret || !fence_count) | 1423 | if (ret || !fence_count) |
1424 | return ret; | 1424 | return ret; |
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index f61304054786..b9bcd310ca2d 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c | |||
@@ -7,7 +7,7 @@ | |||
7 | 7 | ||
8 | #include <linux/dma-buf.h> | 8 | #include <linux/dma-buf.h> |
9 | #include <linux/dma-fence.h> | 9 | #include <linux/dma-fence.h> |
10 | #include <linux/reservation.h> | 10 | #include <linux/dma-resv.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | 12 | ||
13 | #include <drm/drm_atomic.h> | 13 | #include <drm/drm_atomic.h> |
@@ -294,7 +294,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane, | |||
294 | return 0; | 294 | return 0; |
295 | 295 | ||
296 | obj = drm_gem_fb_get_obj(state->fb, 0); | 296 | obj = drm_gem_fb_get_obj(state->fb, 0); |
297 | fence = reservation_object_get_excl_rcu(obj->resv); | 297 | fence = dma_resv_get_excl_rcu(obj->resv); |
298 | drm_atomic_set_fence_for_plane(state, fence); | 298 | drm_atomic_set_fence_for_plane(state, fence); |
299 | 299 | ||
300 | return 0; | 300 | return 0; |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 17ca602db60a..7d83e04ec36e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c | |||
@@ -397,13 +397,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, | |||
397 | } | 397 | } |
398 | 398 | ||
399 | if (op & ETNA_PREP_NOSYNC) { | 399 | if (op & ETNA_PREP_NOSYNC) { |
400 | if (!reservation_object_test_signaled_rcu(obj->resv, | 400 | if (!dma_resv_test_signaled_rcu(obj->resv, |
401 | write)) | 401 | write)) |
402 | return -EBUSY; | 402 | return -EBUSY; |
403 | } else { | 403 | } else { |
404 | unsigned long remain = etnaviv_timeout_to_jiffies(timeout); | 404 | unsigned long remain = etnaviv_timeout_to_jiffies(timeout); |
405 | 405 | ||
406 | ret = reservation_object_wait_timeout_rcu(obj->resv, | 406 | ret = dma_resv_wait_timeout_rcu(obj->resv, |
407 | write, true, remain); | 407 | write, true, remain); |
408 | if (ret <= 0) | 408 | if (ret <= 0) |
409 | return ret == 0 ? -ETIMEDOUT : ret; | 409 | return ret == 0 ? -ETIMEDOUT : ret; |
@@ -459,8 +459,8 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence, | |||
459 | static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) | 459 | static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) |
460 | { | 460 | { |
461 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); | 461 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); |
462 | struct reservation_object *robj = obj->resv; | 462 | struct dma_resv *robj = obj->resv; |
463 | struct reservation_object_list *fobj; | 463 | struct dma_resv_list *fobj; |
464 | struct dma_fence *fence; | 464 | struct dma_fence *fence; |
465 | unsigned long off = drm_vma_node_start(&obj->vma_node); | 465 | unsigned long off = drm_vma_node_start(&obj->vma_node); |
466 | 466 | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index fcd5d71b502f..28379e4df253 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #ifndef __ETNAVIV_GEM_H__ | 6 | #ifndef __ETNAVIV_GEM_H__ |
7 | #define __ETNAVIV_GEM_H__ | 7 | #define __ETNAVIV_GEM_H__ |
8 | 8 | ||
9 | #include <linux/reservation.h> | 9 | #include <linux/dma-resv.h> |
10 | #include "etnaviv_cmdbuf.h" | 10 | #include "etnaviv_cmdbuf.h" |
11 | #include "etnaviv_drv.h" | 11 | #include "etnaviv_drv.h" |
12 | 12 | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 1a636469eeda..998c96b40d8a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | |||
@@ -4,7 +4,7 @@ | |||
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/dma-fence-array.h> | 6 | #include <linux/dma-fence-array.h> |
7 | #include <linux/reservation.h> | 7 | #include <linux/dma-resv.h> |
8 | #include <linux/sync_file.h> | 8 | #include <linux/sync_file.h> |
9 | #include "etnaviv_cmdbuf.h" | 9 | #include "etnaviv_cmdbuf.h" |
10 | #include "etnaviv_drv.h" | 10 | #include "etnaviv_drv.h" |
@@ -165,10 +165,10 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit) | |||
165 | 165 | ||
166 | for (i = 0; i < submit->nr_bos; i++) { | 166 | for (i = 0; i < submit->nr_bos; i++) { |
167 | struct etnaviv_gem_submit_bo *bo = &submit->bos[i]; | 167 | struct etnaviv_gem_submit_bo *bo = &submit->bos[i]; |
168 | struct reservation_object *robj = bo->obj->base.resv; | 168 | struct dma_resv *robj = bo->obj->base.resv; |
169 | 169 | ||
170 | if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) { | 170 | if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) { |
171 | ret = reservation_object_reserve_shared(robj, 1); | 171 | ret = dma_resv_reserve_shared(robj, 1); |
172 | if (ret) | 172 | if (ret) |
173 | return ret; | 173 | return ret; |
174 | } | 174 | } |
@@ -177,13 +177,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit) | |||
177 | continue; | 177 | continue; |
178 | 178 | ||
179 | if (bo->flags & ETNA_SUBMIT_BO_WRITE) { | 179 | if (bo->flags & ETNA_SUBMIT_BO_WRITE) { |
180 | ret = reservation_object_get_fences_rcu(robj, &bo->excl, | 180 | ret = dma_resv_get_fences_rcu(robj, &bo->excl, |
181 | &bo->nr_shared, | 181 | &bo->nr_shared, |
182 | &bo->shared); | 182 | &bo->shared); |
183 | if (ret) | 183 | if (ret) |
184 | return ret; | 184 | return ret; |
185 | } else { | 185 | } else { |
186 | bo->excl = reservation_object_get_excl_rcu(robj); | 186 | bo->excl = dma_resv_get_excl_rcu(robj); |
187 | } | 187 | } |
188 | 188 | ||
189 | } | 189 | } |
@@ -199,10 +199,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit) | |||
199 | struct drm_gem_object *obj = &submit->bos[i].obj->base; | 199 | struct drm_gem_object *obj = &submit->bos[i].obj->base; |
200 | 200 | ||
201 | if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) | 201 | if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) |
202 | reservation_object_add_excl_fence(obj->resv, | 202 | dma_resv_add_excl_fence(obj->resv, |
203 | submit->out_fence); | 203 | submit->out_fence); |
204 | else | 204 | else |
205 | reservation_object_add_shared_fence(obj->resv, | 205 | dma_resv_add_shared_fence(obj->resv, |
206 | submit->out_fence); | 206 | submit->out_fence); |
207 | 207 | ||
208 | submit_unlock_object(submit, i); | 208 | submit_unlock_object(submit, i); |
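The etnaviv submit path shows the producer half of implicit sync: before the job runs, reserve a shared-fence slot for read-only BOs; once the job's out-fence exists, attach it as the exclusive fence for written BOs or as a shared fence for read BOs. A hedged sketch of that pair of steps for a single object (call both with the reservation lock held):

	#include <linux/dma-resv.h>
	#include <linux/dma-fence.h>

	/* Step 1, before submitting the job: writers will replace the
	 * exclusive slot, readers need a reserved shared slot.
	 */
	static int example_prepare_implicit_sync(struct dma_resv *resv, bool write)
	{
		if (!write)
			return dma_resv_reserve_shared(resv, 1);
		return 0;
	}

	/* Step 2, once the job's out-fence has been created. */
	static void example_publish_implicit_fence(struct dma_resv *resv,
						   struct dma_fence *out_fence,
						   bool write)
	{
		if (write)
			dma_resv_add_excl_fence(resv, out_fence);
		else
			dma_resv_add_shared_fence(resv, out_fence);
	}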
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 8592a7d422de..21c71fdceec0 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/intel-iommu.h> | 29 | #include <linux/intel-iommu.h> |
30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/reservation.h> | 32 | #include <linux/dma-resv.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/vgaarb.h> | 34 | #include <linux/vgaarb.h> |
35 | 35 | ||
@@ -14317,7 +14317,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
14317 | if (ret < 0) | 14317 | if (ret < 0) |
14318 | return ret; | 14318 | return ret; |
14319 | 14319 | ||
14320 | fence = reservation_object_get_excl_rcu(obj->base.resv); | 14320 | fence = dma_resv_get_excl_rcu(obj->base.resv); |
14321 | if (fence) { | 14321 | if (fence) { |
14322 | add_rps_boost_after_vblank(new_state->crtc, fence); | 14322 | add_rps_boost_after_vblank(new_state->crtc, fence); |
14323 | dma_fence_put(fence); | 14323 | dma_fence_put(fence); |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 8473292096cb..a2aff1d8290e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c | |||
@@ -82,7 +82,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
82 | { | 82 | { |
83 | struct drm_i915_gem_busy *args = data; | 83 | struct drm_i915_gem_busy *args = data; |
84 | struct drm_i915_gem_object *obj; | 84 | struct drm_i915_gem_object *obj; |
85 | struct reservation_object_list *list; | 85 | struct dma_resv_list *list; |
86 | unsigned int i, shared_count; | 86 | unsigned int i, shared_count; |
87 | struct dma_fence *excl; | 87 | struct dma_fence *excl; |
88 | int err; | 88 | int err; |
@@ -106,11 +106,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
106 | * Alternatively, we can trade that extra information on read/write | 106 | * Alternatively, we can trade that extra information on read/write |
107 | * activity with | 107 | * activity with |
108 | * args->busy = | 108 | * args->busy = |
109 | * !reservation_object_test_signaled_rcu(obj->resv, true); | 109 | * !dma_resv_test_signaled_rcu(obj->resv, true); |
110 | * to report the overall busyness. This is what the wait-ioctl does. | 110 | * to report the overall busyness. This is what the wait-ioctl does. |
111 | * | 111 | * |
112 | */ | 112 | */ |
113 | reservation_object_fences(obj->base.resv, &excl, &list, &shared_count); | 113 | dma_resv_fences(obj->base.resv, &excl, &list, &shared_count); |
114 | 114 | ||
115 | /* Translate the exclusive fence to the READ *and* WRITE engine */ | 115 | /* Translate the exclusive fence to the READ *and* WRITE engine */ |
116 | args->busy = busy_check_writer(excl); | 116 | args->busy = busy_check_writer(excl); |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index 5295285d5843..88ee8ca7967f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c | |||
@@ -147,7 +147,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, | |||
147 | true, I915_FENCE_TIMEOUT, | 147 | true, I915_FENCE_TIMEOUT, |
148 | I915_FENCE_GFP); | 148 | I915_FENCE_GFP); |
149 | 149 | ||
150 | reservation_object_add_excl_fence(obj->base.resv, | 150 | dma_resv_add_excl_fence(obj->base.resv, |
151 | &clflush->dma); | 151 | &clflush->dma); |
152 | 152 | ||
153 | i915_sw_fence_commit(&clflush->wait); | 153 | i915_sw_fence_commit(&clflush->wait); |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index 1fdab0767a47..693fcfce5d69 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | |||
@@ -288,7 +288,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj, | |||
288 | if (err < 0) { | 288 | if (err < 0) { |
289 | dma_fence_set_error(&work->dma, err); | 289 | dma_fence_set_error(&work->dma, err); |
290 | } else { | 290 | } else { |
291 | reservation_object_add_excl_fence(obj->base.resv, &work->dma); | 291 | dma_resv_add_excl_fence(obj->base.resv, &work->dma); |
292 | err = 0; | 292 | err = 0; |
293 | } | 293 | } |
294 | i915_gem_object_unlock(obj); | 294 | i915_gem_object_unlock(obj); |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 570b20ad9e58..96ce95c8ac5a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | |||
@@ -6,7 +6,7 @@ | |||
6 | 6 | ||
7 | #include <linux/dma-buf.h> | 7 | #include <linux/dma-buf.h> |
8 | #include <linux/highmem.h> | 8 | #include <linux/highmem.h> |
9 | #include <linux/reservation.h> | 9 | #include <linux/dma-resv.h> |
10 | 10 | ||
11 | #include "i915_drv.h" | 11 | #include "i915_drv.h" |
12 | #include "i915_gem_object.h" | 12 | #include "i915_gem_object.h" |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 5fae0e50aad0..2d71653ede00 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | |||
@@ -5,7 +5,7 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/intel-iommu.h> | 7 | #include <linux/intel-iommu.h> |
8 | #include <linux/reservation.h> | 8 | #include <linux/dma-resv.h> |
9 | #include <linux/sync_file.h> | 9 | #include <linux/sync_file.h> |
10 | #include <linux/uaccess.h> | 10 | #include <linux/uaccess.h> |
11 | 11 | ||
@@ -1246,7 +1246,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, | |||
1246 | goto skip_request; | 1246 | goto skip_request; |
1247 | 1247 | ||
1248 | i915_vma_lock(batch); | 1248 | i915_vma_lock(batch); |
1249 | GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); | 1249 | GEM_BUG_ON(!dma_resv_test_signaled_rcu(batch->resv, true)); |
1250 | err = i915_vma_move_to_active(batch, rq, 0); | 1250 | err = i915_vma_move_to_active(batch, rq, 0); |
1251 | i915_vma_unlock(batch); | 1251 | i915_vma_unlock(batch); |
1252 | if (err) | 1252 | if (err) |
@@ -1317,7 +1317,7 @@ relocate_entry(struct i915_vma *vma, | |||
1317 | 1317 | ||
1318 | if (!eb->reloc_cache.vaddr && | 1318 | if (!eb->reloc_cache.vaddr && |
1319 | (DBG_FORCE_RELOC == FORCE_GPU_RELOC || | 1319 | (DBG_FORCE_RELOC == FORCE_GPU_RELOC || |
1320 | !reservation_object_test_signaled_rcu(vma->resv, true))) { | 1320 | !dma_resv_test_signaled_rcu(vma->resv, true))) { |
1321 | const unsigned int gen = eb->reloc_cache.gen; | 1321 | const unsigned int gen = eb->reloc_cache.gen; |
1322 | unsigned int len; | 1322 | unsigned int len; |
1323 | u32 *batch; | 1323 | u32 *batch; |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c index cf0439e6be83..5496f33a9064 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c | |||
@@ -78,7 +78,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj) | |||
78 | I915_FENCE_GFP) < 0) | 78 | I915_FENCE_GFP) < 0) |
79 | goto err; | 79 | goto err; |
80 | 80 | ||
81 | reservation_object_add_excl_fence(obj->base.resv, &stub->dma); | 81 | dma_resv_add_excl_fence(obj->base.resv, &stub->dma); |
82 | 82 | ||
83 | return &stub->dma; | 83 | return &stub->dma; |
84 | 84 | ||
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index dfebd5706f16..d5f8fdc95fd9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h | |||
@@ -99,22 +99,22 @@ i915_gem_object_put(struct drm_i915_gem_object *obj) | |||
99 | __drm_gem_object_put(&obj->base); | 99 | __drm_gem_object_put(&obj->base); |
100 | } | 100 | } |
101 | 101 | ||
102 | #define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv) | 102 | #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv) |
103 | 103 | ||
104 | static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) | 104 | static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) |
105 | { | 105 | { |
106 | reservation_object_lock(obj->base.resv, NULL); | 106 | dma_resv_lock(obj->base.resv, NULL); |
107 | } | 107 | } |
108 | 108 | ||
109 | static inline int | 109 | static inline int |
110 | i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj) | 110 | i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj) |
111 | { | 111 | { |
112 | return reservation_object_lock_interruptible(obj->base.resv, NULL); | 112 | return dma_resv_lock_interruptible(obj->base.resv, NULL); |
113 | } | 113 | } |
114 | 114 | ||
115 | static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) | 115 | static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) |
116 | { | 116 | { |
117 | reservation_object_unlock(obj->base.resv); | 117 | dma_resv_unlock(obj->base.resv); |
118 | } | 118 | } |
119 | 119 | ||
120 | struct dma_fence * | 120 | struct dma_fence * |
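i915 keeps its own object-level wrappers around the reservation lock; only the name of the underlying primitives changes here. A hedged sketch of the same wrapper idea for a hypothetical driver BO (the foo_bo type is made up):

	#include <linux/dma-resv.h>
	#include <drm/drm_gem.h>

	/* Hypothetical driver BO wrapper, for illustration only. */
	struct foo_bo {
		struct drm_gem_object base;
	};

	#define foo_bo_assert_held(bo) dma_resv_assert_held((bo)->base.resv)

	static inline void foo_bo_lock(struct foo_bo *bo)
	{
		dma_resv_lock(bo->base.resv, NULL);
	}

	static inline int foo_bo_lock_interruptible(struct foo_bo *bo)
	{
		return dma_resv_lock_interruptible(bo->base.resv, NULL);
	}

	static inline void foo_bo_unlock(struct foo_bo *bo)
	{
		dma_resv_unlock(bo->base.resv);
	}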
@@ -373,7 +373,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) | |||
373 | struct dma_fence *fence; | 373 | struct dma_fence *fence; |
374 | 374 | ||
375 | rcu_read_lock(); | 375 | rcu_read_lock(); |
376 | fence = reservation_object_get_excl_rcu(obj->base.resv); | 376 | fence = dma_resv_get_excl_rcu(obj->base.resv); |
377 | rcu_read_unlock(); | 377 | rcu_read_unlock(); |
378 | 378 | ||
379 | if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence)) | 379 | if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence)) |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index fa46a54bcbe7..8af55cd3e690 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c | |||
@@ -31,7 +31,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence, | |||
31 | } | 31 | } |
32 | 32 | ||
33 | static long | 33 | static long |
34 | i915_gem_object_wait_reservation(struct reservation_object *resv, | 34 | i915_gem_object_wait_reservation(struct dma_resv *resv, |
35 | unsigned int flags, | 35 | unsigned int flags, |
36 | long timeout) | 36 | long timeout) |
37 | { | 37 | { |
@@ -43,7 +43,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, | |||
43 | unsigned int count, i; | 43 | unsigned int count, i; |
44 | int ret; | 44 | int ret; |
45 | 45 | ||
46 | ret = reservation_object_get_fences_rcu(resv, | 46 | ret = dma_resv_get_fences_rcu(resv, |
47 | &excl, &count, &shared); | 47 | &excl, &count, &shared); |
48 | if (ret) | 48 | if (ret) |
49 | return ret; | 49 | return ret; |
@@ -72,7 +72,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, | |||
72 | */ | 72 | */ |
73 | prune_fences = count && timeout >= 0; | 73 | prune_fences = count && timeout >= 0; |
74 | } else { | 74 | } else { |
75 | excl = reservation_object_get_excl_rcu(resv); | 75 | excl = dma_resv_get_excl_rcu(resv); |
76 | } | 76 | } |
77 | 77 | ||
78 | if (excl && timeout >= 0) | 78 | if (excl && timeout >= 0) |
@@ -84,10 +84,10 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, | |||
84 | * Opportunistically prune the fences iff we know they have *all* been | 84 | * Opportunistically prune the fences iff we know they have *all* been |
85 | * signaled. | 85 | * signaled. |
86 | */ | 86 | */ |
87 | if (prune_fences && reservation_object_trylock(resv)) { | 87 | if (prune_fences && dma_resv_trylock(resv)) { |
88 | if (reservation_object_test_signaled_rcu(resv, true)) | 88 | if (dma_resv_test_signaled_rcu(resv, true)) |
89 | reservation_object_add_excl_fence(resv, NULL); | 89 | dma_resv_add_excl_fence(resv, NULL); |
90 | reservation_object_unlock(resv); | 90 | dma_resv_unlock(resv); |
91 | } | 91 | } |
92 | 92 | ||
93 | return timeout; | 93 | return timeout; |
@@ -140,7 +140,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, | |||
140 | unsigned int count, i; | 140 | unsigned int count, i; |
141 | int ret; | 141 | int ret; |
142 | 142 | ||
143 | ret = reservation_object_get_fences_rcu(obj->base.resv, | 143 | ret = dma_resv_get_fences_rcu(obj->base.resv, |
144 | &excl, &count, &shared); | 144 | &excl, &count, &shared); |
145 | if (ret) | 145 | if (ret) |
146 | return ret; | 146 | return ret; |
@@ -152,7 +152,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, | |||
152 | 152 | ||
153 | kfree(shared); | 153 | kfree(shared); |
154 | } else { | 154 | } else { |
155 | excl = reservation_object_get_excl_rcu(obj->base.resv); | 155 | excl = dma_resv_get_excl_rcu(obj->base.resv); |
156 | } | 156 | } |
157 | 157 | ||
158 | if (excl) { | 158 | if (excl) { |
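i915_gem_object_wait_reservation() ends with an optimization this patch renames but does not change: once every fence is known to have signaled, it takes the lock opportunistically and writes a NULL exclusive fence, which drops all fence references so the object no longer pins stale fences. A minimal sketch of that pruning step:

	#include <linux/dma-resv.h>

	/* Illustrative sketch: drop all fence references on @resv once we
	 * know they have signaled.
	 */
	static void example_prune_fences(struct dma_resv *resv)
	{
		if (!dma_resv_trylock(resv))
			return;

		if (dma_resv_test_signaled_rcu(resv, true))
			dma_resv_add_excl_fence(resv, NULL);

		dma_resv_unlock(resv);
	}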
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bdd596604e93..b62db22b37a1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -43,7 +43,7 @@ | |||
43 | #include <linux/mm_types.h> | 43 | #include <linux/mm_types.h> |
44 | #include <linux/perf_event.h> | 44 | #include <linux/perf_event.h> |
45 | #include <linux/pm_qos.h> | 45 | #include <linux/pm_qos.h> |
46 | #include <linux/reservation.h> | 46 | #include <linux/dma-resv.h> |
47 | #include <linux/shmem_fs.h> | 47 | #include <linux/shmem_fs.h> |
48 | #include <linux/stackdepot.h> | 48 | #include <linux/stackdepot.h> |
49 | 49 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 190ad54fb072..a5a439cb8c06 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <drm/i915_drm.h> | 29 | #include <drm/i915_drm.h> |
30 | #include <linux/dma-fence-array.h> | 30 | #include <linux/dma-fence-array.h> |
31 | #include <linux/kthread.h> | 31 | #include <linux/kthread.h> |
32 | #include <linux/reservation.h> | 32 | #include <linux/dma-resv.h> |
33 | #include <linux/shmem_fs.h> | 33 | #include <linux/shmem_fs.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/stop_machine.h> | 35 | #include <linux/stop_machine.h> |
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c index 25a3e4d09a2f..5f82a763e64c 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c | |||
@@ -96,9 +96,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, | |||
96 | list_for_each_entry(obj, list, batch_pool_link) { | 96 | list_for_each_entry(obj, list, batch_pool_link) { |
97 | /* The batches are strictly LRU ordered */ | 97 | /* The batches are strictly LRU ordered */ |
98 | if (i915_gem_object_is_active(obj)) { | 98 | if (i915_gem_object_is_active(obj)) { |
99 | struct reservation_object *resv = obj->base.resv; | 99 | struct dma_resv *resv = obj->base.resv; |
100 | 100 | ||
101 | if (!reservation_object_test_signaled_rcu(resv, true)) | 101 | if (!dma_resv_test_signaled_rcu(resv, true)) |
102 | break; | 102 | break; |
103 | 103 | ||
104 | i915_retire_requests(pool->engine->i915); | 104 | i915_retire_requests(pool->engine->i915); |
@@ -113,13 +113,13 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, | |||
113 | * than replace the existing fence. | 113 | * than replace the existing fence. |
114 | */ | 114 | */ |
115 | if (rcu_access_pointer(resv->fence)) { | 115 | if (rcu_access_pointer(resv->fence)) { |
116 | reservation_object_lock(resv, NULL); | 116 | dma_resv_lock(resv, NULL); |
117 | reservation_object_add_excl_fence(resv, NULL); | 117 | dma_resv_add_excl_fence(resv, NULL); |
118 | reservation_object_unlock(resv); | 118 | dma_resv_unlock(resv); |
119 | } | 119 | } |
120 | } | 120 | } |
121 | 121 | ||
122 | GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv, | 122 | GEM_BUG_ON(!dma_resv_test_signaled_rcu(obj->base.resv, |
123 | true)); | 123 | true)); |
124 | 124 | ||
125 | if (obj->base.size >= size) | 125 | if (obj->base.size >= size) |
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index a195a92d0105..0d1bd3f56c21 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c | |||
@@ -1027,7 +1027,7 @@ i915_request_await_object(struct i915_request *to, | |||
1027 | struct dma_fence **shared; | 1027 | struct dma_fence **shared; |
1028 | unsigned int count, i; | 1028 | unsigned int count, i; |
1029 | 1029 | ||
1030 | ret = reservation_object_get_fences_rcu(obj->base.resv, | 1030 | ret = dma_resv_get_fences_rcu(obj->base.resv, |
1031 | &excl, &count, &shared); | 1031 | &excl, &count, &shared); |
1032 | if (ret) | 1032 | if (ret) |
1033 | return ret; | 1033 | return ret; |
@@ -1044,7 +1044,7 @@ i915_request_await_object(struct i915_request *to, | |||
1044 | dma_fence_put(shared[i]); | 1044 | dma_fence_put(shared[i]); |
1045 | kfree(shared); | 1045 | kfree(shared); |
1046 | } else { | 1046 | } else { |
1047 | excl = reservation_object_get_excl_rcu(obj->base.resv); | 1047 | excl = dma_resv_get_excl_rcu(obj->base.resv); |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | if (excl) { | 1050 | if (excl) { |
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 5387aafd3424..362e4e00b4c6 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c | |||
@@ -7,7 +7,7 @@ | |||
7 | #include <linux/slab.h> | 7 | #include <linux/slab.h> |
8 | #include <linux/dma-fence.h> | 8 | #include <linux/dma-fence.h> |
9 | #include <linux/irq_work.h> | 9 | #include <linux/irq_work.h> |
10 | #include <linux/reservation.h> | 10 | #include <linux/dma-resv.h> |
11 | 11 | ||
12 | #include "i915_sw_fence.h" | 12 | #include "i915_sw_fence.h" |
13 | #include "i915_selftest.h" | 13 | #include "i915_selftest.h" |
@@ -510,7 +510,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | |||
510 | } | 510 | } |
511 | 511 | ||
512 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | 512 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, |
513 | struct reservation_object *resv, | 513 | struct dma_resv *resv, |
514 | const struct dma_fence_ops *exclude, | 514 | const struct dma_fence_ops *exclude, |
515 | bool write, | 515 | bool write, |
516 | unsigned long timeout, | 516 | unsigned long timeout, |
@@ -526,7 +526,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | |||
526 | struct dma_fence **shared; | 526 | struct dma_fence **shared; |
527 | unsigned int count, i; | 527 | unsigned int count, i; |
528 | 528 | ||
529 | ret = reservation_object_get_fences_rcu(resv, | 529 | ret = dma_resv_get_fences_rcu(resv, |
530 | &excl, &count, &shared); | 530 | &excl, &count, &shared); |
531 | if (ret) | 531 | if (ret) |
532 | return ret; | 532 | return ret; |
@@ -551,7 +551,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | |||
551 | dma_fence_put(shared[i]); | 551 | dma_fence_put(shared[i]); |
552 | kfree(shared); | 552 | kfree(shared); |
553 | } else { | 553 | } else { |
554 | excl = reservation_object_get_excl_rcu(resv); | 554 | excl = dma_resv_get_excl_rcu(resv); |
555 | } | 555 | } |
556 | 556 | ||
557 | if (ret >= 0 && excl && excl->ops != exclude) { | 557 | if (ret >= 0 && excl && excl->ops != exclude) { |
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 9cb5c3b307a6..8cf353e8c3e0 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/wait.h> | 16 | #include <linux/wait.h> |
17 | 17 | ||
18 | struct completion; | 18 | struct completion; |
19 | struct reservation_object; | 19 | struct dma_resv; |
20 | 20 | ||
21 | struct i915_sw_fence { | 21 | struct i915_sw_fence { |
22 | wait_queue_head_t wait; | 22 | wait_queue_head_t wait; |
@@ -82,7 +82,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | |||
82 | gfp_t gfp); | 82 | gfp_t gfp); |
83 | 83 | ||
84 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | 84 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, |
85 | struct reservation_object *resv, | 85 | struct dma_resv *resv, |
86 | const struct dma_fence_ops *exclude, | 86 | const struct dma_fence_ops *exclude, |
87 | bool write, | 87 | bool write, |
88 | unsigned long timeout, | 88 | unsigned long timeout, |
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index a57729be8312..ebfd03d117cd 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
@@ -99,10 +99,10 @@ static void __i915_vma_retire(struct i915_active *ref) | |||
99 | return; | 99 | return; |
100 | 100 | ||
101 | /* Prune the shared fence arrays iff completely idle (inc. external) */ | 101 | /* Prune the shared fence arrays iff completely idle (inc. external) */ |
102 | if (reservation_object_trylock(obj->base.resv)) { | 102 | if (dma_resv_trylock(obj->base.resv)) { |
103 | if (reservation_object_test_signaled_rcu(obj->base.resv, true)) | 103 | if (dma_resv_test_signaled_rcu(obj->base.resv, true)) |
104 | reservation_object_add_excl_fence(obj->base.resv, NULL); | 104 | dma_resv_add_excl_fence(obj->base.resv, NULL); |
105 | reservation_object_unlock(obj->base.resv); | 105 | dma_resv_unlock(obj->base.resv); |
106 | } | 106 | } |
107 | 107 | ||
108 | /* | 108 | /* |
@@ -903,7 +903,7 @@ static void export_fence(struct i915_vma *vma, | |||
903 | struct i915_request *rq, | 903 | struct i915_request *rq, |
904 | unsigned int flags) | 904 | unsigned int flags) |
905 | { | 905 | { |
906 | struct reservation_object *resv = vma->resv; | 906 | struct dma_resv *resv = vma->resv; |
907 | 907 | ||
908 | /* | 908 | /* |
909 | * Ignore errors from failing to allocate the new fence, we can't | 909 | * Ignore errors from failing to allocate the new fence, we can't |
@@ -911,9 +911,9 @@ static void export_fence(struct i915_vma *vma, | |||
911 | * synchronisation leading to rendering corruption. | 911 | * synchronisation leading to rendering corruption. |
912 | */ | 912 | */ |
913 | if (flags & EXEC_OBJECT_WRITE) | 913 | if (flags & EXEC_OBJECT_WRITE) |
914 | reservation_object_add_excl_fence(resv, &rq->fence); | 914 | dma_resv_add_excl_fence(resv, &rq->fence); |
915 | else if (reservation_object_reserve_shared(resv, 1) == 0) | 915 | else if (dma_resv_reserve_shared(resv, 1) == 0) |
916 | reservation_object_add_shared_fence(resv, &rq->fence); | 916 | dma_resv_add_shared_fence(resv, &rq->fence); |
917 | } | 917 | } |
918 | 918 | ||
919 | int i915_vma_move_to_active(struct i915_vma *vma, | 919 | int i915_vma_move_to_active(struct i915_vma *vma, |
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 4b769db649bf..59a497561fc4 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h | |||
@@ -55,7 +55,7 @@ struct i915_vma { | |||
55 | struct i915_address_space *vm; | 55 | struct i915_address_space *vm; |
56 | const struct i915_vma_ops *ops; | 56 | const struct i915_vma_ops *ops; |
57 | struct i915_fence_reg *fence; | 57 | struct i915_fence_reg *fence; |
58 | struct reservation_object *resv; /** Alias of obj->resv */ | 58 | struct dma_resv *resv; /** Alias of obj->resv */ |
59 | struct sg_table *pages; | 59 | struct sg_table *pages; |
60 | void __iomem *iomap; | 60 | void __iomem *iomap; |
61 | void *private; /* owned by creator */ | 61 | void *private; /* owned by creator */ |
@@ -299,16 +299,16 @@ void i915_vma_close(struct i915_vma *vma); | |||
299 | void i915_vma_reopen(struct i915_vma *vma); | 299 | void i915_vma_reopen(struct i915_vma *vma); |
300 | void i915_vma_destroy(struct i915_vma *vma); | 300 | void i915_vma_destroy(struct i915_vma *vma); |
301 | 301 | ||
302 | #define assert_vma_held(vma) reservation_object_assert_held((vma)->resv) | 302 | #define assert_vma_held(vma) dma_resv_assert_held((vma)->resv) |
303 | 303 | ||
304 | static inline void i915_vma_lock(struct i915_vma *vma) | 304 | static inline void i915_vma_lock(struct i915_vma *vma) |
305 | { | 305 | { |
306 | reservation_object_lock(vma->resv, NULL); | 306 | dma_resv_lock(vma->resv, NULL); |
307 | } | 307 | } |
308 | 308 | ||
309 | static inline void i915_vma_unlock(struct i915_vma *vma) | 309 | static inline void i915_vma_unlock(struct i915_vma *vma) |
310 | { | 310 | { |
311 | reservation_object_unlock(vma->resv); | 311 | dma_resv_unlock(vma->resv); |
312 | } | 312 | } |
313 | 313 | ||
314 | int __i915_vma_do_pin(struct i915_vma *vma, | 314 | int __i915_vma_do_pin(struct i915_vma *vma, |
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index fd1a024703d2..ff3d9acc24fc 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c | |||
@@ -136,7 +136,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo, | |||
136 | int err = 0; | 136 | int err = 0; |
137 | 137 | ||
138 | if (!write) { | 138 | if (!write) { |
139 | err = reservation_object_reserve_shared(bo->gem.resv, 1); | 139 | err = dma_resv_reserve_shared(bo->gem.resv, 1); |
140 | if (err) | 140 | if (err) |
141 | return err; | 141 | return err; |
142 | } | 142 | } |
@@ -296,9 +296,9 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) | |||
296 | 296 | ||
297 | for (i = 0; i < submit->nr_bos; i++) { | 297 | for (i = 0; i < submit->nr_bos; i++) { |
298 | if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) | 298 | if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) |
299 | reservation_object_add_excl_fence(bos[i]->gem.resv, fence); | 299 | dma_resv_add_excl_fence(bos[i]->gem.resv, fence); |
300 | else | 300 | else |
301 | reservation_object_add_shared_fence(bos[i]->gem.resv, fence); | 301 | dma_resv_add_shared_fence(bos[i]->gem.resv, fence); |
302 | } | 302 | } |
303 | 303 | ||
304 | lima_gem_unlock_bos(bos, submit->nr_bos, &ctx); | 304 | lima_gem_unlock_bos(bos, submit->nr_bos, &ctx); |
@@ -341,7 +341,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns) | |||
341 | 341 | ||
342 | timeout = drm_timeout_abs_to_jiffies(timeout_ns); | 342 | timeout = drm_timeout_abs_to_jiffies(timeout_ns); |
343 | 343 | ||
344 | ret = drm_gem_reservation_object_wait(file, handle, write, timeout); | 344 | ret = drm_gem_dma_resv_wait(file, handle, write, timeout); |
345 | if (ret == 0) | 345 | if (ret == 0) |
346 | ret = timeout ? -ETIMEDOUT : -EBUSY; | 346 | ret = timeout ? -ETIMEDOUT : -EBUSY; |
347 | 347 | ||
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c index ae40b080ae47..3f230a28a2dc 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c | |||
@@ -4,7 +4,7 @@ | |||
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/dma-buf.h> | 6 | #include <linux/dma-buf.h> |
7 | #include <linux/reservation.h> | 7 | #include <linux/dma-resv.h> |
8 | 8 | ||
9 | #include <drm/drm_modeset_helper.h> | 9 | #include <drm/drm_modeset_helper.h> |
10 | #include <drm/drm_fb_helper.h> | 10 | #include <drm/drm_fb_helper.h> |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 18da8d6ffc51..348a7ad2c044 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -663,13 +663,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass) | |||
663 | int msm_gem_sync_object(struct drm_gem_object *obj, | 663 | int msm_gem_sync_object(struct drm_gem_object *obj, |
664 | struct msm_fence_context *fctx, bool exclusive) | 664 | struct msm_fence_context *fctx, bool exclusive) |
665 | { | 665 | { |
666 | struct reservation_object_list *fobj; | 666 | struct dma_resv_list *fobj; |
667 | struct dma_fence *fence; | 667 | struct dma_fence *fence; |
668 | int i, ret; | 668 | int i, ret; |
669 | 669 | ||
670 | fobj = reservation_object_get_list(obj->resv); | 670 | fobj = dma_resv_get_list(obj->resv); |
671 | if (!fobj || (fobj->shared_count == 0)) { | 671 | if (!fobj || (fobj->shared_count == 0)) { |
672 | fence = reservation_object_get_excl(obj->resv); | 672 | fence = dma_resv_get_excl(obj->resv); |
673 | /* don't need to wait on our own fences, since ring is fifo */ | 673 | /* don't need to wait on our own fences, since ring is fifo */ |
674 | if (fence && (fence->context != fctx->context)) { | 674 | if (fence && (fence->context != fctx->context)) { |
675 | ret = dma_fence_wait(fence, true); | 675 | ret = dma_fence_wait(fence, true); |
@@ -683,7 +683,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
683 | 683 | ||
684 | for (i = 0; i < fobj->shared_count; i++) { | 684 | for (i = 0; i < fobj->shared_count; i++) { |
685 | fence = rcu_dereference_protected(fobj->shared[i], | 685 | fence = rcu_dereference_protected(fobj->shared[i], |
686 | reservation_object_held(obj->resv)); | 686 | dma_resv_held(obj->resv)); |
687 | if (fence->context != fctx->context) { | 687 | if (fence->context != fctx->context) { |
688 | ret = dma_fence_wait(fence, true); | 688 | ret = dma_fence_wait(fence, true); |
689 | if (ret) | 689 | if (ret) |
@@ -701,9 +701,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj, | |||
701 | WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); | 701 | WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); |
702 | msm_obj->gpu = gpu; | 702 | msm_obj->gpu = gpu; |
703 | if (exclusive) | 703 | if (exclusive) |
704 | reservation_object_add_excl_fence(obj->resv, fence); | 704 | dma_resv_add_excl_fence(obj->resv, fence); |
705 | else | 705 | else |
706 | reservation_object_add_shared_fence(obj->resv, fence); | 706 | dma_resv_add_shared_fence(obj->resv, fence); |
707 | list_del_init(&msm_obj->mm_list); | 707 | list_del_init(&msm_obj->mm_list); |
708 | list_add_tail(&msm_obj->mm_list, &gpu->active_list); | 708 | list_add_tail(&msm_obj->mm_list, &gpu->active_list); |
709 | } | 709 | } |
@@ -728,7 +728,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) | |||
728 | op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); | 728 | op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); |
729 | long ret; | 729 | long ret; |
730 | 730 | ||
731 | ret = reservation_object_wait_timeout_rcu(obj->resv, write, | 731 | ret = dma_resv_wait_timeout_rcu(obj->resv, write, |
732 | true, remain); | 732 | true, remain); |
733 | if (ret == 0) | 733 | if (ret == 0) |
734 | return remain == 0 ? -EBUSY : -ETIMEDOUT; | 734 | return remain == 0 ? -EBUSY : -ETIMEDOUT; |
@@ -760,8 +760,8 @@ static void describe_fence(struct dma_fence *fence, const char *type, | |||
760 | void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) | 760 | void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) |
761 | { | 761 | { |
762 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 762 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
763 | struct reservation_object *robj = obj->resv; | 763 | struct dma_resv *robj = obj->resv; |
764 | struct reservation_object_list *fobj; | 764 | struct dma_resv_list *fobj; |
765 | struct dma_fence *fence; | 765 | struct dma_fence *fence; |
766 | struct msm_gem_vma *vma; | 766 | struct msm_gem_vma *vma; |
767 | uint64_t off = drm_vma_node_start(&obj->vma_node); | 767 | uint64_t off = drm_vma_node_start(&obj->vma_node); |
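The msm_gem_sync_object() hunks above show the common "wait for everyone else's fences" loop under the held reservation lock; condensed with the renamed API it looks roughly like this (foo_sync_to_resv() is an illustrative stand-in, not a kernel function):

```c
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/rcupdate.h>

/* Wait for every fence on @resv whose context differs from @context.
 * Caller holds the dma_resv lock, which is what makes the
 * rcu_dereference_protected(..., dma_resv_held(resv)) pattern legal. */
static int foo_sync_to_resv(struct dma_resv *resv, u64 context)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(resv);
	if (!fobj || fobj->shared_count == 0) {
		/* No readers: only the exclusive (writer) fence matters. */
		fence = dma_resv_get_excl(resv);
		if (fence && fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
		return 0;
	}

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						  dma_resv_held(resv));
		if (fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
```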
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 8cfcf8f09e3e..9e0953c2b7ce 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #define __MSM_GEM_H__ | 8 | #define __MSM_GEM_H__ |
9 | 9 | ||
10 | #include <linux/kref.h> | 10 | #include <linux/kref.h> |
11 | #include <linux/reservation.h> | 11 | #include <linux/dma-resv.h> |
12 | #include "msm_drv.h" | 12 | #include "msm_drv.h" |
13 | 13 | ||
14 | /* Additional internal-use only BO flags: */ | 14 | /* Additional internal-use only BO flags: */ |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 348f8c2be806..2e1556b7af26 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
@@ -225,7 +225,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) | |||
225 | * strange place to call it. OTOH this is a | 225 | * strange place to call it. OTOH this is a |
226 | * convenient can-fail point to hook it in. | 226 | * convenient can-fail point to hook it in. |
227 | */ | 227 | */ |
228 | ret = reservation_object_reserve_shared(msm_obj->base.resv, | 228 | ret = dma_resv_reserve_shared(msm_obj->base.resv, |
229 | 1); | 229 | 1); |
230 | if (ret) | 230 | if (ret) |
231 | return ret; | 231 | return ret; |
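The submit_fence_sync() change keeps the rule that a shared-fence slot is reserved while errors can still be returned, so the later dma_resv_add_shared_fence() cannot fail; a stripped-down sketch (foo_prepare_bo() is hypothetical):

```c
#include <linux/dma-resv.h>

/* Called from a can-fail point of the submit path, with the BO's
 * reservation lock held, before the job is committed. */
static int foo_prepare_bo(struct dma_resv *resv)
{
	int ret;

	/* Guarantee room for one more shared fence up front. */
	ret = dma_resv_reserve_shared(resv, 1);
	if (ret)
		return ret;

	/* ... validate the BO, build the command stream, etc. ... */
	return 0;
}
```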
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 878ef6822812..e8506335cd15 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/of_graph.h> | 17 | #include <linux/of_graph.h> |
18 | #include <linux/of_reserved_mem.h> | 18 | #include <linux/of_reserved_mem.h> |
19 | #include <linux/pm_runtime.h> | 19 | #include <linux/pm_runtime.h> |
20 | #include <linux/reservation.h> | 20 | #include <linux/dma-resv.h> |
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | 22 | ||
23 | #include <drm/drm_atomic.h> | 23 | #include <drm/drm_atomic.h> |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 89f8e76a2d7d..027a01b97d1c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c | |||
@@ -457,7 +457,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) | |||
457 | asyw->image.handle[0] = ctxdma->object.handle; | 457 | asyw->image.handle[0] = ctxdma->object.handle; |
458 | } | 458 | } |
459 | 459 | ||
460 | asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.base.resv); | 460 | asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv); |
461 | asyw->image.offset[0] = fb->nvbo->bo.offset; | 461 | asyw->image.offset[0] = fb->nvbo->bo.offset; |
462 | 462 | ||
463 | if (wndw->func->prepare) { | 463 | if (wndw->func->prepare) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 99e391be9370..e0b1bbee936f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -188,7 +188,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, | |||
188 | int | 188 | int |
189 | nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, | 189 | nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, |
190 | uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, | 190 | uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, |
191 | struct sg_table *sg, struct reservation_object *robj, | 191 | struct sg_table *sg, struct dma_resv *robj, |
192 | struct nouveau_bo **pnvbo) | 192 | struct nouveau_bo **pnvbo) |
193 | { | 193 | { |
194 | struct nouveau_drm *drm = cli->drm; | 194 | struct nouveau_drm *drm = cli->drm; |
@@ -1324,7 +1324,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, | |||
1324 | { | 1324 | { |
1325 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); | 1325 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
1326 | struct drm_device *dev = drm->dev; | 1326 | struct drm_device *dev = drm->dev; |
1327 | struct dma_fence *fence = reservation_object_get_excl(bo->base.resv); | 1327 | struct dma_fence *fence = dma_resv_get_excl(bo->base.resv); |
1328 | 1328 | ||
1329 | nv10_bo_put_tile_region(dev, *old_tile, fence); | 1329 | nv10_bo_put_tile_region(dev, *old_tile, fence); |
1330 | *old_tile = new_tile; | 1330 | *old_tile = new_tile; |
@@ -1655,12 +1655,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) | |||
1655 | void | 1655 | void |
1656 | nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) | 1656 | nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) |
1657 | { | 1657 | { |
1658 | struct reservation_object *resv = nvbo->bo.base.resv; | 1658 | struct dma_resv *resv = nvbo->bo.base.resv; |
1659 | 1659 | ||
1660 | if (exclusive) | 1660 | if (exclusive) |
1661 | reservation_object_add_excl_fence(resv, &fence->base); | 1661 | dma_resv_add_excl_fence(resv, &fence->base); |
1662 | else if (fence) | 1662 | else if (fence) |
1663 | reservation_object_add_shared_fence(resv, &fence->base); | 1663 | dma_resv_add_shared_fence(resv, &fence->base); |
1664 | } | 1664 | } |
1665 | 1665 | ||
1666 | struct ttm_bo_driver nouveau_bo_driver = { | 1666 | struct ttm_bo_driver nouveau_bo_driver = { |
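nouveau_bo_fence() above is the usual writer-versus-reader split when publishing a completion fence; approximately (foo_bo_fence() is an illustrative name, and the shared case assumes a slot was reserved earlier with dma_resv_reserve_shared()):

```c
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Publish @fence on @resv with the reservation lock held: writes become
 * the exclusive fence, reads are appended to the shared list. */
static void foo_bo_fence(struct dma_resv *resv, struct dma_fence *fence,
			 bool is_write)
{
	if (is_write)
		dma_resv_add_excl_fence(resv, fence);
	else
		dma_resv_add_shared_fence(resv, fence);
}
```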
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index d675efe8e7f9..3ae84834bd5c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h | |||
@@ -73,7 +73,7 @@ extern struct ttm_bo_driver nouveau_bo_driver; | |||
73 | void nouveau_bo_move_init(struct nouveau_drm *); | 73 | void nouveau_bo_move_init(struct nouveau_drm *); |
74 | int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags, | 74 | int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags, |
75 | u32 tile_mode, u32 tile_flags, struct sg_table *sg, | 75 | u32 tile_mode, u32 tile_flags, struct sg_table *sg, |
76 | struct reservation_object *robj, | 76 | struct dma_resv *robj, |
77 | struct nouveau_bo **); | 77 | struct nouveau_bo **); |
78 | int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig); | 78 | int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig); |
79 | int nouveau_bo_unpin(struct nouveau_bo *); | 79 | int nouveau_bo_unpin(struct nouveau_bo *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index e5f249ab216a..8df390078c85 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -335,20 +335,20 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e | |||
335 | { | 335 | { |
336 | struct nouveau_fence_chan *fctx = chan->fence; | 336 | struct nouveau_fence_chan *fctx = chan->fence; |
337 | struct dma_fence *fence; | 337 | struct dma_fence *fence; |
338 | struct reservation_object *resv = nvbo->bo.base.resv; | 338 | struct dma_resv *resv = nvbo->bo.base.resv; |
339 | struct reservation_object_list *fobj; | 339 | struct dma_resv_list *fobj; |
340 | struct nouveau_fence *f; | 340 | struct nouveau_fence *f; |
341 | int ret = 0, i; | 341 | int ret = 0, i; |
342 | 342 | ||
343 | if (!exclusive) { | 343 | if (!exclusive) { |
344 | ret = reservation_object_reserve_shared(resv, 1); | 344 | ret = dma_resv_reserve_shared(resv, 1); |
345 | 345 | ||
346 | if (ret) | 346 | if (ret) |
347 | return ret; | 347 | return ret; |
348 | } | 348 | } |
349 | 349 | ||
350 | fobj = reservation_object_get_list(resv); | 350 | fobj = dma_resv_get_list(resv); |
351 | fence = reservation_object_get_excl(resv); | 351 | fence = dma_resv_get_excl(resv); |
352 | 352 | ||
353 | if (fence && (!exclusive || !fobj || !fobj->shared_count)) { | 353 | if (fence && (!exclusive || !fobj || !fobj->shared_count)) { |
354 | struct nouveau_channel *prev = NULL; | 354 | struct nouveau_channel *prev = NULL; |
@@ -377,7 +377,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e | |||
377 | bool must_wait = true; | 377 | bool must_wait = true; |
378 | 378 | ||
379 | fence = rcu_dereference_protected(fobj->shared[i], | 379 | fence = rcu_dereference_protected(fobj->shared[i], |
380 | reservation_object_held(resv)); | 380 | dma_resv_held(resv)); |
381 | 381 | ||
382 | f = nouveau_local_fence(fence, chan->drm); | 382 | f = nouveau_local_fence(fence, chan->drm); |
383 | if (f) { | 383 | if (f) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index c7368aa0bdec..c77302f969e8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -887,7 +887,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, | |||
887 | return -ENOENT; | 887 | return -ENOENT; |
888 | nvbo = nouveau_gem_object(gem); | 888 | nvbo = nouveau_gem_object(gem); |
889 | 889 | ||
890 | lret = reservation_object_wait_timeout_rcu(nvbo->bo.base.resv, write, true, | 890 | lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true, |
891 | no_wait ? 0 : 30 * HZ); | 891 | no_wait ? 0 : 30 * HZ); |
892 | if (!lret) | 892 | if (!lret) |
893 | ret = -EBUSY; | 893 | ret = -EBUSY; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index e86ad7ae622b..7262ced9688a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c | |||
@@ -62,16 +62,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, | |||
62 | { | 62 | { |
63 | struct nouveau_drm *drm = nouveau_drm(dev); | 63 | struct nouveau_drm *drm = nouveau_drm(dev); |
64 | struct nouveau_bo *nvbo; | 64 | struct nouveau_bo *nvbo; |
65 | struct reservation_object *robj = attach->dmabuf->resv; | 65 | struct dma_resv *robj = attach->dmabuf->resv; |
66 | u32 flags = 0; | 66 | u32 flags = 0; |
67 | int ret; | 67 | int ret; |
68 | 68 | ||
69 | flags = TTM_PL_FLAG_TT; | 69 | flags = TTM_PL_FLAG_TT; |
70 | 70 | ||
71 | reservation_object_lock(robj, NULL); | 71 | dma_resv_lock(robj, NULL); |
72 | ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0, | 72 | ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0, |
73 | sg, robj, &nvbo); | 73 | sg, robj, &nvbo); |
74 | reservation_object_unlock(robj); | 74 | dma_resv_unlock(robj); |
75 | if (ret) | 75 | if (ret) |
76 | return ERR_PTR(ret); | 76 | return ERR_PTR(ret); |
77 | 77 | ||
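The nouveau prime-import hunk keeps the imported BO on the exporter's dma_resv and holds that object locked across BO creation, as radeon does further below; a sketch of the pattern, where struct foo_bo and foo_bo_create() are hypothetical stand-ins for a driver's own BO type and constructor:

```c
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/err.h>
#include <drm/drm_gem.h>

struct foo_bo {
	struct drm_gem_object gem;
	/* driver-specific state ... */
};

/* Hypothetical constructor that, like nouveau_bo_new() or
 * radeon_bo_create(), accepts an externally owned struct dma_resv for
 * imported buffers instead of embedding its own. */
int foo_bo_create(struct drm_device *dev, size_t size, struct sg_table *sg,
		  struct dma_resv *resv, struct foo_bo **out);

struct drm_gem_object *foo_prime_import(struct drm_device *dev,
					struct dma_buf_attachment *attach,
					struct sg_table *sg)
{
	struct dma_resv *resv = attach->dmabuf->resv;
	struct foo_bo *bo;
	int ret;

	/* The exporter's reservation object backs the new BO, so it is
	 * locked for the duration of BO creation. */
	dma_resv_lock(resv, NULL);
	ret = foo_bo_create(dev, attach->dmabuf->size, sg, resv, &bo);
	dma_resv_unlock(resv);
	if (ret)
		return ERR_PTR(ret);

	return &bo->gem;
}
```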
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index a1352750984c..b41754658681 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c | |||
@@ -274,7 +274,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data, | |||
274 | if (!gem_obj) | 274 | if (!gem_obj) |
275 | return -ENOENT; | 275 | return -ENOENT; |
276 | 276 | ||
277 | ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true, | 277 | ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true, |
278 | true, timeout); | 278 | true, timeout); |
279 | if (!ret) | 279 | if (!ret) |
280 | ret = timeout ? -ETIMEDOUT : -EBUSY; | 280 | ret = timeout ? -ETIMEDOUT : -EBUSY; |
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index d567ce98494c..0fc4539fd08d 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/io.h> | 6 | #include <linux/io.h> |
7 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
8 | #include <linux/pm_runtime.h> | 8 | #include <linux/pm_runtime.h> |
9 | #include <linux/reservation.h> | 9 | #include <linux/dma-resv.h> |
10 | #include <drm/gpu_scheduler.h> | 10 | #include <drm/gpu_scheduler.h> |
11 | #include <drm/panfrost_drm.h> | 11 | #include <drm/panfrost_drm.h> |
12 | 12 | ||
@@ -199,7 +199,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos, | |||
199 | int i; | 199 | int i; |
200 | 200 | ||
201 | for (i = 0; i < bo_count; i++) | 201 | for (i = 0; i < bo_count; i++) |
202 | implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv); | 202 | implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv); |
203 | } | 203 | } |
204 | 204 | ||
205 | static void panfrost_attach_object_fences(struct drm_gem_object **bos, | 205 | static void panfrost_attach_object_fences(struct drm_gem_object **bos, |
@@ -209,7 +209,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos, | |||
209 | int i; | 209 | int i; |
210 | 210 | ||
211 | for (i = 0; i < bo_count; i++) | 211 | for (i = 0; i < bo_count; i++) |
212 | reservation_object_add_excl_fence(bos[i]->resv, fence); | 212 | dma_resv_add_excl_fence(bos[i]->resv, fence); |
213 | } | 213 | } |
214 | 214 | ||
215 | int panfrost_job_push(struct panfrost_job *job) | 215 | int panfrost_job_push(struct panfrost_job *job) |
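The panfrost helpers above boil implicit sync down to two steps: read each BO's current exclusive fence as a job dependency before scheduling, then attach the job's completion fence as the new exclusive fence; roughly (the foo_job_* names are illustrative):

```c
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <drm/drm_gem.h>

/* Collect dependencies: the _rcu getter takes its own references, so no
 * reservation lock is needed here. */
static void foo_job_acquire_fences(struct drm_gem_object **bos, int bo_count,
				   struct dma_fence **deps)
{
	int i;

	for (i = 0; i < bo_count; i++)
		deps[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

/* Publish the job's fence; the caller holds all reservation locks,
 * e.g. via drm_gem_lock_reservations(). */
static void foo_job_attach_fence(struct drm_gem_object **bos, int bo_count,
				 struct dma_fence *done_fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, done_fence);
}
```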
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 94439212a5c5..a4f4175bbdbe 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c | |||
@@ -57,7 +57,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data) | |||
57 | struct qxl_bo *bo; | 57 | struct qxl_bo *bo; |
58 | 58 | ||
59 | list_for_each_entry(bo, &qdev->gem.objects, list) { | 59 | list_for_each_entry(bo, &qdev->gem.objects, list) { |
60 | struct reservation_object_list *fobj; | 60 | struct dma_resv_list *fobj; |
61 | int rel; | 61 | int rel; |
62 | 62 | ||
63 | rcu_read_lock(); | 63 | rcu_read_lock(); |
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index df55b83e0a55..312216caeea2 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c | |||
@@ -238,7 +238,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo) | |||
238 | return ret; | 238 | return ret; |
239 | } | 239 | } |
240 | 240 | ||
241 | ret = reservation_object_reserve_shared(bo->tbo.base.resv, 1); | 241 | ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1); |
242 | if (ret) | 242 | if (ret) |
243 | return ret; | 243 | return ret; |
244 | 244 | ||
@@ -458,9 +458,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) | |||
458 | list_for_each_entry(entry, &release->bos, head) { | 458 | list_for_each_entry(entry, &release->bos, head) { |
459 | bo = entry->bo; | 459 | bo = entry->bo; |
460 | 460 | ||
461 | reservation_object_add_shared_fence(bo->base.resv, &release->base); | 461 | dma_resv_add_shared_fence(bo->base.resv, &release->base); |
462 | ttm_bo_add_to_lru(bo); | 462 | ttm_bo_add_to_lru(bo); |
463 | reservation_object_unlock(bo->base.resv); | 463 | dma_resv_unlock(bo->base.resv); |
464 | } | 464 | } |
465 | spin_unlock(&glob->lru_lock); | 465 | spin_unlock(&glob->lru_lock); |
466 | ww_acquire_fini(&release->ticket); | 466 | ww_acquire_fini(&release->ticket); |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 40f4d29edfe2..62eab82a64f9 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -3659,7 +3659,7 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev, | |||
3659 | struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, | 3659 | struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, |
3660 | uint64_t src_offset, uint64_t dst_offset, | 3660 | uint64_t src_offset, uint64_t dst_offset, |
3661 | unsigned num_gpu_pages, | 3661 | unsigned num_gpu_pages, |
3662 | struct reservation_object *resv) | 3662 | struct dma_resv *resv) |
3663 | { | 3663 | { |
3664 | struct radeon_fence *fence; | 3664 | struct radeon_fence *fence; |
3665 | struct radeon_sync sync; | 3665 | struct radeon_sync sync; |
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 589217a7e435..35b9dc6ce46a 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
@@ -579,7 +579,7 @@ void cik_sdma_fini(struct radeon_device *rdev) | |||
579 | struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, | 579 | struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, |
580 | uint64_t src_offset, uint64_t dst_offset, | 580 | uint64_t src_offset, uint64_t dst_offset, |
581 | unsigned num_gpu_pages, | 581 | unsigned num_gpu_pages, |
582 | struct reservation_object *resv) | 582 | struct dma_resv *resv) |
583 | { | 583 | { |
584 | struct radeon_fence *fence; | 584 | struct radeon_fence *fence; |
585 | struct radeon_sync sync; | 585 | struct radeon_sync sync; |
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index 5505a04ca402..a46ee6c2099d 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c | |||
@@ -108,7 +108,7 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, | |||
108 | uint64_t src_offset, | 108 | uint64_t src_offset, |
109 | uint64_t dst_offset, | 109 | uint64_t dst_offset, |
110 | unsigned num_gpu_pages, | 110 | unsigned num_gpu_pages, |
111 | struct reservation_object *resv) | 111 | struct dma_resv *resv) |
112 | { | 112 | { |
113 | struct radeon_fence *fence; | 113 | struct radeon_fence *fence; |
114 | struct radeon_sync sync; | 114 | struct radeon_sync sync; |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 5c05193da520..7089dfc8c2a9 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -891,7 +891,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev, | |||
891 | uint64_t src_offset, | 891 | uint64_t src_offset, |
892 | uint64_t dst_offset, | 892 | uint64_t dst_offset, |
893 | unsigned num_gpu_pages, | 893 | unsigned num_gpu_pages, |
894 | struct reservation_object *resv) | 894 | struct dma_resv *resv) |
895 | { | 895 | { |
896 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 896 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
897 | struct radeon_fence *fence; | 897 | struct radeon_fence *fence; |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 9ce6dd83d284..840401413c58 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -84,7 +84,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev, | |||
84 | uint64_t src_offset, | 84 | uint64_t src_offset, |
85 | uint64_t dst_offset, | 85 | uint64_t dst_offset, |
86 | unsigned num_gpu_pages, | 86 | unsigned num_gpu_pages, |
87 | struct reservation_object *resv) | 87 | struct dma_resv *resv) |
88 | { | 88 | { |
89 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 89 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
90 | struct radeon_fence *fence; | 90 | struct radeon_fence *fence; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 7d175a9e8330..e937cc01910d 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2963,7 +2963,7 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev, | |||
2963 | struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, | 2963 | struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, |
2964 | uint64_t src_offset, uint64_t dst_offset, | 2964 | uint64_t src_offset, uint64_t dst_offset, |
2965 | unsigned num_gpu_pages, | 2965 | unsigned num_gpu_pages, |
2966 | struct reservation_object *resv) | 2966 | struct dma_resv *resv) |
2967 | { | 2967 | { |
2968 | struct radeon_fence *fence; | 2968 | struct radeon_fence *fence; |
2969 | struct radeon_sync sync; | 2969 | struct radeon_sync sync; |
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 35d92ef8a0d4..af6c0da45f28 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c | |||
@@ -444,7 +444,7 @@ void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |||
444 | struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, | 444 | struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, |
445 | uint64_t src_offset, uint64_t dst_offset, | 445 | uint64_t src_offset, uint64_t dst_offset, |
446 | unsigned num_gpu_pages, | 446 | unsigned num_gpu_pages, |
447 | struct reservation_object *resv) | 447 | struct dma_resv *resv) |
448 | { | 448 | { |
449 | struct radeon_fence *fence; | 449 | struct radeon_fence *fence; |
450 | struct radeon_sync sync; | 450 | struct radeon_sync sync; |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3f7701321d21..de1d090df034 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -619,7 +619,7 @@ void radeon_sync_fence(struct radeon_sync *sync, | |||
619 | struct radeon_fence *fence); | 619 | struct radeon_fence *fence); |
620 | int radeon_sync_resv(struct radeon_device *rdev, | 620 | int radeon_sync_resv(struct radeon_device *rdev, |
621 | struct radeon_sync *sync, | 621 | struct radeon_sync *sync, |
622 | struct reservation_object *resv, | 622 | struct dma_resv *resv, |
623 | bool shared); | 623 | bool shared); |
624 | int radeon_sync_rings(struct radeon_device *rdev, | 624 | int radeon_sync_rings(struct radeon_device *rdev, |
625 | struct radeon_sync *sync, | 625 | struct radeon_sync *sync, |
@@ -1912,20 +1912,20 @@ struct radeon_asic { | |||
1912 | uint64_t src_offset, | 1912 | uint64_t src_offset, |
1913 | uint64_t dst_offset, | 1913 | uint64_t dst_offset, |
1914 | unsigned num_gpu_pages, | 1914 | unsigned num_gpu_pages, |
1915 | struct reservation_object *resv); | 1915 | struct dma_resv *resv); |
1916 | u32 blit_ring_index; | 1916 | u32 blit_ring_index; |
1917 | struct radeon_fence *(*dma)(struct radeon_device *rdev, | 1917 | struct radeon_fence *(*dma)(struct radeon_device *rdev, |
1918 | uint64_t src_offset, | 1918 | uint64_t src_offset, |
1919 | uint64_t dst_offset, | 1919 | uint64_t dst_offset, |
1920 | unsigned num_gpu_pages, | 1920 | unsigned num_gpu_pages, |
1921 | struct reservation_object *resv); | 1921 | struct dma_resv *resv); |
1922 | u32 dma_ring_index; | 1922 | u32 dma_ring_index; |
1923 | /* method used for bo copy */ | 1923 | /* method used for bo copy */ |
1924 | struct radeon_fence *(*copy)(struct radeon_device *rdev, | 1924 | struct radeon_fence *(*copy)(struct radeon_device *rdev, |
1925 | uint64_t src_offset, | 1925 | uint64_t src_offset, |
1926 | uint64_t dst_offset, | 1926 | uint64_t dst_offset, |
1927 | unsigned num_gpu_pages, | 1927 | unsigned num_gpu_pages, |
1928 | struct reservation_object *resv); | 1928 | struct dma_resv *resv); |
1929 | /* ring used for bo copies */ | 1929 | /* ring used for bo copies */ |
1930 | u32 copy_ring_index; | 1930 | u32 copy_ring_index; |
1931 | } copy; | 1931 | } copy; |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e3f036c20d64..a74fa18cd27b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -86,7 +86,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev, | |||
86 | uint64_t src_offset, | 86 | uint64_t src_offset, |
87 | uint64_t dst_offset, | 87 | uint64_t dst_offset, |
88 | unsigned num_gpu_pages, | 88 | unsigned num_gpu_pages, |
89 | struct reservation_object *resv); | 89 | struct dma_resv *resv); |
90 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, | 90 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, |
91 | uint32_t tiling_flags, uint32_t pitch, | 91 | uint32_t tiling_flags, uint32_t pitch, |
92 | uint32_t offset, uint32_t obj_size); | 92 | uint32_t offset, uint32_t obj_size); |
@@ -157,7 +157,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev, | |||
157 | uint64_t src_offset, | 157 | uint64_t src_offset, |
158 | uint64_t dst_offset, | 158 | uint64_t dst_offset, |
159 | unsigned num_gpu_pages, | 159 | unsigned num_gpu_pages, |
160 | struct reservation_object *resv); | 160 | struct dma_resv *resv); |
161 | void r200_set_safe_registers(struct radeon_device *rdev); | 161 | void r200_set_safe_registers(struct radeon_device *rdev); |
162 | 162 | ||
163 | /* | 163 | /* |
@@ -347,11 +347,11 @@ int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); | |||
347 | struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, | 347 | struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, |
348 | uint64_t src_offset, uint64_t dst_offset, | 348 | uint64_t src_offset, uint64_t dst_offset, |
349 | unsigned num_gpu_pages, | 349 | unsigned num_gpu_pages, |
350 | struct reservation_object *resv); | 350 | struct dma_resv *resv); |
351 | struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, | 351 | struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, |
352 | uint64_t src_offset, uint64_t dst_offset, | 352 | uint64_t src_offset, uint64_t dst_offset, |
353 | unsigned num_gpu_pages, | 353 | unsigned num_gpu_pages, |
354 | struct reservation_object *resv); | 354 | struct dma_resv *resv); |
355 | void r600_hpd_init(struct radeon_device *rdev); | 355 | void r600_hpd_init(struct radeon_device *rdev); |
356 | void r600_hpd_fini(struct radeon_device *rdev); | 356 | void r600_hpd_fini(struct radeon_device *rdev); |
357 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 357 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
@@ -473,7 +473,7 @@ void r700_cp_fini(struct radeon_device *rdev); | |||
473 | struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, | 473 | struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, |
474 | uint64_t src_offset, uint64_t dst_offset, | 474 | uint64_t src_offset, uint64_t dst_offset, |
475 | unsigned num_gpu_pages, | 475 | unsigned num_gpu_pages, |
476 | struct reservation_object *resv); | 476 | struct dma_resv *resv); |
477 | u32 rv770_get_xclk(struct radeon_device *rdev); | 477 | u32 rv770_get_xclk(struct radeon_device *rdev); |
478 | int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); | 478 | int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); |
479 | int rv770_get_temp(struct radeon_device *rdev); | 479 | int rv770_get_temp(struct radeon_device *rdev); |
@@ -547,7 +547,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, | |||
547 | struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, | 547 | struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, |
548 | uint64_t src_offset, uint64_t dst_offset, | 548 | uint64_t src_offset, uint64_t dst_offset, |
549 | unsigned num_gpu_pages, | 549 | unsigned num_gpu_pages, |
550 | struct reservation_object *resv); | 550 | struct dma_resv *resv); |
551 | int evergreen_get_temp(struct radeon_device *rdev); | 551 | int evergreen_get_temp(struct radeon_device *rdev); |
552 | int evergreen_get_allowed_info_register(struct radeon_device *rdev, | 552 | int evergreen_get_allowed_info_register(struct radeon_device *rdev, |
553 | u32 reg, u32 *val); | 553 | u32 reg, u32 *val); |
@@ -725,7 +725,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); | |||
725 | struct radeon_fence *si_copy_dma(struct radeon_device *rdev, | 725 | struct radeon_fence *si_copy_dma(struct radeon_device *rdev, |
726 | uint64_t src_offset, uint64_t dst_offset, | 726 | uint64_t src_offset, uint64_t dst_offset, |
727 | unsigned num_gpu_pages, | 727 | unsigned num_gpu_pages, |
728 | struct reservation_object *resv); | 728 | struct dma_resv *resv); |
729 | 729 | ||
730 | void si_dma_vm_copy_pages(struct radeon_device *rdev, | 730 | void si_dma_vm_copy_pages(struct radeon_device *rdev, |
731 | struct radeon_ib *ib, | 731 | struct radeon_ib *ib, |
@@ -796,11 +796,11 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | |||
796 | struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, | 796 | struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, |
797 | uint64_t src_offset, uint64_t dst_offset, | 797 | uint64_t src_offset, uint64_t dst_offset, |
798 | unsigned num_gpu_pages, | 798 | unsigned num_gpu_pages, |
799 | struct reservation_object *resv); | 799 | struct dma_resv *resv); |
800 | struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, | 800 | struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, |
801 | uint64_t src_offset, uint64_t dst_offset, | 801 | uint64_t src_offset, uint64_t dst_offset, |
802 | unsigned num_gpu_pages, | 802 | unsigned num_gpu_pages, |
803 | struct reservation_object *resv); | 803 | struct dma_resv *resv); |
804 | int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); | 804 | int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); |
805 | int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); | 805 | int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
806 | bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); | 806 | bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); |
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 1ea50ce16312..ac9a5ec481c3 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -35,7 +35,7 @@ | |||
35 | static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, | 35 | static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, |
36 | uint64_t saddr, uint64_t daddr, | 36 | uint64_t saddr, uint64_t daddr, |
37 | int flag, int n, | 37 | int flag, int n, |
38 | struct reservation_object *resv) | 38 | struct dma_resv *resv) |
39 | { | 39 | { |
40 | unsigned long start_jiffies; | 40 | unsigned long start_jiffies; |
41 | unsigned long end_jiffies; | 41 | unsigned long end_jiffies; |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 7e5254a34e84..7b5460678382 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -255,7 +255,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p) | |||
255 | int r; | 255 | int r; |
256 | 256 | ||
257 | list_for_each_entry(reloc, &p->validated, tv.head) { | 257 | list_for_each_entry(reloc, &p->validated, tv.head) { |
258 | struct reservation_object *resv; | 258 | struct dma_resv *resv; |
259 | 259 | ||
260 | resv = reloc->robj->tbo.base.resv; | 260 | resv = reloc->robj->tbo.base.resv; |
261 | r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, | 261 | r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 7bf73230ac0b..e81b01f8db90 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, | |||
533 | DRM_ERROR("failed to pin new rbo buffer before flip\n"); | 533 | DRM_ERROR("failed to pin new rbo buffer before flip\n"); |
534 | goto cleanup; | 534 | goto cleanup; |
535 | } | 535 | } |
536 | work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.base.resv)); | 536 | work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv)); |
537 | radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); | 537 | radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); |
538 | radeon_bo_unreserve(new_rbo); | 538 | radeon_bo_unreserve(new_rbo); |
539 | 539 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 03873f21a734..4cf58dbbe439 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -114,7 +114,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj, | |||
114 | } | 114 | } |
115 | if (domain == RADEON_GEM_DOMAIN_CPU) { | 115 | if (domain == RADEON_GEM_DOMAIN_CPU) { |
116 | /* Asking for cpu access wait for object idle */ | 116 | /* Asking for cpu access wait for object idle */ |
117 | r = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); | 117 | r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); |
118 | if (!r) | 118 | if (!r) |
119 | r = -EBUSY; | 119 | r = -EBUSY; |
120 | 120 | ||
@@ -449,7 +449,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
449 | } | 449 | } |
450 | robj = gem_to_radeon_bo(gobj); | 450 | robj = gem_to_radeon_bo(gobj); |
451 | 451 | ||
452 | r = reservation_object_test_signaled_rcu(robj->tbo.base.resv, true); | 452 | r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true); |
453 | if (r == 0) | 453 | if (r == 0) |
454 | r = -EBUSY; | 454 | r = -EBUSY; |
455 | else | 455 | else |
@@ -478,7 +478,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
478 | } | 478 | } |
479 | robj = gem_to_radeon_bo(gobj); | 479 | robj = gem_to_radeon_bo(gobj); |
480 | 480 | ||
481 | ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); | 481 | ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); |
482 | if (ret == 0) | 482 | if (ret == 0) |
483 | r = -EBUSY; | 483 | r = -EBUSY; |
484 | else if (ret < 0) | 484 | else if (ret < 0) |
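The radeon busy and wait-idle ioctls above show the two read-side query styles on a dma_resv: a non-blocking signaled test and a bounded RCU wait; condensed (foo_bo_busy() and foo_bo_wait_idle() are hypothetical helpers):

```c
#include <linux/dma-resv.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Non-blocking check: true means every fence (shared and exclusive,
 * i.e. readers and the writer) has signaled. */
static int foo_bo_busy(struct dma_resv *resv)
{
	return dma_resv_test_signaled_rcu(resv, true) ? 0 : -EBUSY;
}

/* Bounded wait without taking the reservation lock: 0 means timeout,
 * a negative value is an error (e.g. -ERESTARTSYS), positive is the
 * remaining jiffies. */
static int foo_bo_wait_idle(struct dma_resv *resv)
{
	long ret;

	ret = dma_resv_wait_timeout_rcu(resv, true, true, 30 * HZ);
	if (ret == 0)
		return -EBUSY;
	if (ret < 0)
		return ret;

	return 0;
}
```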
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index 0d64ace0e6c1..6902f998ede9 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c | |||
@@ -163,7 +163,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn, | |||
163 | continue; | 163 | continue; |
164 | } | 164 | } |
165 | 165 | ||
166 | r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, | 166 | r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, |
167 | true, false, MAX_SCHEDULE_TIMEOUT); | 167 | true, false, MAX_SCHEDULE_TIMEOUT); |
168 | if (r <= 0) | 168 | if (r <= 0) |
169 | DRM_ERROR("(%ld) failed to wait for user bo\n", r); | 169 | DRM_ERROR("(%ld) failed to wait for user bo\n", r); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 9db8ba29ef68..2abe1eab471f 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -183,7 +183,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
183 | int radeon_bo_create(struct radeon_device *rdev, | 183 | int radeon_bo_create(struct radeon_device *rdev, |
184 | unsigned long size, int byte_align, bool kernel, | 184 | unsigned long size, int byte_align, bool kernel, |
185 | u32 domain, u32 flags, struct sg_table *sg, | 185 | u32 domain, u32 flags, struct sg_table *sg, |
186 | struct reservation_object *resv, | 186 | struct dma_resv *resv, |
187 | struct radeon_bo **bo_ptr) | 187 | struct radeon_bo **bo_ptr) |
188 | { | 188 | { |
189 | struct radeon_bo *bo; | 189 | struct radeon_bo *bo; |
@@ -610,7 +610,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo) | |||
610 | int steal; | 610 | int steal; |
611 | int i; | 611 | int i; |
612 | 612 | ||
613 | reservation_object_assert_held(bo->tbo.base.resv); | 613 | dma_resv_assert_held(bo->tbo.base.resv); |
614 | 614 | ||
615 | if (!bo->tiling_flags) | 615 | if (!bo->tiling_flags) |
616 | return 0; | 616 | return 0; |
@@ -736,7 +736,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo, | |||
736 | uint32_t *tiling_flags, | 736 | uint32_t *tiling_flags, |
737 | uint32_t *pitch) | 737 | uint32_t *pitch) |
738 | { | 738 | { |
739 | reservation_object_assert_held(bo->tbo.base.resv); | 739 | dma_resv_assert_held(bo->tbo.base.resv); |
740 | 740 | ||
741 | if (tiling_flags) | 741 | if (tiling_flags) |
742 | *tiling_flags = bo->tiling_flags; | 742 | *tiling_flags = bo->tiling_flags; |
@@ -748,7 +748,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, | |||
748 | bool force_drop) | 748 | bool force_drop) |
749 | { | 749 | { |
750 | if (!force_drop) | 750 | if (!force_drop) |
751 | reservation_object_assert_held(bo->tbo.base.resv); | 751 | dma_resv_assert_held(bo->tbo.base.resv); |
752 | 752 | ||
753 | if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) | 753 | if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) |
754 | return 0; | 754 | return 0; |
@@ -870,10 +870,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) | |||
870 | void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, | 870 | void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, |
871 | bool shared) | 871 | bool shared) |
872 | { | 872 | { |
873 | struct reservation_object *resv = bo->tbo.base.resv; | 873 | struct dma_resv *resv = bo->tbo.base.resv; |
874 | 874 | ||
875 | if (shared) | 875 | if (shared) |
876 | reservation_object_add_shared_fence(resv, &fence->base); | 876 | dma_resv_add_shared_fence(resv, &fence->base); |
877 | else | 877 | else |
878 | reservation_object_add_excl_fence(resv, &fence->base); | 878 | dma_resv_add_excl_fence(resv, &fence->base); |
879 | } | 879 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index e5554bf9140e..d23f2ed4126e 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
@@ -126,7 +126,7 @@ extern int radeon_bo_create(struct radeon_device *rdev, | |||
126 | unsigned long size, int byte_align, | 126 | unsigned long size, int byte_align, |
127 | bool kernel, u32 domain, u32 flags, | 127 | bool kernel, u32 domain, u32 flags, |
128 | struct sg_table *sg, | 128 | struct sg_table *sg, |
129 | struct reservation_object *resv, | 129 | struct dma_resv *resv, |
130 | struct radeon_bo **bo_ptr); | 130 | struct radeon_bo **bo_ptr); |
131 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); | 131 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); |
132 | extern void radeon_bo_kunmap(struct radeon_bo *bo); | 132 | extern void radeon_bo_kunmap(struct radeon_bo *bo); |
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index 52b0d0cd8cbe..b906e8fbd5f3 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c | |||
@@ -63,15 +63,15 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, | |||
63 | struct dma_buf_attachment *attach, | 63 | struct dma_buf_attachment *attach, |
64 | struct sg_table *sg) | 64 | struct sg_table *sg) |
65 | { | 65 | { |
66 | struct reservation_object *resv = attach->dmabuf->resv; | 66 | struct dma_resv *resv = attach->dmabuf->resv; |
67 | struct radeon_device *rdev = dev->dev_private; | 67 | struct radeon_device *rdev = dev->dev_private; |
68 | struct radeon_bo *bo; | 68 | struct radeon_bo *bo; |
69 | int ret; | 69 | int ret; |
70 | 70 | ||
71 | reservation_object_lock(resv, NULL); | 71 | dma_resv_lock(resv, NULL); |
72 | ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false, | 72 | ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false, |
73 | RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo); | 73 | RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo); |
74 | reservation_object_unlock(resv); | 74 | dma_resv_unlock(resv); |
75 | if (ret) | 75 | if (ret) |
76 | return ERR_PTR(ret); | 76 | return ERR_PTR(ret); |
77 | 77 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index 8c9780b5a884..55cc77a73c7b 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c | |||
@@ -87,30 +87,30 @@ void radeon_sync_fence(struct radeon_sync *sync, | |||
87 | */ | 87 | */ |
88 | int radeon_sync_resv(struct radeon_device *rdev, | 88 | int radeon_sync_resv(struct radeon_device *rdev, |
89 | struct radeon_sync *sync, | 89 | struct radeon_sync *sync, |
90 | struct reservation_object *resv, | 90 | struct dma_resv *resv, |
91 | bool shared) | 91 | bool shared) |
92 | { | 92 | { |
93 | struct reservation_object_list *flist; | 93 | struct dma_resv_list *flist; |
94 | struct dma_fence *f; | 94 | struct dma_fence *f; |
95 | struct radeon_fence *fence; | 95 | struct radeon_fence *fence; |
96 | unsigned i; | 96 | unsigned i; |
97 | int r = 0; | 97 | int r = 0; |
98 | 98 | ||
99 | /* always sync to the exclusive fence */ | 99 | /* always sync to the exclusive fence */ |
100 | f = reservation_object_get_excl(resv); | 100 | f = dma_resv_get_excl(resv); |
101 | fence = f ? to_radeon_fence(f) : NULL; | 101 | fence = f ? to_radeon_fence(f) : NULL; |
102 | if (fence && fence->rdev == rdev) | 102 | if (fence && fence->rdev == rdev) |
103 | radeon_sync_fence(sync, fence); | 103 | radeon_sync_fence(sync, fence); |
104 | else if (f) | 104 | else if (f) |
105 | r = dma_fence_wait(f, true); | 105 | r = dma_fence_wait(f, true); |
106 | 106 | ||
107 | flist = reservation_object_get_list(resv); | 107 | flist = dma_resv_get_list(resv); |
108 | if (shared || !flist || r) | 108 | if (shared || !flist || r) |
109 | return r; | 109 | return r; |
110 | 110 | ||
111 | for (i = 0; i < flist->shared_count; ++i) { | 111 | for (i = 0; i < flist->shared_count; ++i) { |
112 | f = rcu_dereference_protected(flist->shared[i], | 112 | f = rcu_dereference_protected(flist->shared[i], |
113 | reservation_object_held(resv)); | 113 | dma_resv_held(resv)); |
114 | fence = to_radeon_fence(f); | 114 | fence = to_radeon_fence(f); |
115 | if (fence && fence->rdev == rdev) | 115 | if (fence && fence->rdev == rdev) |
116 | radeon_sync_fence(sync, fence); | 116 | radeon_sync_fence(sync, fence); |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 311e69c2ed7f..1ad5c3b86b64 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
477 | return -EINVAL; | 477 | return -EINVAL; |
478 | } | 478 | } |
479 | 479 | ||
480 | f = reservation_object_get_excl(bo->tbo.base.resv); | 480 | f = dma_resv_get_excl(bo->tbo.base.resv); |
481 | if (f) { | 481 | if (f) { |
482 | r = radeon_fence_wait((struct radeon_fence *)f, false); | 482 | r = radeon_fence_wait((struct radeon_fence *)f, false); |
483 | if (r) { | 483 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index e48a05533126..e0ad547786e8 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
@@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev, | |||
831 | int r; | 831 | int r; |
832 | 832 | ||
833 | radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true); | 833 | radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true); |
834 | r = reservation_object_reserve_shared(pt->tbo.base.resv, 1); | 834 | r = dma_resv_reserve_shared(pt->tbo.base.resv, 1); |
835 | if (r) | 835 | if (r) |
836 | return r; | 836 | return r; |
837 | 837 | ||
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index 0866b38ef264..4c91614b5e70 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c | |||
@@ -42,7 +42,7 @@ | |||
42 | struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, | 42 | struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, |
43 | uint64_t src_offset, uint64_t dst_offset, | 43 | uint64_t src_offset, uint64_t dst_offset, |
44 | unsigned num_gpu_pages, | 44 | unsigned num_gpu_pages, |
45 | struct reservation_object *resv) | 45 | struct dma_resv *resv) |
46 | { | 46 | { |
47 | struct radeon_fence *fence; | 47 | struct radeon_fence *fence; |
48 | struct radeon_sync sync; | 48 | struct radeon_sync sync; |
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 4773bb7d947e..d2fa302a5be9 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c | |||
@@ -231,7 +231,7 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, | |||
231 | struct radeon_fence *si_copy_dma(struct radeon_device *rdev, | 231 | struct radeon_fence *si_copy_dma(struct radeon_device *rdev, |
232 | uint64_t src_offset, uint64_t dst_offset, | 232 | uint64_t src_offset, uint64_t dst_offset, |
233 | unsigned num_gpu_pages, | 233 | unsigned num_gpu_pages, |
234 | struct reservation_object *resv) | 234 | struct dma_resv *resv) |
235 | { | 235 | { |
236 | struct radeon_fence *fence; | 236 | struct radeon_fence *fence; |
237 | struct radeon_sync sync; | 237 | struct radeon_sync sync; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 10a861a1690c..58d1f2b28132 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -41,7 +41,7 @@ | |||
41 | #include <linux/file.h> | 41 | #include <linux/file.h> |
42 | #include <linux/module.h> | 42 | #include <linux/module.h> |
43 | #include <linux/atomic.h> | 43 | #include <linux/atomic.h> |
44 | #include <linux/reservation.h> | 44 | #include <linux/dma-resv.h> |
45 | 45 | ||
46 | static void ttm_bo_global_kobj_release(struct kobject *kobj); | 46 | static void ttm_bo_global_kobj_release(struct kobject *kobj); |
47 | 47 | ||
@@ -161,7 +161,7 @@ static void ttm_bo_release_list(struct kref *list_kref) | |||
161 | atomic_dec(&bo->bdev->glob->bo_count); | 161 | atomic_dec(&bo->bdev->glob->bo_count); |
162 | dma_fence_put(bo->moving); | 162 | dma_fence_put(bo->moving); |
163 | if (!ttm_bo_uses_embedded_gem_object(bo)) | 163 | if (!ttm_bo_uses_embedded_gem_object(bo)) |
164 | reservation_object_fini(&bo->base._resv); | 164 | dma_resv_fini(&bo->base._resv); |
165 | mutex_destroy(&bo->wu_mutex); | 165 | mutex_destroy(&bo->wu_mutex); |
166 | bo->destroy(bo); | 166 | bo->destroy(bo); |
167 | ttm_mem_global_free(bdev->glob->mem_glob, acc_size); | 167 | ttm_mem_global_free(bdev->glob->mem_glob, acc_size); |
@@ -173,7 +173,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, | |||
173 | struct ttm_bo_device *bdev = bo->bdev; | 173 | struct ttm_bo_device *bdev = bo->bdev; |
174 | struct ttm_mem_type_manager *man; | 174 | struct ttm_mem_type_manager *man; |
175 | 175 | ||
176 | reservation_object_assert_held(bo->base.resv); | 176 | dma_resv_assert_held(bo->base.resv); |
177 | 177 | ||
178 | if (!list_empty(&bo->lru)) | 178 | if (!list_empty(&bo->lru)) |
179 | return; | 179 | return; |
@@ -244,7 +244,7 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos, | |||
244 | void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, | 244 | void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, |
245 | struct ttm_lru_bulk_move *bulk) | 245 | struct ttm_lru_bulk_move *bulk) |
246 | { | 246 | { |
247 | reservation_object_assert_held(bo->base.resv); | 247 | dma_resv_assert_held(bo->base.resv); |
248 | 248 | ||
249 | ttm_bo_del_from_lru(bo); | 249 | ttm_bo_del_from_lru(bo); |
250 | ttm_bo_add_to_lru(bo); | 250 | ttm_bo_add_to_lru(bo); |
@@ -277,8 +277,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) | |||
277 | if (!pos->first) | 277 | if (!pos->first) |
278 | continue; | 278 | continue; |
279 | 279 | ||
280 | reservation_object_assert_held(pos->first->base.resv); | 280 | dma_resv_assert_held(pos->first->base.resv); |
281 | reservation_object_assert_held(pos->last->base.resv); | 281 | dma_resv_assert_held(pos->last->base.resv); |
282 | 282 | ||
283 | man = &pos->first->bdev->man[TTM_PL_TT]; | 283 | man = &pos->first->bdev->man[TTM_PL_TT]; |
284 | list_bulk_move_tail(&man->lru[i], &pos->first->lru, | 284 | list_bulk_move_tail(&man->lru[i], &pos->first->lru, |
@@ -292,8 +292,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) | |||
292 | if (!pos->first) | 292 | if (!pos->first) |
293 | continue; | 293 | continue; |
294 | 294 | ||
295 | reservation_object_assert_held(pos->first->base.resv); | 295 | dma_resv_assert_held(pos->first->base.resv); |
296 | reservation_object_assert_held(pos->last->base.resv); | 296 | dma_resv_assert_held(pos->last->base.resv); |
297 | 297 | ||
298 | man = &pos->first->bdev->man[TTM_PL_VRAM]; | 298 | man = &pos->first->bdev->man[TTM_PL_VRAM]; |
299 | list_bulk_move_tail(&man->lru[i], &pos->first->lru, | 299 | list_bulk_move_tail(&man->lru[i], &pos->first->lru, |
@@ -307,8 +307,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) | |||
307 | if (!pos->first) | 307 | if (!pos->first) |
308 | continue; | 308 | continue; |
309 | 309 | ||
310 | reservation_object_assert_held(pos->first->base.resv); | 310 | dma_resv_assert_held(pos->first->base.resv); |
311 | reservation_object_assert_held(pos->last->base.resv); | 311 | dma_resv_assert_held(pos->last->base.resv); |
312 | 312 | ||
313 | lru = &pos->first->bdev->glob->swap_lru[i]; | 313 | lru = &pos->first->bdev->glob->swap_lru[i]; |
314 | list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); | 314 | list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); |
@@ -442,29 +442,29 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) | |||
442 | if (bo->base.resv == &bo->base._resv) | 442 | if (bo->base.resv == &bo->base._resv) |
443 | return 0; | 443 | return 0; |
444 | 444 | ||
445 | BUG_ON(!reservation_object_trylock(&bo->base._resv)); | 445 | BUG_ON(!dma_resv_trylock(&bo->base._resv)); |
446 | 446 | ||
447 | r = reservation_object_copy_fences(&bo->base._resv, bo->base.resv); | 447 | r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv); |
448 | if (r) | 448 | if (r) |
449 | reservation_object_unlock(&bo->base._resv); | 449 | dma_resv_unlock(&bo->base._resv); |
450 | 450 | ||
451 | return r; | 451 | return r; |
452 | } | 452 | } |
453 | 453 | ||
454 | static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) | 454 | static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) |
455 | { | 455 | { |
456 | struct reservation_object_list *fobj; | 456 | struct dma_resv_list *fobj; |
457 | struct dma_fence *fence; | 457 | struct dma_fence *fence; |
458 | int i; | 458 | int i; |
459 | 459 | ||
460 | fobj = reservation_object_get_list(&bo->base._resv); | 460 | fobj = dma_resv_get_list(&bo->base._resv); |
461 | fence = reservation_object_get_excl(&bo->base._resv); | 461 | fence = dma_resv_get_excl(&bo->base._resv); |
462 | if (fence && !fence->ops->signaled) | 462 | if (fence && !fence->ops->signaled) |
463 | dma_fence_enable_sw_signaling(fence); | 463 | dma_fence_enable_sw_signaling(fence); |
464 | 464 | ||
465 | for (i = 0; fobj && i < fobj->shared_count; ++i) { | 465 | for (i = 0; fobj && i < fobj->shared_count; ++i) { |
466 | fence = rcu_dereference_protected(fobj->shared[i], | 466 | fence = rcu_dereference_protected(fobj->shared[i], |
467 | reservation_object_held(bo->base.resv)); | 467 | dma_resv_held(bo->base.resv)); |
468 | 468 | ||
469 | if (!fence->ops->signaled) | 469 | if (!fence->ops->signaled) |
470 | dma_fence_enable_sw_signaling(fence); | 470 | dma_fence_enable_sw_signaling(fence); |
@@ -482,23 +482,23 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) | |||
482 | /* Last resort, if we fail to allocate memory for the | 482 | /* Last resort, if we fail to allocate memory for the |
483 | * fences block for the BO to become idle | 483 | * fences block for the BO to become idle |
484 | */ | 484 | */ |
485 | reservation_object_wait_timeout_rcu(bo->base.resv, true, false, | 485 | dma_resv_wait_timeout_rcu(bo->base.resv, true, false, |
486 | 30 * HZ); | 486 | 30 * HZ); |
487 | spin_lock(&glob->lru_lock); | 487 | spin_lock(&glob->lru_lock); |
488 | goto error; | 488 | goto error; |
489 | } | 489 | } |
490 | 490 | ||
491 | spin_lock(&glob->lru_lock); | 491 | spin_lock(&glob->lru_lock); |
492 | ret = reservation_object_trylock(bo->base.resv) ? 0 : -EBUSY; | 492 | ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY; |
493 | if (!ret) { | 493 | if (!ret) { |
494 | if (reservation_object_test_signaled_rcu(&bo->base._resv, true)) { | 494 | if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) { |
495 | ttm_bo_del_from_lru(bo); | 495 | ttm_bo_del_from_lru(bo); |
496 | spin_unlock(&glob->lru_lock); | 496 | spin_unlock(&glob->lru_lock); |
497 | if (bo->base.resv != &bo->base._resv) | 497 | if (bo->base.resv != &bo->base._resv) |
498 | reservation_object_unlock(&bo->base._resv); | 498 | dma_resv_unlock(&bo->base._resv); |
499 | 499 | ||
500 | ttm_bo_cleanup_memtype_use(bo); | 500 | ttm_bo_cleanup_memtype_use(bo); |
501 | reservation_object_unlock(bo->base.resv); | 501 | dma_resv_unlock(bo->base.resv); |
502 | return; | 502 | return; |
503 | } | 503 | } |
504 | 504 | ||
@@ -514,10 +514,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) | |||
514 | ttm_bo_add_to_lru(bo); | 514 | ttm_bo_add_to_lru(bo); |
515 | } | 515 | } |
516 | 516 | ||
517 | reservation_object_unlock(bo->base.resv); | 517 | dma_resv_unlock(bo->base.resv); |
518 | } | 518 | } |
519 | if (bo->base.resv != &bo->base._resv) | 519 | if (bo->base.resv != &bo->base._resv) |
520 | reservation_object_unlock(&bo->base._resv); | 520 | dma_resv_unlock(&bo->base._resv); |
521 | 521 | ||
522 | error: | 522 | error: |
523 | kref_get(&bo->list_kref); | 523 | kref_get(&bo->list_kref); |
@@ -546,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, | |||
546 | bool unlock_resv) | 546 | bool unlock_resv) |
547 | { | 547 | { |
548 | struct ttm_bo_global *glob = bo->bdev->glob; | 548 | struct ttm_bo_global *glob = bo->bdev->glob; |
549 | struct reservation_object *resv; | 549 | struct dma_resv *resv; |
550 | int ret; | 550 | int ret; |
551 | 551 | ||
552 | if (unlikely(list_empty(&bo->ddestroy))) | 552 | if (unlikely(list_empty(&bo->ddestroy))) |
@@ -554,7 +554,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, | |||
554 | else | 554 | else |
555 | resv = &bo->base._resv; | 555 | resv = &bo->base._resv; |
556 | 556 | ||
557 | if (reservation_object_test_signaled_rcu(resv, true)) | 557 | if (dma_resv_test_signaled_rcu(resv, true)) |
558 | ret = 0; | 558 | ret = 0; |
559 | else | 559 | else |
560 | ret = -EBUSY; | 560 | ret = -EBUSY; |
@@ -563,10 +563,10 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, | |||
563 | long lret; | 563 | long lret; |
564 | 564 | ||
565 | if (unlock_resv) | 565 | if (unlock_resv) |
566 | reservation_object_unlock(bo->base.resv); | 566 | dma_resv_unlock(bo->base.resv); |
567 | spin_unlock(&glob->lru_lock); | 567 | spin_unlock(&glob->lru_lock); |
568 | 568 | ||
569 | lret = reservation_object_wait_timeout_rcu(resv, true, | 569 | lret = dma_resv_wait_timeout_rcu(resv, true, |
570 | interruptible, | 570 | interruptible, |
571 | 30 * HZ); | 571 | 30 * HZ); |
572 | 572 | ||
@@ -576,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, | |||
576 | return -EBUSY; | 576 | return -EBUSY; |
577 | 577 | ||
578 | spin_lock(&glob->lru_lock); | 578 | spin_lock(&glob->lru_lock); |
579 | if (unlock_resv && !reservation_object_trylock(bo->base.resv)) { | 579 | if (unlock_resv && !dma_resv_trylock(bo->base.resv)) { |
580 | /* | 580 | /* |
581 | * We raced, and lost, someone else holds the reservation now, | 581 | * We raced, and lost, someone else holds the reservation now, |
582 | * and is probably busy in ttm_bo_cleanup_memtype_use. | 582 | * and is probably busy in ttm_bo_cleanup_memtype_use. |
@@ -593,7 +593,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, | |||
593 | 593 | ||
594 | if (ret || unlikely(list_empty(&bo->ddestroy))) { | 594 | if (ret || unlikely(list_empty(&bo->ddestroy))) { |
595 | if (unlock_resv) | 595 | if (unlock_resv) |
596 | reservation_object_unlock(bo->base.resv); | 596 | dma_resv_unlock(bo->base.resv); |
597 | spin_unlock(&glob->lru_lock); | 597 | spin_unlock(&glob->lru_lock); |
598 | return ret; | 598 | return ret; |
599 | } | 599 | } |
@@ -606,7 +606,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, | |||
606 | ttm_bo_cleanup_memtype_use(bo); | 606 | ttm_bo_cleanup_memtype_use(bo); |
607 | 607 | ||
608 | if (unlock_resv) | 608 | if (unlock_resv) |
609 | reservation_object_unlock(bo->base.resv); | 609 | dma_resv_unlock(bo->base.resv); |
610 | 610 | ||
611 | return 0; | 611 | return 0; |
612 | } | 612 | } |
@@ -634,12 +634,12 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) | |||
634 | 634 | ||
635 | if (remove_all || bo->base.resv != &bo->base._resv) { | 635 | if (remove_all || bo->base.resv != &bo->base._resv) { |
636 | spin_unlock(&glob->lru_lock); | 636 | spin_unlock(&glob->lru_lock); |
637 | reservation_object_lock(bo->base.resv, NULL); | 637 | dma_resv_lock(bo->base.resv, NULL); |
638 | 638 | ||
639 | spin_lock(&glob->lru_lock); | 639 | spin_lock(&glob->lru_lock); |
640 | ttm_bo_cleanup_refs(bo, false, !remove_all, true); | 640 | ttm_bo_cleanup_refs(bo, false, !remove_all, true); |
641 | 641 | ||
642 | } else if (reservation_object_trylock(bo->base.resv)) { | 642 | } else if (dma_resv_trylock(bo->base.resv)) { |
643 | ttm_bo_cleanup_refs(bo, false, !remove_all, true); | 643 | ttm_bo_cleanup_refs(bo, false, !remove_all, true); |
644 | } else { | 644 | } else { |
645 | spin_unlock(&glob->lru_lock); | 645 | spin_unlock(&glob->lru_lock); |
@@ -708,7 +708,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, | |||
708 | struct ttm_placement placement; | 708 | struct ttm_placement placement; |
709 | int ret = 0; | 709 | int ret = 0; |
710 | 710 | ||
711 | reservation_object_assert_held(bo->base.resv); | 711 | dma_resv_assert_held(bo->base.resv); |
712 | 712 | ||
713 | placement.num_placement = 0; | 713 | placement.num_placement = 0; |
714 | placement.num_busy_placement = 0; | 714 | placement.num_busy_placement = 0; |
@@ -779,7 +779,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, | |||
779 | bool ret = false; | 779 | bool ret = false; |
780 | 780 | ||
781 | if (bo->base.resv == ctx->resv) { | 781 | if (bo->base.resv == ctx->resv) { |
782 | reservation_object_assert_held(bo->base.resv); | 782 | dma_resv_assert_held(bo->base.resv); |
783 | if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT | 783 | if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT |
784 | || !list_empty(&bo->ddestroy)) | 784 | || !list_empty(&bo->ddestroy)) |
785 | ret = true; | 785 | ret = true; |
@@ -787,7 +787,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, | |||
787 | if (busy) | 787 | if (busy) |
788 | *busy = false; | 788 | *busy = false; |
789 | } else { | 789 | } else { |
790 | ret = reservation_object_trylock(bo->base.resv); | 790 | ret = dma_resv_trylock(bo->base.resv); |
791 | *locked = ret; | 791 | *locked = ret; |
792 | if (busy) | 792 | if (busy) |
793 | *busy = !ret; | 793 | *busy = !ret; |
@@ -815,10 +815,10 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo, | |||
815 | return -EBUSY; | 815 | return -EBUSY; |
816 | 816 | ||
817 | if (ctx->interruptible) | 817 | if (ctx->interruptible) |
818 | r = reservation_object_lock_interruptible(busy_bo->base.resv, | 818 | r = dma_resv_lock_interruptible(busy_bo->base.resv, |
819 | ticket); | 819 | ticket); |
820 | else | 820 | else |
821 | r = reservation_object_lock(busy_bo->base.resv, ticket); | 821 | r = dma_resv_lock(busy_bo->base.resv, ticket); |
822 | 822 | ||
823 | /* | 823 | /* |
824 | * TODO: It would be better to keep the BO locked until allocation is at | 824 | * TODO: It would be better to keep the BO locked until allocation is at |
@@ -826,7 +826,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo, | |||
826 | * of TTM. | 826 | * of TTM. |
827 | */ | 827 | */ |
828 | if (!r) | 828 | if (!r) |
829 | reservation_object_unlock(busy_bo->base.resv); | 829 | dma_resv_unlock(busy_bo->base.resv); |
830 | 830 | ||
831 | return r == -EDEADLK ? -EBUSY : r; | 831 | return r == -EDEADLK ? -EBUSY : r; |
832 | } | 832 | } |
@@ -852,7 +852,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, | |||
852 | if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, | 852 | if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, |
853 | &busy)) { | 853 | &busy)) { |
854 | if (busy && !busy_bo && ticket != | 854 | if (busy && !busy_bo && ticket != |
855 | reservation_object_locking_ctx(bo->base.resv)) | 855 | dma_resv_locking_ctx(bo->base.resv)) |
856 | busy_bo = bo; | 856 | busy_bo = bo; |
857 | continue; | 857 | continue; |
858 | } | 858 | } |
@@ -860,7 +860,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, | |||
860 | if (place && !bdev->driver->eviction_valuable(bo, | 860 | if (place && !bdev->driver->eviction_valuable(bo, |
861 | place)) { | 861 | place)) { |
862 | if (locked) | 862 | if (locked) |
863 | reservation_object_unlock(bo->base.resv); | 863 | dma_resv_unlock(bo->base.resv); |
864 | continue; | 864 | continue; |
865 | } | 865 | } |
866 | break; | 866 | break; |
@@ -932,9 +932,9 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, | |||
932 | spin_unlock(&man->move_lock); | 932 | spin_unlock(&man->move_lock); |
933 | 933 | ||
934 | if (fence) { | 934 | if (fence) { |
935 | reservation_object_add_shared_fence(bo->base.resv, fence); | 935 | dma_resv_add_shared_fence(bo->base.resv, fence); |
936 | 936 | ||
937 | ret = reservation_object_reserve_shared(bo->base.resv, 1); | 937 | ret = dma_resv_reserve_shared(bo->base.resv, 1); |
938 | if (unlikely(ret)) { | 938 | if (unlikely(ret)) { |
939 | dma_fence_put(fence); | 939 | dma_fence_put(fence); |
940 | return ret; | 940 | return ret; |
@@ -961,7 +961,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, | |||
961 | struct ww_acquire_ctx *ticket; | 961 | struct ww_acquire_ctx *ticket; |
962 | int ret; | 962 | int ret; |
963 | 963 | ||
964 | ticket = reservation_object_locking_ctx(bo->base.resv); | 964 | ticket = dma_resv_locking_ctx(bo->base.resv); |
965 | do { | 965 | do { |
966 | ret = (*man->func->get_node)(man, bo, place, mem); | 966 | ret = (*man->func->get_node)(man, bo, place, mem); |
967 | if (unlikely(ret != 0)) | 967 | if (unlikely(ret != 0)) |
@@ -1091,7 +1091,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
1091 | bool type_found = false; | 1091 | bool type_found = false; |
1092 | int i, ret; | 1092 | int i, ret; |
1093 | 1093 | ||
1094 | ret = reservation_object_reserve_shared(bo->base.resv, 1); | 1094 | ret = dma_resv_reserve_shared(bo->base.resv, 1); |
1095 | if (unlikely(ret)) | 1095 | if (unlikely(ret)) |
1096 | return ret; | 1096 | return ret; |
1097 | 1097 | ||
@@ -1172,7 +1172,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, | |||
1172 | int ret = 0; | 1172 | int ret = 0; |
1173 | struct ttm_mem_reg mem; | 1173 | struct ttm_mem_reg mem; |
1174 | 1174 | ||
1175 | reservation_object_assert_held(bo->base.resv); | 1175 | dma_resv_assert_held(bo->base.resv); |
1176 | 1176 | ||
1177 | mem.num_pages = bo->num_pages; | 1177 | mem.num_pages = bo->num_pages; |
1178 | mem.size = mem.num_pages << PAGE_SHIFT; | 1178 | mem.size = mem.num_pages << PAGE_SHIFT; |
@@ -1242,7 +1242,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, | |||
1242 | int ret; | 1242 | int ret; |
1243 | uint32_t new_flags; | 1243 | uint32_t new_flags; |
1244 | 1244 | ||
1245 | reservation_object_assert_held(bo->base.resv); | 1245 | dma_resv_assert_held(bo->base.resv); |
1246 | /* | 1246 | /* |
1247 | * Check whether we need to move buffer. | 1247 | * Check whether we need to move buffer. |
1248 | */ | 1248 | */ |
@@ -1279,7 +1279,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, | |||
1279 | struct ttm_operation_ctx *ctx, | 1279 | struct ttm_operation_ctx *ctx, |
1280 | size_t acc_size, | 1280 | size_t acc_size, |
1281 | struct sg_table *sg, | 1281 | struct sg_table *sg, |
1282 | struct reservation_object *resv, | 1282 | struct dma_resv *resv, |
1283 | void (*destroy) (struct ttm_buffer_object *)) | 1283 | void (*destroy) (struct ttm_buffer_object *)) |
1284 | { | 1284 | { |
1285 | int ret = 0; | 1285 | int ret = 0; |
@@ -1333,7 +1333,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, | |||
1333 | bo->sg = sg; | 1333 | bo->sg = sg; |
1334 | if (resv) { | 1334 | if (resv) { |
1335 | bo->base.resv = resv; | 1335 | bo->base.resv = resv; |
1336 | reservation_object_assert_held(bo->base.resv); | 1336 | dma_resv_assert_held(bo->base.resv); |
1337 | } else { | 1337 | } else { |
1338 | bo->base.resv = &bo->base._resv; | 1338 | bo->base.resv = &bo->base._resv; |
1339 | } | 1339 | } |
@@ -1342,7 +1342,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, | |||
1342 | * bo.gem is not initialized, so we have to setup the | 1342 | * bo.gem is not initialized, so we have to setup the |
1343 | * struct elements we want use regardless. | 1343 | * struct elements we want use regardless. |
1344 | */ | 1344 | */ |
1345 | reservation_object_init(&bo->base._resv); | 1345 | dma_resv_init(&bo->base._resv); |
1346 | drm_vma_node_reset(&bo->base.vma_node); | 1346 | drm_vma_node_reset(&bo->base.vma_node); |
1347 | } | 1347 | } |
1348 | atomic_inc(&bo->bdev->glob->bo_count); | 1348 | atomic_inc(&bo->bdev->glob->bo_count); |
@@ -1360,7 +1360,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, | |||
1360 | * since otherwise lockdep will be angered in radeon. | 1360 | * since otherwise lockdep will be angered in radeon. |
1361 | */ | 1361 | */ |
1362 | if (!resv) { | 1362 | if (!resv) { |
1363 | locked = reservation_object_trylock(bo->base.resv); | 1363 | locked = dma_resv_trylock(bo->base.resv); |
1364 | WARN_ON(!locked); | 1364 | WARN_ON(!locked); |
1365 | } | 1365 | } |
1366 | 1366 | ||
@@ -1394,7 +1394,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1394 | bool interruptible, | 1394 | bool interruptible, |
1395 | size_t acc_size, | 1395 | size_t acc_size, |
1396 | struct sg_table *sg, | 1396 | struct sg_table *sg, |
1397 | struct reservation_object *resv, | 1397 | struct dma_resv *resv, |
1398 | void (*destroy) (struct ttm_buffer_object *)) | 1398 | void (*destroy) (struct ttm_buffer_object *)) |
1399 | { | 1399 | { |
1400 | struct ttm_operation_ctx ctx = { interruptible, false }; | 1400 | struct ttm_operation_ctx ctx = { interruptible, false }; |
@@ -1804,13 +1804,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, | |||
1804 | long timeout = 15 * HZ; | 1804 | long timeout = 15 * HZ; |
1805 | 1805 | ||
1806 | if (no_wait) { | 1806 | if (no_wait) { |
1807 | if (reservation_object_test_signaled_rcu(bo->base.resv, true)) | 1807 | if (dma_resv_test_signaled_rcu(bo->base.resv, true)) |
1808 | return 0; | 1808 | return 0; |
1809 | else | 1809 | else |
1810 | return -EBUSY; | 1810 | return -EBUSY; |
1811 | } | 1811 | } |
1812 | 1812 | ||
1813 | timeout = reservation_object_wait_timeout_rcu(bo->base.resv, true, | 1813 | timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true, |
1814 | interruptible, timeout); | 1814 | interruptible, timeout); |
1815 | if (timeout < 0) | 1815 | if (timeout < 0) |
1816 | return timeout; | 1816 | return timeout; |
@@ -1818,7 +1818,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, | |||
1818 | if (timeout == 0) | 1818 | if (timeout == 0) |
1819 | return -EBUSY; | 1819 | return -EBUSY; |
1820 | 1820 | ||
1821 | reservation_object_add_excl_fence(bo->base.resv, NULL); | 1821 | dma_resv_add_excl_fence(bo->base.resv, NULL); |
1822 | return 0; | 1822 | return 0; |
1823 | } | 1823 | } |
1824 | EXPORT_SYMBOL(ttm_bo_wait); | 1824 | EXPORT_SYMBOL(ttm_bo_wait); |
@@ -1934,7 +1934,7 @@ out: | |||
1934 | * already swapped buffer. | 1934 | * already swapped buffer. |
1935 | */ | 1935 | */ |
1936 | if (locked) | 1936 | if (locked) |
1937 | reservation_object_unlock(bo->base.resv); | 1937 | dma_resv_unlock(bo->base.resv); |
1938 | kref_put(&bo->list_kref, ttm_bo_release_list); | 1938 | kref_put(&bo->list_kref, ttm_bo_release_list); |
1939 | return ret; | 1939 | return ret; |
1940 | } | 1940 | } |
@@ -1972,14 +1972,14 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo) | |||
1972 | ret = mutex_lock_interruptible(&bo->wu_mutex); | 1972 | ret = mutex_lock_interruptible(&bo->wu_mutex); |
1973 | if (unlikely(ret != 0)) | 1973 | if (unlikely(ret != 0)) |
1974 | return -ERESTARTSYS; | 1974 | return -ERESTARTSYS; |
1975 | if (!reservation_object_is_locked(bo->base.resv)) | 1975 | if (!dma_resv_is_locked(bo->base.resv)) |
1976 | goto out_unlock; | 1976 | goto out_unlock; |
1977 | ret = reservation_object_lock_interruptible(bo->base.resv, NULL); | 1977 | ret = dma_resv_lock_interruptible(bo->base.resv, NULL); |
1978 | if (ret == -EINTR) | 1978 | if (ret == -EINTR) |
1979 | ret = -ERESTARTSYS; | 1979 | ret = -ERESTARTSYS; |
1980 | if (unlikely(ret != 0)) | 1980 | if (unlikely(ret != 0)) |
1981 | goto out_unlock; | 1981 | goto out_unlock; |
1982 | reservation_object_unlock(bo->base.resv); | 1982 | dma_resv_unlock(bo->base.resv); |
1983 | 1983 | ||
1984 | out_unlock: | 1984 | out_unlock: |
1985 | mutex_unlock(&bo->wu_mutex); | 1985 | mutex_unlock(&bo->wu_mutex); |
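
A minimal sketch, not part of the patch, of how the renamed TTM-facing lock calls pair up after this change; the example_* helpers and the no_wait flag are hypothetical:

	#include <linux/dma-resv.h>

	/* Hypothetical helper mirroring the reserve paths in ttm_bo.c above. */
	static int example_reserve(struct dma_resv *resv,
				   struct ww_acquire_ctx *ticket, bool no_wait)
	{
		if (no_wait)
			/* Non-blocking attempt, as on the eviction/fault paths. */
			return dma_resv_trylock(resv) ? 0 : -EBUSY;

		/*
		 * May return -EDEADLK when lock ordering forces a back-off;
		 * callers then release every other lock held under @ticket and
		 * retry with dma_resv_lock_slow(resv, ticket).
		 */
		return dma_resv_lock(resv, ticket);
	}

	/* Hypothetical helper mirroring the unreserve paths above. */
	static void example_unreserve(struct dma_resv *resv)
	{
		dma_resv_assert_held(resv);
		dma_resv_unlock(resv);
	}
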
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 425a6d627b30..fe81c565e7ef 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -38,7 +38,7 @@ | |||
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/vmalloc.h> | 39 | #include <linux/vmalloc.h> |
40 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | #include <linux/reservation.h> | 41 | #include <linux/dma-resv.h> |
42 | 42 | ||
43 | struct ttm_transfer_obj { | 43 | struct ttm_transfer_obj { |
44 | struct ttm_buffer_object base; | 44 | struct ttm_buffer_object base; |
@@ -518,8 +518,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, | |||
518 | fbo->base.destroy = &ttm_transfered_destroy; | 518 | fbo->base.destroy = &ttm_transfered_destroy; |
519 | fbo->base.acc_size = 0; | 519 | fbo->base.acc_size = 0; |
520 | fbo->base.base.resv = &fbo->base.base._resv; | 520 | fbo->base.base.resv = &fbo->base.base._resv; |
521 | reservation_object_init(fbo->base.base.resv); | 521 | dma_resv_init(fbo->base.base.resv); |
522 | ret = reservation_object_trylock(fbo->base.base.resv); | 522 | ret = dma_resv_trylock(fbo->base.base.resv); |
523 | WARN_ON(!ret); | 523 | WARN_ON(!ret); |
524 | 524 | ||
525 | *new_obj = &fbo->base; | 525 | *new_obj = &fbo->base; |
@@ -689,7 +689,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
689 | int ret; | 689 | int ret; |
690 | struct ttm_buffer_object *ghost_obj; | 690 | struct ttm_buffer_object *ghost_obj; |
691 | 691 | ||
692 | reservation_object_add_excl_fence(bo->base.resv, fence); | 692 | dma_resv_add_excl_fence(bo->base.resv, fence); |
693 | if (evict) { | 693 | if (evict) { |
694 | ret = ttm_bo_wait(bo, false, false); | 694 | ret = ttm_bo_wait(bo, false, false); |
695 | if (ret) | 695 | if (ret) |
@@ -716,7 +716,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
716 | if (ret) | 716 | if (ret) |
717 | return ret; | 717 | return ret; |
718 | 718 | ||
719 | reservation_object_add_excl_fence(ghost_obj->base.resv, fence); | 719 | dma_resv_add_excl_fence(ghost_obj->base.resv, fence); |
720 | 720 | ||
721 | /** | 721 | /** |
722 | * If we're not moving to fixed memory, the TTM object | 722 | * If we're not moving to fixed memory, the TTM object |
@@ -752,7 +752,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, | |||
752 | 752 | ||
753 | int ret; | 753 | int ret; |
754 | 754 | ||
755 | reservation_object_add_excl_fence(bo->base.resv, fence); | 755 | dma_resv_add_excl_fence(bo->base.resv, fence); |
756 | 756 | ||
757 | if (!evict) { | 757 | if (!evict) { |
758 | struct ttm_buffer_object *ghost_obj; | 758 | struct ttm_buffer_object *ghost_obj; |
@@ -772,7 +772,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, | |||
772 | if (ret) | 772 | if (ret) |
773 | return ret; | 773 | return ret; |
774 | 774 | ||
775 | reservation_object_add_excl_fence(ghost_obj->base.resv, fence); | 775 | dma_resv_add_excl_fence(ghost_obj->base.resv, fence); |
776 | 776 | ||
777 | /** | 777 | /** |
778 | * If we're not moving to fixed memory, the TTM object | 778 | * If we're not moving to fixed memory, the TTM object |
@@ -841,7 +841,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) | |||
841 | if (ret) | 841 | if (ret) |
842 | return ret; | 842 | return ret; |
843 | 843 | ||
844 | ret = reservation_object_copy_fences(ghost->base.resv, bo->base.resv); | 844 | ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv); |
845 | /* Last resort, wait for the BO to be idle when we are OOM */ | 845 | /* Last resort, wait for the BO to be idle when we are OOM */ |
846 | if (ret) | 846 | if (ret) |
847 | ttm_bo_wait(bo, false, false); | 847 | ttm_bo_wait(bo, false, false); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 85f5bcbe0c76..76eedb963693 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -71,7 +71,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | |||
71 | ttm_bo_get(bo); | 71 | ttm_bo_get(bo); |
72 | up_read(&vmf->vma->vm_mm->mmap_sem); | 72 | up_read(&vmf->vma->vm_mm->mmap_sem); |
73 | (void) dma_fence_wait(bo->moving, true); | 73 | (void) dma_fence_wait(bo->moving, true); |
74 | reservation_object_unlock(bo->base.resv); | 74 | dma_resv_unlock(bo->base.resv); |
75 | ttm_bo_put(bo); | 75 | ttm_bo_put(bo); |
76 | goto out_unlock; | 76 | goto out_unlock; |
77 | } | 77 | } |
@@ -131,7 +131,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) | |||
131 | * for reserve, and if it fails, retry the fault after waiting | 131 | * for reserve, and if it fails, retry the fault after waiting |
132 | * for the buffer to become unreserved. | 132 | * for the buffer to become unreserved. |
133 | */ | 133 | */ |
134 | if (unlikely(!reservation_object_trylock(bo->base.resv))) { | 134 | if (unlikely(!dma_resv_trylock(bo->base.resv))) { |
135 | if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { | 135 | if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { |
136 | if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { | 136 | if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { |
137 | ttm_bo_get(bo); | 137 | ttm_bo_get(bo); |
@@ -296,7 +296,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) | |||
296 | out_io_unlock: | 296 | out_io_unlock: |
297 | ttm_mem_io_unlock(man); | 297 | ttm_mem_io_unlock(man); |
298 | out_unlock: | 298 | out_unlock: |
299 | reservation_object_unlock(bo->base.resv); | 299 | dma_resv_unlock(bo->base.resv); |
300 | return ret; | 300 | return ret; |
301 | } | 301 | } |
302 | 302 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 3aefe72fb5cb..131dae8f4170 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c | |||
@@ -39,7 +39,7 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list, | |||
39 | list_for_each_entry_continue_reverse(entry, list, head) { | 39 | list_for_each_entry_continue_reverse(entry, list, head) { |
40 | struct ttm_buffer_object *bo = entry->bo; | 40 | struct ttm_buffer_object *bo = entry->bo; |
41 | 41 | ||
42 | reservation_object_unlock(bo->base.resv); | 42 | dma_resv_unlock(bo->base.resv); |
43 | } | 43 | } |
44 | } | 44 | } |
45 | 45 | ||
@@ -71,7 +71,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, | |||
71 | 71 | ||
72 | if (list_empty(&bo->lru)) | 72 | if (list_empty(&bo->lru)) |
73 | ttm_bo_add_to_lru(bo); | 73 | ttm_bo_add_to_lru(bo); |
74 | reservation_object_unlock(bo->base.resv); | 74 | dma_resv_unlock(bo->base.resv); |
75 | } | 75 | } |
76 | spin_unlock(&glob->lru_lock); | 76 | spin_unlock(&glob->lru_lock); |
77 | 77 | ||
@@ -114,7 +114,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, | |||
114 | 114 | ||
115 | ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); | 115 | ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); |
116 | if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { | 116 | if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { |
117 | reservation_object_unlock(bo->base.resv); | 117 | dma_resv_unlock(bo->base.resv); |
118 | 118 | ||
119 | ret = -EBUSY; | 119 | ret = -EBUSY; |
120 | 120 | ||
@@ -130,7 +130,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, | |||
130 | if (!entry->num_shared) | 130 | if (!entry->num_shared) |
131 | continue; | 131 | continue; |
132 | 132 | ||
133 | ret = reservation_object_reserve_shared(bo->base.resv, | 133 | ret = dma_resv_reserve_shared(bo->base.resv, |
134 | entry->num_shared); | 134 | entry->num_shared); |
135 | if (!ret) | 135 | if (!ret) |
136 | continue; | 136 | continue; |
@@ -144,16 +144,16 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, | |||
144 | 144 | ||
145 | if (ret == -EDEADLK) { | 145 | if (ret == -EDEADLK) { |
146 | if (intr) { | 146 | if (intr) { |
147 | ret = reservation_object_lock_slow_interruptible(bo->base.resv, | 147 | ret = dma_resv_lock_slow_interruptible(bo->base.resv, |
148 | ticket); | 148 | ticket); |
149 | } else { | 149 | } else { |
150 | reservation_object_lock_slow(bo->base.resv, ticket); | 150 | dma_resv_lock_slow(bo->base.resv, ticket); |
151 | ret = 0; | 151 | ret = 0; |
152 | } | 152 | } |
153 | } | 153 | } |
154 | 154 | ||
155 | if (!ret && entry->num_shared) | 155 | if (!ret && entry->num_shared) |
156 | ret = reservation_object_reserve_shared(bo->base.resv, | 156 | ret = dma_resv_reserve_shared(bo->base.resv, |
157 | entry->num_shared); | 157 | entry->num_shared); |
158 | 158 | ||
159 | if (unlikely(ret != 0)) { | 159 | if (unlikely(ret != 0)) { |
@@ -201,14 +201,14 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, | |||
201 | list_for_each_entry(entry, list, head) { | 201 | list_for_each_entry(entry, list, head) { |
202 | bo = entry->bo; | 202 | bo = entry->bo; |
203 | if (entry->num_shared) | 203 | if (entry->num_shared) |
204 | reservation_object_add_shared_fence(bo->base.resv, fence); | 204 | dma_resv_add_shared_fence(bo->base.resv, fence); |
205 | else | 205 | else |
206 | reservation_object_add_excl_fence(bo->base.resv, fence); | 206 | dma_resv_add_excl_fence(bo->base.resv, fence); |
207 | if (list_empty(&bo->lru)) | 207 | if (list_empty(&bo->lru)) |
208 | ttm_bo_add_to_lru(bo); | 208 | ttm_bo_add_to_lru(bo); |
209 | else | 209 | else |
210 | ttm_bo_move_to_lru_tail(bo, NULL); | 210 | ttm_bo_move_to_lru_tail(bo, NULL); |
211 | reservation_object_unlock(bo->base.resv); | 211 | dma_resv_unlock(bo->base.resv); |
212 | } | 212 | } |
213 | spin_unlock(&glob->lru_lock); | 213 | spin_unlock(&glob->lru_lock); |
214 | if (ticket) | 214 | if (ticket) |
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 00b4a3337840..e0e9b4f69db6 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -48,7 +48,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) | |||
48 | struct ttm_bo_device *bdev = bo->bdev; | 48 | struct ttm_bo_device *bdev = bo->bdev; |
49 | uint32_t page_flags = 0; | 49 | uint32_t page_flags = 0; |
50 | 50 | ||
51 | reservation_object_assert_held(bo->base.resv); | 51 | dma_resv_assert_held(bo->base.resv); |
52 | 52 | ||
53 | if (bdev->need_dma32) | 53 | if (bdev->need_dma32) |
54 | page_flags |= TTM_PAGE_FLAG_DMA32; | 54 | page_flags |= TTM_PAGE_FLAG_DMA32; |
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 79744137d89f..5d80507b539b 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c | |||
@@ -409,7 +409,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, | |||
409 | if (args->pad != 0) | 409 | if (args->pad != 0) |
410 | return -EINVAL; | 410 | return -EINVAL; |
411 | 411 | ||
412 | ret = drm_gem_reservation_object_wait(file_priv, args->handle, | 412 | ret = drm_gem_dma_resv_wait(file_priv, args->handle, |
413 | true, timeout_jiffies); | 413 | true, timeout_jiffies); |
414 | 414 | ||
415 | /* Decrement the user's timeout, in case we got interrupted | 415 | /* Decrement the user's timeout, in case we got interrupted |
@@ -495,7 +495,7 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, | |||
495 | 495 | ||
496 | for (i = 0; i < job->bo_count; i++) { | 496 | for (i = 0; i < job->bo_count; i++) { |
497 | /* XXX: Use shared fences for read-only objects. */ | 497 | /* XXX: Use shared fences for read-only objects. */ |
498 | reservation_object_add_excl_fence(job->bo[i]->resv, | 498 | dma_resv_add_excl_fence(job->bo[i]->resv, |
499 | job->done_fence); | 499 | job->done_fence); |
500 | } | 500 | } |
501 | 501 | ||
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index b72b760e3018..7a06cb6e31c5 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c | |||
@@ -543,7 +543,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) | |||
543 | bo = to_vc4_bo(&exec->bo[i]->base); | 543 | bo = to_vc4_bo(&exec->bo[i]->base); |
544 | bo->seqno = seqno; | 544 | bo->seqno = seqno; |
545 | 545 | ||
546 | reservation_object_add_shared_fence(bo->base.base.resv, exec->fence); | 546 | dma_resv_add_shared_fence(bo->base.base.resv, exec->fence); |
547 | } | 547 | } |
548 | 548 | ||
549 | list_for_each_entry(bo, &exec->unref_list, unref_head) { | 549 | list_for_each_entry(bo, &exec->unref_list, unref_head) { |
@@ -554,7 +554,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) | |||
554 | bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); | 554 | bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); |
555 | bo->write_seqno = seqno; | 555 | bo->write_seqno = seqno; |
556 | 556 | ||
557 | reservation_object_add_excl_fence(bo->base.base.resv, exec->fence); | 557 | dma_resv_add_excl_fence(bo->base.base.resv, exec->fence); |
558 | } | 558 | } |
559 | } | 559 | } |
560 | 560 | ||
@@ -642,7 +642,7 @@ retry: | |||
642 | for (i = 0; i < exec->bo_count; i++) { | 642 | for (i = 0; i < exec->bo_count; i++) { |
643 | bo = &exec->bo[i]->base; | 643 | bo = &exec->bo[i]->base; |
644 | 644 | ||
645 | ret = reservation_object_reserve_shared(bo->resv, 1); | 645 | ret = dma_resv_reserve_shared(bo->resv, 1); |
646 | if (ret) { | 646 | if (ret) { |
647 | vc4_unlock_bo_reservations(dev, exec, acquire_ctx); | 647 | vc4_unlock_bo_reservations(dev, exec, acquire_ctx); |
648 | return ret; | 648 | return ret; |
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index d8630467549c..9268f6fc3f66 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c | |||
@@ -21,7 +21,7 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/dma-buf.h> | 23 | #include <linux/dma-buf.h> |
24 | #include <linux/reservation.h> | 24 | #include <linux/dma-resv.h> |
25 | 25 | ||
26 | #include <drm/drm_file.h> | 26 | #include <drm/drm_file.h> |
27 | 27 | ||
@@ -128,7 +128,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, | |||
128 | { | 128 | { |
129 | struct drm_vgem_fence_attach *arg = data; | 129 | struct drm_vgem_fence_attach *arg = data; |
130 | struct vgem_file *vfile = file->driver_priv; | 130 | struct vgem_file *vfile = file->driver_priv; |
131 | struct reservation_object *resv; | 131 | struct dma_resv *resv; |
132 | struct drm_gem_object *obj; | 132 | struct drm_gem_object *obj; |
133 | struct dma_fence *fence; | 133 | struct dma_fence *fence; |
134 | int ret; | 134 | int ret; |
@@ -151,7 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, | |||
151 | 151 | ||
152 | /* Check for a conflicting fence */ | 152 | /* Check for a conflicting fence */ |
153 | resv = obj->resv; | 153 | resv = obj->resv; |
154 | if (!reservation_object_test_signaled_rcu(resv, | 154 | if (!dma_resv_test_signaled_rcu(resv, |
155 | arg->flags & VGEM_FENCE_WRITE)) { | 155 | arg->flags & VGEM_FENCE_WRITE)) { |
156 | ret = -EBUSY; | 156 | ret = -EBUSY; |
157 | goto err_fence; | 157 | goto err_fence; |
@@ -159,12 +159,12 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, | |||
159 | 159 | ||
160 | /* Expose the fence via the dma-buf */ | 160 | /* Expose the fence via the dma-buf */ |
161 | ret = 0; | 161 | ret = 0; |
162 | reservation_object_lock(resv, NULL); | 162 | dma_resv_lock(resv, NULL); |
163 | if (arg->flags & VGEM_FENCE_WRITE) | 163 | if (arg->flags & VGEM_FENCE_WRITE) |
164 | reservation_object_add_excl_fence(resv, fence); | 164 | dma_resv_add_excl_fence(resv, fence); |
165 | else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0) | 165 | else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0) |
166 | reservation_object_add_shared_fence(resv, fence); | 166 | dma_resv_add_shared_fence(resv, fence); |
167 | reservation_object_unlock(resv); | 167 | dma_resv_unlock(resv); |
168 | 168 | ||
169 | /* Record the fence in our idr for later signaling */ | 169 | /* Record the fence in our idr for later signaling */ |
170 | if (ret == 0) { | 170 | if (ret == 0) { |
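
The vgem hunk above is a compact template for publishing a fence through the renamed API. A sketch under the same assumptions, with the hypothetical write flag standing in for the VGEM_FENCE_WRITE handling:

	#include <linux/dma-resv.h>
	#include <linux/dma-fence.h>

	/* Hypothetical: attach @fence to @resv as a writer or as a reader. */
	static int example_attach_fence(struct dma_resv *resv,
					struct dma_fence *fence, bool write)
	{
		int ret = 0;

		dma_resv_lock(resv, NULL);
		if (write) {
			/* Writers replace the exclusive fence. */
			dma_resv_add_excl_fence(resv, fence);
		} else {
			/* Readers need a reserved slot before adding a shared fence. */
			ret = dma_resv_reserve_shared(resv, 1);
			if (!ret)
				dma_resv_add_shared_fence(resv, fence);
		}
		dma_resv_unlock(resv);

		return ret;
	}
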
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 3c430dd65f67..0a88ef11b9d3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c | |||
@@ -396,7 +396,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, | |||
396 | (vgdev, qobj->hw_res_handle, | 396 | (vgdev, qobj->hw_res_handle, |
397 | vfpriv->ctx_id, offset, args->level, | 397 | vfpriv->ctx_id, offset, args->level, |
398 | &box, fence); | 398 | &box, fence); |
399 | reservation_object_add_excl_fence(qobj->tbo.base.resv, | 399 | dma_resv_add_excl_fence(qobj->tbo.base.resv, |
400 | &fence->f); | 400 | &fence->f); |
401 | 401 | ||
402 | dma_fence_put(&fence->f); | 402 | dma_fence_put(&fence->f); |
@@ -450,7 +450,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, | |||
450 | (vgdev, qobj, | 450 | (vgdev, qobj, |
451 | vfpriv ? vfpriv->ctx_id : 0, offset, | 451 | vfpriv ? vfpriv->ctx_id : 0, offset, |
452 | args->level, &box, fence); | 452 | args->level, &box, fence); |
453 | reservation_object_add_excl_fence(qobj->tbo.base.resv, | 453 | dma_resv_add_excl_fence(qobj->tbo.base.resv, |
454 | &fence->f); | 454 | &fence->f); |
455 | dma_fence_put(&fence->f); | 455 | dma_fence_put(&fence->f); |
456 | } | 456 | } |
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 3dc08f991a8d..a492ac3f4a7e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c | |||
@@ -212,7 +212,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, | |||
212 | 0, 0, vgfb->fence); | 212 | 0, 0, vgfb->fence); |
213 | ret = virtio_gpu_object_reserve(bo, false); | 213 | ret = virtio_gpu_object_reserve(bo, false); |
214 | if (!ret) { | 214 | if (!ret) { |
215 | reservation_object_add_excl_fence(bo->tbo.base.resv, | 215 | dma_resv_add_excl_fence(bo->tbo.base.resv, |
216 | &vgfb->fence->f); | 216 | &vgfb->fence->f); |
217 | dma_fence_put(&vgfb->fence->f); | 217 | dma_fence_put(&vgfb->fence->f); |
218 | vgfb->fence = NULL; | 218 | vgfb->fence = NULL; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index 6c01ad2785dd..bb46ca0c458f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | |||
@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, | |||
459 | 459 | ||
460 | /* Buffer objects need to be either pinned or reserved: */ | 460 | /* Buffer objects need to be either pinned or reserved: */ |
461 | if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT)) | 461 | if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT)) |
462 | reservation_object_assert_held(dst->base.resv); | 462 | dma_resv_assert_held(dst->base.resv); |
463 | if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT)) | 463 | if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT)) |
464 | reservation_object_assert_held(src->base.resv); | 464 | dma_resv_assert_held(src->base.resv); |
465 | 465 | ||
466 | if (dst->ttm->state == tt_unpopulated) { | 466 | if (dst->ttm->state == tt_unpopulated) { |
467 | ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx); | 467 | ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 369034c0de31..a05ef9d0a2e7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | |||
@@ -342,7 +342,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin) | |||
342 | uint32_t old_mem_type = bo->mem.mem_type; | 342 | uint32_t old_mem_type = bo->mem.mem_type; |
343 | int ret; | 343 | int ret; |
344 | 344 | ||
345 | reservation_object_assert_held(bo->base.resv); | 345 | dma_resv_assert_held(bo->base.resv); |
346 | 346 | ||
347 | if (pin) { | 347 | if (pin) { |
348 | if (vbo->pin_count++ > 0) | 348 | if (vbo->pin_count++ > 0) |
@@ -689,7 +689,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, | |||
689 | bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); | 689 | bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); |
690 | long lret; | 690 | long lret; |
691 | 691 | ||
692 | lret = reservation_object_wait_timeout_rcu | 692 | lret = dma_resv_wait_timeout_rcu |
693 | (bo->base.resv, true, true, | 693 | (bo->base.resv, true, true, |
694 | nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); | 694 | nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); |
695 | if (!lret) | 695 | if (!lret) |
@@ -1007,10 +1007,10 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo, | |||
1007 | 1007 | ||
1008 | if (fence == NULL) { | 1008 | if (fence == NULL) { |
1009 | vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | 1009 | vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); |
1010 | reservation_object_add_excl_fence(bo->base.resv, &fence->base); | 1010 | dma_resv_add_excl_fence(bo->base.resv, &fence->base); |
1011 | dma_fence_put(&fence->base); | 1011 | dma_fence_put(&fence->base); |
1012 | } else | 1012 | } else |
1013 | reservation_object_add_excl_fence(bo->base.resv, &fence->base); | 1013 | dma_resv_add_excl_fence(bo->base.resv, &fence->base); |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | 1016 | ||
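
A sketch, not taken from the patch, of the renamed wait helpers used in the vmwgfx and ttm hunks above; the no_wait flag and the 30 second timeout are assumptions:

	#include <linux/dma-resv.h>
	#include <linux/jiffies.h>

	/* Hypothetical: wait for all fences on @resv, or just poll them. */
	static int example_wait_idle(struct dma_resv *resv, bool no_wait)
	{
		long lret;

		if (no_wait)
			return dma_resv_test_signaled_rcu(resv, true) ? 0 : -EBUSY;

		/* true, true: wait for shared and exclusive fences, interruptibly. */
		lret = dma_resv_wait_timeout_rcu(resv, true, true, 30 * HZ);
		if (lret < 0)
			return lret;		/* e.g. -ERESTARTSYS */

		return lret ? 0 : -EBUSY;	/* zero jiffies left means timeout */
	}
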
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 7984f172ec4a..1e3bd58bc2d4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | |||
@@ -169,7 +169,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res) | |||
169 | } *cmd; | 169 | } *cmd; |
170 | 170 | ||
171 | WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); | 171 | WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); |
172 | reservation_object_assert_held(bo->base.resv); | 172 | dma_resv_assert_held(bo->base.resv); |
173 | 173 | ||
174 | cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); | 174 | cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); |
175 | if (!cmd) | 175 | if (!cmd) |
@@ -311,7 +311,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res, | |||
311 | return 0; | 311 | return 0; |
312 | 312 | ||
313 | WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); | 313 | WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); |
314 | reservation_object_assert_held(bo->base.resv); | 314 | dma_resv_assert_held(bo->base.resv); |
315 | 315 | ||
316 | mutex_lock(&dev_priv->binding_mutex); | 316 | mutex_lock(&dev_priv->binding_mutex); |
317 | if (!vcotbl->scrubbed) | 317 | if (!vcotbl->scrubbed) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 701643b7b0c4..0b5472450633 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -402,14 +402,14 @@ void vmw_resource_unreserve(struct vmw_resource *res, | |||
402 | 402 | ||
403 | if (switch_backup && new_backup != res->backup) { | 403 | if (switch_backup && new_backup != res->backup) { |
404 | if (res->backup) { | 404 | if (res->backup) { |
405 | reservation_object_assert_held(res->backup->base.base.resv); | 405 | dma_resv_assert_held(res->backup->base.base.resv); |
406 | list_del_init(&res->mob_head); | 406 | list_del_init(&res->mob_head); |
407 | vmw_bo_unreference(&res->backup); | 407 | vmw_bo_unreference(&res->backup); |
408 | } | 408 | } |
409 | 409 | ||
410 | if (new_backup) { | 410 | if (new_backup) { |
411 | res->backup = vmw_bo_reference(new_backup); | 411 | res->backup = vmw_bo_reference(new_backup); |
412 | reservation_object_assert_held(new_backup->base.base.resv); | 412 | dma_resv_assert_held(new_backup->base.base.resv); |
413 | list_add_tail(&res->mob_head, &new_backup->res_list); | 413 | list_add_tail(&res->mob_head, &new_backup->res_list); |
414 | } else { | 414 | } else { |
415 | res->backup = NULL; | 415 | res->backup = NULL; |
@@ -691,7 +691,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo) | |||
691 | .num_shared = 0 | 691 | .num_shared = 0 |
692 | }; | 692 | }; |
693 | 693 | ||
694 | reservation_object_assert_held(vbo->base.base.resv); | 694 | dma_resv_assert_held(vbo->base.base.resv); |
695 | list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) { | 695 | list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) { |
696 | if (!res->func->unbind) | 696 | if (!res->func->unbind) |
697 | continue; | 697 | continue; |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 94aae87b1138..037b1f7a87a5 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -87,7 +87,7 @@ struct module; | |||
87 | 87 | ||
88 | struct device_node; | 88 | struct device_node; |
89 | struct videomode; | 89 | struct videomode; |
90 | struct reservation_object; | 90 | struct dma_resv; |
91 | struct dma_buf_attachment; | 91 | struct dma_buf_attachment; |
92 | 92 | ||
93 | struct pci_dev; | 93 | struct pci_dev; |
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index ae693c0666cd..6aaba14f5972 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h | |||
@@ -35,7 +35,7 @@ | |||
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <linux/kref.h> | 37 | #include <linux/kref.h> |
38 | #include <linux/reservation.h> | 38 | #include <linux/dma-resv.h> |
39 | 39 | ||
40 | #include <drm/drm_vma_manager.h> | 40 | #include <drm/drm_vma_manager.h> |
41 | 41 | ||
@@ -276,7 +276,7 @@ struct drm_gem_object { | |||
276 | * | 276 | * |
277 | * Normally (@resv == &@_resv) except for imported GEM objects. | 277 | * Normally (@resv == &@_resv) except for imported GEM objects. |
278 | */ | 278 | */ |
279 | struct reservation_object *resv; | 279 | struct dma_resv *resv; |
280 | 280 | ||
281 | /** | 281 | /** |
282 | * @_resv: | 282 | * @_resv: |
@@ -285,7 +285,7 @@ struct drm_gem_object { | |||
285 | * | 285 | * |
286 | * This is unused for imported GEM objects. | 286 | * This is unused for imported GEM objects. |
287 | */ | 287 | */ |
288 | struct reservation_object _resv; | 288 | struct dma_resv _resv; |
289 | 289 | ||
290 | /** | 290 | /** |
291 | * @funcs: | 291 | * @funcs: |
@@ -390,7 +390,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, | |||
390 | int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, | 390 | int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, |
391 | int count, struct drm_gem_object ***objs_out); | 391 | int count, struct drm_gem_object ***objs_out); |
392 | struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); | 392 | struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); |
393 | long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, | 393 | long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, |
394 | bool wait_all, unsigned long timeout); | 394 | bool wait_all, unsigned long timeout); |
395 | int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, | 395 | int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, |
396 | struct ww_acquire_ctx *acquire_ctx); | 396 | struct ww_acquire_ctx *acquire_ctx); |
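
The drm_gem_reservation_object_wait() rename gives drivers such as v3d the helper shown earlier. A hypothetical wrapper built on the new name, with the return-value convention stated as an assumption:

	#include <drm/drm_gem.h>
	#include <drm/drm_file.h>

	/* Hypothetical: block until all fences on @handle's buffer signal. */
	static int example_wait_bo(struct drm_file *file_priv, u32 handle,
				   unsigned long timeout_jiffies)
	{
		long ret;

		/* true = wait for shared and exclusive fences alike. */
		ret = drm_gem_dma_resv_wait(file_priv, handle, true,
					    timeout_jiffies);

		/* Assumed: 0 on success, -ETIME on timeout, other -errno on error. */
		return ret;
	}
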
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 65ef5376de59..43c4929a2171 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <linux/mutex.h> | 40 | #include <linux/mutex.h> |
41 | #include <linux/mm.h> | 41 | #include <linux/mm.h> |
42 | #include <linux/bitmap.h> | 42 | #include <linux/bitmap.h> |
43 | #include <linux/reservation.h> | 43 | #include <linux/dma-resv.h> |
44 | 44 | ||
45 | struct ttm_bo_global; | 45 | struct ttm_bo_global; |
46 | 46 | ||
@@ -273,7 +273,7 @@ struct ttm_bo_kmap_obj { | |||
273 | struct ttm_operation_ctx { | 273 | struct ttm_operation_ctx { |
274 | bool interruptible; | 274 | bool interruptible; |
275 | bool no_wait_gpu; | 275 | bool no_wait_gpu; |
276 | struct reservation_object *resv; | 276 | struct dma_resv *resv; |
277 | uint64_t bytes_moved; | 277 | uint64_t bytes_moved; |
278 | uint32_t flags; | 278 | uint32_t flags; |
279 | }; | 279 | }; |
@@ -493,7 +493,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, | |||
493 | * @page_alignment: Data alignment in pages. | 493 | * @page_alignment: Data alignment in pages. |
494 | * @ctx: TTM operation context for memory allocation. | 494 | * @ctx: TTM operation context for memory allocation. |
495 | * @acc_size: Accounted size for this object. | 495 | * @acc_size: Accounted size for this object. |
496 | * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. | 496 | * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. |
497 | * @destroy: Destroy function. Use NULL for kfree(). | 497 | * @destroy: Destroy function. Use NULL for kfree(). |
498 | * | 498 | * |
499 | * This function initializes a pre-allocated struct ttm_buffer_object. | 499 | * This function initializes a pre-allocated struct ttm_buffer_object. |
@@ -526,7 +526,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, | |||
526 | struct ttm_operation_ctx *ctx, | 526 | struct ttm_operation_ctx *ctx, |
527 | size_t acc_size, | 527 | size_t acc_size, |
528 | struct sg_table *sg, | 528 | struct sg_table *sg, |
529 | struct reservation_object *resv, | 529 | struct dma_resv *resv, |
530 | void (*destroy) (struct ttm_buffer_object *)); | 530 | void (*destroy) (struct ttm_buffer_object *)); |
531 | 531 | ||
532 | /** | 532 | /** |
@@ -545,7 +545,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, | |||
545 | * point to the shmem object backing a GEM object if TTM is used to back a | 545 | * point to the shmem object backing a GEM object if TTM is used to back a |
546 | * GEM user interface. | 546 | * GEM user interface. |
547 | * @acc_size: Accounted size for this object. | 547 | * @acc_size: Accounted size for this object. |
548 | * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. | 548 | * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. |
549 | * @destroy: Destroy function. Use NULL for kfree(). | 549 | * @destroy: Destroy function. Use NULL for kfree(). |
550 | * | 550 | * |
551 | * This function initializes a pre-allocated struct ttm_buffer_object. | 551 | * This function initializes a pre-allocated struct ttm_buffer_object. |
@@ -570,7 +570,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, | |||
570 | unsigned long size, enum ttm_bo_type type, | 570 | unsigned long size, enum ttm_bo_type type, |
571 | struct ttm_placement *placement, | 571 | struct ttm_placement *placement, |
572 | uint32_t page_alignment, bool interrubtible, size_t acc_size, | 572 | uint32_t page_alignment, bool interrubtible, size_t acc_size, |
573 | struct sg_table *sg, struct reservation_object *resv, | 573 | struct sg_table *sg, struct dma_resv *resv, |
574 | void (*destroy) (struct ttm_buffer_object *)); | 574 | void (*destroy) (struct ttm_buffer_object *)); |
575 | 575 | ||
576 | /** | 576 | /** |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 3f1935c19a66..e88e00c6cbf2 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/workqueue.h> | 35 | #include <linux/workqueue.h> |
36 | #include <linux/fs.h> | 36 | #include <linux/fs.h> |
37 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
38 | #include <linux/reservation.h> | 38 | #include <linux/dma-resv.h> |
39 | 39 | ||
40 | #include "ttm_bo_api.h" | 40 | #include "ttm_bo_api.h" |
41 | #include "ttm_memory.h" | 41 | #include "ttm_memory.h" |
@@ -654,14 +654,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, | |||
654 | if (WARN_ON(ticket)) | 654 | if (WARN_ON(ticket)) |
655 | return -EBUSY; | 655 | return -EBUSY; |
656 | 656 | ||
657 | success = reservation_object_trylock(bo->base.resv); | 657 | success = dma_resv_trylock(bo->base.resv); |
658 | return success ? 0 : -EBUSY; | 658 | return success ? 0 : -EBUSY; |
659 | } | 659 | } |
660 | 660 | ||
661 | if (interruptible) | 661 | if (interruptible) |
662 | ret = reservation_object_lock_interruptible(bo->base.resv, ticket); | 662 | ret = dma_resv_lock_interruptible(bo->base.resv, ticket); |
663 | else | 663 | else |
664 | ret = reservation_object_lock(bo->base.resv, ticket); | 664 | ret = dma_resv_lock(bo->base.resv, ticket); |
665 | if (ret == -EINTR) | 665 | if (ret == -EINTR) |
666 | return -ERESTARTSYS; | 666 | return -ERESTARTSYS; |
667 | return ret; | 667 | return ret; |
@@ -745,10 +745,10 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, | |||
745 | WARN_ON(!kref_read(&bo->kref)); | 745 | WARN_ON(!kref_read(&bo->kref)); |
746 | 746 | ||
747 | if (interruptible) | 747 | if (interruptible) |
748 | ret = reservation_object_lock_slow_interruptible(bo->base.resv, | 748 | ret = dma_resv_lock_slow_interruptible(bo->base.resv, |
749 | ticket); | 749 | ticket); |
750 | else | 750 | else |
751 | reservation_object_lock_slow(bo->base.resv, ticket); | 751 | dma_resv_lock_slow(bo->base.resv, ticket); |
752 | 752 | ||
753 | if (likely(ret == 0)) | 753 | if (likely(ret == 0)) |
754 | ttm_bo_del_sub_from_lru(bo); | 754 | ttm_bo_del_sub_from_lru(bo); |
@@ -773,7 +773,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) | |||
773 | else | 773 | else |
774 | ttm_bo_move_to_lru_tail(bo, NULL); | 774 | ttm_bo_move_to_lru_tail(bo, NULL); |
775 | spin_unlock(&bo->bdev->glob->lru_lock); | 775 | spin_unlock(&bo->bdev->glob->lru_lock); |
776 | reservation_object_unlock(bo->base.resv); | 776 | dma_resv_unlock(bo->base.resv); |
777 | } | 777 | } |
778 | 778 | ||
779 | /* | 779 | /* |
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index bae060fae862..ec212cb27fdc 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h | |||
@@ -306,7 +306,7 @@ struct dma_buf { | |||
306 | struct module *owner; | 306 | struct module *owner; |
307 | struct list_head list_node; | 307 | struct list_head list_node; |
308 | void *priv; | 308 | void *priv; |
309 | struct reservation_object *resv; | 309 | struct dma_resv *resv; |
310 | 310 | ||
311 | /* poll support */ | 311 | /* poll support */ |
312 | wait_queue_head_t poll; | 312 | wait_queue_head_t poll; |
@@ -365,7 +365,7 @@ struct dma_buf_export_info { | |||
365 | const struct dma_buf_ops *ops; | 365 | const struct dma_buf_ops *ops; |
366 | size_t size; | 366 | size_t size; |
367 | int flags; | 367 | int flags; |
368 | struct reservation_object *resv; | 368 | struct dma_resv *resv; |
369 | void *priv; | 369 | void *priv; |
370 | }; | 370 | }; |
371 | 371 | ||
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index bea1d05cf51e..404aa748eda6 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h | |||
@@ -279,7 +279,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) | |||
279 | } | 279 | } |
280 | 280 | ||
281 | /** | 281 | /** |
282 | * dma_fence_get_rcu - get a fence from a reservation_object_list with | 282 | * dma_fence_get_rcu - get a fence from a dma_resv_list with |
283 | * rcu read lock | 283 | * rcu read lock |
284 | * @fence: fence to increase refcount of | 284 | * @fence: fence to increase refcount of |
285 | * | 285 | * |
@@ -303,7 +303,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) | |||
303 | * so long as the caller is using RCU on the pointer to the fence. | 303 | * so long as the caller is using RCU on the pointer to the fence. |
304 | * | 304 | * |
305 | * An alternative mechanism is to employ a seqlock to protect a bunch of | 305 | * An alternative mechanism is to employ a seqlock to protect a bunch of |
306 | * fences, such as used by struct reservation_object. When using a seqlock, | 306 | * fences, such as used by struct dma_resv. When using a seqlock, |
307 | * the seqlock must be taken before and checked after a reference to the | 307 | * the seqlock must be taken before and checked after a reference to the |
308 | * fence is acquired (as shown here). | 308 | * fence is acquired (as shown here). |
309 | * | 309 | * |
diff --git a/include/linux/reservation.h b/include/linux/dma-resv.h index acddefea694f..38f2802afabb 100644 --- a/include/linux/reservation.h +++ b/include/linux/dma-resv.h | |||
@@ -48,38 +48,37 @@ | |||
48 | extern struct ww_class reservation_ww_class; | 48 | extern struct ww_class reservation_ww_class; |
49 | 49 | ||
50 | /** | 50 | /** |
51 | * struct reservation_object_list - a list of shared fences | 51 | * struct dma_resv_list - a list of shared fences |
52 | * @rcu: for internal use | 52 | * @rcu: for internal use |
53 | * @shared_count: table of shared fences | 53 | * @shared_count: table of shared fences |
54 | * @shared_max: for growing shared fence table | 54 | * @shared_max: for growing shared fence table |
55 | * @shared: shared fence table | 55 | * @shared: shared fence table |
56 | */ | 56 | */ |
57 | struct reservation_object_list { | 57 | struct dma_resv_list { |
58 | struct rcu_head rcu; | 58 | struct rcu_head rcu; |
59 | u32 shared_count, shared_max; | 59 | u32 shared_count, shared_max; |
60 | struct dma_fence __rcu *shared[]; | 60 | struct dma_fence __rcu *shared[]; |
61 | }; | 61 | }; |
62 | 62 | ||
63 | /** | 63 | /** |
64 | * struct reservation_object - a reservation object manages fences for a buffer | 64 | * struct dma_resv - a reservation object manages fences for a buffer |
65 | * @lock: update side lock | 65 | * @lock: update side lock |
66 | * @seq: sequence count for managing RCU read-side synchronization | 66 | * @seq: sequence count for managing RCU read-side synchronization |
67 | * @fence_excl: the exclusive fence, if there is one currently | 67 | * @fence_excl: the exclusive fence, if there is one currently |
68 | * @fence: list of current shared fences | 68 | * @fence: list of current shared fences |
69 | */ | 69 | */ |
70 | struct reservation_object { | 70 | struct dma_resv { |
71 | struct ww_mutex lock; | 71 | struct ww_mutex lock; |
72 | 72 | ||
73 | struct dma_fence __rcu *fence_excl; | 73 | struct dma_fence __rcu *fence_excl; |
74 | struct reservation_object_list __rcu *fence; | 74 | struct dma_resv_list __rcu *fence; |
75 | }; | 75 | }; |
76 | 76 | ||
77 | #define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) | 77 | #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) |
78 | #define reservation_object_assert_held(obj) \ | 78 | #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) |
79 | lockdep_assert_held(&(obj)->lock.base) | ||
80 | 79 | ||
81 | /** | 80 | /** |
82 | * reservation_object_get_excl - get the reservation object's | 81 | * dma_resv_get_excl - get the reservation object's |
83 | * exclusive fence, with update-side lock held | 82 | * exclusive fence, with update-side lock held |
84 | * @obj: the reservation object | 83 | * @obj: the reservation object |
85 | * | 84 | * |
@@ -90,30 +89,28 @@ struct reservation_object { | |||
90 | * RETURNS | 89 | * RETURNS |
91 | * The exclusive fence or NULL | 90 | * The exclusive fence or NULL |
92 | */ | 91 | */ |
93 | static inline struct dma_fence * | 92 | static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj) |
94 | reservation_object_get_excl(struct reservation_object *obj) | ||
95 | { | 93 | { |
96 | return rcu_dereference_protected(obj->fence_excl, | 94 | return rcu_dereference_protected(obj->fence_excl, |
97 | reservation_object_held(obj)); | 95 | dma_resv_held(obj)); |
98 | } | 96 | } |
99 | 97 | ||
100 | /** | 98 | /** |
101 | * reservation_object_get_list - get the reservation object's | 99 | * dma_resv_get_list - get the reservation object's |
102 | * shared fence list, with update-side lock held | 100 | * shared fence list, with update-side lock held |
103 | * @obj: the reservation object | 101 | * @obj: the reservation object |
104 | * | 102 | * |
105 | * Returns the shared fence list. Does NOT take references to | 103 | * Returns the shared fence list. Does NOT take references to |
106 | * the fence. The obj->lock must be held. | 104 | * the fence. The obj->lock must be held. |
107 | */ | 105 | */ |
108 | static inline struct reservation_object_list * | 106 | static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj) |
109 | reservation_object_get_list(struct reservation_object *obj) | ||
110 | { | 107 | { |
111 | return rcu_dereference_protected(obj->fence, | 108 | return rcu_dereference_protected(obj->fence, |
112 | reservation_object_held(obj)); | 109 | dma_resv_held(obj)); |
113 | } | 110 | } |
114 | 111 | ||
115 | /** | 112 | /** |
116 | * reservation_object_fences - read consistent fence pointers | 113 | * dma_resv_fences - read consistent fence pointers |
117 | * @obj: reservation object where we get the fences from | 114 | * @obj: reservation object where we get the fences from |
118 | * @excl: pointer for the exclusive fence | 115 | * @excl: pointer for the exclusive fence |
119 | * @list: pointer for the shared fence list | 116 | * @list: pointer for the shared fence list |
@@ -121,22 +118,21 @@ reservation_object_get_list(struct reservation_object *obj) | |||
121 | * Make sure we have a consistent exclusive fence and shared fence list. | 118 | * Make sure we have a consistent exclusive fence and shared fence list. |
122 | * Must be called with rcu read side lock held. | 119 | * Must be called with rcu read side lock held. |
123 | */ | 120 | */ |
124 | static inline void | 121 | static inline void dma_resv_fences(struct dma_resv *obj, |
125 | reservation_object_fences(struct reservation_object *obj, | 122 | struct dma_fence **excl, |
126 | struct dma_fence **excl, | 123 | struct dma_resv_list **list, |
127 | struct reservation_object_list **list, | 124 | u32 *shared_count) |
128 | u32 *shared_count) | ||
129 | { | 125 | { |
130 | do { | 126 | do { |
131 | *excl = rcu_dereference(obj->fence_excl); | 127 | *excl = rcu_dereference(obj->fence_excl); |
132 | *list = rcu_dereference(obj->fence); | 128 | *list = rcu_dereference(obj->fence); |
133 | *shared_count = *list ? (*list)->shared_count : 0; | 129 | *shared_count = *list ? (*list)->shared_count : 0; |
134 | smp_rmb(); /* See reservation_object_add_excl_fence */ | 130 | smp_rmb(); /* See dma_resv_add_excl_fence */ |
135 | } while (rcu_access_pointer(obj->fence_excl) != *excl); | 131 | } while (rcu_access_pointer(obj->fence_excl) != *excl); |
136 | } | 132 | } |
137 | 133 | ||
138 | /** | 134 | /** |
139 | * reservation_object_get_excl_rcu - get the reservation object's | 135 | * dma_resv_get_excl_rcu - get the reservation object's |
140 | * exclusive fence, without lock held. | 136 | * exclusive fence, without lock held. |
141 | * @obj: the reservation object | 137 | * @obj: the reservation object |
142 | * | 138 | * |
@@ -146,8 +142,7 @@ reservation_object_fences(struct reservation_object *obj, | |||
146 | * RETURNS | 142 | * RETURNS |
147 | * The exclusive fence or NULL if none | 143 | * The exclusive fence or NULL if none |
148 | */ | 144 | */ |
149 | static inline struct dma_fence * | 145 | static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj) |
150 | reservation_object_get_excl_rcu(struct reservation_object *obj) | ||
151 | { | 146 | { |
152 | struct dma_fence *fence; | 147 | struct dma_fence *fence; |
153 | 148 | ||
@@ -162,7 +157,7 @@ reservation_object_get_excl_rcu(struct reservation_object *obj) | |||
162 | } | 157 | } |
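
A short, illustrative use of dma_resv_get_excl_rcu(): because it returns a reference (or NULL), the caller can wait on the fence outside any lock and must drop the reference afterwards. The wrapper name is hypothetical:

/* Illustrative only: wait on the exclusive fence without holding the lock. */
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static long wait_for_exclusive(struct dma_resv *obj, bool intr)
{
	struct dma_fence *fence;
	long ret = 0;

	fence = dma_resv_get_excl_rcu(obj); /* returns a reference or NULL */
	if (fence) {
		ret = dma_fence_wait(fence, intr);
		dma_fence_put(fence);
	}
	return ret; /* 0 on success, negative errno if interrupted */
}
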
163 | 158 | ||
164 | /** | 159 | /** |
165 | * reservation_object_lock - lock the reservation object | 160 | * dma_resv_lock - lock the reservation object |
166 | * @obj: the reservation object | 161 | * @obj: the reservation object |
167 | * @ctx: the locking context | 162 | * @ctx: the locking context |
168 | * | 163 | * |
@@ -176,15 +171,14 @@ reservation_object_get_excl_rcu(struct reservation_object *obj) | |||
176 | * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation | 171 | * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation |
177 | * object may be locked by itself by passing NULL as @ctx. | 172 | * object may be locked by itself by passing NULL as @ctx. |
178 | */ | 173 | */ |
179 | static inline int | 174 | static inline int dma_resv_lock(struct dma_resv *obj, |
180 | reservation_object_lock(struct reservation_object *obj, | 175 | struct ww_acquire_ctx *ctx) |
181 | struct ww_acquire_ctx *ctx) | ||
182 | { | 176 | { |
183 | return ww_mutex_lock(&obj->lock, ctx); | 177 | return ww_mutex_lock(&obj->lock, ctx); |
184 | } | 178 | } |
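
A minimal sketch of the single-object case, where passing NULL as the acquire context is sufficient (names are illustrative):

/* Illustrative only: single reservation object, no acquire context needed. */
#include <linux/dma-resv.h>

static void touch_one_object(struct dma_resv *obj)
{
	dma_resv_lock(obj, NULL); /* with a NULL ctx this behaves like a plain mutex lock */

	/* ... add or inspect fences here ... */

	dma_resv_unlock(obj);
}
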
185 | 179 | ||
186 | /** | 180 | /** |
187 | * reservation_object_lock_interruptible - lock the reservation object | 181 | * dma_resv_lock_interruptible - lock the reservation object |
188 | * @obj: the reservation object | 182 | * @obj: the reservation object |
189 | * @ctx: the locking context | 183 | * @ctx: the locking context |
190 | * | 184 | * |
@@ -198,48 +192,45 @@ reservation_object_lock(struct reservation_object *obj, | |||
198 | * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation | 192 | * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation |
199 | * object may be locked by itself by passing NULL as @ctx. | 193 | * object may be locked by itself by passing NULL as @ctx. |
200 | */ | 194 | */ |
201 | static inline int | 195 | static inline int dma_resv_lock_interruptible(struct dma_resv *obj, |
202 | reservation_object_lock_interruptible(struct reservation_object *obj, | 196 | struct ww_acquire_ctx *ctx) |
203 | struct ww_acquire_ctx *ctx) | ||
204 | { | 197 | { |
205 | return ww_mutex_lock_interruptible(&obj->lock, ctx); | 198 | return ww_mutex_lock_interruptible(&obj->lock, ctx); |
206 | } | 199 | } |
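
The interruptible variant is the usual choice when running in process context on behalf of userspace; a hedged sketch, with error semantics as documented for ww_mutex_lock_interruptible():

/* Illustrative only: interruptible lock from process context. */
#include <linux/dma-resv.h>

static int touch_one_object_intr(struct dma_resv *obj)
{
	int ret;

	ret = dma_resv_lock_interruptible(obj, NULL);
	if (ret)
		return ret; /* interrupted by a signal */

	/* ... add or inspect fences here ... */

	dma_resv_unlock(obj);
	return 0;
}
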
207 | 200 | ||
208 | /** | 201 | /** |
209 | * reservation_object_lock_slow - slowpath lock the reservation object | 202 | * dma_resv_lock_slow - slowpath lock the reservation object |
210 | * @obj: the reservation object | 203 | * @obj: the reservation object |
211 | * @ctx: the locking context | 204 | * @ctx: the locking context |
212 | * | 205 | * |
213 | * Acquires the reservation object after a die case. This function | 206 | * Acquires the reservation object after a die case. This function |
214 | * will sleep until the lock becomes available. See reservation_object_lock() as | 207 | * will sleep until the lock becomes available. See dma_resv_lock() as |
215 | * well. | 208 | * well. |
216 | */ | 209 | */ |
217 | static inline void | 210 | static inline void dma_resv_lock_slow(struct dma_resv *obj, |
218 | reservation_object_lock_slow(struct reservation_object *obj, | 211 | struct ww_acquire_ctx *ctx) |
219 | struct ww_acquire_ctx *ctx) | ||
220 | { | 212 | { |
221 | ww_mutex_lock_slow(&obj->lock, ctx); | 213 | ww_mutex_lock_slow(&obj->lock, ctx); |
222 | } | 214 | } |
223 | 215 | ||
224 | /** | 216 | /** |
225 | * reservation_object_lock_slow_interruptible - slowpath lock the reservation | 217 | * dma_resv_lock_slow_interruptible - slowpath lock the reservation |
226 | * object, interruptible | 218 | * object, interruptible |
227 | * @obj: the reservation object | 219 | * @obj: the reservation object |
228 | * @ctx: the locking context | 220 | * @ctx: the locking context |
229 | * | 221 | * |
230 | * Acquires the reservation object interruptible after a die case. This function | 222 | * Acquires the reservation object interruptible after a die case. This function |
231 | * will sleep until the lock becomes available. See | 223 | * will sleep until the lock becomes available. See |
232 | * reservation_object_lock_interruptible() as well. | 224 | * dma_resv_lock_interruptible() as well. |
233 | */ | 225 | */ |
234 | static inline int | 226 | static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj, |
235 | reservation_object_lock_slow_interruptible(struct reservation_object *obj, | 227 | struct ww_acquire_ctx *ctx) |
236 | struct ww_acquire_ctx *ctx) | ||
237 | { | 228 | { |
238 | return ww_mutex_lock_slow_interruptible(&obj->lock, ctx); | 229 | return ww_mutex_lock_slow_interruptible(&obj->lock, ctx); |
239 | } | 230 | } |
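
Taken together, the lock/slowpath variants implement the wound/wait backoff dance. Below is a sketch of locking several reservation objects under one acquire context, closely modeled on what the DRM helper drm_gem_lock_reservations() does; treat it as an illustration rather than a copy of that helper:

/* Illustrative only: lock an array of reservation objects with one acquire
 * context, backing off to the slowpath on -EDEADLK.
 */
#include <linux/dma-resv.h>
#include <linux/ww_mutex.h>

static int lock_all(struct dma_resv **objs, int count,
		    struct ww_acquire_ctx *ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		/* We backed off; take the contended lock on the slowpath. */
		ret = dma_resv_lock_slow_interruptible(objs[contended], ctx);
		if (ret) {
			ww_acquire_fini(ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i], ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]);
			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(ctx);
			return ret;
		}
	}

	ww_acquire_done(ctx);
	return 0;
}

The key point of the design is that only the contended lock is taken on the slowpath: everything else has been dropped first, so the sleeping acquisition cannot deadlock, and all other locks are then retried with the same context.
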
240 | 231 | ||
241 | /** | 232 | /** |
242 | * reservation_object_trylock - trylock the reservation object | 233 | * dma_resv_trylock - trylock the reservation object |
243 | * @obj: the reservation object | 234 | * @obj: the reservation object |
244 | * | 235 | * |
245 | * Tries to lock the reservation object for exclusive access and modification. | 236 | * Tries to lock the reservation object for exclusive access and modification. |
@@ -252,51 +243,46 @@ reservation_object_lock_slow_interruptible(struct reservation_object *obj, | |||
252 | * | 243 | * |
253 | * Returns true if the lock was acquired, false otherwise. | 244 | * Returns true if the lock was acquired, false otherwise. |
254 | */ | 245 | */ |
255 | static inline bool __must_check | 246 | static inline bool __must_check dma_resv_trylock(struct dma_resv *obj) |
256 | reservation_object_trylock(struct reservation_object *obj) | ||
257 | { | 247 | { |
258 | return ww_mutex_trylock(&obj->lock); | 248 | return ww_mutex_trylock(&obj->lock); |
259 | } | 249 | } |
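
A hedged sketch of the typical trylock use: opportunistic work (for example from a shrinker or a debug path) that must not sleep on the lock:

/* Illustrative only: opportunistic work that must not sleep on the lock. */
#include <linux/dma-resv.h>

static bool try_flush_object(struct dma_resv *obj)
{
	if (!dma_resv_trylock(obj))
		return false; /* contended, try again later */

	/* ... inspect or update fences ... */

	dma_resv_unlock(obj);
	return true;
}
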
260 | 250 | ||
261 | /** | 251 | /** |
262 | * reservation_object_is_locked - is the reservation object locked | 252 | * dma_resv_is_locked - is the reservation object locked |
263 | * @obj: the reservation object | 253 | * @obj: the reservation object |
264 | * | 254 | * |
265 | * Returns true if the mutex is locked, false if unlocked. | 255 | * Returns true if the mutex is locked, false if unlocked. |
266 | */ | 256 | */ |
267 | static inline bool | 257 | static inline bool dma_resv_is_locked(struct dma_resv *obj) |
268 | reservation_object_is_locked(struct reservation_object *obj) | ||
269 | { | 258 | { |
270 | return ww_mutex_is_locked(&obj->lock); | 259 | return ww_mutex_is_locked(&obj->lock); |
271 | } | 260 | } |
272 | 261 | ||
273 | /** | 262 | /** |
274 | * reservation_object_locking_ctx - returns the context used to lock the object | 263 | * dma_resv_locking_ctx - returns the context used to lock the object |
275 | * @obj: the reservation object | 264 | * @obj: the reservation object |
276 | * | 265 | * |
277 | * Returns the context used to lock a reservation object or NULL if no context | 266 | * Returns the context used to lock a reservation object or NULL if no context |
278 | * was used or the object is not locked at all. | 267 | * was used or the object is not locked at all. |
279 | */ | 268 | */ |
280 | static inline struct ww_acquire_ctx * | 269 | static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) |
281 | reservation_object_locking_ctx(struct reservation_object *obj) | ||
282 | { | 270 | { |
283 | return READ_ONCE(obj->lock.ctx); | 271 | return READ_ONCE(obj->lock.ctx); |
284 | } | 272 | } |
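
dma_resv_is_locked() and dma_resv_locking_ctx() are mostly useful for assertions and for re-entrant paths that may or may not already hold the lock. An illustrative sketch, assuming a non-NULL acquire context and leaving proper -EDEADLK backoff to the caller:

/* Illustrative only: a path that may already hold the lock via @ctx. */
#include <linux/dma-resv.h>
#include <linux/ww_mutex.h>

static void update_object(struct dma_resv *obj, struct ww_acquire_ctx *ctx)
{
	bool held_by_us = dma_resv_locking_ctx(obj) == ctx;

	if (!held_by_us && dma_resv_lock(obj, ctx))
		return; /* real code must back off and retry on -EDEADLK */

	WARN_ON(!dma_resv_is_locked(obj));

	/* ... modify fences ... */

	if (!held_by_us)
		dma_resv_unlock(obj);
}
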
285 | 273 | ||
286 | /** | 274 | /** |
287 | * reservation_object_unlock - unlock the reservation object | 275 | * dma_resv_unlock - unlock the reservation object |
288 | * @obj: the reservation object | 276 | * @obj: the reservation object |
289 | * | 277 | * |
290 | * Unlocks the reservation object following exclusive access. | 278 | * Unlocks the reservation object following exclusive access. |
291 | */ | 279 | */ |
292 | static inline void | 280 | static inline void dma_resv_unlock(struct dma_resv *obj) |
293 | reservation_object_unlock(struct reservation_object *obj) | ||
294 | { | 281 | { |
295 | #ifdef CONFIG_DEBUG_MUTEXES | 282 | #ifdef CONFIG_DEBUG_MUTEXES |
296 | /* Test shared fence slot reservation */ | 283 | /* Test shared fence slot reservation */ |
297 | if (rcu_access_pointer(obj->fence)) { | 284 | if (rcu_access_pointer(obj->fence)) { |
298 | struct reservation_object_list *fence = | 285 | struct dma_resv_list *fence = dma_resv_get_list(obj); |
299 | reservation_object_get_list(obj); | ||
300 | 286 | ||
301 | fence->shared_max = fence->shared_count; | 287 | fence->shared_max = fence->shared_count; |
302 | } | 288 | } |
@@ -304,29 +290,23 @@ reservation_object_unlock(struct reservation_object *obj) | |||
304 | ww_mutex_unlock(&obj->lock); | 290 | ww_mutex_unlock(&obj->lock); |
305 | } | 291 | } |
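
The CONFIG_DEBUG_MUTEXES hunk above shrinks shared_max back to shared_count on unlock, so a later dma_resv_add_shared_fence() without a matching dma_resv_reserve_shared() is likely to trip a warning. A hedged sketch of the intended pairing (the wrapper name is illustrative):

/* Illustrative only: reserve a slot before publishing a shared fence. */
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static int publish_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	int ret;

	dma_resv_lock(obj, NULL);

	ret = dma_resv_reserve_shared(obj, 1); /* may allocate, can fail */
	if (!ret)
		dma_resv_add_shared_fence(obj, fence);

	dma_resv_unlock(obj);
	return ret;
}
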
306 | 292 | ||
307 | void reservation_object_init(struct reservation_object *obj); | 293 | void dma_resv_init(struct dma_resv *obj); |
308 | void reservation_object_fini(struct reservation_object *obj); | 294 | void dma_resv_fini(struct dma_resv *obj); |
309 | int reservation_object_reserve_shared(struct reservation_object *obj, | 295 | int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); |
310 | unsigned int num_fences); | 296 | void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); |
311 | void reservation_object_add_shared_fence(struct reservation_object *obj, | ||
312 | struct dma_fence *fence); | ||
313 | 297 | ||
314 | void reservation_object_add_excl_fence(struct reservation_object *obj, | 298 | void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); |
315 | struct dma_fence *fence); | ||
316 | 299 | ||
317 | int reservation_object_get_fences_rcu(struct reservation_object *obj, | 300 | int dma_resv_get_fences_rcu(struct dma_resv *obj, |
318 | struct dma_fence **pfence_excl, | 301 | struct dma_fence **pfence_excl, |
319 | unsigned *pshared_count, | 302 | unsigned *pshared_count, |
320 | struct dma_fence ***pshared); | 303 | struct dma_fence ***pshared); |
321 | 304 | ||
322 | int reservation_object_copy_fences(struct reservation_object *dst, | 305 | int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); |
323 | struct reservation_object *src); | ||
324 | 306 | ||
325 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, | 307 | long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr, |
326 | bool wait_all, bool intr, | 308 | unsigned long timeout); |
327 | unsigned long timeout); | ||
328 | 309 | ||
329 | bool reservation_object_test_signaled_rcu(struct reservation_object *obj, | 310 | bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all); |
330 | bool test_all); | ||
331 | 311 | ||
332 | #endif /* _LINUX_RESERVATION_H */ | 312 | #endif /* _LINUX_RESERVATION_H */ |
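
To round off the renamed API, a hedged sketch of the common consumer pattern built from the declarations above: test first, then wait for all fences with a timeout. The helper name and the 5 second timeout are illustrative choices, not mandated by the interface:

/* Illustrative only: wait for all fences (shared and exclusive) on a buffer. */
#include <linux/dma-resv.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_for_idle(struct dma_resv *obj, bool intr)
{
	long lret;

	if (dma_resv_test_signaled_rcu(obj, true))
		return 0; /* already idle, nothing to wait for */

	lret = dma_resv_wait_timeout_rcu(obj, true /* wait_all */, intr,
					 msecs_to_jiffies(5000));
	if (lret < 0)
		return lret; /* interrupted */

	return lret ? 0 : -ETIMEDOUT; /* 0 remaining jiffies means timeout */
}
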