author:    Dave Airlie <airlied@redhat.com>  2016-10-27 21:33:52 -0400
committer: Dave Airlie <airlied@redhat.com>  2016-10-27 21:33:52 -0400
commit:    220196b38483be6d84a295d318d48595f65da443
tree:      f91c2e6e64ef59afdc075d843d51f23369e9164a
parent:    a1873c62710b23e9afbd2faeed5f28649cbe4739
parent:    56df51d003203f1c3a8eab05605973515aa15feb
Merge tag 'topic/drm-misc-2016-10-27' of git://anongit.freedesktop.org/git/drm-intel into drm-next
Another pull request already, to get the s/fence/dma_fence/ rename in and
allow everyone to resync. Otherwise it is really just misc stuff all over,
plus a new bridge driver.
* tag 'topic/drm-misc-2016-10-27' of git://anongit.freedesktop.org/git/drm-intel:
drm/bridge: fix platform_no_drv_owner.cocci warnings
drm/bridge: fix semicolon.cocci warnings
drm: Print some debug/error info during DP dual mode detect
drm: mark drm_of_component_match_add dummy inline
drm/bridge: add Silicon Image SiI8620 driver
dt-bindings: add Silicon Image SiI8620 bridge bindings
video: add header file for Mobile High-Definition Link (MHL) interface
drm: convert DT component matching to component_match_add_release()
dma-buf: Rename struct fence to dma_fence
dma-buf/fence: add an lockdep_assert_held()
drm/dp: Factor out helper to distinguish between branch and sink devices
drm/edid: Only print the bad edid when aborting
drm/msm: add missing header dependencies
drm/msm/adreno: move function declarations to header file
drm/i2c/tda998x: mark symbol static where possible
doc: add missing docbook parameter for fence-array
drm: RIP mode_config->rotation_property
drm/msm/mdp5: Advertize 180 degree rotation
drm/msm/mdp5: Use per-plane rotation property
144 files changed, 4865 insertions(+), 1332 deletions(-)
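Most of that diffstat is the mechanical s/fence/dma_fence/ rename. As a rough
sketch of what the rename means for driver code (the my_job structure and
helper below are invented for illustration, not part of this series):

/* Hypothetical driver snippet; the my_* identifiers are made up. */
#include <linux/dma-fence.h>		/* was: #include <linux/fence.h> */

struct my_job {
	struct dma_fence *done_fence;	/* was: struct fence * */
};

static void my_job_complete(struct my_job *job)
{
	dma_fence_signal(job->done_fence);	/* was: fence_signal() */
	dma_fence_put(job->done_fence);		/* was: fence_put() */
}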
diff --git a/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
new file mode 100644
index 000000000000..9409d9c6a260
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
@@ -0,0 +1,33 @@
+Silicon Image SiI8620 HDMI/MHL bridge bindings
+
+Required properties:
+- compatible: "sil,sii8620"
+- reg: i2c address of the bridge
+- cvcc10-supply: Digital Core Supply Voltage (1.0V)
+- iovcc18-supply: I/O Supply Voltage (1.8V)
+- interrupts, interrupt-parent: interrupt specifier of INT pin
+- reset-gpios: gpio specifier of RESET pin
+- clocks, clock-names: specification and name of "xtal" clock
+- video interfaces: Device node can contain video interface port
+  node for HDMI encoder according to [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+	sii8620@39 {
+		reg = <0x39>;
+		compatible = "sil,sii8620";
+		cvcc10-supply = <&ldo36_reg>;
+		iovcc18-supply = <&ldo34_reg>;
+		interrupt-parent = <&gpf0>;
+		interrupts = <2 0>;
+		reset-gpios = <&gpv7 0 0>;
+		clocks = <&pmu_system_controller 0>;
+		clock-names = "xtal";
+
+		port {
+			mhl_to_hdmi: endpoint {
+				remote-endpoint = <&hdmi_to_mhl>;
+			};
+		};
+	};
diff --git a/Documentation/sync_file.txt b/Documentation/sync_file.txt
index b63a68531afd..269681a6faec 100644
--- a/Documentation/sync_file.txt
+++ b/Documentation/sync_file.txt
@@ -6,7 +6,7 @@
 
 This document serves as a guide for device drivers writers on what the
 sync_file API is, and how drivers can support it. Sync file is the carrier of
-the fences(struct fence) that are needed to synchronize between drivers or
+the fences(struct dma_fence) that are needed to synchronize between drivers or
 across process boundaries.
 
 The sync_file API is meant to be used to send and receive fence information
@@ -32,9 +32,9 @@ in-fences and out-fences
 Sync files can go either to or from userspace. When a sync_file is sent from
 the driver to userspace we call the fences it contains 'out-fences'. They are
 related to a buffer that the driver is processing or is going to process, so
-the driver creates an out-fence to be able to notify, through fence_signal(),
-when it has finished using (or processing) that buffer. Out-fences are fences
-that the driver creates.
+the driver creates an out-fence to be able to notify, through
+dma_fence_signal(), when it has finished using (or processing) that buffer.
+Out-fences are fences that the driver creates.
 
 On the other hand if the driver receives fence(s) through a sync_file from
 userspace we call these fence(s) 'in-fences'. Receiveing in-fences means that
@@ -47,7 +47,7 @@ Creating Sync Files
 When a driver needs to send an out-fence userspace it creates a sync_file.
 
 Interface:
-	struct sync_file *sync_file_create(struct fence *fence);
+	struct sync_file *sync_file_create(struct dma_fence *fence);
 
 The caller pass the out-fence and gets back the sync_file. That is just the
 first step, next it needs to install an fd on sync_file->file. So it gets an
@@ -72,11 +72,11 @@ of the Sync File to the kernel. The kernel can then retrieve the fences
 from it.
 
 Interface:
-	struct fence *sync_file_get_fence(int fd);
+	struct dma_fence *sync_file_get_fence(int fd);
 
 
 The returned reference is owned by the caller and must be disposed of
-afterwards using fence_put(). In case of error, a NULL is returned instead.
+afterwards using dma_fence_put(). In case of error, a NULL is returned instead.
 
 References:
 [1] struct sync_file in include/linux/sync_file.h
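For readers new to the sync_file interfaces touched above, here is a minimal
sketch of both directions using the renamed types. The my_* helpers are
hypothetical; sync_file_create(), sync_file_get_fence() and the fd helpers
are the real interfaces the document describes, with error handling trimmed:

#include <linux/dma-fence.h>
#include <linux/sync_file.h>
#include <linux/file.h>
#include <linux/fcntl.h>

/* Out-fence direction: wrap a driver fence in a sync_file for userspace. */
static int my_export_fence(struct dma_fence *fence)
{
	struct sync_file *sync_file = sync_file_create(fence);
	int fd;

	if (!sync_file)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		fput(sync_file->file);
		return fd;
	}
	fd_install(fd, sync_file->file);
	return fd;
}

/* In-fence direction: resolve a userspace fd back to a dma_fence. */
static struct dma_fence *my_import_fence(int fd)
{
	/* NULL on error; caller owns the reference, dma_fence_put() it. */
	return sync_file_get_fence(fd);
}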
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fdf44cac08e6..37bf25c6b4a6 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -248,11 +248,11 @@ config DMA_SHARED_BUFFER
 	  APIs extension; the file's descriptor can then be passed on to other
 	  driver.
 
-config FENCE_TRACE
-	bool "Enable verbose FENCE_TRACE messages"
+config DMA_FENCE_TRACE
+	bool "Enable verbose DMA_FENCE_TRACE messages"
 	depends on DMA_SHARED_BUFFER
 	help
-	  Enable the FENCE_TRACE printks. This will add extra
+	  Enable the DMA_FENCE_TRACE printks. This will add extra
 	  spam to the console log, but will make it easier to diagnose
 	  lockup related problems for dma-buffers shared across multiple
 	  devices.
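For reference, CONFIG_DMA_FENCE_TRACE gates the DMA_FENCE_TRACE() printk
macro in <linux/dma-fence.h>; with the option off it compiles away. A
hypothetical use (my_debug_signal is invented for the example):

#include <linux/dma-fence.h>

static void my_debug_signal(struct dma_fence *fence)
{
	/* only emits output when CONFIG_DMA_FENCE_TRACE=y */
	DMA_FENCE_TRACE(fence, "signaling fence %llu#%u\n",
			fence->context, fence->seqno);
	dma_fence_signal(fence);
}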
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 2585821b24ab..ed3b785bae37 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -7,7 +7,7 @@ config SYNC_FILE
 	select DMA_SHARED_BUFFER
 	---help---
 	  The Sync File Framework adds explicit syncronization via
-	  userspace. It enables send/receive 'struct fence' objects to/from
+	  userspace. It enables send/receive 'struct dma_fence' objects to/from
 	  userspace via Sync File fds for synchronization between drivers via
 	  userspace components. It has been ported from Android.
 
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 210a10bfad2b..c33bf8863147 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,3 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE) += sync_file.o
 obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cf04d249a6a4..e72e64484131 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -25,7 +25,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/dma-buf.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/anon_inodes.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
@@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
 	return base + offset;
 }
 
-static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
+static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
 	unsigned long flags;
@@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	struct dma_buf *dmabuf;
 	struct reservation_object *resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence_excl;
+	struct dma_fence *fence_excl;
 	unsigned long events;
 	unsigned shared_count, seq;
 
@@ -187,20 +187,20 @@ retry:
 		spin_unlock_irq(&dmabuf->poll.lock);
 
 		if (events & pevents) {
-			if (!fence_get_rcu(fence_excl)) {
+			if (!dma_fence_get_rcu(fence_excl)) {
 				/* force a recheck */
 				events &= ~pevents;
 				dma_buf_poll_cb(NULL, &dcb->cb);
-			} else if (!fence_add_callback(fence_excl, &dcb->cb,
-						       dma_buf_poll_cb)) {
+			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
+							   dma_buf_poll_cb)) {
 				events &= ~pevents;
-				fence_put(fence_excl);
+				dma_fence_put(fence_excl);
 			} else {
 				/*
 				 * No callback queued, wake up any additional
 				 * waiters.
 				 */
-				fence_put(fence_excl);
+				dma_fence_put(fence_excl);
 				dma_buf_poll_cb(NULL, &dcb->cb);
 			}
 		}
@@ -222,9 +222,9 @@ retry:
 			goto out;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct fence *fence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
-			if (!fence_get_rcu(fence)) {
+			if (!dma_fence_get_rcu(fence)) {
 				/*
 				 * fence refcount dropped to zero, this means
 				 * that fobj has been freed
@@ -235,13 +235,13 @@ retry:
 				dma_buf_poll_cb(NULL, &dcb->cb);
 				break;
 			}
-			if (!fence_add_callback(fence, &dcb->cb,
-						dma_buf_poll_cb)) {
-				fence_put(fence);
+			if (!dma_fence_add_callback(fence, &dcb->cb,
+						    dma_buf_poll_cb)) {
+				dma_fence_put(fence);
 				events &= ~POLLOUT;
 				break;
 			}
-			fence_put(fence);
+			dma_fence_put(fence);
 		}
 
 		/* No callback queued, wake up any additional waiters. */
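The dma_buf_poll() hunks above all follow the same lockless pattern; condensed
into a hypothetical helper (my_notify and my_watch_fence are invented, and the
caller is assumed to be in an RCU read-side section, as dma_buf_poll() is):

/* Take an RCU-protected reference, then either attach a callback or, if
 * the fence already signaled, run the notification immediately. Per the
 * dma_fence_add_callback() contract, the reference can be dropped right
 * after attaching: the fence creator keeps the fence alive until signal.
 */
static void my_notify(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	/* runs (possibly from irq context) once the fence signals */
}

static void my_watch_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (!dma_fence_get_rcu(fence))
		return;	/* refcount hit zero under us: treat as signaled */

	if (dma_fence_add_callback(fence, cb, my_notify))
		my_notify(fence, cb);	/* already signaled, act now */

	dma_fence_put(fence);
}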
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/dma-fence-array.c
index f1989fcaf354..67eb7c8fb88c 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -1,5 +1,5 @@
 /*
- * fence-array: aggregate fences to be waited together
+ * dma-fence-array: aggregate fences to be waited together
  *
  * Copyright (C) 2016 Collabora Ltd
  * Copyright (C) 2016 Advanced Micro Devices, Inc.
@@ -19,35 +19,34 @@
 
 #include <linux/export.h>
 #include <linux/slab.h>
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
 
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
-
-static const char *fence_array_get_driver_name(struct fence *fence)
+static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
 {
-	return "fence_array";
+	return "dma_fence_array";
 }
 
-static const char *fence_array_get_timeline_name(struct fence *fence)
+static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
 {
 	return "unbound";
 }
 
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+static void dma_fence_array_cb_func(struct dma_fence *f,
+				    struct dma_fence_cb *cb)
 {
-	struct fence_array_cb *array_cb =
-		container_of(cb, struct fence_array_cb, cb);
-	struct fence_array *array = array_cb->array;
+	struct dma_fence_array_cb *array_cb =
+		container_of(cb, struct dma_fence_array_cb, cb);
+	struct dma_fence_array *array = array_cb->array;
 
 	if (atomic_dec_and_test(&array->num_pending))
-		fence_signal(&array->base);
-	fence_put(&array->base);
+		dma_fence_signal(&array->base);
+	dma_fence_put(&array->base);
 }
 
-static bool fence_array_enable_signaling(struct fence *fence)
+static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
 {
-	struct fence_array *array = to_fence_array(fence);
-	struct fence_array_cb *cb = (void *)(&array[1]);
+	struct dma_fence_array *array = to_dma_fence_array(fence);
+	struct dma_fence_array_cb *cb = (void *)(&array[1]);
 	unsigned i;
 
 	for (i = 0; i < array->num_fences; ++i) {
@@ -60,10 +59,10 @@ static bool fence_array_enable_signaling(struct fence *fence)
 		 * until we signal the array as complete (but that is now
 		 * insufficient).
 		 */
-		fence_get(&array->base);
-		if (fence_add_callback(array->fences[i], &cb[i].cb,
-				       fence_array_cb_func)) {
-			fence_put(&array->base);
+		dma_fence_get(&array->base);
+		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
+					   dma_fence_array_cb_func)) {
+			dma_fence_put(&array->base);
 			if (atomic_dec_and_test(&array->num_pending))
 				return false;
 		}
@@ -72,69 +71,71 @@ static bool fence_array_enable_signaling(struct fence *fence)
 	return true;
 }
 
-static bool fence_array_signaled(struct fence *fence)
+static bool dma_fence_array_signaled(struct dma_fence *fence)
 {
-	struct fence_array *array = to_fence_array(fence);
+	struct dma_fence_array *array = to_dma_fence_array(fence);
 
 	return atomic_read(&array->num_pending) <= 0;
 }
 
-static void fence_array_release(struct fence *fence)
+static void dma_fence_array_release(struct dma_fence *fence)
 {
-	struct fence_array *array = to_fence_array(fence);
+	struct dma_fence_array *array = to_dma_fence_array(fence);
 	unsigned i;
 
 	for (i = 0; i < array->num_fences; ++i)
-		fence_put(array->fences[i]);
+		dma_fence_put(array->fences[i]);
 
 	kfree(array->fences);
-	fence_free(fence);
+	dma_fence_free(fence);
 }
 
-const struct fence_ops fence_array_ops = {
-	.get_driver_name = fence_array_get_driver_name,
-	.get_timeline_name = fence_array_get_timeline_name,
-	.enable_signaling = fence_array_enable_signaling,
-	.signaled = fence_array_signaled,
-	.wait = fence_default_wait,
-	.release = fence_array_release,
+const struct dma_fence_ops dma_fence_array_ops = {
+	.get_driver_name = dma_fence_array_get_driver_name,
+	.get_timeline_name = dma_fence_array_get_timeline_name,
+	.enable_signaling = dma_fence_array_enable_signaling,
+	.signaled = dma_fence_array_signaled,
+	.wait = dma_fence_default_wait,
+	.release = dma_fence_array_release,
 };
-EXPORT_SYMBOL(fence_array_ops);
+EXPORT_SYMBOL(dma_fence_array_ops);
 
 /**
- * fence_array_create - Create a custom fence array
+ * dma_fence_array_create - Create a custom fence array
  * @num_fences: [in] number of fences to add in the array
  * @fences: [in] array containing the fences
  * @context: [in] fence context to use
  * @seqno: [in] sequence number to use
  * @signal_on_any: [in] signal on any fence in the array
  *
- * Allocate a fence_array object and initialize the base fence with fence_init().
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
  * In case of error it returns NULL.
  *
  * The caller should allocate the fences array with num_fences size
 * and fill it with the fences it wants to add to the object. Ownership of this
- * array is taken and fence_put() is used on each fence on release.
+ * array is taken and dma_fence_put() is used on each fence on release.
  *
  * If @signal_on_any is true the fence array signals if any fence in the array
 * signals, otherwise it signals when all fences in the array signal.
 */
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
-				       u64 context, unsigned seqno,
-				       bool signal_on_any)
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+					       struct dma_fence **fences,
+					       u64 context, unsigned seqno,
+					       bool signal_on_any)
 {
-	struct fence_array *array;
+	struct dma_fence_array *array;
 	size_t size = sizeof(*array);
 
 	/* Allocate the callback structures behind the array. */
-	size += num_fences * sizeof(struct fence_array_cb);
+	size += num_fences * sizeof(struct dma_fence_array_cb);
 	array = kzalloc(size, GFP_KERNEL);
 	if (!array)
 		return NULL;
 
 	spin_lock_init(&array->lock);
-	fence_init(&array->base, &fence_array_ops, &array->lock,
-		   context, seqno);
+	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
+		       context, seqno);
 
 	array->num_fences = num_fences;
 	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
@@ -142,4 +143,4 @@ struct fence_array *fence_array_create(int num_fences, struct fence **fences,
 
 	return array;
 }
-EXPORT_SYMBOL(fence_array_create);
+EXPORT_SYMBOL(fence_array_create);
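As a usage-level sketch of the renamed entry point (my_merge_fences is
invented and error handling is abbreviated), a caller that wants one fence
that signals once two others have both signaled might do:

static struct dma_fence *my_merge_fences(struct dma_fence *a,
					 struct dma_fence *b)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	/* the array takes ownership and dma_fence_put()s these on release */
	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	/* signal_on_any = false: signal only when all fences have signaled */
	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1, false);
	if (!array) {
		dma_fence_put(a);
		dma_fence_put(b);
		kfree(fences);
		return NULL;
	}
	return &array->base;
}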
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/dma-fence.c
index 4d51f9e83fa8..3a7bf009c21c 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -21,13 +21,13 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/atomic.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #define CREATE_TRACE_POINTS
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
 
-EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
-EXPORT_TRACEPOINT_SYMBOL(fence_emit);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 
 /*
  * fence context counter: each execution context should have its own
@@ -35,39 +35,41 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
 
 /**
- * fence_context_alloc - allocate an array of fence contexts
+ * dma_fence_context_alloc - allocate an array of fence contexts
  * @num: [in] amount of contexts to allocate
  *
  * This function will return the first index of the number of fences allocated.
 * The fence context is used for setting fence->context to a unique number.
 */
-u64 fence_context_alloc(unsigned num)
+u64 dma_fence_context_alloc(unsigned num)
 {
 	BUG_ON(!num);
-	return atomic64_add_return(num, &fence_context_counter) - num;
+	return atomic64_add_return(num, &dma_fence_context_counter) - num;
 }
-EXPORT_SYMBOL(fence_context_alloc);
+EXPORT_SYMBOL(dma_fence_context_alloc);
 
 /**
- * fence_signal_locked - signal completion of a fence
+ * dma_fence_signal_locked - signal completion of a fence
  * @fence: the fence to signal
  *
 * Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 *
- * Unlike fence_signal, this function must be called with fence->lock held.
+ * Unlike dma_fence_signal, this function must be called with fence->lock held.
 */
-int fence_signal_locked(struct fence *fence)
+int dma_fence_signal_locked(struct dma_fence *fence)
 {
-	struct fence_cb *cur, *tmp;
+	struct dma_fence_cb *cur, *tmp;
 	int ret = 0;
 
+	lockdep_assert_held(fence->lock);
+
 	if (WARN_ON(!fence))
 		return -EINVAL;
 
@@ -76,15 +78,15 @@ int fence_signal_locked(struct fence *fence)
 		smp_mb__before_atomic();
 	}
 
-	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		ret = -EINVAL;
 
 		/*
-		 * we might have raced with the unlocked fence_signal,
+		 * we might have raced with the unlocked dma_fence_signal,
 		 * still run through all callbacks
 		 */
 	} else
-		trace_fence_signaled(fence);
+		trace_dma_fence_signaled(fence);
 
 	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
 		list_del_init(&cur->node);
@@ -92,19 +94,19 @@ int fence_signal_locked(struct fence *fence)
 	}
 	return ret;
 }
-EXPORT_SYMBOL(fence_signal_locked);
+EXPORT_SYMBOL(dma_fence_signal_locked);
 
 /**
- * fence_signal - signal completion of a fence
+ * dma_fence_signal - signal completion of a fence
  * @fence: the fence to signal
  *
 * Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 */
-int fence_signal(struct fence *fence)
+int dma_fence_signal(struct dma_fence *fence)
 {
 	unsigned long flags;
 
@@ -116,13 +118,13 @@ int fence_signal(struct fence *fence)
 		smp_mb__before_atomic();
 	}
 
-	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return -EINVAL;
 
-	trace_fence_signaled(fence);
+	trace_dma_fence_signaled(fence);
 
-	if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
-		struct fence_cb *cur, *tmp;
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+		struct dma_fence_cb *cur, *tmp;
 
 		spin_lock_irqsave(fence->lock, flags);
 		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
@@ -133,10 +135,10 @@ int fence_signal(struct fence *fence)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(fence_signal);
+EXPORT_SYMBOL(dma_fence_signal);
 
 /**
- * fence_wait_timeout - sleep until the fence gets signaled
+ * dma_fence_wait_timeout - sleep until the fence gets signaled
  * or until timeout elapses
  * @fence: [in] the fence to wait on
  * @intr: [in] if true, do an interruptible wait
@@ -152,7 +154,7 @@ EXPORT_SYMBOL(fence_signal);
 * freed before return, resulting in undefined behavior.
 */
 signed long
-fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
 {
 	signed long ret;
 
@@ -160,70 +162,71 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
 		return -EINVAL;
 
 	if (timeout == 0)
-		return fence_is_signaled(fence);
+		return dma_fence_is_signaled(fence);
 
-	trace_fence_wait_start(fence);
+	trace_dma_fence_wait_start(fence);
 	ret = fence->ops->wait(fence, intr, timeout);
-	trace_fence_wait_end(fence);
+	trace_dma_fence_wait_end(fence);
 	return ret;
 }
-EXPORT_SYMBOL(fence_wait_timeout);
+EXPORT_SYMBOL(dma_fence_wait_timeout);
 
-void fence_release(struct kref *kref)
+void dma_fence_release(struct kref *kref)
 {
-	struct fence *fence =
-		container_of(kref, struct fence, refcount);
+	struct dma_fence *fence =
+		container_of(kref, struct dma_fence, refcount);
 
-	trace_fence_destroy(fence);
+	trace_dma_fence_destroy(fence);
 
 	BUG_ON(!list_empty(&fence->cb_list));
 
 	if (fence->ops->release)
 		fence->ops->release(fence);
 	else
-		fence_free(fence);
+		dma_fence_free(fence);
 }
-EXPORT_SYMBOL(fence_release);
+EXPORT_SYMBOL(dma_fence_release);
 
-void fence_free(struct fence *fence)
+void dma_fence_free(struct dma_fence *fence)
 {
 	kfree_rcu(fence, rcu);
 }
-EXPORT_SYMBOL(fence_free);
+EXPORT_SYMBOL(dma_fence_free);
 
 /**
- * fence_enable_sw_signaling - enable signaling on fence
+ * dma_fence_enable_sw_signaling - enable signaling on fence
  * @fence: [in] the fence to enable
  *
 * this will request for sw signaling to be enabled, to make the fence
 * complete as soon as possible
 */
-void fence_enable_sw_signaling(struct fence *fence)
+void dma_fence_enable_sw_signaling(struct dma_fence *fence)
 {
 	unsigned long flags;
 
-	if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
-	    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-		trace_fence_enable_signal(fence);
+	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			      &fence->flags) &&
+	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		trace_dma_fence_enable_signal(fence);
 
 		spin_lock_irqsave(fence->lock, flags);
 
 		if (!fence->ops->enable_signaling(fence))
-			fence_signal_locked(fence);
+			dma_fence_signal_locked(fence);
 
 		spin_unlock_irqrestore(fence->lock, flags);
 	}
 }
-EXPORT_SYMBOL(fence_enable_sw_signaling);
+EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
 
 /**
- * fence_add_callback - add a callback to be called when the fence
+ * dma_fence_add_callback - add a callback to be called when the fence
  * is signaled
  * @fence: [in] the fence to wait on
  * @cb: [in] the callback to register
  * @func: [in] the function to call
  *
- * cb will be initialized by fence_add_callback, no initialization
+ * cb will be initialized by dma_fence_add_callback, no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
@@ -232,15 +235,15 @@ EXPORT_SYMBOL(fence_enable_sw_signaling);
 * *not* call the callback)
 *
 * Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to fence_wait, however the caller doesn't need to
+ * refcount as it does to dma_fence_wait, however the caller doesn't need to
 * keep a refcount to fence afterwards: when software access is enabled,
 * the creator of the fence is required to keep the fence alive until
- * after it signals with fence_signal. The callback itself can be called
+ * after it signals with dma_fence_signal. The callback itself can be called
 * from irq context.
 *
 */
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
-		       fence_func_t func)
+int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+			   dma_fence_func_t func)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -249,22 +252,23 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
 	if (WARN_ON(!fence || !func))
 		return -EINVAL;
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		INIT_LIST_HEAD(&cb->node);
 		return -ENOENT;
 	}
 
 	spin_lock_irqsave(fence->lock, flags);
 
-	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+				   &fence->flags);
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		ret = -ENOENT;
 	else if (!was_set) {
-		trace_fence_enable_signal(fence);
+		trace_dma_fence_enable_signal(fence);
 
 		if (!fence->ops->enable_signaling(fence)) {
-			fence_signal_locked(fence);
+			dma_fence_signal_locked(fence);
 			ret = -ENOENT;
 		}
 	}
@@ -278,10 +282,10 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
 
 	return ret;
 }
-EXPORT_SYMBOL(fence_add_callback);
+EXPORT_SYMBOL(dma_fence_add_callback);
 
 /**
- * fence_remove_callback - remove a callback from the signaling list
+ * dma_fence_remove_callback - remove a callback from the signaling list
  * @fence: [in] the fence to wait on
  * @cb: [in] the callback to remove
  *
@@ -296,7 +300,7 @@ EXPORT_SYMBOL(fence_add_callback);
 * with a reference held to the fence.
 */
 bool
-fence_remove_callback(struct fence *fence, struct fence_cb *cb)
+dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	unsigned long flags;
 	bool ret;
@@ -311,15 +315,15 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb)
 
 	return ret;
 }
-EXPORT_SYMBOL(fence_remove_callback);
+EXPORT_SYMBOL(dma_fence_remove_callback);
 
 struct default_wait_cb {
-	struct fence_cb base;
+	struct dma_fence_cb base;
 	struct task_struct *task;
 };
 
 static void
-fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
+dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct default_wait_cb *wait =
 		container_of(cb, struct default_wait_cb, base);
@@ -328,7 +332,7 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
 }
 
 /**
- * fence_default_wait - default sleep until the fence gets signaled
+ * dma_fence_default_wait - default sleep until the fence gets signaled
  * or until timeout elapses
  * @fence: [in] the fence to wait on
  * @intr: [in] if true, do an interruptible wait
@@ -338,14 +342,14 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
 * remaining timeout in jiffies on success.
 */
 signed long
-fence_default_wait(struct fence *fence, bool intr, signed long timeout)
+dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
 {
 	struct default_wait_cb cb;
 	unsigned long flags;
 	signed long ret = timeout;
 	bool was_set;
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return timeout;
 
 	spin_lock_irqsave(fence->lock, flags);
@@ -355,25 +359,26 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
 		goto out;
 	}
 
-	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+				   &fence->flags);
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		goto out;
 
 	if (!was_set) {
-		trace_fence_enable_signal(fence);
+		trace_dma_fence_enable_signal(fence);
 
 		if (!fence->ops->enable_signaling(fence)) {
-			fence_signal_locked(fence);
+			dma_fence_signal_locked(fence);
 			goto out;
 		}
 	}
 
-	cb.base.func = fence_default_wait_cb;
+	cb.base.func = dma_fence_default_wait_cb;
 	cb.task = current;
 	list_add(&cb.base.node, &fence->cb_list);
 
-	while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
+	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
 		if (intr)
 			__set_current_state(TASK_INTERRUPTIBLE);
 		else
@@ -395,23 +400,23 @@ out:
 	spin_unlock_irqrestore(fence->lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL(fence_default_wait);
+EXPORT_SYMBOL(dma_fence_default_wait);
 
 static bool
-fence_test_signaled_any(struct fence **fences, uint32_t count)
+dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count)
 {
 	int i;
 
 	for (i = 0; i < count; ++i) {
-		struct fence *fence = fences[i];
-		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		struct dma_fence *fence = fences[i];
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 			return true;
 	}
 	return false;
 }
 
 /**
- * fence_wait_any_timeout - sleep until any fence gets signaled
+ * dma_fence_wait_any_timeout - sleep until any fence gets signaled
  * or until timeout elapses
  * @fences: [in] array of fences to wait on
  * @count: [in] number of fences to wait on
@@ -427,8 +432,8 @@ fence_test_signaled_any(struct fence **fences, uint32_t count)
 * fence might be freed before return, resulting in undefined behavior.
 */
 signed long
-fence_wait_any_timeout(struct fence **fences, uint32_t count,
-		       bool intr, signed long timeout)
+dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
+			   bool intr, signed long timeout)
 {
 	struct default_wait_cb *cb;
 	signed long ret = timeout;
@@ -439,7 +444,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 
 	if (timeout == 0) {
 		for (i = 0; i < count; ++i)
-			if (fence_is_signaled(fences[i]))
+			if (dma_fence_is_signaled(fences[i]))
 				return 1;
 
 		return 0;
@@ -452,16 +457,16 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 	}
 
 	for (i = 0; i < count; ++i) {
-		struct fence *fence = fences[i];
+		struct dma_fence *fence = fences[i];
 
-		if (fence->ops->wait != fence_default_wait) {
+		if (fence->ops->wait != dma_fence_default_wait) {
 			ret = -EINVAL;
 			goto fence_rm_cb;
 		}
 
 		cb[i].task = current;
-		if (fence_add_callback(fence, &cb[i].base,
-				       fence_default_wait_cb)) {
+		if (dma_fence_add_callback(fence, &cb[i].base,
+					   dma_fence_default_wait_cb)) {
 			/* This fence is already signaled */
 			goto fence_rm_cb;
 		}
@@ -473,7 +478,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 		else
 			set_current_state(TASK_UNINTERRUPTIBLE);
 
-		if (fence_test_signaled_any(fences, count))
+		if (dma_fence_test_signaled_any(fences, count))
 			break;
 
 		ret = schedule_timeout(ret);
@@ -486,34 +491,34 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 
 fence_rm_cb:
 	while (i-- > 0)
-		fence_remove_callback(fences[i], &cb[i].base);
+		dma_fence_remove_callback(fences[i], &cb[i].base);
 
 err_free_cb:
 	kfree(cb);
 
 	return ret;
 }
-EXPORT_SYMBOL(fence_wait_any_timeout);
+EXPORT_SYMBOL(dma_fence_wait_any_timeout);
 
 /**
- * fence_init - Initialize a custom fence.
+ * dma_fence_init - Initialize a custom fence.
  * @fence: [in] the fence to initialize
- * @ops: [in] the fence_ops for operations on this fence
+ * @ops: [in] the dma_fence_ops for operations on this fence
  * @lock: [in] the irqsafe spinlock to use for locking this fence
  * @context: [in] the execution context this fence is run on
  * @seqno: [in] a linear increasing sequence number for this context
  *
 * Initializes an allocated fence, the caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
- * refcount again if fence_ops.enable_signaling gets called. This can
+ * refcount again if dma_fence_ops.enable_signaling gets called. This can
 * be used for other implementing other types of fence.
 *
 * context and seqno are used for easy comparison between fences, allowing
- * to check which fence is later by simply using fence_later.
+ * to check which fence is later by simply using dma_fence_later.
 */
 void
-fence_init(struct fence *fence, const struct fence_ops *ops,
-	   spinlock_t *lock, u64 context, unsigned seqno)
+dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+	       spinlock_t *lock, u64 context, unsigned seqno)
 {
 	BUG_ON(!lock);
 	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
@@ -527,6 +532,6 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
 	fence->seqno = seqno;
 	fence->flags = 0UL;
 
-	trace_fence_init(fence);
+	trace_dma_fence_init(fence);
 }
-EXPORT_SYMBOL(fence_init);
+EXPORT_SYMBOL(dma_fence_init);
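Putting the renamed core API together, a minimal software-only fence might
look like the following sketch. All my_* names are invented; note that at
this point in the tree .enable_signaling and .wait are still mandatory ops
(see the BUG_ON in dma_fence_init() above):

#include <linux/dma-fence.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(my_fence_lock);

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "my_driver";
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "my_timeline";
}

static bool my_enable_signaling(struct dma_fence *f)
{
	return true;	/* software fence: nothing to arm, wait for signal */
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name = my_get_driver_name,
	.get_timeline_name = my_get_timeline_name,
	.enable_signaling = my_enable_signaling,
	.wait = dma_fence_default_wait,
};

static struct dma_fence *my_fence_create(u64 context, unsigned seqno)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	dma_fence_init(f, &my_fence_ops, &my_fence_lock, context, seqno);
	return f;	/* later: dma_fence_signal(f); dma_fence_put(f); */
}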
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 82de59f7cbbd..7ed56f3edfb7 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared);
 static void
 reservation_object_add_shared_inplace(struct reservation_object *obj,
 				      struct reservation_object_list *fobj,
-				      struct fence *fence)
+				      struct dma_fence *fence)
 {
 	u32 i;
 
-	fence_get(fence);
+	dma_fence_get(fence);
 
 	preempt_disable();
 	write_seqcount_begin(&obj->seq);
 
 	for (i = 0; i < fobj->shared_count; ++i) {
-		struct fence *old_fence;
+		struct dma_fence *old_fence;
 
 		old_fence = rcu_dereference_protected(fobj->shared[i],
 						      reservation_object_held(obj));
@@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
 			write_seqcount_end(&obj->seq);
 			preempt_enable();
 
-			fence_put(old_fence);
+			dma_fence_put(old_fence);
 			return;
 		}
 	}
@@ -143,12 +143,12 @@ static void
 reservation_object_add_shared_replace(struct reservation_object *obj,
 				      struct reservation_object_list *old,
 				      struct reservation_object_list *fobj,
-				      struct fence *fence)
+				      struct dma_fence *fence)
 {
 	unsigned i;
-	struct fence *old_fence = NULL;
+	struct dma_fence *old_fence = NULL;
 
-	fence_get(fence);
+	dma_fence_get(fence);
 
 	if (!old) {
 		RCU_INIT_POINTER(fobj->shared[0], fence);
@@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 	fobj->shared_count = old->shared_count;
 
 	for (i = 0; i < old->shared_count; ++i) {
-		struct fence *check;
+		struct dma_fence *check;
 
 		check = rcu_dereference_protected(old->shared[i],
 						  reservation_object_held(obj));
@@ -196,7 +196,7 @@ done:
 	kfree_rcu(old, rcu);
 
 	if (old_fence)
-		fence_put(old_fence);
+		dma_fence_put(old_fence);
 }
 
 /**
@@ -208,7 +208,7 @@ done:
 * reservation_object_reserve_shared() has been called.
 */
 void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct fence *fence)
+					 struct dma_fence *fence)
 {
 	struct reservation_object_list *old, *fobj = obj->staged;
 
@@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
 * Add a fence to the exclusive slot. The obj->lock must be held.
 */
 void reservation_object_add_excl_fence(struct reservation_object *obj,
-				       struct fence *fence)
+				       struct dma_fence *fence)
 {
-	struct fence *old_fence = reservation_object_get_excl(obj);
+	struct dma_fence *old_fence = reservation_object_get_excl(obj);
 	struct reservation_object_list *old;
 	u32 i = 0;
 
@@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 		i = old->shared_count;
 
 	if (fence)
-		fence_get(fence);
+		dma_fence_get(fence);
 
 	preempt_disable();
 	write_seqcount_begin(&obj->seq);
@@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 
 	/* inplace update, no shared fences */
 	while (i--)
-		fence_put(rcu_dereference_protected(old->shared[i],
-						reservation_object_held(obj)));
+		dma_fence_put(rcu_dereference_protected(old->shared[i],
+						reservation_object_held(obj)));
 
 	if (old_fence)
-		fence_put(old_fence);
+		dma_fence_put(old_fence);
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
 
@@ -276,12 +276,12 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
 * Zero or -errno
 */
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
-				      struct fence **pfence_excl,
+				      struct dma_fence **pfence_excl,
 				      unsigned *pshared_count,
-				      struct fence ***pshared)
+				      struct dma_fence ***pshared)
 {
-	struct fence **shared = NULL;
-	struct fence *fence_excl;
+	struct dma_fence **shared = NULL;
+	struct dma_fence *fence_excl;
 	unsigned int shared_count;
 	int ret = 1;
 
@@ -296,12 +296,12 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 		seq = read_seqcount_begin(&obj->seq);
 
 		fence_excl = rcu_dereference(obj->fence_excl);
-		if (fence_excl && !fence_get_rcu(fence_excl))
+		if (fence_excl && !dma_fence_get_rcu(fence_excl))
 			goto unlock;
 
 		fobj = rcu_dereference(obj->fence);
 		if (fobj) {
-			struct fence **nshared;
+			struct dma_fence **nshared;
 			size_t sz = sizeof(*shared) * fobj->shared_max;
 
 			nshared = krealloc(shared, sz,
@@ -322,15 +322,15 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 
 		for (i = 0; i < shared_count; ++i) {
 			shared[i] = rcu_dereference(fobj->shared[i]);
-			if (!fence_get_rcu(shared[i]))
+			if (!dma_fence_get_rcu(shared[i]))
 				break;
 		}
 	}
 
 	if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
 		while (i--)
-			fence_put(shared[i]);
-		fence_put(fence_excl);
+			dma_fence_put(shared[i]);
+		dma_fence_put(fence_excl);
 		goto unlock;
 	}
 
@@ -368,7 +368,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 				  bool wait_all, bool intr,
 				  unsigned long timeout)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	unsigned seq, shared_count, i = 0;
 	long ret = timeout;
 
@@ -389,16 +389,17 @@ retry:
 		shared_count = fobj->shared_count;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct fence *lfence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
393 | 393 | ||
394 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) | 394 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, |
395 | &lfence->flags)) | ||
395 | continue; | 396 | continue; |
396 | 397 | ||
397 | if (!fence_get_rcu(lfence)) | 398 | if (!dma_fence_get_rcu(lfence)) |
398 | goto unlock_retry; | 399 | goto unlock_retry; |
399 | 400 | ||
400 | if (fence_is_signaled(lfence)) { | 401 | if (dma_fence_is_signaled(lfence)) { |
401 | fence_put(lfence); | 402 | dma_fence_put(lfence); |
402 | continue; | 403 | continue; |
403 | } | 404 | } |
404 | 405 | ||
@@ -408,15 +409,16 @@ retry: | |||
408 | } | 409 | } |
409 | 410 | ||
410 | if (!shared_count) { | 411 | if (!shared_count) { |
411 | struct fence *fence_excl = rcu_dereference(obj->fence_excl); | 412 | struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); |
412 | 413 | ||
413 | if (fence_excl && | 414 | if (fence_excl && |
414 | !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) { | 415 | !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, |
415 | if (!fence_get_rcu(fence_excl)) | 416 | &fence_excl->flags)) { |
417 | if (!dma_fence_get_rcu(fence_excl)) | ||
416 | goto unlock_retry; | 418 | goto unlock_retry; |
417 | 419 | ||
418 | if (fence_is_signaled(fence_excl)) | 420 | if (dma_fence_is_signaled(fence_excl)) |
419 | fence_put(fence_excl); | 421 | dma_fence_put(fence_excl); |
420 | else | 422 | else |
421 | fence = fence_excl; | 423 | fence = fence_excl; |
422 | } | 424 | } |
@@ -425,12 +427,12 @@ retry: | |||
425 | rcu_read_unlock(); | 427 | rcu_read_unlock(); |
426 | if (fence) { | 428 | if (fence) { |
427 | if (read_seqcount_retry(&obj->seq, seq)) { | 429 | if (read_seqcount_retry(&obj->seq, seq)) { |
428 | fence_put(fence); | 430 | dma_fence_put(fence); |
429 | goto retry; | 431 | goto retry; |
430 | } | 432 | } |
431 | 433 | ||
432 | ret = fence_wait_timeout(fence, intr, ret); | 434 | ret = dma_fence_wait_timeout(fence, intr, ret); |
433 | fence_put(fence); | 435 | dma_fence_put(fence); |
434 | if (ret > 0 && wait_all && (i + 1 < shared_count)) | 436 | if (ret > 0 && wait_all && (i + 1 < shared_count)) |
435 | goto retry; | 437 | goto retry; |
436 | } | 438 | } |
@@ -444,18 +446,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); | |||
444 | 446 | ||
445 | 447 | ||
446 | static inline int | 448 | static inline int |
447 | reservation_object_test_signaled_single(struct fence *passed_fence) | 449 | reservation_object_test_signaled_single(struct dma_fence *passed_fence) |
448 | { | 450 | { |
449 | struct fence *fence, *lfence = passed_fence; | 451 | struct dma_fence *fence, *lfence = passed_fence; |
450 | int ret = 1; | 452 | int ret = 1; |
451 | 453 | ||
452 | if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { | 454 | if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { |
453 | fence = fence_get_rcu(lfence); | 455 | fence = dma_fence_get_rcu(lfence); |
454 | if (!fence) | 456 | if (!fence) |
455 | return -1; | 457 | return -1; |
456 | 458 | ||
457 | ret = !!fence_is_signaled(fence); | 459 | ret = !!dma_fence_is_signaled(fence); |
458 | fence_put(fence); | 460 | dma_fence_put(fence); |
459 | } | 461 | } |
460 | return ret; | 462 | return ret; |
461 | } | 463 | } |
@@ -492,7 +494,7 @@ retry: | |||
492 | shared_count = fobj->shared_count; | 494 | shared_count = fobj->shared_count; |
493 | 495 | ||
494 | for (i = 0; i < shared_count; ++i) { | 496 | for (i = 0; i < shared_count; ++i) { |
495 | struct fence *fence = rcu_dereference(fobj->shared[i]); | 497 | struct dma_fence *fence = rcu_dereference(fobj->shared[i]); |
496 | 498 | ||
497 | ret = reservation_object_test_signaled_single(fence); | 499 | ret = reservation_object_test_signaled_single(fence); |
498 | if (ret < 0) | 500 | if (ret < 0) |
@@ -506,7 +508,7 @@ retry: | |||
506 | } | 508 | } |
507 | 509 | ||
508 | if (!shared_count) { | 510 | if (!shared_count) { |
509 | struct fence *fence_excl = rcu_dereference(obj->fence_excl); | 511 | struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); |
510 | 512 | ||
511 | if (fence_excl) { | 513 | if (fence_excl) { |
512 | ret = reservation_object_test_signaled_single( | 514 | ret = reservation_object_test_signaled_single( |
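Every read-side hunk in reservation.c above follows the same lockless pattern: enter an RCU section, sample obj->seq, take references with dma_fence_get_rcu(), and retry whenever a reference could not be taken or a writer bumped the seqcount. A minimal sketch of that loop, assuming only the renamed API visible in this patch (the helper name snapshot_excl_fence is hypothetical; the tree's reservation_object_get_excl_rcu() implements essentially the same loop):

#include <linux/dma-fence.h>
#include <linux/reservation.h>

/* Sketch: take a reference to the exclusive fence without holding
 * obj->lock, retrying on any race with a writer. */
static struct dma_fence *snapshot_excl_fence(struct reservation_object *obj)
{
	struct dma_fence *fence;
	unsigned int seq;

retry:
	rcu_read_lock();
	seq = read_seqcount_begin(&obj->seq);

	fence = rcu_dereference(obj->fence_excl);
	if (fence && !dma_fence_get_rcu(fence)) {
		/* refcount already hit zero; a writer is replacing it */
		rcu_read_unlock();
		goto retry;
	}
	rcu_read_unlock();

	if (read_seqcount_retry(&obj->seq, seq)) {
		dma_fence_put(fence);	/* dma_fence_put(NULL) is a no-op */
		goto retry;
	}
	return fence;	/* caller releases with dma_fence_put() */
}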
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c index 71127f8f1626..f47112a64763 100644 --- a/drivers/dma-buf/seqno-fence.c +++ b/drivers/dma-buf/seqno-fence.c | |||
@@ -21,35 +21,35 @@ | |||
21 | #include <linux/export.h> | 21 | #include <linux/export.h> |
22 | #include <linux/seqno-fence.h> | 22 | #include <linux/seqno-fence.h> |
23 | 23 | ||
24 | static const char *seqno_fence_get_driver_name(struct fence *fence) | 24 | static const char *seqno_fence_get_driver_name(struct dma_fence *fence) |
25 | { | 25 | { |
26 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | 26 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); |
27 | 27 | ||
28 | return seqno_fence->ops->get_driver_name(fence); | 28 | return seqno_fence->ops->get_driver_name(fence); |
29 | } | 29 | } |
30 | 30 | ||
31 | static const char *seqno_fence_get_timeline_name(struct fence *fence) | 31 | static const char *seqno_fence_get_timeline_name(struct dma_fence *fence) |
32 | { | 32 | { |
33 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | 33 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); |
34 | 34 | ||
35 | return seqno_fence->ops->get_timeline_name(fence); | 35 | return seqno_fence->ops->get_timeline_name(fence); |
36 | } | 36 | } |
37 | 37 | ||
38 | static bool seqno_enable_signaling(struct fence *fence) | 38 | static bool seqno_enable_signaling(struct dma_fence *fence) |
39 | { | 39 | { |
40 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | 40 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); |
41 | 41 | ||
42 | return seqno_fence->ops->enable_signaling(fence); | 42 | return seqno_fence->ops->enable_signaling(fence); |
43 | } | 43 | } |
44 | 44 | ||
45 | static bool seqno_signaled(struct fence *fence) | 45 | static bool seqno_signaled(struct dma_fence *fence) |
46 | { | 46 | { |
47 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | 47 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); |
48 | 48 | ||
49 | return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence); | 49 | return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence); |
50 | } | 50 | } |
51 | 51 | ||
52 | static void seqno_release(struct fence *fence) | 52 | static void seqno_release(struct dma_fence *fence) |
53 | { | 53 | { |
54 | struct seqno_fence *f = to_seqno_fence(fence); | 54 | struct seqno_fence *f = to_seqno_fence(fence); |
55 | 55 | ||
@@ -57,18 +57,18 @@ static void seqno_release(struct fence *fence) | |||
57 | if (f->ops->release) | 57 | if (f->ops->release) |
58 | f->ops->release(fence); | 58 | f->ops->release(fence); |
59 | else | 59 | else |
60 | fence_free(&f->base); | 60 | dma_fence_free(&f->base); |
61 | } | 61 | } |
62 | 62 | ||
63 | static signed long seqno_wait(struct fence *fence, bool intr, | 63 | static signed long seqno_wait(struct dma_fence *fence, bool intr, |
64 | signed long timeout) | 64 | signed long timeout) |
65 | { | 65 | { |
66 | struct seqno_fence *f = to_seqno_fence(fence); | 66 | struct seqno_fence *f = to_seqno_fence(fence); |
67 | 67 | ||
68 | return f->ops->wait(fence, intr, timeout); | 68 | return f->ops->wait(fence, intr, timeout); |
69 | } | 69 | } |
70 | 70 | ||
71 | const struct fence_ops seqno_fence_ops = { | 71 | const struct dma_fence_ops seqno_fence_ops = { |
72 | .get_driver_name = seqno_fence_get_driver_name, | 72 | .get_driver_name = seqno_fence_get_driver_name, |
73 | .get_timeline_name = seqno_fence_get_timeline_name, | 73 | .get_timeline_name = seqno_fence_get_timeline_name, |
74 | .enable_signaling = seqno_enable_signaling, | 74 | .enable_signaling = seqno_enable_signaling, |
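seqno-fence only forwards to a wrapped ops table, but it also lists every callback a fence backend had to provide in this era: .enable_signaling and .wait were still mandatory, and dma_fence_default_wait() covers the common sleeping case. A hedged sketch of a minimal standalone ops table (all my_* names are hypothetical):

#include <linux/dma-fence.h>

static const char *my_get_driver_name(struct dma_fence *fence)
{
	return "my-driver";
}

static const char *my_get_timeline_name(struct dma_fence *fence)
{
	return "my-timeline";
}

static bool my_enable_signaling(struct dma_fence *fence)
{
	/* Arm whatever interrupt eventually calls dma_fence_signal();
	 * returning false would mean the fence is already signaled. */
	return true;
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name	= my_get_driver_name,
	.get_timeline_name	= my_get_timeline_name,
	.enable_signaling	= my_enable_signaling,
	.wait			= dma_fence_default_wait,
};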
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 62e8e6dc7953..82e0ca4dd0c1 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c | |||
@@ -68,9 +68,9 @@ struct sw_sync_create_fence_data { | |||
68 | 68 | ||
69 | #define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) | 69 | #define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) |
70 | 70 | ||
71 | static const struct fence_ops timeline_fence_ops; | 71 | static const struct dma_fence_ops timeline_fence_ops; |
72 | 72 | ||
73 | static inline struct sync_pt *fence_to_sync_pt(struct fence *fence) | 73 | static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence) |
74 | { | 74 | { |
75 | if (fence->ops != &timeline_fence_ops) | 75 | if (fence->ops != &timeline_fence_ops) |
76 | return NULL; | 76 | return NULL; |
@@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name) | |||
93 | return NULL; | 93 | return NULL; |
94 | 94 | ||
95 | kref_init(&obj->kref); | 95 | kref_init(&obj->kref); |
96 | obj->context = fence_context_alloc(1); | 96 | obj->context = dma_fence_context_alloc(1); |
97 | strlcpy(obj->name, name, sizeof(obj->name)); | 97 | strlcpy(obj->name, name, sizeof(obj->name)); |
98 | 98 | ||
99 | INIT_LIST_HEAD(&obj->child_list_head); | 99 | INIT_LIST_HEAD(&obj->child_list_head); |
@@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) | |||
146 | 146 | ||
147 | list_for_each_entry_safe(pt, next, &obj->active_list_head, | 147 | list_for_each_entry_safe(pt, next, &obj->active_list_head, |
148 | active_list) { | 148 | active_list) { |
149 | if (fence_is_signaled_locked(&pt->base)) | 149 | if (dma_fence_is_signaled_locked(&pt->base)) |
150 | list_del_init(&pt->active_list); | 150 | list_del_init(&pt->active_list); |
151 | } | 151 | } |
152 | 152 | ||
@@ -179,30 +179,30 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size, | |||
179 | 179 | ||
180 | spin_lock_irqsave(&obj->child_list_lock, flags); | 180 | spin_lock_irqsave(&obj->child_list_lock, flags); |
181 | sync_timeline_get(obj); | 181 | sync_timeline_get(obj); |
182 | fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock, | 182 | dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock, |
183 | obj->context, value); | 183 | obj->context, value); |
184 | list_add_tail(&pt->child_list, &obj->child_list_head); | 184 | list_add_tail(&pt->child_list, &obj->child_list_head); |
185 | INIT_LIST_HEAD(&pt->active_list); | 185 | INIT_LIST_HEAD(&pt->active_list); |
186 | spin_unlock_irqrestore(&obj->child_list_lock, flags); | 186 | spin_unlock_irqrestore(&obj->child_list_lock, flags); |
187 | return pt; | 187 | return pt; |
188 | } | 188 | } |
189 | 189 | ||
190 | static const char *timeline_fence_get_driver_name(struct fence *fence) | 190 | static const char *timeline_fence_get_driver_name(struct dma_fence *fence) |
191 | { | 191 | { |
192 | return "sw_sync"; | 192 | return "sw_sync"; |
193 | } | 193 | } |
194 | 194 | ||
195 | static const char *timeline_fence_get_timeline_name(struct fence *fence) | 195 | static const char *timeline_fence_get_timeline_name(struct dma_fence *fence) |
196 | { | 196 | { |
197 | struct sync_timeline *parent = fence_parent(fence); | 197 | struct sync_timeline *parent = dma_fence_parent(fence); |
198 | 198 | ||
199 | return parent->name; | 199 | return parent->name; |
200 | } | 200 | } |
201 | 201 | ||
202 | static void timeline_fence_release(struct fence *fence) | 202 | static void timeline_fence_release(struct dma_fence *fence) |
203 | { | 203 | { |
204 | struct sync_pt *pt = fence_to_sync_pt(fence); | 204 | struct sync_pt *pt = dma_fence_to_sync_pt(fence); |
205 | struct sync_timeline *parent = fence_parent(fence); | 205 | struct sync_timeline *parent = dma_fence_parent(fence); |
206 | unsigned long flags; | 206 | unsigned long flags; |
207 | 207 | ||
208 | spin_lock_irqsave(fence->lock, flags); | 208 | spin_lock_irqsave(fence->lock, flags); |
@@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence) | |||
212 | spin_unlock_irqrestore(fence->lock, flags); | 212 | spin_unlock_irqrestore(fence->lock, flags); |
213 | 213 | ||
214 | sync_timeline_put(parent); | 214 | sync_timeline_put(parent); |
215 | fence_free(fence); | 215 | dma_fence_free(fence); |
216 | } | 216 | } |
217 | 217 | ||
218 | static bool timeline_fence_signaled(struct fence *fence) | 218 | static bool timeline_fence_signaled(struct dma_fence *fence) |
219 | { | 219 | { |
220 | struct sync_timeline *parent = fence_parent(fence); | 220 | struct sync_timeline *parent = dma_fence_parent(fence); |
221 | 221 | ||
222 | return fence->seqno <= parent->value; | 222 | return fence->seqno <= parent->value; |
223 | } | 223 | } |
224 | 224 | ||
225 | static bool timeline_fence_enable_signaling(struct fence *fence) | 225 | static bool timeline_fence_enable_signaling(struct dma_fence *fence) |
226 | { | 226 | { |
227 | struct sync_pt *pt = fence_to_sync_pt(fence); | 227 | struct sync_pt *pt = dma_fence_to_sync_pt(fence); |
228 | struct sync_timeline *parent = fence_parent(fence); | 228 | struct sync_timeline *parent = dma_fence_parent(fence); |
229 | 229 | ||
230 | if (timeline_fence_signaled(fence)) | 230 | if (timeline_fence_signaled(fence)) |
231 | return false; | 231 | return false; |
@@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence) | |||
234 | return true; | 234 | return true; |
235 | } | 235 | } |
236 | 236 | ||
237 | static void timeline_fence_value_str(struct fence *fence, | 237 | static void timeline_fence_value_str(struct dma_fence *fence, |
238 | char *str, int size) | 238 | char *str, int size) |
239 | { | 239 | { |
240 | snprintf(str, size, "%d", fence->seqno); | 240 | snprintf(str, size, "%d", fence->seqno); |
241 | } | 241 | } |
242 | 242 | ||
243 | static void timeline_fence_timeline_value_str(struct fence *fence, | 243 | static void timeline_fence_timeline_value_str(struct dma_fence *fence, |
244 | char *str, int size) | 244 | char *str, int size) |
245 | { | 245 | { |
246 | struct sync_timeline *parent = fence_parent(fence); | 246 | struct sync_timeline *parent = dma_fence_parent(fence); |
247 | 247 | ||
248 | snprintf(str, size, "%d", parent->value); | 248 | snprintf(str, size, "%d", parent->value); |
249 | } | 249 | } |
250 | 250 | ||
251 | static const struct fence_ops timeline_fence_ops = { | 251 | static const struct dma_fence_ops timeline_fence_ops = { |
252 | .get_driver_name = timeline_fence_get_driver_name, | 252 | .get_driver_name = timeline_fence_get_driver_name, |
253 | .get_timeline_name = timeline_fence_get_timeline_name, | 253 | .get_timeline_name = timeline_fence_get_timeline_name, |
254 | .enable_signaling = timeline_fence_enable_signaling, | 254 | .enable_signaling = timeline_fence_enable_signaling, |
255 | .signaled = timeline_fence_signaled, | 255 | .signaled = timeline_fence_signaled, |
256 | .wait = fence_default_wait, | 256 | .wait = dma_fence_default_wait, |
257 | .release = timeline_fence_release, | 257 | .release = timeline_fence_release, |
258 | .fence_value_str = timeline_fence_value_str, | 258 | .fence_value_str = timeline_fence_value_str, |
259 | .timeline_value_str = timeline_fence_timeline_value_str, | 259 | .timeline_value_str = timeline_fence_timeline_value_str, |
@@ -317,7 +317,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj, | |||
317 | 317 | ||
318 | sync_file = sync_file_create(&pt->base); | 318 | sync_file = sync_file_create(&pt->base); |
319 | if (!sync_file) { | 319 | if (!sync_file) { |
320 | fence_put(&pt->base); | 320 | dma_fence_put(&pt->base); |
321 | err = -ENOMEM; | 321 | err = -ENOMEM; |
322 | goto err; | 322 | goto err; |
323 | } | 323 | } |
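sw_sync above compresses the whole fence lifecycle into one driver: one dma_fence_context_alloc() per timeline, dma_fence_init() against the timeline's shared lock for each point, and a signal once the timeline advances past the point's seqno. A condensed sketch under those assumptions, reusing the hypothetical my_fence_ops from the previous snippet:

#include <linux/dma-fence.h>
#include <linux/slab.h>

extern const struct dma_fence_ops my_fence_ops;	/* as in the previous sketch */

static DEFINE_SPINLOCK(my_timeline_lock);
static u64 my_context;	/* assigned once: my_context = dma_fence_context_alloc(1); */

static struct dma_fence *my_point_create(unsigned int seqno)
{
	struct dma_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;

	dma_fence_init(fence, &my_fence_ops, &my_timeline_lock,
		       my_context, seqno);
	return fence;	/* one reference, dropped via dma_fence_put() */
}

/* when the timeline advances past this point: */
static void my_point_complete(struct dma_fence *fence)
{
	dma_fence_signal(fence);
	dma_fence_put(fence);
}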
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 2dd4c3db6caa..48b20e34fb6d 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c | |||
@@ -71,12 +71,13 @@ static const char *sync_status_str(int status) | |||
71 | return "error"; | 71 | return "error"; |
72 | } | 72 | } |
73 | 73 | ||
74 | static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show) | 74 | static void sync_print_fence(struct seq_file *s, |
75 | struct dma_fence *fence, bool show) | ||
75 | { | 76 | { |
76 | int status = 1; | 77 | int status = 1; |
77 | struct sync_timeline *parent = fence_parent(fence); | 78 | struct sync_timeline *parent = dma_fence_parent(fence); |
78 | 79 | ||
79 | if (fence_is_signaled_locked(fence)) | 80 | if (dma_fence_is_signaled_locked(fence)) |
80 | status = fence->status; | 81 | status = fence->status; |
81 | 82 | ||
82 | seq_printf(s, " %s%sfence %s", | 83 | seq_printf(s, " %s%sfence %s", |
@@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s, | |||
135 | int i; | 136 | int i; |
136 | 137 | ||
137 | seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name, | 138 | seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name, |
138 | sync_status_str(!fence_is_signaled(sync_file->fence))); | 139 | sync_status_str(!dma_fence_is_signaled(sync_file->fence))); |
139 | 140 | ||
140 | if (fence_is_array(sync_file->fence)) { | 141 | if (dma_fence_is_array(sync_file->fence)) { |
141 | struct fence_array *array = to_fence_array(sync_file->fence); | 142 | struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); |
142 | 143 | ||
143 | for (i = 0; i < array->num_fences; ++i) | 144 | for (i = 0; i < array->num_fences; ++i) |
144 | sync_print_fence(s, array->fences[i], true); | 145 | sync_print_fence(s, array->fences[i], true); |
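sync_print_sync_file() above shows the standard unwrap for merged fences: dma_fence_is_array() checks the ops pointer, and to_dma_fence_array() exposes the backing array. The same walk as a sketch (for_each_leaf_fence and visit are hypothetical):

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

static void for_each_leaf_fence(struct dma_fence *fence,
				void (*visit)(struct dma_fence *f))
{
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		unsigned int i;

		for (i = 0; i < array->num_fences; i++)
			visit(array->fences[i]);
	} else {
		visit(fence);
	}
}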
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h index d269aa6783aa..26fe8b9907b3 100644 --- a/drivers/dma-buf/sync_debug.h +++ b/drivers/dma-buf/sync_debug.h | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/fence.h> | 18 | #include <linux/dma-fence.h> |
19 | 19 | ||
20 | #include <linux/sync_file.h> | 20 | #include <linux/sync_file.h> |
21 | #include <uapi/linux/sync_file.h> | 21 | #include <uapi/linux/sync_file.h> |
@@ -45,10 +45,9 @@ struct sync_timeline { | |||
45 | struct list_head sync_timeline_list; | 45 | struct list_head sync_timeline_list; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static inline struct sync_timeline *fence_parent(struct fence *fence) | 48 | static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence) |
49 | { | 49 | { |
50 | return container_of(fence->lock, struct sync_timeline, | 50 | return container_of(fence->lock, struct sync_timeline, child_list_lock); |
51 | child_list_lock); | ||
52 | } | 51 | } |
53 | 52 | ||
54 | /** | 53 | /** |
@@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence) | |||
58 | * @active_list: sync timeline active child's list | 57 | * @active_list: sync timeline active child's list |
59 | */ | 58 | */ |
60 | struct sync_pt { | 59 | struct sync_pt { |
61 | struct fence base; | 60 | struct dma_fence base; |
62 | struct list_head child_list; | 61 | struct list_head child_list; |
63 | struct list_head active_list; | 62 | struct list_head active_list; |
64 | }; | 63 | }; |
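The sync_pt/dma_fence_parent() pair in sync_debug.h is the standard embedding idiom: the driver structure embeds struct dma_fence and recovers itself with container_of(), either from the fence pointer or, as dma_fence_parent() does, from the shared lock. A sketch with a hypothetical wrapper type:

#include <linux/dma-fence.h>
#include <linux/list.h>

struct my_point {
	struct dma_fence base;
	struct list_head link;	/* hypothetical per-timeline bookkeeping */
};

static inline struct my_point *to_my_point(struct dma_fence *fence)
{
	/* only valid for fences initialized with our own ops table */
	return container_of(fence, struct my_point, base);
}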
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 235f8ac113cc..69d8ef98d34c 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c | |||
@@ -54,7 +54,7 @@ err: | |||
54 | return NULL; | 54 | return NULL; |
55 | } | 55 | } |
56 | 56 | ||
57 | static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) | 57 | static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb) |
58 | { | 58 | { |
59 | struct sync_file *sync_file; | 59 | struct sync_file *sync_file; |
60 | 60 | ||
@@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) | |||
71 | * takes a new reference to @fence. The sync_file can be released with | 71 | * takes a new reference to @fence. The sync_file can be released with |
72 | * fput(sync_file->file). Returns the sync_file or NULL in case of error. | 72 | * fput(sync_file->file). Returns the sync_file or NULL in case of error. |
73 | */ | 73 | */ |
74 | struct sync_file *sync_file_create(struct fence *fence) | 74 | struct sync_file *sync_file_create(struct dma_fence *fence) |
75 | { | 75 | { |
76 | struct sync_file *sync_file; | 76 | struct sync_file *sync_file; |
77 | 77 | ||
@@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence) | |||
79 | if (!sync_file) | 79 | if (!sync_file) |
80 | return NULL; | 80 | return NULL; |
81 | 81 | ||
82 | sync_file->fence = fence_get(fence); | 82 | sync_file->fence = dma_fence_get(fence); |
83 | 83 | ||
84 | snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", | 84 | snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", |
85 | fence->ops->get_driver_name(fence), | 85 | fence->ops->get_driver_name(fence), |
@@ -121,16 +121,16 @@ err: | |||
121 | * Ensures @fd references a valid sync_file and returns a fence that | 121 | * Ensures @fd references a valid sync_file and returns a fence that |
122 | * represents all fences in the sync_file. On error NULL is returned. | 122 | * represents all fences in the sync_file. On error NULL is returned. |
123 | */ | 123 | */ |
124 | struct fence *sync_file_get_fence(int fd) | 124 | struct dma_fence *sync_file_get_fence(int fd) |
125 | { | 125 | { |
126 | struct sync_file *sync_file; | 126 | struct sync_file *sync_file; |
127 | struct fence *fence; | 127 | struct dma_fence *fence; |
128 | 128 | ||
129 | sync_file = sync_file_fdget(fd); | 129 | sync_file = sync_file_fdget(fd); |
130 | if (!sync_file) | 130 | if (!sync_file) |
131 | return NULL; | 131 | return NULL; |
132 | 132 | ||
133 | fence = fence_get(sync_file->fence); | 133 | fence = dma_fence_get(sync_file->fence); |
134 | fput(sync_file->file); | 134 | fput(sync_file->file); |
135 | 135 | ||
136 | return fence; | 136 | return fence; |
@@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd) | |||
138 | EXPORT_SYMBOL(sync_file_get_fence); | 138 | EXPORT_SYMBOL(sync_file_get_fence); |
139 | 139 | ||
140 | static int sync_file_set_fence(struct sync_file *sync_file, | 140 | static int sync_file_set_fence(struct sync_file *sync_file, |
141 | struct fence **fences, int num_fences) | 141 | struct dma_fence **fences, int num_fences) |
142 | { | 142 | { |
143 | struct fence_array *array; | 143 | struct dma_fence_array *array; |
144 | 144 | ||
145 | /* | 145 | /* |
146 | * The references for the fences in the new sync_file are held | 146 | * The references for the fences in the new sync_file are held |
147 | * in add_fence() during the merge procedure, so for num_fences == 1 | 147 | * in add_fence() during the merge procedure, so for num_fences == 1 |
148 | * we already own a new reference to the fence. For num_fences > 1 | 148 | * we already own a new reference to the fence. For num_fences > 1 |
149 | * we own the reference from the fence_array creation. | 149 | * we own the reference from the dma_fence_array creation. |
150 | */ | 150 | */ |
151 | if (num_fences == 1) { | 151 | if (num_fences == 1) { |
152 | sync_file->fence = fences[0]; | 152 | sync_file->fence = fences[0]; |
153 | kfree(fences); | 153 | kfree(fences); |
154 | } else { | 154 | } else { |
155 | array = fence_array_create(num_fences, fences, | 155 | array = dma_fence_array_create(num_fences, fences, |
156 | fence_context_alloc(1), 1, false); | 156 | dma_fence_context_alloc(1), |
157 | 1, false); | ||
157 | if (!array) | 158 | if (!array) |
158 | return -ENOMEM; | 159 | return -ENOMEM; |
159 | 160 | ||
@@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file, | |||
163 | return 0; | 164 | return 0; |
164 | } | 165 | } |
165 | 166 | ||
166 | static struct fence **get_fences(struct sync_file *sync_file, int *num_fences) | 167 | static struct dma_fence **get_fences(struct sync_file *sync_file, |
168 | int *num_fences) | ||
167 | { | 169 | { |
168 | if (fence_is_array(sync_file->fence)) { | 170 | if (dma_fence_is_array(sync_file->fence)) { |
169 | struct fence_array *array = to_fence_array(sync_file->fence); | 171 | struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); |
170 | 172 | ||
171 | *num_fences = array->num_fences; | 173 | *num_fences = array->num_fences; |
172 | return array->fences; | 174 | return array->fences; |
@@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences) | |||
176 | return &sync_file->fence; | 178 | return &sync_file->fence; |
177 | } | 179 | } |
178 | 180 | ||
179 | static void add_fence(struct fence **fences, int *i, struct fence *fence) | 181 | static void add_fence(struct dma_fence **fences, |
182 | int *i, struct dma_fence *fence) | ||
180 | { | 183 | { |
181 | fences[*i] = fence; | 184 | fences[*i] = fence; |
182 | 185 | ||
183 | if (!fence_is_signaled(fence)) { | 186 | if (!dma_fence_is_signaled(fence)) { |
184 | fence_get(fence); | 187 | dma_fence_get(fence); |
185 | (*i)++; | 188 | (*i)++; |
186 | } | 189 | } |
187 | } | 190 | } |
@@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, | |||
200 | struct sync_file *b) | 203 | struct sync_file *b) |
201 | { | 204 | { |
202 | struct sync_file *sync_file; | 205 | struct sync_file *sync_file; |
203 | struct fence **fences, **nfences, **a_fences, **b_fences; | 206 | struct dma_fence **fences, **nfences, **a_fences, **b_fences; |
204 | int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; | 207 | int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; |
205 | 208 | ||
206 | sync_file = sync_file_alloc(); | 209 | sync_file = sync_file_alloc(); |
@@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, | |||
226 | * and sync_file_create, this is a reasonable assumption. | 229 | * and sync_file_create, this is a reasonable assumption. |
227 | */ | 230 | */ |
228 | for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { | 231 | for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { |
229 | struct fence *pt_a = a_fences[i_a]; | 232 | struct dma_fence *pt_a = a_fences[i_a]; |
230 | struct fence *pt_b = b_fences[i_b]; | 233 | struct dma_fence *pt_b = b_fences[i_b]; |
231 | 234 | ||
232 | if (pt_a->context < pt_b->context) { | 235 | if (pt_a->context < pt_b->context) { |
233 | add_fence(fences, &i, pt_a); | 236 | add_fence(fences, &i, pt_a); |
@@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, | |||
255 | add_fence(fences, &i, b_fences[i_b]); | 258 | add_fence(fences, &i, b_fences[i_b]); |
256 | 259 | ||
257 | if (i == 0) | 260 | if (i == 0) |
258 | fences[i++] = fence_get(a_fences[0]); | 261 | fences[i++] = dma_fence_get(a_fences[0]); |
259 | 262 | ||
260 | if (num_fences > i) { | 263 | if (num_fences > i) { |
261 | nfences = krealloc(fences, i * sizeof(*fences), | 264 | nfences = krealloc(fences, i * sizeof(*fences), |
@@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref) | |||
286 | kref); | 289 | kref); |
287 | 290 | ||
288 | if (test_bit(POLL_ENABLED, &sync_file->fence->flags)) | 291 | if (test_bit(POLL_ENABLED, &sync_file->fence->flags)) |
289 | fence_remove_callback(sync_file->fence, &sync_file->cb); | 292 | dma_fence_remove_callback(sync_file->fence, &sync_file->cb); |
290 | fence_put(sync_file->fence); | 293 | dma_fence_put(sync_file->fence); |
291 | kfree(sync_file); | 294 | kfree(sync_file); |
292 | } | 295 | } |
293 | 296 | ||
@@ -307,12 +310,12 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait) | |||
307 | 310 | ||
308 | if (!poll_does_not_wait(wait) && | 311 | if (!poll_does_not_wait(wait) && |
309 | !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { | 312 | !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { |
310 | if (fence_add_callback(sync_file->fence, &sync_file->cb, | 313 | if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, |
311 | fence_check_cb_func) < 0) | 314 | fence_check_cb_func) < 0) |
312 | wake_up_all(&sync_file->wq); | 315 | wake_up_all(&sync_file->wq); |
313 | } | 316 | } |
314 | 317 | ||
315 | return fence_is_signaled(sync_file->fence) ? POLLIN : 0; | 318 | return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0; |
316 | } | 319 | } |
317 | 320 | ||
318 | static long sync_file_ioctl_merge(struct sync_file *sync_file, | 321 | static long sync_file_ioctl_merge(struct sync_file *sync_file, |
@@ -370,14 +373,14 @@ err_put_fd: | |||
370 | return err; | 373 | return err; |
371 | } | 374 | } |
372 | 375 | ||
373 | static void sync_fill_fence_info(struct fence *fence, | 376 | static void sync_fill_fence_info(struct dma_fence *fence, |
374 | struct sync_fence_info *info) | 377 | struct sync_fence_info *info) |
375 | { | 378 | { |
376 | strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), | 379 | strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), |
377 | sizeof(info->obj_name)); | 380 | sizeof(info->obj_name)); |
378 | strlcpy(info->driver_name, fence->ops->get_driver_name(fence), | 381 | strlcpy(info->driver_name, fence->ops->get_driver_name(fence), |
379 | sizeof(info->driver_name)); | 382 | sizeof(info->driver_name)); |
380 | if (fence_is_signaled(fence)) | 383 | if (dma_fence_is_signaled(fence)) |
381 | info->status = fence->status >= 0 ? 1 : fence->status; | 384 | info->status = fence->status >= 0 ? 1 : fence->status; |
382 | else | 385 | else |
383 | info->status = 0; | 386 | info->status = 0; |
@@ -389,7 +392,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
389 | { | 392 | { |
390 | struct sync_file_info info; | 393 | struct sync_file_info info; |
391 | struct sync_fence_info *fence_info = NULL; | 394 | struct sync_fence_info *fence_info = NULL; |
392 | struct fence **fences; | 395 | struct dma_fence **fences; |
393 | __u32 size; | 396 | __u32 size; |
394 | int num_fences, ret, i; | 397 | int num_fences, ret, i; |
395 | 398 | ||
@@ -429,7 +432,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
429 | 432 | ||
430 | no_fences: | 433 | no_fences: |
431 | strlcpy(info.name, sync_file->name, sizeof(info.name)); | 434 | strlcpy(info.name, sync_file->name, sizeof(info.name)); |
432 | info.status = fence_is_signaled(sync_file->fence); | 435 | info.status = dma_fence_is_signaled(sync_file->fence); |
433 | info.num_fences = num_fences; | 436 | info.num_fences = num_fences; |
434 | 437 | ||
435 | if (copy_to_user((void __user *)arg, &info, sizeof(info))) | 438 | if (copy_to_user((void __user *)arg, &info, sizeof(info))) |
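The sync_file.c conversion above is the boundary where fences become file descriptors. A sketch of the round-trip a driver ioctl would do on top of it, with hypothetical function names and abbreviated error handling; note that sync_file_create() in the hunk above takes its own reference via dma_fence_get(), so the exporter keeps the reference it already holds:

#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sync_file.h>

static int my_fence_to_fd(struct dma_fence *fence)
{
	struct sync_file *sync_file = sync_file_create(fence);
	int fd;

	if (!sync_file)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		fput(sync_file->file);
		return fd;
	}

	fd_install(fd, sync_file->file);
	return fd;
}

static struct dma_fence *my_fd_to_fence(int fd)
{
	/* NULL on a bad fd; otherwise a new reference the caller
	 * must drop with dma_fence_put() */
	return sync_file_get_fence(fd);
}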
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 217df2459a98..2ec7b3baeec2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -34,7 +34,7 @@ | |||
34 | #include <linux/kref.h> | 34 | #include <linux/kref.h> |
35 | #include <linux/interval_tree.h> | 35 | #include <linux/interval_tree.h> |
36 | #include <linux/hashtable.h> | 36 | #include <linux/hashtable.h> |
37 | #include <linux/fence.h> | 37 | #include <linux/dma-fence.h> |
38 | 38 | ||
39 | #include <ttm/ttm_bo_api.h> | 39 | #include <ttm/ttm_bo_api.h> |
40 | #include <ttm/ttm_bo_driver.h> | 40 | #include <ttm/ttm_bo_driver.h> |
@@ -359,7 +359,7 @@ struct amdgpu_bo_va_mapping { | |||
359 | struct amdgpu_bo_va { | 359 | struct amdgpu_bo_va { |
360 | /* protected by bo being reserved */ | 360 | /* protected by bo being reserved */ |
361 | struct list_head bo_list; | 361 | struct list_head bo_list; |
362 | struct fence *last_pt_update; | 362 | struct dma_fence *last_pt_update; |
363 | unsigned ref_count; | 363 | unsigned ref_count; |
364 | 364 | ||
365 | /* protected by vm mutex and spinlock */ | 365 | /* protected by vm mutex and spinlock */ |
@@ -474,7 +474,7 @@ struct amdgpu_sa_bo { | |||
474 | struct amdgpu_sa_manager *manager; | 474 | struct amdgpu_sa_manager *manager; |
475 | unsigned soffset; | 475 | unsigned soffset; |
476 | unsigned eoffset; | 476 | unsigned eoffset; |
477 | struct fence *fence; | 477 | struct dma_fence *fence; |
478 | }; | 478 | }; |
479 | 479 | ||
480 | /* | 480 | /* |
@@ -613,10 +613,10 @@ struct amdgpu_flip_work { | |||
613 | uint64_t base; | 613 | uint64_t base; |
614 | struct drm_pending_vblank_event *event; | 614 | struct drm_pending_vblank_event *event; |
615 | struct amdgpu_bo *old_abo; | 615 | struct amdgpu_bo *old_abo; |
616 | struct fence *excl; | 616 | struct dma_fence *excl; |
617 | unsigned shared_count; | 617 | unsigned shared_count; |
618 | struct fence **shared; | 618 | struct dma_fence **shared; |
619 | struct fence_cb cb; | 619 | struct dma_fence_cb cb; |
620 | bool async; | 620 | bool async; |
621 | }; | 621 | }; |
622 | 622 | ||
@@ -644,7 +644,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job); | |||
644 | void amdgpu_job_free(struct amdgpu_job *job); | 644 | void amdgpu_job_free(struct amdgpu_job *job); |
645 | int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, | 645 | int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, |
646 | struct amd_sched_entity *entity, void *owner, | 646 | struct amd_sched_entity *entity, void *owner, |
647 | struct fence **f); | 647 | struct dma_fence **f); |
648 | 648 | ||
649 | /* | 649 | /* |
650 | * context related structures | 650 | * context related structures |
@@ -652,7 +652,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, | |||
652 | 652 | ||
653 | struct amdgpu_ctx_ring { | 653 | struct amdgpu_ctx_ring { |
654 | uint64_t sequence; | 654 | uint64_t sequence; |
655 | struct fence **fences; | 655 | struct dma_fence **fences; |
656 | struct amd_sched_entity entity; | 656 | struct amd_sched_entity entity; |
657 | }; | 657 | }; |
658 | 658 | ||
@@ -661,7 +661,7 @@ struct amdgpu_ctx { | |||
661 | struct amdgpu_device *adev; | 661 | struct amdgpu_device *adev; |
662 | unsigned reset_counter; | 662 | unsigned reset_counter; |
663 | spinlock_t ring_lock; | 663 | spinlock_t ring_lock; |
664 | struct fence **fences; | 664 | struct dma_fence **fences; |
665 | struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; | 665 | struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; |
666 | bool preamble_presented; | 666 | bool preamble_presented; |
667 | }; | 667 | }; |
@@ -677,8 +677,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); | |||
677 | int amdgpu_ctx_put(struct amdgpu_ctx *ctx); | 677 | int amdgpu_ctx_put(struct amdgpu_ctx *ctx); |
678 | 678 | ||
679 | uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, | 679 | uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, |
680 | struct fence *fence); | 680 | struct dma_fence *fence); |
681 | struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, | 681 | struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, |
682 | struct amdgpu_ring *ring, uint64_t seq); | 682 | struct amdgpu_ring *ring, uint64_t seq); |
683 | 683 | ||
684 | int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, | 684 | int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, |
@@ -889,10 +889,10 @@ struct amdgpu_gfx { | |||
889 | int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, | 889 | int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
890 | unsigned size, struct amdgpu_ib *ib); | 890 | unsigned size, struct amdgpu_ib *ib); |
891 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, | 891 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, |
892 | struct fence *f); | 892 | struct dma_fence *f); |
893 | int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | 893 | int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, |
894 | struct amdgpu_ib *ib, struct fence *last_vm_update, | 894 | struct amdgpu_ib *ib, struct dma_fence *last_vm_update, |
895 | struct amdgpu_job *job, struct fence **f); | 895 | struct amdgpu_job *job, struct dma_fence **f); |
896 | int amdgpu_ib_pool_init(struct amdgpu_device *adev); | 896 | int amdgpu_ib_pool_init(struct amdgpu_device *adev); |
897 | void amdgpu_ib_pool_fini(struct amdgpu_device *adev); | 897 | void amdgpu_ib_pool_fini(struct amdgpu_device *adev); |
898 | int amdgpu_ib_ring_tests(struct amdgpu_device *adev); | 898 | int amdgpu_ib_ring_tests(struct amdgpu_device *adev); |
@@ -923,7 +923,7 @@ struct amdgpu_cs_parser { | |||
923 | struct amdgpu_bo_list *bo_list; | 923 | struct amdgpu_bo_list *bo_list; |
924 | struct amdgpu_bo_list_entry vm_pd; | 924 | struct amdgpu_bo_list_entry vm_pd; |
925 | struct list_head validated; | 925 | struct list_head validated; |
926 | struct fence *fence; | 926 | struct dma_fence *fence; |
927 | uint64_t bytes_moved_threshold; | 927 | uint64_t bytes_moved_threshold; |
928 | uint64_t bytes_moved; | 928 | uint64_t bytes_moved; |
929 | struct amdgpu_bo_list_entry *evictable; | 929 | struct amdgpu_bo_list_entry *evictable; |
@@ -943,7 +943,7 @@ struct amdgpu_job { | |||
943 | struct amdgpu_ring *ring; | 943 | struct amdgpu_ring *ring; |
944 | struct amdgpu_sync sync; | 944 | struct amdgpu_sync sync; |
945 | struct amdgpu_ib *ibs; | 945 | struct amdgpu_ib *ibs; |
946 | struct fence *fence; /* the hw fence */ | 946 | struct dma_fence *fence; /* the hw fence */ |
947 | uint32_t preamble_status; | 947 | uint32_t preamble_status; |
948 | uint32_t num_ibs; | 948 | uint32_t num_ibs; |
949 | void *owner; | 949 | void *owner; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 345305235349..cc97eee93226 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | |||
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, | |||
33 | { | 33 | { |
34 | unsigned long start_jiffies; | 34 | unsigned long start_jiffies; |
35 | unsigned long end_jiffies; | 35 | unsigned long end_jiffies; |
36 | struct fence *fence = NULL; | 36 | struct dma_fence *fence = NULL; |
37 | int i, r; | 37 | int i, r; |
38 | 38 | ||
39 | start_jiffies = jiffies; | 39 | start_jiffies = jiffies; |
@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, | |||
43 | false); | 43 | false); |
44 | if (r) | 44 | if (r) |
45 | goto exit_do_move; | 45 | goto exit_do_move; |
46 | r = fence_wait(fence, false); | 46 | r = dma_fence_wait(fence, false); |
47 | if (r) | 47 | if (r) |
48 | goto exit_do_move; | 48 | goto exit_do_move; |
49 | fence_put(fence); | 49 | dma_fence_put(fence); |
50 | } | 50 | } |
51 | end_jiffies = jiffies; | 51 | end_jiffies = jiffies; |
52 | r = jiffies_to_msecs(end_jiffies - start_jiffies); | 52 | r = jiffies_to_msecs(end_jiffies - start_jiffies); |
53 | 53 | ||
54 | exit_do_move: | 54 | exit_do_move: |
55 | if (fence) | 55 | if (fence) |
56 | fence_put(fence); | 56 | dma_fence_put(fence); |
57 | return r; | 57 | return r; |
58 | } | 58 | } |
59 | 59 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index cf03f9f01f40..a024217896fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -735,7 +735,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo | |||
735 | ttm_eu_backoff_reservation(&parser->ticket, | 735 | ttm_eu_backoff_reservation(&parser->ticket, |
736 | &parser->validated); | 736 | &parser->validated); |
737 | } | 737 | } |
738 | fence_put(parser->fence); | 738 | dma_fence_put(parser->fence); |
739 | 739 | ||
740 | if (parser->ctx) | 740 | if (parser->ctx) |
741 | amdgpu_ctx_put(parser->ctx); | 741 | amdgpu_ctx_put(parser->ctx); |
@@ -772,7 +772,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, | |||
772 | 772 | ||
773 | if (p->bo_list) { | 773 | if (p->bo_list) { |
774 | for (i = 0; i < p->bo_list->num_entries; i++) { | 774 | for (i = 0; i < p->bo_list->num_entries; i++) { |
775 | struct fence *f; | 775 | struct dma_fence *f; |
776 | 776 | ||
777 | /* ignore duplicates */ | 777 | /* ignore duplicates */ |
778 | bo = p->bo_list->array[i].robj; | 778 | bo = p->bo_list->array[i].robj; |
@@ -973,7 +973,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, | |||
973 | for (j = 0; j < num_deps; ++j) { | 973 | for (j = 0; j < num_deps; ++j) { |
974 | struct amdgpu_ring *ring; | 974 | struct amdgpu_ring *ring; |
975 | struct amdgpu_ctx *ctx; | 975 | struct amdgpu_ctx *ctx; |
976 | struct fence *fence; | 976 | struct dma_fence *fence; |
977 | 977 | ||
978 | r = amdgpu_cs_get_ring(adev, deps[j].ip_type, | 978 | r = amdgpu_cs_get_ring(adev, deps[j].ip_type, |
979 | deps[j].ip_instance, | 979 | deps[j].ip_instance, |
@@ -995,7 +995,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, | |||
995 | } else if (fence) { | 995 | } else if (fence) { |
996 | r = amdgpu_sync_fence(adev, &p->job->sync, | 996 | r = amdgpu_sync_fence(adev, &p->job->sync, |
997 | fence); | 997 | fence); |
998 | fence_put(fence); | 998 | dma_fence_put(fence); |
999 | amdgpu_ctx_put(ctx); | 999 | amdgpu_ctx_put(ctx); |
1000 | if (r) | 1000 | if (r) |
1001 | return r; | 1001 | return r; |
@@ -1025,7 +1025,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
1025 | 1025 | ||
1026 | job->owner = p->filp; | 1026 | job->owner = p->filp; |
1027 | job->fence_ctx = entity->fence_context; | 1027 | job->fence_ctx = entity->fence_context; |
1028 | p->fence = fence_get(&job->base.s_fence->finished); | 1028 | p->fence = dma_fence_get(&job->base.s_fence->finished); |
1029 | cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); | 1029 | cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); |
1030 | job->uf_sequence = cs->out.handle; | 1030 | job->uf_sequence = cs->out.handle; |
1031 | amdgpu_job_free_resources(job); | 1031 | amdgpu_job_free_resources(job); |
@@ -1108,7 +1108,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, | |||
1108 | unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); | 1108 | unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); |
1109 | struct amdgpu_ring *ring = NULL; | 1109 | struct amdgpu_ring *ring = NULL; |
1110 | struct amdgpu_ctx *ctx; | 1110 | struct amdgpu_ctx *ctx; |
1111 | struct fence *fence; | 1111 | struct dma_fence *fence; |
1112 | long r; | 1112 | long r; |
1113 | 1113 | ||
1114 | r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, | 1114 | r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, |
@@ -1124,8 +1124,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, | |||
1124 | if (IS_ERR(fence)) | 1124 | if (IS_ERR(fence)) |
1125 | r = PTR_ERR(fence); | 1125 | r = PTR_ERR(fence); |
1126 | else if (fence) { | 1126 | else if (fence) { |
1127 | r = fence_wait_timeout(fence, true, timeout); | 1127 | r = dma_fence_wait_timeout(fence, true, timeout); |
1128 | fence_put(fence); | 1128 | dma_fence_put(fence); |
1129 | } else | 1129 | } else |
1130 | r = 1; | 1130 | r = 1; |
1131 | 1131 | ||
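amdgpu_cs_wait_ioctl() above leans on the dma_fence_wait_timeout() return convention, which is easy to misread: negative is an error, zero means the timeout expired, and a positive value is the time left when the fence signaled. A sketch of the decode (my_wait_fence is a hypothetical helper that also consumes the caller's reference):

#include <linux/dma-fence.h>

/* Returns 1 if the fence signaled, 0 on timeout, negative errno on
 * error; consumes the caller's reference. @timeout is in jiffies. */
static long my_wait_fence(struct dma_fence *fence, unsigned long timeout)
{
	long r = dma_fence_wait_timeout(fence, true /* interruptible */,
					timeout);

	dma_fence_put(fence);
	if (r < 0)
		return r;	/* e.g. -ERESTARTSYS if a signal arrived */
	return r > 0;		/* leftover jiffies mean it signaled in time */
}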
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 6d86eaef934c..400c66ba4c6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) | |||
35 | kref_init(&ctx->refcount); | 35 | kref_init(&ctx->refcount); |
36 | spin_lock_init(&ctx->ring_lock); | 36 | spin_lock_init(&ctx->ring_lock); |
37 | ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS, | 37 | ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS, |
38 | sizeof(struct fence *), GFP_KERNEL); | 38 | sizeof(struct dma_fence *), GFP_KERNEL); |
39 | if (!ctx->fences) | 39 | if (!ctx->fences) |
40 | return -ENOMEM; | 40 | return -ENOMEM; |
41 | 41 | ||
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) | |||
79 | 79 | ||
80 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | 80 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) |
81 | for (j = 0; j < amdgpu_sched_jobs; ++j) | 81 | for (j = 0; j < amdgpu_sched_jobs; ++j) |
82 | fence_put(ctx->rings[i].fences[j]); | 82 | dma_fence_put(ctx->rings[i].fences[j]); |
83 | kfree(ctx->fences); | 83 | kfree(ctx->fences); |
84 | ctx->fences = NULL; | 84 | ctx->fences = NULL; |
85 | 85 | ||
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx) | |||
241 | } | 241 | } |
242 | 242 | ||
243 | uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, | 243 | uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, |
244 | struct fence *fence) | 244 | struct dma_fence *fence) |
245 | { | 245 | { |
246 | struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx]; | 246 | struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx]; |
247 | uint64_t seq = cring->sequence; | 247 | uint64_t seq = cring->sequence; |
248 | unsigned idx = 0; | 248 | unsigned idx = 0; |
249 | struct fence *other = NULL; | 249 | struct dma_fence *other = NULL; |
250 | 250 | ||
251 | idx = seq & (amdgpu_sched_jobs - 1); | 251 | idx = seq & (amdgpu_sched_jobs - 1); |
252 | other = cring->fences[idx]; | 252 | other = cring->fences[idx]; |
253 | if (other) { | 253 | if (other) { |
254 | signed long r; | 254 | signed long r; |
255 | r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); | 255 | r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); |
256 | if (r < 0) | 256 | if (r < 0) |
257 | DRM_ERROR("Error (%ld) waiting for fence!\n", r); | 257 | DRM_ERROR("Error (%ld) waiting for fence!\n", r); |
258 | } | 258 | } |
259 | 259 | ||
260 | fence_get(fence); | 260 | dma_fence_get(fence); |
261 | 261 | ||
262 | spin_lock(&ctx->ring_lock); | 262 | spin_lock(&ctx->ring_lock); |
263 | cring->fences[idx] = fence; | 263 | cring->fences[idx] = fence; |
264 | cring->sequence++; | 264 | cring->sequence++; |
265 | spin_unlock(&ctx->ring_lock); | 265 | spin_unlock(&ctx->ring_lock); |
266 | 266 | ||
267 | fence_put(other); | 267 | dma_fence_put(other); |
268 | 268 | ||
269 | return seq; | 269 | return seq; |
270 | } | 270 | } |
271 | 271 | ||
272 | struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, | 272 | struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, |
273 | struct amdgpu_ring *ring, uint64_t seq) | 273 | struct amdgpu_ring *ring, uint64_t seq) |
274 | { | 274 | { |
275 | struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx]; | 275 | struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx]; |
276 | struct fence *fence; | 276 | struct dma_fence *fence; |
277 | 277 | ||
278 | spin_lock(&ctx->ring_lock); | 278 | spin_lock(&ctx->ring_lock); |
279 | 279 | ||
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, | |||
288 | return NULL; | 288 | return NULL; |
289 | } | 289 | } |
290 | 290 | ||
291 | fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]); | 291 | fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]); |
292 | spin_unlock(&ctx->ring_lock); | 292 | spin_unlock(&ctx->ring_lock); |
293 | 293 | ||
294 | return fence; | 294 | return fence; |
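amdgpu_ctx_add_fence() above is a compact example of safe slot replacement: take a reference on the incoming fence before publishing it, swap pointers under the ring lock, and release the displaced fence only after the lock is dropped. Generalized as a sketch (my_replace_fence_slot is hypothetical):

#include <linux/dma-fence.h>
#include <linux/spinlock.h>

static void my_replace_fence_slot(spinlock_t *lock,
				  struct dma_fence **slot,
				  struct dma_fence *fence)
{
	struct dma_fence *old;

	dma_fence_get(fence);		/* reference held by the slot */

	spin_lock(lock);
	old = *slot;
	*slot = fence;
	spin_unlock(lock);

	dma_fence_put(old);		/* NULL-safe; done outside the lock */
}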
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3b9b58debabd..6958d4af017f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -1620,7 +1620,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
1620 | adev->vm_manager.vm_pte_funcs = NULL; | 1620 | adev->vm_manager.vm_pte_funcs = NULL; |
1621 | adev->vm_manager.vm_pte_num_rings = 0; | 1621 | adev->vm_manager.vm_pte_num_rings = 0; |
1622 | adev->gart.gart_funcs = NULL; | 1622 | adev->gart.gart_funcs = NULL; |
1623 | adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); | 1623 | adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); |
1624 | 1624 | ||
1625 | adev->smc_rreg = &amdgpu_invalid_rreg; | 1625 | adev->smc_rreg = &amdgpu_invalid_rreg; |
1626 | adev->smc_wreg = &amdgpu_invalid_wreg; | 1626 | adev->smc_wreg = &amdgpu_invalid_wreg; |
@@ -2215,7 +2215,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev) | |||
2215 | static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, | 2215 | static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, |
2216 | struct amdgpu_ring *ring, | 2216 | struct amdgpu_ring *ring, |
2217 | struct amdgpu_bo *bo, | 2217 | struct amdgpu_bo *bo, |
2218 | struct fence **fence) | 2218 | struct dma_fence **fence) |
2219 | { | 2219 | { |
2220 | uint32_t domain; | 2220 | uint32_t domain; |
2221 | int r; | 2221 | int r; |
@@ -2334,30 +2334,30 @@ retry: | |||
2334 | if (need_full_reset && amdgpu_need_backup(adev)) { | 2334 | if (need_full_reset && amdgpu_need_backup(adev)) { |
2335 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; | 2335 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
2336 | struct amdgpu_bo *bo, *tmp; | 2336 | struct amdgpu_bo *bo, *tmp; |
2337 | struct fence *fence = NULL, *next = NULL; | 2337 | struct dma_fence *fence = NULL, *next = NULL; |
2338 | 2338 | ||
2339 | DRM_INFO("recover vram bo from shadow\n"); | 2339 | DRM_INFO("recover vram bo from shadow\n"); |
2340 | mutex_lock(&adev->shadow_list_lock); | 2340 | mutex_lock(&adev->shadow_list_lock); |
2341 | list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { | 2341 | list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { |
2342 | amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); | 2342 | amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); |
2343 | if (fence) { | 2343 | if (fence) { |
2344 | r = fence_wait(fence, false); | 2344 | r = dma_fence_wait(fence, false); |
2345 | if (r) { | 2345 | if (r) { |
2346 | WARN(r, "recovery from shadow isn't completed\n"); | 2346 | WARN(r, "recovery from shadow isn't completed\n"); |
2347 | break; | 2347 | break; |
2348 | } | 2348 | } |
2349 | } | 2349 | } |
2350 | 2350 | ||
2351 | fence_put(fence); | 2351 | dma_fence_put(fence); |
2352 | fence = next; | 2352 | fence = next; |
2353 | } | 2353 | } |
2354 | mutex_unlock(&adev->shadow_list_lock); | 2354 | mutex_unlock(&adev->shadow_list_lock); |
2355 | if (fence) { | 2355 | if (fence) { |
2356 | r = fence_wait(fence, false); | 2356 | r = dma_fence_wait(fence, false); |
2357 | if (r) | 2357 | if (r) |
2358 | WARN(r, "recovery from shadow isn't completed\n"); | 2358 | WARN(r, "recovery from shadow isn't completed\n"); |
2359 | } | 2359 | } |
2360 | fence_put(fence); | 2360 | dma_fence_put(fence); |
2361 | } | 2361 | } |
2362 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | 2362 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
2363 | struct amdgpu_ring *ring = adev->rings[i]; | 2363 | struct amdgpu_ring *ring = adev->rings[i]; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index c7bc2b3c1b97..741144fcc7bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
@@ -35,29 +35,29 @@ | |||
35 | #include <drm/drm_crtc_helper.h> | 35 | #include <drm/drm_crtc_helper.h> |
36 | #include <drm/drm_edid.h> | 36 | #include <drm/drm_edid.h> |
37 | 37 | ||
38 | static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb) | 38 | static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb) |
39 | { | 39 | { |
40 | struct amdgpu_flip_work *work = | 40 | struct amdgpu_flip_work *work = |
41 | container_of(cb, struct amdgpu_flip_work, cb); | 41 | container_of(cb, struct amdgpu_flip_work, cb); |
42 | 42 | ||
43 | fence_put(f); | 43 | dma_fence_put(f); |
44 | schedule_work(&work->flip_work.work); | 44 | schedule_work(&work->flip_work.work); |
45 | } | 45 | } |
46 | 46 | ||
47 | static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, | 47 | static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, |
48 | struct fence **f) | 48 | struct dma_fence **f) |
49 | { | 49 | { |
50 | struct fence *fence = *f; | 50 | struct dma_fence *fence = *f; |
51 | 51 | ||
52 | if (fence == NULL) | 52 | if (fence == NULL) |
53 | return false; | 53 | return false; |
54 | 54 | ||
55 | *f = NULL; | 55 | *f = NULL; |
56 | 56 | ||
57 | if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) | 57 | if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) |
58 | return true; | 58 | return true; |
59 | 59 | ||
60 | fence_put(fence); | 60 | dma_fence_put(fence); |
61 | return false; | 61 | return false; |
62 | } | 62 | } |
63 | 63 | ||
@@ -244,9 +244,9 @@ unreserve: | |||
244 | 244 | ||
245 | cleanup: | 245 | cleanup: |
246 | amdgpu_bo_unref(&work->old_abo); | 246 | amdgpu_bo_unref(&work->old_abo); |
247 | fence_put(work->excl); | 247 | dma_fence_put(work->excl); |
248 | for (i = 0; i < work->shared_count; ++i) | 248 | for (i = 0; i < work->shared_count; ++i) |
249 | fence_put(work->shared[i]); | 249 | dma_fence_put(work->shared[i]); |
250 | kfree(work->shared); | 250 | kfree(work->shared); |
251 | kfree(work); | 251 | kfree(work); |
252 | 252 | ||
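amdgpu_flip_handle_fence() above encodes the dma_fence_add_callback() contract: a zero return means the callback is armed and will fire exactly once, while -ENOENT means the fence already signaled and the caller must drop its reference and take the synchronous path itself. The same hand-off as a sketch with hypothetical names:

#include <linux/dma-fence.h>
#include <linux/workqueue.h>

static struct work_struct my_work;	/* initialized elsewhere with INIT_WORK() */

static void my_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	dma_fence_put(fence);		/* consume the deferred reference */
	schedule_work(&my_work);
}

/* true if the work was deferred to the callback, false if the fence
 * was already signaled (or NULL) and the caller can proceed now */
static bool my_defer_on_fence(struct dma_fence *fence,
			      struct dma_fence_cb *cb)
{
	if (!fence)
		return false;

	if (!dma_fence_add_callback(fence, cb, my_fence_cb))
		return true;		/* callback armed */

	dma_fence_put(fence);		/* -ENOENT: already signaled */
	return false;
}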
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 3a2e42f4b897..57552c79ec58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
@@ -48,7 +48,7 @@ | |||
48 | */ | 48 | */ |
49 | 49 | ||
50 | struct amdgpu_fence { | 50 | struct amdgpu_fence { |
51 | struct fence base; | 51 | struct dma_fence base; |
52 | 52 | ||
53 | /* RB, DMA, etc. */ | 53 | /* RB, DMA, etc. */ |
54 | struct amdgpu_ring *ring; | 54 | struct amdgpu_ring *ring; |
@@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void) | |||
73 | /* | 73 | /* |
74 | * Cast helper | 74 | * Cast helper |
75 | */ | 75 | */ |
76 | static const struct fence_ops amdgpu_fence_ops; | 76 | static const struct dma_fence_ops amdgpu_fence_ops; |
77 | static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f) | 77 | static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f) |
78 | { | 78 | { |
79 | struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); | 79 | struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); |
80 | 80 | ||
@@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) | |||
130 | * Emits a fence command on the requested ring (all asics). | 130 | * Emits a fence command on the requested ring (all asics). |
131 | * Returns 0 on success, -ENOMEM on failure. | 131 | * Returns 0 on success, -ENOMEM on failure. |
132 | */ | 132 | */ |
133 | int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) | 133 | int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f) |
134 | { | 134 | { |
135 | struct amdgpu_device *adev = ring->adev; | 135 | struct amdgpu_device *adev = ring->adev; |
136 | struct amdgpu_fence *fence; | 136 | struct amdgpu_fence *fence; |
137 | struct fence *old, **ptr; | 137 | struct dma_fence *old, **ptr; |
138 | uint32_t seq; | 138 | uint32_t seq; |
139 | 139 | ||
140 | fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); | 140 | fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); |
@@ -143,10 +143,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) | |||
143 | 143 | ||
144 | seq = ++ring->fence_drv.sync_seq; | 144 | seq = ++ring->fence_drv.sync_seq; |
145 | fence->ring = ring; | 145 | fence->ring = ring; |
146 | fence_init(&fence->base, &amdgpu_fence_ops, | 146 | dma_fence_init(&fence->base, &amdgpu_fence_ops, |
147 | &ring->fence_drv.lock, | 147 | &ring->fence_drv.lock, |
148 | adev->fence_context + ring->idx, | 148 | adev->fence_context + ring->idx, |
149 | seq); | 149 | seq); |
150 | amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, | 150 | amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, |
151 | seq, AMDGPU_FENCE_FLAG_INT); | 151 | seq, AMDGPU_FENCE_FLAG_INT); |
152 | 152 | ||
@@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) | |||
155 | * emitting the fence would mess up the hardware ring buffer. | 155 | * emitting the fence would mess up the hardware ring buffer. |
156 | */ | 156 | */ |
157 | old = rcu_dereference_protected(*ptr, 1); | 157 | old = rcu_dereference_protected(*ptr, 1); |
158 | if (old && !fence_is_signaled(old)) { | 158 | if (old && !dma_fence_is_signaled(old)) { |
159 | DRM_INFO("rcu slot is busy\n"); | 159 | DRM_INFO("rcu slot is busy\n"); |
160 | fence_wait(old, false); | 160 | dma_fence_wait(old, false); |
161 | } | 161 | } |
162 | 162 | ||
163 | rcu_assign_pointer(*ptr, fence_get(&fence->base)); | 163 | rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); |
164 | 164 | ||
165 | *f = &fence->base; | 165 | *f = &fence->base; |
166 | 166 | ||
@@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) | |||
211 | seq &= drv->num_fences_mask; | 211 | seq &= drv->num_fences_mask; |
212 | 212 | ||
213 | do { | 213 | do { |
214 | struct fence *fence, **ptr; | 214 | struct dma_fence *fence, **ptr; |
215 | 215 | ||
216 | ++last_seq; | 216 | ++last_seq; |
217 | last_seq &= drv->num_fences_mask; | 217 | last_seq &= drv->num_fences_mask; |
@@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) | |||
224 | if (!fence) | 224 | if (!fence) |
225 | continue; | 225 | continue; |
226 | 226 | ||
227 | r = fence_signal(fence); | 227 | r = dma_fence_signal(fence); |
228 | if (!r) | 228 | if (!r) |
229 | FENCE_TRACE(fence, "signaled from irq context\n"); | 229 | DMA_FENCE_TRACE(fence, "signaled from irq context\n"); |
230 | else | 230 | else |
231 | BUG(); | 231 | BUG(); |
232 | 232 | ||
233 | fence_put(fence); | 233 | dma_fence_put(fence); |
234 | } while (last_seq != seq); | 234 | } while (last_seq != seq); |
235 | } | 235 | } |
236 | 236 | ||
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg) | |||
260 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) | 260 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) |
261 | { | 261 | { |
262 | uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq); | 262 | uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq); |
263 | struct fence *fence, **ptr; | 263 | struct dma_fence *fence, **ptr; |
264 | int r; | 264 | int r; |
265 | 265 | ||
266 | if (!seq) | 266 | if (!seq) |
@@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) | |||
269 | ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; | 269 | ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; |
270 | rcu_read_lock(); | 270 | rcu_read_lock(); |
271 | fence = rcu_dereference(*ptr); | 271 | fence = rcu_dereference(*ptr); |
272 | if (!fence || !fence_get_rcu(fence)) { | 272 | if (!fence || !dma_fence_get_rcu(fence)) { |
273 | rcu_read_unlock(); | 273 | rcu_read_unlock(); |
274 | return 0; | 274 | return 0; |
275 | } | 275 | } |
276 | rcu_read_unlock(); | 276 | rcu_read_unlock(); |
277 | 277 | ||
278 | r = fence_wait(fence, false); | 278 | r = dma_fence_wait(fence, false); |
279 | fence_put(fence); | 279 | dma_fence_put(fence); |
280 | return r; | 280 | return r; |
281 | } | 281 | } |
282 | 282 | ||
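The lookup in amdgpu_fence_wait_empty() above shows the RCU discipline the rename leaves intact: the fence slot may be overwritten concurrently, so a counted reference must be taken with dma_fence_get_rcu() inside the read-side critical section before the fence can be waited on. A condensed sketch of that discipline (slot and get_fence_from_slot are illustrative, not amdgpu symbols):

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

/* Take a counted reference on an RCU-published fence slot; returns NULL
 * if the slot is empty or the fence was already on its way to be freed. */
static struct dma_fence *get_fence_from_slot(struct dma_fence __rcu **slot)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = rcu_dereference(*slot);
	if (fence && !dma_fence_get_rcu(fence))
		fence = NULL;	/* refcount hit zero under us */
	rcu_read_unlock();

	return fence;		/* caller must dma_fence_put() when done */
}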
@@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) | |||
452 | amd_sched_fini(&ring->sched); | 452 | amd_sched_fini(&ring->sched); |
453 | del_timer_sync(&ring->fence_drv.fallback_timer); | 453 | del_timer_sync(&ring->fence_drv.fallback_timer); |
454 | for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) | 454 | for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) |
455 | fence_put(ring->fence_drv.fences[j]); | 455 | dma_fence_put(ring->fence_drv.fences[j]); |
456 | kfree(ring->fence_drv.fences); | 456 | kfree(ring->fence_drv.fences); |
457 | ring->fence_drv.fences = NULL; | 457 | ring->fence_drv.fences = NULL; |
458 | ring->fence_drv.initialized = false; | 458 | ring->fence_drv.initialized = false; |
@@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev) | |||
541 | * Common fence implementation | 541 | * Common fence implementation |
542 | */ | 542 | */ |
543 | 543 | ||
544 | static const char *amdgpu_fence_get_driver_name(struct fence *fence) | 544 | static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence) |
545 | { | 545 | { |
546 | return "amdgpu"; | 546 | return "amdgpu"; |
547 | } | 547 | } |
548 | 548 | ||
549 | static const char *amdgpu_fence_get_timeline_name(struct fence *f) | 549 | static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) |
550 | { | 550 | { |
551 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | 551 | struct amdgpu_fence *fence = to_amdgpu_fence(f); |
552 | return (const char *)fence->ring->name; | 552 | return (const char *)fence->ring->name; |
@@ -560,7 +560,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f) | |||
560 | * to fence_queue that checks if this fence is signaled, and if so it | 560 | * to fence_queue that checks if this fence is signaled, and if so it |
561 | * signals the fence and removes itself. | 561 | * signals the fence and removes itself. |
562 | */ | 562 | */ |
563 | static bool amdgpu_fence_enable_signaling(struct fence *f) | 563 | static bool amdgpu_fence_enable_signaling(struct dma_fence *f) |
564 | { | 564 | { |
565 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | 565 | struct amdgpu_fence *fence = to_amdgpu_fence(f); |
566 | struct amdgpu_ring *ring = fence->ring; | 566 | struct amdgpu_ring *ring = fence->ring; |
@@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) | |||
568 | if (!timer_pending(&ring->fence_drv.fallback_timer)) | 568 | if (!timer_pending(&ring->fence_drv.fallback_timer)) |
569 | amdgpu_fence_schedule_fallback(ring); | 569 | amdgpu_fence_schedule_fallback(ring); |
570 | 570 | ||
571 | FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); | 571 | DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); |
572 | 572 | ||
573 | return true; | 573 | return true; |
574 | } | 574 | } |
@@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) | |||
582 | */ | 582 | */ |
583 | static void amdgpu_fence_free(struct rcu_head *rcu) | 583 | static void amdgpu_fence_free(struct rcu_head *rcu) |
584 | { | 584 | { |
585 | struct fence *f = container_of(rcu, struct fence, rcu); | 585 | struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); |
586 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | 586 | struct amdgpu_fence *fence = to_amdgpu_fence(f); |
587 | kmem_cache_free(amdgpu_fence_slab, fence); | 587 | kmem_cache_free(amdgpu_fence_slab, fence); |
588 | } | 588 | } |
@@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu) | |||
595 | * This function is called when the reference count becomes zero. | 595 | * This function is called when the reference count becomes zero. |
596 | * It just RCU schedules freeing up the fence. | 596 | * It just RCU schedules freeing up the fence. |
597 | */ | 597 | */ |
598 | static void amdgpu_fence_release(struct fence *f) | 598 | static void amdgpu_fence_release(struct dma_fence *f) |
599 | { | 599 | { |
600 | call_rcu(&f->rcu, amdgpu_fence_free); | 600 | call_rcu(&f->rcu, amdgpu_fence_free); |
601 | } | 601 | } |
602 | 602 | ||
603 | static const struct fence_ops amdgpu_fence_ops = { | 603 | static const struct dma_fence_ops amdgpu_fence_ops = { |
604 | .get_driver_name = amdgpu_fence_get_driver_name, | 604 | .get_driver_name = amdgpu_fence_get_driver_name, |
605 | .get_timeline_name = amdgpu_fence_get_timeline_name, | 605 | .get_timeline_name = amdgpu_fence_get_timeline_name, |
606 | .enable_signaling = amdgpu_fence_enable_signaling, | 606 | .enable_signaling = amdgpu_fence_enable_signaling, |
607 | .wait = fence_default_wait, | 607 | .wait = dma_fence_default_wait, |
608 | .release = amdgpu_fence_release, | 608 | .release = amdgpu_fence_release, |
609 | }; | 609 | }; |
610 | 610 | ||
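Taken together, the amdgpu_fence.c hunks above are the provider side of the new interface: the driver embeds struct dma_fence, initializes it with dma_fence_init() against a shared spinlock and a context obtained from dma_fence_context_alloc(), and supplies a struct dma_fence_ops with the callbacks shown in the diff. A stripped-down sketch of that shape, hedged to the ops this series uses (my_fence and related names are illustrative):

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_fence {
	struct dma_fence base;	/* must stay first for the default release */
};

static DEFINE_SPINLOCK(my_fence_lock);
static u64 my_fence_context;	/* assigned once in my_fence_setup() */
static unsigned my_fence_seqno;

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "my-driver";
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "my-timeline";
}

static bool my_enable_signaling(struct dma_fence *f)
{
	return true;	/* signaling is always armed in this sketch */
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name = my_get_driver_name,
	.get_timeline_name = my_get_timeline_name,
	.enable_signaling = my_enable_signaling,
	.wait = dma_fence_default_wait,
};

/* Call once at load time before creating fences. */
static void my_fence_setup(void)
{
	my_fence_context = dma_fence_context_alloc(1);
}

static struct dma_fence *my_fence_create(void)
{
	struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	dma_fence_init(&f->base, &my_fence_ops, &my_fence_lock,
		       my_fence_context, ++my_fence_seqno);
	return &f->base;	/* completed later via dma_fence_signal() */
}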
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 16308eb22e7f..216a9572d946 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
@@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, | |||
89 | * Free an IB (all asics). | 89 | * Free an IB (all asics). |
90 | */ | 90 | */ |
91 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, | 91 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, |
92 | struct fence *f) | 92 | struct dma_fence *f) |
93 | { | 93 | { |
94 | amdgpu_sa_bo_free(adev, &ib->sa_bo, f); | 94 | amdgpu_sa_bo_free(adev, &ib->sa_bo, f); |
95 | } | 95 | } |
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, | |||
116 | * to SI there was just a DE IB. | 116 | * to SI there was just a DE IB. |
117 | */ | 117 | */ |
118 | int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | 118 | int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, |
119 | struct amdgpu_ib *ibs, struct fence *last_vm_update, | 119 | struct amdgpu_ib *ibs, struct dma_fence *last_vm_update, |
120 | struct amdgpu_job *job, struct fence **f) | 120 | struct amdgpu_job *job, struct dma_fence **f) |
121 | { | 121 | { |
122 | struct amdgpu_device *adev = ring->adev; | 122 | struct amdgpu_device *adev = ring->adev; |
123 | struct amdgpu_ib *ib = &ibs[0]; | 123 | struct amdgpu_ib *ib = &ibs[0]; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 8c5807994073..a0de6286c453 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | |||
@@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, | |||
81 | 81 | ||
82 | void amdgpu_job_free_resources(struct amdgpu_job *job) | 82 | void amdgpu_job_free_resources(struct amdgpu_job *job) |
83 | { | 83 | { |
84 | struct fence *f; | 84 | struct dma_fence *f; |
85 | unsigned i; | 85 | unsigned i; |
86 | 86 | ||
87 | /* use sched fence if available */ | 87 | /* use sched fence if available */ |
@@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job) | |||
95 | { | 95 | { |
96 | struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); | 96 | struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); |
97 | 97 | ||
98 | fence_put(job->fence); | 98 | dma_fence_put(job->fence); |
99 | amdgpu_sync_free(&job->sync); | 99 | amdgpu_sync_free(&job->sync); |
100 | kfree(job); | 100 | kfree(job); |
101 | } | 101 | } |
@@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job) | |||
104 | { | 104 | { |
105 | amdgpu_job_free_resources(job); | 105 | amdgpu_job_free_resources(job); |
106 | 106 | ||
107 | fence_put(job->fence); | 107 | dma_fence_put(job->fence); |
108 | amdgpu_sync_free(&job->sync); | 108 | amdgpu_sync_free(&job->sync); |
109 | kfree(job); | 109 | kfree(job); |
110 | } | 110 | } |
111 | 111 | ||
112 | int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, | 112 | int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, |
113 | struct amd_sched_entity *entity, void *owner, | 113 | struct amd_sched_entity *entity, void *owner, |
114 | struct fence **f) | 114 | struct dma_fence **f) |
115 | { | 115 | { |
116 | int r; | 116 | int r; |
117 | job->ring = ring; | 117 | job->ring = ring; |
@@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, | |||
125 | 125 | ||
126 | job->owner = owner; | 126 | job->owner = owner; |
127 | job->fence_ctx = entity->fence_context; | 127 | job->fence_ctx = entity->fence_context; |
128 | *f = fence_get(&job->base.s_fence->finished); | 128 | *f = dma_fence_get(&job->base.s_fence->finished); |
129 | amdgpu_job_free_resources(job); | 129 | amdgpu_job_free_resources(job); |
130 | amd_sched_entity_push_job(&job->base); | 130 | amd_sched_entity_push_job(&job->base); |
131 | 131 | ||
132 | return 0; | 132 | return 0; |
133 | } | 133 | } |
134 | 134 | ||
135 | static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) | 135 | static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) |
136 | { | 136 | { |
137 | struct amdgpu_job *job = to_amdgpu_job(sched_job); | 137 | struct amdgpu_job *job = to_amdgpu_job(sched_job); |
138 | struct amdgpu_vm *vm = job->vm; | 138 | struct amdgpu_vm *vm = job->vm; |
139 | 139 | ||
140 | struct fence *fence = amdgpu_sync_get_fence(&job->sync); | 140 | struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync); |
141 | 141 | ||
142 | if (fence == NULL && vm && !job->vm_id) { | 142 | if (fence == NULL && vm && !job->vm_id) { |
143 | struct amdgpu_ring *ring = job->ring; | 143 | struct amdgpu_ring *ring = job->ring; |
@@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) | |||
155 | return fence; | 155 | return fence; |
156 | } | 156 | } |
157 | 157 | ||
158 | static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) | 158 | static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) |
159 | { | 159 | { |
160 | struct fence *fence = NULL; | 160 | struct dma_fence *fence = NULL; |
161 | struct amdgpu_job *job; | 161 | struct amdgpu_job *job; |
162 | int r; | 162 | int r; |
163 | 163 | ||
@@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) | |||
176 | DRM_ERROR("Error scheduling IBs (%d)\n", r); | 176 | DRM_ERROR("Error scheduling IBs (%d)\n", r); |
177 | 177 | ||
178 | /* if gpu reset, hw fence will be replaced here */ | 178 | /* if gpu reset, hw fence will be replaced here */ |
179 | fence_put(job->fence); | 179 | dma_fence_put(job->fence); |
180 | job->fence = fence_get(fence); | 180 | job->fence = dma_fence_get(fence); |
181 | amdgpu_job_free_resources(job); | 181 | amdgpu_job_free_resources(job); |
182 | return fence; | 182 | return fence; |
183 | } | 183 | } |
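amdgpu_job_run() above ends by replacing the stored hardware fence ("if gpu reset, hw fence will be replaced here"), one instance of the get/put balancing rule the rename does not alter. As a standalone idiom, hedged rather than copied from amdgpu (replace_fence is an illustrative helper; note it takes the new reference before dropping the old one, which also covers the case where both pointers alias):

#include <linux/dma-fence.h>

/* Swap the fence stored in @slot for @fence, keeping refcounts balanced.
 * dma_fence_get()/dma_fence_put() both tolerate NULL. */
static void replace_fence(struct dma_fence **slot, struct dma_fence *fence)
{
	struct dma_fence *old = *slot;

	*slot = dma_fence_get(fence);
	dma_fence_put(old);
}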
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6efa8d73b394..f0a0513ef4c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
@@ -391,7 +391,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
391 | 391 | ||
392 | if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && | 392 | if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && |
393 | bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { | 393 | bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { |
394 | struct fence *fence; | 394 | struct dma_fence *fence; |
395 | 395 | ||
396 | if (adev->mman.buffer_funcs_ring == NULL || | 396 | if (adev->mman.buffer_funcs_ring == NULL || |
397 | !adev->mman.buffer_funcs_ring->ready) { | 397 | !adev->mman.buffer_funcs_ring->ready) { |
@@ -411,9 +411,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
411 | amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); | 411 | amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); |
412 | amdgpu_bo_fence(bo, fence, false); | 412 | amdgpu_bo_fence(bo, fence, false); |
413 | amdgpu_bo_unreserve(bo); | 413 | amdgpu_bo_unreserve(bo); |
414 | fence_put(bo->tbo.moving); | 414 | dma_fence_put(bo->tbo.moving); |
415 | bo->tbo.moving = fence_get(fence); | 415 | bo->tbo.moving = dma_fence_get(fence); |
416 | fence_put(fence); | 416 | dma_fence_put(fence); |
417 | } | 417 | } |
418 | *bo_ptr = bo; | 418 | *bo_ptr = bo; |
419 | 419 | ||
@@ -499,7 +499,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, | |||
499 | struct amdgpu_ring *ring, | 499 | struct amdgpu_ring *ring, |
500 | struct amdgpu_bo *bo, | 500 | struct amdgpu_bo *bo, |
501 | struct reservation_object *resv, | 501 | struct reservation_object *resv, |
502 | struct fence **fence, | 502 | struct dma_fence **fence, |
503 | bool direct) | 503 | bool direct) |
504 | 504 | ||
505 | { | 505 | { |
@@ -531,7 +531,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, | |||
531 | struct amdgpu_ring *ring, | 531 | struct amdgpu_ring *ring, |
532 | struct amdgpu_bo *bo, | 532 | struct amdgpu_bo *bo, |
533 | struct reservation_object *resv, | 533 | struct reservation_object *resv, |
534 | struct fence **fence, | 534 | struct dma_fence **fence, |
535 | bool direct) | 535 | bool direct) |
536 | 536 | ||
537 | { | 537 | { |
@@ -941,7 +941,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
941 | * @shared: true if fence should be added shared | 941 | * @shared: true if fence should be added shared |
942 | * | 942 | * |
943 | */ | 943 | */ |
944 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, | 944 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, |
945 | bool shared) | 945 | bool shared) |
946 | { | 946 | { |
947 | struct reservation_object *resv = bo->tbo.resv; | 947 | struct reservation_object *resv = bo->tbo.resv; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index d3baf834ac24..5cbf59ec0f68 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | |||
@@ -157,19 +157,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, | |||
157 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, | 157 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, |
158 | struct ttm_mem_reg *new_mem); | 158 | struct ttm_mem_reg *new_mem); |
159 | int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | 159 | int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); |
160 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, | 160 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, |
161 | bool shared); | 161 | bool shared); |
162 | u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); | 162 | u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); |
163 | int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, | 163 | int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, |
164 | struct amdgpu_ring *ring, | 164 | struct amdgpu_ring *ring, |
165 | struct amdgpu_bo *bo, | 165 | struct amdgpu_bo *bo, |
166 | struct reservation_object *resv, | 166 | struct reservation_object *resv, |
167 | struct fence **fence, bool direct); | 167 | struct dma_fence **fence, bool direct); |
168 | int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, | 168 | int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, |
169 | struct amdgpu_ring *ring, | 169 | struct amdgpu_ring *ring, |
170 | struct amdgpu_bo *bo, | 170 | struct amdgpu_bo *bo, |
171 | struct reservation_object *resv, | 171 | struct reservation_object *resv, |
172 | struct fence **fence, | 172 | struct dma_fence **fence, |
173 | bool direct); | 173 | bool direct); |
174 | 174 | ||
175 | 175 | ||
@@ -201,7 +201,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, | |||
201 | unsigned size, unsigned align); | 201 | unsigned size, unsigned align); |
202 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, | 202 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, |
203 | struct amdgpu_sa_bo **sa_bo, | 203 | struct amdgpu_sa_bo **sa_bo, |
204 | struct fence *fence); | 204 | struct dma_fence *fence); |
205 | #if defined(CONFIG_DEBUG_FS) | 205 | #if defined(CONFIG_DEBUG_FS) |
206 | void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, | 206 | void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, |
207 | struct seq_file *m); | 207 | struct seq_file *m); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 1ee1b65d7eff..f2ad49c8e85b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | |||
@@ -67,7 +67,7 @@ struct amdgpu_fence_driver { | |||
67 | struct timer_list fallback_timer; | 67 | struct timer_list fallback_timer; |
68 | unsigned num_fences_mask; | 68 | unsigned num_fences_mask; |
69 | spinlock_t lock; | 69 | spinlock_t lock; |
70 | struct fence **fences; | 70 | struct dma_fence **fences; |
71 | }; | 71 | }; |
72 | 72 | ||
73 | int amdgpu_fence_driver_init(struct amdgpu_device *adev); | 73 | int amdgpu_fence_driver_init(struct amdgpu_device *adev); |
@@ -81,7 +81,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, | |||
81 | unsigned irq_type); | 81 | unsigned irq_type); |
82 | void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); | 82 | void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); |
83 | void amdgpu_fence_driver_resume(struct amdgpu_device *adev); | 83 | void amdgpu_fence_driver_resume(struct amdgpu_device *adev); |
84 | int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence); | 84 | int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence); |
85 | void amdgpu_fence_process(struct amdgpu_ring *ring); | 85 | void amdgpu_fence_process(struct amdgpu_ring *ring); |
86 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); | 86 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); |
87 | unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); | 87 | unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index d8af37a845f4..fd26c4b8d793 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo) | |||
147 | } | 147 | } |
148 | list_del_init(&sa_bo->olist); | 148 | list_del_init(&sa_bo->olist); |
149 | list_del_init(&sa_bo->flist); | 149 | list_del_init(&sa_bo->flist); |
150 | fence_put(sa_bo->fence); | 150 | dma_fence_put(sa_bo->fence); |
151 | kfree(sa_bo); | 151 | kfree(sa_bo); |
152 | } | 152 | } |
153 | 153 | ||
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager) | |||
161 | sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); | 161 | sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); |
162 | list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { | 162 | list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { |
163 | if (sa_bo->fence == NULL || | 163 | if (sa_bo->fence == NULL || |
164 | !fence_is_signaled(sa_bo->fence)) { | 164 | !dma_fence_is_signaled(sa_bo->fence)) { |
165 | return; | 165 | return; |
166 | } | 166 | } |
167 | amdgpu_sa_bo_remove_locked(sa_bo); | 167 | amdgpu_sa_bo_remove_locked(sa_bo); |
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager, | |||
244 | } | 244 | } |
245 | 245 | ||
246 | static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, | 246 | static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, |
247 | struct fence **fences, | 247 | struct dma_fence **fences, |
248 | unsigned *tries) | 248 | unsigned *tries) |
249 | { | 249 | { |
250 | struct amdgpu_sa_bo *best_bo = NULL; | 250 | struct amdgpu_sa_bo *best_bo = NULL; |
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, | |||
272 | sa_bo = list_first_entry(&sa_manager->flist[i], | 272 | sa_bo = list_first_entry(&sa_manager->flist[i], |
273 | struct amdgpu_sa_bo, flist); | 273 | struct amdgpu_sa_bo, flist); |
274 | 274 | ||
275 | if (!fence_is_signaled(sa_bo->fence)) { | 275 | if (!dma_fence_is_signaled(sa_bo->fence)) { |
276 | fences[i] = sa_bo->fence; | 276 | fences[i] = sa_bo->fence; |
277 | continue; | 277 | continue; |
278 | } | 278 | } |
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, | |||
314 | struct amdgpu_sa_bo **sa_bo, | 314 | struct amdgpu_sa_bo **sa_bo, |
315 | unsigned size, unsigned align) | 315 | unsigned size, unsigned align) |
316 | { | 316 | { |
317 | struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; | 317 | struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; |
318 | unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS]; | 318 | unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS]; |
319 | unsigned count; | 319 | unsigned count; |
320 | int i, r; | 320 | int i, r; |
@@ -356,14 +356,14 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, | |||
356 | 356 | ||
357 | for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) | 357 | for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) |
358 | if (fences[i]) | 358 | if (fences[i]) |
359 | fences[count++] = fence_get(fences[i]); | 359 | fences[count++] = dma_fence_get(fences[i]); |
360 | 360 | ||
361 | if (count) { | 361 | if (count) { |
362 | spin_unlock(&sa_manager->wq.lock); | 362 | spin_unlock(&sa_manager->wq.lock); |
363 | t = fence_wait_any_timeout(fences, count, false, | 363 | t = dma_fence_wait_any_timeout(fences, count, false, |
364 | MAX_SCHEDULE_TIMEOUT); | 364 | MAX_SCHEDULE_TIMEOUT); |
365 | for (i = 0; i < count; ++i) | 365 | for (i = 0; i < count; ++i) |
366 | fence_put(fences[i]); | 366 | dma_fence_put(fences[i]); |
367 | 367 | ||
368 | r = (t > 0) ? 0 : t; | 368 | r = (t > 0) ? 0 : t; |
369 | spin_lock(&sa_manager->wq.lock); | 369 | spin_lock(&sa_manager->wq.lock); |
@@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, | |||
384 | } | 384 | } |
385 | 385 | ||
386 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, | 386 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, |
387 | struct fence *fence) | 387 | struct dma_fence *fence) |
388 | { | 388 | { |
389 | struct amdgpu_sa_manager *sa_manager; | 389 | struct amdgpu_sa_manager *sa_manager; |
390 | 390 | ||
@@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, | |||
394 | 394 | ||
395 | sa_manager = (*sa_bo)->manager; | 395 | sa_manager = (*sa_bo)->manager; |
396 | spin_lock(&sa_manager->wq.lock); | 396 | spin_lock(&sa_manager->wq.lock); |
397 | if (fence && !fence_is_signaled(fence)) { | 397 | if (fence && !dma_fence_is_signaled(fence)) { |
398 | uint32_t idx; | 398 | uint32_t idx; |
399 | 399 | ||
400 | (*sa_bo)->fence = fence_get(fence); | 400 | (*sa_bo)->fence = dma_fence_get(fence); |
401 | idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS; | 401 | idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS; |
402 | list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]); | 402 | list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]); |
403 | } else { | 403 | } else { |
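The retry loop in amdgpu_sa_bo_new() above relies on dma_fence_wait_any_timeout(), which blocks until any fence in an array signals and returns the remaining timeout, 0 on timeout, or a negative error; the semantics are unchanged by the rename. A sketch of that call pattern using the four-argument form this series calls (wait_for_first_fence is an illustrative wrapper):

#include <linux/dma-fence.h>
#include <linux/sched.h>

/* Wait until any of @count fences signals. Consumes the references held
 * in @fences. Returns 0 on success or a negative error code. */
static int wait_for_first_fence(struct dma_fence **fences, unsigned count)
{
	signed long t;
	unsigned i;

	t = dma_fence_wait_any_timeout(fences, count, false,
				       MAX_SCHEDULE_TIMEOUT);
	for (i = 0; i < count; ++i)
		dma_fence_put(fences[i]);

	return t > 0 ? 0 : t;	/* mirrors the handling in amdgpu_sa_bo_new() */
}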
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 5c8d3022fb87..ed814e6d0207 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | |||
@@ -34,7 +34,7 @@ | |||
34 | 34 | ||
35 | struct amdgpu_sync_entry { | 35 | struct amdgpu_sync_entry { |
36 | struct hlist_node node; | 36 | struct hlist_node node; |
37 | struct fence *fence; | 37 | struct dma_fence *fence; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static struct kmem_cache *amdgpu_sync_slab; | 40 | static struct kmem_cache *amdgpu_sync_slab; |
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync) | |||
60 | * | 60 | * |
61 | * Test if the fence was issued by us. | 61 | * Test if the fence was issued by us. |
62 | */ | 62 | */ |
63 | static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f) | 63 | static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, |
64 | struct dma_fence *f) | ||
64 | { | 65 | { |
65 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); | 66 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); |
66 | 67 | ||
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f) | |||
81 | * | 82 | * |
82 | * Extract who originally created the fence. | 83 | * Extract who originally created the fence. |
83 | */ | 84 | */ |
84 | static void *amdgpu_sync_get_owner(struct fence *f) | 85 | static void *amdgpu_sync_get_owner(struct dma_fence *f) |
85 | { | 86 | { |
86 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); | 87 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); |
87 | 88 | ||
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f) | |||
99 | * | 100 | * |
100 | * Either keep the existing fence or the new one, depending on which one is later. | 101 | * Either keep the existing fence or the new one, depending on which one is later. |
101 | */ | 102 | */ |
102 | static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence) | 103 | static void amdgpu_sync_keep_later(struct dma_fence **keep, |
104 | struct dma_fence *fence) | ||
103 | { | 105 | { |
104 | if (*keep && fence_is_later(*keep, fence)) | 106 | if (*keep && dma_fence_is_later(*keep, fence)) |
105 | return; | 107 | return; |
106 | 108 | ||
107 | fence_put(*keep); | 109 | dma_fence_put(*keep); |
108 | *keep = fence_get(fence); | 110 | *keep = dma_fence_get(fence); |
109 | } | 111 | } |
110 | 112 | ||
111 | /** | 113 | /** |
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence) | |||
117 | * Tries to add the fence to an existing hash entry. Returns true when an entry | 119 | * Tries to add the fence to an existing hash entry. Returns true when an entry |
118 | * was found, false otherwise. | 120 | * was found, false otherwise. |
119 | */ | 121 | */ |
120 | static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f) | 122 | static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f) |
121 | { | 123 | { |
122 | struct amdgpu_sync_entry *e; | 124 | struct amdgpu_sync_entry *e; |
123 | 125 | ||
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f) | |||
139 | * | 141 | * |
140 | */ | 142 | */ |
141 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | 143 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, |
142 | struct fence *f) | 144 | struct dma_fence *f) |
143 | { | 145 | { |
144 | struct amdgpu_sync_entry *e; | 146 | struct amdgpu_sync_entry *e; |
145 | 147 | ||
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | |||
158 | return -ENOMEM; | 160 | return -ENOMEM; |
159 | 161 | ||
160 | hash_add(sync->fences, &e->node, f->context); | 162 | hash_add(sync->fences, &e->node, f->context); |
161 | e->fence = fence_get(f); | 163 | e->fence = dma_fence_get(f); |
162 | return 0; | 164 | return 0; |
163 | } | 165 | } |
164 | 166 | ||
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, | |||
177 | void *owner) | 179 | void *owner) |
178 | { | 180 | { |
179 | struct reservation_object_list *flist; | 181 | struct reservation_object_list *flist; |
180 | struct fence *f; | 182 | struct dma_fence *f; |
181 | void *fence_owner; | 183 | void *fence_owner; |
182 | unsigned i; | 184 | unsigned i; |
183 | int r = 0; | 185 | int r = 0; |
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, | |||
231 | * Returns the next fence not signaled yet without removing it from the sync | 233 | * Returns the next fence not signaled yet without removing it from the sync |
232 | * object. | 234 | * object. |
233 | */ | 235 | */ |
234 | struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | 236 | struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, |
235 | struct amdgpu_ring *ring) | 237 | struct amdgpu_ring *ring) |
236 | { | 238 | { |
237 | struct amdgpu_sync_entry *e; | 239 | struct amdgpu_sync_entry *e; |
238 | struct hlist_node *tmp; | 240 | struct hlist_node *tmp; |
239 | int i; | 241 | int i; |
240 | 242 | ||
241 | hash_for_each_safe(sync->fences, i, tmp, e, node) { | 243 | hash_for_each_safe(sync->fences, i, tmp, e, node) { |
242 | struct fence *f = e->fence; | 244 | struct dma_fence *f = e->fence; |
243 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); | 245 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); |
244 | 246 | ||
245 | if (ring && s_fence) { | 247 | if (ring && s_fence) { |
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | |||
247 | * when they are scheduled. | 249 | * when they are scheduled. |
248 | */ | 250 | */ |
249 | if (s_fence->sched == &ring->sched) { | 251 | if (s_fence->sched == &ring->sched) { |
250 | if (fence_is_signaled(&s_fence->scheduled)) | 252 | if (dma_fence_is_signaled(&s_fence->scheduled)) |
251 | continue; | 253 | continue; |
252 | 254 | ||
253 | return &s_fence->scheduled; | 255 | return &s_fence->scheduled; |
254 | } | 256 | } |
255 | } | 257 | } |
256 | 258 | ||
257 | if (fence_is_signaled(f)) { | 259 | if (dma_fence_is_signaled(f)) { |
258 | hash_del(&e->node); | 260 | hash_del(&e->node); |
259 | fence_put(f); | 261 | dma_fence_put(f); |
260 | kmem_cache_free(amdgpu_sync_slab, e); | 262 | kmem_cache_free(amdgpu_sync_slab, e); |
261 | continue; | 263 | continue; |
262 | } | 264 | } |
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | |||
274 | * | 276 | * |
275 | * Get and removes the next fence from the sync object not signaled yet. | 277 | * Get and removes the next fence from the sync object not signaled yet. |
276 | */ | 278 | */ |
277 | struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) | 279 | struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) |
278 | { | 280 | { |
279 | struct amdgpu_sync_entry *e; | 281 | struct amdgpu_sync_entry *e; |
280 | struct hlist_node *tmp; | 282 | struct hlist_node *tmp; |
281 | struct fence *f; | 283 | struct dma_fence *f; |
282 | int i; | 284 | int i; |
283 | 285 | ||
284 | hash_for_each_safe(sync->fences, i, tmp, e, node) { | 286 | hash_for_each_safe(sync->fences, i, tmp, e, node) { |
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) | |||
288 | hash_del(&e->node); | 290 | hash_del(&e->node); |
289 | kmem_cache_free(amdgpu_sync_slab, e); | 291 | kmem_cache_free(amdgpu_sync_slab, e); |
290 | 292 | ||
291 | if (!fence_is_signaled(f)) | 293 | if (!dma_fence_is_signaled(f)) |
292 | return f; | 294 | return f; |
293 | 295 | ||
294 | fence_put(f); | 296 | dma_fence_put(f); |
295 | } | 297 | } |
296 | return NULL; | 298 | return NULL; |
297 | } | 299 | } |
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync) | |||
311 | 313 | ||
312 | hash_for_each_safe(sync->fences, i, tmp, e, node) { | 314 | hash_for_each_safe(sync->fences, i, tmp, e, node) { |
313 | hash_del(&e->node); | 315 | hash_del(&e->node); |
314 | fence_put(e->fence); | 316 | dma_fence_put(e->fence); |
315 | kmem_cache_free(amdgpu_sync_slab, e); | 317 | kmem_cache_free(amdgpu_sync_slab, e); |
316 | } | 318 | } |
317 | 319 | ||
318 | fence_put(sync->last_vm_update); | 320 | dma_fence_put(sync->last_vm_update); |
319 | } | 321 | } |
320 | 322 | ||
321 | /** | 323 | /** |
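amdgpu_sync_keep_later() in the hunks above is the canonical use of dma_fence_is_later(): the comparison is only meaningful for two fences on the same context, where a larger seqno means later on that timeline. The same reference-juggling idiom, restated standalone (keep_later is an illustrative name):

#include <linux/dma-fence.h>

/* Keep in *keep whichever of the two same-context fences is later,
 * taking a reference on the new fence and dropping the old one. */
static void keep_later(struct dma_fence **keep, struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;		/* existing fence is already the later one */

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}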
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h index 405f379ac186..605be266e07f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #include <linux/hashtable.h> | 27 | #include <linux/hashtable.h> |
28 | 28 | ||
29 | struct fence; | 29 | struct dma_fence; |
30 | struct reservation_object; | 30 | struct reservation_object; |
31 | struct amdgpu_device; | 31 | struct amdgpu_device; |
32 | struct amdgpu_ring; | 32 | struct amdgpu_ring; |
@@ -36,19 +36,19 @@ struct amdgpu_ring; | |||
36 | */ | 36 | */ |
37 | struct amdgpu_sync { | 37 | struct amdgpu_sync { |
38 | DECLARE_HASHTABLE(fences, 4); | 38 | DECLARE_HASHTABLE(fences, 4); |
39 | struct fence *last_vm_update; | 39 | struct dma_fence *last_vm_update; |
40 | }; | 40 | }; |
41 | 41 | ||
42 | void amdgpu_sync_create(struct amdgpu_sync *sync); | 42 | void amdgpu_sync_create(struct amdgpu_sync *sync); |
43 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | 43 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, |
44 | struct fence *f); | 44 | struct dma_fence *f); |
45 | int amdgpu_sync_resv(struct amdgpu_device *adev, | 45 | int amdgpu_sync_resv(struct amdgpu_device *adev, |
46 | struct amdgpu_sync *sync, | 46 | struct amdgpu_sync *sync, |
47 | struct reservation_object *resv, | 47 | struct reservation_object *resv, |
48 | void *owner); | 48 | void *owner); |
49 | struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | 49 | struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, |
50 | struct amdgpu_ring *ring); | 50 | struct amdgpu_ring *ring); |
51 | struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); | 51 | struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); |
52 | void amdgpu_sync_free(struct amdgpu_sync *sync); | 52 | void amdgpu_sync_free(struct amdgpu_sync *sync); |
53 | int amdgpu_sync_init(void); | 53 | int amdgpu_sync_init(void); |
54 | void amdgpu_sync_fini(void); | 54 | void amdgpu_sync_fini(void); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index b827c75e95de..e05a24325eeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | |||
@@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) | |||
78 | void *gtt_map, *vram_map; | 78 | void *gtt_map, *vram_map; |
79 | void **gtt_start, **gtt_end; | 79 | void **gtt_start, **gtt_end; |
80 | void **vram_start, **vram_end; | 80 | void **vram_start, **vram_end; |
81 | struct fence *fence = NULL; | 81 | struct dma_fence *fence = NULL; |
82 | 82 | ||
83 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, | 83 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, |
84 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, | 84 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, |
@@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) | |||
118 | goto out_lclean_unpin; | 118 | goto out_lclean_unpin; |
119 | } | 119 | } |
120 | 120 | ||
121 | r = fence_wait(fence, false); | 121 | r = dma_fence_wait(fence, false); |
122 | if (r) { | 122 | if (r) { |
123 | DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); | 123 | DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); |
124 | goto out_lclean_unpin; | 124 | goto out_lclean_unpin; |
125 | } | 125 | } |
126 | 126 | ||
127 | fence_put(fence); | 127 | dma_fence_put(fence); |
128 | 128 | ||
129 | r = amdgpu_bo_kmap(vram_obj, &vram_map); | 129 | r = amdgpu_bo_kmap(vram_obj, &vram_map); |
130 | if (r) { | 130 | if (r) { |
@@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) | |||
163 | goto out_lclean_unpin; | 163 | goto out_lclean_unpin; |
164 | } | 164 | } |
165 | 165 | ||
166 | r = fence_wait(fence, false); | 166 | r = dma_fence_wait(fence, false); |
167 | if (r) { | 167 | if (r) { |
168 | DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); | 168 | DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); |
169 | goto out_lclean_unpin; | 169 | goto out_lclean_unpin; |
170 | } | 170 | } |
171 | 171 | ||
172 | fence_put(fence); | 172 | dma_fence_put(fence); |
173 | 173 | ||
174 | r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); | 174 | r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); |
175 | if (r) { | 175 | if (r) { |
@@ -216,7 +216,7 @@ out_lclean: | |||
216 | amdgpu_bo_unref(&gtt_obj[i]); | 216 | amdgpu_bo_unref(&gtt_obj[i]); |
217 | } | 217 | } |
218 | if (fence) | 218 | if (fence) |
219 | fence_put(fence); | 219 | dma_fence_put(fence); |
220 | break; | 220 | break; |
221 | } | 221 | } |
222 | 222 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 067e5e683bb3..bb964a8ff938 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | |||
@@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, | |||
104 | __field(struct amdgpu_device *, adev) | 104 | __field(struct amdgpu_device *, adev) |
105 | __field(struct amd_sched_job *, sched_job) | 105 | __field(struct amd_sched_job *, sched_job) |
106 | __field(struct amdgpu_ib *, ib) | 106 | __field(struct amdgpu_ib *, ib) |
107 | __field(struct fence *, fence) | 107 | __field(struct dma_fence *, fence) |
108 | __field(char *, ring_name) | 108 | __field(char *, ring_name) |
109 | __field(u32, num_ibs) | 109 | __field(u32, num_ibs) |
110 | ), | 110 | ), |
@@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job, | |||
129 | __field(struct amdgpu_device *, adev) | 129 | __field(struct amdgpu_device *, adev) |
130 | __field(struct amd_sched_job *, sched_job) | 130 | __field(struct amd_sched_job *, sched_job) |
131 | __field(struct amdgpu_ib *, ib) | 131 | __field(struct amdgpu_ib *, ib) |
132 | __field(struct fence *, fence) | 132 | __field(struct dma_fence *, fence) |
133 | __field(char *, ring_name) | 133 | __field(char *, ring_name) |
134 | __field(u32, num_ibs) | 134 | __field(u32, num_ibs) |
135 | ), | 135 | ), |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index f1a206df9823..1821c05484d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -287,7 +287,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, | |||
287 | struct drm_mm_node *old_mm, *new_mm; | 287 | struct drm_mm_node *old_mm, *new_mm; |
288 | uint64_t old_start, old_size, new_start, new_size; | 288 | uint64_t old_start, old_size, new_start, new_size; |
289 | unsigned long num_pages; | 289 | unsigned long num_pages; |
290 | struct fence *fence = NULL; | 290 | struct dma_fence *fence = NULL; |
291 | int r; | 291 | int r; |
292 | 292 | ||
293 | BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0); | 293 | BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0); |
@@ -313,7 +313,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, | |||
313 | num_pages = new_mem->num_pages; | 313 | num_pages = new_mem->num_pages; |
314 | while (num_pages) { | 314 | while (num_pages) { |
315 | unsigned long cur_pages = min(old_size, new_size); | 315 | unsigned long cur_pages = min(old_size, new_size); |
316 | struct fence *next; | 316 | struct dma_fence *next; |
317 | 317 | ||
318 | r = amdgpu_copy_buffer(ring, old_start, new_start, | 318 | r = amdgpu_copy_buffer(ring, old_start, new_start, |
319 | cur_pages * PAGE_SIZE, | 319 | cur_pages * PAGE_SIZE, |
@@ -321,7 +321,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, | |||
321 | if (r) | 321 | if (r) |
322 | goto error; | 322 | goto error; |
323 | 323 | ||
324 | fence_put(fence); | 324 | dma_fence_put(fence); |
325 | fence = next; | 325 | fence = next; |
326 | 326 | ||
327 | num_pages -= cur_pages; | 327 | num_pages -= cur_pages; |
@@ -353,13 +353,13 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, | |||
353 | } | 353 | } |
354 | 354 | ||
355 | r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); | 355 | r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); |
356 | fence_put(fence); | 356 | dma_fence_put(fence); |
357 | return r; | 357 | return r; |
358 | 358 | ||
359 | error: | 359 | error: |
360 | if (fence) | 360 | if (fence) |
361 | fence_wait(fence, false); | 361 | dma_fence_wait(fence, false); |
362 | fence_put(fence); | 362 | dma_fence_put(fence); |
363 | return r; | 363 | return r; |
364 | } | 364 | } |
365 | 365 | ||
@@ -1316,7 +1316,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, | |||
1316 | uint64_t dst_offset, | 1316 | uint64_t dst_offset, |
1317 | uint32_t byte_count, | 1317 | uint32_t byte_count, |
1318 | struct reservation_object *resv, | 1318 | struct reservation_object *resv, |
1319 | struct fence **fence, bool direct_submit) | 1319 | struct dma_fence **fence, bool direct_submit) |
1320 | { | 1320 | { |
1321 | struct amdgpu_device *adev = ring->adev; | 1321 | struct amdgpu_device *adev = ring->adev; |
1322 | struct amdgpu_job *job; | 1322 | struct amdgpu_job *job; |
@@ -1363,7 +1363,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, | |||
1363 | if (direct_submit) { | 1363 | if (direct_submit) { |
1364 | r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, | 1364 | r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, |
1365 | NULL, NULL, fence); | 1365 | NULL, NULL, fence); |
1366 | job->fence = fence_get(*fence); | 1366 | job->fence = dma_fence_get(*fence); |
1367 | if (r) | 1367 | if (r) |
1368 | DRM_ERROR("Error scheduling IBs (%d)\n", r); | 1368 | DRM_ERROR("Error scheduling IBs (%d)\n", r); |
1369 | amdgpu_job_free(job); | 1369 | amdgpu_job_free(job); |
@@ -1384,7 +1384,7 @@ error_free: | |||
1384 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, | 1384 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
1385 | uint32_t src_data, | 1385 | uint32_t src_data, |
1386 | struct reservation_object *resv, | 1386 | struct reservation_object *resv, |
1387 | struct fence **fence) | 1387 | struct dma_fence **fence) |
1388 | { | 1388 | { |
1389 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | 1389 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
1390 | struct amdgpu_job *job; | 1390 | struct amdgpu_job *job; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index d1c00c04782f..98ee384f0fca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | |||
@@ -78,11 +78,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, | |||
78 | uint64_t dst_offset, | 78 | uint64_t dst_offset, |
79 | uint32_t byte_count, | 79 | uint32_t byte_count, |
80 | struct reservation_object *resv, | 80 | struct reservation_object *resv, |
81 | struct fence **fence, bool direct_submit); | 81 | struct dma_fence **fence, bool direct_submit); |
82 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, | 82 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
83 | uint32_t src_data, | 83 | uint32_t src_data, |
84 | struct reservation_object *resv, | 84 | struct reservation_object *resv, |
85 | struct fence **fence); | 85 | struct dma_fence **fence); |
86 | 86 | ||
87 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); | 87 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); |
88 | bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); | 88 | bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 1b54cc218b47..fb270c7e7171 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) | |||
333 | for (i = 0; i < adev->uvd.max_handles; ++i) { | 333 | for (i = 0; i < adev->uvd.max_handles; ++i) { |
334 | uint32_t handle = atomic_read(&adev->uvd.handles[i]); | 334 | uint32_t handle = atomic_read(&adev->uvd.handles[i]); |
335 | if (handle != 0 && adev->uvd.filp[i] == filp) { | 335 | if (handle != 0 && adev->uvd.filp[i] == filp) { |
336 | struct fence *fence; | 336 | struct dma_fence *fence; |
337 | 337 | ||
338 | r = amdgpu_uvd_get_destroy_msg(ring, handle, | 338 | r = amdgpu_uvd_get_destroy_msg(ring, handle, |
339 | false, &fence); | 339 | false, &fence); |
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) | |||
342 | continue; | 342 | continue; |
343 | } | 343 | } |
344 | 344 | ||
345 | fence_wait(fence, false); | 345 | dma_fence_wait(fence, false); |
346 | fence_put(fence); | 346 | dma_fence_put(fence); |
347 | 347 | ||
348 | adev->uvd.filp[i] = NULL; | 348 | adev->uvd.filp[i] = NULL; |
349 | atomic_set(&adev->uvd.handles[i], 0); | 349 | atomic_set(&adev->uvd.handles[i], 0); |
@@ -912,14 +912,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) | |||
912 | } | 912 | } |
913 | 913 | ||
914 | static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, | 914 | static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, |
915 | bool direct, struct fence **fence) | 915 | bool direct, struct dma_fence **fence) |
916 | { | 916 | { |
917 | struct ttm_validate_buffer tv; | 917 | struct ttm_validate_buffer tv; |
918 | struct ww_acquire_ctx ticket; | 918 | struct ww_acquire_ctx ticket; |
919 | struct list_head head; | 919 | struct list_head head; |
920 | struct amdgpu_job *job; | 920 | struct amdgpu_job *job; |
921 | struct amdgpu_ib *ib; | 921 | struct amdgpu_ib *ib; |
922 | struct fence *f = NULL; | 922 | struct dma_fence *f = NULL; |
923 | struct amdgpu_device *adev = ring->adev; | 923 | struct amdgpu_device *adev = ring->adev; |
924 | uint64_t addr; | 924 | uint64_t addr; |
925 | int i, r; | 925 | int i, r; |
@@ -963,7 +963,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, | |||
963 | 963 | ||
964 | if (direct) { | 964 | if (direct) { |
965 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); | 965 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); |
966 | job->fence = fence_get(f); | 966 | job->fence = dma_fence_get(f); |
967 | if (r) | 967 | if (r) |
968 | goto err_free; | 968 | goto err_free; |
969 | 969 | ||
@@ -978,9 +978,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, | |||
978 | ttm_eu_fence_buffer_objects(&ticket, &head, f); | 978 | ttm_eu_fence_buffer_objects(&ticket, &head, f); |
979 | 979 | ||
980 | if (fence) | 980 | if (fence) |
981 | *fence = fence_get(f); | 981 | *fence = dma_fence_get(f); |
982 | amdgpu_bo_unref(&bo); | 982 | amdgpu_bo_unref(&bo); |
983 | fence_put(f); | 983 | dma_fence_put(f); |
984 | 984 | ||
985 | return 0; | 985 | return 0; |
986 | 986 | ||
@@ -996,7 +996,7 @@ err: | |||
996 | crash the vcpu so just try to emit a dummy create/destroy msg to | 996 | crash the vcpu so just try to emit a dummy create/destroy msg to |
997 | avoid this */ | 997 | avoid this */ |
998 | int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | 998 | int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, |
999 | struct fence **fence) | 999 | struct dma_fence **fence) |
1000 | { | 1000 | { |
1001 | struct amdgpu_device *adev = ring->adev; | 1001 | struct amdgpu_device *adev = ring->adev; |
1002 | struct amdgpu_bo *bo; | 1002 | struct amdgpu_bo *bo; |
@@ -1046,7 +1046,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
1046 | } | 1046 | } |
1047 | 1047 | ||
1048 | int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | 1048 | int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, |
1049 | bool direct, struct fence **fence) | 1049 | bool direct, struct dma_fence **fence) |
1050 | { | 1050 | { |
1051 | struct amdgpu_device *adev = ring->adev; | 1051 | struct amdgpu_device *adev = ring->adev; |
1052 | struct amdgpu_bo *bo; | 1052 | struct amdgpu_bo *bo; |
@@ -1133,7 +1133,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) | |||
1133 | */ | 1133 | */ |
1134 | int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) | 1134 | int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) |
1135 | { | 1135 | { |
1136 | struct fence *fence; | 1136 | struct dma_fence *fence; |
1137 | long r; | 1137 | long r; |
1138 | 1138 | ||
1139 | r = amdgpu_uvd_get_create_msg(ring, 1, NULL); | 1139 | r = amdgpu_uvd_get_create_msg(ring, 1, NULL); |
@@ -1148,7 +1148,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1148 | goto error; | 1148 | goto error; |
1149 | } | 1149 | } |
1150 | 1150 | ||
1151 | r = fence_wait_timeout(fence, false, timeout); | 1151 | r = dma_fence_wait_timeout(fence, false, timeout); |
1152 | if (r == 0) { | 1152 | if (r == 0) { |
1153 | DRM_ERROR("amdgpu: IB test timed out.\n"); | 1153 | DRM_ERROR("amdgpu: IB test timed out.\n"); |
1154 | r = -ETIMEDOUT; | 1154 | r = -ETIMEDOUT; |
@@ -1159,7 +1159,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1159 | r = 0; | 1159 | r = 0; |
1160 | } | 1160 | } |
1161 | 1161 | ||
1162 | fence_put(fence); | 1162 | dma_fence_put(fence); |
1163 | 1163 | ||
1164 | error: | 1164 | error: |
1165 | return r; | 1165 | return r; |
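The UVD IB test above decodes the dma_fence_wait_timeout() convention, which the rename leaves unchanged: a negative return is an error, 0 means the timeout expired before the fence signaled, and a positive value is the remaining jiffies on success. A small sketch of that decode (wait_fence_msecs is an illustrative wrapper, not an amdgpu symbol):

#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Returns 0 on success, -ETIMEDOUT on timeout, other -errno on failure. */
static int wait_fence_msecs(struct dma_fence *fence, unsigned int ms)
{
	long r = dma_fence_wait_timeout(fence, false, msecs_to_jiffies(ms));

	if (r == 0)
		return -ETIMEDOUT;	/* timeout expired, fence not signaled */
	return r < 0 ? r : 0;		/* r > 0: signaled with time to spare */
}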
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index c850009602d1..6249ba1bde2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | |||
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev); | |||
29 | int amdgpu_uvd_suspend(struct amdgpu_device *adev); | 29 | int amdgpu_uvd_suspend(struct amdgpu_device *adev); |
30 | int amdgpu_uvd_resume(struct amdgpu_device *adev); | 30 | int amdgpu_uvd_resume(struct amdgpu_device *adev); |
31 | int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | 31 | int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, |
32 | struct fence **fence); | 32 | struct dma_fence **fence); |
33 | int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | 33 | int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, |
34 | bool direct, struct fence **fence); | 34 | bool direct, struct dma_fence **fence); |
35 | void amdgpu_uvd_free_handles(struct amdgpu_device *adev, | 35 | void amdgpu_uvd_free_handles(struct amdgpu_device *adev, |
36 | struct drm_file *filp); | 36 | struct drm_file *filp); |
37 | int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); | 37 | int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 3d6f86cd028f..69b66b9e7f57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
@@ -396,12 +396,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) | |||
396 | * Open up a stream for HW test | 396 | * Open up a stream for HW test |
397 | */ | 397 | */ |
398 | int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | 398 | int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, |
399 | struct fence **fence) | 399 | struct dma_fence **fence) |
400 | { | 400 | { |
401 | const unsigned ib_size_dw = 1024; | 401 | const unsigned ib_size_dw = 1024; |
402 | struct amdgpu_job *job; | 402 | struct amdgpu_job *job; |
403 | struct amdgpu_ib *ib; | 403 | struct amdgpu_ib *ib; |
404 | struct fence *f = NULL; | 404 | struct dma_fence *f = NULL; |
405 | uint64_t dummy; | 405 | uint64_t dummy; |
406 | int i, r; | 406 | int i, r; |
407 | 407 | ||
@@ -451,14 +451,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
451 | ib->ptr[i] = 0x0; | 451 | ib->ptr[i] = 0x0; |
452 | 452 | ||
453 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); | 453 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); |
454 | job->fence = fence_get(f); | 454 | job->fence = dma_fence_get(f); |
455 | if (r) | 455 | if (r) |
456 | goto err; | 456 | goto err; |
457 | 457 | ||
458 | amdgpu_job_free(job); | 458 | amdgpu_job_free(job); |
459 | if (fence) | 459 | if (fence) |
460 | *fence = fence_get(f); | 460 | *fence = dma_fence_get(f); |
461 | fence_put(f); | 461 | dma_fence_put(f); |
462 | return 0; | 462 | return 0; |
463 | 463 | ||
464 | err: | 464 | err: |
@@ -477,12 +477,12 @@ err: | |||
477 | * Close up a stream for HW test or if userspace failed to do so | 477 | * Close up a stream for HW test or if userspace failed to do so |
478 | */ | 478 | */ |
479 | int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | 479 | int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, |
480 | bool direct, struct fence **fence) | 480 | bool direct, struct dma_fence **fence) |
481 | { | 481 | { |
482 | const unsigned ib_size_dw = 1024; | 482 | const unsigned ib_size_dw = 1024; |
483 | struct amdgpu_job *job; | 483 | struct amdgpu_job *job; |
484 | struct amdgpu_ib *ib; | 484 | struct amdgpu_ib *ib; |
485 | struct fence *f = NULL; | 485 | struct dma_fence *f = NULL; |
486 | int i, r; | 486 | int i, r; |
487 | 487 | ||
488 | r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); | 488 | r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); |
@@ -514,7 +514,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
514 | 514 | ||
515 | if (direct) { | 515 | if (direct) { |
516 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); | 516 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); |
517 | job->fence = fence_get(f); | 517 | job->fence = dma_fence_get(f); |
518 | if (r) | 518 | if (r) |
519 | goto err; | 519 | goto err; |
520 | 520 | ||
@@ -527,8 +527,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
527 | } | 527 | } |
528 | 528 | ||
529 | if (fence) | 529 | if (fence) |
530 | *fence = fence_get(f); | 530 | *fence = dma_fence_get(f); |
531 | fence_put(f); | 531 | dma_fence_put(f); |
532 | return 0; | 532 | return 0; |
533 | 533 | ||
534 | err: | 534 | err: |
@@ -965,7 +965,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) | |||
965 | */ | 965 | */ |
966 | int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) | 966 | int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) |
967 | { | 967 | { |
968 | struct fence *fence = NULL; | 968 | struct dma_fence *fence = NULL; |
969 | long r; | 969 | long r; |
970 | 970 | ||
971 | /* skip vce ring1/2 ib test for now, since it's not reliable */ | 971 | /* skip vce ring1/2 ib test for now, since it's not reliable */ |
@@ -984,7 +984,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
984 | goto error; | 984 | goto error; |
985 | } | 985 | } |
986 | 986 | ||
987 | r = fence_wait_timeout(fence, false, timeout); | 987 | r = dma_fence_wait_timeout(fence, false, timeout); |
988 | if (r == 0) { | 988 | if (r == 0) { |
989 | DRM_ERROR("amdgpu: IB test timed out.\n"); | 989 | DRM_ERROR("amdgpu: IB test timed out.\n"); |
990 | r = -ETIMEDOUT; | 990 | r = -ETIMEDOUT; |
@@ -995,6 +995,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
995 | r = 0; | 995 | r = 0; |
996 | } | 996 | } |
997 | error: | 997 | error: |
998 | fence_put(fence); | 998 | dma_fence_put(fence); |
999 | return r; | 999 | return r; |
1000 | } | 1000 | } |
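
The create/destroy message helpers hand their fence back with the usual two-step: take an extra reference for the caller, then drop the local one. A sketch of the pattern, where submit_something() is a hypothetical stand-in for amdgpu_ib_schedule():

/*
 * Sketch of the fence hand-over used above. submit_something() is a
 * hypothetical stand-in, assumed to return a fence that the
 * submitter already holds one reference on.
 */
#include <linux/dma-fence.h>
#include <linux/errno.h>

struct dma_fence *submit_something(void);       /* assumed helper */

static int run_and_return_fence(struct dma_fence **fence)
{
        struct dma_fence *f = submit_something();

        if (!f)
                return -ENOMEM;
        if (fence)
                *fence = dma_fence_get(f);      /* caller's reference */
        dma_fence_put(f);                       /* drop the local one */
        return 0;
}
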
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 44d49b576513..d98041f7508d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | |||
@@ -29,9 +29,9 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev); | |||
29 | int amdgpu_vce_suspend(struct amdgpu_device *adev); | 29 | int amdgpu_vce_suspend(struct amdgpu_device *adev); |
30 | int amdgpu_vce_resume(struct amdgpu_device *adev); | 30 | int amdgpu_vce_resume(struct amdgpu_device *adev); |
31 | int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | 31 | int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, |
32 | struct fence **fence); | 32 | struct dma_fence **fence); |
33 | int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | 33 | int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, |
34 | bool direct, struct fence **fence); | 34 | bool direct, struct dma_fence **fence); |
35 | void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); | 35 | void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); |
36 | int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); | 36 | int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); |
37 | int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); | 37 | int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ded57dd538e2..e480263387e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -25,7 +25,7 @@ | |||
25 | * Alex Deucher | 25 | * Alex Deucher |
26 | * Jerome Glisse | 26 | * Jerome Glisse |
27 | */ | 27 | */ |
28 | #include <linux/fence-array.h> | 28 | #include <linux/dma-fence-array.h> |
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include <drm/amdgpu_drm.h> | 30 | #include <drm/amdgpu_drm.h> |
31 | #include "amdgpu.h" | 31 | #include "amdgpu.h" |
@@ -199,14 +199,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev, | |||
199 | * Allocate an id for the vm, adding fences to the sync obj as necessary. | 199 | * Allocate an id for the vm, adding fences to the sync obj as necessary. |
200 | */ | 200 | */ |
201 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | 201 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, |
202 | struct amdgpu_sync *sync, struct fence *fence, | 202 | struct amdgpu_sync *sync, struct dma_fence *fence, |
203 | struct amdgpu_job *job) | 203 | struct amdgpu_job *job) |
204 | { | 204 | { |
205 | struct amdgpu_device *adev = ring->adev; | 205 | struct amdgpu_device *adev = ring->adev; |
206 | uint64_t fence_context = adev->fence_context + ring->idx; | 206 | uint64_t fence_context = adev->fence_context + ring->idx; |
207 | struct fence *updates = sync->last_vm_update; | 207 | struct dma_fence *updates = sync->last_vm_update; |
208 | struct amdgpu_vm_id *id, *idle; | 208 | struct amdgpu_vm_id *id, *idle; |
209 | struct fence **fences; | 209 | struct dma_fence **fences; |
210 | unsigned i; | 210 | unsigned i; |
211 | int r = 0; | 211 | int r = 0; |
212 | 212 | ||
@@ -230,17 +230,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
230 | if (&idle->list == &adev->vm_manager.ids_lru) { | 230 | if (&idle->list == &adev->vm_manager.ids_lru) { |
231 | u64 fence_context = adev->vm_manager.fence_context + ring->idx; | 231 | u64 fence_context = adev->vm_manager.fence_context + ring->idx; |
232 | unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; | 232 | unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; |
233 | struct fence_array *array; | 233 | struct dma_fence_array *array; |
234 | unsigned j; | 234 | unsigned j; |
235 | 235 | ||
236 | for (j = 0; j < i; ++j) | 236 | for (j = 0; j < i; ++j) |
237 | fence_get(fences[j]); | 237 | dma_fence_get(fences[j]); |
238 | 238 | ||
239 | array = fence_array_create(i, fences, fence_context, | 239 | array = dma_fence_array_create(i, fences, fence_context, |
240 | seqno, true); | 240 | seqno, true); |
241 | if (!array) { | 241 | if (!array) { |
242 | for (j = 0; j < i; ++j) | 242 | for (j = 0; j < i; ++j) |
243 | fence_put(fences[j]); | 243 | dma_fence_put(fences[j]); |
244 | kfree(fences); | 244 | kfree(fences); |
245 | r = -ENOMEM; | 245 | r = -ENOMEM; |
246 | goto error; | 246 | goto error; |
@@ -248,7 +248,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
248 | 248 | ||
249 | 249 | ||
250 | r = amdgpu_sync_fence(ring->adev, sync, &array->base); | 250 | r = amdgpu_sync_fence(ring->adev, sync, &array->base); |
251 | fence_put(&array->base); | 251 | dma_fence_put(&array->base); |
252 | if (r) | 252 | if (r) |
253 | goto error; | 253 | goto error; |
254 | 254 | ||
@@ -262,7 +262,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
262 | /* Check if we can use a VMID already assigned to this VM */ | 262 | /* Check if we can use a VMID already assigned to this VM */ |
263 | i = ring->idx; | 263 | i = ring->idx; |
264 | do { | 264 | do { |
265 | struct fence *flushed; | 265 | struct dma_fence *flushed; |
266 | 266 | ||
267 | id = vm->ids[i++]; | 267 | id = vm->ids[i++]; |
268 | if (i == AMDGPU_MAX_RINGS) | 268 | if (i == AMDGPU_MAX_RINGS) |
@@ -284,12 +284,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
284 | continue; | 284 | continue; |
285 | 285 | ||
286 | if (id->last_flush->context != fence_context && | 286 | if (id->last_flush->context != fence_context && |
287 | !fence_is_signaled(id->last_flush)) | 287 | !dma_fence_is_signaled(id->last_flush)) |
288 | continue; | 288 | continue; |
289 | 289 | ||
290 | flushed = id->flushed_updates; | 290 | flushed = id->flushed_updates; |
291 | if (updates && | 291 | if (updates && |
292 | (!flushed || fence_is_later(updates, flushed))) | 292 | (!flushed || dma_fence_is_later(updates, flushed))) |
293 | continue; | 293 | continue; |
294 | 294 | ||
295 | /* Good we can use this VMID. Remember this submission as | 295 | /* Good we can use this VMID. Remember this submission as |
@@ -320,14 +320,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
320 | if (r) | 320 | if (r) |
321 | goto error; | 321 | goto error; |
322 | 322 | ||
323 | fence_put(id->first); | 323 | dma_fence_put(id->first); |
324 | id->first = fence_get(fence); | 324 | id->first = dma_fence_get(fence); |
325 | 325 | ||
326 | fence_put(id->last_flush); | 326 | dma_fence_put(id->last_flush); |
327 | id->last_flush = NULL; | 327 | id->last_flush = NULL; |
328 | 328 | ||
329 | fence_put(id->flushed_updates); | 329 | dma_fence_put(id->flushed_updates); |
330 | id->flushed_updates = fence_get(updates); | 330 | id->flushed_updates = dma_fence_get(updates); |
331 | 331 | ||
332 | id->pd_gpu_addr = job->vm_pd_addr; | 332 | id->pd_gpu_addr = job->vm_pd_addr; |
333 | id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); | 333 | id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); |
@@ -398,7 +398,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) | |||
398 | 398 | ||
399 | if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || | 399 | if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || |
400 | amdgpu_vm_is_gpu_reset(adev, id))) { | 400 | amdgpu_vm_is_gpu_reset(adev, id))) { |
401 | struct fence *fence; | 401 | struct dma_fence *fence; |
402 | 402 | ||
403 | trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); | 403 | trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); |
404 | amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); | 404 | amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); |
@@ -408,7 +408,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) | |||
408 | return r; | 408 | return r; |
409 | 409 | ||
410 | mutex_lock(&adev->vm_manager.lock); | 410 | mutex_lock(&adev->vm_manager.lock); |
411 | fence_put(id->last_flush); | 411 | dma_fence_put(id->last_flush); |
412 | id->last_flush = fence; | 412 | id->last_flush = fence; |
413 | mutex_unlock(&adev->vm_manager.lock); | 413 | mutex_unlock(&adev->vm_manager.lock); |
414 | } | 414 | } |
@@ -542,7 +542,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
542 | struct amdgpu_bo *bo) | 542 | struct amdgpu_bo *bo) |
543 | { | 543 | { |
544 | struct amdgpu_ring *ring; | 544 | struct amdgpu_ring *ring; |
545 | struct fence *fence = NULL; | 545 | struct dma_fence *fence = NULL; |
546 | struct amdgpu_job *job; | 546 | struct amdgpu_job *job; |
547 | struct amdgpu_pte_update_params params; | 547 | struct amdgpu_pte_update_params params; |
548 | unsigned entries; | 548 | unsigned entries; |
@@ -583,7 +583,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
583 | goto error_free; | 583 | goto error_free; |
584 | 584 | ||
585 | amdgpu_bo_fence(bo, fence, true); | 585 | amdgpu_bo_fence(bo, fence, true); |
586 | fence_put(fence); | 586 | dma_fence_put(fence); |
587 | return 0; | 587 | return 0; |
588 | 588 | ||
589 | error_free: | 589 | error_free: |
@@ -640,7 +640,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |||
640 | unsigned count = 0, pt_idx, ndw; | 640 | unsigned count = 0, pt_idx, ndw; |
641 | struct amdgpu_job *job; | 641 | struct amdgpu_job *job; |
642 | struct amdgpu_pte_update_params params; | 642 | struct amdgpu_pte_update_params params; |
643 | struct fence *fence = NULL; | 643 | struct dma_fence *fence = NULL; |
644 | 644 | ||
645 | int r; | 645 | int r; |
646 | 646 | ||
@@ -750,9 +750,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |||
750 | goto error_free; | 750 | goto error_free; |
751 | 751 | ||
752 | amdgpu_bo_fence(vm->page_directory, fence, true); | 752 | amdgpu_bo_fence(vm->page_directory, fence, true); |
753 | fence_put(vm->page_directory_fence); | 753 | dma_fence_put(vm->page_directory_fence); |
754 | vm->page_directory_fence = fence_get(fence); | 754 | vm->page_directory_fence = dma_fence_get(fence); |
755 | fence_put(fence); | 755 | dma_fence_put(fence); |
756 | 756 | ||
757 | return 0; | 757 | return 0; |
758 | 758 | ||
@@ -938,20 +938,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, | |||
938 | * Returns 0 for success, -EINVAL for failure. | 938 | * Returns 0 for success, -EINVAL for failure. |
939 | */ | 939 | */ |
940 | static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | 940 | static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, |
941 | struct fence *exclusive, | 941 | struct dma_fence *exclusive, |
942 | uint64_t src, | 942 | uint64_t src, |
943 | dma_addr_t *pages_addr, | 943 | dma_addr_t *pages_addr, |
944 | struct amdgpu_vm *vm, | 944 | struct amdgpu_vm *vm, |
945 | uint64_t start, uint64_t last, | 945 | uint64_t start, uint64_t last, |
946 | uint32_t flags, uint64_t addr, | 946 | uint32_t flags, uint64_t addr, |
947 | struct fence **fence) | 947 | struct dma_fence **fence) |
948 | { | 948 | { |
949 | struct amdgpu_ring *ring; | 949 | struct amdgpu_ring *ring; |
950 | void *owner = AMDGPU_FENCE_OWNER_VM; | 950 | void *owner = AMDGPU_FENCE_OWNER_VM; |
951 | unsigned nptes, ncmds, ndw; | 951 | unsigned nptes, ncmds, ndw; |
952 | struct amdgpu_job *job; | 952 | struct amdgpu_job *job; |
953 | struct amdgpu_pte_update_params params; | 953 | struct amdgpu_pte_update_params params; |
954 | struct fence *f = NULL; | 954 | struct dma_fence *f = NULL; |
955 | int r; | 955 | int r; |
956 | 956 | ||
957 | memset(¶ms, 0, sizeof(params)); | 957 | memset(¶ms, 0, sizeof(params)); |
@@ -1054,10 +1054,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |||
1054 | 1054 | ||
1055 | amdgpu_bo_fence(vm->page_directory, f, true); | 1055 | amdgpu_bo_fence(vm->page_directory, f, true); |
1056 | if (fence) { | 1056 | if (fence) { |
1057 | fence_put(*fence); | 1057 | dma_fence_put(*fence); |
1058 | *fence = fence_get(f); | 1058 | *fence = dma_fence_get(f); |
1059 | } | 1059 | } |
1060 | fence_put(f); | 1060 | dma_fence_put(f); |
1061 | return 0; | 1061 | return 0; |
1062 | 1062 | ||
1063 | error_free: | 1063 | error_free: |
@@ -1083,14 +1083,14 @@ error_free: | |||
1083 | * Returns 0 for success, -EINVAL for failure. | 1083 | * Returns 0 for success, -EINVAL for failure. |
1084 | */ | 1084 | */ |
1085 | static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, | 1085 | static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, |
1086 | struct fence *exclusive, | 1086 | struct dma_fence *exclusive, |
1087 | uint32_t gtt_flags, | 1087 | uint32_t gtt_flags, |
1088 | dma_addr_t *pages_addr, | 1088 | dma_addr_t *pages_addr, |
1089 | struct amdgpu_vm *vm, | 1089 | struct amdgpu_vm *vm, |
1090 | struct amdgpu_bo_va_mapping *mapping, | 1090 | struct amdgpu_bo_va_mapping *mapping, |
1091 | uint32_t flags, | 1091 | uint32_t flags, |
1092 | struct drm_mm_node *nodes, | 1092 | struct drm_mm_node *nodes, |
1093 | struct fence **fence) | 1093 | struct dma_fence **fence) |
1094 | { | 1094 | { |
1095 | uint64_t pfn, src = 0, start = mapping->it.start; | 1095 | uint64_t pfn, src = 0, start = mapping->it.start; |
1096 | int r; | 1096 | int r; |
@@ -1178,7 +1178,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |||
1178 | uint32_t gtt_flags, flags; | 1178 | uint32_t gtt_flags, flags; |
1179 | struct ttm_mem_reg *mem; | 1179 | struct ttm_mem_reg *mem; |
1180 | struct drm_mm_node *nodes; | 1180 | struct drm_mm_node *nodes; |
1181 | struct fence *exclusive; | 1181 | struct dma_fence *exclusive; |
1182 | int r; | 1182 | int r; |
1183 | 1183 | ||
1184 | if (clear) { | 1184 | if (clear) { |
@@ -1562,7 +1562,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |||
1562 | kfree(mapping); | 1562 | kfree(mapping); |
1563 | } | 1563 | } |
1564 | 1564 | ||
1565 | fence_put(bo_va->last_pt_update); | 1565 | dma_fence_put(bo_va->last_pt_update); |
1566 | kfree(bo_va); | 1566 | kfree(bo_va); |
1567 | } | 1567 | } |
1568 | 1568 | ||
@@ -1725,7 +1725,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1725 | 1725 | ||
1726 | amdgpu_bo_unref(&vm->page_directory->shadow); | 1726 | amdgpu_bo_unref(&vm->page_directory->shadow); |
1727 | amdgpu_bo_unref(&vm->page_directory); | 1727 | amdgpu_bo_unref(&vm->page_directory); |
1728 | fence_put(vm->page_directory_fence); | 1728 | dma_fence_put(vm->page_directory_fence); |
1729 | } | 1729 | } |
1730 | 1730 | ||
1731 | /** | 1731 | /** |
@@ -1749,7 +1749,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) | |||
1749 | &adev->vm_manager.ids_lru); | 1749 | &adev->vm_manager.ids_lru); |
1750 | } | 1750 | } |
1751 | 1751 | ||
1752 | adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); | 1752 | adev->vm_manager.fence_context = |
1753 | dma_fence_context_alloc(AMDGPU_MAX_RINGS); | ||
1753 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | 1754 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) |
1754 | adev->vm_manager.seqno[i] = 0; | 1755 | adev->vm_manager.seqno[i] = 0; |
1755 | 1756 | ||
@@ -1771,8 +1772,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) | |||
1771 | for (i = 0; i < AMDGPU_NUM_VM; ++i) { | 1772 | for (i = 0; i < AMDGPU_NUM_VM; ++i) { |
1772 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; | 1773 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; |
1773 | 1774 | ||
1774 | fence_put(adev->vm_manager.ids[i].first); | 1775 | dma_fence_put(adev->vm_manager.ids[i].first); |
1775 | amdgpu_sync_free(&adev->vm_manager.ids[i].active); | 1776 | amdgpu_sync_free(&adev->vm_manager.ids[i].active); |
1776 | fence_put(id->flushed_updates); | 1777 | dma_fence_put(id->flushed_updates); |
1777 | } | 1778 | } |
1778 | } | 1779 | } |
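
amdgpu_vm_grab_id() above bundles the per-VMID fences into a dma_fence_array. On success, dma_fence_array_create() consumes both the references and the fences[] storage; on failure, both stay with the caller, which is why the code grabs each fence first and releases everything by hand only in the error path. A sketch of that ownership rule, with a hypothetical bundle() helper:

/*
 * Sketch of the ownership rule around dma_fence_array_create():
 * on success the array takes over the fence references and the
 * fences[] storage; on failure both remain the caller's problem.
 */
#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static struct dma_fence *bundle(struct dma_fence **fences, int n,
                                u64 context, unsigned seqno)
{
        struct dma_fence_array *array;
        int i;

        for (i = 0; i < n; i++)
                dma_fence_get(fences[i]);       /* one ref per entry */

        /* signal_on_any = true: done as soon as any fence signals */
        array = dma_fence_array_create(n, fences, context, seqno, true);
        if (!array) {
                for (i = 0; i < n; i++)
                        dma_fence_put(fences[i]);
                kfree(fences);
                return NULL;
        }
        return &array->base;
}
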
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 42a629b56095..adbc2f5e5c7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |||
@@ -94,7 +94,7 @@ struct amdgpu_vm { | |||
94 | /* contains the page directory */ | 94 | /* contains the page directory */ |
95 | struct amdgpu_bo *page_directory; | 95 | struct amdgpu_bo *page_directory; |
96 | unsigned max_pde_used; | 96 | unsigned max_pde_used; |
97 | struct fence *page_directory_fence; | 97 | struct dma_fence *page_directory_fence; |
98 | uint64_t last_eviction_counter; | 98 | uint64_t last_eviction_counter; |
99 | 99 | ||
100 | /* array of page tables, one for each page directory entry */ | 100 | /* array of page tables, one for each page directory entry */ |
@@ -115,14 +115,14 @@ struct amdgpu_vm { | |||
115 | 115 | ||
116 | struct amdgpu_vm_id { | 116 | struct amdgpu_vm_id { |
117 | struct list_head list; | 117 | struct list_head list; |
118 | struct fence *first; | 118 | struct dma_fence *first; |
119 | struct amdgpu_sync active; | 119 | struct amdgpu_sync active; |
120 | struct fence *last_flush; | 120 | struct dma_fence *last_flush; |
121 | atomic64_t owner; | 121 | atomic64_t owner; |
122 | 122 | ||
123 | uint64_t pd_gpu_addr; | 123 | uint64_t pd_gpu_addr; |
124 | /* last flushed PD/PT update */ | 124 | /* last flushed PD/PT update */ |
125 | struct fence *flushed_updates; | 125 | struct dma_fence *flushed_updates; |
126 | 126 | ||
127 | uint32_t current_gpu_reset_count; | 127 | uint32_t current_gpu_reset_count; |
128 | 128 | ||
@@ -172,7 +172,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, | |||
172 | void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, | 172 | void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, |
173 | struct amdgpu_vm *vm); | 173 | struct amdgpu_vm *vm); |
174 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | 174 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, |
175 | struct amdgpu_sync *sync, struct fence *fence, | 175 | struct amdgpu_sync *sync, struct dma_fence *fence, |
176 | struct amdgpu_job *job); | 176 | struct amdgpu_job *job); |
177 | int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); | 177 | int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); |
178 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); | 178 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); |
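
The flushed_updates member backs the VMID-reuse check in amdgpu_vm_grab_id(): an ID may be recycled only if its last flushed update is not older than the pending updates fence. A sketch of the comparison, assuming both fences belong to the same context, since dma_fence_is_later() is only defined there:

/*
 * Sketch of the VMID-reuse test. id_covers_updates() is
 * hypothetical; flushed and updates are assumed to come from the
 * same fence context.
 */
#include <linux/dma-fence.h>

static bool id_covers_updates(struct dma_fence *flushed,
                              struct dma_fence *updates)
{
        if (!updates)           /* no page table updates pending */
                return true;
        if (!flushed)           /* this id never flushed anything */
                return false;
        /* reusable only if updates is not newer than the last flush */
        return !dma_fence_is_later(updates, flushed);
}
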
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index c7340b6e17c9..4c34dbc7a254 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
@@ -622,7 +622,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
622 | { | 622 | { |
623 | struct amdgpu_device *adev = ring->adev; | 623 | struct amdgpu_device *adev = ring->adev; |
624 | struct amdgpu_ib ib; | 624 | struct amdgpu_ib ib; |
625 | struct fence *f = NULL; | 625 | struct dma_fence *f = NULL; |
626 | unsigned index; | 626 | unsigned index; |
627 | u32 tmp = 0; | 627 | u32 tmp = 0; |
628 | u64 gpu_addr; | 628 | u64 gpu_addr; |
@@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
655 | if (r) | 655 | if (r) |
656 | goto err1; | 656 | goto err1; |
657 | 657 | ||
658 | r = fence_wait_timeout(f, false, timeout); | 658 | r = dma_fence_wait_timeout(f, false, timeout); |
659 | if (r == 0) { | 659 | if (r == 0) { |
660 | DRM_ERROR("amdgpu: IB test timed out\n"); | 660 | DRM_ERROR("amdgpu: IB test timed out\n"); |
661 | r = -ETIMEDOUT; | 661 | r = -ETIMEDOUT; |
@@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
675 | 675 | ||
676 | err1: | 676 | err1: |
677 | amdgpu_ib_free(adev, &ib, NULL); | 677 | amdgpu_ib_free(adev, &ib, NULL); |
678 | fence_put(f); | 678 | dma_fence_put(f); |
679 | err0: | 679 | err0: |
680 | amdgpu_wb_free(adev, index); | 680 | amdgpu_wb_free(adev, index); |
681 | return r; | 681 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 96dd05dca694..21c086e02e7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | |||
@@ -1522,7 +1522,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1522 | { | 1522 | { |
1523 | struct amdgpu_device *adev = ring->adev; | 1523 | struct amdgpu_device *adev = ring->adev; |
1524 | struct amdgpu_ib ib; | 1524 | struct amdgpu_ib ib; |
1525 | struct fence *f = NULL; | 1525 | struct dma_fence *f = NULL; |
1526 | uint32_t scratch; | 1526 | uint32_t scratch; |
1527 | uint32_t tmp = 0; | 1527 | uint32_t tmp = 0; |
1528 | long r; | 1528 | long r; |
@@ -1548,7 +1548,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1548 | if (r) | 1548 | if (r) |
1549 | goto err2; | 1549 | goto err2; |
1550 | 1550 | ||
1551 | r = fence_wait_timeout(f, false, timeout); | 1551 | r = dma_fence_wait_timeout(f, false, timeout); |
1552 | if (r == 0) { | 1552 | if (r == 0) { |
1553 | DRM_ERROR("amdgpu: IB test timed out\n"); | 1553 | DRM_ERROR("amdgpu: IB test timed out\n"); |
1554 | r = -ETIMEDOUT; | 1554 | r = -ETIMEDOUT; |
@@ -1569,7 +1569,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1569 | 1569 | ||
1570 | err2: | 1570 | err2: |
1571 | amdgpu_ib_free(adev, &ib, NULL); | 1571 | amdgpu_ib_free(adev, &ib, NULL); |
1572 | fence_put(f); | 1572 | dma_fence_put(f); |
1573 | err1: | 1573 | err1: |
1574 | amdgpu_gfx_scratch_free(adev, scratch); | 1574 | amdgpu_gfx_scratch_free(adev, scratch); |
1575 | return r; | 1575 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 903aa240e946..5b631fd1a879 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -2286,7 +2286,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
2286 | { | 2286 | { |
2287 | struct amdgpu_device *adev = ring->adev; | 2287 | struct amdgpu_device *adev = ring->adev; |
2288 | struct amdgpu_ib ib; | 2288 | struct amdgpu_ib ib; |
2289 | struct fence *f = NULL; | 2289 | struct dma_fence *f = NULL; |
2290 | uint32_t scratch; | 2290 | uint32_t scratch; |
2291 | uint32_t tmp = 0; | 2291 | uint32_t tmp = 0; |
2292 | long r; | 2292 | long r; |
@@ -2312,7 +2312,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
2312 | if (r) | 2312 | if (r) |
2313 | goto err2; | 2313 | goto err2; |
2314 | 2314 | ||
2315 | r = fence_wait_timeout(f, false, timeout); | 2315 | r = dma_fence_wait_timeout(f, false, timeout); |
2316 | if (r == 0) { | 2316 | if (r == 0) { |
2317 | DRM_ERROR("amdgpu: IB test timed out\n"); | 2317 | DRM_ERROR("amdgpu: IB test timed out\n"); |
2318 | r = -ETIMEDOUT; | 2318 | r = -ETIMEDOUT; |
@@ -2333,7 +2333,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
2333 | 2333 | ||
2334 | err2: | 2334 | err2: |
2335 | amdgpu_ib_free(adev, &ib, NULL); | 2335 | amdgpu_ib_free(adev, &ib, NULL); |
2336 | fence_put(f); | 2336 | dma_fence_put(f); |
2337 | err1: | 2337 | err1: |
2338 | amdgpu_gfx_scratch_free(adev, scratch); | 2338 | amdgpu_gfx_scratch_free(adev, scratch); |
2339 | return r; | 2339 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 1c2544f314c0..86a7ca5d8511 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -798,7 +798,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
798 | { | 798 | { |
799 | struct amdgpu_device *adev = ring->adev; | 799 | struct amdgpu_device *adev = ring->adev; |
800 | struct amdgpu_ib ib; | 800 | struct amdgpu_ib ib; |
801 | struct fence *f = NULL; | 801 | struct dma_fence *f = NULL; |
802 | uint32_t scratch; | 802 | uint32_t scratch; |
803 | uint32_t tmp = 0; | 803 | uint32_t tmp = 0; |
804 | long r; | 804 | long r; |
@@ -824,7 +824,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
824 | if (r) | 824 | if (r) |
825 | goto err2; | 825 | goto err2; |
826 | 826 | ||
827 | r = fence_wait_timeout(f, false, timeout); | 827 | r = dma_fence_wait_timeout(f, false, timeout); |
828 | if (r == 0) { | 828 | if (r == 0) { |
829 | DRM_ERROR("amdgpu: IB test timed out.\n"); | 829 | DRM_ERROR("amdgpu: IB test timed out.\n"); |
830 | r = -ETIMEDOUT; | 830 | r = -ETIMEDOUT; |
@@ -844,7 +844,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
844 | } | 844 | } |
845 | err2: | 845 | err2: |
846 | amdgpu_ib_free(adev, &ib, NULL); | 846 | amdgpu_ib_free(adev, &ib, NULL); |
847 | fence_put(f); | 847 | dma_fence_put(f); |
848 | err1: | 848 | err1: |
849 | amdgpu_gfx_scratch_free(adev, scratch); | 849 | amdgpu_gfx_scratch_free(adev, scratch); |
850 | return r; | 850 | return r; |
@@ -1564,7 +1564,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) | |||
1564 | { | 1564 | { |
1565 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; | 1565 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; |
1566 | struct amdgpu_ib ib; | 1566 | struct amdgpu_ib ib; |
1567 | struct fence *f = NULL; | 1567 | struct dma_fence *f = NULL; |
1568 | int r, i; | 1568 | int r, i; |
1569 | u32 tmp; | 1569 | u32 tmp; |
1570 | unsigned total_size, vgpr_offset, sgpr_offset; | 1570 | unsigned total_size, vgpr_offset, sgpr_offset; |
@@ -1697,7 +1697,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) | |||
1697 | } | 1697 | } |
1698 | 1698 | ||
1699 | /* wait for the GPU to finish processing the IB */ | 1699 | /* wait for the GPU to finish processing the IB */ |
1700 | r = fence_wait(f, false); | 1700 | r = dma_fence_wait(f, false); |
1701 | if (r) { | 1701 | if (r) { |
1702 | DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); | 1702 | DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); |
1703 | goto fail; | 1703 | goto fail; |
@@ -1718,7 +1718,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) | |||
1718 | 1718 | ||
1719 | fail: | 1719 | fail: |
1720 | amdgpu_ib_free(adev, &ib, NULL); | 1720 | amdgpu_ib_free(adev, &ib, NULL); |
1721 | fence_put(f); | 1721 | dma_fence_put(f); |
1722 | 1722 | ||
1723 | return r; | 1723 | return r; |
1724 | } | 1724 | } |
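
The EDC workaround above waits on its IB without a timeout. dma_fence_wait() blocks until the fence signals; when called interruptibly it can also return -ERESTARTSYS. A sketch with a hypothetical wait_for_ib() helper:

/*
 * Sketch only: an unbounded wait as in the EDC workaround above.
 * With intr = true the wait could instead return -ERESTARTSYS when
 * interrupted by a signal.
 */
#include <linux/dma-fence.h>
#include <linux/printk.h>

static long wait_for_ib(struct dma_fence *f)
{
        long r = dma_fence_wait(f, false);      /* uninterruptible */

        if (r)
                pr_err("fence wait failed (%ld)\n", r);
        dma_fence_put(f);                       /* drop our reference */
        return r;
}
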
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 03e8856b08ce..e81aa4682760 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
@@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
668 | { | 668 | { |
669 | struct amdgpu_device *adev = ring->adev; | 669 | struct amdgpu_device *adev = ring->adev; |
670 | struct amdgpu_ib ib; | 670 | struct amdgpu_ib ib; |
671 | struct fence *f = NULL; | 671 | struct dma_fence *f = NULL; |
672 | unsigned index; | 672 | unsigned index; |
673 | u32 tmp = 0; | 673 | u32 tmp = 0; |
674 | u64 gpu_addr; | 674 | u64 gpu_addr; |
@@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
705 | if (r) | 705 | if (r) |
706 | goto err1; | 706 | goto err1; |
707 | 707 | ||
708 | r = fence_wait_timeout(f, false, timeout); | 708 | r = dma_fence_wait_timeout(f, false, timeout); |
709 | if (r == 0) { | 709 | if (r == 0) { |
710 | DRM_ERROR("amdgpu: IB test timed out\n"); | 710 | DRM_ERROR("amdgpu: IB test timed out\n"); |
711 | r = -ETIMEDOUT; | 711 | r = -ETIMEDOUT; |
@@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
725 | 725 | ||
726 | err1: | 726 | err1: |
727 | amdgpu_ib_free(adev, &ib, NULL); | 727 | amdgpu_ib_free(adev, &ib, NULL); |
728 | fence_put(f); | 728 | dma_fence_put(f); |
729 | err0: | 729 | err0: |
730 | amdgpu_wb_free(adev, index); | 730 | amdgpu_wb_free(adev, index); |
731 | return r; | 731 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 6172d01e985a..77f146587c60 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
@@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
871 | { | 871 | { |
872 | struct amdgpu_device *adev = ring->adev; | 872 | struct amdgpu_device *adev = ring->adev; |
873 | struct amdgpu_ib ib; | 873 | struct amdgpu_ib ib; |
874 | struct fence *f = NULL; | 874 | struct dma_fence *f = NULL; |
875 | unsigned index; | 875 | unsigned index; |
876 | u32 tmp = 0; | 876 | u32 tmp = 0; |
877 | u64 gpu_addr; | 877 | u64 gpu_addr; |
@@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
908 | if (r) | 908 | if (r) |
909 | goto err1; | 909 | goto err1; |
910 | 910 | ||
911 | r = fence_wait_timeout(f, false, timeout); | 911 | r = dma_fence_wait_timeout(f, false, timeout); |
912 | if (r == 0) { | 912 | if (r == 0) { |
913 | DRM_ERROR("amdgpu: IB test timed out\n"); | 913 | DRM_ERROR("amdgpu: IB test timed out\n"); |
914 | r = -ETIMEDOUT; | 914 | r = -ETIMEDOUT; |
@@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
927 | } | 927 | } |
928 | err1: | 928 | err1: |
929 | amdgpu_ib_free(adev, &ib, NULL); | 929 | amdgpu_ib_free(adev, &ib, NULL); |
930 | fence_put(f); | 930 | dma_fence_put(f); |
931 | err0: | 931 | err0: |
932 | amdgpu_wb_free(adev, index); | 932 | amdgpu_wb_free(adev, index); |
933 | return r; | 933 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 14265c5c349e..3dd552ae0b59 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c | |||
@@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
274 | { | 274 | { |
275 | struct amdgpu_device *adev = ring->adev; | 275 | struct amdgpu_device *adev = ring->adev; |
276 | struct amdgpu_ib ib; | 276 | struct amdgpu_ib ib; |
277 | struct fence *f = NULL; | 277 | struct dma_fence *f = NULL; |
278 | unsigned index; | 278 | unsigned index; |
279 | u32 tmp = 0; | 279 | u32 tmp = 0; |
280 | u64 gpu_addr; | 280 | u64 gpu_addr; |
@@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
305 | if (r) | 305 | if (r) |
306 | goto err1; | 306 | goto err1; |
307 | 307 | ||
308 | r = fence_wait_timeout(f, false, timeout); | 308 | r = dma_fence_wait_timeout(f, false, timeout); |
309 | if (r == 0) { | 309 | if (r == 0) { |
310 | DRM_ERROR("amdgpu: IB test timed out\n"); | 310 | DRM_ERROR("amdgpu: IB test timed out\n"); |
311 | r = -ETIMEDOUT; | 311 | r = -ETIMEDOUT; |
@@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
325 | 325 | ||
326 | err1: | 326 | err1: |
327 | amdgpu_ib_free(adev, &ib, NULL); | 327 | amdgpu_ib_free(adev, &ib, NULL); |
328 | fence_put(f); | 328 | dma_fence_put(f); |
329 | err0: | 329 | err0: |
330 | amdgpu_wb_free(adev, index); | 330 | amdgpu_wb_free(adev, index); |
331 | return r; | 331 | return r; |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index b961a1c6caf3..dbd4fd3a810b 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | |||
@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job, | |||
17 | TP_STRUCT__entry( | 17 | TP_STRUCT__entry( |
18 | __field(struct amd_sched_entity *, entity) | 18 | __field(struct amd_sched_entity *, entity) |
19 | __field(struct amd_sched_job *, sched_job) | 19 | __field(struct amd_sched_job *, sched_job) |
20 | __field(struct fence *, fence) | 20 | __field(struct dma_fence *, fence) |
21 | __field(const char *, name) | 21 | __field(const char *, name) |
22 | __field(u32, job_count) | 22 | __field(u32, job_count) |
23 | __field(int, hw_job_count) | 23 | __field(int, hw_job_count) |
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job, | |||
42 | TP_PROTO(struct amd_sched_fence *fence), | 42 | TP_PROTO(struct amd_sched_fence *fence), |
43 | TP_ARGS(fence), | 43 | TP_ARGS(fence), |
44 | TP_STRUCT__entry( | 44 | TP_STRUCT__entry( |
45 | __field(struct fence *, fence) | 45 | __field(struct dma_fence *, fence) |
46 | ), | 46 | ), |
47 | 47 | ||
48 | TP_fast_assign( | 48 | TP_fast_assign( |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 963a24d46a93..5364e6a7ec8f 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -32,7 +32,7 @@ | |||
32 | 32 | ||
33 | static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); | 33 | static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); |
34 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); | 34 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); |
35 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); | 35 | static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); |
36 | 36 | ||
37 | struct kmem_cache *sched_fence_slab; | 37 | struct kmem_cache *sched_fence_slab; |
38 | atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); | 38 | atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); |
@@ -141,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, | |||
141 | return r; | 141 | return r; |
142 | 142 | ||
143 | atomic_set(&entity->fence_seq, 0); | 143 | atomic_set(&entity->fence_seq, 0); |
144 | entity->fence_context = fence_context_alloc(2); | 144 | entity->fence_context = dma_fence_context_alloc(2); |
145 | 145 | ||
146 | return 0; | 146 | return 0; |
147 | } | 147 | } |
@@ -221,32 +221,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, | |||
221 | kfifo_free(&entity->job_queue); | 221 | kfifo_free(&entity->job_queue); |
222 | } | 222 | } |
223 | 223 | ||
224 | static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) | 224 | static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) |
225 | { | 225 | { |
226 | struct amd_sched_entity *entity = | 226 | struct amd_sched_entity *entity = |
227 | container_of(cb, struct amd_sched_entity, cb); | 227 | container_of(cb, struct amd_sched_entity, cb); |
228 | entity->dependency = NULL; | 228 | entity->dependency = NULL; |
229 | fence_put(f); | 229 | dma_fence_put(f); |
230 | amd_sched_wakeup(entity->sched); | 230 | amd_sched_wakeup(entity->sched); |
231 | } | 231 | } |
232 | 232 | ||
233 | static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb) | 233 | static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) |
234 | { | 234 | { |
235 | struct amd_sched_entity *entity = | 235 | struct amd_sched_entity *entity = |
236 | container_of(cb, struct amd_sched_entity, cb); | 236 | container_of(cb, struct amd_sched_entity, cb); |
237 | entity->dependency = NULL; | 237 | entity->dependency = NULL; |
238 | fence_put(f); | 238 | dma_fence_put(f); |
239 | } | 239 | } |
240 | 240 | ||
241 | static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) | 241 | static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) |
242 | { | 242 | { |
243 | struct amd_gpu_scheduler *sched = entity->sched; | 243 | struct amd_gpu_scheduler *sched = entity->sched; |
244 | struct fence * fence = entity->dependency; | 244 | struct dma_fence * fence = entity->dependency; |
245 | struct amd_sched_fence *s_fence; | 245 | struct amd_sched_fence *s_fence; |
246 | 246 | ||
247 | if (fence->context == entity->fence_context) { | 247 | if (fence->context == entity->fence_context) { |
248 | /* We can ignore fences from ourself */ | 248 | /* We can ignore fences from ourself */ |
249 | fence_put(entity->dependency); | 249 | dma_fence_put(entity->dependency); |
250 | return false; | 250 | return false; |
251 | } | 251 | } |
252 | 252 | ||
@@ -257,23 +257,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) | |||
257 | * Fence is from the same scheduler, only need to wait for | 257 | * Fence is from the same scheduler, only need to wait for |
258 | * it to be scheduled | 258 | * it to be scheduled |
259 | */ | 259 | */ |
260 | fence = fence_get(&s_fence->scheduled); | 260 | fence = dma_fence_get(&s_fence->scheduled); |
261 | fence_put(entity->dependency); | 261 | dma_fence_put(entity->dependency); |
262 | entity->dependency = fence; | 262 | entity->dependency = fence; |
263 | if (!fence_add_callback(fence, &entity->cb, | 263 | if (!dma_fence_add_callback(fence, &entity->cb, |
264 | amd_sched_entity_clear_dep)) | 264 | amd_sched_entity_clear_dep)) |
265 | return true; | 265 | return true; |
266 | 266 | ||
267 | /* Ignore it when it is already scheduled */ | 267 | /* Ignore it when it is already scheduled */ |
268 | fence_put(fence); | 268 | dma_fence_put(fence); |
269 | return false; | 269 | return false; |
270 | } | 270 | } |
271 | 271 | ||
272 | if (!fence_add_callback(entity->dependency, &entity->cb, | 272 | if (!dma_fence_add_callback(entity->dependency, &entity->cb, |
273 | amd_sched_entity_wakeup)) | 273 | amd_sched_entity_wakeup)) |
274 | return true; | 274 | return true; |
275 | 275 | ||
276 | fence_put(entity->dependency); | 276 | dma_fence_put(entity->dependency); |
277 | return false; | 277 | return false; |
278 | } | 278 | } |
279 | 279 | ||
@@ -354,7 +354,8 @@ static void amd_sched_job_finish(struct work_struct *work) | |||
354 | sched->ops->free_job(s_job); | 354 | sched->ops->free_job(s_job); |
355 | } | 355 | } |
356 | 356 | ||
357 | static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb) | 357 | static void amd_sched_job_finish_cb(struct dma_fence *f, |
358 | struct dma_fence_cb *cb) | ||
358 | { | 359 | { |
359 | struct amd_sched_job *job = container_of(cb, struct amd_sched_job, | 360 | struct amd_sched_job *job = container_of(cb, struct amd_sched_job, |
360 | finish_cb); | 361 | finish_cb); |
@@ -388,8 +389,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched) | |||
388 | 389 | ||
389 | spin_lock(&sched->job_list_lock); | 390 | spin_lock(&sched->job_list_lock); |
390 | list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { | 391 | list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { |
391 | if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) { | 392 | if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) { |
392 | fence_put(s_job->s_fence->parent); | 393 | dma_fence_put(s_job->s_fence->parent); |
393 | s_job->s_fence->parent = NULL; | 394 | s_job->s_fence->parent = NULL; |
394 | } | 395 | } |
395 | } | 396 | } |
@@ -410,21 +411,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) | |||
410 | 411 | ||
411 | list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { | 412 | list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { |
412 | struct amd_sched_fence *s_fence = s_job->s_fence; | 413 | struct amd_sched_fence *s_fence = s_job->s_fence; |
413 | struct fence *fence; | 414 | struct dma_fence *fence; |
414 | 415 | ||
415 | spin_unlock(&sched->job_list_lock); | 416 | spin_unlock(&sched->job_list_lock); |
416 | fence = sched->ops->run_job(s_job); | 417 | fence = sched->ops->run_job(s_job); |
417 | atomic_inc(&sched->hw_rq_count); | 418 | atomic_inc(&sched->hw_rq_count); |
418 | if (fence) { | 419 | if (fence) { |
419 | s_fence->parent = fence_get(fence); | 420 | s_fence->parent = dma_fence_get(fence); |
420 | r = fence_add_callback(fence, &s_fence->cb, | 421 | r = dma_fence_add_callback(fence, &s_fence->cb, |
421 | amd_sched_process_job); | 422 | amd_sched_process_job); |
422 | if (r == -ENOENT) | 423 | if (r == -ENOENT) |
423 | amd_sched_process_job(fence, &s_fence->cb); | 424 | amd_sched_process_job(fence, &s_fence->cb); |
424 | else if (r) | 425 | else if (r) |
425 | DRM_ERROR("fence add callback failed (%d)\n", | 426 | DRM_ERROR("fence add callback failed (%d)\n", |
426 | r); | 427 | r); |
427 | fence_put(fence); | 428 | dma_fence_put(fence); |
428 | } else { | 429 | } else { |
429 | DRM_ERROR("Failed to run job!\n"); | 430 | DRM_ERROR("Failed to run job!\n"); |
430 | amd_sched_process_job(NULL, &s_fence->cb); | 431 | amd_sched_process_job(NULL, &s_fence->cb); |
@@ -446,8 +447,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job) | |||
446 | struct amd_sched_entity *entity = sched_job->s_entity; | 447 | struct amd_sched_entity *entity = sched_job->s_entity; |
447 | 448 | ||
448 | trace_amd_sched_job(sched_job); | 449 | trace_amd_sched_job(sched_job); |
449 | fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb, | 450 | dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb, |
450 | amd_sched_job_finish_cb); | 451 | amd_sched_job_finish_cb); |
451 | wait_event(entity->sched->job_scheduled, | 452 | wait_event(entity->sched->job_scheduled, |
452 | amd_sched_entity_in(sched_job)); | 453 | amd_sched_entity_in(sched_job)); |
453 | } | 454 | } |
@@ -511,7 +512,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched) | |||
511 | return entity; | 512 | return entity; |
512 | } | 513 | } |
513 | 514 | ||
514 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) | 515 | static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) |
515 | { | 516 | { |
516 | struct amd_sched_fence *s_fence = | 517 | struct amd_sched_fence *s_fence = |
517 | container_of(cb, struct amd_sched_fence, cb); | 518 | container_of(cb, struct amd_sched_fence, cb); |
@@ -521,7 +522,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) | |||
521 | amd_sched_fence_finished(s_fence); | 522 | amd_sched_fence_finished(s_fence); |
522 | 523 | ||
523 | trace_amd_sched_process_job(s_fence); | 524 | trace_amd_sched_process_job(s_fence); |
524 | fence_put(&s_fence->finished); | 525 | dma_fence_put(&s_fence->finished); |
525 | wake_up_interruptible(&sched->wake_up_worker); | 526 | wake_up_interruptible(&sched->wake_up_worker); |
526 | } | 527 | } |
527 | 528 | ||
@@ -547,7 +548,7 @@ static int amd_sched_main(void *param) | |||
547 | struct amd_sched_entity *entity = NULL; | 548 | struct amd_sched_entity *entity = NULL; |
548 | struct amd_sched_fence *s_fence; | 549 | struct amd_sched_fence *s_fence; |
549 | struct amd_sched_job *sched_job; | 550 | struct amd_sched_job *sched_job; |
550 | struct fence *fence; | 551 | struct dma_fence *fence; |
551 | 552 | ||
552 | wait_event_interruptible(sched->wake_up_worker, | 553 | wait_event_interruptible(sched->wake_up_worker, |
553 | (!amd_sched_blocked(sched) && | 554 | (!amd_sched_blocked(sched) && |
@@ -569,15 +570,15 @@ static int amd_sched_main(void *param) | |||
569 | fence = sched->ops->run_job(sched_job); | 570 | fence = sched->ops->run_job(sched_job); |
570 | amd_sched_fence_scheduled(s_fence); | 571 | amd_sched_fence_scheduled(s_fence); |
571 | if (fence) { | 572 | if (fence) { |
572 | s_fence->parent = fence_get(fence); | 573 | s_fence->parent = dma_fence_get(fence); |
573 | r = fence_add_callback(fence, &s_fence->cb, | 574 | r = dma_fence_add_callback(fence, &s_fence->cb, |
574 | amd_sched_process_job); | 575 | amd_sched_process_job); |
575 | if (r == -ENOENT) | 576 | if (r == -ENOENT) |
576 | amd_sched_process_job(fence, &s_fence->cb); | 577 | amd_sched_process_job(fence, &s_fence->cb); |
577 | else if (r) | 578 | else if (r) |
578 | DRM_ERROR("fence add callback failed (%d)\n", | 579 | DRM_ERROR("fence add callback failed (%d)\n", |
579 | r); | 580 | r); |
580 | fence_put(fence); | 581 | dma_fence_put(fence); |
581 | } else { | 582 | } else { |
582 | DRM_ERROR("Failed to run job!\n"); | 583 | DRM_ERROR("Failed to run job!\n"); |
583 | amd_sched_process_job(NULL, &s_fence->cb); | 584 | amd_sched_process_job(NULL, &s_fence->cb); |
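
The scheduler attaches its completion handlers with dma_fence_add_callback(), which returns -ENOENT when the fence has already signaled; in that case the callback was never queued and has to be invoked directly, exactly as the run_job paths above do. A sketch of the pattern, with hypothetical on_done() and watch_fence():

/*
 * Sketch of the callback pattern used by the run_job handling above.
 * The -ENOENT branch covers a fence that signaled before the
 * callback could be added.
 */
#include <linux/dma-fence.h>
#include <linux/printk.h>

static void on_done(struct dma_fence *f, struct dma_fence_cb *cb)
{
        /* runs once f signals, possibly from interrupt context */
}

static void watch_fence(struct dma_fence *f, struct dma_fence_cb *cb)
{
        int r = dma_fence_add_callback(f, cb, on_done);

        if (r == -ENOENT)       /* already signaled: run it by hand */
                on_done(f, cb);
        else if (r)
                pr_err("fence add callback failed (%d)\n", r);
}
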
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 7cbbbfb502ef..876aa43b57df 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | |||
@@ -25,7 +25,7 @@ | |||
25 | #define _GPU_SCHEDULER_H_ | 25 | #define _GPU_SCHEDULER_H_ |
26 | 26 | ||
27 | #include <linux/kfifo.h> | 27 | #include <linux/kfifo.h> |
28 | #include <linux/fence.h> | 28 | #include <linux/dma-fence.h> |
29 | 29 | ||
30 | struct amd_gpu_scheduler; | 30 | struct amd_gpu_scheduler; |
31 | struct amd_sched_rq; | 31 | struct amd_sched_rq; |
@@ -50,8 +50,8 @@ struct amd_sched_entity { | |||
50 | atomic_t fence_seq; | 50 | atomic_t fence_seq; |
51 | uint64_t fence_context; | 51 | uint64_t fence_context; |
52 | 52 | ||
53 | struct fence *dependency; | 53 | struct dma_fence *dependency; |
54 | struct fence_cb cb; | 54 | struct dma_fence_cb cb; |
55 | }; | 55 | }; |
56 | 56 | ||
57 | /** | 57 | /** |
@@ -66,10 +66,10 @@ struct amd_sched_rq { | |||
66 | }; | 66 | }; |
67 | 67 | ||
68 | struct amd_sched_fence { | 68 | struct amd_sched_fence { |
69 | struct fence scheduled; | 69 | struct dma_fence scheduled; |
70 | struct fence finished; | 70 | struct dma_fence finished; |
71 | struct fence_cb cb; | 71 | struct dma_fence_cb cb; |
72 | struct fence *parent; | 72 | struct dma_fence *parent; |
73 | struct amd_gpu_scheduler *sched; | 73 | struct amd_gpu_scheduler *sched; |
74 | spinlock_t lock; | 74 | spinlock_t lock; |
75 | void *owner; | 75 | void *owner; |
@@ -79,15 +79,15 @@ struct amd_sched_job { | |||
79 | struct amd_gpu_scheduler *sched; | 79 | struct amd_gpu_scheduler *sched; |
80 | struct amd_sched_entity *s_entity; | 80 | struct amd_sched_entity *s_entity; |
81 | struct amd_sched_fence *s_fence; | 81 | struct amd_sched_fence *s_fence; |
82 | struct fence_cb finish_cb; | 82 | struct dma_fence_cb finish_cb; |
83 | struct work_struct finish_work; | 83 | struct work_struct finish_work; |
84 | struct list_head node; | 84 | struct list_head node; |
85 | struct delayed_work work_tdr; | 85 | struct delayed_work work_tdr; |
86 | }; | 86 | }; |
87 | 87 | ||
88 | extern const struct fence_ops amd_sched_fence_ops_scheduled; | 88 | extern const struct dma_fence_ops amd_sched_fence_ops_scheduled; |
89 | extern const struct fence_ops amd_sched_fence_ops_finished; | 89 | extern const struct dma_fence_ops amd_sched_fence_ops_finished; |
90 | static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) | 90 | static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f) |
91 | { | 91 | { |
92 | if (f->ops == &amd_sched_fence_ops_scheduled) | 92 | if (f->ops == &amd_sched_fence_ops_scheduled) |
93 | return container_of(f, struct amd_sched_fence, scheduled); | 93 | return container_of(f, struct amd_sched_fence, scheduled); |
@@ -103,8 +103,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) | |||
103 | * these functions should be implemented in driver side | 103 | * these functions should be implemented in driver side |
104 | */ | 104 | */ |
105 | struct amd_sched_backend_ops { | 105 | struct amd_sched_backend_ops { |
106 | struct fence *(*dependency)(struct amd_sched_job *sched_job); | 106 | struct dma_fence *(*dependency)(struct amd_sched_job *sched_job); |
107 | struct fence *(*run_job)(struct amd_sched_job *sched_job); | 107 | struct dma_fence *(*run_job)(struct amd_sched_job *sched_job); |
108 | void (*timedout_job)(struct amd_sched_job *sched_job); | 108 | void (*timedout_job)(struct amd_sched_job *sched_job); |
109 | void (*free_job)(struct amd_sched_job *sched_job); | 109 | void (*free_job)(struct amd_sched_job *sched_job); |
110 | }; | 110 | }; |
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 6b63beaf7574..c26fa298fe9e 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c | |||
@@ -42,46 +42,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, | |||
42 | spin_lock_init(&fence->lock); | 42 | spin_lock_init(&fence->lock); |
43 | 43 | ||
44 | seq = atomic_inc_return(&entity->fence_seq); | 44 | seq = atomic_inc_return(&entity->fence_seq); |
45 | fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, | 45 | dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, |
46 | &fence->lock, entity->fence_context, seq); | 46 | &fence->lock, entity->fence_context, seq); |
47 | fence_init(&fence->finished, &amd_sched_fence_ops_finished, | 47 | dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished, |
48 | &fence->lock, entity->fence_context + 1, seq); | 48 | &fence->lock, entity->fence_context + 1, seq); |
49 | 49 | ||
50 | return fence; | 50 | return fence; |
51 | } | 51 | } |
52 | 52 | ||
53 | void amd_sched_fence_scheduled(struct amd_sched_fence *fence) | 53 | void amd_sched_fence_scheduled(struct amd_sched_fence *fence) |
54 | { | 54 | { |
55 | int ret = fence_signal(&fence->scheduled); | 55 | int ret = dma_fence_signal(&fence->scheduled); |
56 | 56 | ||
57 | if (!ret) | 57 | if (!ret) |
58 | FENCE_TRACE(&fence->scheduled, "signaled from irq context\n"); | 58 | DMA_FENCE_TRACE(&fence->scheduled, |
59 | "signaled from irq context\n"); | ||
59 | else | 60 | else |
60 | FENCE_TRACE(&fence->scheduled, "was already signaled\n"); | 61 | DMA_FENCE_TRACE(&fence->scheduled, |
62 | "was already signaled\n"); | ||
61 | } | 63 | } |
62 | 64 | ||
63 | void amd_sched_fence_finished(struct amd_sched_fence *fence) | 65 | void amd_sched_fence_finished(struct amd_sched_fence *fence) |
64 | { | 66 | { |
65 | int ret = fence_signal(&fence->finished); | 67 | int ret = dma_fence_signal(&fence->finished); |
66 | 68 | ||
67 | if (!ret) | 69 | if (!ret) |
68 | FENCE_TRACE(&fence->finished, "signaled from irq context\n"); | 70 | DMA_FENCE_TRACE(&fence->finished, |
71 | "signaled from irq context\n"); | ||
69 | else | 72 | else |
70 | FENCE_TRACE(&fence->finished, "was already signaled\n"); | 73 | DMA_FENCE_TRACE(&fence->finished, |
74 | "was already signaled\n"); | ||
71 | } | 75 | } |
72 | 76 | ||
73 | static const char *amd_sched_fence_get_driver_name(struct fence *fence) | 77 | static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence) |
74 | { | 78 | { |
75 | return "amd_sched"; | 79 | return "amd_sched"; |
76 | } | 80 | } |
77 | 81 | ||
78 | static const char *amd_sched_fence_get_timeline_name(struct fence *f) | 82 | static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f) |
79 | { | 83 | { |
80 | struct amd_sched_fence *fence = to_amd_sched_fence(f); | 84 | struct amd_sched_fence *fence = to_amd_sched_fence(f); |
81 | return (const char *)fence->sched->name; | 85 | return (const char *)fence->sched->name; |
82 | } | 86 | } |
83 | 87 | ||
84 | static bool amd_sched_fence_enable_signaling(struct fence *f) | 88 | static bool amd_sched_fence_enable_signaling(struct dma_fence *f) |
85 | { | 89 | { |
86 | return true; | 90 | return true; |
87 | } | 91 | } |
@@ -95,10 +99,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f) | |||
95 | */ | 99 | */ |
96 | static void amd_sched_fence_free(struct rcu_head *rcu) | 100 | static void amd_sched_fence_free(struct rcu_head *rcu) |
97 | { | 101 | { |
98 | struct fence *f = container_of(rcu, struct fence, rcu); | 102 | struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); |
99 | struct amd_sched_fence *fence = to_amd_sched_fence(f); | 103 | struct amd_sched_fence *fence = to_amd_sched_fence(f); |
100 | 104 | ||
101 | fence_put(fence->parent); | 105 | dma_fence_put(fence->parent); |
102 | kmem_cache_free(sched_fence_slab, fence); | 106 | kmem_cache_free(sched_fence_slab, fence); |
103 | } | 107 | } |
104 | 108 | ||
@@ -110,7 +114,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu) | |||
110 | * This function is called when the reference count becomes zero. | 114 | * This function is called when the reference count becomes zero. |
111 | * It just RCU schedules freeing up the fence. | 115 | * It just RCU schedules freeing up the fence. |
112 | */ | 116 | */ |
113 | static void amd_sched_fence_release_scheduled(struct fence *f) | 117 | static void amd_sched_fence_release_scheduled(struct dma_fence *f) |
114 | { | 118 | { |
115 | struct amd_sched_fence *fence = to_amd_sched_fence(f); | 119 | struct amd_sched_fence *fence = to_amd_sched_fence(f); |
116 | 120 | ||
@@ -124,27 +128,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f) | |||
124 | * | 128 | * |
125 | * Drop the extra reference from the scheduled fence to the base fence. | 129 | * Drop the extra reference from the scheduled fence to the base fence. |
126 | */ | 130 | */ |
127 | static void amd_sched_fence_release_finished(struct fence *f) | 131 | static void amd_sched_fence_release_finished(struct dma_fence *f) |
128 | { | 132 | { |
129 | struct amd_sched_fence *fence = to_amd_sched_fence(f); | 133 | struct amd_sched_fence *fence = to_amd_sched_fence(f); |
130 | 134 | ||
131 | fence_put(&fence->scheduled); | 135 | dma_fence_put(&fence->scheduled); |
132 | } | 136 | } |
133 | 137 | ||
134 | const struct fence_ops amd_sched_fence_ops_scheduled = { | 138 | const struct dma_fence_ops amd_sched_fence_ops_scheduled = { |
135 | .get_driver_name = amd_sched_fence_get_driver_name, | 139 | .get_driver_name = amd_sched_fence_get_driver_name, |
136 | .get_timeline_name = amd_sched_fence_get_timeline_name, | 140 | .get_timeline_name = amd_sched_fence_get_timeline_name, |
137 | .enable_signaling = amd_sched_fence_enable_signaling, | 141 | .enable_signaling = amd_sched_fence_enable_signaling, |
138 | .signaled = NULL, | 142 | .signaled = NULL, |
139 | .wait = fence_default_wait, | 143 | .wait = dma_fence_default_wait, |
140 | .release = amd_sched_fence_release_scheduled, | 144 | .release = amd_sched_fence_release_scheduled, |
141 | }; | 145 | }; |
142 | 146 | ||
143 | const struct fence_ops amd_sched_fence_ops_finished = { | 147 | const struct dma_fence_ops amd_sched_fence_ops_finished = { |
144 | .get_driver_name = amd_sched_fence_get_driver_name, | 148 | .get_driver_name = amd_sched_fence_get_driver_name, |
145 | .get_timeline_name = amd_sched_fence_get_timeline_name, | 149 | .get_timeline_name = amd_sched_fence_get_timeline_name, |
146 | .enable_signaling = amd_sched_fence_enable_signaling, | 150 | .enable_signaling = amd_sched_fence_enable_signaling, |
147 | .signaled = NULL, | 151 | .signaled = NULL, |
148 | .wait = fence_default_wait, | 152 | .wait = dma_fence_default_wait, |
149 | .release = amd_sched_fence_release_finished, | 153 | .release = amd_sched_fence_release_finished, |
150 | }; | 154 | }; |
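The amd_sched_fence.c hunks above are mechanical fallout of renaming struct fence to struct dma_fence: fence_signal(), fence_put() and fence_default_wait() gain the dma_fence_ prefix, FENCE_TRACE becomes DMA_FENCE_TRACE, and struct fence_ops becomes struct dma_fence_ops, with no behavioural change. For readers new to the API, a minimal provider under the renamed interface looks roughly like the sketch below. This is an illustration only, not part of the commit; the demo_* names and the static lock are invented.

	#include <linux/dma-fence.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* protects the fence's state */

	static const char *demo_get_driver_name(struct dma_fence *f)
	{
		return "demo";
	}

	static const char *demo_get_timeline_name(struct dma_fence *f)
	{
		return "demo-timeline";
	}

	static bool demo_enable_signaling(struct dma_fence *f)
	{
		return true;	/* nothing to arm; signal comes from elsewhere */
	}

	static const struct dma_fence_ops demo_fence_ops = {
		.get_driver_name = demo_get_driver_name,
		.get_timeline_name = demo_get_timeline_name,
		.enable_signaling = demo_enable_signaling,
		.wait = dma_fence_default_wait,	/* sleeps until signaled */
	};

	static struct dma_fence *demo_fence_create(u64 context, unsigned int seqno)
	{
		struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		dma_fence_init(f, &demo_fence_ops, &demo_lock, context, seqno);
		return f;	/* signal later with dma_fence_signal(f) */
	}

The context value would normally come from dma_fence_context_alloc(1), and the last dma_fence_put() frees the fence through the default release path, as the two amd_sched release callbacks above override.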
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index fb6a418ce6be..6477d1a65266 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c | |||
@@ -453,7 +453,8 @@ static int hdlcd_probe(struct platform_device *pdev) | |||
453 | return -EAGAIN; | 453 | return -EAGAIN; |
454 | } | 454 | } |
455 | 455 | ||
456 | component_match_add(&pdev->dev, &match, compare_dev, port); | 456 | drm_of_component_match_add(&pdev->dev, &match, compare_dev, port); |
457 | of_node_put(port); | ||
457 | 458 | ||
458 | return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops, | 459 | return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops, |
459 | match); | 460 | match); |
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 9280358b8f15..9f4739452a25 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c | |||
@@ -493,7 +493,9 @@ static int malidp_platform_probe(struct platform_device *pdev) | |||
493 | return -EAGAIN; | 493 | return -EAGAIN; |
494 | } | 494 | } |
495 | 495 | ||
496 | component_match_add(&pdev->dev, &match, malidp_compare_dev, port); | 496 | drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev, |
497 | port); | ||
498 | of_node_put(port); | ||
497 | return component_master_add_with_match(&pdev->dev, &malidp_master_ops, | 499 | return component_master_add_with_match(&pdev->dev, &malidp_master_ops, |
498 | match); | 500 | match); |
499 | } | 501 | } |
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 1e0e68f608e4..94e46da9a758 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c | |||
@@ -254,7 +254,7 @@ static void armada_add_endpoints(struct device *dev, | |||
254 | continue; | 254 | continue; |
255 | } | 255 | } |
256 | 256 | ||
257 | component_match_add(dev, match, compare_of, remote); | 257 | drm_of_component_match_add(dev, match, compare_of, remote); |
258 | of_node_put(remote); | 258 | of_node_put(remote); |
259 | } | 259 | } |
260 | } | 260 | } |
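The hdlcd, malidp and armada hunks are the consumer side of converting DT component matching to component_match_add_release(): masters stop calling component_match_add() on a raw device_node and use the new drm_of_component_match_add() helper, which takes its own reference on the OF node for as long as the match entry lives and releases it when the match array is freed. The caller still drops the reference it obtained itself, hence the added of_node_put() calls. A minimal probe using the helper could look like the sketch below; the demo_* names are invented for illustration.

	#include <drm/drm_of.h>
	#include <linux/component.h>
	#include <linux/of_graph.h>
	#include <linux/platform_device.h>

	static int demo_compare_of(struct device *dev, void *data)
	{
		return dev->of_node == data;	/* match a child by its OF node */
	}

	static int demo_bind(struct device *dev)
	{
		return 0;	/* bind components here */
	}

	static void demo_unbind(struct device *dev)
	{
	}

	static const struct component_master_ops demo_master_ops = {
		.bind = demo_bind,
		.unbind = demo_unbind,
	};

	static int demo_master_probe(struct platform_device *pdev)
	{
		struct component_match *match = NULL;
		struct device_node *port;

		port = of_graph_get_port_by_id(pdev->dev.of_node, 0);
		if (!port)
			return -ENODEV;

		/* the helper grabs its own reference for the match entry */
		drm_of_component_match_add(&pdev->dev, &match,
					   demo_compare_of, port);
		of_node_put(port);	/* drop the reference we took above */

		return component_master_add_with_match(&pdev->dev,
						       &demo_master_ops, match);
	}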
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 10e12e74fc9f..bd6acc829f97 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig | |||
@@ -57,6 +57,13 @@ config DRM_PARADE_PS8622 | |||
57 | ---help--- | 57 | ---help--- |
58 | Parade eDP-LVDS bridge chip driver. | 58 | Parade eDP-LVDS bridge chip driver. |
59 | 59 | ||
60 | config DRM_SIL_SII8620 | ||
61 | tristate "Silicon Image SII8620 HDMI/MHL bridge" | ||
62 | depends on OF | ||
63 | select DRM_KMS_HELPER | ||
64 | help | ||
65 | Silicon Image SII8620 HDMI/MHL bridge chip driver. | ||
66 | |||
60 | config DRM_SII902X | 67 | config DRM_SII902X |
61 | tristate "Silicon Image sii902x RGB/HDMI bridge" | 68 | tristate "Silicon Image sii902x RGB/HDMI bridge" |
62 | depends on OF | 69 | depends on OF |
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index cdf3a3cf765d..97ed1a5fea9a 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile | |||
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o | |||
6 | obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o | 6 | obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o |
7 | obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o | 7 | obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o |
8 | obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o | 8 | obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o |
9 | obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o | ||
9 | obj-$(CONFIG_DRM_SII902X) += sii902x.o | 10 | obj-$(CONFIG_DRM_SII902X) += sii902x.o |
10 | obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o | 11 | obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o |
11 | obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ | 12 | obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ |
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c new file mode 100644 index 000000000000..b2c267df7ee7 --- /dev/null +++ b/drivers/gpu/drm/bridge/sil-sii8620.c | |||
@@ -0,0 +1,1564 @@ | |||
1 | /* | ||
2 | * Silicon Image SiI8620 HDMI/MHL bridge driver | ||
3 | * | ||
4 | * Copyright (C) 2015, Samsung Electronics Co., Ltd. | ||
5 | * Andrzej Hajda <a.hajda@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <drm/bridge/mhl.h> | ||
13 | #include <drm/drm_crtc.h> | ||
14 | #include <drm/drm_edid.h> | ||
15 | |||
16 | #include <linux/clk.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/gpio/consumer.h> | ||
19 | #include <linux/i2c.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/irq.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/mutex.h> | ||
26 | #include <linux/regulator/consumer.h> | ||
27 | #include <linux/slab.h> | ||
28 | |||
29 | #include "sil-sii8620.h" | ||
30 | |||
31 | #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3) | ||
32 | |||
33 | enum sii8620_mode { | ||
34 | CM_DISCONNECTED, | ||
35 | CM_DISCOVERY, | ||
36 | CM_MHL1, | ||
37 | CM_MHL3, | ||
38 | CM_ECBUS_S | ||
39 | }; | ||
40 | |||
41 | enum sii8620_sink_type { | ||
42 | SINK_NONE, | ||
43 | SINK_HDMI, | ||
44 | SINK_DVI | ||
45 | }; | ||
46 | |||
47 | enum sii8620_mt_state { | ||
48 | MT_STATE_READY, | ||
49 | MT_STATE_BUSY, | ||
50 | MT_STATE_DONE | ||
51 | }; | ||
52 | |||
53 | struct sii8620 { | ||
54 | struct drm_bridge bridge; | ||
55 | struct device *dev; | ||
56 | struct clk *clk_xtal; | ||
57 | struct gpio_desc *gpio_reset; | ||
58 | struct gpio_desc *gpio_int; | ||
59 | struct regulator_bulk_data supplies[2]; | ||
60 | struct mutex lock; /* context lock, protects fields below */ | ||
61 | int error; | ||
62 | enum sii8620_mode mode; | ||
63 | enum sii8620_sink_type sink_type; | ||
64 | u8 cbus_status; | ||
65 | u8 stat[MHL_DST_SIZE]; | ||
66 | u8 xstat[MHL_XDS_SIZE]; | ||
67 | u8 devcap[MHL_DCAP_SIZE]; | ||
68 | u8 xdevcap[MHL_XDC_SIZE]; | ||
69 | u8 avif[19]; | ||
70 | struct edid *edid; | ||
71 | unsigned int gen2_write_burst:1; | ||
72 | enum sii8620_mt_state mt_state; | ||
73 | struct list_head mt_queue; | ||
74 | }; | ||
75 | |||
76 | struct sii8620_mt_msg; | ||
77 | |||
78 | typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx, | ||
79 | struct sii8620_mt_msg *msg); | ||
80 | |||
81 | struct sii8620_mt_msg { | ||
82 | struct list_head node; | ||
83 | u8 reg[4]; | ||
84 | u8 ret; | ||
85 | sii8620_mt_msg_cb send; | ||
86 | sii8620_mt_msg_cb recv; | ||
87 | }; | ||
88 | |||
89 | static const u8 sii8620_i2c_page[] = { | ||
90 | 0x39, /* Main System */ | ||
91 | 0x3d, /* TDM and HSIC */ | ||
92 | 0x49, /* TMDS Receiver, MHL EDID */ | ||
93 | 0x4d, /* eMSC, HDCP, HSIC */ | ||
94 | 0x5d, /* MHL Spec */ | ||
95 | 0x64, /* MHL CBUS */ | ||
96 | 0x59, /* Hardware TPI (Transmitter Programming Interface) */ | ||
97 | 0x61, /* eCBUS-S, eCBUS-D */ | ||
98 | }; | ||
99 | |||
100 | static void sii8620_fetch_edid(struct sii8620 *ctx); | ||
101 | static void sii8620_set_upstream_edid(struct sii8620 *ctx); | ||
102 | static void sii8620_enable_hpd(struct sii8620 *ctx); | ||
103 | static void sii8620_mhl_disconnected(struct sii8620 *ctx); | ||
104 | |||
105 | static int sii8620_clear_error(struct sii8620 *ctx) | ||
106 | { | ||
107 | int ret = ctx->error; | ||
108 | |||
109 | ctx->error = 0; | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len) | ||
114 | { | ||
115 | struct device *dev = ctx->dev; | ||
116 | struct i2c_client *client = to_i2c_client(dev); | ||
117 | u8 data = addr; | ||
118 | struct i2c_msg msg[] = { | ||
119 | { | ||
120 | .addr = sii8620_i2c_page[addr >> 8], | ||
121 | .flags = client->flags, | ||
122 | .len = 1, | ||
123 | .buf = &data | ||
124 | }, | ||
125 | { | ||
126 | .addr = sii8620_i2c_page[addr >> 8], | ||
127 | .flags = client->flags | I2C_M_RD, | ||
128 | .len = len, | ||
129 | .buf = buf | ||
130 | }, | ||
131 | }; | ||
132 | int ret; | ||
133 | |||
134 | if (ctx->error) | ||
135 | return; | ||
136 | |||
137 | ret = i2c_transfer(client->adapter, msg, 2); | ||
138 | dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret); | ||
139 | |||
140 | if (ret != 2) { | ||
141 | dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n", | ||
142 | addr, len, ret); | ||
143 | ctx->error = ret < 0 ? ret : -EIO; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | static u8 sii8620_readb(struct sii8620 *ctx, u16 addr) | ||
148 | { | ||
149 | u8 ret; | ||
150 | |||
151 | sii8620_read_buf(ctx, addr, &ret, 1); | ||
152 | return ret; | ||
153 | } | ||
154 | |||
155 | static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf, | ||
156 | int len) | ||
157 | { | ||
158 | struct device *dev = ctx->dev; | ||
159 | struct i2c_client *client = to_i2c_client(dev); | ||
160 | u8 data[2]; | ||
161 | struct i2c_msg msg = { | ||
162 | .addr = sii8620_i2c_page[addr >> 8], | ||
163 | .flags = client->flags, | ||
164 | .len = len + 1, | ||
165 | }; | ||
166 | int ret; | ||
167 | |||
168 | if (ctx->error) | ||
169 | return; | ||
170 | |||
171 | if (len > 1) { | ||
172 | msg.buf = kmalloc(len + 1, GFP_KERNEL); | ||
173 | if (!msg.buf) { | ||
174 | ctx->error = -ENOMEM; | ||
175 | return; | ||
176 | } | ||
177 | memcpy(msg.buf + 1, buf, len); | ||
178 | } else { | ||
179 | msg.buf = data; | ||
180 | msg.buf[1] = *buf; | ||
181 | } | ||
182 | |||
183 | msg.buf[0] = addr; | ||
184 | |||
185 | ret = i2c_transfer(client->adapter, &msg, 1); | ||
186 | dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret); | ||
187 | |||
188 | if (ret != 1) { | ||
189 | dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n", | ||
190 | addr, len, buf, ret); | ||
191 | ctx->error = ret ?: -EIO; | ||
192 | } | ||
193 | |||
194 | if (len > 1) | ||
195 | kfree(msg.buf); | ||
196 | } | ||
197 | |||
198 | #define sii8620_write(ctx, addr, arr...) \ | ||
199 | ({\ | ||
200 | u8 d[] = { arr }; \ | ||
201 | sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \ | ||
202 | }) | ||
203 | |||
204 | static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len) | ||
205 | { | ||
206 | int i; | ||
207 | |||
208 | for (i = 0; i < len; i += 2) | ||
209 | sii8620_write(ctx, seq[i], seq[i + 1]); | ||
210 | } | ||
211 | |||
212 | #define sii8620_write_seq(ctx, seq...) \ | ||
213 | ({\ | ||
214 | const u16 d[] = { seq }; \ | ||
215 | __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \ | ||
216 | }) | ||
217 | |||
218 | #define sii8620_write_seq_static(ctx, seq...) \ | ||
219 | ({\ | ||
220 | static const u16 d[] = { seq }; \ | ||
221 | __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \ | ||
222 | }) | ||
223 | |||
224 | static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val) | ||
225 | { | ||
226 | val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask); | ||
227 | sii8620_write(ctx, addr, val); | ||
228 | } | ||
229 | |||
230 | static void sii8620_mt_cleanup(struct sii8620 *ctx) | ||
231 | { | ||
232 | struct sii8620_mt_msg *msg, *n; | ||
233 | |||
234 | list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) { | ||
235 | list_del(&msg->node); | ||
236 | kfree(msg); | ||
237 | } | ||
238 | ctx->mt_state = MT_STATE_READY; | ||
239 | } | ||
240 | |||
241 | static void sii8620_mt_work(struct sii8620 *ctx) | ||
242 | { | ||
243 | struct sii8620_mt_msg *msg; | ||
244 | |||
245 | if (ctx->error) | ||
246 | return; | ||
247 | if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue)) | ||
248 | return; | ||
249 | |||
250 | if (ctx->mt_state == MT_STATE_DONE) { | ||
251 | ctx->mt_state = MT_STATE_READY; | ||
252 | msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, | ||
253 | node); | ||
254 | if (msg->recv) | ||
255 | msg->recv(ctx, msg); | ||
256 | list_del(&msg->node); | ||
257 | kfree(msg); | ||
258 | } | ||
259 | |||
260 | if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue)) | ||
261 | return; | ||
262 | |||
263 | ctx->mt_state = MT_STATE_BUSY; | ||
264 | msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node); | ||
265 | if (msg->send) | ||
266 | msg->send(ctx, msg); | ||
267 | } | ||
268 | |||
269 | static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx, | ||
270 | struct sii8620_mt_msg *msg) | ||
271 | { | ||
272 | switch (msg->reg[0]) { | ||
273 | case MHL_WRITE_STAT: | ||
274 | case MHL_SET_INT: | ||
275 | sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2); | ||
276 | sii8620_write(ctx, REG_MSC_COMMAND_START, | ||
277 | BIT_MSC_COMMAND_START_WRITE_STAT); | ||
278 | break; | ||
279 | case MHL_MSC_MSG: | ||
280 | sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3); | ||
281 | sii8620_write(ctx, REG_MSC_COMMAND_START, | ||
282 | BIT_MSC_COMMAND_START_MSC_MSG); | ||
283 | break; | ||
284 | default: | ||
285 | dev_err(ctx->dev, "%s: command %#x not supported\n", __func__, | ||
286 | msg->reg[0]); | ||
287 | } | ||
288 | } | ||
289 | |||
290 | static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx) | ||
291 | { | ||
292 | struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL); | ||
293 | |||
294 | if (!msg) | ||
295 | ctx->error = -ENOMEM; | ||
296 | else | ||
297 | list_add_tail(&msg->node, &ctx->mt_queue); | ||
298 | |||
299 | return msg; | ||
300 | } | ||
301 | |||
302 | static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2) | ||
303 | { | ||
304 | struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx); | ||
305 | |||
306 | if (!msg) | ||
307 | return; | ||
308 | |||
309 | msg->reg[0] = cmd; | ||
310 | msg->reg[1] = arg1; | ||
311 | msg->reg[2] = arg2; | ||
312 | msg->send = sii8620_mt_msc_cmd_send; | ||
313 | } | ||
314 | |||
315 | static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val) | ||
316 | { | ||
317 | sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val); | ||
318 | } | ||
319 | |||
320 | static inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask) | ||
321 | { | ||
322 | sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask); | ||
323 | } | ||
324 | |||
325 | static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data) | ||
326 | { | ||
327 | sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data); | ||
328 | } | ||
329 | |||
330 | static void sii8620_mt_rap(struct sii8620 *ctx, u8 code) | ||
331 | { | ||
332 | sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code); | ||
333 | } | ||
334 | |||
335 | static void sii8620_mt_read_devcap_send(struct sii8620 *ctx, | ||
336 | struct sii8620_mt_msg *msg) | ||
337 | { | ||
338 | u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP | ||
339 | | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO | ||
340 | | BIT_EDID_CTRL_EDID_MODE_EN; | ||
341 | |||
342 | if (msg->reg[0] == MHL_READ_XDEVCAP) | ||
343 | ctrl |= BIT_EDID_CTRL_XDEVCAP_EN; | ||
344 | |||
345 | sii8620_write_seq(ctx, | ||
346 | REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE, | ||
347 | REG_EDID_CTRL, ctrl, | ||
348 | REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START | ||
349 | ); | ||
350 | } | ||
351 | |||
352 | /* copy src to dst, leaving a mask of the changed bits in src */ | ||
353 | static void sii8620_update_array(u8 *dst, u8 *src, int count) | ||
354 | { | ||
355 | while (--count >= 0) { | ||
356 | *src ^= *dst; | ||
357 | *dst++ ^= *src++; | ||
358 | } | ||
359 | } | ||
360 | |||
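The XOR pair in sii8620_update_array() above is an in-place exchange: after the loop, *dst holds the freshly read value while *src is left holding old XOR new, i.e. a mask of exactly the bits that changed, which callers such as sii8620_msc_mr_write_stat() later test to decide which status handlers to run. A worked example with invented values:

	u8 dst = 0x0f;	/* previously cached status byte */
	u8 src = 0x3c;	/* byte just read from the chip */

	src ^= dst;	/* src = 0x33: the bits that differ */
	dst ^= src;	/* dst = 0x3c: cache updated to the new value */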
361 | static void sii8620_mr_devcap(struct sii8620 *ctx) | ||
362 | { | ||
363 | static const char * const sink_str[] = { | ||
364 | [SINK_NONE] = "NONE", | ||
365 | [SINK_HDMI] = "HDMI", | ||
366 | [SINK_DVI] = "DVI" | ||
367 | }; | ||
368 | |||
369 | u8 dcap[MHL_DCAP_SIZE]; | ||
370 | char sink_name[20]; | ||
371 | struct device *dev = ctx->dev; | ||
372 | |||
373 | sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE); | ||
374 | if (ctx->error < 0) | ||
375 | return; | ||
376 | |||
377 | dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap); | ||
378 | dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n", | ||
379 | dcap[MHL_DCAP_MHL_VERSION] / 16, | ||
380 | dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H], | ||
381 | dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H], | ||
382 | dcap[MHL_DCAP_DEVICE_ID_L]); | ||
383 | sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE); | ||
384 | |||
385 | if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK)) | ||
386 | return; | ||
387 | |||
388 | sii8620_fetch_edid(ctx); | ||
389 | if (!ctx->edid) { | ||
390 | dev_err(ctx->dev, "Cannot fetch EDID\n"); | ||
391 | sii8620_mhl_disconnected(ctx); | ||
392 | return; | ||
393 | } | ||
394 | |||
395 | if (drm_detect_hdmi_monitor(ctx->edid)) | ||
396 | ctx->sink_type = SINK_HDMI; | ||
397 | else | ||
398 | ctx->sink_type = SINK_DVI; | ||
399 | |||
400 | drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name)); | ||
401 | |||
402 | dev_info(dev, "detected sink(type: %s): %s\n", | ||
403 | sink_str[ctx->sink_type], sink_name); | ||
404 | sii8620_set_upstream_edid(ctx); | ||
405 | sii8620_enable_hpd(ctx); | ||
406 | } | ||
407 | |||
408 | static void sii8620_mr_xdevcap(struct sii8620 *ctx) | ||
409 | { | ||
410 | sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap, | ||
411 | MHL_XDC_SIZE); | ||
412 | |||
413 | sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE), | ||
414 | MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT); | ||
415 | sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP); | ||
416 | } | ||
417 | |||
418 | static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx, | ||
419 | struct sii8620_mt_msg *msg) | ||
420 | { | ||
421 | u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP | ||
422 | | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO | ||
423 | | BIT_EDID_CTRL_EDID_MODE_EN; | ||
424 | |||
425 | if (msg->reg[0] == MHL_READ_XDEVCAP) | ||
426 | ctrl |= BIT_EDID_CTRL_XDEVCAP_EN; | ||
427 | |||
428 | sii8620_write_seq(ctx, | ||
429 | REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE | ||
430 | | BIT_INTR9_EDID_ERROR, | ||
431 | REG_EDID_CTRL, ctrl, | ||
432 | REG_EDID_FIFO_ADDR, 0 | ||
433 | ); | ||
434 | |||
435 | if (msg->reg[0] == MHL_READ_XDEVCAP) | ||
436 | sii8620_mr_xdevcap(ctx); | ||
437 | else | ||
438 | sii8620_mr_devcap(ctx); | ||
439 | } | ||
440 | |||
441 | static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap) | ||
442 | { | ||
443 | struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx); | ||
444 | |||
445 | if (!msg) | ||
446 | return; | ||
447 | |||
448 | msg->reg[0] = xdevcap ? MHL_READ_XDEVCAP : MHL_READ_DEVCAP; | ||
449 | msg->send = sii8620_mt_read_devcap_send; | ||
450 | msg->recv = sii8620_mt_read_devcap_recv; | ||
451 | } | ||
452 | |||
453 | static void sii8620_fetch_edid(struct sii8620 *ctx) | ||
454 | { | ||
455 | u8 lm_ddc, ddc_cmd, int3, cbus; | ||
456 | int fetched, i; | ||
457 | int edid_len = EDID_LENGTH; | ||
458 | u8 *edid; | ||
459 | |||
460 | sii8620_readb(ctx, REG_CBUS_STATUS); | ||
461 | lm_ddc = sii8620_readb(ctx, REG_LM_DDC); | ||
462 | ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD); | ||
463 | |||
464 | sii8620_write_seq(ctx, | ||
465 | REG_INTR9_MASK, 0, | ||
466 | REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO, | ||
467 | REG_HDCP2X_POLL_CS, 0x71, | ||
468 | REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX, | ||
469 | REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED, | ||
470 | ); | ||
471 | |||
472 | for (i = 0; i < 256; ++i) { | ||
473 | u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS); | ||
474 | |||
475 | if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG)) | ||
476 | break; | ||
477 | sii8620_write(ctx, REG_DDC_STATUS, | ||
478 | BIT_DDC_STATUS_DDC_FIFO_EMPTY); | ||
479 | } | ||
480 | |||
481 | sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1); | ||
482 | |||
483 | edid = kmalloc(EDID_LENGTH, GFP_KERNEL); | ||
484 | if (!edid) { | ||
485 | ctx->error = -ENOMEM; | ||
486 | return; | ||
487 | } | ||
488 | |||
489 | #define FETCH_SIZE 16 | ||
490 | for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) { | ||
491 | sii8620_readb(ctx, REG_DDC_STATUS); | ||
492 | sii8620_write_seq(ctx, | ||
493 | REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT, | ||
494 | REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO, | ||
495 | REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY | ||
496 | ); | ||
497 | sii8620_write_seq(ctx, | ||
498 | REG_DDC_SEGM, fetched >> 8, | ||
499 | REG_DDC_OFFSET, fetched & 0xff, | ||
500 | REG_DDC_DIN_CNT1, FETCH_SIZE, | ||
501 | REG_DDC_DIN_CNT2, 0, | ||
502 | REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK | ||
503 | ); | ||
504 | |||
505 | do { | ||
506 | int3 = sii8620_readb(ctx, REG_INTR3); | ||
507 | cbus = sii8620_readb(ctx, REG_CBUS_STATUS); | ||
508 | |||
509 | if (int3 & BIT_DDC_CMD_DONE) | ||
510 | break; | ||
511 | |||
512 | if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) { | ||
513 | kfree(edid); | ||
514 | edid = NULL; | ||
515 | goto end; | ||
516 | } | ||
517 | } while (1); | ||
518 | |||
519 | sii8620_readb(ctx, REG_DDC_STATUS); | ||
520 | while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE) | ||
521 | usleep_range(10, 20); | ||
522 | |||
523 | sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE); | ||
524 | if (fetched + FETCH_SIZE == EDID_LENGTH) { | ||
525 | u8 ext = ((struct edid *)edid)->extensions; | ||
526 | |||
527 | if (ext) { | ||
528 | u8 *new_edid; | ||
529 | |||
530 | edid_len += ext * EDID_LENGTH; | ||
531 | new_edid = krealloc(edid, edid_len, GFP_KERNEL); | ||
532 | if (!new_edid) { | ||
533 | kfree(edid); | ||
534 | ctx->error = -ENOMEM; | ||
535 | return; | ||
536 | } | ||
537 | edid = new_edid; | ||
538 | } | ||
539 | } | ||
540 | |||
541 | if (fetched + FETCH_SIZE == edid_len) | ||
542 | sii8620_write(ctx, REG_INTR3, int3); | ||
543 | } | ||
544 | |||
545 | sii8620_write(ctx, REG_LM_DDC, lm_ddc); | ||
546 | |||
547 | end: | ||
548 | kfree(ctx->edid); | ||
549 | ctx->edid = (struct edid *)edid; | ||
550 | } | ||
551 | |||
552 | static void sii8620_set_upstream_edid(struct sii8620 *ctx) | ||
553 | { | ||
554 | sii8620_setbits(ctx, REG_DPD, BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N | ||
555 | | BIT_DPD_PD_MHL_CLK_N, 0xff); | ||
556 | |||
557 | sii8620_write_seq_static(ctx, | ||
558 | REG_RX_HDMI_CTRL3, 0x00, | ||
559 | REG_PKT_FILTER_0, 0xFF, | ||
560 | REG_PKT_FILTER_1, 0xFF, | ||
561 | REG_ALICE0_BW_I2C, 0x06 | ||
562 | ); | ||
563 | |||
564 | sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER, | ||
565 | BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff); | ||
566 | |||
567 | sii8620_write_seq_static(ctx, | ||
568 | REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO | ||
569 | | BIT_EDID_CTRL_EDID_MODE_EN, | ||
570 | REG_EDID_FIFO_ADDR, 0, | ||
571 | ); | ||
572 | |||
573 | sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid, | ||
574 | (ctx->edid->extensions + 1) * EDID_LENGTH); | ||
575 | |||
576 | sii8620_write_seq_static(ctx, | ||
577 | REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID | ||
578 | | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO | ||
579 | | BIT_EDID_CTRL_EDID_MODE_EN, | ||
580 | REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE, | ||
581 | REG_INTR9_MASK, 0 | ||
582 | ); | ||
583 | } | ||
584 | |||
585 | static void sii8620_xtal_set_rate(struct sii8620 *ctx) | ||
586 | { | ||
587 | static const struct { | ||
588 | unsigned int rate; | ||
589 | u8 div; | ||
590 | u8 tp1; | ||
591 | } rates[] = { | ||
592 | { 19200, 0x04, 0x53 }, | ||
593 | { 20000, 0x04, 0x62 }, | ||
594 | { 24000, 0x05, 0x75 }, | ||
595 | { 30000, 0x06, 0x92 }, | ||
596 | { 38400, 0x0c, 0xbc }, | ||
597 | }; | ||
598 | unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000; | ||
599 | int i; | ||
600 | |||
601 | for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i) | ||
602 | if (rate <= rates[i].rate) | ||
603 | break; | ||
604 | |||
605 | if (rate != rates[i].rate) | ||
606 | dev_err(ctx->dev, "xtal clock rate(%lukHz) not supported, setting MHL for %ukHz.\n", | ||
607 | rate, rates[i].rate); | ||
608 | |||
609 | sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div); | ||
610 | sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1); | ||
611 | } | ||
612 | |||
613 | static int sii8620_hw_on(struct sii8620 *ctx) | ||
614 | { | ||
615 | int ret; | ||
616 | |||
617 | ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); | ||
618 | if (ret) | ||
619 | return ret; | ||
620 | usleep_range(10000, 20000); | ||
621 | return clk_prepare_enable(ctx->clk_xtal); | ||
622 | } | ||
623 | |||
624 | static int sii8620_hw_off(struct sii8620 *ctx) | ||
625 | { | ||
626 | clk_disable_unprepare(ctx->clk_xtal); | ||
627 | gpiod_set_value(ctx->gpio_reset, 1); | ||
628 | return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); | ||
629 | } | ||
630 | |||
631 | static void sii8620_hw_reset(struct sii8620 *ctx) | ||
632 | { | ||
633 | usleep_range(10000, 20000); | ||
634 | gpiod_set_value(ctx->gpio_reset, 0); | ||
635 | usleep_range(5000, 20000); | ||
636 | gpiod_set_value(ctx->gpio_reset, 1); | ||
637 | usleep_range(10000, 20000); | ||
638 | gpiod_set_value(ctx->gpio_reset, 0); | ||
639 | msleep(300); | ||
640 | } | ||
641 | |||
642 | static void sii8620_cbus_reset(struct sii8620 *ctx) | ||
643 | { | ||
644 | sii8620_write_seq_static(ctx, | ||
645 | REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST | ||
646 | | BIT_PWD_SRST_CBUS_RST_SW_EN, | ||
647 | REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN | ||
648 | ); | ||
649 | } | ||
650 | |||
651 | static void sii8620_set_auto_zone(struct sii8620 *ctx) | ||
652 | { | ||
653 | if (ctx->mode != CM_MHL1) { | ||
654 | sii8620_write_seq_static(ctx, | ||
655 | REG_TX_ZONE_CTL1, 0x0, | ||
656 | REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X | ||
657 | | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL | ||
658 | | BIT_MHL_PLL_CTL0_ZONE_MASK_OE | ||
659 | ); | ||
660 | } else { | ||
661 | sii8620_write_seq_static(ctx, | ||
662 | REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE, | ||
663 | REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X | ||
664 | | BIT_MHL_PLL_CTL0_ZONE_MASK_OE | ||
665 | ); | ||
666 | } | ||
667 | } | ||
668 | |||
669 | static void sii8620_stop_video(struct sii8620 *ctx) | ||
670 | { | ||
671 | u8 uninitialized_var(val); | ||
672 | |||
673 | sii8620_write_seq_static(ctx, | ||
674 | REG_TPI_INTR_EN, 0, | ||
675 | REG_HDCP2X_INTR0_MASK, 0, | ||
676 | REG_TPI_COPP_DATA2, 0, | ||
677 | REG_TPI_INTR_ST0, ~0, | ||
678 | ); | ||
679 | |||
680 | switch (ctx->sink_type) { | ||
681 | case SINK_DVI: | ||
682 | val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN | ||
683 | | BIT_TPI_SC_TPI_AV_MUTE; | ||
684 | break; | ||
685 | case SINK_HDMI: | ||
686 | val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN | ||
687 | | BIT_TPI_SC_TPI_AV_MUTE | ||
688 | | BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI; | ||
689 | break; | ||
690 | default: | ||
691 | return; | ||
692 | } | ||
693 | |||
694 | sii8620_write(ctx, REG_TPI_SC, val); | ||
695 | } | ||
696 | |||
697 | static void sii8620_start_hdmi(struct sii8620 *ctx) | ||
698 | { | ||
699 | sii8620_write_seq_static(ctx, | ||
700 | REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL | ||
701 | | BIT_RX_HDMI_CTRL2_USE_AV_MUTE, | ||
702 | REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE | ||
703 | | BIT_VID_OVRRD_M1080P_OVRRD, | ||
704 | REG_VID_MODE, 0, | ||
705 | REG_MHL_TOP_CTL, 0x1, | ||
706 | REG_MHLTX_CTL6, 0xa0, | ||
707 | REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), | ||
708 | REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL), | ||
709 | ); | ||
710 | |||
711 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | ||
712 | MHL_DST_LM_CLK_MODE_NORMAL | | ||
713 | MHL_DST_LM_PATH_ENABLED); | ||
714 | |||
715 | sii8620_set_auto_zone(ctx); | ||
716 | |||
717 | sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI); | ||
718 | |||
719 | sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif, | ||
720 | ARRAY_SIZE(ctx->avif)); | ||
721 | |||
722 | sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2); | ||
723 | } | ||
724 | |||
725 | static void sii8620_start_video(struct sii8620 *ctx) | ||
726 | { | ||
727 | if (ctx->mode < CM_MHL3) | ||
728 | sii8620_stop_video(ctx); | ||
729 | |||
730 | switch (ctx->sink_type) { | ||
731 | case SINK_HDMI: | ||
732 | sii8620_start_hdmi(ctx); | ||
733 | break; | ||
734 | case SINK_DVI: | ||
735 | default: | ||
736 | break; | ||
737 | } | ||
738 | } | ||
739 | |||
740 | static void sii8620_disable_hpd(struct sii8620 *ctx) | ||
741 | { | ||
742 | sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0); | ||
743 | sii8620_write_seq_static(ctx, | ||
744 | REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN, | ||
745 | REG_INTR8_MASK, 0 | ||
746 | ); | ||
747 | } | ||
748 | |||
749 | static void sii8620_enable_hpd(struct sii8620 *ctx) | ||
750 | { | ||
751 | sii8620_setbits(ctx, REG_TMDS_CSTAT_P3, | ||
752 | BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS | ||
753 | | BIT_TMDS_CSTAT_P3_CLR_AVI, ~0); | ||
754 | sii8620_write_seq_static(ctx, | ||
755 | REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN | ||
756 | | BIT_HPD_CTRL_HPD_HIGH, | ||
757 | ); | ||
758 | } | ||
759 | |||
760 | static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx) | ||
761 | { | ||
762 | if (ctx->gen2_write_burst) | ||
763 | return; | ||
764 | |||
765 | sii8620_write_seq_static(ctx, | ||
766 | REG_MDT_RCV_TIMEOUT, 100, | ||
767 | REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN | ||
768 | ); | ||
769 | ctx->gen2_write_burst = 1; | ||
770 | } | ||
771 | |||
772 | static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx) | ||
773 | { | ||
774 | if (!ctx->gen2_write_burst) | ||
775 | return; | ||
776 | |||
777 | sii8620_write_seq_static(ctx, | ||
778 | REG_MDT_XMIT_CTRL, 0, | ||
779 | REG_MDT_RCV_CTRL, 0 | ||
780 | ); | ||
781 | ctx->gen2_write_burst = 0; | ||
782 | } | ||
783 | |||
784 | static void sii8620_start_gen2_write_burst(struct sii8620 *ctx) | ||
785 | { | ||
786 | sii8620_write_seq_static(ctx, | ||
787 | REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT | ||
788 | | BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR | ||
789 | | BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD | ||
790 | | BIT_MDT_XMIT_SM_ERROR, | ||
791 | REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY | ||
792 | | BIT_MDT_IDLE_AFTER_HAWB_DISABLE | ||
793 | | BIT_MDT_RFIFO_DATA_RDY | ||
794 | ); | ||
795 | sii8620_enable_gen2_write_burst(ctx); | ||
796 | } | ||
797 | |||
798 | static void sii8620_mhl_discover(struct sii8620 *ctx) | ||
799 | { | ||
800 | sii8620_write_seq_static(ctx, | ||
801 | REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | ||
802 | | BIT_DISC_CTRL9_DISC_PULSE_PROCEED, | ||
803 | REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K), | ||
804 | REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT | ||
805 | | BIT_MHL_EST_INT | ||
806 | | BIT_NOT_MHL_EST_INT | ||
807 | | BIT_CBUS_MHL3_DISCON_INT | ||
808 | | BIT_CBUS_MHL12_DISCON_INT | ||
809 | | BIT_RGND_READY_INT, | ||
810 | REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X | ||
811 | | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL | ||
812 | | BIT_MHL_PLL_CTL0_ZONE_MASK_OE, | ||
813 | REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE | ||
814 | | BIT_MHL_DP_CTL0_TX_OE_OVR, | ||
815 | REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, | ||
816 | REG_MHL_DP_CTL1, 0xA2, | ||
817 | REG_MHL_DP_CTL2, 0x03, | ||
818 | REG_MHL_DP_CTL3, 0x35, | ||
819 | REG_MHL_DP_CTL5, 0x02, | ||
820 | REG_MHL_DP_CTL6, 0x02, | ||
821 | REG_MHL_DP_CTL7, 0x03, | ||
822 | REG_COC_CTLC, 0xFF, | ||
823 | REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | ||
824 | | BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC, | ||
825 | REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE | ||
826 | | BIT_COC_CALIBRATION_DONE, | ||
827 | REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD | ||
828 | | BIT_CBUS_CMD_ABORT, | ||
829 | REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE | ||
830 | | BIT_CBUS_HPD_CHG | ||
831 | | BIT_CBUS_MSC_MR_WRITE_STAT | ||
832 | | BIT_CBUS_MSC_MR_MSC_MSG | ||
833 | | BIT_CBUS_MSC_MR_WRITE_BURST | ||
834 | | BIT_CBUS_MSC_MR_SET_INT | ||
835 | | BIT_CBUS_MSC_MT_DONE_NACK | ||
836 | ); | ||
837 | } | ||
838 | |||
839 | static void sii8620_peer_specific_init(struct sii8620 *ctx) | ||
840 | { | ||
841 | if (ctx->mode == CM_MHL3) | ||
842 | sii8620_write_seq_static(ctx, | ||
843 | REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD, | ||
844 | REG_EMSCINTRMASK1, | ||
845 | BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR | ||
846 | ); | ||
847 | else | ||
848 | sii8620_write_seq_static(ctx, | ||
849 | REG_HDCP2X_INTR0_MASK, 0x00, | ||
850 | REG_EMSCINTRMASK1, 0x00, | ||
851 | REG_HDCP2X_INTR0, 0xFF, | ||
852 | REG_INTR1, 0xFF, | ||
853 | REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD | ||
854 | | BIT_SYS_CTRL1_TX_CTRL_HDMI | ||
855 | ); | ||
856 | } | ||
857 | |||
858 | #define SII8620_MHL_VERSION 0x32 | ||
859 | #define SII8620_SCRATCHPAD_SIZE 16 | ||
860 | #define SII8620_INT_STAT_SIZE 0x33 | ||
861 | |||
862 | static void sii8620_set_dev_cap(struct sii8620 *ctx) | ||
863 | { | ||
864 | static const u8 devcap[MHL_DCAP_SIZE] = { | ||
865 | [MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION, | ||
866 | [MHL_DCAP_CAT] = MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER, | ||
867 | [MHL_DCAP_ADOPTER_ID_H] = 0x01, | ||
868 | [MHL_DCAP_ADOPTER_ID_L] = 0x41, | ||
869 | [MHL_DCAP_VID_LINK_MODE] = MHL_DCAP_VID_LINK_RGB444 | ||
870 | | MHL_DCAP_VID_LINK_PPIXEL | ||
871 | | MHL_DCAP_VID_LINK_16BPP, | ||
872 | [MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH, | ||
873 | [MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS, | ||
874 | [MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI, | ||
875 | [MHL_DCAP_BANDWIDTH] = 0x0f, | ||
876 | [MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT | ||
877 | | MHL_DCAP_FEATURE_RAP_SUPPORT | ||
878 | | MHL_DCAP_FEATURE_SP_SUPPORT, | ||
879 | [MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE, | ||
880 | [MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE, | ||
881 | }; | ||
882 | static const u8 xdcap[MHL_XDC_SIZE] = { | ||
883 | [MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075 | ||
884 | | MHL_XDC_ECBUS_S_8BIT, | ||
885 | [MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150 | ||
886 | | MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600, | ||
887 | [MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST, | ||
888 | [MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE, | ||
889 | }; | ||
890 | |||
891 | sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap)); | ||
892 | sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap)); | ||
893 | } | ||
894 | |||
895 | static void sii8620_mhl_init(struct sii8620 *ctx) | ||
896 | { | ||
897 | sii8620_write_seq_static(ctx, | ||
898 | REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), | ||
899 | REG_CBUS_MSC_COMPAT_CTRL, | ||
900 | BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN, | ||
901 | ); | ||
902 | |||
903 | sii8620_peer_specific_init(ctx); | ||
904 | |||
905 | sii8620_disable_hpd(ctx); | ||
906 | |||
907 | sii8620_write_seq_static(ctx, | ||
908 | REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO, | ||
909 | REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | ||
910 | | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, | ||
911 | REG_TMDS0_CCTRL1, 0x90, | ||
912 | REG_TMDS_CLK_EN, 0x01, | ||
913 | REG_TMDS_CH_EN, 0x11, | ||
914 | REG_BGR_BIAS, 0x87, | ||
915 | REG_ALICE0_ZONE_CTRL, 0xE8, | ||
916 | REG_ALICE0_MODE_CTRL, 0x04, | ||
917 | ); | ||
918 | sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0); | ||
919 | sii8620_write_seq_static(ctx, | ||
920 | REG_TPI_HW_OPT3, 0x76, | ||
921 | REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE, | ||
922 | REG_TPI_DTD_B2, 79, | ||
923 | ); | ||
924 | sii8620_set_dev_cap(ctx); | ||
925 | sii8620_write_seq_static(ctx, | ||
926 | REG_MDT_XMIT_TIMEOUT, 100, | ||
927 | REG_MDT_XMIT_CTRL, 0x03, | ||
928 | REG_MDT_XFIFO_STAT, 0x00, | ||
929 | REG_MDT_RCV_TIMEOUT, 100, | ||
930 | REG_CBUS_LINK_CTRL_8, 0x1D, | ||
931 | ); | ||
932 | |||
933 | sii8620_start_gen2_write_burst(ctx); | ||
934 | sii8620_write_seq_static(ctx, | ||
935 | REG_BIST_CTRL, 0x00, | ||
936 | REG_COC_CTL1, 0x10, | ||
937 | REG_COC_CTL2, 0x18, | ||
938 | REG_COC_CTLF, 0x07, | ||
939 | REG_COC_CTL11, 0xF8, | ||
940 | REG_COC_CTL17, 0x61, | ||
941 | REG_COC_CTL18, 0x46, | ||
942 | REG_COC_CTL19, 0x15, | ||
943 | REG_COC_CTL1A, 0x01, | ||
944 | REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN, | ||
945 | REG_MHL_COC_CTL4, 0x2D, | ||
946 | REG_MHL_COC_CTL5, 0xF9, | ||
947 | REG_MSC_HEARTBEAT_CTRL, 0x27, | ||
948 | ); | ||
949 | sii8620_disable_gen2_write_burst(ctx); | ||
950 | |||
951 | /* currently MHL3 is not supported, so we force version to 0 */ | ||
952 | sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0); | ||
953 | sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY), | ||
954 | MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP | ||
955 | | MHL_DST_CONN_POW_STAT); | ||
956 | sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG); | ||
957 | } | ||
958 | |||
959 | static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode) | ||
960 | { | ||
961 | if (ctx->mode == mode) | ||
962 | return; | ||
963 | |||
964 | ctx->mode = mode; | ||
965 | |||
966 | switch (mode) { | ||
967 | case CM_MHL1: | ||
968 | sii8620_write_seq_static(ctx, | ||
969 | REG_CBUS_MSC_COMPAT_CTRL, 0x02, | ||
970 | REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE, | ||
971 | REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | ||
972 | | BIT_DPD_OSC_EN, | ||
973 | REG_COC_INTR_MASK, 0 | ||
974 | ); | ||
975 | break; | ||
976 | case CM_MHL3: | ||
977 | sii8620_write_seq_static(ctx, | ||
978 | REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, | ||
979 | REG_COC_CTL0, 0x40, | ||
980 | REG_MHL_COC_CTL1, 0x07 | ||
981 | ); | ||
982 | break; | ||
983 | case CM_DISCONNECTED: | ||
984 | break; | ||
985 | default: | ||
986 | dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode); | ||
987 | break; | ||
988 | } | ||
989 | |||
990 | sii8620_set_auto_zone(ctx); | ||
991 | |||
992 | if (mode != CM_MHL1) | ||
993 | return; | ||
994 | |||
995 | sii8620_write_seq_static(ctx, | ||
996 | REG_MHL_DP_CTL0, 0xBC, | ||
997 | REG_MHL_DP_CTL1, 0xBB, | ||
998 | REG_MHL_DP_CTL3, 0x48, | ||
999 | REG_MHL_DP_CTL5, 0x39, | ||
1000 | REG_MHL_DP_CTL2, 0x2A, | ||
1001 | REG_MHL_DP_CTL6, 0x2A, | ||
1002 | REG_MHL_DP_CTL7, 0x08 | ||
1003 | ); | ||
1004 | } | ||
1005 | |||
1006 | static void sii8620_disconnect(struct sii8620 *ctx) | ||
1007 | { | ||
1008 | sii8620_disable_gen2_write_burst(ctx); | ||
1009 | sii8620_stop_video(ctx); | ||
1010 | msleep(50); | ||
1011 | sii8620_cbus_reset(ctx); | ||
1012 | sii8620_set_mode(ctx, CM_DISCONNECTED); | ||
1013 | sii8620_write_seq_static(ctx, | ||
1014 | REG_COC_CTL0, 0x40, | ||
1015 | REG_CBUS3_CNVT, 0x84, | ||
1016 | REG_COC_CTL14, 0x00, | ||
1017 | REG_COC_CTL0, 0x40, | ||
1018 | REG_HRXCTRL3, 0x07, | ||
1019 | REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X | ||
1020 | | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL | ||
1021 | | BIT_MHL_PLL_CTL0_ZONE_MASK_OE, | ||
1022 | REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE | ||
1023 | | BIT_MHL_DP_CTL0_TX_OE_OVR, | ||
1024 | REG_MHL_DP_CTL1, 0xBB, | ||
1025 | REG_MHL_DP_CTL3, 0x48, | ||
1026 | REG_MHL_DP_CTL5, 0x3F, | ||
1027 | REG_MHL_DP_CTL2, 0x2F, | ||
1028 | REG_MHL_DP_CTL6, 0x2A, | ||
1029 | REG_MHL_DP_CTL7, 0x03 | ||
1030 | ); | ||
1031 | sii8620_disable_hpd(ctx); | ||
1032 | sii8620_write_seq_static(ctx, | ||
1033 | REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, | ||
1034 | REG_MHL_COC_CTL1, 0x07, | ||
1035 | REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), | ||
1036 | REG_DISC_CTRL8, 0x00, | ||
1037 | REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | ||
1038 | | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, | ||
1039 | REG_INT_CTRL, 0x00, | ||
1040 | REG_MSC_HEARTBEAT_CTRL, 0x27, | ||
1041 | REG_DISC_CTRL1, 0x25, | ||
1042 | REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT, | ||
1043 | REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT, | ||
1044 | REG_MDT_INT_1, 0xff, | ||
1045 | REG_MDT_INT_1_MASK, 0x00, | ||
1046 | REG_MDT_INT_0, 0xff, | ||
1047 | REG_MDT_INT_0_MASK, 0x00, | ||
1048 | REG_COC_INTR, 0xff, | ||
1049 | REG_COC_INTR_MASK, 0x00, | ||
1050 | REG_TRXINTH, 0xff, | ||
1051 | REG_TRXINTMH, 0x00, | ||
1052 | REG_CBUS_INT_0, 0xff, | ||
1053 | REG_CBUS_INT_0_MASK, 0x00, | ||
1054 | REG_CBUS_INT_1, 0xff, | ||
1055 | REG_CBUS_INT_1_MASK, 0x00, | ||
1056 | REG_EMSCINTR, 0xff, | ||
1057 | REG_EMSCINTRMASK, 0x00, | ||
1058 | REG_EMSCINTR1, 0xff, | ||
1059 | REG_EMSCINTRMASK1, 0x00, | ||
1060 | REG_INTR8, 0xff, | ||
1061 | REG_INTR8_MASK, 0x00, | ||
1062 | REG_TPI_INTR_ST0, 0xff, | ||
1063 | REG_TPI_INTR_EN, 0x00, | ||
1064 | REG_HDCP2X_INTR0, 0xff, | ||
1065 | REG_HDCP2X_INTR0_MASK, 0x00, | ||
1066 | REG_INTR9, 0xff, | ||
1067 | REG_INTR9_MASK, 0x00, | ||
1068 | REG_INTR3, 0xff, | ||
1069 | REG_INTR3_MASK, 0x00, | ||
1070 | REG_INTR5, 0xff, | ||
1071 | REG_INTR5_MASK, 0x00, | ||
1072 | REG_INTR2, 0xff, | ||
1073 | REG_INTR2_MASK, 0x00, | ||
1074 | ); | ||
1075 | memset(ctx->stat, 0, sizeof(ctx->stat)); | ||
1076 | memset(ctx->xstat, 0, sizeof(ctx->xstat)); | ||
1077 | memset(ctx->devcap, 0, sizeof(ctx->devcap)); | ||
1078 | memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap)); | ||
1079 | ctx->cbus_status = 0; | ||
1080 | ctx->sink_type = SINK_NONE; | ||
1081 | kfree(ctx->edid); | ||
1082 | ctx->edid = NULL; | ||
1083 | sii8620_mt_cleanup(ctx); | ||
1084 | } | ||
1085 | |||
1086 | static void sii8620_mhl_disconnected(struct sii8620 *ctx) | ||
1087 | { | ||
1088 | sii8620_write_seq_static(ctx, | ||
1089 | REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), | ||
1090 | REG_CBUS_MSC_COMPAT_CTRL, | ||
1091 | BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN | ||
1092 | ); | ||
1093 | sii8620_disconnect(ctx); | ||
1094 | } | ||
1095 | |||
1096 | static void sii8620_irq_disc(struct sii8620 *ctx) | ||
1097 | { | ||
1098 | u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0); | ||
1099 | |||
1100 | if (stat & VAL_CBUS_MHL_DISCON) | ||
1101 | sii8620_mhl_disconnected(ctx); | ||
1102 | |||
1103 | if (stat & BIT_RGND_READY_INT) { | ||
1104 | u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2); | ||
1105 | |||
1106 | if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) { | ||
1107 | sii8620_mhl_discover(ctx); | ||
1108 | } else { | ||
1109 | sii8620_write_seq_static(ctx, | ||
1110 | REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | ||
1111 | | BIT_DISC_CTRL9_NOMHL_EST | ||
1112 | | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, | ||
1113 | REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT | ||
1114 | | BIT_CBUS_MHL3_DISCON_INT | ||
1115 | | BIT_CBUS_MHL12_DISCON_INT | ||
1116 | | BIT_NOT_MHL_EST_INT | ||
1117 | ); | ||
1118 | } | ||
1119 | } | ||
1120 | if (stat & BIT_MHL_EST_INT) | ||
1121 | sii8620_mhl_init(ctx); | ||
1122 | |||
1123 | sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat); | ||
1124 | } | ||
1125 | |||
1126 | static void sii8620_irq_g2wb(struct sii8620 *ctx) | ||
1127 | { | ||
1128 | u8 stat = sii8620_readb(ctx, REG_MDT_INT_0); | ||
1129 | |||
1130 | if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE) | ||
1131 | dev_dbg(ctx->dev, "HAWB idle\n"); | ||
1132 | |||
1133 | sii8620_write(ctx, REG_MDT_INT_0, stat); | ||
1134 | } | ||
1135 | |||
1136 | static void sii8620_status_changed_dcap(struct sii8620 *ctx) | ||
1137 | { | ||
1138 | if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) { | ||
1139 | sii8620_set_mode(ctx, CM_MHL1); | ||
1140 | sii8620_peer_specific_init(ctx); | ||
1141 | sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | ||
1142 | | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR); | ||
1143 | } | ||
1144 | } | ||
1145 | |||
1146 | static void sii8620_status_changed_path(struct sii8620 *ctx) | ||
1147 | { | ||
1148 | if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) { | ||
1149 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | ||
1150 | MHL_DST_LM_CLK_MODE_NORMAL | ||
1151 | | MHL_DST_LM_PATH_ENABLED); | ||
1152 | sii8620_mt_read_devcap(ctx, false); | ||
1153 | } else { | ||
1154 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | ||
1155 | MHL_DST_LM_CLK_MODE_NORMAL); | ||
1156 | } | ||
1157 | } | ||
1158 | |||
1159 | static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) | ||
1160 | { | ||
1161 | u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE]; | ||
1162 | |||
1163 | sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE); | ||
1164 | sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE); | ||
1165 | |||
1166 | sii8620_update_array(ctx->stat, st, MHL_DST_SIZE); | ||
1167 | sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE); | ||
1168 | |||
1169 | if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) | ||
1170 | sii8620_status_changed_dcap(ctx); | ||
1171 | |||
1172 | if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) | ||
1173 | sii8620_status_changed_path(ctx); | ||
1174 | } | ||
1175 | |||
1176 | static void sii8620_msc_mr_set_int(struct sii8620 *ctx) | ||
1177 | { | ||
1178 | u8 ints[MHL_INT_SIZE]; | ||
1179 | |||
1180 | sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE); | ||
1181 | sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE); | ||
1182 | } | ||
1183 | |||
1184 | static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx) | ||
1185 | { | ||
1186 | struct device *dev = ctx->dev; | ||
1187 | |||
1188 | if (list_empty(&ctx->mt_queue)) { | ||
1189 | dev_err(dev, "unexpected MSC MT response\n"); | ||
1190 | return NULL; | ||
1191 | } | ||
1192 | |||
1193 | return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node); | ||
1194 | } | ||
1195 | |||
1196 | static void sii8620_msc_mt_done(struct sii8620 *ctx) | ||
1197 | { | ||
1198 | struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx); | ||
1199 | |||
1200 | if (!msg) | ||
1201 | return; | ||
1202 | |||
1203 | msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0); | ||
1204 | ctx->mt_state = MT_STATE_DONE; | ||
1205 | } | ||
1206 | |||
1207 | static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx) | ||
1208 | { | ||
1209 | struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx); | ||
1210 | u8 buf[2]; | ||
1211 | |||
1212 | if (!msg) | ||
1213 | return; | ||
1214 | |||
1215 | sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2); | ||
1216 | |||
1217 | switch (buf[0]) { | ||
1218 | case MHL_MSC_MSG_RAPK: | ||
1219 | msg->ret = buf[1]; | ||
1220 | ctx->mt_state = MT_STATE_DONE; | ||
1221 | break; | ||
1222 | default: | ||
1223 | dev_err(ctx->dev, "%s message type %d,%d not supported\n", | ||
1224 | __func__, buf[0], buf[1]); | ||
1225 | } | ||
1226 | } | ||
1227 | |||
1228 | static void sii8620_irq_msc(struct sii8620 *ctx) | ||
1229 | { | ||
1230 | u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0); | ||
1231 | |||
1232 | if (stat & ~BIT_CBUS_HPD_CHG) | ||
1233 | sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG); | ||
1234 | |||
1235 | if (stat & BIT_CBUS_HPD_CHG) { | ||
1236 | u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS); | ||
1237 | |||
1238 | if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) { | ||
1239 | sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG); | ||
1240 | } else { | ||
1241 | stat ^= BIT_CBUS_STATUS_CBUS_HPD; | ||
1242 | cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD; | ||
1243 | } | ||
1244 | ctx->cbus_status = cbus_stat; | ||
1245 | } | ||
1246 | |||
1247 | if (stat & BIT_CBUS_MSC_MR_WRITE_STAT) | ||
1248 | sii8620_msc_mr_write_stat(ctx); | ||
1249 | |||
1250 | if (stat & BIT_CBUS_MSC_MR_SET_INT) | ||
1251 | sii8620_msc_mr_set_int(ctx); | ||
1252 | |||
1253 | if (stat & BIT_CBUS_MSC_MT_DONE) | ||
1254 | sii8620_msc_mt_done(ctx); | ||
1255 | |||
1256 | if (stat & BIT_CBUS_MSC_MR_MSC_MSG) | ||
1257 | sii8620_msc_mr_msc_msg(ctx); | ||
1258 | } | ||
1259 | |||
1260 | static void sii8620_irq_coc(struct sii8620 *ctx) | ||
1261 | { | ||
1262 | u8 stat = sii8620_readb(ctx, REG_COC_INTR); | ||
1263 | |||
1264 | sii8620_write(ctx, REG_COC_INTR, stat); | ||
1265 | } | ||
1266 | |||
1267 | static void sii8620_irq_merr(struct sii8620 *ctx) | ||
1268 | { | ||
1269 | u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1); | ||
1270 | |||
1271 | sii8620_write(ctx, REG_CBUS_INT_1, stat); | ||
1272 | } | ||
1273 | |||
1274 | static void sii8620_irq_edid(struct sii8620 *ctx) | ||
1275 | { | ||
1276 | u8 stat = sii8620_readb(ctx, REG_INTR9); | ||
1277 | |||
1278 | sii8620_write(ctx, REG_INTR9, stat); | ||
1279 | |||
1280 | if (stat & BIT_INTR9_DEVCAP_DONE) | ||
1281 | ctx->mt_state = MT_STATE_DONE; | ||
1282 | } | ||
1283 | |||
1284 | static void sii8620_scdt_high(struct sii8620 *ctx) | ||
1285 | { | ||
1286 | sii8620_write_seq_static(ctx, | ||
1287 | REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI, | ||
1288 | REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI, | ||
1289 | ); | ||
1290 | } | ||
1291 | |||
1292 | static void sii8620_scdt_low(struct sii8620 *ctx) | ||
1293 | { | ||
1294 | sii8620_write(ctx, REG_TMDS_CSTAT_P3, | ||
1295 | BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS | | ||
1296 | BIT_TMDS_CSTAT_P3_CLR_AVI); | ||
1297 | |||
1298 | sii8620_stop_video(ctx); | ||
1299 | |||
1300 | sii8620_write(ctx, REG_INTR8_MASK, 0); | ||
1301 | } | ||
1302 | |||
1303 | static void sii8620_irq_scdt(struct sii8620 *ctx) | ||
1304 | { | ||
1305 | u8 stat = sii8620_readb(ctx, REG_INTR5); | ||
1306 | |||
1307 | if (stat & BIT_INTR_SCDT_CHANGE) { | ||
1308 | u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3); | ||
1309 | |||
1310 | if (cstat & BIT_TMDS_CSTAT_P3_SCDT) | ||
1311 | sii8620_scdt_high(ctx); | ||
1312 | else | ||
1313 | sii8620_scdt_low(ctx); | ||
1314 | } | ||
1315 | |||
1316 | sii8620_write(ctx, REG_INTR5, stat); | ||
1317 | } | ||
1318 | |||
1319 | static void sii8620_new_vsi(struct sii8620 *ctx) | ||
1320 | { | ||
1321 | u8 vsif[11]; | ||
1322 | |||
1323 | sii8620_write(ctx, REG_RX_HDMI_CTRL2, | ||
1324 | VAL_RX_HDMI_CTRL2_DEFVAL | | ||
1325 | BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI); | ||
1326 | sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif, | ||
1327 | ARRAY_SIZE(vsif)); | ||
1328 | } | ||
1329 | |||
1330 | static void sii8620_new_avi(struct sii8620 *ctx) | ||
1331 | { | ||
1332 | sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL); | ||
1333 | sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif, | ||
1334 | ARRAY_SIZE(ctx->avif)); | ||
1335 | } | ||
1336 | |||
1337 | static void sii8620_irq_infr(struct sii8620 *ctx) | ||
1338 | { | ||
1339 | u8 stat = sii8620_readb(ctx, REG_INTR8) | ||
1340 | & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI); | ||
1341 | |||
1342 | sii8620_write(ctx, REG_INTR8, stat); | ||
1343 | |||
1344 | if (stat & BIT_CEA_NEW_VSI) | ||
1345 | sii8620_new_vsi(ctx); | ||
1346 | |||
1347 | if (stat & BIT_CEA_NEW_AVI) | ||
1348 | sii8620_new_avi(ctx); | ||
1349 | |||
1350 | if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI)) | ||
1351 | sii8620_start_video(ctx); | ||
1352 | } | ||
1353 | |||
1354 | /* endian agnostic, non-volatile version of test_bit */ | ||
1355 | static bool sii8620_test_bit(unsigned int nr, const u8 *addr) | ||
1356 | { | ||
1357 | return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE)); | ||
1358 | } | ||
1359 | |||
1360 | static irqreturn_t sii8620_irq_thread(int irq, void *data) | ||
1361 | { | ||
1362 | static const struct { | ||
1363 | int bit; | ||
1364 | void (*handler)(struct sii8620 *ctx); | ||
1365 | } irq_vec[] = { | ||
1366 | { BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc }, | ||
1367 | { BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb }, | ||
1368 | { BIT_FAST_INTR_STAT_COC, sii8620_irq_coc }, | ||
1369 | { BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc }, | ||
1370 | { BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr }, | ||
1371 | { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid }, | ||
1372 | { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt }, | ||
1373 | { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr }, | ||
1374 | }; | ||
1375 | struct sii8620 *ctx = data; | ||
1376 | u8 stats[LEN_FAST_INTR_STAT]; | ||
1377 | int i, ret; | ||
1378 | |||
1379 | mutex_lock(&ctx->lock); | ||
1380 | |||
1381 | sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats)); | ||
1382 | for (i = 0; i < ARRAY_SIZE(irq_vec); ++i) | ||
1383 | if (sii8620_test_bit(irq_vec[i].bit, stats)) | ||
1384 | irq_vec[i].handler(ctx); | ||
1385 | |||
1386 | sii8620_mt_work(ctx); | ||
1387 | |||
1388 | ret = sii8620_clear_error(ctx); | ||
1389 | if (ret) { | ||
1390 | dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret); | ||
1391 | sii8620_mhl_disconnected(ctx); | ||
1392 | } | ||
1393 | mutex_unlock(&ctx->lock); | ||
1394 | |||
1395 | return IRQ_HANDLED; | ||
1396 | } | ||
1397 | |||
1398 | static void sii8620_cable_in(struct sii8620 *ctx) | ||
1399 | { | ||
1400 | struct device *dev = ctx->dev; | ||
1401 | u8 ver[5]; | ||
1402 | int ret; | ||
1403 | |||
1404 | ret = sii8620_hw_on(ctx); | ||
1405 | if (ret) { | ||
1406 | dev_err(dev, "Error powering on, %d.\n", ret); | ||
1407 | return; | ||
1408 | } | ||
1409 | sii8620_hw_reset(ctx); | ||
1410 | |||
1411 | sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver)); | ||
1412 | ret = sii8620_clear_error(ctx); | ||
1413 | if (ret) { | ||
1414 | dev_err(dev, "Error accessing I2C bus, %d.\n", ret); | ||
1415 | return; | ||
1416 | } | ||
1417 | |||
1418 | dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0], | ||
1419 | ver[3], ver[2], ver[4]); | ||
1420 | |||
1421 | sii8620_write(ctx, REG_DPD, | ||
1422 | BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN); | ||
1423 | |||
1424 | sii8620_xtal_set_rate(ctx); | ||
1425 | sii8620_disconnect(ctx); | ||
1426 | |||
1427 | sii8620_write_seq_static(ctx, | ||
1428 | REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG | ||
1429 | | VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734, | ||
1430 | REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM, | ||
1431 | REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN, | ||
1432 | ); | ||
1433 | |||
1434 | ret = sii8620_clear_error(ctx); | ||
1435 | if (ret) { | ||
1436 | dev_err(dev, "Error accessing I2C bus, %d.\n", ret); | ||
1437 | return; | ||
1438 | } | ||
1439 | |||
1440 | enable_irq(to_i2c_client(ctx->dev)->irq); | ||
1441 | } | ||
1442 | |||
1443 | static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge) | ||
1444 | { | ||
1445 | return container_of(bridge, struct sii8620, bridge); | ||
1446 | } | ||
1447 | |||
1448 | static bool sii8620_mode_fixup(struct drm_bridge *bridge, | ||
1449 | const struct drm_display_mode *mode, | ||
1450 | struct drm_display_mode *adjusted_mode) | ||
1451 | { | ||
1452 | struct sii8620 *ctx = bridge_to_sii8620(bridge); | ||
1453 | bool ret = false; | ||
1454 | int max_clock = 74250; | ||
1455 | |||
1456 | mutex_lock(&ctx->lock); | ||
1457 | |||
1458 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1459 | goto out; | ||
1460 | |||
1461 | if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) | ||
1462 | max_clock = 300000; | ||
1463 | |||
1464 | ret = mode->clock <= max_clock; | ||
1465 | |||
1466 | out: | ||
1467 | mutex_unlock(&ctx->lock); | ||
1468 | |||
1469 | return ret; | ||
1470 | } | ||
1471 | |||
1472 | static const struct drm_bridge_funcs sii8620_bridge_funcs = { | ||
1473 | .mode_fixup = sii8620_mode_fixup, | ||
1474 | }; | ||
1475 | |||
1476 | static int sii8620_probe(struct i2c_client *client, | ||
1477 | const struct i2c_device_id *id) | ||
1478 | { | ||
1479 | struct device *dev = &client->dev; | ||
1480 | struct sii8620 *ctx; | ||
1481 | int ret; | ||
1482 | |||
1483 | ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); | ||
1484 | if (!ctx) | ||
1485 | return -ENOMEM; | ||
1486 | |||
1487 | ctx->dev = dev; | ||
1488 | mutex_init(&ctx->lock); | ||
1489 | INIT_LIST_HEAD(&ctx->mt_queue); | ||
1490 | |||
1491 | ctx->clk_xtal = devm_clk_get(dev, "xtal"); | ||
1492 | if (IS_ERR(ctx->clk_xtal)) { | ||
1493 | dev_err(dev, "failed to get xtal clock from DT\n"); | ||
1494 | return PTR_ERR(ctx->clk_xtal); | ||
1495 | } | ||
1496 | |||
1497 | if (!client->irq) { | ||
1498 | dev_err(dev, "no irq provided\n"); | ||
1499 | return -EINVAL; | ||
1500 | } | ||
1501 | irq_set_status_flags(client->irq, IRQ_NOAUTOEN); | ||
1502 | ret = devm_request_threaded_irq(dev, client->irq, NULL, | ||
1503 | sii8620_irq_thread, | ||
1504 | IRQF_TRIGGER_HIGH | IRQF_ONESHOT, | ||
1505 | 					"sii8620", ctx); | ||
| 	if (ret) | ||
| 		return ret; | ||
1506 | |||
1507 | ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); | ||
1508 | if (IS_ERR(ctx->gpio_reset)) { | ||
1509 | dev_err(dev, "failed to get reset gpio from DT\n"); | ||
1510 | return PTR_ERR(ctx->gpio_reset); | ||
1511 | } | ||
1512 | |||
1513 | ctx->supplies[0].supply = "cvcc10"; | ||
1514 | ctx->supplies[1].supply = "iovcc18"; | ||
1515 | ret = devm_regulator_bulk_get(dev, 2, ctx->supplies); | ||
1516 | if (ret) | ||
1517 | return ret; | ||
1518 | |||
1519 | i2c_set_clientdata(client, ctx); | ||
1520 | |||
1521 | ctx->bridge.funcs = &sii8620_bridge_funcs; | ||
1522 | ctx->bridge.of_node = dev->of_node; | ||
1523 | drm_bridge_add(&ctx->bridge); | ||
1524 | |||
1525 | sii8620_cable_in(ctx); | ||
1526 | |||
1527 | return 0; | ||
1528 | } | ||
1529 | |||
1530 | static int sii8620_remove(struct i2c_client *client) | ||
1531 | { | ||
1532 | struct sii8620 *ctx = i2c_get_clientdata(client); | ||
1533 | |||
1534 | 	disable_irq(client->irq); | ||
1535 | drm_bridge_remove(&ctx->bridge); | ||
1536 | sii8620_hw_off(ctx); | ||
1537 | |||
1538 | return 0; | ||
1539 | } | ||
1540 | |||
1541 | static const struct of_device_id sii8620_dt_match[] = { | ||
1542 | { .compatible = "sil,sii8620" }, | ||
1543 | { }, | ||
1544 | }; | ||
1545 | MODULE_DEVICE_TABLE(of, sii8620_dt_match); | ||
1546 | |||
1547 | static const struct i2c_device_id sii8620_id[] = { | ||
1548 | { "sii8620", 0 }, | ||
1549 | { }, | ||
1550 | }; | ||
1551 | MODULE_DEVICE_TABLE(i2c, sii8620_id); | ||
1552 | |||
1553 | static struct i2c_driver sii8620_driver = { | ||
1554 | .driver = { | ||
1555 | .name = "sii8620", | ||
1556 | .of_match_table = of_match_ptr(sii8620_dt_match), | ||
1557 | }, | ||
1558 | .probe = sii8620_probe, | ||
1559 | .remove = sii8620_remove, | ||
1560 | .id_table = sii8620_id, | ||
1561 | }; | ||
1562 | |||
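| /* module_i2c_driver() expands to the module_init()/module_exit() pair | ||
|  * that registers and unregisters sii8620_driver with the I2C core. | ||
|  */ | ||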
1563 | module_i2c_driver(sii8620_driver); | ||
1564 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h new file mode 100644 index 000000000000..6ff616a4f6ce --- /dev/null +++ b/drivers/gpu/drm/bridge/sil-sii8620.h | |||
@@ -0,0 +1,1517 @@ | |||
1 | /* | ||
2 | * Registers of Silicon Image SiI8620 Mobile HD Transmitter | ||
3 | * | ||
4 | * Copyright (C) 2015, Samsung Electronics Co., Ltd. | ||
5 | * Andrzej Hajda <a.hajda@samsung.com> | ||
6 | * | ||
7 | * Based on MHL driver for Android devices. | ||
8 | * Copyright (C) 2013-2014 Silicon Image, Inc. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #ifndef __SIL_SII8620_H__ | ||
16 | #define __SIL_SII8620_H__ | ||
17 | |||
18 | /* Vendor ID Low byte, default value: 0x01 */ | ||
19 | #define REG_VND_IDL 0x0000 | ||
20 | |||
21 | /* Vendor ID High byte, default value: 0x00 */ | ||
22 | #define REG_VND_IDH 0x0001 | ||
23 | |||
24 | /* Device ID Low byte, default value: 0x60 */ | ||
25 | #define REG_DEV_IDL 0x0002 | ||
26 | |||
27 | /* Device ID High byte, default value: 0x86 */ | ||
28 | #define REG_DEV_IDH 0x0003 | ||
29 | |||
30 | /* Device Revision, default value: 0x10 */ | ||
31 | #define REG_DEV_REV 0x0004 | ||
32 | |||
33 | /* OTP DBYTE510, default value: 0x00 */ | ||
34 | #define REG_OTP_DBYTE510 0x0006 | ||
35 | |||
36 | /* System Control #1, default value: 0x00 */ | ||
37 | #define REG_SYS_CTRL1 0x0008 | ||
38 | #define BIT_SYS_CTRL1_OTPVMUTEOVR_SET BIT(7) | ||
39 | #define BIT_SYS_CTRL1_VSYNCPIN BIT(6) | ||
40 | #define BIT_SYS_CTRL1_OTPADROPOVR_SET BIT(5) | ||
41 | #define BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD BIT(4) | ||
42 | #define BIT_SYS_CTRL1_OTP2XVOVR_EN BIT(3) | ||
43 | #define BIT_SYS_CTRL1_OTP2XAOVR_EN BIT(2) | ||
44 | #define BIT_SYS_CTRL1_TX_CTRL_HDMI BIT(1) | ||
45 | #define BIT_SYS_CTRL1_OTPAMUTEOVR_SET BIT(0) | ||
46 | |||
47 | /* System Control DPD, default value: 0x90 */ | ||
48 | #define REG_DPD 0x000b | ||
49 | #define BIT_DPD_PWRON_PLL BIT(7) | ||
50 | #define BIT_DPD_PDNTX12 BIT(6) | ||
51 | #define BIT_DPD_PDNRX12 BIT(5) | ||
52 | #define BIT_DPD_OSC_EN BIT(4) | ||
53 | #define BIT_DPD_PWRON_HSIC BIT(3) | ||
54 | #define BIT_DPD_PDIDCK_N BIT(2) | ||
55 | #define BIT_DPD_PD_MHL_CLK_N BIT(1) | ||
56 | |||
57 | /* Dual link Control, default value: 0x00 */ | ||
58 | #define REG_DCTL 0x000d | ||
59 | #define BIT_DCTL_TDM_LCLK_PHASE BIT(7) | ||
60 | #define BIT_DCTL_HSIC_CLK_PHASE BIT(6) | ||
61 | #define BIT_DCTL_CTS_TCK_PHASE BIT(5) | ||
62 | #define BIT_DCTL_EXT_DDC_SEL BIT(4) | ||
63 | #define BIT_DCTL_TRANSCODE BIT(3) | ||
64 | #define BIT_DCTL_HSIC_RX_STROBE_PHASE BIT(2) | ||
65 | #define BIT_DCTL_HSIC_TX_BIST_START_SEL BIT(1) | ||
66 | #define BIT_DCTL_TCLKNX_PHASE BIT(0) | ||
67 | |||
68 | /* PWD Software Reset, default value: 0x20 */ | ||
69 | #define REG_PWD_SRST 0x000e | ||
70 | #define BIT_PWD_SRST_COC_DOC_RST BIT(7) | ||
71 | #define BIT_PWD_SRST_CBUS_RST_SW BIT(6) | ||
72 | #define BIT_PWD_SRST_CBUS_RST_SW_EN BIT(5) | ||
73 | #define BIT_PWD_SRST_MHLFIFO_RST BIT(4) | ||
74 | #define BIT_PWD_SRST_CBUS_RST BIT(3) | ||
75 | #define BIT_PWD_SRST_SW_RST_AUTO BIT(2) | ||
76 | #define BIT_PWD_SRST_HDCP2X_SW_RST BIT(1) | ||
77 | #define BIT_PWD_SRST_SW_RST BIT(0) | ||
78 | |||
79 | /* AKSV_1, default value: 0x00 */ | ||
80 | #define REG_AKSV_1 0x001d | ||
81 | |||
82 | /* Video H Resolution #1, default value: 0x00 */ | ||
83 | #define REG_H_RESL 0x003a | ||
84 | |||
85 | /* Video Mode, default value: 0x00 */ | ||
86 | #define REG_VID_MODE 0x004a | ||
87 | #define BIT_VID_MODE_M1080P BIT(6) | ||
88 | |||
89 | /* Video Input Mode, default value: 0xc0 */ | ||
90 | #define REG_VID_OVRRD 0x0051 | ||
91 | #define BIT_VID_OVRRD_PP_AUTO_DISABLE BIT(7) | ||
92 | #define BIT_VID_OVRRD_M1080P_OVRRD BIT(6) | ||
93 | #define BIT_VID_OVRRD_MINIVSYNC_ON BIT(5) | ||
94 | #define BIT_VID_OVRRD_3DCONV_EN_FRAME_PACK BIT(4) | ||
95 | #define BIT_VID_OVRRD_ENABLE_AUTO_PATH_EN BIT(3) | ||
96 | #define BIT_VID_OVRRD_ENRGB2YCBCR_OVRRD BIT(2) | ||
97 | #define BIT_VID_OVRRD_ENDOWNSAMPLE_OVRRD BIT(0) | ||
98 | |||
99 | /* I2C Address reassignment, default value: 0x00 */ | ||
100 | #define REG_PAGE_MHLSPEC_ADDR 0x0057 | ||
101 | #define REG_PAGE7_ADDR 0x0058 | ||
102 | #define REG_PAGE8_ADDR 0x005c | ||
103 | |||
104 | /* Fast Interrupt Status, default value: 0x00 */ | ||
105 | #define REG_FAST_INTR_STAT 0x005f | ||
106 | #define LEN_FAST_INTR_STAT 7 | ||
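| /* The BIT_FAST_INTR_STAT_* values below are bit offsets within the | ||
|  * 7-byte FAST_INTR_STAT block (byte = n / 8, bit = n % 8); e.g. | ||
|  * BIT_FAST_INTR_STAT_TIMR = 8 is bit 0 of the second status byte. | ||
|  */ | ||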
107 | #define BIT_FAST_INTR_STAT_TIMR 8 | ||
108 | #define BIT_FAST_INTR_STAT_INT2 9 | ||
109 | #define BIT_FAST_INTR_STAT_DDC 10 | ||
110 | #define BIT_FAST_INTR_STAT_SCDT 11 | ||
111 | #define BIT_FAST_INTR_STAT_INFR 13 | ||
112 | #define BIT_FAST_INTR_STAT_EDID 14 | ||
113 | #define BIT_FAST_INTR_STAT_HDCP 15 | ||
114 | #define BIT_FAST_INTR_STAT_MSC 16 | ||
115 | #define BIT_FAST_INTR_STAT_MERR 17 | ||
116 | #define BIT_FAST_INTR_STAT_G2WB 18 | ||
117 | #define BIT_FAST_INTR_STAT_G2WB_ERR 19 | ||
118 | #define BIT_FAST_INTR_STAT_DISC 28 | ||
119 | #define BIT_FAST_INTR_STAT_BLOCK 30 | ||
120 | #define BIT_FAST_INTR_STAT_LTRN 31 | ||
121 | #define BIT_FAST_INTR_STAT_HDCP2 32 | ||
122 | #define BIT_FAST_INTR_STAT_TDM 42 | ||
123 | #define BIT_FAST_INTR_STAT_COC 51 | ||
124 | |||
125 | /* GPIO Control, default value: 0x15 */ | ||
126 | #define REG_GPIO_CTRL1 0x006e | ||
127 | #define BIT_CTRL1_GPIO_I_8 BIT(5) | ||
128 | #define BIT_CTRL1_GPIO_OEN_8 BIT(4) | ||
129 | #define BIT_CTRL1_GPIO_I_7 BIT(3) | ||
130 | #define BIT_CTRL1_GPIO_OEN_7 BIT(2) | ||
131 | #define BIT_CTRL1_GPIO_I_6 BIT(1) | ||
132 | #define BIT_CTRL1_GPIO_OEN_6 BIT(0) | ||
133 | |||
134 | /* Interrupt Control, default value: 0x06 */ | ||
135 | #define REG_INT_CTRL 0x006f | ||
136 | #define BIT_INT_CTRL_SOFTWARE_WP BIT(7) | ||
137 | #define BIT_INT_CTRL_INTR_OD BIT(2) | ||
138 | #define BIT_INT_CTRL_INTR_POLARITY BIT(1) | ||
139 | |||
140 | /* Interrupt State, default value: 0x00 */ | ||
141 | #define REG_INTR_STATE 0x0070 | ||
142 | #define BIT_INTR_STATE_INTR_STATE BIT(0) | ||
143 | |||
144 | /* Interrupt Source #1, default value: 0x00 */ | ||
145 | #define REG_INTR1 0x0071 | ||
146 | |||
147 | /* Interrupt Source #2, default value: 0x00 */ | ||
148 | #define REG_INTR2 0x0072 | ||
149 | |||
150 | /* Interrupt Source #3, default value: 0x01 */ | ||
151 | #define REG_INTR3 0x0073 | ||
152 | #define BIT_DDC_CMD_DONE BIT(3) | ||
153 | |||
154 | /* Interrupt Source #5, default value: 0x00 */ | ||
155 | #define REG_INTR5 0x0074 | ||
156 | |||
157 | /* Interrupt #1 Mask, default value: 0x00 */ | ||
158 | #define REG_INTR1_MASK 0x0075 | ||
159 | |||
160 | /* Interrupt #2 Mask, default value: 0x00 */ | ||
161 | #define REG_INTR2_MASK 0x0076 | ||
162 | |||
163 | /* Interrupt #3 Mask, default value: 0x00 */ | ||
164 | #define REG_INTR3_MASK 0x0077 | ||
165 | |||
166 | /* Interrupt #5 Mask, default value: 0x00 */ | ||
167 | #define REG_INTR5_MASK 0x0078 | ||
168 | #define BIT_INTR_SCDT_CHANGE BIT(0) | ||
169 | |||
170 | /* Hot Plug Connection Control, default value: 0x45 */ | ||
171 | #define REG_HPD_CTRL 0x0079 | ||
172 | #define BIT_HPD_CTRL_HPD_DS_SIGNAL BIT(7) | ||
173 | #define BIT_HPD_CTRL_HPD_OUT_OD_EN BIT(6) | ||
174 | #define BIT_HPD_CTRL_HPD_HIGH BIT(5) | ||
175 | #define BIT_HPD_CTRL_HPD_OUT_OVR_EN BIT(4) | ||
176 | #define BIT_HPD_CTRL_GPIO_I_1 BIT(3) | ||
177 | #define BIT_HPD_CTRL_GPIO_OEN_1 BIT(2) | ||
178 | #define BIT_HPD_CTRL_GPIO_I_0 BIT(1) | ||
179 | #define BIT_HPD_CTRL_GPIO_OEN_0 BIT(0) | ||
180 | |||
181 | /* GPIO Control, default value: 0x55 */ | ||
182 | #define REG_GPIO_CTRL 0x007a | ||
183 | #define BIT_CTRL_GPIO_I_5 BIT(7) | ||
184 | #define BIT_CTRL_GPIO_OEN_5 BIT(6) | ||
185 | #define BIT_CTRL_GPIO_I_4 BIT(5) | ||
186 | #define BIT_CTRL_GPIO_OEN_4 BIT(4) | ||
187 | #define BIT_CTRL_GPIO_I_3 BIT(3) | ||
188 | #define BIT_CTRL_GPIO_OEN_3 BIT(2) | ||
189 | #define BIT_CTRL_GPIO_I_2 BIT(1) | ||
190 | #define BIT_CTRL_GPIO_OEN_2 BIT(0) | ||
191 | |||
192 | /* Interrupt Source 7, default value: 0x00 */ | ||
193 | #define REG_INTR7 0x007b | ||
194 | |||
195 | /* Interrupt Source 8, default value: 0x00 */ | ||
196 | #define REG_INTR8 0x007c | ||
197 | |||
198 | /* Interrupt #7 Mask, default value: 0x00 */ | ||
199 | #define REG_INTR7_MASK 0x007d | ||
200 | |||
201 | /* Interrupt #8 Mask, default value: 0x00 */ | ||
202 | #define REG_INTR8_MASK 0x007e | ||
203 | #define BIT_CEA_NEW_VSI BIT(2) | ||
204 | #define BIT_CEA_NEW_AVI BIT(1) | ||
205 | |||
206 | /* TMDS Clock Control, default value: 0x10 */ | ||
207 | #define REG_TMDS_CCTRL 0x0080 | ||
208 | #define BIT_TMDS_CCTRL_TMDS_OE BIT(4) | ||
209 | |||
210 | /* TMDS Control #4, default value: 0x02 */ | ||
211 | #define REG_TMDS_CTRL4 0x0085 | ||
212 | #define BIT_TMDS_CTRL4_SCDT_CKDT_SEL BIT(1) | ||
213 | #define BIT_TMDS_CTRL4_TX_EN_BY_SCDT BIT(0) | ||
214 | |||
215 | /* BIST CNTL, default value: 0x00 */ | ||
216 | #define REG_BIST_CTRL 0x00bb | ||
217 | #define BIT_RXBIST_VGB_EN BIT(7) | ||
218 | #define BIT_TXBIST_VGB_EN BIT(6) | ||
219 | #define BIT_BIST_START_SEL BIT(5) | ||
220 | #define BIT_BIST_START_BIT BIT(4) | ||
221 | #define BIT_BIST_ALWAYS_ON BIT(3) | ||
222 | #define BIT_BIST_TRANS BIT(2) | ||
223 | #define BIT_BIST_RESET BIT(1) | ||
224 | #define BIT_BIST_EN BIT(0) | ||
225 | |||
226 | /* BIST TEST SEL, default value: 0x00 */ | ||
227 | #define REG_BIST_TEST_SEL 0x00bd | ||
228 | #define MSK_BIST_TEST_SEL_BIST_PATT_SEL 0x0f | ||
229 | |||
230 | /* BIST VIDEO_MODE, default value: 0x00 */ | ||
231 | #define REG_BIST_VIDEO_MODE 0x00be | ||
232 | #define MSK_BIST_VIDEO_MODE_BIST_VIDEO_MODE_3_0 0x0f | ||
233 | |||
234 | /* BIST DURATION0, default value: 0x00 */ | ||
235 | #define REG_BIST_DURATION_0 0x00bf | ||
236 | |||
237 | /* BIST DURATION1, default value: 0x00 */ | ||
238 | #define REG_BIST_DURATION_1 0x00c0 | ||
239 | |||
240 | /* BIST DURATION2, default value: 0x00 */ | ||
241 | #define REG_BIST_DURATION_2 0x00c1 | ||
242 | |||
243 | /* BIST 8BIT_PATTERN, default value: 0x00 */ | ||
244 | #define REG_BIST_8BIT_PATTERN 0x00c2 | ||
245 | |||
246 | /* LM DDC, default value: 0x80 */ | ||
247 | #define REG_LM_DDC 0x00c7 | ||
248 | #define BIT_LM_DDC_SW_TPI_EN_DISABLED BIT(7) | ||
249 | |||
250 | #define BIT_LM_DDC_VIDEO_MUTE_EN BIT(5) | ||
251 | #define BIT_LM_DDC_DDC_TPI_SW BIT(2) | ||
252 | #define BIT_LM_DDC_DDC_GRANT BIT(1) | ||
253 | #define BIT_LM_DDC_DDC_GPU_REQUEST BIT(0) | ||
254 | |||
255 | /* DDC I2C Manual, default value: 0x03 */ | ||
256 | #define REG_DDC_MANUAL 0x00ec | ||
257 | #define BIT_DDC_MANUAL_MAN_DDC BIT(7) | ||
258 | #define BIT_DDC_MANUAL_VP_SEL BIT(6) | ||
259 | #define BIT_DDC_MANUAL_DSDA BIT(5) | ||
260 | #define BIT_DDC_MANUAL_DSCL BIT(4) | ||
261 | #define BIT_DDC_MANUAL_GCP_HW_CTL_EN BIT(3) | ||
262 | #define BIT_DDC_MANUAL_DDCM_ABORT_WP BIT(2) | ||
263 | #define BIT_DDC_MANUAL_IO_DSDA BIT(1) | ||
264 | #define BIT_DDC_MANUAL_IO_DSCL BIT(0) | ||
265 | |||
266 | /* DDC I2C Target Slave Address, default value: 0x00 */ | ||
267 | #define REG_DDC_ADDR 0x00ed | ||
268 | #define MSK_DDC_ADDR_DDC_ADDR 0xfe | ||
269 | |||
270 | /* DDC I2C Target Segment Address, default value: 0x00 */ | ||
271 | #define REG_DDC_SEGM 0x00ee | ||
272 | |||
273 | /* DDC I2C Target Offset Address, default value: 0x00 */ | ||
274 | #define REG_DDC_OFFSET 0x00ef | ||
275 | |||
276 | /* DDC I2C Data In count #1, default value: 0x00 */ | ||
277 | #define REG_DDC_DIN_CNT1 0x00f0 | ||
278 | |||
279 | /* DDC I2C Data In count #2, default value: 0x00 */ | ||
280 | #define REG_DDC_DIN_CNT2 0x00f1 | ||
281 | #define MSK_DDC_DIN_CNT2_DDC_DIN_CNT_9_8 0x03 | ||
282 | |||
283 | /* DDC I2C Status, default value: 0x04 */ | ||
284 | #define REG_DDC_STATUS 0x00f2 | ||
285 | #define BIT_DDC_STATUS_DDC_BUS_LOW BIT(6) | ||
286 | #define BIT_DDC_STATUS_DDC_NO_ACK BIT(5) | ||
287 | #define BIT_DDC_STATUS_DDC_I2C_IN_PROG BIT(4) | ||
288 | #define BIT_DDC_STATUS_DDC_FIFO_FULL BIT(3) | ||
289 | #define BIT_DDC_STATUS_DDC_FIFO_EMPTY BIT(2) | ||
290 | #define BIT_DDC_STATUS_DDC_FIFO_READ_IN_USE BIT(1) | ||
291 | #define BIT_DDC_STATUS_DDC_FIFO_WRITE_IN_USE BIT(0) | ||
292 | |||
293 | /* DDC I2C Command, default value: 0x70 */ | ||
294 | #define REG_DDC_CMD 0x00f3 | ||
295 | #define BIT_DDC_CMD_HDCP_DDC_EN BIT(6) | ||
296 | #define BIT_DDC_CMD_SDA_DEL_EN BIT(5) | ||
297 | #define BIT_DDC_CMD_DDC_FLT_EN BIT(4) | ||
298 | |||
299 | #define MSK_DDC_CMD_DDC_CMD 0x0f | ||
300 | #define VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 0x04 | ||
301 | #define VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO 0x09 | ||
302 | #define VAL_DDC_CMD_DDC_CMD_ABORT 0x0f | ||
303 | |||
304 | /* DDC I2C FIFO Data In/Out, default value: 0x00 */ | ||
305 | #define REG_DDC_DATA 0x00f4 | ||
306 | |||
307 | /* DDC I2C Data Out Counter, default value: 0x00 */ | ||
308 | #define REG_DDC_DOUT_CNT 0x00f5 | ||
309 | #define BIT_DDC_DOUT_CNT_DDC_DELAY_CNT_8 BIT(7) | ||
310 | #define MSK_DDC_DOUT_CNT_DDC_DATA_OUT_CNT 0x1f | ||
311 | |||
312 | /* DDC I2C Delay Count, default value: 0x14 */ | ||
313 | #define REG_DDC_DELAY_CNT 0x00f6 | ||
314 | |||
315 | /* Test Control, default value: 0x80 */ | ||
316 | #define REG_TEST_TXCTRL 0x00f7 | ||
317 | #define BIT_TEST_TXCTRL_RCLK_REF_SEL BIT(7) | ||
318 | #define BIT_TEST_TXCTRL_PCLK_REF_SEL BIT(6) | ||
319 | #define MSK_TEST_TXCTRL_BYPASS_PLL_CLK 0x3c | ||
320 | #define BIT_TEST_TXCTRL_HDMI_MODE BIT(1) | ||
321 | #define BIT_TEST_TXCTRL_TST_PLLCK BIT(0) | ||
322 | |||
323 | /* CBUS Address, default value: 0x00 */ | ||
324 | #define REG_PAGE_CBUS_ADDR 0x00f8 | ||
325 | |||
326 | /* I2C Device Address re-assignment */ | ||
327 | #define REG_PAGE1_ADDR 0x00fc | ||
328 | #define REG_PAGE2_ADDR 0x00fd | ||
329 | #define REG_PAGE3_ADDR 0x00fe | ||
330 | #define REG_HW_TPI_ADDR 0x00ff | ||
331 | |||
332 | /* USBT CTRL0, default value: 0x00 */ | ||
333 | #define REG_UTSRST 0x0100 | ||
334 | #define BIT_UTSRST_FC_SRST BIT(5) | ||
335 | #define BIT_UTSRST_KEEPER_SRST BIT(4) | ||
336 | #define BIT_UTSRST_HTX_SRST BIT(3) | ||
337 | #define BIT_UTSRST_TRX_SRST BIT(2) | ||
338 | #define BIT_UTSRST_TTX_SRST BIT(1) | ||
339 | #define BIT_UTSRST_HRX_SRST BIT(0) | ||
340 | |||
341 | /* HSIC RX Control3, default value: 0x07 */ | ||
342 | #define REG_HRXCTRL3 0x0104 | ||
343 | #define MSK_HRXCTRL3_HRX_AFFCTRL 0xf0 | ||
344 | #define BIT_HRXCTRL3_HRX_OUT_EN BIT(2) | ||
345 | #define BIT_HRXCTRL3_STATUS_EN BIT(1) | ||
346 | #define BIT_HRXCTRL3_HRX_STAY_RESET BIT(0) | ||
347 | |||
348 | /* HSIC RX INT Registers */ | ||
349 | #define REG_HRXINTL 0x0111 | ||
350 | #define REG_HRXINTH 0x0112 | ||
351 | |||
352 | /* TDM TX NUMBITS, default value: 0x0c */ | ||
353 | #define REG_TTXNUMB 0x0116 | ||
354 | #define MSK_TTXNUMB_TTX_AFFCTRL_3_0 0xf0 | ||
355 | #define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT BIT(3) | ||
356 | #define MSK_TTXNUMB_TTX_NUMBPS_2_0 0x07 | ||
357 | |||
358 | /* TDM TX NUMSPISYM, default value: 0x04 */ | ||
359 | #define REG_TTXSPINUMS 0x0117 | ||
360 | |||
361 | /* TDM TX NUMHSICSYM, default value: 0x14 */ | ||
362 | #define REG_TTXHSICNUMS 0x0118 | ||
363 | |||
364 | /* TDM TX NUMTOTSYM, default value: 0x18 */ | ||
365 | #define REG_TTXTOTNUMS 0x0119 | ||
366 | |||
367 | /* TDM TX INT Low, default value: 0x00 */ | ||
368 | #define REG_TTXINTL 0x0136 | ||
369 | #define BIT_TTXINTL_TTX_INTR7 BIT(7) | ||
370 | #define BIT_TTXINTL_TTX_INTR6 BIT(6) | ||
371 | #define BIT_TTXINTL_TTX_INTR5 BIT(5) | ||
372 | #define BIT_TTXINTL_TTX_INTR4 BIT(4) | ||
373 | #define BIT_TTXINTL_TTX_INTR3 BIT(3) | ||
374 | #define BIT_TTXINTL_TTX_INTR2 BIT(2) | ||
375 | #define BIT_TTXINTL_TTX_INTR1 BIT(1) | ||
376 | #define BIT_TTXINTL_TTX_INTR0 BIT(0) | ||
377 | |||
378 | /* TDM TX INT High, default value: 0x00 */ | ||
379 | #define REG_TTXINTH 0x0137 | ||
380 | #define BIT_TTXINTH_TTX_INTR15 BIT(7) | ||
381 | #define BIT_TTXINTH_TTX_INTR14 BIT(6) | ||
382 | #define BIT_TTXINTH_TTX_INTR13 BIT(5) | ||
383 | #define BIT_TTXINTH_TTX_INTR12 BIT(4) | ||
384 | #define BIT_TTXINTH_TTX_INTR11 BIT(3) | ||
385 | #define BIT_TTXINTH_TTX_INTR10 BIT(2) | ||
386 | #define BIT_TTXINTH_TTX_INTR9 BIT(1) | ||
387 | #define BIT_TTXINTH_TTX_INTR8 BIT(0) | ||
388 | |||
389 | /* TDM RX Control, default value: 0x1c */ | ||
390 | #define REG_TRXCTRL 0x013b | ||
391 | #define BIT_TRXCTRL_TRX_CLR_WVALLOW BIT(4) | ||
392 | #define BIT_TRXCTRL_TRX_FROM_SE_COC BIT(3) | ||
393 | #define MSK_TRXCTRL_TRX_NUMBPS_2_0 0x07 | ||
394 | |||
395 | /* TDM RX NUMSPISYM, default value: 0x04 */ | ||
396 | #define REG_TRXSPINUMS 0x013c | ||
397 | |||
398 | /* TDM RX NUMHSICSYM, default value: 0x14 */ | ||
399 | #define REG_TRXHSICNUMS 0x013d | ||
400 | |||
401 | /* TDM RX NUMTOTSYM, default value: 0x18 */ | ||
402 | #define REG_TRXTOTNUMS 0x013e | ||
403 | |||
404 | /* TDM RX Status 2nd, default value: 0x00 */ | ||
405 | #define REG_TRXSTA2 0x015c | ||
406 | |||
407 | /* TDM RX INT Low, default value: 0x00 */ | ||
408 | #define REG_TRXINTL 0x0163 | ||
409 | |||
410 | /* TDM RX INT High, default value: 0x00 */ | ||
411 | #define REG_TRXINTH 0x0164 | ||
412 | |||
413 | /* TDM RX INTMASK High, default value: 0x00 */ | ||
414 | #define REG_TRXINTMH 0x0166 | ||
415 | |||
416 | /* HSIC TX CTRL, default value: 0x00 */ | ||
417 | #define REG_HTXCTRL 0x0169 | ||
418 | #define BIT_HTXCTRL_HTX_ALLSBE_SOP BIT(4) | ||
419 | #define BIT_HTXCTRL_HTX_RGDINV_USB BIT(3) | ||
420 | #define BIT_HTXCTRL_HTX_RSPTDM_BUSY BIT(2) | ||
421 | #define BIT_HTXCTRL_HTX_DRVCONN1 BIT(1) | ||
422 | #define BIT_HTXCTRL_HTX_DRVRST1 BIT(0) | ||
423 | |||
424 | /* HSIC TX INT Low, default value: 0x00 */ | ||
425 | #define REG_HTXINTL 0x017d | ||
426 | |||
427 | /* HSIC TX INT High, default value: 0x00 */ | ||
428 | #define REG_HTXINTH 0x017e | ||
429 | |||
430 | /* HSIC Keeper, default value: 0x00 */ | ||
431 | #define REG_KEEPER 0x0181 | ||
432 | #define MSK_KEEPER_KEEPER_MODE_1_0 0x03 | ||
433 | |||
434 | /* HSIC Flow Control General, default value: 0x02 */ | ||
435 | #define REG_FCGC 0x0183 | ||
436 | #define BIT_FCGC_HSIC_FC_HOSTMODE BIT(1) | ||
437 | #define BIT_FCGC_HSIC_FC_ENABLE BIT(0) | ||
438 | |||
439 | /* HSIC Flow Control CTR13, default value: 0xfc */ | ||
440 | #define REG_FCCTR13 0x0191 | ||
441 | |||
442 | /* HSIC Flow Control CTR14, default value: 0xff */ | ||
443 | #define REG_FCCTR14 0x0192 | ||
444 | |||
445 | /* HSIC Flow Control CTR15, default value: 0xff */ | ||
446 | #define REG_FCCTR15 0x0193 | ||
447 | |||
448 | /* HSIC Flow Control CTR50, default value: 0x03 */ | ||
449 | #define REG_FCCTR50 0x01b6 | ||
450 | |||
451 | /* HSIC Flow Control INTR0, default value: 0x00 */ | ||
452 | #define REG_FCINTR0 0x01ec | ||
453 | #define REG_FCINTR1 0x01ed | ||
454 | #define REG_FCINTR2 0x01ee | ||
455 | #define REG_FCINTR3 0x01ef | ||
456 | #define REG_FCINTR4 0x01f0 | ||
457 | #define REG_FCINTR5 0x01f1 | ||
458 | #define REG_FCINTR6 0x01f2 | ||
459 | #define REG_FCINTR7 0x01f3 | ||
460 | |||
461 | /* TDM Low Latency, default value: 0x20 */ | ||
462 | #define REG_TDMLLCTL 0x01fc | ||
463 | #define MSK_TDMLLCTL_TRX_LL_SEL_MANUAL 0xc0 | ||
464 | #define MSK_TDMLLCTL_TRX_LL_SEL_MODE 0x30 | ||
465 | #define MSK_TDMLLCTL_TTX_LL_SEL_MANUAL 0x0c | ||
466 | #define BIT_TDMLLCTL_TTX_LL_TIE_LOW BIT(1) | ||
467 | #define BIT_TDMLLCTL_TTX_LL_SEL_MODE BIT(0) | ||
468 | |||
469 | /* TMDS 0 Clock Control, default value: 0x10 */ | ||
470 | #define REG_TMDS0_CCTRL1 0x0210 | ||
471 | #define MSK_TMDS0_CCTRL1_TEST_SEL 0xc0 | ||
472 | #define MSK_TMDS0_CCTRL1_CLK1X_CTL 0x30 | ||
473 | |||
474 | /* TMDS Clock Enable, default value: 0x00 */ | ||
475 | #define REG_TMDS_CLK_EN 0x0211 | ||
476 | #define BIT_TMDS_CLK_EN_CLK_EN BIT(0) | ||
477 | |||
478 | /* TMDS Channel Enable, default value: 0x00 */ | ||
479 | #define REG_TMDS_CH_EN 0x0212 | ||
480 | #define BIT_TMDS_CH_EN_CH0_EN BIT(4) | ||
481 | #define BIT_TMDS_CH_EN_CH12_EN BIT(0) | ||
482 | |||
483 | /* BGR_BIAS, default value: 0x07 */ | ||
484 | #define REG_BGR_BIAS 0x0215 | ||
485 | #define BIT_BGR_BIAS_BGR_EN BIT(7) | ||
486 | #define MSK_BGR_BIAS_BIAS_BGR_D 0x0f | ||
487 | |||
488 | /* TMDS 0 Digital I2C BW, default value: 0x0a */ | ||
489 | #define REG_ALICE0_BW_I2C 0x0231 | ||
490 | |||
491 | /* TMDS 0 Digital Zone Control, default value: 0xe0 */ | ||
492 | #define REG_ALICE0_ZONE_CTRL 0x024c | ||
493 | #define BIT_ALICE0_ZONE_CTRL_ICRST_N BIT(7) | ||
494 | #define BIT_ALICE0_ZONE_CTRL_USE_INT_DIV20 BIT(6) | ||
495 | #define MSK_ALICE0_ZONE_CTRL_SZONE_I2C 0x30 | ||
496 | #define MSK_ALICE0_ZONE_CTRL_ZONE_CTRL 0x0f | ||
497 | |||
498 | /* TMDS 0 Digital PLL Mode Control, default value: 0x00 */ | ||
499 | #define REG_ALICE0_MODE_CTRL 0x024d | ||
500 | #define MSK_ALICE0_MODE_CTRL_PLL_MODE_I2C 0x0c | ||
501 | #define MSK_ALICE0_MODE_CTRL_DIV20_CTRL 0x03 | ||
502 | |||
503 | /* MHL Tx Control 6th, default value: 0xa0 */ | ||
504 | #define REG_MHLTX_CTL6 0x0285 | ||
505 | #define MSK_MHLTX_CTL6_EMI_SEL 0xe0 | ||
506 | #define MSK_MHLTX_CTL6_TX_CLK_SHAPE_9_8 0x03 | ||
507 | |||
508 | /* Packet Filter0, default value: 0x00 */ | ||
509 | #define REG_PKT_FILTER_0 0x0290 | ||
510 | #define BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT BIT(7) | ||
511 | #define BIT_PKT_FILTER_0_DROP_CEA_CP_PKT BIT(6) | ||
512 | #define BIT_PKT_FILTER_0_DROP_MPEG_PKT BIT(5) | ||
513 | #define BIT_PKT_FILTER_0_DROP_SPIF_PKT BIT(4) | ||
514 | #define BIT_PKT_FILTER_0_DROP_AIF_PKT BIT(3) | ||
515 | #define BIT_PKT_FILTER_0_DROP_AVI_PKT BIT(2) | ||
516 | #define BIT_PKT_FILTER_0_DROP_CTS_PKT BIT(1) | ||
517 | #define BIT_PKT_FILTER_0_DROP_GCP_PKT BIT(0) | ||
518 | |||
519 | /* Packet Filter1, default value: 0x00 */ | ||
520 | #define REG_PKT_FILTER_1 0x0291 | ||
521 | #define BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS BIT(7) | ||
522 | #define BIT_PKT_FILTER_1_AVI_OVERRIDE_DIS BIT(6) | ||
523 | #define BIT_PKT_FILTER_1_DROP_AUDIO_PKT BIT(3) | ||
524 | #define BIT_PKT_FILTER_1_DROP_GEN2_PKT BIT(2) | ||
525 | #define BIT_PKT_FILTER_1_DROP_GEN_PKT BIT(1) | ||
526 | #define BIT_PKT_FILTER_1_DROP_VSIF_PKT BIT(0) | ||
527 | |||
528 | /* TMDS Clock Status, default value: 0x10 */ | ||
529 | #define REG_TMDS_CSTAT_P3 0x02a0 | ||
530 | #define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_CLR_MUTE BIT(7) | ||
531 | #define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_SET_MUTE BIT(6) | ||
532 | #define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_NEW_CP BIT(5) | ||
533 | #define BIT_TMDS_CSTAT_P3_CLR_AVI BIT(3) | ||
534 | #define BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS BIT(2) | ||
535 | #define BIT_TMDS_CSTAT_P3_SCDT BIT(1) | ||
536 | #define BIT_TMDS_CSTAT_P3_CKDT BIT(0) | ||
537 | |||
538 | /* RX_HDMI Control, default value: 0x10 */ | ||
539 | #define REG_RX_HDMI_CTRL0 0x02a1 | ||
540 | #define BIT_RX_HDMI_CTRL0_BYP_DVIFILT_SYNC BIT(5) | ||
541 | #define BIT_RX_HDMI_CTRL0_HDMI_MODE_EN_ITSELF_CLR BIT(4) | ||
542 | #define BIT_RX_HDMI_CTRL0_HDMI_MODE_SW_VALUE BIT(3) | ||
543 | #define BIT_RX_HDMI_CTRL0_HDMI_MODE_OVERWRITE BIT(2) | ||
544 | #define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE_EN BIT(1) | ||
545 | #define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE BIT(0) | ||
546 | |||
547 | /* RX_HDMI Control, default value: 0x38 */ | ||
548 | #define REG_RX_HDMI_CTRL2 0x02a3 | ||
549 | #define MSK_RX_HDMI_CTRL2_IDLE_CNT 0xf0 | ||
550 | #define VAL_RX_HDMI_CTRL2_IDLE_CNT(n) ((n) << 4) | ||
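| /* e.g. VAL_RX_HDMI_CTRL2_IDLE_CNT(3) == 0x30, placing the count in the | ||
|  * MSK_RX_HDMI_CTRL2_IDLE_CNT (0xf0) field. | ||
|  */ | ||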
551 | #define BIT_RX_HDMI_CTRL2_USE_AV_MUTE BIT(3) | ||
552 | #define BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI BIT(0) | ||
553 | |||
554 | /* RX_HDMI Control, default value: 0x0f */ | ||
555 | #define REG_RX_HDMI_CTRL3 0x02a4 | ||
556 | #define MSK_RX_HDMI_CTRL3_PP_MODE_CLK_EN 0x0f | ||
557 | |||
558 | /* rx_hdmi Clear Buffer, default value: 0x00 */ | ||
559 | #define REG_RX_HDMI_CLR_BUFFER 0x02ac | ||
560 | #define MSK_RX_HDMI_CLR_BUFFER_AIF4VSI_CMP 0xc0 | ||
561 | #define BIT_RX_HDMI_CLR_BUFFER_USE_AIF4VSI BIT(5) | ||
562 | #define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_W_AVI BIT(4) | ||
563 | #define BIT_RX_HDMI_CLR_BUFFER_VSI_IEEE_ID_CHK_EN BIT(3) | ||
564 | #define BIT_RX_HDMI_CLR_BUFFER_SWAP_VSI_IEEE_ID BIT(2) | ||
565 | #define BIT_RX_HDMI_CLR_BUFFER_AIF_CLR_EN BIT(1) | ||
566 | #define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN BIT(0) | ||
567 | |||
568 | /* RX_HDMI VSI Header1, default value: 0x00 */ | ||
569 | #define REG_RX_HDMI_MON_PKT_HEADER1 0x02b8 | ||
570 | |||
571 | /* RX_HDMI VSI MHL Monitor, default value: 0x3c */ | ||
572 | #define REG_RX_HDMI_VSIF_MHL_MON 0x02d7 | ||
573 | |||
574 | #define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_3D_FORMAT 0x3c | ||
575 | #define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_VID_FORMAT 0x03 | ||
576 | |||
577 | /* Interrupt Source 9, default value: 0x00 */ | ||
578 | #define REG_INTR9 0x02e0 | ||
579 | #define BIT_INTR9_EDID_ERROR BIT(6) | ||
580 | #define BIT_INTR9_EDID_DONE BIT(5) | ||
581 | #define BIT_INTR9_DEVCAP_DONE BIT(4) | ||
582 | |||
583 | /* Interrupt 9 Mask, default value: 0x00 */ | ||
584 | #define REG_INTR9_MASK 0x02e1 | ||
585 | |||
586 | /* TPI CBUS Start, default value: 0x00 */ | ||
587 | #define REG_TPI_CBUS_START 0x02e2 | ||
588 | #define BIT_TPI_CBUS_START_RCP_REQ_START BIT(7) | ||
589 | #define BIT_TPI_CBUS_START_RCPK_REPLY_START BIT(6) | ||
590 | #define BIT_TPI_CBUS_START_RCPE_REPLY_START BIT(5) | ||
591 | #define BIT_TPI_CBUS_START_PUT_LINK_MODE_START BIT(4) | ||
592 | #define BIT_TPI_CBUS_START_PUT_DCAPCHG_START BIT(3) | ||
593 | #define BIT_TPI_CBUS_START_PUT_DCAPRDY_START BIT(2) | ||
594 | #define BIT_TPI_CBUS_START_GET_EDID_START_0 BIT(1) | ||
595 | #define BIT_TPI_CBUS_START_GET_DEVCAP_START BIT(0) | ||
596 | |||
597 | /* EDID Control, default value: 0x10 */ | ||
598 | #define REG_EDID_CTRL 0x02e3 | ||
599 | #define BIT_EDID_CTRL_EDID_PRIME_VALID BIT(7) | ||
600 | #define BIT_EDID_CTRL_XDEVCAP_EN BIT(6) | ||
601 | #define BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP BIT(5) | ||
602 | #define BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO BIT(4) | ||
603 | #define BIT_EDID_CTRL_EDID_FIFO_ACCESS_ALWAYS_EN BIT(3) | ||
604 | #define BIT_EDID_CTRL_EDID_FIFO_BLOCK_SEL BIT(2) | ||
605 | #define BIT_EDID_CTRL_INVALID_BKSV BIT(1) | ||
606 | #define BIT_EDID_CTRL_EDID_MODE_EN BIT(0) | ||
607 | |||
608 | /* EDID FIFO Addr, default value: 0x00 */ | ||
609 | #define REG_EDID_FIFO_ADDR 0x02e9 | ||
610 | |||
611 | /* EDID FIFO Write Data, default value: 0x00 */ | ||
612 | #define REG_EDID_FIFO_WR_DATA 0x02ea | ||
613 | |||
614 | /* EDID/DEVCAP FIFO Internal Addr, default value: 0x00 */ | ||
615 | #define REG_EDID_FIFO_ADDR_MON 0x02eb | ||
616 | |||
617 | /* EDID FIFO Read Data, default value: 0x00 */ | ||
618 | #define REG_EDID_FIFO_RD_DATA 0x02ec | ||
619 | |||
620 | /* EDID DDC Segment Pointer, default value: 0x00 */ | ||
621 | #define REG_EDID_START_EXT 0x02ed | ||
622 | |||
623 | /* TX IP BIST CNTL and Status, default value: 0x00 */ | ||
624 | #define REG_TX_IP_BIST_CNTLSTA 0x02f2 | ||
625 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_QUARTER_CLK_SEL BIT(6) | ||
626 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_DONE BIT(5) | ||
627 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_ON BIT(4) | ||
628 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_RUN BIT(3) | ||
629 | #define BIT_TX_IP_BIST_CNTLSTA_TXCLK_HALF_SEL BIT(2) | ||
630 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_EN BIT(1) | ||
631 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_SEL BIT(0) | ||
632 | |||
633 | /* TX IP BIST INST LOW, default value: 0x00 */ | ||
634 | #define REG_TX_IP_BIST_INST_LOW 0x02f3 | ||
635 | #define REG_TX_IP_BIST_INST_HIGH 0x02f4 | ||
636 | |||
637 | /* TX IP BIST PATTERN LOW, default value: 0x00 */ | ||
638 | #define REG_TX_IP_BIST_PAT_LOW 0x02f5 | ||
639 | #define REG_TX_IP_BIST_PAT_HIGH 0x02f6 | ||
640 | |||
641 | /* TX IP BIST CONFIGURE LOW, default value: 0x00 */ | ||
642 | #define REG_TX_IP_BIST_CONF_LOW 0x02f7 | ||
643 | #define REG_TX_IP_BIST_CONF_HIGH 0x02f8 | ||
644 | |||
645 | /* E-MSC General Control, default value: 0x80 */ | ||
646 | #define REG_GENCTL 0x0300 | ||
647 | #define BIT_GENCTL_SPEC_TRANS_DIS BIT(7) | ||
648 | #define BIT_GENCTL_DIS_XMIT_ERR_STATE BIT(6) | ||
649 | #define BIT_GENCTL_SPI_MISO_EDGE BIT(5) | ||
650 | #define BIT_GENCTL_SPI_MOSI_EDGE BIT(4) | ||
651 | #define BIT_GENCTL_CLR_EMSC_RFIFO BIT(3) | ||
652 | #define BIT_GENCTL_CLR_EMSC_XFIFO BIT(2) | ||
653 | #define BIT_GENCTL_START_TRAIN_SEQ BIT(1) | ||
654 | #define BIT_GENCTL_EMSC_EN BIT(0) | ||
655 | |||
656 | /* E-MSC Comma ErrorCNT, default value: 0x03 */ | ||
657 | #define REG_COMMECNT 0x0305 | ||
658 | #define BIT_COMMECNT_I2C_TO_EMSC_EN BIT(7) | ||
659 | #define MSK_COMMECNT_COMMA_CHAR_ERR_CNT 0x0f | ||
660 | |||
661 | /* E-MSC RFIFO ByteCnt, default value: 0x00 */ | ||
662 | #define REG_EMSCRFIFOBCNTL 0x031a | ||
663 | #define REG_EMSCRFIFOBCNTH 0x031b | ||
664 | |||
665 | /* SPI Burst Cnt Status, default value: 0x00 */ | ||
666 | #define REG_SPIBURSTCNT 0x031e | ||
667 | |||
668 | /* SPI Burst Status and SWRST, default value: 0x00 */ | ||
669 | #define REG_SPIBURSTSTAT 0x0322 | ||
670 | #define BIT_SPIBURSTSTAT_SPI_HDCPRST BIT(7) | ||
671 | #define BIT_SPIBURSTSTAT_SPI_CBUSRST BIT(6) | ||
672 | #define BIT_SPIBURSTSTAT_SPI_SRST BIT(5) | ||
673 | #define BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE BIT(0) | ||
674 | |||
675 | /* E-MSC 1st Interrupt, default value: 0x00 */ | ||
676 | #define REG_EMSCINTR 0x0323 | ||
677 | #define BIT_EMSCINTR_EMSC_XFIFO_EMPTY BIT(7) | ||
678 | #define BIT_EMSCINTR_EMSC_XMIT_ACK_TOUT BIT(6) | ||
679 | #define BIT_EMSCINTR_EMSC_RFIFO_READ_ERR BIT(5) | ||
680 | #define BIT_EMSCINTR_EMSC_XFIFO_WRITE_ERR BIT(4) | ||
681 | #define BIT_EMSCINTR_EMSC_COMMA_CHAR_ERR BIT(3) | ||
682 | #define BIT_EMSCINTR_EMSC_XMIT_DONE BIT(2) | ||
683 | #define BIT_EMSCINTR_EMSC_XMIT_GNT_TOUT BIT(1) | ||
684 | #define BIT_EMSCINTR_SPI_DVLD BIT(0) | ||
685 | |||
686 | /* E-MSC Interrupt Mask, default value: 0x00 */ | ||
687 | #define REG_EMSCINTRMASK 0x0324 | ||
688 | |||
689 | /* I2C E-MSC XMIT FIFO Write Port, default value: 0x00 */ | ||
690 | #define REG_EMSC_XMIT_WRITE_PORT 0x032a | ||
691 | |||
692 | /* I2C E-MSC RCV FIFO Read Port, default value: 0x00 */ | ||
693 | #define REG_EMSC_RCV_READ_PORT 0x032b | ||
694 | |||
695 | /* E-MSC 2nd Interrupt, default value: 0x00 */ | ||
696 | #define REG_EMSCINTR1 0x032c | ||
697 | #define BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR BIT(0) | ||
698 | |||
699 | /* E-MSC 2nd Interrupt Mask, default value: 0x00 */ | ||
700 | #define REG_EMSCINTRMASK1 0x032d | ||
701 | #define BIT_EMSCINTRMASK1_EMSC_INTRMASK1_0 BIT(0) | ||
702 | |||
703 | /* MHL Top Ctl, default value: 0x00 */ | ||
704 | #define REG_MHL_TOP_CTL 0x0330 | ||
705 | #define BIT_MHL_TOP_CTL_MHL3_DOC_SEL BIT(7) | ||
706 | #define BIT_MHL_TOP_CTL_MHL_PP_SEL BIT(6) | ||
707 | #define MSK_MHL_TOP_CTL_IF_TIMING_CTL 0x03 | ||
708 | |||
709 | /* MHL DataPath 1st Ctl, default value: 0xbc */ | ||
710 | #define REG_MHL_DP_CTL0 0x0331 | ||
711 | #define BIT_MHL_DP_CTL0_DP_OE BIT(7) | ||
712 | #define BIT_MHL_DP_CTL0_TX_OE_OVR BIT(6) | ||
713 | #define MSK_MHL_DP_CTL0_TX_OE 0x3f | ||
714 | |||
715 | /* MHL DataPath 2nd Ctl, default value: 0xbb */ | ||
716 | #define REG_MHL_DP_CTL1 0x0332 | ||
717 | #define MSK_MHL_DP_CTL1_CK_SWING_CTL 0xf0 | ||
718 | #define MSK_MHL_DP_CTL1_DT_SWING_CTL 0x0f | ||
719 | |||
720 | /* MHL DataPath 3rd Ctl, default value: 0x2f */ | ||
721 | #define REG_MHL_DP_CTL2 0x0333 | ||
722 | #define BIT_MHL_DP_CTL2_CLK_BYPASS_EN BIT(7) | ||
723 | #define MSK_MHL_DP_CTL2_DAMP_TERM_SEL 0x30 | ||
724 | #define MSK_MHL_DP_CTL2_CK_TERM_SEL 0x0c | ||
725 | #define MSK_MHL_DP_CTL2_DT_TERM_SEL 0x03 | ||
726 | |||
727 | /* MHL DataPath 4th Ctl, default value: 0x48 */ | ||
728 | #define REG_MHL_DP_CTL3 0x0334 | ||
729 | #define MSK_MHL_DP_CTL3_DT_DRV_VNBC_CTL 0xf0 | ||
730 | #define MSK_MHL_DP_CTL3_DT_DRV_VNB_CTL 0x0f | ||
731 | |||
732 | /* MHL DataPath 5th Ctl, default value: 0x48 */ | ||
733 | #define REG_MHL_DP_CTL4 0x0335 | ||
734 | #define MSK_MHL_DP_CTL4_CK_DRV_VNBC_CTL 0xf0 | ||
735 | #define MSK_MHL_DP_CTL4_CK_DRV_VNB_CTL 0x0f | ||
736 | |||
737 | /* MHL DataPath 6th Ctl, default value: 0x3f */ | ||
738 | #define REG_MHL_DP_CTL5 0x0336 | ||
739 | #define BIT_MHL_DP_CTL5_RSEN_EN_OVR BIT(7) | ||
740 | #define BIT_MHL_DP_CTL5_RSEN_EN BIT(6) | ||
741 | #define MSK_MHL_DP_CTL5_DAMP_TERM_VGS_CTL 0x30 | ||
742 | #define MSK_MHL_DP_CTL5_CK_TERM_VGS_CTL 0x0c | ||
743 | #define MSK_MHL_DP_CTL5_DT_TERM_VGS_CTL 0x03 | ||
744 | |||
745 | /* MHL PLL 1st Ctl, default value: 0x05 */ | ||
746 | #define REG_MHL_PLL_CTL0 0x0337 | ||
747 | #define BIT_MHL_PLL_CTL0_AUD_CLK_EN BIT(7) | ||
748 | |||
749 | #define MSK_MHL_PLL_CTL0_AUD_CLK_RATIO 0x70 | ||
750 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_10 0x70 | ||
751 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_6 0x60 | ||
752 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_4 0x50 | ||
753 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2 0x40 | ||
754 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_5 0x30 | ||
755 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_3 0x20 | ||
756 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2_PRIME 0x10 | ||
757 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_1 0x00 | ||
758 | |||
759 | #define MSK_MHL_PLL_CTL0_HDMI_CLK_RATIO 0x0c | ||
760 | #define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_4X 0x0c | ||
761 | #define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_2X 0x08 | ||
762 | #define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X 0x04 | ||
763 | #define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_HALF_X 0x00 | ||
764 | |||
765 | #define BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL BIT(1) | ||
766 | #define BIT_MHL_PLL_CTL0_ZONE_MASK_OE BIT(0) | ||
767 | |||
768 | /* MHL PLL 3rd Ctl, default value: 0x80 */ | ||
769 | #define REG_MHL_PLL_CTL2 0x0339 | ||
770 | #define BIT_MHL_PLL_CTL2_CLKDETECT_EN BIT(7) | ||
771 | #define BIT_MHL_PLL_CTL2_MEAS_FVCO BIT(3) | ||
772 | #define BIT_MHL_PLL_CTL2_PLL_FAST_LOCK BIT(2) | ||
773 | #define MSK_MHL_PLL_CTL2_PLL_LF_SEL 0x03 | ||
774 | |||
775 | /* MHL CBUS 1st Ctl, default value: 0x12 */ | ||
776 | #define REG_MHL_CBUS_CTL0 0x0340 | ||
777 | #define BIT_MHL_CBUS_CTL0_CBUS_RGND_TEST_MODE BIT(7) | ||
778 | |||
779 | #define MSK_MHL_CBUS_CTL0_CBUS_RGND_VTH_CTL 0x30 | ||
780 | #define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734 0x00 | ||
781 | #define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_747 0x10 | ||
782 | #define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_740 0x20 | ||
783 | #define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_754 0x30 | ||
784 | |||
785 | #define MSK_MHL_CBUS_CTL0_CBUS_RES_TEST_SEL 0x0c | ||
786 | |||
787 | #define MSK_MHL_CBUS_CTL0_CBUS_DRV_SEL 0x03 | ||
788 | #define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAKEST 0x00 | ||
789 | #define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAK 0x01 | ||
790 | #define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG 0x02 | ||
791 | #define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONGEST 0x03 | ||
792 | |||
793 | /* MHL CBUS 2nd Ctl, default value: 0x03 */ | ||
794 | #define REG_MHL_CBUS_CTL1 0x0341 | ||
795 | #define MSK_MHL_CBUS_CTL1_CBUS_RGND_RES_CTL 0x07 | ||
796 | #define VAL_MHL_CBUS_CTL1_0888_OHM 0x00 | ||
797 | #define VAL_MHL_CBUS_CTL1_1115_OHM 0x04 | ||
798 | #define VAL_MHL_CBUS_CTL1_1378_OHM 0x07 | ||
799 | |||
800 | /* MHL CoC 1st Ctl, default value: 0xc3 */ | ||
801 | #define REG_MHL_COC_CTL0 0x0342 | ||
802 | #define BIT_MHL_COC_CTL0_COC_BIAS_EN BIT(7) | ||
803 | #define MSK_MHL_COC_CTL0_COC_BIAS_CTL 0x70 | ||
804 | #define MSK_MHL_COC_CTL0_COC_TERM_CTL 0x07 | ||
805 | |||
806 | /* MHL CoC 2nd Ctl, default value: 0x87 */ | ||
807 | #define REG_MHL_COC_CTL1 0x0343 | ||
808 | #define BIT_MHL_COC_CTL1_COC_EN BIT(7) | ||
809 | #define MSK_MHL_COC_CTL1_COC_DRV_CTL 0x3f | ||
810 | |||
811 | /* MHL CoC 4th Ctl, default value: 0x00 */ | ||
812 | #define REG_MHL_COC_CTL3 0x0345 | ||
813 | #define BIT_MHL_COC_CTL3_COC_AECHO_EN BIT(0) | ||
814 | |||
815 | /* MHL CoC 5th Ctl, default value: 0x28 */ | ||
816 | #define REG_MHL_COC_CTL4 0x0346 | ||
817 | #define MSK_MHL_COC_CTL4_COC_IF_CTL 0xf0 | ||
818 | #define MSK_MHL_COC_CTL4_COC_SLEW_CTL 0x0f | ||
819 | |||
820 | /* MHL CoC 6th Ctl, default value: 0x0d */ | ||
821 | #define REG_MHL_COC_CTL5 0x0347 | ||
822 | |||
823 | /* MHL DoC 1st Ctl, default value: 0x18 */ | ||
824 | #define REG_MHL_DOC_CTL0 0x0349 | ||
825 | #define BIT_MHL_DOC_CTL0_DOC_RXDATA_EN BIT(7) | ||
826 | #define MSK_MHL_DOC_CTL0_DOC_DM_TERM 0x38 | ||
827 | #define MSK_MHL_DOC_CTL0_DOC_OPMODE 0x06 | ||
828 | #define BIT_MHL_DOC_CTL0_DOC_RXBIAS_EN BIT(0) | ||
829 | |||
830 | /* MHL DataPath 7th Ctl, default value: 0x2a */ | ||
831 | #define REG_MHL_DP_CTL6 0x0350 | ||
832 | #define BIT_MHL_DP_CTL6_DP_TAP2_SGN BIT(5) | ||
833 | #define BIT_MHL_DP_CTL6_DP_TAP2_EN BIT(4) | ||
834 | #define BIT_MHL_DP_CTL6_DP_TAP1_SGN BIT(3) | ||
835 | #define BIT_MHL_DP_CTL6_DP_TAP1_EN BIT(2) | ||
836 | #define BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN BIT(1) | ||
837 | #define BIT_MHL_DP_CTL6_DP_PRE_POST_SEL BIT(0) | ||
838 | |||
839 | /* MHL DataPath 8th Ctl, default value: 0x06 */ | ||
840 | #define REG_MHL_DP_CTL7 0x0351 | ||
841 | #define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL 0xf0 | ||
842 | #define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL 0x0f | ||
843 | |||
844 | /* Tx Zone Ctl1, default value: 0x00 */ | ||
845 | #define REG_TX_ZONE_CTL1 0x0361 | ||
846 | #define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE 0x08 | ||
847 | |||
848 | /* MHL3 Tx Zone Ctl, default value: 0x00 */ | ||
849 | #define REG_MHL3_TX_ZONE_CTL 0x0364 | ||
850 | #define BIT_MHL3_TX_ZONE_CTL_MHL2_INTPLT_ZONE_MANU_EN BIT(7) | ||
851 | #define MSK_MHL3_TX_ZONE_CTL_MHL3_TX_ZONE 0x03 | ||
852 | |||
853 | #define MSK_TX_ZONE_CTL3_TX_ZONE 0x03 | ||
854 | #define VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS 0x00 | ||
855 | #define VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS 0x01 | ||
856 | #define VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS 0x02 | ||
857 | |||
858 | /* HDCP Polling Control and Status, default value: 0x70 */ | ||
859 | #define REG_HDCP2X_POLL_CS 0x0391 | ||
860 | |||
861 | #define BIT_HDCP2X_POLL_CS_HDCP2X_MSG_SZ_CLR_OPTION BIT(6) | ||
862 | #define BIT_HDCP2X_POLL_CS_HDCP2X_RPT_READY_CLR_OPTION BIT(5) | ||
863 | #define BIT_HDCP2X_POLL_CS_HDCP2X_REAUTH_REQ_CLR_OPTION BIT(4) | ||
864 | #define MSK_HDCP2X_POLL_CS_ 0x0c | ||
865 | #define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_GNT BIT(1) | ||
866 | #define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_EN BIT(0) | ||
867 | |||
868 | /* HDCP Interrupt 0, default value: 0x00 */ | ||
869 | #define REG_HDCP2X_INTR0 0x0398 | ||
870 | |||
871 | /* HDCP Interrupt 0 Mask, default value: 0x00 */ | ||
872 | #define REG_HDCP2X_INTR0_MASK 0x0399 | ||
873 | |||
874 | /* HDCP General Control 0, default value: 0x02 */ | ||
875 | #define REG_HDCP2X_CTRL_0 0x03a0 | ||
876 | #define BIT_HDCP2X_CTRL_0_HDCP2X_ENCRYPT_EN BIT(7) | ||
877 | #define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_SEL BIT(6) | ||
878 | #define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_OVR BIT(5) | ||
879 | #define BIT_HDCP2X_CTRL_0_HDCP2X_PRECOMPUTE BIT(4) | ||
880 | #define BIT_HDCP2X_CTRL_0_HDCP2X_HDMIMODE BIT(3) | ||
881 | #define BIT_HDCP2X_CTRL_0_HDCP2X_REPEATER BIT(2) | ||
882 | #define BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX BIT(1) | ||
883 | #define BIT_HDCP2X_CTRL_0_HDCP2X_EN BIT(0) | ||
884 | |||
885 | /* HDCP General Control 1, default value: 0x08 */ | ||
886 | #define REG_HDCP2X_CTRL_1 0x03a1 | ||
887 | #define MSK_HDCP2X_CTRL_1_HDCP2X_REAUTH_MSK_3_0 0xf0 | ||
888 | #define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_SW BIT(3) | ||
889 | #define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_OVR BIT(2) | ||
890 | #define BIT_HDCP2X_CTRL_1_HDCP2X_CTL3MSK BIT(1) | ||
891 | #define BIT_HDCP2X_CTRL_1_HDCP2X_REAUTH_SW BIT(0) | ||
892 | |||
893 | /* HDCP Misc Control, default value: 0x00 */ | ||
894 | #define REG_HDCP2X_MISC_CTRL 0x03a5 | ||
895 | #define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_XFER_START BIT(4) | ||
896 | #define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR_START BIT(3) | ||
897 | #define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR BIT(2) | ||
898 | #define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD_START BIT(1) | ||
899 | #define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD BIT(0) | ||
900 | |||
901 | /* HDCP RPT SMNG K, default value: 0x00 */ | ||
902 | #define REG_HDCP2X_RPT_SMNG_K 0x03a6 | ||
903 | |||
904 | /* HDCP RPT SMNG In, default value: 0x00 */ | ||
905 | #define REG_HDCP2X_RPT_SMNG_IN 0x03a7 | ||
906 | |||
907 | /* HDCP Auth Status, default value: 0x00 */ | ||
908 | #define REG_HDCP2X_AUTH_STAT 0x03aa | ||
909 | |||
910 | /* HDCP RPT RCVID Out, default value: 0x00 */ | ||
911 | #define REG_HDCP2X_RPT_RCVID_OUT 0x03ac | ||
912 | |||
913 | /* HDCP TP1, default value: 0x62 */ | ||
914 | #define REG_HDCP2X_TP1 0x03b4 | ||
915 | |||
916 | /* HDCP GP Out 0, default value: 0x00 */ | ||
917 | #define REG_HDCP2X_GP_OUT0 0x03c7 | ||
918 | |||
919 | /* HDCP Repeater RCVR ID 0, default value: 0x00 */ | ||
920 | #define REG_HDCP2X_RPT_RCVR_ID0 0x03d1 | ||
921 | |||
922 | /* HDCP DDCM Status, default value: 0x00 */ | ||
923 | #define REG_HDCP2X_DDCM_STS 0x03d8 | ||
924 | #define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_ERR_STS_3_0 0xf0 | ||
925 | #define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_CTL_CS_3_0 0x0f | ||
926 | |||
927 | /* HDMI2MHL3 Control, default value: 0x0a */ | ||
928 | #define REG_M3_CTRL 0x03e0 | ||
929 | #define BIT_M3_CTRL_H2M_SWRST BIT(4) | ||
930 | #define BIT_M3_CTRL_SW_MHL3_SEL BIT(3) | ||
931 | #define BIT_M3_CTRL_M3AV_EN BIT(2) | ||
932 | #define BIT_M3_CTRL_ENC_TMDS BIT(1) | ||
933 | #define BIT_M3_CTRL_MHL3_MASTER_EN BIT(0) | ||
934 | |||
935 | #define VAL_M3_CTRL_MHL1_2_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \ | ||
936 | | BIT_M3_CTRL_ENC_TMDS) | ||
937 | #define VAL_M3_CTRL_MHL3_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \ | ||
938 | | BIT_M3_CTRL_M3AV_EN \ | ||
939 | | BIT_M3_CTRL_ENC_TMDS \ | ||
940 | | BIT_M3_CTRL_MHL3_MASTER_EN) | ||
941 | |||
942 | /* HDMI2MHL3 Port0 Control, default value: 0x04 */ | ||
943 | #define REG_M3_P0CTRL 0x03e1 | ||
944 | #define BIT_M3_P0CTRL_MHL3_P0_HDCP_ENC_EN BIT(4) | ||
945 | #define BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN BIT(3) | ||
946 | #define BIT_M3_P0CTRL_MHL3_P0_HDCP_EN BIT(2) | ||
947 | #define BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED BIT(1) | ||
948 | #define BIT_M3_P0CTRL_MHL3_P0_PORT_EN BIT(0) | ||
949 | |||
950 | #define REG_M3_POSTM 0x03e2 | ||
951 | #define MSK_M3_POSTM_RRP_DECODE 0xf8 | ||
952 | #define MSK_M3_POSTM_MHL3_P0_STM_ID 0x07 | ||
953 | |||
954 | /* HDMI2MHL3 Scramble Control, default value: 0x41 */ | ||
955 | #define REG_M3_SCTRL 0x03e6 | ||
956 | #define MSK_M3_SCTRL_MHL3_SR_LENGTH 0xf0 | ||
957 | #define BIT_M3_SCTRL_MHL3_SCRAMBLER_EN BIT(0) | ||
958 | |||
959 | /* HSIC Div Ctl, default value: 0x05 */ | ||
960 | #define REG_DIV_CTL_MAIN 0x03f2 | ||
961 | #define MSK_DIV_CTL_MAIN_PRE_DIV_CTL_MAIN 0x1c | ||
962 | #define MSK_DIV_CTL_MAIN_FB_DIV_CTL_MAIN 0x03 | ||
963 | |||
964 | /* MHL Capability 1st Byte, default value: 0x00 */ | ||
965 | #define REG_MHL_DEVCAP_0 0x0400 | ||
966 | |||
967 | /* MHL Interrupt 1st Byte, default value: 0x00 */ | ||
968 | #define REG_MHL_INT_0 0x0420 | ||
969 | |||
970 | /* Device Status 1st byte, default value: 0x00 */ | ||
971 | #define REG_MHL_STAT_0 0x0430 | ||
972 | |||
973 | /* CBUS Scratch Pad 1st Byte, default value: 0x00 */ | ||
974 | #define REG_MHL_SCRPAD_0 0x0440 | ||
975 | |||
976 | /* MHL Extended Capability 1st Byte, default value: 0x00 */ | ||
977 | #define REG_MHL_EXTDEVCAP_0 0x0480 | ||
978 | |||
979 | /* Device Extended Status 1st byte, default value: 0x00 */ | ||
980 | #define REG_MHL_EXTSTAT_0 0x0490 | ||
981 | |||
982 | /* TPI DTD Byte2, default value: 0x00 */ | ||
983 | #define REG_TPI_DTD_B2 0x0602 | ||
984 | |||
985 | #define VAL_TPI_QUAN_RANGE_LIMITED 0x01 | ||
986 | #define VAL_TPI_QUAN_RANGE_FULL 0x02 | ||
987 | #define VAL_TPI_FORMAT_RGB 0x00 | ||
988 | #define VAL_TPI_FORMAT_YCBCR444 0x01 | ||
989 | #define VAL_TPI_FORMAT_YCBCR422 0x02 | ||
990 | #define VAL_TPI_FORMAT_INTERNAL_RGB 0x03 | ||
991 | #define VAL_TPI_FORMAT(_fmt, _qr) \ | ||
992 | (VAL_TPI_FORMAT_##_fmt | (VAL_TPI_QUAN_RANGE_##_qr << 2)) | ||
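| /* e.g. VAL_TPI_FORMAT(YCBCR422, LIMITED) expands to | ||
|  * 0x02 | (0x01 << 2) == 0x06: format in bits 1:0, range in bits 3:2. | ||
|  */ | ||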
993 | |||
994 | /* Input Format, default value: 0x00 */ | ||
995 | #define REG_TPI_INPUT 0x0609 | ||
996 | #define BIT_TPI_INPUT_EXTENDEDBITMODE BIT(7) | ||
997 | #define BIT_TPI_INPUT_ENDITHER BIT(6) | ||
998 | #define MSK_TPI_INPUT_INPUT_QUAN_RANGE 0x0c | ||
999 | #define MSK_TPI_INPUT_INPUT_FORMAT 0x03 | ||
1000 | |||
1001 | /* Output Format, default value: 0x00 */ | ||
1002 | #define REG_TPI_OUTPUT 0x060a | ||
1003 | #define BIT_TPI_OUTPUT_CSCMODE709 BIT(4) | ||
1004 | #define MSK_TPI_OUTPUT_OUTPUT_QUAN_RANGE 0x0c | ||
1005 | #define MSK_TPI_OUTPUT_OUTPUT_FORMAT 0x03 | ||
1006 | |||
1007 | /* TPI AVI Check Sum, default value: 0x00 */ | ||
1008 | #define REG_TPI_AVI_CHSUM 0x060c | ||
1009 | |||
1010 | /* TPI System Control, default value: 0x00 */ | ||
1011 | #define REG_TPI_SC 0x061a | ||
1012 | #define BIT_TPI_SC_TPI_UPDATE_FLG BIT(7) | ||
1013 | #define BIT_TPI_SC_TPI_REAUTH_CTL BIT(6) | ||
1014 | #define BIT_TPI_SC_TPI_OUTPUT_MODE_1 BIT(5) | ||
1015 | #define BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN BIT(4) | ||
1016 | #define BIT_TPI_SC_TPI_AV_MUTE BIT(3) | ||
1017 | #define BIT_TPI_SC_DDC_GPU_REQUEST BIT(2) | ||
1018 | #define BIT_TPI_SC_DDC_TPI_SW BIT(1) | ||
1019 | #define BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI BIT(0) | ||
1020 | |||
1021 | /* TPI COPP Query Data, default value: 0x00 */ | ||
1022 | #define REG_TPI_COPP_DATA1 0x0629 | ||
1023 | #define BIT_TPI_COPP_DATA1_COPP_GPROT BIT(7) | ||
1024 | #define BIT_TPI_COPP_DATA1_COPP_LPROT BIT(6) | ||
1025 | #define MSK_TPI_COPP_DATA1_COPP_LINK_STATUS 0x30 | ||
1026 | #define VAL_TPI_COPP_LINK_STATUS_NORMAL 0x00 | ||
1027 | #define VAL_TPI_COPP_LINK_STATUS_LINK_LOST 0x10 | ||
1028 | #define VAL_TPI_COPP_LINK_STATUS_RENEGOTIATION_REQ 0x20 | ||
1029 | #define VAL_TPI_COPP_LINK_STATUS_LINK_SUSPENDED 0x30 | ||
1030 | #define BIT_TPI_COPP_DATA1_COPP_HDCP_REP BIT(3) | ||
1031 | #define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_0 BIT(2) | ||
1032 | #define BIT_TPI_COPP_DATA1_COPP_PROTYPE BIT(1) | ||
1033 | #define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_1 BIT(0) | ||
1034 | |||
1035 | /* TPI COPP Control Data, default value: 0x00 */ | ||
1036 | #define REG_TPI_COPP_DATA2 0x062a | ||
1037 | #define BIT_TPI_COPP_DATA2_INTR_ENCRYPTION BIT(5) | ||
1038 | #define BIT_TPI_COPP_DATA2_KSV_FORWARD BIT(4) | ||
1039 | #define BIT_TPI_COPP_DATA2_INTERM_RI_CHECK_EN BIT(3) | ||
1040 | #define BIT_TPI_COPP_DATA2_DOUBLE_RI_CHECK BIT(2) | ||
1041 | #define BIT_TPI_COPP_DATA2_DDC_SHORT_RI_RD BIT(1) | ||
1042 | #define BIT_TPI_COPP_DATA2_COPP_PROTLEVEL BIT(0) | ||
1043 | |||
1044 | /* TPI Interrupt Enable, default value: 0x00 */ | ||
1045 | #define REG_TPI_INTR_EN 0x063c | ||
1046 | |||
1047 | /* TPI Interrupt Status Low Byte, default value: 0x00 */ | ||
1048 | #define REG_TPI_INTR_ST0 0x063d | ||
1049 | #define BIT_TPI_INTR_ST0_TPI_AUTH_CHNGE_STAT BIT(7) | ||
1050 | #define BIT_TPI_INTR_ST0_TPI_V_RDY_STAT BIT(6) | ||
1051 | #define BIT_TPI_INTR_ST0_TPI_COPP_CHNGE_STAT BIT(5) | ||
1052 | #define BIT_TPI_INTR_ST0_KSV_FIFO_FIRST_STAT BIT(3) | ||
1053 | #define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_DONE_STAT BIT(2) | ||
1054 | #define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_ERR_STAT BIT(1) | ||
1055 | #define BIT_TPI_INTR_ST0_READ_BKSV_ERR_STAT BIT(0) | ||
1056 | |||
1057 | /* TPI DS BCAPS Status, default value: 0x00 */ | ||
1058 | #define REG_TPI_DS_BCAPS 0x0644 | ||
1059 | |||
1060 | /* TPI BStatus1, default value: 0x00 */ | ||
1061 | #define REG_TPI_BSTATUS1 0x0645 | ||
1062 | #define BIT_TPI_BSTATUS1_DS_DEV_EXCEED BIT(7) | ||
1063 | #define MSK_TPI_BSTATUS1_DS_DEV_CNT 0x7f | ||
1064 | |||
1065 | /* TPI BStatus2, default value: 0x10 */ | ||
1066 | #define REG_TPI_BSTATUS2 0x0646 | ||
1067 | #define MSK_TPI_BSTATUS2_DS_BSTATUS 0xe0 | ||
1068 | #define BIT_TPI_BSTATUS2_DS_HDMI_MODE BIT(4) | ||
1069 | #define BIT_TPI_BSTATUS2_DS_CASC_EXCEED BIT(3) | ||
1070 | #define MSK_TPI_BSTATUS2_DS_DEPTH 0x07 | ||
1071 | |||
1072 | /* TPI HW Optimization Control #3, default value: 0x00 */ | ||
1073 | #define REG_TPI_HW_OPT3 0x06bb | ||
1074 | #define BIT_TPI_HW_OPT3_DDC_DEBUG BIT(7) | ||
1075 | #define BIT_TPI_HW_OPT3_RI_CHECK_SKIP BIT(3) | ||
1076 | #define BIT_TPI_HW_OPT3_TPI_DDC_BURST_MODE BIT(2) | ||
1077 | #define MSK_TPI_HW_OPT3_TPI_DDC_REQ_LEVEL 0x03 | ||
1078 | |||
1079 | /* TPI Info Frame Select, default value: 0x00 */ | ||
1080 | #define REG_TPI_INFO_FSEL 0x06bf | ||
1081 | #define BIT_TPI_INFO_FSEL_TPI_INFO_EN BIT(7) | ||
1082 | #define BIT_TPI_INFO_FSEL_TPI_INFO_RPT BIT(6) | ||
1083 | #define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG BIT(5) | ||
1084 | #define MSK_TPI_INFO_FSEL_TPI_INFO_SEL 0x07 | ||
1085 | |||
1086 | /* TPI Info Byte #0, default value: 0x00 */ | ||
1087 | #define REG_TPI_INFO_B0 0x06c0 | ||
1088 | |||
1089 | /* CoC Status, default value: 0x00 */ | ||
1090 | #define REG_COC_STAT_0 0x0700 | ||
1091 | #define REG_COC_STAT_1 0x0701 | ||
1092 | #define REG_COC_STAT_2 0x0702 | ||
1093 | #define REG_COC_STAT_3 0x0703 | ||
1094 | #define REG_COC_STAT_4 0x0704 | ||
1095 | #define REG_COC_STAT_5 0x0705 | ||
1096 | |||
1097 | /* CoC 1st Ctl, default value: 0x40 */ | ||
1098 | #define REG_COC_CTL0 0x0710 | ||
1099 | |||
1100 | /* CoC 2nd Ctl, default value: 0x0a */ | ||
1101 | #define REG_COC_CTL1 0x0711 | ||
1102 | #define MSK_COC_CTL1_COC_CTRL1_7_6 0xc0 | ||
1103 | #define MSK_COC_CTL1_COC_CTRL1_5_0 0x3f | ||
1104 | |||
1105 | /* CoC 3rd Ctl, default value: 0x14 */ | ||
1106 | #define REG_COC_CTL2 0x0712 | ||
1107 | #define MSK_COC_CTL2_COC_CTRL2_7_6 0xc0 | ||
1108 | #define MSK_COC_CTL2_COC_CTRL2_5_0 0x3f | ||
1109 | |||
1110 | /* CoC 4th Ctl, default value: 0x40 */ | ||
1111 | #define REG_COC_CTL3 0x0713 | ||
1112 | #define BIT_COC_CTL3_COC_CTRL3_7 BIT(7) | ||
1113 | #define MSK_COC_CTL3_COC_CTRL3_6_0 0x7f | ||
1114 | |||
1115 | /* CoC 7th Ctl, default value: 0x00 */ | ||
1116 | #define REG_COC_CTL6 0x0716 | ||
1117 | #define BIT_COC_CTL6_COC_CTRL6_7 BIT(7) | ||
1118 | #define BIT_COC_CTL6_COC_CTRL6_6 BIT(6) | ||
1119 | #define MSK_COC_CTL6_COC_CTRL6_5_0 0x3f | ||
1120 | |||
1121 | /* CoC 8th Ctl, default value: 0x06 */ | ||
1122 | #define REG_COC_CTL7 0x0717 | ||
1123 | #define BIT_COC_CTL7_COC_CTRL7_7 BIT(7) | ||
1124 | #define BIT_COC_CTL7_COC_CTRL7_6 BIT(6) | ||
1125 | #define BIT_COC_CTL7_COC_CTRL7_5 BIT(5) | ||
1126 | #define MSK_COC_CTL7_COC_CTRL7_4_3 0x18 | ||
1127 | #define MSK_COC_CTL7_COC_CTRL7_2_0 0x07 | ||
1128 | |||
1129 | /* CoC 10th Ctl, default value: 0x00 */ | ||
1130 | #define REG_COC_CTL9 0x0719 | ||
1131 | |||
1132 | /* CoC 11th Ctl, default value: 0x00 */ | ||
1133 | #define REG_COC_CTLA 0x071a | ||
1134 | |||
1135 | /* CoC 12th Ctl, default value: 0x00 */ | ||
1136 | #define REG_COC_CTLB 0x071b | ||
1137 | |||
1138 | /* CoC 13th Ctl, default value: 0x0f */ | ||
1139 | #define REG_COC_CTLC 0x071c | ||
1140 | |||
1141 | /* CoC 14th Ctl, default value: 0x0a */ | ||
1142 | #define REG_COC_CTLD 0x071d | ||
1143 | #define BIT_COC_CTLD_COC_CTRLD_7 BIT(7) | ||
1144 | #define MSK_COC_CTLD_COC_CTRLD_6_0 0x7f | ||
1145 | |||
1146 | /* CoC 15th Ctl, default value: 0x0a */ | ||
1147 | #define REG_COC_CTLE 0x071e | ||
1148 | #define BIT_COC_CTLE_COC_CTRLE_7 BIT(7) | ||
1149 | #define MSK_COC_CTLE_COC_CTRLE_6_0 0x7f | ||
1150 | |||
1151 | /* CoC 16th Ctl, default value: 0x00 */ | ||
1152 | #define REG_COC_CTLF 0x071f | ||
1153 | #define MSK_COC_CTLF_COC_CTRLF_7_3 0xf8 | ||
1154 | #define MSK_COC_CTLF_COC_CTRLF_2_0 0x07 | ||
1155 | |||
1156 | /* CoC 18th Ctl, default value: 0x32 */ | ||
1157 | #define REG_COC_CTL11 0x0721 | ||
1158 | #define MSK_COC_CTL11_COC_CTRL11_7_4 0xf0 | ||
1159 | #define MSK_COC_CTL11_COC_CTRL11_3_0 0x0f | ||
1160 | |||
1161 | /* CoC 21st Ctl, default value: 0x00 */ | ||
1162 | #define REG_COC_CTL14 0x0724 | ||
1163 | #define MSK_COC_CTL14_COC_CTRL14_7_4 0xf0 | ||
1164 | #define MSK_COC_CTL14_COC_CTRL14_3_0 0x0f | ||
1165 | |||
1166 | /* CoC 22nd Ctl, default value: 0x00 */ | ||
1167 | #define REG_COC_CTL15 0x0725 | ||
1168 | #define BIT_COC_CTL15_COC_CTRL15_7 BIT(7) | ||
1169 | #define MSK_COC_CTL15_COC_CTRL15_6_4 0x70 | ||
1170 | #define MSK_COC_CTL15_COC_CTRL15_3_0 0x0f | ||
1171 | |||
1172 | /* CoC Interrupt, default value: 0x00 */ | ||
1173 | #define REG_COC_INTR 0x0726 | ||
1174 | |||
1175 | /* CoC Interrupt Mask, default value: 0x00 */ | ||
1176 | #define REG_COC_INTR_MASK 0x0727 | ||
1177 | #define BIT_COC_PLL_LOCK_STATUS_CHANGE BIT(0) | ||
1178 | #define BIT_COC_CALIBRATION_DONE BIT(1) | ||
1179 | |||
1180 | /* CoC Misc Ctl, default value: 0x00 */ | ||
1181 | #define REG_COC_MISC_CTL0 0x0728 | ||
1182 | #define BIT_COC_MISC_CTL0_FSM_MON BIT(7) | ||
1183 | |||
1184 | /* CoC 24th Ctl, default value: 0x00 */ | ||
1185 | #define REG_COC_CTL17 0x072a | ||
1186 | #define MSK_COC_CTL17_COC_CTRL17_7_4 0xf0 | ||
1187 | #define MSK_COC_CTL17_COC_CTRL17_3_0 0x0f | ||
1188 | |||
1189 | /* CoC 25th Ctl, default value: 0x00 */ | ||
1190 | #define REG_COC_CTL18 0x072b | ||
1191 | #define MSK_COC_CTL18_COC_CTRL18_7_4 0xf0 | ||
1192 | #define MSK_COC_CTL18_COC_CTRL18_3_0 0x0f | ||
1193 | |||
1194 | /* CoC 26th Ctl, default value: 0x00 */ | ||
1195 | #define REG_COC_CTL19 0x072c | ||
1196 | #define MSK_COC_CTL19_COC_CTRL19_7_4 0xf0 | ||
1197 | #define MSK_COC_CTL19_COC_CTRL19_3_0 0x0f | ||
1198 | |||
1199 | /* CoC 27th Ctl, default value: 0x00 */ | ||
1200 | #define REG_COC_CTL1A 0x072d | ||
1201 | #define MSK_COC_CTL1A_COC_CTRL1A_7_2 0xfc | ||
1202 | #define MSK_COC_CTL1A_COC_CTRL1A_1_0 0x03 | ||
1203 | |||
1204 | /* DoC 9th Status, default value: 0x00 */ | ||
1205 | #define REG_DOC_STAT_8 0x0740 | ||
1206 | |||
1207 | /* DoC 10th Status, default value: 0x00 */ | ||
1208 | #define REG_DOC_STAT_9 0x0741 | ||
1209 | |||
1210 | /* DoC 5th CFG, default value: 0x00 */ | ||
1211 | #define REG_DOC_CFG4 0x074e | ||
1212 | #define MSK_DOC_CFG4_DBG_STATE_DOC_FSM 0x0f | ||
1213 | |||
1214 | /* DoC 1st Ctl, default value: 0x40 */ | ||
1215 | #define REG_DOC_CTL0 0x0751 | ||
1216 | |||
1217 | /* DoC 7th Ctl, default value: 0x00 */ | ||
1218 | #define REG_DOC_CTL6 0x0757 | ||
1219 | #define BIT_DOC_CTL6_DOC_CTRL6_7 BIT(7) | ||
1220 | #define BIT_DOC_CTL6_DOC_CTRL6_6 BIT(6) | ||
1221 | #define MSK_DOC_CTL6_DOC_CTRL6_5_4 0x30 | ||
1222 | #define MSK_DOC_CTL6_DOC_CTRL6_3_0 0x0f | ||
1223 | |||
1224 | /* DoC 8th Ctl, default value: 0x00 */ | ||
1225 | #define REG_DOC_CTL7 0x0758 | ||
1226 | #define BIT_DOC_CTL7_DOC_CTRL7_7 BIT(7) | ||
1227 | #define BIT_DOC_CTL7_DOC_CTRL7_6 BIT(6) | ||
1228 | #define BIT_DOC_CTL7_DOC_CTRL7_5 BIT(5) | ||
1229 | #define MSK_DOC_CTL7_DOC_CTRL7_4_3 0x18 | ||
1230 | #define MSK_DOC_CTL7_DOC_CTRL7_2_0 0x07 | ||
1231 | |||
1232 | /* DoC 9th Ctl, default value: 0x00 */ | ||
1233 | #define REG_DOC_CTL8 0x076c | ||
1234 | #define BIT_DOC_CTL8_DOC_CTRL8_7 BIT(7) | ||
1235 | #define MSK_DOC_CTL8_DOC_CTRL8_6_4 0x70 | ||
1236 | #define MSK_DOC_CTL8_DOC_CTRL8_3_2 0x0c | ||
1237 | #define MSK_DOC_CTL8_DOC_CTRL8_1_0 0x03 | ||
1238 | |||
1239 | /* DoC 10th Ctl, default value: 0x00 */ | ||
1240 | #define REG_DOC_CTL9 0x076d | ||
1241 | |||
1242 | /* DoC 11th Ctl, default value: 0x00 */ | ||
1243 | #define REG_DOC_CTLA 0x076e | ||
1244 | |||
1245 | /* DoC 15th Ctl, default value: 0x00 */ | ||
1246 | #define REG_DOC_CTLE 0x0772 | ||
1247 | #define BIT_DOC_CTLE_DOC_CTRLE_7 BIT(7) | ||
1248 | #define BIT_DOC_CTLE_DOC_CTRLE_6 BIT(6) | ||
1249 | #define MSK_DOC_CTLE_DOC_CTRLE_5_4 0x30 | ||
1250 | #define MSK_DOC_CTLE_DOC_CTRLE_3_0 0x0f | ||
1251 | |||
1252 | /* Interrupt Mask 1st, default value: 0x00 */ | ||
1253 | #define REG_MHL_INT_0_MASK 0x0580 | ||
1254 | |||
1255 | /* Interrupt Mask 2nd, default value: 0x00 */ | ||
1256 | #define REG_MHL_INT_1_MASK 0x0581 | ||
1257 | |||
1258 | /* Interrupt Mask 3rd, default value: 0x00 */ | ||
1259 | #define REG_MHL_INT_2_MASK 0x0582 | ||
1260 | |||
1261 | /* Interrupt Mask 4th, default value: 0x00 */ | ||
1262 | #define REG_MHL_INT_3_MASK 0x0583 | ||
1263 | |||
1264 | /* MDT Receive Time Out, default value: 0x00 */ | ||
1265 | #define REG_MDT_RCV_TIMEOUT 0x0584 | ||
1266 | |||
1267 | /* MDT Transmit Time Out, default value: 0x00 */ | ||
1268 | #define REG_MDT_XMIT_TIMEOUT 0x0585 | ||
1269 | |||
1270 | /* MDT Receive Control, default value: 0x00 */ | ||
1271 | #define REG_MDT_RCV_CTRL 0x0586 | ||
1272 | #define BIT_MDT_RCV_CTRL_MDT_RCV_EN BIT(7) | ||
1273 | #define BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN BIT(6) | ||
1274 | #define BIT_MDT_RCV_CTRL_MDT_RFIFO_OVER_WR_EN BIT(4) | ||
1275 | #define BIT_MDT_RCV_CTRL_MDT_XFIFO_OVER_WR_EN BIT(3) | ||
1276 | #define BIT_MDT_RCV_CTRL_MDT_DISABLE BIT(2) | ||
1277 | #define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_ALL BIT(1) | ||
1278 | #define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR BIT(0) | ||
1279 | |||
1280 | /* MDT Receive Read Port, default value: 0x00 */ | ||
1281 | #define REG_MDT_RCV_READ_PORT 0x0587 | ||
1282 | |||
1283 | /* MDT Transmit Control, default value: 0x70 */ | ||
1284 | #define REG_MDT_XMIT_CTRL 0x0588 | ||
1285 | #define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN BIT(7) | ||
1286 | #define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN BIT(6) | ||
1287 | #define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5) | ||
1288 | #define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID BIT(4) | ||
1289 | #define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3) | ||
1290 | #define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT BIT(2) | ||
1291 | #define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL BIT(1) | ||
1292 | #define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR BIT(0) | ||
1293 | |||
1294 | /* MDT Transmit WRITE Port, default value: 0x00 */ | ||
1295 | #define REG_MDT_XMIT_WRITE_PORT 0x0589 | ||
1296 | |||
1297 | /* MDT RFIFO Status, default value: 0x00 */ | ||
1298 | #define REG_MDT_RFIFO_STAT 0x058a | ||
1299 | #define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CNT 0xe0 | ||
1300 | #define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CUR_BYTE_CNT 0x1f | ||
1301 | |||
1302 | /* MDT XFIFO Status, default value: 0x80 */ | ||
1303 | #define REG_MDT_XFIFO_STAT 0x058b | ||
1304 | #define MSK_MDT_XFIFO_STAT_MDT_XFIFO_LEVEL_AVAIL 0xe0 | ||
1305 | #define BIT_MDT_XFIFO_STAT_MDT_XMIT_PRE_HS_EN BIT(4) | ||
1306 | #define MSK_MDT_XFIFO_STAT_MDT_WRITE_BURST_LEN 0x0f | ||
1307 | |||
1308 | /* MDT Interrupt 0, default value: 0x0c */ | ||
1309 | #define REG_MDT_INT_0 0x058c | ||
1310 | #define BIT_MDT_RFIFO_DATA_RDY BIT(0) | ||
1311 | #define BIT_MDT_IDLE_AFTER_HAWB_DISABLE BIT(2) | ||
1312 | #define BIT_MDT_XFIFO_EMPTY BIT(3) | ||
1313 | |||
1314 | /* MDT Interrupt 0 Mask, default value: 0x00 */ | ||
1315 | #define REG_MDT_INT_0_MASK 0x058d | ||
1316 | |||
1317 | /* MDT Interrupt 1, default value: 0x00 */ | ||
1318 | #define REG_MDT_INT_1 0x058e | ||
1319 | #define BIT_MDT_RCV_TIMEOUT BIT(0) | ||
1320 | #define BIT_MDT_RCV_SM_ABORT_PKT_RCVD BIT(1) | ||
1321 | #define BIT_MDT_RCV_SM_ERROR BIT(2) | ||
1322 | #define BIT_MDT_XMIT_TIMEOUT BIT(5) | ||
1323 | #define BIT_MDT_XMIT_SM_ABORT_PKT_RCVD BIT(6) | ||
1324 | #define BIT_MDT_XMIT_SM_ERROR BIT(7) | ||
1325 | |||
1326 | /* MDT Interrupt 1 Mask, default value: 0x00 */ | ||
1327 | #define REG_MDT_INT_1_MASK 0x058f | ||
1328 | |||
1329 | /* CBUS Vendor ID, default value: 0x01 */ | ||
1330 | #define REG_CBUS_VENDOR_ID 0x0590 | ||
1331 | |||
1332 | /* CBUS Connection Status, default value: 0x00 */ | ||
1333 | #define REG_CBUS_STATUS 0x0591 | ||
1334 | #define BIT_CBUS_STATUS_MHL_CABLE_PRESENT BIT(4) | ||
1335 | #define BIT_CBUS_STATUS_MSC_HB_SUCCESS BIT(3) | ||
1336 | #define BIT_CBUS_STATUS_CBUS_HPD BIT(2) | ||
1337 | #define BIT_CBUS_STATUS_MHL_MODE BIT(1) | ||
1338 | #define BIT_CBUS_STATUS_CBUS_CONNECTED BIT(0) | ||
1339 | |||
1340 | /* CBUS Interrupt 1st, default value: 0x00 */ | ||
1341 | #define REG_CBUS_INT_0 0x0592 | ||
1342 | #define BIT_CBUS_MSC_MT_DONE_NACK BIT(7) | ||
1343 | #define BIT_CBUS_MSC_MR_SET_INT BIT(6) | ||
1344 | #define BIT_CBUS_MSC_MR_WRITE_BURST BIT(5) | ||
1345 | #define BIT_CBUS_MSC_MR_MSC_MSG BIT(4) | ||
1346 | #define BIT_CBUS_MSC_MR_WRITE_STAT BIT(3) | ||
1347 | #define BIT_CBUS_HPD_CHG BIT(2) | ||
1348 | #define BIT_CBUS_MSC_MT_DONE BIT(1) | ||
1349 | #define BIT_CBUS_CNX_CHG BIT(0) | ||
1350 | |||
1351 | /* CBUS Interrupt Mask 1st, default value: 0x00 */ | ||
1352 | #define REG_CBUS_INT_0_MASK 0x0593 | ||
1353 | |||
1354 | /* CBUS Interrupt 2nd, default value: 0x00 */ | ||
1355 | #define REG_CBUS_INT_1 0x0594 | ||
1356 | #define BIT_CBUS_CMD_ABORT BIT(6) | ||
1357 | #define BIT_CBUS_MSC_ABORT_RCVD BIT(3) | ||
1358 | #define BIT_CBUS_DDC_ABORT BIT(2) | ||
1359 | #define BIT_CBUS_CEC_ABORT BIT(1) | ||
1360 | |||
1361 | /* CBUS Interrupt Mask 2nd, default value: 0x00 */ | ||
1362 | #define REG_CBUS_INT_1_MASK 0x0595 | ||
1363 | |||
1364 | /* CBUS DDC Abort Interrupt, default value: 0x00 */ | ||
1365 | #define REG_DDC_ABORT_INT 0x0598 | ||
1366 | |||
1367 | /* CBUS DDC Abort Interrupt Mask, default value: 0x00 */ | ||
1368 | #define REG_DDC_ABORT_INT_MASK 0x0599 | ||
1369 | |||
1370 | /* CBUS MSC Requester Abort Interrupt, default value: 0x00 */ | ||
1371 | #define REG_MSC_MT_ABORT_INT 0x059a | ||
1372 | |||
1373 | /* CBUS MSC Requester Abort Interrupt Mask, default value: 0x00 */ | ||
1374 | #define REG_MSC_MT_ABORT_INT_MASK 0x059b | ||
1375 | |||
1376 | /* CBUS MSC Responder Abort Interrupt, default value: 0x00 */ | ||
1377 | #define REG_MSC_MR_ABORT_INT 0x059c | ||
1378 | |||
1379 | /* CBUS MSC Responder Abort Interrupt Mask, default value: 0x00 */ | ||
1380 | #define REG_MSC_MR_ABORT_INT_MASK 0x059d | ||
1381 | |||
1382 | /* CBUS RX DISCOVERY interrupt, default value: 0x00 */ | ||
1383 | #define REG_CBUS_RX_DISC_INT0 0x059e | ||
1384 | |||
1385 | /* CBUS RX DISCOVERY Interrupt Mask, default value: 0x00 */ | ||
1386 | #define REG_CBUS_RX_DISC_INT0_MASK 0x059f | ||
1387 | |||
1388 | /* CBUS_Link_Layer Control #8, default value: 0x00 */ | ||
1389 | #define REG_CBUS_LINK_CTRL_8 0x05a7 | ||
1390 | |||
1391 | /* MDT State Machine Status, default value: 0x00 */ | ||
1392 | #define REG_MDT_SM_STAT 0x05b5 | ||
1393 | #define MSK_MDT_SM_STAT_MDT_RCV_STATE 0xf0 | ||
1394 | #define MSK_MDT_SM_STAT_MDT_XMIT_STATE 0x0f | ||
1395 | |||
1396 | /* CBUS MSC command trigger, default value: 0x00 */ | ||
1397 | #define REG_MSC_COMMAND_START 0x05b8 | ||
1398 | #define BIT_MSC_COMMAND_START_DEBUG BIT(5) | ||
1399 | #define BIT_MSC_COMMAND_START_WRITE_BURST BIT(4) | ||
1400 | #define BIT_MSC_COMMAND_START_WRITE_STAT BIT(3) | ||
1401 | #define BIT_MSC_COMMAND_START_READ_DEVCAP BIT(2) | ||
1402 | #define BIT_MSC_COMMAND_START_MSC_MSG BIT(1) | ||
1403 | #define BIT_MSC_COMMAND_START_PEER BIT(0) | ||
1404 | |||
1405 | /* CBUS MSC Command/Offset, default value: 0x00 */ | ||
1406 | #define REG_MSC_CMD_OR_OFFSET 0x05b9 | ||
1407 | |||
1408 | /* CBUS MSC Transmit Data */ | ||
1409 | #define REG_MSC_1ST_TRANSMIT_DATA 0x05ba | ||
1410 | #define REG_MSC_2ND_TRANSMIT_DATA 0x05bb | ||
1411 | |||
1412 | /* CBUS MSC Requester Received Data */ | ||
1413 | #define REG_MSC_MT_RCVD_DATA0 0x05bc | ||
1414 | #define REG_MSC_MT_RCVD_DATA1 0x05bd | ||
1415 | |||
1416 | /* CBUS MSC Responder MSC_MSG Received Data */ | ||
1417 | #define REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA 0x05bf | ||
1418 | #define REG_MSC_MR_MSC_MSG_RCVD_2ND_DATA 0x05c0 | ||
1419 | |||
1420 | /* CBUS MSC Heartbeat Control, default value: 0x27 */ | ||
1421 | #define REG_MSC_HEARTBEAT_CTRL 0x05c4 | ||
1422 | #define BIT_MSC_HEARTBEAT_CTRL_MSC_HB_EN BIT(7) | ||
1423 | #define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_FAIL_LIMIT 0x70 | ||
1424 | #define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_PERIOD_MSB 0x0f | ||
1425 | |||
1426 | /* CBUS MSC Compatibility Control, default value: 0x02 */ | ||
1427 | #define REG_CBUS_MSC_COMPAT_CTRL 0x05c7 | ||
1428 | #define BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN BIT(7) | ||
1429 | #define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_MSC_ON_CBUS BIT(6) | ||
1430 | #define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_DDC_ON_CBUS BIT(5) | ||
1431 | #define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_DDC_ERRORCODE BIT(3) | ||
1432 | #define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_VS1_ERRORCODE BIT(2) | ||
1433 | |||
1434 | /* CBUS3 Converter Control, default value: 0x24 */ | ||
1435 | #define REG_CBUS3_CNVT 0x05dc | ||
1436 | #define MSK_CBUS3_CNVT_CBUS3_RETRYLMT 0xf0 | ||
1437 | #define MSK_CBUS3_CNVT_CBUS3_PEERTOUT_SEL 0x0c | ||
1438 | #define BIT_CBUS3_CNVT_TEARCBUS_EN BIT(1) | ||
1439 | #define BIT_CBUS3_CNVT_CBUS3CNVT_EN BIT(0) | ||
1440 | |||
1441 | /* Discovery Control1, default value: 0x24 */ | ||
1442 | #define REG_DISC_CTRL1 0x05e0 | ||
1443 | #define BIT_DISC_CTRL1_CBUS_INTR_EN BIT(7) | ||
1444 | #define BIT_DISC_CTRL1_HB_ONLY BIT(6) | ||
1445 | #define MSK_DISC_CTRL1_DISC_ATT 0x30 | ||
1446 | #define MSK_DISC_CTRL1_DISC_CYC 0x0c | ||
1447 | #define BIT_DISC_CTRL1_DISC_EN BIT(0) | ||
1448 | |||
1449 | #define VAL_PUP_OFF 0 | ||
1450 | #define VAL_PUP_20K 1 | ||
1451 | #define VAL_PUP_5K 2 | ||
1452 | |||
1453 | /* Discovery Control4, default value: 0x80 */ | ||
1454 | #define REG_DISC_CTRL4 0x05e3 | ||
1455 | #define MSK_DISC_CTRL4_CBUSDISC_PUP_SEL 0xc0 | ||
1456 | #define MSK_DISC_CTRL4_CBUSIDLE_PUP_SEL 0x30 | ||
1457 | #define VAL_DISC_CTRL4(pup_disc, pup_idle) (((pup_disc) << 6) | ((pup_idle) << 4)) | ||
1458 | |||
1459 | /* Discovery Control5, default value: 0x03 */ | ||
1460 | #define REG_DISC_CTRL5 0x05e4 | ||
1461 | #define BIT_DISC_CTRL5_DSM_OVRIDE BIT(3) | ||
1462 | #define MSK_DISC_CTRL5_CBUSMHL_PUP_SEL 0x03 | ||
1463 | |||
1464 | /* Discovery Control8, default value: 0x81 */ | ||
1465 | #define REG_DISC_CTRL8 0x05e7 | ||
1466 | #define BIT_DISC_CTRL8_NOMHLINT_CLR_BYPASS BIT(7) | ||
1467 | #define BIT_DISC_CTRL8_DELAY_CBUS_INTR_EN BIT(0) | ||
1468 | |||
1469 | /* Discovery Control9, default value: 0x54 */ | ||
1470 | #define REG_DISC_CTRL9 0x05e8 | ||
1471 | #define BIT_DISC_CTRL9_MHL3_RSEN_BYP BIT(7) | ||
1472 | #define BIT_DISC_CTRL9_MHL3DISC_EN BIT(6) | ||
1473 | #define BIT_DISC_CTRL9_WAKE_DRVFLT BIT(4) | ||
1474 | #define BIT_DISC_CTRL9_NOMHL_EST BIT(3) | ||
1475 | #define BIT_DISC_CTRL9_DISC_PULSE_PROCEED BIT(2) | ||
1476 | #define BIT_DISC_CTRL9_WAKE_PULSE_BYPASS BIT(1) | ||
1477 | #define BIT_DISC_CTRL9_VBUS_OUTPUT_CAPABILITY_SRC BIT(0) | ||
1478 | |||
1479 | /* Discovery Status1, default value: 0x00 */ | ||
1480 | #define REG_DISC_STAT1 0x05eb | ||
1481 | #define BIT_DISC_STAT1_PSM_OVRIDE BIT(5) | ||
1482 | #define MSK_DISC_STAT1_DISC_SM 0x0f | ||
1483 | |||
1484 | /* Discovery Status2, default value: 0x00 */ | ||
1485 | #define REG_DISC_STAT2 0x05ec | ||
1486 | #define BIT_DISC_STAT2_CBUS_OE_POL BIT(6) | ||
1487 | #define BIT_DISC_STAT2_CBUS_SATUS BIT(5) | ||
1488 | #define BIT_DISC_STAT2_RSEN BIT(4) | ||
1489 | |||
1490 | #define MSK_DISC_STAT2_MHL_VRSN 0x0c | ||
1491 | #define VAL_DISC_STAT2_DEFAULT 0x00 | ||
1492 | #define VAL_DISC_STAT2_MHL1_2 0x04 | ||
1493 | #define VAL_DISC_STAT2_MHL3 0x08 | ||
1494 | #define VAL_DISC_STAT2_RESERVED 0x0c | ||
1495 | |||
1496 | #define MSK_DISC_STAT2_RGND 0x03 | ||
1497 | #define VAL_RGND_OPEN 0x00 | ||
1498 | #define VAL_RGND_2K 0x01 | ||
1499 | #define VAL_RGND_1K 0x02 | ||
1500 | #define VAL_RGND_SHORT 0x03 | ||
1501 | |||
1502 | /* Interrupt CBUS_reg1 INTR0, default value: 0x00 */ | ||
1503 | #define REG_CBUS_DISC_INTR0 0x05ed | ||
1504 | #define BIT_RGND_READY_INT BIT(6) | ||
1505 | #define BIT_CBUS_MHL12_DISCON_INT BIT(5) | ||
1506 | #define BIT_CBUS_MHL3_DISCON_INT BIT(4) | ||
1507 | #define BIT_NOT_MHL_EST_INT BIT(3) | ||
1508 | #define BIT_MHL_EST_INT BIT(2) | ||
1509 | #define BIT_MHL3_EST_INT BIT(1) | ||
1510 | #define VAL_CBUS_MHL_DISCON (BIT_CBUS_MHL12_DISCON_INT \ | ||
1511 | | BIT_CBUS_MHL3_DISCON_INT \ | ||
1512 | | BIT_NOT_MHL_EST_INT) | ||
1513 | |||
1514 | /* Interrupt CBUS_reg1 INTR0 Mask, default value: 0x00 */ | ||
1515 | #define REG_CBUS_DISC_INTR0_MASK 0x05ee | ||
1516 | |||
1517 | #endif /* __SIL_SII8620_H__ */ | ||
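The MSK_/VAL_ pairs above follow the usual pattern: mask out a multi-bit field, then compare the result against one of the enumerated VAL_ constants. A minimal sketch of the RGND decode, assuming a register read helper in the style of the driver's own I2C accessors (sii8620_readb() and struct sii8620 are taken as given here, not defined by this header):

	static bool sii8620_rgnd_is_mhl(struct sii8620 *ctx)
	{
		u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2);

		/* Bits 1:0 hold the measured RGND impedance; 1k means
		 * an MHL-capable peer per the VAL_RGND_* table above. */
		return (stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K;
	}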
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f81706387889..c32fb3c1d6f0 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
@@ -705,8 +705,7 @@ int drm_atomic_plane_set_property(struct drm_plane *plane, | |||
705 | state->src_w = val; | 705 | state->src_w = val; |
706 | } else if (property == config->prop_src_h) { | 706 | } else if (property == config->prop_src_h) { |
707 | state->src_h = val; | 707 | state->src_h = val; |
708 | } else if (property == config->rotation_property || | 708 | } else if (property == plane->rotation_property) { |
709 | property == plane->rotation_property) { | ||
710 | if (!is_power_of_2(val & DRM_ROTATE_MASK)) | 709 | if (!is_power_of_2(val & DRM_ROTATE_MASK)) |
711 | return -EINVAL; | 710 | return -EINVAL; |
712 | state->rotation = val; | 711 | state->rotation = val; |
@@ -766,8 +765,7 @@ drm_atomic_plane_get_property(struct drm_plane *plane, | |||
766 | *val = state->src_w; | 765 | *val = state->src_w; |
767 | } else if (property == config->prop_src_h) { | 766 | } else if (property == config->prop_src_h) { |
768 | *val = state->src_h; | 767 | *val = state->src_h; |
769 | } else if (property == config->rotation_property || | 768 | } else if (property == plane->rotation_property) { |
770 | property == plane->rotation_property) { | ||
771 | *val = state->rotation; | 769 | *val = state->rotation; |
772 | } else if (property == plane->zpos_property) { | 770 | } else if (property == plane->zpos_property) { |
773 | *val = state->zpos; | 771 | *val = state->zpos; |
@@ -1465,7 +1463,7 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit); | |||
1465 | 1463 | ||
1466 | static struct drm_pending_vblank_event *create_vblank_event( | 1464 | static struct drm_pending_vblank_event *create_vblank_event( |
1467 | struct drm_device *dev, struct drm_file *file_priv, | 1465 | struct drm_device *dev, struct drm_file *file_priv, |
1468 | struct fence *fence, uint64_t user_data) | 1466 | struct dma_fence *fence, uint64_t user_data) |
1469 | { | 1467 | { |
1470 | struct drm_pending_vblank_event *e = NULL; | 1468 | struct drm_pending_vblank_event *e = NULL; |
1471 | int ret; | 1469 | int ret; |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index f9362760bfb2..75ad01d595fd 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <drm/drm_plane_helper.h> | 30 | #include <drm/drm_plane_helper.h> |
31 | #include <drm/drm_crtc_helper.h> | 31 | #include <drm/drm_crtc_helper.h> |
32 | #include <drm/drm_atomic_helper.h> | 32 | #include <drm/drm_atomic_helper.h> |
33 | #include <linux/fence.h> | 33 | #include <linux/dma-fence.h> |
34 | 34 | ||
35 | #include "drm_crtc_internal.h" | 35 | #include "drm_crtc_internal.h" |
36 | 36 | ||
@@ -1017,7 +1017,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables); | |||
1017 | * drm_atomic_helper_swap_state() so it uses the current plane state (and | 1017 | * drm_atomic_helper_swap_state() so it uses the current plane state (and |
1018 | * just uses the atomic state to find the changed planes) | 1018 | * just uses the atomic state to find the changed planes) |
1019 | * | 1019 | * |
1020 | * Returns zero on success or < 0 if fence_wait() fails. | 1020 | * Returns zero on success or < 0 if dma_fence_wait() fails. |
1021 | */ | 1021 | */ |
1022 | int drm_atomic_helper_wait_for_fences(struct drm_device *dev, | 1022 | int drm_atomic_helper_wait_for_fences(struct drm_device *dev, |
1023 | struct drm_atomic_state *state, | 1023 | struct drm_atomic_state *state, |
@@ -1041,11 +1041,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev, | |||
1041 | * still interrupt the operation. Instead of blocking until the | 1041 | * still interrupt the operation. Instead of blocking until the |
1042 | * timer expires, make the wait interruptible. | 1042 | * timer expires, make the wait interruptible. |
1043 | */ | 1043 | */ |
1044 | ret = fence_wait(plane_state->fence, pre_swap); | 1044 | ret = dma_fence_wait(plane_state->fence, pre_swap); |
1045 | if (ret) | 1045 | if (ret) |
1046 | return ret; | 1046 | return ret; |
1047 | 1047 | ||
1048 | fence_put(plane_state->fence); | 1048 | dma_fence_put(plane_state->fence); |
1049 | plane_state->fence = NULL; | 1049 | plane_state->fence = NULL; |
1050 | } | 1050 | } |
1051 | 1051 | ||
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index e52aece30900..1f2412c7ccfd 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c | |||
@@ -89,7 +89,7 @@ | |||
89 | * On top of this basic transformation additional properties can be exposed by | 89 | * On top of this basic transformation additional properties can be exposed by |
90 | * the driver: | 90 | * the driver: |
91 | * | 91 | * |
92 | * - Rotation is set up with drm_mode_create_rotation_property(). It adds a | 92 | * - Rotation is set up with drm_plane_create_rotation_property(). It adds a |
93 | * rotation and reflection step between the source and destination rectangles. | 93 | * rotation and reflection step between the source and destination rectangles. |
94 | * Without this property the rectangle is only scaled, but not rotated or | 94 | * Without this property the rectangle is only scaled, but not rotated or |
95 | * reflected. | 95 | * reflected. |
@@ -105,18 +105,12 @@ | |||
105 | */ | 105 | */ |
106 | 106 | ||
107 | /** | 107 | /** |
108 | * drm_mode_create_rotation_property - create a new rotation property | 108 | * drm_plane_create_rotation_property - create a new rotation property |
109 | * @dev: DRM device | 109 | * @plane: drm plane |
110 | * @rotation: initial value of the rotation property | ||
110 | * @supported_rotations: bitmask of supported rotations and reflections | 111 | * @supported_rotations: bitmask of supported rotations and reflections |
111 | * | 112 | * |
112 | * This creates a new property with the selected support for transformations. | 113 | * This creates a new property with the selected support for transformations. |
113 | * The resulting property should be stored in @rotation_property in | ||
114 | * &drm_mode_config. It then must be attached to each plane which supports | ||
115 | * rotations using drm_object_attach_property(). | ||
116 | * | ||
117 | * FIXME: Probably better if the rotation property is created on each plane, | ||
118 | * like the zpos property. Otherwise it's not possible to allow different | ||
119 | * rotation modes on different planes. | ||
120 | * | 114 | * |
121 | * Since a rotation by 180° is the same as reflecting both along the x | 115 | * Since a rotation by 180° is the same as reflecting both along the x |
122 | * and the y axis the rotation property is somewhat redundant. Drivers can use | 116 | * and the y axis the rotation property is somewhat redundant. Drivers can use |
@@ -144,24 +138,6 @@ | |||
144 | * rotation. After reflection, the rotation is applied to the image sampled from | 138 | * rotation. After reflection, the rotation is applied to the image sampled from |
145 | * the source rectangle, before scaling it to fit the destination rectangle. | 139 | * the source rectangle, before scaling it to fit the destination rectangle. |
146 | */ | 140 | */ |
147 | struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, | ||
148 | unsigned int supported_rotations) | ||
149 | { | ||
150 | static const struct drm_prop_enum_list props[] = { | ||
151 | { __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" }, | ||
152 | { __builtin_ffs(DRM_ROTATE_90) - 1, "rotate-90" }, | ||
153 | { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" }, | ||
154 | { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" }, | ||
155 | { __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" }, | ||
156 | { __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" }, | ||
157 | }; | ||
158 | |||
159 | return drm_property_create_bitmask(dev, 0, "rotation", | ||
160 | props, ARRAY_SIZE(props), | ||
161 | supported_rotations); | ||
162 | } | ||
163 | EXPORT_SYMBOL(drm_mode_create_rotation_property); | ||
164 | |||
165 | int drm_plane_create_rotation_property(struct drm_plane *plane, | 141 | int drm_plane_create_rotation_property(struct drm_plane *plane, |
166 | unsigned int rotation, | 142 | unsigned int rotation, |
167 | unsigned int supported_rotations) | 143 | unsigned int supported_rotations) |
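With the mode_config-wide property removed, a driver now creates and attaches the rotation property on each plane individually. A hedged sketch of the call, with the plane pointer and the supported mask chosen purely for illustration (a plane advertising only 0° and 180°, as the msm/mdp5 patches in this pull do):

	/* DRM_ROTATE_0 serves as both the initial value and one bit of
	 * the supported mask. */
	drm_plane_create_rotation_property(plane,
					   DRM_ROTATE_0,
					   DRM_ROTATE_0 | DRM_ROTATE_180);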
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 488355bdafb9..e02563966271 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c | |||
@@ -142,6 +142,11 @@ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) | |||
142 | sizeof(dp_dual_mode_hdmi_id)) == 0; | 142 | sizeof(dp_dual_mode_hdmi_id)) == 0; |
143 | } | 143 | } |
144 | 144 | ||
145 | static bool is_type1_adaptor(uint8_t adaptor_id) | ||
146 | { | ||
147 | return adaptor_id == 0 || adaptor_id == 0xff; | ||
148 | } | ||
149 | |||
145 | static bool is_type2_adaptor(uint8_t adaptor_id) | 150 | static bool is_type2_adaptor(uint8_t adaptor_id) |
146 | { | 151 | { |
147 | return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 | | 152 | return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 | |
@@ -193,6 +198,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) | |||
193 | */ | 198 | */ |
194 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID, | 199 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID, |
195 | hdmi_id, sizeof(hdmi_id)); | 200 | hdmi_id, sizeof(hdmi_id)); |
201 | DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n", | ||
202 | ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret); | ||
196 | if (ret) | 203 | if (ret) |
197 | return DRM_DP_DUAL_MODE_UNKNOWN; | 204 | return DRM_DP_DUAL_MODE_UNKNOWN; |
198 | 205 | ||
@@ -210,6 +217,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) | |||
210 | */ | 217 | */ |
211 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, | 218 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, |
212 | &adaptor_id, sizeof(adaptor_id)); | 219 | &adaptor_id, sizeof(adaptor_id)); |
220 | DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n", | ||
221 | adaptor_id, ret); | ||
213 | if (ret == 0) { | 222 | if (ret == 0) { |
214 | if (is_lspcon_adaptor(hdmi_id, adaptor_id)) | 223 | if (is_lspcon_adaptor(hdmi_id, adaptor_id)) |
215 | return DRM_DP_DUAL_MODE_LSPCON; | 224 | return DRM_DP_DUAL_MODE_LSPCON; |
@@ -219,6 +228,15 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) | |||
219 | else | 228 | else |
220 | return DRM_DP_DUAL_MODE_TYPE2_DVI; | 229 | return DRM_DP_DUAL_MODE_TYPE2_DVI; |
221 | } | 230 | } |
231 | /* | ||
232 | * If this is neither a proper type 1 ID nor a broken type 1 | ||
233 | * adaptor as described above, assume type 1, but let the user | ||
234 | * know that we may have misdetected the type. | ||
235 | */ | ||
236 | if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0]) | ||
237 | DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n", | ||
238 | adaptor_id); | ||
239 | |||
222 | } | 240 | } |
223 | 241 | ||
224 | if (is_hdmi_adaptor(hdmi_id)) | 242 | if (is_hdmi_adaptor(hdmi_id)) |
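For context, a minimal caller sketch; the i2c_adapter is assumed to be the dongle's DDC bus, and the enum values are the ones visible in this hunk:

	static bool dongle_is_lspcon(struct i2c_adapter *adapter)
	{
		/* drm_dp_dual_mode_detect() folds any read failure into
		 * DRM_DP_DUAL_MODE_UNKNOWN, so a plain equality test is
		 * safe here. */
		return drm_dp_dual_mode_detect(adapter) ==
			DRM_DP_DUAL_MODE_LSPCON;
	}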
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 95de47ba1e77..9506933b41cd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -1260,6 +1260,34 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len) | |||
1260 | return ret == xfers ? 0 : -1; | 1260 | return ret == xfers ? 0 : -1; |
1261 | } | 1261 | } |
1262 | 1262 | ||
1263 | static void connector_bad_edid(struct drm_connector *connector, | ||
1264 | u8 *edid, int num_blocks) | ||
1265 | { | ||
1266 | int i; | ||
1267 | |||
1268 | if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS)) | ||
1269 | return; | ||
1270 | |||
1271 | dev_warn(connector->dev->dev, | ||
1272 | "%s: EDID is invalid:\n", | ||
1273 | connector->name); | ||
1274 | for (i = 0; i < num_blocks; i++) { | ||
1275 | u8 *block = edid + i * EDID_LENGTH; | ||
1276 | char prefix[20]; | ||
1277 | |||
1278 | if (drm_edid_is_zero(block, EDID_LENGTH)) | ||
1279 | sprintf(prefix, "\t[%02x] ZERO ", i); | ||
1280 | else if (!drm_edid_block_valid(block, i, false, NULL)) | ||
1281 | sprintf(prefix, "\t[%02x] BAD ", i); | ||
1282 | else | ||
1283 | sprintf(prefix, "\t[%02x] GOOD ", i); | ||
1284 | |||
1285 | print_hex_dump(KERN_WARNING, | ||
1286 | prefix, DUMP_PREFIX_NONE, 16, 1, | ||
1287 | block, EDID_LENGTH, false); | ||
1288 | } | ||
1289 | } | ||
1290 | |||
1263 | /** | 1291 | /** |
1264 | * drm_do_get_edid - get EDID data using a custom EDID block read function | 1292 | * drm_do_get_edid - get EDID data using a custom EDID block read function |
1265 | * @connector: connector we're probing | 1293 | * @connector: connector we're probing |
@@ -1283,7 +1311,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, | |||
1283 | { | 1311 | { |
1284 | int i, j = 0, valid_extensions = 0; | 1312 | int i, j = 0, valid_extensions = 0; |
1285 | u8 *edid, *new; | 1313 | u8 *edid, *new; |
1286 | bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); | ||
1287 | 1314 | ||
1288 | if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) | 1315 | if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) |
1289 | return NULL; | 1316 | return NULL; |
@@ -1292,7 +1319,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, | |||
1292 | for (i = 0; i < 4; i++) { | 1319 | for (i = 0; i < 4; i++) { |
1293 | if (get_edid_block(data, edid, 0, EDID_LENGTH)) | 1320 | if (get_edid_block(data, edid, 0, EDID_LENGTH)) |
1294 | goto out; | 1321 | goto out; |
1295 | if (drm_edid_block_valid(edid, 0, print_bad_edid, | 1322 | if (drm_edid_block_valid(edid, 0, false, |
1296 | &connector->edid_corrupt)) | 1323 | &connector->edid_corrupt)) |
1297 | break; | 1324 | break; |
1298 | if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) { | 1325 | if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) { |
@@ -1304,54 +1331,60 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, | |||
1304 | goto carp; | 1331 | goto carp; |
1305 | 1332 | ||
1306 | /* if there's no extensions, we're done */ | 1333 | /* if there's no extensions, we're done */ |
1307 | if (edid[0x7e] == 0) | 1334 | valid_extensions = edid[0x7e]; |
1335 | if (valid_extensions == 0) | ||
1308 | return (struct edid *)edid; | 1336 | return (struct edid *)edid; |
1309 | 1337 | ||
1310 | new = krealloc(edid, (edid[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); | 1338 | new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); |
1311 | if (!new) | 1339 | if (!new) |
1312 | goto out; | 1340 | goto out; |
1313 | edid = new; | 1341 | edid = new; |
1314 | 1342 | ||
1315 | for (j = 1; j <= edid[0x7e]; j++) { | 1343 | for (j = 1; j <= edid[0x7e]; j++) { |
1316 | u8 *block = edid + (valid_extensions + 1) * EDID_LENGTH; | 1344 | u8 *block = edid + j * EDID_LENGTH; |
1317 | 1345 | ||
1318 | for (i = 0; i < 4; i++) { | 1346 | for (i = 0; i < 4; i++) { |
1319 | if (get_edid_block(data, block, j, EDID_LENGTH)) | 1347 | if (get_edid_block(data, block, j, EDID_LENGTH)) |
1320 | goto out; | 1348 | goto out; |
1321 | if (drm_edid_block_valid(block, j, | 1349 | if (drm_edid_block_valid(block, j, false, NULL)) |
1322 | print_bad_edid, NULL)) { | ||
1323 | valid_extensions++; | ||
1324 | break; | 1350 | break; |
1325 | } | ||
1326 | } | 1351 | } |
1327 | 1352 | ||
1328 | if (i == 4 && print_bad_edid) { | 1353 | if (i == 4) |
1329 | dev_warn(connector->dev->dev, | 1354 | valid_extensions--; |
1330 | "%s: Ignoring invalid EDID block %d.\n", | ||
1331 | connector->name, j); | ||
1332 | |||
1333 | connector->bad_edid_counter++; | ||
1334 | } | ||
1335 | } | 1355 | } |
1336 | 1356 | ||
1337 | if (valid_extensions != edid[0x7e]) { | 1357 | if (valid_extensions != edid[0x7e]) { |
1358 | u8 *base; | ||
1359 | |||
1360 | connector_bad_edid(connector, edid, edid[0x7e] + 1); | ||
1361 | |||
1338 | edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions; | 1362 | edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions; |
1339 | edid[0x7e] = valid_extensions; | 1363 | edid[0x7e] = valid_extensions; |
1340 | new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); | 1364 | |
1365 | new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); | ||
1341 | if (!new) | 1366 | if (!new) |
1342 | goto out; | 1367 | goto out; |
1368 | |||
1369 | base = new; | ||
1370 | for (i = 0; i <= edid[0x7e]; i++) { | ||
1371 | u8 *block = edid + i * EDID_LENGTH; | ||
1372 | |||
1373 | if (!drm_edid_block_valid(block, i, false, NULL)) | ||
1374 | continue; | ||
1375 | |||
1376 | memcpy(base, block, EDID_LENGTH); | ||
1377 | base += EDID_LENGTH; | ||
1378 | } | ||
1379 | |||
1380 | kfree(edid); | ||
1343 | edid = new; | 1381 | edid = new; |
1344 | } | 1382 | } |
1345 | 1383 | ||
1346 | return (struct edid *)edid; | 1384 | return (struct edid *)edid; |
1347 | 1385 | ||
1348 | carp: | 1386 | carp: |
1349 | if (print_bad_edid) { | 1387 | connector_bad_edid(connector, edid, 1); |
1350 | dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", | ||
1351 | connector->name, j); | ||
1352 | } | ||
1353 | connector->bad_edid_counter++; | ||
1354 | |||
1355 | out: | 1388 | out: |
1356 | kfree(edid); | 1389 | kfree(edid); |
1357 | return NULL; | 1390 | return NULL; |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index e0d428f9d1cb..83dbae0fabcf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -392,15 +392,10 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) | |||
392 | if (plane->type != DRM_PLANE_TYPE_PRIMARY) | 392 | if (plane->type != DRM_PLANE_TYPE_PRIMARY) |
393 | drm_plane_force_disable(plane); | 393 | drm_plane_force_disable(plane); |
394 | 394 | ||
395 | if (plane->rotation_property) { | 395 | if (plane->rotation_property) |
396 | drm_mode_plane_set_obj_prop(plane, | 396 | drm_mode_plane_set_obj_prop(plane, |
397 | plane->rotation_property, | 397 | plane->rotation_property, |
398 | DRM_ROTATE_0); | 398 | DRM_ROTATE_0); |
399 | } else if (dev->mode_config.rotation_property) { | ||
400 | drm_mode_plane_set_obj_prop(plane, | ||
401 | dev->mode_config.rotation_property, | ||
402 | DRM_ROTATE_0); | ||
403 | } | ||
404 | } | 399 | } |
405 | 400 | ||
406 | for (i = 0; i < fb_helper->crtc_count; i++) { | 401 | for (i = 0; i < fb_helper->crtc_count; i++) { |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 8bed5f459182..cf993dbf602e 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -665,7 +665,7 @@ void drm_event_cancel_free(struct drm_device *dev, | |||
665 | spin_unlock_irqrestore(&dev->event_lock, flags); | 665 | spin_unlock_irqrestore(&dev->event_lock, flags); |
666 | 666 | ||
667 | if (p->fence) | 667 | if (p->fence) |
668 | fence_put(p->fence); | 668 | dma_fence_put(p->fence); |
669 | 669 | ||
670 | kfree(p); | 670 | kfree(p); |
671 | } | 671 | } |
@@ -696,8 +696,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) | |||
696 | } | 696 | } |
697 | 697 | ||
698 | if (e->fence) { | 698 | if (e->fence) { |
699 | fence_signal(e->fence); | 699 | dma_fence_signal(e->fence); |
700 | fence_put(e->fence); | 700 | dma_fence_put(e->fence); |
701 | } | 701 | } |
702 | 702 | ||
703 | if (!e->file_priv) { | 703 | if (!e->file_priv) { |
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index bc98bb94264d..47848ed8ca48 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c | |||
@@ -6,6 +6,11 @@ | |||
6 | #include <drm/drm_crtc.h> | 6 | #include <drm/drm_crtc.h> |
7 | #include <drm/drm_of.h> | 7 | #include <drm/drm_of.h> |
8 | 8 | ||
9 | static void drm_release_of(struct device *dev, void *data) | ||
10 | { | ||
11 | of_node_put(data); | ||
12 | } | ||
13 | |||
9 | /** | 14 | /** |
10 | * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node | 15 | * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node |
11 | * @dev: DRM device | 16 | * @dev: DRM device |
@@ -64,6 +69,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, | |||
64 | EXPORT_SYMBOL(drm_of_find_possible_crtcs); | 69 | EXPORT_SYMBOL(drm_of_find_possible_crtcs); |
65 | 70 | ||
66 | /** | 71 | /** |
72 | * drm_of_component_match_add - Add a component helper OF node match rule | ||
73 | * @master: master device | ||
74 | * @matchptr: component match pointer | ||
75 | * @compare: compare function used for matching component | ||
76 | * @node: of_node | ||
77 | */ | ||
78 | void drm_of_component_match_add(struct device *master, | ||
79 | struct component_match **matchptr, | ||
80 | int (*compare)(struct device *, void *), | ||
81 | struct device_node *node) | ||
82 | { | ||
83 | of_node_get(node); | ||
84 | component_match_add_release(master, matchptr, drm_release_of, | ||
85 | compare, node); | ||
86 | } | ||
87 | EXPORT_SYMBOL_GPL(drm_of_component_match_add); | ||
88 | |||
89 | /** | ||
67 | * drm_of_component_probe - Generic probe function for a component based master | 90 | * drm_of_component_probe - Generic probe function for a component based master |
68 | * @dev: master device containing the OF node | 91 | * @dev: master device containing the OF node |
69 | * @compare_of: compare function used for matching components | 92 | * @compare_of: compare function used for matching components |
@@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev, | |||
101 | continue; | 124 | continue; |
102 | } | 125 | } |
103 | 126 | ||
104 | component_match_add(dev, &match, compare_of, port); | 127 | drm_of_component_match_add(dev, &match, compare_of, port); |
105 | of_node_put(port); | 128 | of_node_put(port); |
106 | } | 129 | } |
107 | 130 | ||
@@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev, | |||
140 | continue; | 163 | continue; |
141 | } | 164 | } |
142 | 165 | ||
143 | component_match_add(dev, &match, compare_of, remote); | 166 | drm_of_component_match_add(dev, &match, compare_of, |
167 | remote); | ||
144 | of_node_put(remote); | 168 | of_node_put(remote); |
145 | } | 169 | } |
146 | of_node_put(port); | 170 | of_node_put(port); |
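The point of the helper is the reference contract: it takes its own of_node_get() and drops it via drm_release_of() when the match table is released, so callers can put their reference immediately. A sketch of the resulting pattern (dev, match, compare_of and endpoint are assumed from the surrounding probe code, as in the kirin and etnaviv hunks below):

	remote = of_graph_get_remote_port_parent(endpoint);
	drm_of_component_match_add(dev, &match, compare_of, remote);
	of_node_put(remote);	/* the helper holds its own reference now */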
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index aa687669e22b..0dee6acbd880 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/component.h> | 17 | #include <linux/component.h> |
18 | #include <linux/of_platform.h> | 18 | #include <linux/of_platform.h> |
19 | #include <drm/drm_of.h> | ||
19 | 20 | ||
20 | #include "etnaviv_drv.h" | 21 | #include "etnaviv_drv.h" |
21 | #include "etnaviv_gpu.h" | 22 | #include "etnaviv_gpu.h" |
@@ -629,8 +630,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) | |||
629 | if (!core_node) | 630 | if (!core_node) |
630 | break; | 631 | break; |
631 | 632 | ||
632 | component_match_add(&pdev->dev, &match, compare_of, | 633 | drm_of_component_match_add(&pdev->dev, &match, |
633 | core_node); | 634 | compare_of, core_node); |
634 | of_node_put(core_node); | 635 | of_node_put(core_node); |
635 | } | 636 | } |
636 | } else if (dev->platform_data) { | 637 | } else if (dev->platform_data) { |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 3755ef935af4..7d066a91d778 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c | |||
@@ -466,10 +466,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, | |||
466 | } | 466 | } |
467 | 467 | ||
468 | #ifdef CONFIG_DEBUG_FS | 468 | #ifdef CONFIG_DEBUG_FS |
469 | static void etnaviv_gem_describe_fence(struct fence *fence, | 469 | static void etnaviv_gem_describe_fence(struct dma_fence *fence, |
470 | const char *type, struct seq_file *m) | 470 | const char *type, struct seq_file *m) |
471 | { | 471 | { |
472 | if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | 472 | if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
473 | seq_printf(m, "\t%9s: %s %s seq %u\n", | 473 | seq_printf(m, "\t%9s: %s %s seq %u\n", |
474 | type, | 474 | type, |
475 | fence->ops->get_driver_name(fence), | 475 | fence->ops->get_driver_name(fence), |
@@ -482,7 +482,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) | |||
482 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); | 482 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); |
483 | struct reservation_object *robj = etnaviv_obj->resv; | 483 | struct reservation_object *robj = etnaviv_obj->resv; |
484 | struct reservation_object_list *fobj; | 484 | struct reservation_object_list *fobj; |
485 | struct fence *fence; | 485 | struct dma_fence *fence; |
486 | unsigned long off = drm_vma_node_start(&obj->vma_node); | 486 | unsigned long off = drm_vma_node_start(&obj->vma_node); |
487 | 487 | ||
488 | seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", | 488 | seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index b1254f885fed..d2211825e5c8 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c | |||
@@ -15,7 +15,7 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/component.h> | 17 | #include <linux/component.h> |
18 | #include <linux/fence.h> | 18 | #include <linux/dma-fence.h> |
19 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
20 | #include <linux/of_device.h> | 20 | #include <linux/of_device.h> |
21 | #include "etnaviv_dump.h" | 21 | #include "etnaviv_dump.h" |
@@ -882,7 +882,7 @@ static void recover_worker(struct work_struct *work) | |||
882 | for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { | 882 | for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { |
883 | if (!gpu->event[i].used) | 883 | if (!gpu->event[i].used) |
884 | continue; | 884 | continue; |
885 | fence_signal(gpu->event[i].fence); | 885 | dma_fence_signal(gpu->event[i].fence); |
886 | gpu->event[i].fence = NULL; | 886 | gpu->event[i].fence = NULL; |
887 | gpu->event[i].used = false; | 887 | gpu->event[i].used = false; |
888 | complete(&gpu->event_free); | 888 | complete(&gpu->event_free); |
@@ -952,55 +952,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu) | |||
952 | /* fence object management */ | 952 | /* fence object management */ |
953 | struct etnaviv_fence { | 953 | struct etnaviv_fence { |
954 | struct etnaviv_gpu *gpu; | 954 | struct etnaviv_gpu *gpu; |
955 | struct fence base; | 955 | struct dma_fence base; |
956 | }; | 956 | }; |
957 | 957 | ||
958 | static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence) | 958 | static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence) |
959 | { | 959 | { |
960 | return container_of(fence, struct etnaviv_fence, base); | 960 | return container_of(fence, struct etnaviv_fence, base); |
961 | } | 961 | } |
962 | 962 | ||
963 | static const char *etnaviv_fence_get_driver_name(struct fence *fence) | 963 | static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence) |
964 | { | 964 | { |
965 | return "etnaviv"; | 965 | return "etnaviv"; |
966 | } | 966 | } |
967 | 967 | ||
968 | static const char *etnaviv_fence_get_timeline_name(struct fence *fence) | 968 | static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence) |
969 | { | 969 | { |
970 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | 970 | struct etnaviv_fence *f = to_etnaviv_fence(fence); |
971 | 971 | ||
972 | return dev_name(f->gpu->dev); | 972 | return dev_name(f->gpu->dev); |
973 | } | 973 | } |
974 | 974 | ||
975 | static bool etnaviv_fence_enable_signaling(struct fence *fence) | 975 | static bool etnaviv_fence_enable_signaling(struct dma_fence *fence) |
976 | { | 976 | { |
977 | return true; | 977 | return true; |
978 | } | 978 | } |
979 | 979 | ||
980 | static bool etnaviv_fence_signaled(struct fence *fence) | 980 | static bool etnaviv_fence_signaled(struct dma_fence *fence) |
981 | { | 981 | { |
982 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | 982 | struct etnaviv_fence *f = to_etnaviv_fence(fence); |
983 | 983 | ||
984 | return fence_completed(f->gpu, f->base.seqno); | 984 | return fence_completed(f->gpu, f->base.seqno); |
985 | } | 985 | } |
986 | 986 | ||
987 | static void etnaviv_fence_release(struct fence *fence) | 987 | static void etnaviv_fence_release(struct dma_fence *fence) |
988 | { | 988 | { |
989 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | 989 | struct etnaviv_fence *f = to_etnaviv_fence(fence); |
990 | 990 | ||
991 | kfree_rcu(f, base.rcu); | 991 | kfree_rcu(f, base.rcu); |
992 | } | 992 | } |
993 | 993 | ||
994 | static const struct fence_ops etnaviv_fence_ops = { | 994 | static const struct dma_fence_ops etnaviv_fence_ops = { |
995 | .get_driver_name = etnaviv_fence_get_driver_name, | 995 | .get_driver_name = etnaviv_fence_get_driver_name, |
996 | .get_timeline_name = etnaviv_fence_get_timeline_name, | 996 | .get_timeline_name = etnaviv_fence_get_timeline_name, |
997 | .enable_signaling = etnaviv_fence_enable_signaling, | 997 | .enable_signaling = etnaviv_fence_enable_signaling, |
998 | .signaled = etnaviv_fence_signaled, | 998 | .signaled = etnaviv_fence_signaled, |
999 | .wait = fence_default_wait, | 999 | .wait = dma_fence_default_wait, |
1000 | .release = etnaviv_fence_release, | 1000 | .release = etnaviv_fence_release, |
1001 | }; | 1001 | }; |
1002 | 1002 | ||
1003 | static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) | 1003 | static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) |
1004 | { | 1004 | { |
1005 | struct etnaviv_fence *f; | 1005 | struct etnaviv_fence *f; |
1006 | 1006 | ||
@@ -1010,8 +1010,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) | |||
1010 | 1010 | ||
1011 | f->gpu = gpu; | 1011 | f->gpu = gpu; |
1012 | 1012 | ||
1013 | fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, | 1013 | dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, |
1014 | gpu->fence_context, ++gpu->next_fence); | 1014 | gpu->fence_context, ++gpu->next_fence); |
1015 | 1015 | ||
1016 | return &f->base; | 1016 | return &f->base; |
1017 | } | 1017 | } |
@@ -1021,7 +1021,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, | |||
1021 | { | 1021 | { |
1022 | struct reservation_object *robj = etnaviv_obj->resv; | 1022 | struct reservation_object *robj = etnaviv_obj->resv; |
1023 | struct reservation_object_list *fobj; | 1023 | struct reservation_object_list *fobj; |
1024 | struct fence *fence; | 1024 | struct dma_fence *fence; |
1025 | int i, ret; | 1025 | int i, ret; |
1026 | 1026 | ||
1027 | if (!exclusive) { | 1027 | if (!exclusive) { |
@@ -1039,7 +1039,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, | |||
1039 | /* Wait on any existing exclusive fence which isn't our own */ | 1039 | /* Wait on any existing exclusive fence which isn't our own */ |
1040 | fence = reservation_object_get_excl(robj); | 1040 | fence = reservation_object_get_excl(robj); |
1041 | if (fence && fence->context != context) { | 1041 | if (fence && fence->context != context) { |
1042 | ret = fence_wait(fence, true); | 1042 | ret = dma_fence_wait(fence, true); |
1043 | if (ret) | 1043 | if (ret) |
1044 | return ret; | 1044 | return ret; |
1045 | } | 1045 | } |
@@ -1052,7 +1052,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, | |||
1052 | fence = rcu_dereference_protected(fobj->shared[i], | 1052 | fence = rcu_dereference_protected(fobj->shared[i], |
1053 | reservation_object_held(robj)); | 1053 | reservation_object_held(robj)); |
1054 | if (fence->context != context) { | 1054 | if (fence->context != context) { |
1055 | ret = fence_wait(fence, true); | 1055 | ret = dma_fence_wait(fence, true); |
1056 | if (ret) | 1056 | if (ret) |
1057 | return ret; | 1057 | return ret; |
1058 | } | 1058 | } |
@@ -1158,11 +1158,11 @@ static void retire_worker(struct work_struct *work) | |||
1158 | 1158 | ||
1159 | mutex_lock(&gpu->lock); | 1159 | mutex_lock(&gpu->lock); |
1160 | list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) { | 1160 | list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) { |
1161 | if (!fence_is_signaled(cmdbuf->fence)) | 1161 | if (!dma_fence_is_signaled(cmdbuf->fence)) |
1162 | break; | 1162 | break; |
1163 | 1163 | ||
1164 | list_del(&cmdbuf->node); | 1164 | list_del(&cmdbuf->node); |
1165 | fence_put(cmdbuf->fence); | 1165 | dma_fence_put(cmdbuf->fence); |
1166 | 1166 | ||
1167 | for (i = 0; i < cmdbuf->nr_bos; i++) { | 1167 | for (i = 0; i < cmdbuf->nr_bos; i++) { |
1168 | struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i]; | 1168 | struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i]; |
@@ -1275,7 +1275,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu) | |||
1275 | int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, | 1275 | int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, |
1276 | struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) | 1276 | struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) |
1277 | { | 1277 | { |
1278 | struct fence *fence; | 1278 | struct dma_fence *fence; |
1279 | unsigned int event, i; | 1279 | unsigned int event, i; |
1280 | int ret; | 1280 | int ret; |
1281 | 1281 | ||
@@ -1391,7 +1391,7 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
1391 | } | 1391 | } |
1392 | 1392 | ||
1393 | while ((event = ffs(intr)) != 0) { | 1393 | while ((event = ffs(intr)) != 0) { |
1394 | struct fence *fence; | 1394 | struct dma_fence *fence; |
1395 | 1395 | ||
1396 | event -= 1; | 1396 | event -= 1; |
1397 | 1397 | ||
@@ -1401,7 +1401,7 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
1401 | 1401 | ||
1402 | fence = gpu->event[event].fence; | 1402 | fence = gpu->event[event].fence; |
1403 | gpu->event[event].fence = NULL; | 1403 | gpu->event[event].fence = NULL; |
1404 | fence_signal(fence); | 1404 | dma_fence_signal(fence); |
1405 | 1405 | ||
1406 | /* | 1406 | /* |
1407 | * Events can be processed out of order. Eg, | 1407 | * Events can be processed out of order. Eg, |
@@ -1553,7 +1553,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master, | |||
1553 | return ret; | 1553 | return ret; |
1554 | 1554 | ||
1555 | gpu->drm = drm; | 1555 | gpu->drm = drm; |
1556 | gpu->fence_context = fence_context_alloc(1); | 1556 | gpu->fence_context = dma_fence_context_alloc(1); |
1557 | spin_lock_init(&gpu->fence_spinlock); | 1557 | spin_lock_init(&gpu->fence_spinlock); |
1558 | 1558 | ||
1559 | INIT_LIST_HEAD(&gpu->active_cmd_list); | 1559 | INIT_LIST_HEAD(&gpu->active_cmd_list); |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 73c278dc3706..8c6b824e9d0a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h | |||
@@ -89,7 +89,7 @@ struct etnaviv_chip_identity { | |||
89 | 89 | ||
90 | struct etnaviv_event { | 90 | struct etnaviv_event { |
91 | bool used; | 91 | bool used; |
92 | struct fence *fence; | 92 | struct dma_fence *fence; |
93 | }; | 93 | }; |
94 | 94 | ||
95 | struct etnaviv_cmdbuf; | 95 | struct etnaviv_cmdbuf; |
@@ -163,7 +163,7 @@ struct etnaviv_cmdbuf { | |||
163 | /* vram node used if the cmdbuf is mapped through the MMUv2 */ | 163 | /* vram node used if the cmdbuf is mapped through the MMUv2 */ |
164 | struct drm_mm_node vram_node; | 164 | struct drm_mm_node vram_node; |
165 | /* fence after which this buffer is to be disposed */ | 165 | /* fence after which this buffer is to be disposed */ |
166 | struct fence *fence; | 166 | struct dma_fence *fence; |
167 | /* target exec state */ | 167 | /* target exec state */ |
168 | u32 exec_state; | 168 | u32 exec_state; |
169 | /* per GPU in-flight list */ | 169 | /* per GPU in-flight list */ |
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 90377a609c98..e88fde18c946 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <drm/drm_fb_cma_helper.h> | 24 | #include <drm/drm_fb_cma_helper.h> |
25 | #include <drm/drm_atomic_helper.h> | 25 | #include <drm/drm_atomic_helper.h> |
26 | #include <drm/drm_crtc_helper.h> | 26 | #include <drm/drm_crtc_helper.h> |
27 | #include <drm/drm_of.h> | ||
27 | 28 | ||
28 | #include "kirin_drm_drv.h" | 29 | #include "kirin_drm_drv.h" |
29 | 30 | ||
@@ -260,14 +261,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np) | |||
260 | DRM_ERROR("no valid endpoint node\n"); | 261 | DRM_ERROR("no valid endpoint node\n"); |
261 | return ERR_PTR(-ENODEV); | 262 | return ERR_PTR(-ENODEV); |
262 | } | 263 | } |
263 | of_node_put(endpoint); | ||
264 | 264 | ||
265 | remote = of_graph_get_remote_port_parent(endpoint); | 265 | remote = of_graph_get_remote_port_parent(endpoint); |
266 | of_node_put(endpoint); | ||
266 | if (!remote) { | 267 | if (!remote) { |
267 | DRM_ERROR("no valid remote node\n"); | 268 | DRM_ERROR("no valid remote node\n"); |
268 | return ERR_PTR(-ENODEV); | 269 | return ERR_PTR(-ENODEV); |
269 | } | 270 | } |
270 | of_node_put(remote); | ||
271 | 271 | ||
272 | if (!of_device_is_available(remote)) { | 272 | if (!of_device_is_available(remote)) { |
273 | DRM_ERROR("not available for remote node\n"); | 273 | DRM_ERROR("not available for remote node\n"); |
@@ -294,7 +294,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev) | |||
294 | if (IS_ERR(remote)) | 294 | if (IS_ERR(remote)) |
295 | return PTR_ERR(remote); | 295 | return PTR_ERR(remote); |
296 | 296 | ||
297 | component_match_add(dev, &match, compare_of, remote); | 297 | drm_of_component_match_add(dev, &match, compare_of, remote); |
298 | of_node_put(remote); | ||
298 | 299 | ||
299 | return component_master_add_with_match(dev, &kirin_drm_ops, match); | 300 | return component_master_add_with_match(dev, &kirin_drm_ops, match); |
300 | 301 | ||
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 9798d400d817..af8683e0dd54 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c | |||
@@ -1289,7 +1289,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data) | |||
1289 | mutex_unlock(&priv->audio_mutex); | 1289 | mutex_unlock(&priv->audio_mutex); |
1290 | } | 1290 | } |
1291 | 1291 | ||
1292 | int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable) | 1292 | static int |
1293 | tda998x_audio_digital_mute(struct device *dev, void *data, bool enable) | ||
1293 | { | 1294 | { |
1294 | struct tda998x_priv *priv = dev_get_drvdata(dev); | 1295 | struct tda998x_priv *priv = dev_get_drvdata(dev); |
1295 | 1296 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 74ede1f53372..f9af2a00625e 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c | |||
@@ -26,12 +26,12 @@ | |||
26 | 26 | ||
27 | #include "i915_drv.h" | 27 | #include "i915_drv.h" |
28 | 28 | ||
29 | static const char *i915_fence_get_driver_name(struct fence *fence) | 29 | static const char *i915_fence_get_driver_name(struct dma_fence *fence) |
30 | { | 30 | { |
31 | return "i915"; | 31 | return "i915"; |
32 | } | 32 | } |
33 | 33 | ||
34 | static const char *i915_fence_get_timeline_name(struct fence *fence) | 34 | static const char *i915_fence_get_timeline_name(struct dma_fence *fence) |
35 | { | 35 | { |
36 | /* Timelines are bound by eviction to a VM. However, since | 36 | /* Timelines are bound by eviction to a VM. However, since |
37 | * we only have a global seqno at the moment, we only have | 37 | * we only have a global seqno at the moment, we only have |
@@ -42,12 +42,12 @@ static const char *i915_fence_get_timeline_name(struct fence *fence) | |||
42 | return "global"; | 42 | return "global"; |
43 | } | 43 | } |
44 | 44 | ||
45 | static bool i915_fence_signaled(struct fence *fence) | 45 | static bool i915_fence_signaled(struct dma_fence *fence) |
46 | { | 46 | { |
47 | return i915_gem_request_completed(to_request(fence)); | 47 | return i915_gem_request_completed(to_request(fence)); |
48 | } | 48 | } |
49 | 49 | ||
50 | static bool i915_fence_enable_signaling(struct fence *fence) | 50 | static bool i915_fence_enable_signaling(struct dma_fence *fence) |
51 | { | 51 | { |
52 | if (i915_fence_signaled(fence)) | 52 | if (i915_fence_signaled(fence)) |
53 | return false; | 53 | return false; |
@@ -56,7 +56,7 @@ static bool i915_fence_enable_signaling(struct fence *fence) | |||
56 | return true; | 56 | return true; |
57 | } | 57 | } |
58 | 58 | ||
59 | static signed long i915_fence_wait(struct fence *fence, | 59 | static signed long i915_fence_wait(struct dma_fence *fence, |
60 | bool interruptible, | 60 | bool interruptible, |
61 | signed long timeout_jiffies) | 61 | signed long timeout_jiffies) |
62 | { | 62 | { |
@@ -85,26 +85,26 @@ static signed long i915_fence_wait(struct fence *fence, | |||
85 | return timeout_jiffies; | 85 | return timeout_jiffies; |
86 | } | 86 | } |
87 | 87 | ||
88 | static void i915_fence_value_str(struct fence *fence, char *str, int size) | 88 | static void i915_fence_value_str(struct dma_fence *fence, char *str, int size) |
89 | { | 89 | { |
90 | snprintf(str, size, "%u", fence->seqno); | 90 | snprintf(str, size, "%u", fence->seqno); |
91 | } | 91 | } |
92 | 92 | ||
93 | static void i915_fence_timeline_value_str(struct fence *fence, char *str, | 93 | static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str, |
94 | int size) | 94 | int size) |
95 | { | 95 | { |
96 | snprintf(str, size, "%u", | 96 | snprintf(str, size, "%u", |
97 | intel_engine_get_seqno(to_request(fence)->engine)); | 97 | intel_engine_get_seqno(to_request(fence)->engine)); |
98 | } | 98 | } |
99 | 99 | ||
100 | static void i915_fence_release(struct fence *fence) | 100 | static void i915_fence_release(struct dma_fence *fence) |
101 | { | 101 | { |
102 | struct drm_i915_gem_request *req = to_request(fence); | 102 | struct drm_i915_gem_request *req = to_request(fence); |
103 | 103 | ||
104 | kmem_cache_free(req->i915->requests, req); | 104 | kmem_cache_free(req->i915->requests, req); |
105 | } | 105 | } |
106 | 106 | ||
107 | const struct fence_ops i915_fence_ops = { | 107 | const struct dma_fence_ops i915_fence_ops = { |
108 | .get_driver_name = i915_fence_get_driver_name, | 108 | .get_driver_name = i915_fence_get_driver_name, |
109 | .get_timeline_name = i915_fence_get_timeline_name, | 109 | .get_timeline_name = i915_fence_get_timeline_name, |
110 | .enable_signaling = i915_fence_enable_signaling, | 110 | .enable_signaling = i915_fence_enable_signaling, |
@@ -388,8 +388,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
388 | * The reference count is incremented atomically. If it is zero, | 388 | * The reference count is incremented atomically. If it is zero, |
389 | * the lookup knows the request is unallocated and complete. Otherwise, | 389 | * the lookup knows the request is unallocated and complete. Otherwise, |
390 | * it is either still in use, or has been reallocated and reset | 390 | * it is either still in use, or has been reallocated and reset |
391 | * with fence_init(). This increment is safe for release as we check | 391 | * with dma_fence_init(). This increment is safe for release as we |
392 | * that the request we have a reference to and matches the active | 392 | * check that the request we have a reference to and matches the active |
393 | * request. | 393 | * request. |
394 | * | 394 | * |
395 | * Before we increment the refcount, we chase the request->engine | 395 | * Before we increment the refcount, we chase the request->engine |
@@ -412,11 +412,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
412 | goto err; | 412 | goto err; |
413 | 413 | ||
414 | spin_lock_init(&req->lock); | 414 | spin_lock_init(&req->lock); |
415 | fence_init(&req->fence, | 415 | dma_fence_init(&req->fence, |
416 | &i915_fence_ops, | 416 | &i915_fence_ops, |
417 | &req->lock, | 417 | &req->lock, |
418 | engine->fence_context, | 418 | engine->fence_context, |
419 | seqno); | 419 | seqno); |
420 | 420 | ||
421 | i915_sw_fence_init(&req->submit, submit_notify); | 421 | i915_sw_fence_init(&req->submit, submit_notify); |
422 | 422 | ||
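
Taken together, the i915_gem_request.c hunks above are a mechanical rename: struct fence becomes struct dma_fence and every fence_*() call gains a dma_ prefix. A minimal sketch of the resulting driver-fence skeleton, using illustrative demo_* names rather than anything from i915 (in this API revision .enable_signaling and .wait are still mandatory ops):

	#include <linux/dma-fence.h>
	#include <linux/spinlock.h>

	static const char *demo_get_driver_name(struct dma_fence *fence)
	{
		return "demo";
	}

	static const char *demo_get_timeline_name(struct dma_fence *fence)
	{
		return "global";
	}

	static bool demo_enable_signaling(struct dma_fence *fence)
	{
		return true;	/* signalled later, e.g. from an interrupt */
	}

	static const struct dma_fence_ops demo_fence_ops = {
		.get_driver_name = demo_get_driver_name,
		.get_timeline_name = demo_get_timeline_name,
		.enable_signaling = demo_enable_signaling,
		.wait = dma_fence_default_wait,
	};

	struct demo_request {
		struct dma_fence fence;
		spinlock_t lock;
	};

	static void demo_request_init(struct demo_request *req,
				      u64 context, unsigned int seqno)
	{
		spin_lock_init(&req->lock);
		dma_fence_init(&req->fence, &demo_fence_ops, &req->lock,
			       context, seqno);
	}
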
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 974bd7bcc801..bceeaa3a5193 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h | |||
@@ -25,7 +25,7 @@ | |||
25 | #ifndef I915_GEM_REQUEST_H | 25 | #ifndef I915_GEM_REQUEST_H |
26 | #define I915_GEM_REQUEST_H | 26 | #define I915_GEM_REQUEST_H |
27 | 27 | ||
28 | #include <linux/fence.h> | 28 | #include <linux/dma-fence.h> |
29 | 29 | ||
30 | #include "i915_gem.h" | 30 | #include "i915_gem.h" |
31 | #include "i915_sw_fence.h" | 31 | #include "i915_sw_fence.h" |
@@ -62,7 +62,7 @@ struct intel_signal_node { | |||
62 | * The requests are reference counted. | 62 | * The requests are reference counted. |
63 | */ | 63 | */ |
64 | struct drm_i915_gem_request { | 64 | struct drm_i915_gem_request { |
65 | struct fence fence; | 65 | struct dma_fence fence; |
66 | spinlock_t lock; | 66 | spinlock_t lock; |
67 | 67 | ||
68 | /** On Which ring this request was generated */ | 68 | /** On Which ring this request was generated */ |
@@ -145,9 +145,9 @@ struct drm_i915_gem_request { | |||
145 | struct list_head execlist_link; | 145 | struct list_head execlist_link; |
146 | }; | 146 | }; |
147 | 147 | ||
148 | extern const struct fence_ops i915_fence_ops; | 148 | extern const struct dma_fence_ops i915_fence_ops; |
149 | 149 | ||
150 | static inline bool fence_is_i915(struct fence *fence) | 150 | static inline bool fence_is_i915(struct dma_fence *fence) |
151 | { | 151 | { |
152 | return fence->ops == &i915_fence_ops; | 152 | return fence->ops == &i915_fence_ops; |
153 | } | 153 | } |
@@ -172,7 +172,7 @@ i915_gem_request_get_engine(struct drm_i915_gem_request *req) | |||
172 | } | 172 | } |
173 | 173 | ||
174 | static inline struct drm_i915_gem_request * | 174 | static inline struct drm_i915_gem_request * |
175 | to_request(struct fence *fence) | 175 | to_request(struct dma_fence *fence) |
176 | { | 176 | { |
177 | /* We assume that NULL fence/request are interoperable */ | 177 | /* We assume that NULL fence/request are interoperable */ |
178 | BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0); | 178 | BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0); |
@@ -183,19 +183,19 @@ to_request(struct fence *fence) | |||
183 | static inline struct drm_i915_gem_request * | 183 | static inline struct drm_i915_gem_request * |
184 | i915_gem_request_get(struct drm_i915_gem_request *req) | 184 | i915_gem_request_get(struct drm_i915_gem_request *req) |
185 | { | 185 | { |
186 | return to_request(fence_get(&req->fence)); | 186 | return to_request(dma_fence_get(&req->fence)); |
187 | } | 187 | } |
188 | 188 | ||
189 | static inline struct drm_i915_gem_request * | 189 | static inline struct drm_i915_gem_request * |
190 | i915_gem_request_get_rcu(struct drm_i915_gem_request *req) | 190 | i915_gem_request_get_rcu(struct drm_i915_gem_request *req) |
191 | { | 191 | { |
192 | return to_request(fence_get_rcu(&req->fence)); | 192 | return to_request(dma_fence_get_rcu(&req->fence)); |
193 | } | 193 | } |
194 | 194 | ||
195 | static inline void | 195 | static inline void |
196 | i915_gem_request_put(struct drm_i915_gem_request *req) | 196 | i915_gem_request_put(struct drm_i915_gem_request *req) |
197 | { | 197 | { |
198 | fence_put(&req->fence); | 198 | dma_fence_put(&req->fence); |
199 | } | 199 | } |
200 | 200 | ||
201 | static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, | 201 | static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, |
@@ -497,7 +497,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active) | |||
497 | * compiler. | 497 | * compiler. |
498 | * | 498 | * |
499 | * The atomic operation at the heart of | 499 | * The atomic operation at the heart of |
500 | * i915_gem_request_get_rcu(), see fence_get_rcu(), is | 500 | * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is |
501 | * atomic_inc_not_zero() which is only a full memory barrier | 501 | * atomic_inc_not_zero() which is only a full memory barrier |
502 | * when successful. That is, if i915_gem_request_get_rcu() | 502 | * when successful. That is, if i915_gem_request_get_rcu() |
503 | * returns the request (and so with the reference counted | 503 | * returns the request (and so with the reference counted |
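
The header keeps the dma_fence embedded at offset zero so that to_request() can convert a NULL fence into a NULL request for free. A hedged illustration of that container pattern together with the renamed get/put wrappers (demo_* names are not from i915):

	#include <linux/bug.h>
	#include <linux/dma-fence.h>
	#include <linux/kernel.h>

	struct demo_request {
		struct dma_fence fence;	/* kept first on purpose */
	};

	static inline struct demo_request *to_demo_request(struct dma_fence *fence)
	{
		/* offset 0 makes NULL fence and NULL request interchangeable */
		BUILD_BUG_ON(offsetof(struct demo_request, fence) != 0);
		return container_of(fence, struct demo_request, fence);
	}

	static inline struct demo_request *demo_request_get(struct demo_request *req)
	{
		return to_demo_request(dma_fence_get(&req->fence));
	}

	static inline void demo_request_put(struct demo_request *req)
	{
		dma_fence_put(&req->fence);
	}
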
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 1e5cbc585ca2..8185002d7ec8 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c | |||
@@ -8,7 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/fence.h> | 11 | #include <linux/dma-fence.h> |
12 | #include <linux/reservation.h> | 12 | #include <linux/reservation.h> |
13 | 13 | ||
14 | #include "i915_sw_fence.h" | 14 | #include "i915_sw_fence.h" |
@@ -226,49 +226,50 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, | |||
226 | return pending; | 226 | return pending; |
227 | } | 227 | } |
228 | 228 | ||
229 | struct dma_fence_cb { | 229 | struct i915_sw_dma_fence_cb { |
230 | struct fence_cb base; | 230 | struct dma_fence_cb base; |
231 | struct i915_sw_fence *fence; | 231 | struct i915_sw_fence *fence; |
232 | struct fence *dma; | 232 | struct dma_fence *dma; |
233 | struct timer_list timer; | 233 | struct timer_list timer; |
234 | }; | 234 | }; |
235 | 235 | ||
236 | static void timer_i915_sw_fence_wake(unsigned long data) | 236 | static void timer_i915_sw_fence_wake(unsigned long data) |
237 | { | 237 | { |
238 | struct dma_fence_cb *cb = (struct dma_fence_cb *)data; | 238 | struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data; |
239 | 239 | ||
240 | printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n", | 240 | printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n", |
241 | cb->dma->ops->get_driver_name(cb->dma), | 241 | cb->dma->ops->get_driver_name(cb->dma), |
242 | cb->dma->ops->get_timeline_name(cb->dma), | 242 | cb->dma->ops->get_timeline_name(cb->dma), |
243 | cb->dma->seqno); | 243 | cb->dma->seqno); |
244 | fence_put(cb->dma); | 244 | dma_fence_put(cb->dma); |
245 | cb->dma = NULL; | 245 | cb->dma = NULL; |
246 | 246 | ||
247 | i915_sw_fence_commit(cb->fence); | 247 | i915_sw_fence_commit(cb->fence); |
248 | cb->timer.function = NULL; | 248 | cb->timer.function = NULL; |
249 | } | 249 | } |
250 | 250 | ||
251 | static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data) | 251 | static void dma_i915_sw_fence_wake(struct dma_fence *dma, |
252 | struct dma_fence_cb *data) | ||
252 | { | 253 | { |
253 | struct dma_fence_cb *cb = container_of(data, typeof(*cb), base); | 254 | struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base); |
254 | 255 | ||
255 | del_timer_sync(&cb->timer); | 256 | del_timer_sync(&cb->timer); |
256 | if (cb->timer.function) | 257 | if (cb->timer.function) |
257 | i915_sw_fence_commit(cb->fence); | 258 | i915_sw_fence_commit(cb->fence); |
258 | fence_put(cb->dma); | 259 | dma_fence_put(cb->dma); |
259 | 260 | ||
260 | kfree(cb); | 261 | kfree(cb); |
261 | } | 262 | } |
262 | 263 | ||
263 | int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | 264 | int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, |
264 | struct fence *dma, | 265 | struct dma_fence *dma, |
265 | unsigned long timeout, | 266 | unsigned long timeout, |
266 | gfp_t gfp) | 267 | gfp_t gfp) |
267 | { | 268 | { |
268 | struct dma_fence_cb *cb; | 269 | struct i915_sw_dma_fence_cb *cb; |
269 | int ret; | 270 | int ret; |
270 | 271 | ||
271 | if (fence_is_signaled(dma)) | 272 | if (dma_fence_is_signaled(dma)) |
272 | return 0; | 273 | return 0; |
273 | 274 | ||
274 | cb = kmalloc(sizeof(*cb), gfp); | 275 | cb = kmalloc(sizeof(*cb), gfp); |
@@ -276,7 +277,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | |||
276 | if (!gfpflags_allow_blocking(gfp)) | 277 | if (!gfpflags_allow_blocking(gfp)) |
277 | return -ENOMEM; | 278 | return -ENOMEM; |
278 | 279 | ||
279 | return fence_wait(dma, false); | 280 | return dma_fence_wait(dma, false); |
280 | } | 281 | } |
281 | 282 | ||
282 | cb->fence = i915_sw_fence_get(fence); | 283 | cb->fence = i915_sw_fence_get(fence); |
@@ -287,11 +288,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | |||
287 | timer_i915_sw_fence_wake, (unsigned long)cb, | 288 | timer_i915_sw_fence_wake, (unsigned long)cb, |
288 | TIMER_IRQSAFE); | 289 | TIMER_IRQSAFE); |
289 | if (timeout) { | 290 | if (timeout) { |
290 | cb->dma = fence_get(dma); | 291 | cb->dma = dma_fence_get(dma); |
291 | mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout)); | 292 | mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout)); |
292 | } | 293 | } |
293 | 294 | ||
294 | ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake); | 295 | ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake); |
295 | if (ret == 0) { | 296 | if (ret == 0) { |
296 | ret = 1; | 297 | ret = 1; |
297 | } else { | 298 | } else { |
@@ -305,16 +306,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | |||
305 | 306 | ||
306 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | 307 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, |
307 | struct reservation_object *resv, | 308 | struct reservation_object *resv, |
308 | const struct fence_ops *exclude, | 309 | const struct dma_fence_ops *exclude, |
309 | bool write, | 310 | bool write, |
310 | unsigned long timeout, | 311 | unsigned long timeout, |
311 | gfp_t gfp) | 312 | gfp_t gfp) |
312 | { | 313 | { |
313 | struct fence *excl; | 314 | struct dma_fence *excl; |
314 | int ret = 0, pending; | 315 | int ret = 0, pending; |
315 | 316 | ||
316 | if (write) { | 317 | if (write) { |
317 | struct fence **shared; | 318 | struct dma_fence **shared; |
318 | unsigned int count, i; | 319 | unsigned int count, i; |
319 | 320 | ||
320 | ret = reservation_object_get_fences_rcu(resv, | 321 | ret = reservation_object_get_fences_rcu(resv, |
@@ -339,7 +340,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | |||
339 | } | 340 | } |
340 | 341 | ||
341 | for (i = 0; i < count; i++) | 342 | for (i = 0; i < count; i++) |
342 | fence_put(shared[i]); | 343 | dma_fence_put(shared[i]); |
343 | kfree(shared); | 344 | kfree(shared); |
344 | } else { | 345 | } else { |
345 | excl = reservation_object_get_excl_rcu(resv); | 346 | excl = reservation_object_get_excl_rcu(resv); |
@@ -356,7 +357,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | |||
356 | ret |= pending; | 357 | ret |= pending; |
357 | } | 358 | } |
358 | 359 | ||
359 | fence_put(excl); | 360 | dma_fence_put(excl); |
360 | 361 | ||
361 | return ret; | 362 | return ret; |
362 | } | 363 | } |
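
The await path above is the standard dma_fence callback idiom: check for an already-signalled fence, allocate a callback container, and treat -ENOENT from dma_fence_add_callback() as success because the fence signalled in the meantime. A reduced sketch with illustrative names:

	#include <linux/dma-fence.h>
	#include <linux/slab.h>

	struct demo_cb {
		struct dma_fence_cb base;
		void *ctx;
	};

	static void demo_fence_wake(struct dma_fence *fence,
				    struct dma_fence_cb *data)
	{
		struct demo_cb *cb = container_of(data, typeof(*cb), base);

		/* fence signalled: resume whatever cb->ctx was waiting on */
		kfree(cb);
	}

	static int demo_await(struct dma_fence *fence, void *ctx, gfp_t gfp)
	{
		struct demo_cb *cb;
		int ret;

		if (dma_fence_is_signaled(fence))
			return 0;	/* nothing left to wait for */

		cb = kmalloc(sizeof(*cb), gfp);
		if (!cb)
			return -ENOMEM;

		cb->ctx = ctx;
		ret = dma_fence_add_callback(fence, &cb->base, demo_fence_wake);
		if (ret) {
			/* -ENOENT: signalled between the check and here */
			kfree(cb);
			ret = (ret == -ENOENT) ? 0 : ret;
		}
		return ret;
	}
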
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 373141602ca4..cd239e92f67f 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h | |||
@@ -16,8 +16,8 @@ | |||
16 | #include <linux/wait.h> | 16 | #include <linux/wait.h> |
17 | 17 | ||
18 | struct completion; | 18 | struct completion; |
19 | struct fence; | 19 | struct dma_fence; |
20 | struct fence_ops; | 20 | struct dma_fence_ops; |
21 | struct reservation_object; | 21 | struct reservation_object; |
22 | 22 | ||
23 | struct i915_sw_fence { | 23 | struct i915_sw_fence { |
@@ -47,12 +47,12 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, | |||
47 | struct i915_sw_fence *after, | 47 | struct i915_sw_fence *after, |
48 | wait_queue_t *wq); | 48 | wait_queue_t *wq); |
49 | int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | 49 | int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, |
50 | struct fence *dma, | 50 | struct dma_fence *dma, |
51 | unsigned long timeout, | 51 | unsigned long timeout, |
52 | gfp_t gfp); | 52 | gfp_t gfp); |
53 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | 53 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, |
54 | struct reservation_object *resv, | 54 | struct reservation_object *resv, |
55 | const struct fence_ops *exclude, | 55 | const struct dma_fence_ops *exclude, |
56 | bool write, | 56 | bool write, |
57 | unsigned long timeout, | 57 | unsigned long timeout, |
58 | gfp_t gfp); | 58 | gfp_t gfp); |
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 178798002a73..5c912c25f7d3 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -491,7 +491,7 @@ TRACE_EVENT(i915_gem_ring_dispatch, | |||
491 | __entry->ring = req->engine->id; | 491 | __entry->ring = req->engine->id; |
492 | __entry->seqno = req->fence.seqno; | 492 | __entry->seqno = req->fence.seqno; |
493 | __entry->flags = flags; | 493 | __entry->flags = flags; |
494 | fence_enable_sw_signaling(&req->fence); | 494 | dma_fence_enable_sw_signaling(&req->fence); |
495 | ), | 495 | ), |
496 | 496 | ||
497 | TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", | 497 | TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", |
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 23fc1042fed4..56efcc507ea2 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c | |||
@@ -464,7 +464,7 @@ static int intel_breadcrumbs_signaler(void *arg) | |||
464 | &request->signaling.wait); | 464 | &request->signaling.wait); |
465 | 465 | ||
466 | local_bh_disable(); | 466 | local_bh_disable(); |
467 | fence_signal(&request->fence); | 467 | dma_fence_signal(&request->fence); |
468 | local_bh_enable(); /* kick start the tasklets */ | 468 | local_bh_enable(); /* kick start the tasklets */ |
469 | 469 | ||
470 | /* Find the next oldest signal. Note that as we have | 470 | /* Find the next oldest signal. Note that as we have |
@@ -502,7 +502,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request) | |||
502 | struct rb_node *parent, **p; | 502 | struct rb_node *parent, **p; |
503 | bool first, wakeup; | 503 | bool first, wakeup; |
504 | 504 | ||
505 | /* locked by fence_enable_sw_signaling() */ | 505 | /* locked by dma_fence_enable_sw_signaling() */ |
506 | assert_spin_locked(&request->lock); | 506 | assert_spin_locked(&request->lock); |
507 | 507 | ||
508 | request->signaling.wait.tsk = b->signaler; | 508 | request->signaling.wait.tsk = b->signaler; |
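
The signaler above calls dma_fence_signal() with bottom halves disabled so that any tasklets kicked by fence callbacks only run once signalling is complete. A stripped-down sketch of that bracket:

	#include <linux/bottom_half.h>
	#include <linux/dma-fence.h>

	static void demo_signal(struct dma_fence *fence)
	{
		local_bh_disable();
		dma_fence_signal(fence);	/* runs the fence callbacks */
		local_bh_enable();		/* now let tasklets run */
	}
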
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f30db8f2425e..3c2293bd24bf 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1459,8 +1459,7 @@ static void intel_dp_print_hw_revision(struct intel_dp *intel_dp) | |||
1459 | if ((drm_debug & DRM_UT_KMS) == 0) | 1459 | if ((drm_debug & DRM_UT_KMS) == 0) |
1460 | return; | 1460 | return; |
1461 | 1461 | ||
1462 | if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | 1462 | if (!drm_dp_is_branch(intel_dp->dpcd)) |
1463 | DP_DWN_STRM_PORT_PRESENT)) | ||
1464 | return; | 1463 | return; |
1465 | 1464 | ||
1466 | len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1); | 1465 | len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1); |
@@ -1478,8 +1477,7 @@ static void intel_dp_print_sw_revision(struct intel_dp *intel_dp) | |||
1478 | if ((drm_debug & DRM_UT_KMS) == 0) | 1477 | if ((drm_debug & DRM_UT_KMS) == 0) |
1479 | return; | 1478 | return; |
1480 | 1479 | ||
1481 | if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | 1480 | if (!drm_dp_is_branch(intel_dp->dpcd)) |
1482 | DP_DWN_STRM_PORT_PRESENT)) | ||
1483 | return; | 1481 | return; |
1484 | 1482 | ||
1485 | len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2); | 1483 | len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2); |
@@ -3615,8 +3613,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
3615 | if (!is_edp(intel_dp) && !intel_dp->sink_count) | 3613 | if (!is_edp(intel_dp) && !intel_dp->sink_count) |
3616 | return false; | 3614 | return false; |
3617 | 3615 | ||
3618 | if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | 3616 | if (!drm_dp_is_branch(intel_dp->dpcd)) |
3619 | DP_DWN_STRM_PORT_PRESENT)) | ||
3620 | return true; /* native DP sink */ | 3617 | return true; /* native DP sink */ |
3621 | 3618 | ||
3622 | if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) | 3619 | if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) |
@@ -4134,7 +4131,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) | |||
4134 | return connector_status_connected; | 4131 | return connector_status_connected; |
4135 | 4132 | ||
4136 | /* if there's no downstream port, we're done */ | 4133 | /* if there's no downstream port, we're done */ |
4137 | if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) | 4134 | if (!drm_dp_is_branch(dpcd)) |
4138 | return connector_status_connected; | 4135 | return connector_status_connected; |
4139 | 4136 | ||
4140 | /* If we're HPD-aware, SINK_COUNT changes dynamically */ | 4137 | /* If we're HPD-aware, SINK_COUNT changes dynamically */ |
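
All four intel_dp.c hunks replace the same open-coded DPCD downstream-port test with the drm_dp_is_branch() helper factored out elsewhere in this pull. Judging from the code it replaces, the helper boils down to the following (a sketch, not the verbatim kernel definition):

	#include <drm/drm_dp_helper.h>

	/* Branch devices advertise a downstream port in their DPCD. */
	static inline bool demo_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
	{
		return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
	}
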
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 2dc94812bea5..8cceb345aa0f 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
@@ -245,7 +245,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine) | |||
245 | INIT_LIST_HEAD(&engine->execlist_queue); | 245 | INIT_LIST_HEAD(&engine->execlist_queue); |
246 | spin_lock_init(&engine->execlist_lock); | 246 | spin_lock_init(&engine->execlist_lock); |
247 | 247 | ||
248 | engine->fence_context = fence_context_alloc(1); | 248 | engine->fence_context = dma_fence_context_alloc(1); |
249 | 249 | ||
250 | intel_engine_init_requests(engine); | 250 | intel_engine_init_requests(engine); |
251 | intel_engine_init_hangcheck(engine); | 251 | intel_engine_init_hangcheck(engine); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index db61aa5f32ef..296f541fbe2f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <drm/drm_crtc_helper.h> | 18 | #include <drm/drm_crtc_helper.h> |
19 | #include <drm/drm_gem.h> | 19 | #include <drm/drm_gem.h> |
20 | #include <drm/drm_gem_cma_helper.h> | 20 | #include <drm/drm_gem_cma_helper.h> |
21 | #include <drm/drm_of.h> | ||
21 | #include <linux/component.h> | 22 | #include <linux/component.h> |
22 | #include <linux/iommu.h> | 23 | #include <linux/iommu.h> |
23 | #include <linux/of_address.h> | 24 | #include <linux/of_address.h> |
@@ -416,7 +417,8 @@ static int mtk_drm_probe(struct platform_device *pdev) | |||
416 | comp_type == MTK_DPI) { | 417 | comp_type == MTK_DPI) { |
417 | dev_info(dev, "Adding component match for %s\n", | 418 | dev_info(dev, "Adding component match for %s\n", |
418 | node->full_name); | 419 | node->full_name); |
419 | component_match_add(dev, &match, compare_of, node); | 420 | drm_of_component_match_add(dev, &match, compare_of, |
421 | node); | ||
420 | } else { | 422 | } else { |
421 | struct mtk_ddp_comp *comp; | 423 | struct mtk_ddp_comp *comp; |
422 | 424 | ||
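
The component-match conversion swaps component_match_add() for drm_of_component_match_add(), which, per the component_match_add_release() patch in this pull, takes a reference on the device node and drops it when the match table is released. A sketch of that shape, with demo_* names standing in for the real helper (an assumption about its internals, not a quote of drm_of.c):

	#include <linux/component.h>
	#include <linux/device.h>
	#include <linux/of.h>

	static void demo_release_of(struct device *dev, void *data)
	{
		of_node_put(data);	/* drop the reference taken below */
	}

	static void demo_component_match_add(struct device *master,
					     struct component_match **matchptr,
					     int (*compare)(struct device *, void *),
					     struct device_node *node)
	{
		of_node_get(node);
		component_match_add_release(master, matchptr, demo_release_of,
					    compare, node);
	}
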
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 5127b75dbf40..7250ffc6322f 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c | |||
@@ -25,9 +25,6 @@ bool hang_debug = false; | |||
25 | MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); | 25 | MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); |
26 | module_param_named(hang_debug, hang_debug, bool, 0600); | 26 | module_param_named(hang_debug, hang_debug, bool, 0600); |
27 | 27 | ||
28 | struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); | ||
29 | struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); | ||
30 | |||
31 | static const struct adreno_info gpulist[] = { | 28 | static const struct adreno_info gpulist[] = { |
32 | { | 29 | { |
33 | .rev = ADRENO_REV(3, 0, 5, ANY_ID), | 30 | .rev = ADRENO_REV(3, 0, 5, ANY_ID), |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index a54f6e036b4a..07d99bdf7c99 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h | |||
@@ -311,4 +311,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu, | |||
311 | gpu_write(&gpu->base, reg - 1, data); | 311 | gpu_write(&gpu->base, reg - 1, data); |
312 | } | 312 | } |
313 | 313 | ||
314 | struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); | ||
315 | struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); | ||
316 | |||
314 | #endif /* __ADRENO_GPU_H__ */ | 317 | #endif /* __ADRENO_GPU_H__ */ |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 951c002b05df..cf50d3ec8d1b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | |||
@@ -75,15 +75,12 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev, | |||
75 | !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) | 75 | !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) |
76 | return; | 76 | return; |
77 | 77 | ||
78 | if (!dev->mode_config.rotation_property) | 78 | drm_plane_create_rotation_property(plane, |
79 | dev->mode_config.rotation_property = | 79 | DRM_ROTATE_0, |
80 | drm_mode_create_rotation_property(dev, | 80 | DRM_ROTATE_0 | |
81 | DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y); | 81 | DRM_ROTATE_180 | |
82 | 82 | DRM_REFLECT_X | | |
83 | if (dev->mode_config.rotation_property) | 83 | DRM_REFLECT_Y); |
84 | drm_object_attach_property(&plane->base, | ||
85 | dev->mode_config.rotation_property, | ||
86 | DRM_ROTATE_0); | ||
87 | } | 84 | } |
88 | 85 | ||
89 | /* helper to install properties which are common to planes and crtcs */ | 86 | /* helper to install properties which are common to planes and crtcs */ |
@@ -289,6 +286,8 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
289 | plane_enabled(old_state), plane_enabled(state)); | 286 | plane_enabled(old_state), plane_enabled(state)); |
290 | 287 | ||
291 | if (plane_enabled(state)) { | 288 | if (plane_enabled(state)) { |
289 | unsigned int rotation; | ||
290 | |||
292 | format = to_mdp_format(msm_framebuffer_format(state->fb)); | 291 | format = to_mdp_format(msm_framebuffer_format(state->fb)); |
293 | if (MDP_FORMAT_IS_YUV(format) && | 292 | if (MDP_FORMAT_IS_YUV(format) && |
294 | !pipe_supports_yuv(mdp5_plane->caps)) { | 293 | !pipe_supports_yuv(mdp5_plane->caps)) { |
@@ -309,8 +308,13 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
309 | return -EINVAL; | 308 | return -EINVAL; |
310 | } | 309 | } |
311 | 310 | ||
312 | hflip = !!(state->rotation & DRM_REFLECT_X); | 311 | rotation = drm_rotation_simplify(state->rotation, |
313 | vflip = !!(state->rotation & DRM_REFLECT_Y); | 312 | DRM_ROTATE_0 | |
313 | DRM_REFLECT_X | | ||
314 | DRM_REFLECT_Y); | ||
315 | hflip = !!(rotation & DRM_REFLECT_X); | ||
316 | vflip = !!(rotation & DRM_REFLECT_Y); | ||
317 | |||
314 | if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || | 318 | if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || |
315 | (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { | 319 | (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { |
316 | dev_err(plane->dev->dev, | 320 | dev_err(plane->dev->dev, |
@@ -681,6 +685,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, | |||
681 | int pe_top[COMP_MAX], pe_bottom[COMP_MAX]; | 685 | int pe_top[COMP_MAX], pe_bottom[COMP_MAX]; |
682 | uint32_t hdecm = 0, vdecm = 0; | 686 | uint32_t hdecm = 0, vdecm = 0; |
683 | uint32_t pix_format; | 687 | uint32_t pix_format; |
688 | unsigned int rotation; | ||
684 | bool vflip, hflip; | 689 | bool vflip, hflip; |
685 | unsigned long flags; | 690 | unsigned long flags; |
686 | int ret; | 691 | int ret; |
@@ -743,8 +748,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, | |||
743 | config |= get_scale_config(format, src_h, crtc_h, false); | 748 | config |= get_scale_config(format, src_h, crtc_h, false); |
744 | DBG("scale config = %x", config); | 749 | DBG("scale config = %x", config); |
745 | 750 | ||
746 | hflip = !!(pstate->rotation & DRM_REFLECT_X); | 751 | rotation = drm_rotation_simplify(pstate->rotation, |
747 | vflip = !!(pstate->rotation & DRM_REFLECT_Y); | 752 | DRM_ROTATE_0 | |
753 | DRM_REFLECT_X | | ||
754 | DRM_REFLECT_Y); | ||
755 | hflip = !!(rotation & DRM_REFLECT_X); | ||
756 | vflip = !!(rotation & DRM_REFLECT_Y); | ||
748 | 757 | ||
749 | spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); | 758 | spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); |
750 | 759 | ||
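
Both mdp5 hunks funnel the requested rotation through drm_rotation_simplify() against the reflections the pipe supports; since a 180 degree rotation equals reflecting both axes, a reflect-only pipe can now advertise DRM_ROTATE_180. A small sketch of that equivalence (header placement assumed):

	#include <drm/drm_blend.h>

	static unsigned int demo_simplify_180(void)
	{
		/* result: DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y */
		return drm_rotation_simplify(DRM_ROTATE_180,
					     DRM_ROTATE_0 |
					     DRM_REFLECT_X |
					     DRM_REFLECT_Y);
	}
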
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 663f2b6ef091..3c853733c99a 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #ifdef CONFIG_DEBUG_FS | 18 | #ifdef CONFIG_DEBUG_FS |
19 | #include "msm_drv.h" | 19 | #include "msm_drv.h" |
20 | #include "msm_gpu.h" | 20 | #include "msm_gpu.h" |
21 | #include "msm_debugfs.h" | ||
21 | 22 | ||
22 | static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) | 23 | static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) |
23 | { | 24 | { |
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index fb5c0b0a7594..84d38eaea585 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
@@ -15,6 +15,8 @@ | |||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * this program. If not, see <http://www.gnu.org/licenses/>. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <drm/drm_of.h> | ||
19 | |||
18 | #include "msm_drv.h" | 20 | #include "msm_drv.h" |
19 | #include "msm_debugfs.h" | 21 | #include "msm_debugfs.h" |
20 | #include "msm_fence.h" | 22 | #include "msm_fence.h" |
@@ -919,8 +921,8 @@ static int add_components_mdp(struct device *mdp_dev, | |||
919 | continue; | 921 | continue; |
920 | } | 922 | } |
921 | 923 | ||
922 | component_match_add(master_dev, matchptr, compare_of, intf); | 924 | drm_of_component_match_add(master_dev, matchptr, compare_of, |
923 | 925 | intf); | |
924 | of_node_put(intf); | 926 | of_node_put(intf); |
925 | of_node_put(ep_node); | 927 | of_node_put(ep_node); |
926 | } | 928 | } |
@@ -962,8 +964,8 @@ static int add_display_components(struct device *dev, | |||
962 | put_device(mdp_dev); | 964 | put_device(mdp_dev); |
963 | 965 | ||
964 | /* add the MDP component itself */ | 966 | /* add the MDP component itself */ |
965 | component_match_add(dev, matchptr, compare_of, | 967 | drm_of_component_match_add(dev, matchptr, compare_of, |
966 | mdp_dev->of_node); | 968 | mdp_dev->of_node); |
967 | } else { | 969 | } else { |
968 | /* MDP4 */ | 970 | /* MDP4 */ |
969 | mdp_dev = dev; | 971 | mdp_dev = dev; |
@@ -996,7 +998,7 @@ static int add_gpu_components(struct device *dev, | |||
996 | if (!np) | 998 | if (!np) |
997 | return 0; | 999 | return 0; |
998 | 1000 | ||
999 | component_match_add(dev, matchptr, compare_of, np); | 1001 | drm_of_component_match_add(dev, matchptr, compare_of, np); |
1000 | 1002 | ||
1001 | of_node_put(np); | 1003 | of_node_put(np); |
1002 | 1004 | ||
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index d0da52f2a806..940bf4992fe2 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
@@ -217,7 +217,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj); | |||
217 | int msm_gem_sync_object(struct drm_gem_object *obj, | 217 | int msm_gem_sync_object(struct drm_gem_object *obj, |
218 | struct msm_fence_context *fctx, bool exclusive); | 218 | struct msm_fence_context *fctx, bool exclusive); |
219 | void msm_gem_move_to_active(struct drm_gem_object *obj, | 219 | void msm_gem_move_to_active(struct drm_gem_object *obj, |
220 | struct msm_gpu *gpu, bool exclusive, struct fence *fence); | 220 | struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence); |
221 | void msm_gem_move_to_inactive(struct drm_gem_object *obj); | 221 | void msm_gem_move_to_inactive(struct drm_gem_object *obj); |
222 | int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); | 222 | int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); |
223 | int msm_gem_cpu_fini(struct drm_gem_object *obj); | 223 | int msm_gem_cpu_fini(struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index a9b9b1c95a2e..3f299c537b77 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c | |||
@@ -15,7 +15,7 @@ | |||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * this program. If not, see <http://www.gnu.org/licenses/>. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/fence.h> | 18 | #include <linux/dma-fence.h> |
19 | 19 | ||
20 | #include "msm_drv.h" | 20 | #include "msm_drv.h" |
21 | #include "msm_fence.h" | 21 | #include "msm_fence.h" |
@@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name) | |||
32 | 32 | ||
33 | fctx->dev = dev; | 33 | fctx->dev = dev; |
34 | fctx->name = name; | 34 | fctx->name = name; |
35 | fctx->context = fence_context_alloc(1); | 35 | fctx->context = dma_fence_context_alloc(1); |
36 | init_waitqueue_head(&fctx->event); | 36 | init_waitqueue_head(&fctx->event); |
37 | spin_lock_init(&fctx->spinlock); | 37 | spin_lock_init(&fctx->spinlock); |
38 | 38 | ||
@@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence) | |||
100 | 100 | ||
101 | struct msm_fence { | 101 | struct msm_fence { |
102 | struct msm_fence_context *fctx; | 102 | struct msm_fence_context *fctx; |
103 | struct fence base; | 103 | struct dma_fence base; |
104 | }; | 104 | }; |
105 | 105 | ||
106 | static inline struct msm_fence *to_msm_fence(struct fence *fence) | 106 | static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) |
107 | { | 107 | { |
108 | return container_of(fence, struct msm_fence, base); | 108 | return container_of(fence, struct msm_fence, base); |
109 | } | 109 | } |
110 | 110 | ||
111 | static const char *msm_fence_get_driver_name(struct fence *fence) | 111 | static const char *msm_fence_get_driver_name(struct dma_fence *fence) |
112 | { | 112 | { |
113 | return "msm"; | 113 | return "msm"; |
114 | } | 114 | } |
115 | 115 | ||
116 | static const char *msm_fence_get_timeline_name(struct fence *fence) | 116 | static const char *msm_fence_get_timeline_name(struct dma_fence *fence) |
117 | { | 117 | { |
118 | struct msm_fence *f = to_msm_fence(fence); | 118 | struct msm_fence *f = to_msm_fence(fence); |
119 | return f->fctx->name; | 119 | return f->fctx->name; |
120 | } | 120 | } |
121 | 121 | ||
122 | static bool msm_fence_enable_signaling(struct fence *fence) | 122 | static bool msm_fence_enable_signaling(struct dma_fence *fence) |
123 | { | 123 | { |
124 | return true; | 124 | return true; |
125 | } | 125 | } |
126 | 126 | ||
127 | static bool msm_fence_signaled(struct fence *fence) | 127 | static bool msm_fence_signaled(struct dma_fence *fence) |
128 | { | 128 | { |
129 | struct msm_fence *f = to_msm_fence(fence); | 129 | struct msm_fence *f = to_msm_fence(fence); |
130 | return fence_completed(f->fctx, f->base.seqno); | 130 | return fence_completed(f->fctx, f->base.seqno); |
131 | } | 131 | } |
132 | 132 | ||
133 | static void msm_fence_release(struct fence *fence) | 133 | static void msm_fence_release(struct dma_fence *fence) |
134 | { | 134 | { |
135 | struct msm_fence *f = to_msm_fence(fence); | 135 | struct msm_fence *f = to_msm_fence(fence); |
136 | kfree_rcu(f, base.rcu); | 136 | kfree_rcu(f, base.rcu); |
137 | } | 137 | } |
138 | 138 | ||
139 | static const struct fence_ops msm_fence_ops = { | 139 | static const struct dma_fence_ops msm_fence_ops = { |
140 | .get_driver_name = msm_fence_get_driver_name, | 140 | .get_driver_name = msm_fence_get_driver_name, |
141 | .get_timeline_name = msm_fence_get_timeline_name, | 141 | .get_timeline_name = msm_fence_get_timeline_name, |
142 | .enable_signaling = msm_fence_enable_signaling, | 142 | .enable_signaling = msm_fence_enable_signaling, |
143 | .signaled = msm_fence_signaled, | 143 | .signaled = msm_fence_signaled, |
144 | .wait = fence_default_wait, | 144 | .wait = dma_fence_default_wait, |
145 | .release = msm_fence_release, | 145 | .release = msm_fence_release, |
146 | }; | 146 | }; |
147 | 147 | ||
148 | struct fence * | 148 | struct dma_fence * |
149 | msm_fence_alloc(struct msm_fence_context *fctx) | 149 | msm_fence_alloc(struct msm_fence_context *fctx) |
150 | { | 150 | { |
151 | struct msm_fence *f; | 151 | struct msm_fence *f; |
@@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx) | |||
156 | 156 | ||
157 | f->fctx = fctx; | 157 | f->fctx = fctx; |
158 | 158 | ||
159 | fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, | 159 | dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, |
160 | fctx->context, ++fctx->last_fence); | 160 | fctx->context, ++fctx->last_fence); |
161 | 161 | ||
162 | return &f->base; | 162 | return &f->base; |
163 | } | 163 | } |
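
msm_fence_release() frees through kfree_rcu() because dma_fence_get_rcu() permits lockless lookups; the memory must survive until a grace period after the last RCU reader. A hedged sketch of the release side (demo_* names are illustrative):

	#include <linux/dma-fence.h>
	#include <linux/slab.h>

	struct demo_fence {
		struct dma_fence base;
	};

	static void demo_fence_release(struct dma_fence *fence)
	{
		struct demo_fence *f = container_of(fence, struct demo_fence, base);

		/* readers may still hold the fence under rcu_read_lock(),
		 * so defer the actual kfree() past a grace period */
		kfree_rcu(f, base.rcu);
	}
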
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h index ceb5b3d314b4..56061aa1959d 100644 --- a/drivers/gpu/drm/msm/msm_fence.h +++ b/drivers/gpu/drm/msm/msm_fence.h | |||
@@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx, | |||
41 | struct msm_fence_cb *cb, uint32_t fence); | 41 | struct msm_fence_cb *cb, uint32_t fence); |
42 | void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence); | 42 | void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence); |
43 | 43 | ||
44 | struct fence * msm_fence_alloc(struct msm_fence_context *fctx); | 44 | struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx); |
45 | 45 | ||
46 | #endif | 46 | #endif |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b6ac27e31929..57db7dbbb618 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -521,7 +521,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
521 | { | 521 | { |
522 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 522 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
523 | struct reservation_object_list *fobj; | 523 | struct reservation_object_list *fobj; |
524 | struct fence *fence; | 524 | struct dma_fence *fence; |
525 | int i, ret; | 525 | int i, ret; |
526 | 526 | ||
527 | if (!exclusive) { | 527 | if (!exclusive) { |
@@ -540,7 +540,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
540 | fence = reservation_object_get_excl(msm_obj->resv); | 540 | fence = reservation_object_get_excl(msm_obj->resv); |
541 | /* don't need to wait on our own fences, since ring is fifo */ | 541 | /* don't need to wait on our own fences, since ring is fifo */ |
542 | if (fence && (fence->context != fctx->context)) { | 542 | if (fence && (fence->context != fctx->context)) { |
543 | ret = fence_wait(fence, true); | 543 | ret = dma_fence_wait(fence, true); |
544 | if (ret) | 544 | if (ret) |
545 | return ret; | 545 | return ret; |
546 | } | 546 | } |
@@ -553,7 +553,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
553 | fence = rcu_dereference_protected(fobj->shared[i], | 553 | fence = rcu_dereference_protected(fobj->shared[i], |
554 | reservation_object_held(msm_obj->resv)); | 554 | reservation_object_held(msm_obj->resv)); |
555 | if (fence->context != fctx->context) { | 555 | if (fence->context != fctx->context) { |
556 | ret = fence_wait(fence, true); | 556 | ret = dma_fence_wait(fence, true); |
557 | if (ret) | 557 | if (ret) |
558 | return ret; | 558 | return ret; |
559 | } | 559 | } |
@@ -563,7 +563,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
563 | } | 563 | } |
564 | 564 | ||
565 | void msm_gem_move_to_active(struct drm_gem_object *obj, | 565 | void msm_gem_move_to_active(struct drm_gem_object *obj, |
566 | struct msm_gpu *gpu, bool exclusive, struct fence *fence) | 566 | struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence) |
567 | { | 567 | { |
568 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 568 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
569 | WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); | 569 | WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); |
@@ -616,10 +616,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj) | |||
616 | } | 616 | } |
617 | 617 | ||
618 | #ifdef CONFIG_DEBUG_FS | 618 | #ifdef CONFIG_DEBUG_FS |
619 | static void describe_fence(struct fence *fence, const char *type, | 619 | static void describe_fence(struct dma_fence *fence, const char *type, |
620 | struct seq_file *m) | 620 | struct seq_file *m) |
621 | { | 621 | { |
622 | if (!fence_is_signaled(fence)) | 622 | if (!dma_fence_is_signaled(fence)) |
623 | seq_printf(m, "\t%9s: %s %s seq %u\n", type, | 623 | seq_printf(m, "\t%9s: %s %s seq %u\n", type, |
624 | fence->ops->get_driver_name(fence), | 624 | fence->ops->get_driver_name(fence), |
625 | fence->ops->get_timeline_name(fence), | 625 | fence->ops->get_timeline_name(fence), |
@@ -631,7 +631,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) | |||
631 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 631 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
632 | struct reservation_object *robj = msm_obj->resv; | 632 | struct reservation_object *robj = msm_obj->resv; |
633 | struct reservation_object_list *fobj; | 633 | struct reservation_object_list *fobj; |
634 | struct fence *fence; | 634 | struct dma_fence *fence; |
635 | uint64_t off = drm_vma_node_start(&obj->vma_node); | 635 | uint64_t off = drm_vma_node_start(&obj->vma_node); |
636 | const char *madv; | 636 | const char *madv; |
637 | 637 | ||
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index b2f13cfe945e..2cb8551fda70 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h | |||
@@ -104,7 +104,7 @@ struct msm_gem_submit { | |||
104 | struct list_head node; /* node in gpu submit_list */ | 104 | struct list_head node; /* node in gpu submit_list */ |
105 | struct list_head bo_list; | 105 | struct list_head bo_list; |
106 | struct ww_acquire_ctx ticket; | 106 | struct ww_acquire_ctx ticket; |
107 | struct fence *fence; | 107 | struct dma_fence *fence; |
108 | struct pid *pid; /* submitting process */ | 108 | struct pid *pid; /* submitting process */ |
109 | bool valid; /* true if no cmdstream patching needed */ | 109 | bool valid; /* true if no cmdstream patching needed */ |
110 | unsigned int nr_cmds; | 110 | unsigned int nr_cmds; |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b6a0f37a65f3..25e8786fa4ca 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
@@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, | |||
60 | 60 | ||
61 | void msm_gem_submit_free(struct msm_gem_submit *submit) | 61 | void msm_gem_submit_free(struct msm_gem_submit *submit) |
62 | { | 62 | { |
63 | fence_put(submit->fence); | 63 | dma_fence_put(submit->fence); |
64 | list_del(&submit->node); | 64 | list_del(&submit->node); |
65 | put_pid(submit->pid); | 65 | put_pid(submit->pid); |
66 | kfree(submit); | 66 | kfree(submit); |
@@ -380,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
380 | struct msm_file_private *ctx = file->driver_priv; | 380 | struct msm_file_private *ctx = file->driver_priv; |
381 | struct msm_gem_submit *submit; | 381 | struct msm_gem_submit *submit; |
382 | struct msm_gpu *gpu = priv->gpu; | 382 | struct msm_gpu *gpu = priv->gpu; |
383 | struct fence *in_fence = NULL; | 383 | struct dma_fence *in_fence = NULL; |
384 | struct sync_file *sync_file = NULL; | 384 | struct sync_file *sync_file = NULL; |
385 | int out_fence_fd = -1; | 385 | int out_fence_fd = -1; |
386 | unsigned i; | 386 | unsigned i; |
@@ -439,7 +439,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
439 | */ | 439 | */ |
440 | 440 | ||
441 | if (in_fence->context != gpu->fctx->context) { | 441 | if (in_fence->context != gpu->fctx->context) { |
442 | ret = fence_wait(in_fence, true); | 442 | ret = dma_fence_wait(in_fence, true); |
443 | if (ret) | 443 | if (ret) |
444 | goto out; | 444 | goto out; |
445 | } | 445 | } |
@@ -542,7 +542,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
542 | 542 | ||
543 | out: | 543 | out: |
544 | if (in_fence) | 544 | if (in_fence) |
545 | fence_put(in_fence); | 545 | dma_fence_put(in_fence); |
546 | submit_cleanup(submit); | 546 | submit_cleanup(submit); |
547 | if (ret) | 547 | if (ret) |
548 | msm_gem_submit_free(submit); | 548 | msm_gem_submit_free(submit); |
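
The submit path only blocks on fences from a foreign context: fences on the GPU's own context retire in ring order, so waiting on them would be redundant. Sketched with illustrative names:

	#include <linux/dma-fence.h>

	/* Only wait on fences from another context; our own ring is FIFO. */
	static int demo_sync_fence(struct dma_fence *fence, u64 our_context)
	{
		if (!fence || fence->context == our_context)
			return 0;

		return dma_fence_wait(fence, true);	/* interruptible */
	}
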
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5bb09838b5ae..3249707e6834 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
@@ -476,7 +476,7 @@ static void retire_submits(struct msm_gpu *gpu) | |||
476 | submit = list_first_entry(&gpu->submit_list, | 476 | submit = list_first_entry(&gpu->submit_list, |
477 | struct msm_gem_submit, node); | 477 | struct msm_gem_submit, node); |
478 | 478 | ||
479 | if (fence_is_signaled(submit->fence)) { | 479 | if (dma_fence_is_signaled(submit->fence)) { |
480 | retire_submit(gpu, submit); | 480 | retire_submit(gpu, submit); |
481 | } else { | 481 | } else { |
482 | break; | 482 | break; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index e395cb6f511f..e0c0007689e5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i) | |||
83 | 83 | ||
84 | static void | 84 | static void |
85 | nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, | 85 | nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, |
86 | struct fence *fence) | 86 | struct dma_fence *fence) |
87 | { | 87 | { |
88 | struct nouveau_drm *drm = nouveau_drm(dev); | 88 | struct nouveau_drm *drm = nouveau_drm(dev); |
89 | 89 | ||
90 | if (tile) { | 90 | if (tile) { |
91 | spin_lock(&drm->tile.lock); | 91 | spin_lock(&drm->tile.lock); |
92 | tile->fence = (struct nouveau_fence *)fence_get(fence); | 92 | tile->fence = (struct nouveau_fence *)dma_fence_get(fence); |
93 | tile->used = false; | 93 | tile->used = false; |
94 | spin_unlock(&drm->tile.lock); | 94 | spin_unlock(&drm->tile.lock); |
95 | } | 95 | } |
@@ -1243,7 +1243,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, | |||
1243 | { | 1243 | { |
1244 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); | 1244 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
1245 | struct drm_device *dev = drm->dev; | 1245 | struct drm_device *dev = drm->dev; |
1246 | struct fence *fence = reservation_object_get_excl(bo->resv); | 1246 | struct dma_fence *fence = reservation_object_get_excl(bo->resv); |
1247 | 1247 | ||
1248 | nv10_bo_put_tile_region(dev, *old_tile, fence); | 1248 | nv10_bo_put_tile_region(dev, *old_tile, fence); |
1249 | *old_tile = new_tile; | 1249 | *old_tile = new_tile; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 4bb9ab892ae1..e9529ee6bc23 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | #include <linux/ktime.h> | 29 | #include <linux/ktime.h> |
30 | #include <linux/hrtimer.h> | 30 | #include <linux/hrtimer.h> |
31 | #include <trace/events/fence.h> | 31 | #include <trace/events/dma_fence.h> |
32 | 32 | ||
33 | #include <nvif/cl826e.h> | 33 | #include <nvif/cl826e.h> |
34 | #include <nvif/notify.h> | 34 | #include <nvif/notify.h> |
@@ -38,11 +38,11 @@ | |||
38 | #include "nouveau_dma.h" | 38 | #include "nouveau_dma.h" |
39 | #include "nouveau_fence.h" | 39 | #include "nouveau_fence.h" |
40 | 40 | ||
41 | static const struct fence_ops nouveau_fence_ops_uevent; | 41 | static const struct dma_fence_ops nouveau_fence_ops_uevent; |
42 | static const struct fence_ops nouveau_fence_ops_legacy; | 42 | static const struct dma_fence_ops nouveau_fence_ops_legacy; |
43 | 43 | ||
44 | static inline struct nouveau_fence * | 44 | static inline struct nouveau_fence * |
45 | from_fence(struct fence *fence) | 45 | from_fence(struct dma_fence *fence) |
46 | { | 46 | { |
47 | return container_of(fence, struct nouveau_fence, base); | 47 | return container_of(fence, struct nouveau_fence, base); |
48 | } | 48 | } |
@@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence) | |||
58 | { | 58 | { |
59 | int drop = 0; | 59 | int drop = 0; |
60 | 60 | ||
61 | fence_signal_locked(&fence->base); | 61 | dma_fence_signal_locked(&fence->base); |
62 | list_del(&fence->head); | 62 | list_del(&fence->head); |
63 | rcu_assign_pointer(fence->channel, NULL); | 63 | rcu_assign_pointer(fence->channel, NULL); |
64 | 64 | ||
65 | if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) { | 65 | if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) { |
66 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 66 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
67 | 67 | ||
68 | if (!--fctx->notify_ref) | 68 | if (!--fctx->notify_ref) |
69 | drop = 1; | 69 | drop = 1; |
70 | } | 70 | } |
71 | 71 | ||
72 | fence_put(&fence->base); | 72 | dma_fence_put(&fence->base); |
73 | return drop; | 73 | return drop; |
74 | } | 74 | } |
75 | 75 | ||
76 | static struct nouveau_fence * | 76 | static struct nouveau_fence * |
77 | nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) { | 77 | nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) { |
78 | struct nouveau_fence_priv *priv = (void*)drm->fence; | 78 | struct nouveau_fence_priv *priv = (void*)drm->fence; |
79 | 79 | ||
80 | if (fence->ops != &nouveau_fence_ops_legacy && | 80 | if (fence->ops != &nouveau_fence_ops_legacy && |
@@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha | |||
201 | 201 | ||
202 | struct nouveau_fence_work { | 202 | struct nouveau_fence_work { |
203 | struct work_struct work; | 203 | struct work_struct work; |
204 | struct fence_cb cb; | 204 | struct dma_fence_cb cb; |
205 | void (*func)(void *); | 205 | void (*func)(void *); |
206 | void *data; | 206 | void *data; |
207 | }; | 207 | }; |
@@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork) | |||
214 | kfree(work); | 214 | kfree(work); |
215 | } | 215 | } |
216 | 216 | ||
217 | static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb) | 217 | static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
218 | { | 218 | { |
219 | struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb); | 219 | struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb); |
220 | 220 | ||
@@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb) | |||
222 | } | 222 | } |
223 | 223 | ||
224 | void | 224 | void |
225 | nouveau_fence_work(struct fence *fence, | 225 | nouveau_fence_work(struct dma_fence *fence, |
226 | void (*func)(void *), void *data) | 226 | void (*func)(void *), void *data) |
227 | { | 227 | { |
228 | struct nouveau_fence_work *work; | 228 | struct nouveau_fence_work *work; |
229 | 229 | ||
230 | if (fence_is_signaled(fence)) | 230 | if (dma_fence_is_signaled(fence)) |
231 | goto err; | 231 | goto err; |
232 | 232 | ||
233 | work = kmalloc(sizeof(*work), GFP_KERNEL); | 233 | work = kmalloc(sizeof(*work), GFP_KERNEL); |
@@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence, | |||
245 | work->func = func; | 245 | work->func = func; |
246 | work->data = data; | 246 | work->data = data; |
247 | 247 | ||
248 | if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0) | 248 | if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0) |
249 | goto err_free; | 249 | goto err_free; |
250 | return; | 250 | return; |
251 | 251 | ||
@@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) | |||
266 | fence->timeout = jiffies + (15 * HZ); | 266 | fence->timeout = jiffies + (15 * HZ); |
267 | 267 | ||
268 | if (priv->uevent) | 268 | if (priv->uevent) |
269 | fence_init(&fence->base, &nouveau_fence_ops_uevent, | 269 | dma_fence_init(&fence->base, &nouveau_fence_ops_uevent, |
270 | &fctx->lock, fctx->context, ++fctx->sequence); | 270 | &fctx->lock, fctx->context, ++fctx->sequence); |
271 | else | 271 | else |
272 | fence_init(&fence->base, &nouveau_fence_ops_legacy, | 272 | dma_fence_init(&fence->base, &nouveau_fence_ops_legacy, |
273 | &fctx->lock, fctx->context, ++fctx->sequence); | 273 | &fctx->lock, fctx->context, ++fctx->sequence); |
274 | kref_get(&fctx->fence_ref); | 274 | kref_get(&fctx->fence_ref); |
275 | 275 | ||
276 | trace_fence_emit(&fence->base); | 276 | trace_dma_fence_emit(&fence->base); |
277 | ret = fctx->emit(fence); | 277 | ret = fctx->emit(fence); |
278 | if (!ret) { | 278 | if (!ret) { |
279 | fence_get(&fence->base); | 279 | dma_fence_get(&fence->base); |
280 | spin_lock_irq(&fctx->lock); | 280 | spin_lock_irq(&fctx->lock); |
281 | 281 | ||
282 | if (nouveau_fence_update(chan, fctx)) | 282 | if (nouveau_fence_update(chan, fctx)) |
@@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence) | |||
298 | struct nouveau_channel *chan; | 298 | struct nouveau_channel *chan; |
299 | unsigned long flags; | 299 | unsigned long flags; |
300 | 300 | ||
301 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) | 301 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) |
302 | return true; | 302 | return true; |
303 | 303 | ||
304 | spin_lock_irqsave(&fctx->lock, flags); | 304 | spin_lock_irqsave(&fctx->lock, flags); |
@@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence) | |||
307 | nvif_notify_put(&fctx->notify); | 307 | nvif_notify_put(&fctx->notify); |
308 | spin_unlock_irqrestore(&fctx->lock, flags); | 308 | spin_unlock_irqrestore(&fctx->lock, flags); |
309 | } | 309 | } |
310 | return fence_is_signaled(&fence->base); | 310 | return dma_fence_is_signaled(&fence->base); |
311 | } | 311 | } |
312 | 312 | ||
313 | static long | 313 | static long |
314 | nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait) | 314 | nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait) |
315 | { | 315 | { |
316 | struct nouveau_fence *fence = from_fence(f); | 316 | struct nouveau_fence *fence = from_fence(f); |
317 | unsigned long sleep_time = NSEC_PER_MSEC / 1000; | 317 | unsigned long sleep_time = NSEC_PER_MSEC / 1000; |
@@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) | |||
378 | if (!lazy) | 378 | if (!lazy) |
379 | return nouveau_fence_wait_busy(fence, intr); | 379 | return nouveau_fence_wait_busy(fence, intr); |
380 | 380 | ||
381 | ret = fence_wait_timeout(&fence->base, intr, 15 * HZ); | 381 | ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ); |
382 | if (ret < 0) | 382 | if (ret < 0) |
383 | return ret; | 383 | return ret; |
384 | else if (!ret) | 384 | else if (!ret) |
@@ -391,7 +391,7 @@ int | |||
391 | nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr) | 391 | nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr) |
392 | { | 392 | { |
393 | struct nouveau_fence_chan *fctx = chan->fence; | 393 | struct nouveau_fence_chan *fctx = chan->fence; |
394 | struct fence *fence; | 394 | struct dma_fence *fence; |
395 | struct reservation_object *resv = nvbo->bo.resv; | 395 | struct reservation_object *resv = nvbo->bo.resv; |
396 | struct reservation_object_list *fobj; | 396 | struct reservation_object_list *fobj; |
397 | struct nouveau_fence *f; | 397 | struct nouveau_fence *f; |
@@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e | |||
421 | } | 421 | } |
422 | 422 | ||
423 | if (must_wait) | 423 | if (must_wait) |
424 | ret = fence_wait(fence, intr); | 424 | ret = dma_fence_wait(fence, intr); |
425 | 425 | ||
426 | return ret; | 426 | return ret; |
427 | } | 427 | } |
@@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e | |||
446 | } | 446 | } |
447 | 447 | ||
448 | if (must_wait) | 448 | if (must_wait) |
449 | ret = fence_wait(fence, intr); | 449 | ret = dma_fence_wait(fence, intr); |
450 | } | 450 | } |
451 | 451 | ||
452 | return ret; | 452 | return ret; |
@@ -456,7 +456,7 @@ void | |||
456 | nouveau_fence_unref(struct nouveau_fence **pfence) | 456 | nouveau_fence_unref(struct nouveau_fence **pfence) |
457 | { | 457 | { |
458 | if (*pfence) | 458 | if (*pfence) |
459 | fence_put(&(*pfence)->base); | 459 | dma_fence_put(&(*pfence)->base); |
460 | *pfence = NULL; | 460 | *pfence = NULL; |
461 | } | 461 | } |
462 | 462 | ||
@@ -484,12 +484,12 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem, | |||
484 | return ret; | 484 | return ret; |
485 | } | 485 | } |
486 | 486 | ||
487 | static const char *nouveau_fence_get_get_driver_name(struct fence *fence) | 487 | static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence) |
488 | { | 488 | { |
489 | return "nouveau"; | 489 | return "nouveau"; |
490 | } | 490 | } |
491 | 491 | ||
492 | static const char *nouveau_fence_get_timeline_name(struct fence *f) | 492 | static const char *nouveau_fence_get_timeline_name(struct dma_fence *f) |
493 | { | 493 | { |
494 | struct nouveau_fence *fence = from_fence(f); | 494 | struct nouveau_fence *fence = from_fence(f); |
495 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 495 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
@@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f) | |||
503 | * result. The drm node should still be there, so we can derive the index from | 503 | * result. The drm node should still be there, so we can derive the index from |
504 | * the fence context. | 504 | * the fence context. |
505 | */ | 505 | */ |
506 | static bool nouveau_fence_is_signaled(struct fence *f) | 506 | static bool nouveau_fence_is_signaled(struct dma_fence *f) |
507 | { | 507 | { |
508 | struct nouveau_fence *fence = from_fence(f); | 508 | struct nouveau_fence *fence = from_fence(f); |
509 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 509 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
@@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f) | |||
519 | return ret; | 519 | return ret; |
520 | } | 520 | } |
521 | 521 | ||
522 | static bool nouveau_fence_no_signaling(struct fence *f) | 522 | static bool nouveau_fence_no_signaling(struct dma_fence *f) |
523 | { | 523 | { |
524 | struct nouveau_fence *fence = from_fence(f); | 524 | struct nouveau_fence *fence = from_fence(f); |
525 | 525 | ||
@@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f) | |||
530 | WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1); | 530 | WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1); |
531 | 531 | ||
532 | /* | 532 | /* |
533 | * This needs uevents to work correctly, but fence_add_callback relies on | 533 | * This needs uevents to work correctly, but dma_fence_add_callback relies on |
534 | * being able to enable signaling. It will still get signaled eventually, | 534 | * being able to enable signaling. It will still get signaled eventually, |
535 | * just not right away. | 535 | * just not right away. |
536 | */ | 536 | */ |
537 | if (nouveau_fence_is_signaled(f)) { | 537 | if (nouveau_fence_is_signaled(f)) { |
538 | list_del(&fence->head); | 538 | list_del(&fence->head); |
539 | 539 | ||
540 | fence_put(&fence->base); | 540 | dma_fence_put(&fence->base); |
541 | return false; | 541 | return false; |
542 | } | 542 | } |
543 | 543 | ||
544 | return true; | 544 | return true; |
545 | } | 545 | } |
546 | 546 | ||
547 | static void nouveau_fence_release(struct fence *f) | 547 | static void nouveau_fence_release(struct dma_fence *f) |
548 | { | 548 | { |
549 | struct nouveau_fence *fence = from_fence(f); | 549 | struct nouveau_fence *fence = from_fence(f); |
550 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 550 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
551 | 551 | ||
552 | kref_put(&fctx->fence_ref, nouveau_fence_context_put); | 552 | kref_put(&fctx->fence_ref, nouveau_fence_context_put); |
553 | fence_free(&fence->base); | 553 | dma_fence_free(&fence->base); |
554 | } | 554 | } |
555 | 555 | ||
556 | static const struct fence_ops nouveau_fence_ops_legacy = { | 556 | static const struct dma_fence_ops nouveau_fence_ops_legacy = { |
557 | .get_driver_name = nouveau_fence_get_get_driver_name, | 557 | .get_driver_name = nouveau_fence_get_get_driver_name, |
558 | .get_timeline_name = nouveau_fence_get_timeline_name, | 558 | .get_timeline_name = nouveau_fence_get_timeline_name, |
559 | .enable_signaling = nouveau_fence_no_signaling, | 559 | .enable_signaling = nouveau_fence_no_signaling, |
@@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = { | |||
562 | .release = nouveau_fence_release | 562 | .release = nouveau_fence_release |
563 | }; | 563 | }; |
564 | 564 | ||
565 | static bool nouveau_fence_enable_signaling(struct fence *f) | 565 | static bool nouveau_fence_enable_signaling(struct dma_fence *f) |
566 | { | 566 | { |
567 | struct nouveau_fence *fence = from_fence(f); | 567 | struct nouveau_fence *fence = from_fence(f); |
568 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 568 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
@@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f) | |||
573 | 573 | ||
574 | ret = nouveau_fence_no_signaling(f); | 574 | ret = nouveau_fence_no_signaling(f); |
575 | if (ret) | 575 | if (ret) |
576 | set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags); | 576 | set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags); |
577 | else if (!--fctx->notify_ref) | 577 | else if (!--fctx->notify_ref) |
578 | nvif_notify_put(&fctx->notify); | 578 | nvif_notify_put(&fctx->notify); |
579 | 579 | ||
580 | return ret; | 580 | return ret; |
581 | } | 581 | } |
582 | 582 | ||
583 | static const struct fence_ops nouveau_fence_ops_uevent = { | 583 | static const struct dma_fence_ops nouveau_fence_ops_uevent = { |
584 | .get_driver_name = nouveau_fence_get_get_driver_name, | 584 | .get_driver_name = nouveau_fence_get_get_driver_name, |
585 | .get_timeline_name = nouveau_fence_get_timeline_name, | 585 | .get_timeline_name = nouveau_fence_get_timeline_name, |
586 | .enable_signaling = nouveau_fence_enable_signaling, | 586 | .enable_signaling = nouveau_fence_enable_signaling, |
587 | .signaled = nouveau_fence_is_signaled, | 587 | .signaled = nouveau_fence_is_signaled, |
588 | .wait = fence_default_wait, | 588 | .wait = dma_fence_default_wait, |
589 | .release = NULL | 589 | .release = NULL |
590 | }; | 590 | }; |
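
The nouveau hunks above are typical of the whole rename: the driver embeds the fence in its own structure, every ops callback now takes a struct dma_fence *, and the vtable type is struct dma_fence_ops. A minimal standalone provider following the renamed API might look like the sketch below — the demo_* names are hypothetical, not nouveau code; at this kernel version the four ops shown are all mandatory:

	#include <linux/dma-fence.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct demo_fence {
		struct dma_fence base;	/* embedded; callbacks recover it via container_of() */
		spinlock_t lock;	/* protects the fence's signal state */
	};

	static const char *demo_get_driver_name(struct dma_fence *f)
	{
		return "demo";
	}

	static const char *demo_get_timeline_name(struct dma_fence *f)
	{
		return "demo-ring";
	}

	static bool demo_enable_signaling(struct dma_fence *f)
	{
		return true;	/* signaling is driven by dma_fence_signal() elsewhere */
	}

	static const struct dma_fence_ops demo_fence_ops = {
		.get_driver_name   = demo_get_driver_name,
		.get_timeline_name = demo_get_timeline_name,
		.enable_signaling  = demo_enable_signaling,
		.wait              = dma_fence_default_wait,	/* sleep until signaled */
	};

	static struct dma_fence *demo_fence_create(u64 context, unsigned int seqno)
	{
		struct demo_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		spin_lock_init(&f->lock);
		dma_fence_init(&f->base, &demo_fence_ops, &f->lock, context, seqno);
		return &f->base;
	}
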
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 64c4ce7115ad..41f3c019e534 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h | |||
@@ -1,14 +1,14 @@ | |||
1 | #ifndef __NOUVEAU_FENCE_H__ | 1 | #ifndef __NOUVEAU_FENCE_H__ |
2 | #define __NOUVEAU_FENCE_H__ | 2 | #define __NOUVEAU_FENCE_H__ |
3 | 3 | ||
4 | #include <linux/fence.h> | 4 | #include <linux/dma-fence.h> |
5 | #include <nvif/notify.h> | 5 | #include <nvif/notify.h> |
6 | 6 | ||
7 | struct nouveau_drm; | 7 | struct nouveau_drm; |
8 | struct nouveau_bo; | 8 | struct nouveau_bo; |
9 | 9 | ||
10 | struct nouveau_fence { | 10 | struct nouveau_fence { |
11 | struct fence base; | 11 | struct dma_fence base; |
12 | 12 | ||
13 | struct list_head head; | 13 | struct list_head head; |
14 | 14 | ||
@@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **); | |||
24 | 24 | ||
25 | int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *); | 25 | int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *); |
26 | bool nouveau_fence_done(struct nouveau_fence *); | 26 | bool nouveau_fence_done(struct nouveau_fence *); |
27 | void nouveau_fence_work(struct fence *, void (*)(void *), void *); | 27 | void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *); |
28 | int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); | 28 | int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); |
29 | int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); | 29 | int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); |
30 | 30 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0bd7164bc817..7f083c95f422 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma) | |||
119 | const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM; | 119 | const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM; |
120 | struct reservation_object *resv = nvbo->bo.resv; | 120 | struct reservation_object *resv = nvbo->bo.resv; |
121 | struct reservation_object_list *fobj; | 121 | struct reservation_object_list *fobj; |
122 | struct fence *fence = NULL; | 122 | struct dma_fence *fence = NULL; |
123 | 123 | ||
124 | fobj = reservation_object_get_list(resv); | 124 | fobj = reservation_object_get_list(resv); |
125 | 125 | ||
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c index 1915b7b82a59..fa8f2375c398 100644 --- a/drivers/gpu/drm/nouveau/nv04_fence.c +++ b/drivers/gpu/drm/nouveau/nv04_fence.c | |||
@@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm) | |||
110 | priv->base.context_new = nv04_fence_context_new; | 110 | priv->base.context_new = nv04_fence_context_new; |
111 | priv->base.context_del = nv04_fence_context_del; | 111 | priv->base.context_del = nv04_fence_context_del; |
112 | priv->base.contexts = 15; | 112 | priv->base.contexts = 15; |
113 | priv->base.context_base = fence_context_alloc(priv->base.contexts); | 113 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c index 4e3de34ff6f4..f99fcf56928a 100644 --- a/drivers/gpu/drm/nouveau/nv10_fence.c +++ b/drivers/gpu/drm/nouveau/nv10_fence.c | |||
@@ -107,7 +107,7 @@ nv10_fence_create(struct nouveau_drm *drm) | |||
107 | priv->base.context_new = nv10_fence_context_new; | 107 | priv->base.context_new = nv10_fence_context_new; |
108 | priv->base.context_del = nv10_fence_context_del; | 108 | priv->base.context_del = nv10_fence_context_del; |
109 | priv->base.contexts = 31; | 109 | priv->base.contexts = 31; |
110 | priv->base.context_base = fence_context_alloc(priv->base.contexts); | 110 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
111 | spin_lock_init(&priv->lock); | 111 | spin_lock_init(&priv->lock); |
112 | return 0; | 112 | return 0; |
113 | } | 113 | } |
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 7d5e562a55c5..79bc01111351 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c | |||
@@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm) | |||
126 | priv->base.context_new = nv17_fence_context_new; | 126 | priv->base.context_new = nv17_fence_context_new; |
127 | priv->base.context_del = nv10_fence_context_del; | 127 | priv->base.context_del = nv10_fence_context_del; |
128 | priv->base.contexts = 31; | 128 | priv->base.contexts = 31; |
129 | priv->base.context_base = fence_context_alloc(priv->base.contexts); | 129 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
130 | spin_lock_init(&priv->lock); | 130 | spin_lock_init(&priv->lock); |
131 | 131 | ||
132 | ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 132 | ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index 4d6f202b7770..8c5295414578 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c | |||
@@ -97,7 +97,7 @@ nv50_fence_create(struct nouveau_drm *drm) | |||
97 | priv->base.context_new = nv50_fence_context_new; | 97 | priv->base.context_new = nv50_fence_context_new; |
98 | priv->base.context_del = nv10_fence_context_del; | 98 | priv->base.context_del = nv10_fence_context_del; |
99 | priv->base.contexts = 127; | 99 | priv->base.contexts = 127; |
100 | priv->base.context_base = fence_context_alloc(priv->base.contexts); | 100 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
101 | spin_lock_init(&priv->lock); | 101 | spin_lock_init(&priv->lock); |
102 | 102 | ||
103 | ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 103 | ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 18bde9d8e6d6..23ef04b4e0b2 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c | |||
@@ -229,7 +229,7 @@ nv84_fence_create(struct nouveau_drm *drm) | |||
229 | priv->base.context_del = nv84_fence_context_del; | 229 | priv->base.context_del = nv84_fence_context_del; |
230 | 230 | ||
231 | priv->base.contexts = fifo->nr; | 231 | priv->base.contexts = fifo->nr; |
232 | priv->base.context_base = fence_context_alloc(priv->base.contexts); | 232 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
233 | priv->base.uevent = true; | 233 | priv->base.uevent = true; |
234 | 234 | ||
235 | /* Use VRAM if there is any ; otherwise fallback to system memory */ | 235 | /* Use VRAM if there is any ; otherwise fallback to system memory */ |
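
Each nvXX_fence_create() above reserves a block of fence contexts with dma_fence_context_alloc(); a context is just a u64 namespace within which seqnos are comparable. A sketch of the per-ring pattern these hunks share (names hypothetical):

	#include <linux/dma-fence.h>

	#define DEMO_NUM_RINGS 4	/* hypothetical ring count */

	static u64 demo_context_base;

	static void demo_fence_driver_init(void)
	{
		/* Returns the first of DEMO_NUM_RINGS fresh context numbers. */
		demo_context_base = dma_fence_context_alloc(DEMO_NUM_RINGS);
	}

	/* A fence emitted on ring i is then initialized with context
	 * demo_context_base + i: fences sharing a context are totally
	 * ordered by seqno, fences in different contexts are not. */
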
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 5f3e5ad99de7..84995ebc6ffc 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
@@ -31,7 +31,7 @@ | |||
31 | * Definitions taken from spice-protocol, plus kernel driver specific bits. | 31 | * Definitions taken from spice-protocol, plus kernel driver specific bits. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/fence.h> | 34 | #include <linux/dma-fence.h> |
35 | #include <linux/workqueue.h> | 35 | #include <linux/workqueue.h> |
36 | #include <linux/firmware.h> | 36 | #include <linux/firmware.h> |
37 | #include <linux/platform_device.h> | 37 | #include <linux/platform_device.h> |
@@ -190,7 +190,7 @@ enum { | |||
190 | * spice-protocol/qxl_dev.h */ | 190 | * spice-protocol/qxl_dev.h */ |
191 | #define QXL_MAX_RES 96 | 191 | #define QXL_MAX_RES 96 |
192 | struct qxl_release { | 192 | struct qxl_release { |
193 | struct fence base; | 193 | struct dma_fence base; |
194 | 194 | ||
195 | int id; | 195 | int id; |
196 | int type; | 196 | int type; |
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index cd83f050cf3e..50b4e522f05f 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c | |||
@@ -21,7 +21,7 @@ | |||
21 | */ | 21 | */ |
22 | #include "qxl_drv.h" | 22 | #include "qxl_drv.h" |
23 | #include "qxl_object.h" | 23 | #include "qxl_object.h" |
24 | #include <trace/events/fence.h> | 24 | #include <trace/events/dma_fence.h> |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * drawable cmd cache - allocate a bunch of VRAM pages, suballocate | 27 | * drawable cmd cache - allocate a bunch of VRAM pages, suballocate |
@@ -40,23 +40,24 @@ | |||
40 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; | 40 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; |
41 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; | 41 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; |
42 | 42 | ||
43 | static const char *qxl_get_driver_name(struct fence *fence) | 43 | static const char *qxl_get_driver_name(struct dma_fence *fence) |
44 | { | 44 | { |
45 | return "qxl"; | 45 | return "qxl"; |
46 | } | 46 | } |
47 | 47 | ||
48 | static const char *qxl_get_timeline_name(struct fence *fence) | 48 | static const char *qxl_get_timeline_name(struct dma_fence *fence) |
49 | { | 49 | { |
50 | return "release"; | 50 | return "release"; |
51 | } | 51 | } |
52 | 52 | ||
53 | static bool qxl_nop_signaling(struct fence *fence) | 53 | static bool qxl_nop_signaling(struct dma_fence *fence) |
54 | { | 54 | { |
55 | /* fences are always automatically signaled, so just pretend we did this.. */ | 55 | /* fences are always automatically signaled, so just pretend we did this.. */ |
56 | return true; | 56 | return true; |
57 | } | 57 | } |
58 | 58 | ||
59 | static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout) | 59 | static long qxl_fence_wait(struct dma_fence *fence, bool intr, |
60 | signed long timeout) | ||
60 | { | 61 | { |
61 | struct qxl_device *qdev; | 62 | struct qxl_device *qdev; |
62 | struct qxl_release *release; | 63 | struct qxl_release *release; |
@@ -71,7 +72,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout) | |||
71 | retry: | 72 | retry: |
72 | sc++; | 73 | sc++; |
73 | 74 | ||
74 | if (fence_is_signaled(fence)) | 75 | if (dma_fence_is_signaled(fence)) |
75 | goto signaled; | 76 | goto signaled; |
76 | 77 | ||
77 | qxl_io_notify_oom(qdev); | 78 | qxl_io_notify_oom(qdev); |
@@ -80,11 +81,11 @@ retry: | |||
80 | if (!qxl_queue_garbage_collect(qdev, true)) | 81 | if (!qxl_queue_garbage_collect(qdev, true)) |
81 | break; | 82 | break; |
82 | 83 | ||
83 | if (fence_is_signaled(fence)) | 84 | if (dma_fence_is_signaled(fence)) |
84 | goto signaled; | 85 | goto signaled; |
85 | } | 86 | } |
86 | 87 | ||
87 | if (fence_is_signaled(fence)) | 88 | if (dma_fence_is_signaled(fence)) |
88 | goto signaled; | 89 | goto signaled; |
89 | 90 | ||
90 | if (have_drawable_releases || sc < 4) { | 91 | if (have_drawable_releases || sc < 4) { |
@@ -96,9 +97,9 @@ retry: | |||
96 | return 0; | 97 | return 0; |
97 | 98 | ||
98 | if (have_drawable_releases && sc > 300) { | 99 | if (have_drawable_releases && sc > 300) { |
99 | FENCE_WARN(fence, "failed to wait on release %llu " | 100 | DMA_FENCE_WARN(fence, "failed to wait on release %llu " |
100 | "after spincount %d\n", | 101 | "after spincount %d\n", |
101 | fence->context & ~0xf0000000, sc); | 102 | fence->context & ~0xf0000000, sc); |
102 | goto signaled; | 103 | goto signaled; |
103 | } | 104 | } |
104 | goto retry; | 105 | goto retry; |
@@ -115,7 +116,7 @@ signaled: | |||
115 | return end - cur; | 116 | return end - cur; |
116 | } | 117 | } |
117 | 118 | ||
118 | static const struct fence_ops qxl_fence_ops = { | 119 | static const struct dma_fence_ops qxl_fence_ops = { |
119 | .get_driver_name = qxl_get_driver_name, | 120 | .get_driver_name = qxl_get_driver_name, |
120 | .get_timeline_name = qxl_get_timeline_name, | 121 | .get_timeline_name = qxl_get_timeline_name, |
121 | .enable_signaling = qxl_nop_signaling, | 122 | .enable_signaling = qxl_nop_signaling, |
@@ -192,8 +193,8 @@ qxl_release_free(struct qxl_device *qdev, | |||
192 | WARN_ON(list_empty(&release->bos)); | 193 | WARN_ON(list_empty(&release->bos)); |
193 | qxl_release_free_list(release); | 194 | qxl_release_free_list(release); |
194 | 195 | ||
195 | fence_signal(&release->base); | 196 | dma_fence_signal(&release->base); |
196 | fence_put(&release->base); | 197 | dma_fence_put(&release->base); |
197 | } else { | 198 | } else { |
198 | qxl_release_free_list(release); | 199 | qxl_release_free_list(release); |
199 | kfree(release); | 200 | kfree(release); |
@@ -453,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) | |||
453 | * Since we never really allocated a context and we don't want to conflict, | 454 | * Since we never really allocated a context and we don't want to conflict, |
454 | * set the highest bits. This will break if we really allow exporting of dma-bufs. | 455 | * set the highest bits. This will break if we really allow exporting of dma-bufs. |
455 | */ | 456 | */ |
456 | fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock, | 457 | dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock, |
457 | release->id | 0xf0000000, release->base.seqno); | 458 | release->id | 0xf0000000, release->base.seqno); |
458 | trace_fence_emit(&release->base); | 459 | trace_dma_fence_emit(&release->base); |
459 | 460 | ||
460 | driver = bdev->driver; | 461 | driver = bdev->driver; |
461 | glob = bo->glob; | 462 | glob = bo->glob; |
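
qxl keeps its own .wait implementation because its fences are retired by device-side garbage collection rather than by interrupts. The dma_fence_ops contract for a custom wait: the timeout is in jiffies, and the callback returns the remaining jiffies on success, 0 on timeout, or a negative error such as -ERESTARTSYS. A polling sketch of that contract (hypothetical, assumes a finite timeout; a real driver kicks the hardware instead of sleeping blindly):

	#include <linux/dma-fence.h>
	#include <linux/delay.h>
	#include <linux/jiffies.h>
	#include <linux/kernel.h>
	#include <linux/sched.h>

	static signed long demo_fence_wait(struct dma_fence *fence, bool intr,
					   signed long timeout)
	{
		unsigned long deadline = jiffies + timeout;

		while (!dma_fence_is_signaled(fence)) {
			if (time_after(jiffies, deadline))
				return 0;			/* timed out */
			if (intr && signal_pending(current))
				return -ERESTARTSYS;		/* interrupted */
			usleep_range(100, 200);			/* poll; placeholder for real work */
		}
		/* Signaled: report the time left, at least one jiffy. */
		return max_t(signed long, deadline - jiffies, 1);
	}
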
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 1b0dcad916b0..44e0c5ed6418 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -66,7 +66,7 @@ | |||
66 | #include <linux/kref.h> | 66 | #include <linux/kref.h> |
67 | #include <linux/interval_tree.h> | 67 | #include <linux/interval_tree.h> |
68 | #include <linux/hashtable.h> | 68 | #include <linux/hashtable.h> |
69 | #include <linux/fence.h> | 69 | #include <linux/dma-fence.h> |
70 | 70 | ||
71 | #include <ttm/ttm_bo_api.h> | 71 | #include <ttm/ttm_bo_api.h> |
72 | #include <ttm/ttm_bo_driver.h> | 72 | #include <ttm/ttm_bo_driver.h> |
@@ -367,7 +367,7 @@ struct radeon_fence_driver { | |||
367 | }; | 367 | }; |
368 | 368 | ||
369 | struct radeon_fence { | 369 | struct radeon_fence { |
370 | struct fence base; | 370 | struct dma_fence base; |
371 | 371 | ||
372 | struct radeon_device *rdev; | 372 | struct radeon_device *rdev; |
373 | uint64_t seq; | 373 | uint64_t seq; |
@@ -746,7 +746,7 @@ struct radeon_flip_work { | |||
746 | uint64_t base; | 746 | uint64_t base; |
747 | struct drm_pending_vblank_event *event; | 747 | struct drm_pending_vblank_event *event; |
748 | struct radeon_bo *old_rbo; | 748 | struct radeon_bo *old_rbo; |
749 | struct fence *fence; | 749 | struct dma_fence *fence; |
750 | bool async; | 750 | bool async; |
751 | }; | 751 | }; |
752 | 752 | ||
@@ -2514,9 +2514,9 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v); | |||
2514 | /* | 2514 | /* |
2515 | * Cast helper | 2515 | * Cast helper |
2516 | */ | 2516 | */ |
2517 | extern const struct fence_ops radeon_fence_ops; | 2517 | extern const struct dma_fence_ops radeon_fence_ops; |
2518 | 2518 | ||
2519 | static inline struct radeon_fence *to_radeon_fence(struct fence *f) | 2519 | static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f) |
2520 | { | 2520 | { |
2521 | struct radeon_fence *__f = container_of(f, struct radeon_fence, base); | 2521 | struct radeon_fence *__f = container_of(f, struct radeon_fence, base); |
2522 | 2522 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 79c9b6f3f013..0be8d5cd7826 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1320,7 +1320,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1320 | for (i = 0; i < RADEON_NUM_RINGS; i++) { | 1320 | for (i = 0; i < RADEON_NUM_RINGS; i++) { |
1321 | rdev->ring[i].idx = i; | 1321 | rdev->ring[i].idx = i; |
1322 | } | 1322 | } |
1323 | rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); | 1323 | rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS); |
1324 | 1324 | ||
1325 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", | 1325 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", |
1326 | radeon_family_name[rdev->family], pdev->vendor, pdev->device, | 1326 | radeon_family_name[rdev->family], pdev->vendor, pdev->device, |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index cdb8cb568c15..e7409e8a9f87 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -437,7 +437,7 @@ static void radeon_flip_work_func(struct work_struct *__work) | |||
437 | down_read(&rdev->exclusive_lock); | 437 | down_read(&rdev->exclusive_lock); |
438 | } | 438 | } |
439 | } else | 439 | } else |
440 | r = fence_wait(work->fence, false); | 440 | r = dma_fence_wait(work->fence, false); |
441 | 441 | ||
442 | if (r) | 442 | if (r) |
443 | DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); | 443 | DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); |
@@ -447,7 +447,7 @@ static void radeon_flip_work_func(struct work_struct *__work) | |||
447 | * confused about which BO the CRTC is scanning out | 447 | * confused about which BO the CRTC is scanning out |
448 | */ | 448 | */ |
449 | 449 | ||
450 | fence_put(work->fence); | 450 | dma_fence_put(work->fence); |
451 | work->fence = NULL; | 451 | work->fence = NULL; |
452 | } | 452 | } |
453 | 453 | ||
@@ -542,7 +542,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, | |||
542 | DRM_ERROR("failed to pin new rbo buffer before flip\n"); | 542 | DRM_ERROR("failed to pin new rbo buffer before flip\n"); |
543 | goto cleanup; | 543 | goto cleanup; |
544 | } | 544 | } |
545 | work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); | 545 | work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); |
546 | radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); | 546 | radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); |
547 | radeon_bo_unreserve(new_rbo); | 547 | radeon_bo_unreserve(new_rbo); |
548 | 548 | ||
@@ -617,7 +617,7 @@ pflip_cleanup: | |||
617 | 617 | ||
618 | cleanup: | 618 | cleanup: |
619 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); | 619 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); |
620 | fence_put(work->fence); | 620 | dma_fence_put(work->fence); |
621 | kfree(work); | 621 | kfree(work); |
622 | return r; | 622 | return r; |
623 | } | 623 | } |
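
The radeon flip path shows the common idiom of sampling the exclusive (write) fence while the buffer is reserved and waiting on it later, now spelled dma_fence_get()/dma_fence_wait()/dma_fence_put(). The sketch below condenses both steps into one hypothetical helper for brevity; radeon itself unreserves the BO before the wait:

	#include <linux/dma-fence.h>
	#include <linux/reservation.h>

	static int demo_wait_for_excl(struct reservation_object *resv)
	{
		struct dma_fence *fence;
		int r = 0;

		/* Reservation lock held: safe to sample and reference the fence. */
		fence = dma_fence_get(reservation_object_get_excl(resv));
		if (fence) {
			r = dma_fence_wait(fence, false);	/* uninterruptible */
			dma_fence_put(fence);			/* drop our reference */
		}
		return r;
	}
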
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 7ef075acde9c..ef09f0a63754 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -141,8 +141,10 @@ int radeon_fence_emit(struct radeon_device *rdev, | |||
141 | (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring]; | 141 | (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring]; |
142 | (*fence)->ring = ring; | 142 | (*fence)->ring = ring; |
143 | (*fence)->is_vm_update = false; | 143 | (*fence)->is_vm_update = false; |
144 | fence_init(&(*fence)->base, &radeon_fence_ops, | 144 | dma_fence_init(&(*fence)->base, &radeon_fence_ops, |
145 | &rdev->fence_queue.lock, rdev->fence_context + ring, seq); | 145 | &rdev->fence_queue.lock, |
146 | rdev->fence_context + ring, | ||
147 | seq); | ||
146 | radeon_fence_ring_emit(rdev, ring, *fence); | 148 | radeon_fence_ring_emit(rdev, ring, *fence); |
147 | trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); | 149 | trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); |
148 | radeon_fence_schedule_check(rdev, ring); | 150 | radeon_fence_schedule_check(rdev, ring); |
@@ -169,18 +171,18 @@ static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key) | |||
169 | */ | 171 | */ |
170 | seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); | 172 | seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); |
171 | if (seq >= fence->seq) { | 173 | if (seq >= fence->seq) { |
172 | int ret = fence_signal_locked(&fence->base); | 174 | int ret = dma_fence_signal_locked(&fence->base); |
173 | 175 | ||
174 | if (!ret) | 176 | if (!ret) |
175 | FENCE_TRACE(&fence->base, "signaled from irq context\n"); | 177 | DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n"); |
176 | else | 178 | else |
177 | FENCE_TRACE(&fence->base, "was already signaled\n"); | 179 | DMA_FENCE_TRACE(&fence->base, "was already signaled\n"); |
178 | 180 | ||
179 | radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); | 181 | radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); |
180 | __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake); | 182 | __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake); |
181 | fence_put(&fence->base); | 183 | dma_fence_put(&fence->base); |
182 | } else | 184 | } else |
183 | FENCE_TRACE(&fence->base, "pending\n"); | 185 | DMA_FENCE_TRACE(&fence->base, "pending\n"); |
184 | return 0; | 186 | return 0; |
185 | } | 187 | } |
186 | 188 | ||
@@ -351,7 +353,7 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev, | |||
351 | return false; | 353 | return false; |
352 | } | 354 | } |
353 | 355 | ||
354 | static bool radeon_fence_is_signaled(struct fence *f) | 356 | static bool radeon_fence_is_signaled(struct dma_fence *f) |
355 | { | 357 | { |
356 | struct radeon_fence *fence = to_radeon_fence(f); | 358 | struct radeon_fence *fence = to_radeon_fence(f); |
357 | struct radeon_device *rdev = fence->rdev; | 359 | struct radeon_device *rdev = fence->rdev; |
@@ -381,7 +383,7 @@ static bool radeon_fence_is_signaled(struct fence *f) | |||
381 | * to fence_queue that checks if this fence is signaled, and if so it | 383 | * to fence_queue that checks if this fence is signaled, and if so it |
382 | * signals the fence and removes itself. | 384 | * signals the fence and removes itself. |
383 | */ | 385 | */ |
384 | static bool radeon_fence_enable_signaling(struct fence *f) | 386 | static bool radeon_fence_enable_signaling(struct dma_fence *f) |
385 | { | 387 | { |
386 | struct radeon_fence *fence = to_radeon_fence(f); | 388 | struct radeon_fence *fence = to_radeon_fence(f); |
387 | struct radeon_device *rdev = fence->rdev; | 389 | struct radeon_device *rdev = fence->rdev; |
@@ -414,9 +416,9 @@ static bool radeon_fence_enable_signaling(struct fence *f) | |||
414 | fence->fence_wake.private = NULL; | 416 | fence->fence_wake.private = NULL; |
415 | fence->fence_wake.func = radeon_fence_check_signaled; | 417 | fence->fence_wake.func = radeon_fence_check_signaled; |
416 | __add_wait_queue(&rdev->fence_queue, &fence->fence_wake); | 418 | __add_wait_queue(&rdev->fence_queue, &fence->fence_wake); |
417 | fence_get(f); | 419 | dma_fence_get(f); |
418 | 420 | ||
419 | FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); | 421 | DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); |
420 | return true; | 422 | return true; |
421 | } | 423 | } |
422 | 424 | ||
@@ -436,9 +438,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence) | |||
436 | if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { | 438 | if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { |
437 | int ret; | 439 | int ret; |
438 | 440 | ||
439 | ret = fence_signal(&fence->base); | 441 | ret = dma_fence_signal(&fence->base); |
440 | if (!ret) | 442 | if (!ret) |
441 | FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); | 443 | DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); |
442 | return true; | 444 | return true; |
443 | } | 445 | } |
444 | return false; | 446 | return false; |
@@ -552,7 +554,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout) | |||
552 | * exclusive_lock is not held in that case. | 554 | * exclusive_lock is not held in that case. |
553 | */ | 555 | */ |
554 | if (WARN_ON_ONCE(!to_radeon_fence(&fence->base))) | 556 | if (WARN_ON_ONCE(!to_radeon_fence(&fence->base))) |
555 | return fence_wait(&fence->base, intr); | 557 | return dma_fence_wait(&fence->base, intr); |
556 | 558 | ||
557 | seq[fence->ring] = fence->seq; | 559 | seq[fence->ring] = fence->seq; |
558 | r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout); | 560 | r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout); |
@@ -560,9 +562,9 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout) | |||
560 | return r; | 562 | return r; |
561 | } | 563 | } |
562 | 564 | ||
563 | r_sig = fence_signal(&fence->base); | 565 | r_sig = dma_fence_signal(&fence->base); |
564 | if (!r_sig) | 566 | if (!r_sig) |
565 | FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); | 567 | DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); |
566 | return r; | 568 | return r; |
567 | } | 569 | } |
568 | 570 | ||
@@ -697,7 +699,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) | |||
697 | */ | 699 | */ |
698 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) | 700 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) |
699 | { | 701 | { |
700 | fence_get(&fence->base); | 702 | dma_fence_get(&fence->base); |
701 | return fence; | 703 | return fence; |
702 | } | 704 | } |
703 | 705 | ||
@@ -714,7 +716,7 @@ void radeon_fence_unref(struct radeon_fence **fence) | |||
714 | 716 | ||
715 | *fence = NULL; | 717 | *fence = NULL; |
716 | if (tmp) { | 718 | if (tmp) { |
717 | fence_put(&tmp->base); | 719 | dma_fence_put(&tmp->base); |
718 | } | 720 | } |
719 | } | 721 | } |
720 | 722 | ||
@@ -1028,12 +1030,12 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev) | |||
1028 | #endif | 1030 | #endif |
1029 | } | 1031 | } |
1030 | 1032 | ||
1031 | static const char *radeon_fence_get_driver_name(struct fence *fence) | 1033 | static const char *radeon_fence_get_driver_name(struct dma_fence *fence) |
1032 | { | 1034 | { |
1033 | return "radeon"; | 1035 | return "radeon"; |
1034 | } | 1036 | } |
1035 | 1037 | ||
1036 | static const char *radeon_fence_get_timeline_name(struct fence *f) | 1038 | static const char *radeon_fence_get_timeline_name(struct dma_fence *f) |
1037 | { | 1039 | { |
1038 | struct radeon_fence *fence = to_radeon_fence(f); | 1040 | struct radeon_fence *fence = to_radeon_fence(f); |
1039 | switch (fence->ring) { | 1041 | switch (fence->ring) { |
@@ -1051,16 +1053,16 @@ static const char *radeon_fence_get_timeline_name(struct fence *f) | |||
1051 | 1053 | ||
1052 | static inline bool radeon_test_signaled(struct radeon_fence *fence) | 1054 | static inline bool radeon_test_signaled(struct radeon_fence *fence) |
1053 | { | 1055 | { |
1054 | return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); | 1056 | return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); |
1055 | } | 1057 | } |
1056 | 1058 | ||
1057 | struct radeon_wait_cb { | 1059 | struct radeon_wait_cb { |
1058 | struct fence_cb base; | 1060 | struct dma_fence_cb base; |
1059 | struct task_struct *task; | 1061 | struct task_struct *task; |
1060 | }; | 1062 | }; |
1061 | 1063 | ||
1062 | static void | 1064 | static void |
1063 | radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) | 1065 | radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
1064 | { | 1066 | { |
1065 | struct radeon_wait_cb *wait = | 1067 | struct radeon_wait_cb *wait = |
1066 | container_of(cb, struct radeon_wait_cb, base); | 1068 | container_of(cb, struct radeon_wait_cb, base); |
@@ -1068,7 +1070,7 @@ radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) | |||
1068 | wake_up_process(wait->task); | 1070 | wake_up_process(wait->task); |
1069 | } | 1071 | } |
1070 | 1072 | ||
1071 | static signed long radeon_fence_default_wait(struct fence *f, bool intr, | 1073 | static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr, |
1072 | signed long t) | 1074 | signed long t) |
1073 | { | 1075 | { |
1074 | struct radeon_fence *fence = to_radeon_fence(f); | 1076 | struct radeon_fence *fence = to_radeon_fence(f); |
@@ -1077,7 +1079,7 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr, | |||
1077 | 1079 | ||
1078 | cb.task = current; | 1080 | cb.task = current; |
1079 | 1081 | ||
1080 | if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) | 1082 | if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) |
1081 | return t; | 1083 | return t; |
1082 | 1084 | ||
1083 | while (t > 0) { | 1085 | while (t > 0) { |
@@ -1105,12 +1107,12 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr, | |||
1105 | } | 1107 | } |
1106 | 1108 | ||
1107 | __set_current_state(TASK_RUNNING); | 1109 | __set_current_state(TASK_RUNNING); |
1108 | fence_remove_callback(f, &cb.base); | 1110 | dma_fence_remove_callback(f, &cb.base); |
1109 | 1111 | ||
1110 | return t; | 1112 | return t; |
1111 | } | 1113 | } |
1112 | 1114 | ||
1113 | const struct fence_ops radeon_fence_ops = { | 1115 | const struct dma_fence_ops radeon_fence_ops = { |
1114 | .get_driver_name = radeon_fence_get_driver_name, | 1116 | .get_driver_name = radeon_fence_get_driver_name, |
1115 | .get_timeline_name = radeon_fence_get_timeline_name, | 1117 | .get_timeline_name = radeon_fence_get_timeline_name, |
1116 | .enable_signaling = radeon_fence_enable_signaling, | 1118 | .enable_signaling = radeon_fence_enable_signaling, |
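
radeon_fence_default_wait() above is the canonical open-coded wait built from the renamed callback API: register a dma_fence_cb that wakes the sleeping task, then sleep in a schedule_timeout() loop. A self-contained sketch of the same pattern (demo_* names hypothetical):

	#include <linux/dma-fence.h>
	#include <linux/sched.h>

	struct demo_wait_cb {
		struct dma_fence_cb base;
		struct task_struct *task;
	};

	static void demo_wait_cb_func(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		struct demo_wait_cb *wait = container_of(cb, struct demo_wait_cb, base);

		wake_up_process(wait->task);	/* runs from the fence's signal path */
	}

	static signed long demo_wait(struct dma_fence *f, bool intr, signed long t)
	{
		struct demo_wait_cb cb = { .task = current };

		/* Nonzero return means the fence already signaled; nothing to do. */
		if (dma_fence_add_callback(f, &cb.base, demo_wait_cb_func))
			return t;

		while (t > 0) {
			set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
			if (dma_fence_is_signaled(f))
				break;
			t = schedule_timeout(t);
			if (t > 0 && intr && signal_pending(current)) {
				t = -ERESTARTSYS;
				break;
			}
		}
		__set_current_state(TASK_RUNNING);
		dma_fence_remove_callback(f, &cb.base);
		return t;
	}
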
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index 02ac8a1de4ff..be5d7a38d3aa 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c | |||
@@ -92,7 +92,7 @@ int radeon_sync_resv(struct radeon_device *rdev, | |||
92 | bool shared) | 92 | bool shared) |
93 | { | 93 | { |
94 | struct reservation_object_list *flist; | 94 | struct reservation_object_list *flist; |
95 | struct fence *f; | 95 | struct dma_fence *f; |
96 | struct radeon_fence *fence; | 96 | struct radeon_fence *fence; |
97 | unsigned i; | 97 | unsigned i; |
98 | int r = 0; | 98 | int r = 0; |
@@ -103,7 +103,7 @@ int radeon_sync_resv(struct radeon_device *rdev, | |||
103 | if (fence && fence->rdev == rdev) | 103 | if (fence && fence->rdev == rdev) |
104 | radeon_sync_fence(sync, fence); | 104 | radeon_sync_fence(sync, fence); |
105 | else if (f) | 105 | else if (f) |
106 | r = fence_wait(f, true); | 106 | r = dma_fence_wait(f, true); |
107 | 107 | ||
108 | flist = reservation_object_get_list(resv); | 108 | flist = reservation_object_get_list(resv); |
109 | if (shared || !flist || r) | 109 | if (shared || !flist || r) |
@@ -116,7 +116,7 @@ int radeon_sync_resv(struct radeon_device *rdev, | |||
116 | if (fence && fence->rdev == rdev) | 116 | if (fence && fence->rdev == rdev) |
117 | radeon_sync_fence(sync, fence); | 117 | radeon_sync_fence(sync, fence); |
118 | else | 118 | else |
119 | r = fence_wait(f, true); | 119 | r = dma_fence_wait(f, true); |
120 | 120 | ||
121 | if (r) | 121 | if (r) |
122 | break; | 122 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 0cd0e7bdee55..d34d1cf33895 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -467,7 +467,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
467 | { | 467 | { |
468 | int32_t *msg, msg_type, handle; | 468 | int32_t *msg, msg_type, handle; |
469 | unsigned img_size = 0; | 469 | unsigned img_size = 0; |
470 | struct fence *f; | 470 | struct dma_fence *f; |
471 | void *ptr; | 471 | void *ptr; |
472 | 472 | ||
473 | int i, r; | 473 | int i, r; |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 8c8cbe837e61..6fe161192bb4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <drm/drm_crtc_helper.h> | 20 | #include <drm/drm_crtc_helper.h> |
21 | #include <drm/drm_fb_helper.h> | 21 | #include <drm/drm_fb_helper.h> |
22 | #include <drm/drm_gem_cma_helper.h> | 22 | #include <drm/drm_gem_cma_helper.h> |
23 | #include <drm/drm_of.h> | ||
23 | #include <linux/dma-mapping.h> | 24 | #include <linux/dma-mapping.h> |
24 | #include <linux/pm_runtime.h> | 25 | #include <linux/pm_runtime.h> |
25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
@@ -388,7 +389,7 @@ static void rockchip_add_endpoints(struct device *dev, | |||
388 | continue; | 389 | continue; |
389 | } | 390 | } |
390 | 391 | ||
391 | component_match_add(dev, match, compare_of, remote); | 392 | drm_of_component_match_add(dev, match, compare_of, remote); |
392 | of_node_put(remote); | 393 | of_node_put(remote); |
393 | } | 394 | } |
394 | } | 395 | } |
@@ -437,7 +438,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev) | |||
437 | } | 438 | } |
438 | 439 | ||
439 | of_node_put(iommu); | 440 | of_node_put(iommu); |
440 | component_match_add(dev, &match, compare_of, port->parent); | 441 | drm_of_component_match_add(dev, &match, compare_of, |
442 | port->parent); | ||
441 | of_node_put(port); | 443 | of_node_put(port); |
442 | } | 444 | } |
443 | 445 | ||
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 7087499969bc..6aead2013b62 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <drm/drm_crtc_helper.h> | 17 | #include <drm/drm_crtc_helper.h> |
18 | #include <drm/drm_gem_cma_helper.h> | 18 | #include <drm/drm_gem_cma_helper.h> |
19 | #include <drm/drm_fb_cma_helper.h> | 19 | #include <drm/drm_fb_cma_helper.h> |
20 | #include <drm/drm_of.h> | ||
20 | 21 | ||
21 | #include "sti_crtc.h" | 22 | #include "sti_crtc.h" |
22 | #include "sti_drv.h" | 23 | #include "sti_drv.h" |
@@ -424,8 +425,8 @@ static int sti_platform_probe(struct platform_device *pdev) | |||
424 | child_np = of_get_next_available_child(node, NULL); | 425 | child_np = of_get_next_available_child(node, NULL); |
425 | 426 | ||
426 | while (child_np) { | 427 | while (child_np) { |
427 | component_match_add(dev, &match, compare_of, child_np); | 428 | drm_of_component_match_add(dev, &match, compare_of, |
428 | of_node_put(child_np); | 429 | child_np); |
429 | child_np = of_get_next_available_child(node, child_np); | 430 | child_np = of_get_next_available_child(node, child_np); |
430 | } | 431 | } |
431 | 432 | ||
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 0da9862ad8ed..b3c4ad605e81 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <drm/drm_fb_cma_helper.h> | 18 | #include <drm/drm_fb_cma_helper.h> |
19 | #include <drm/drm_gem_cma_helper.h> | 19 | #include <drm/drm_gem_cma_helper.h> |
20 | #include <drm/drm_fb_helper.h> | 20 | #include <drm/drm_fb_helper.h> |
21 | #include <drm/drm_of.h> | ||
21 | 22 | ||
22 | #include "sun4i_crtc.h" | 23 | #include "sun4i_crtc.h" |
23 | #include "sun4i_drv.h" | 24 | #include "sun4i_drv.h" |
@@ -239,7 +240,7 @@ static int sun4i_drv_add_endpoints(struct device *dev, | |||
239 | /* Add current component */ | 240 | /* Add current component */ |
240 | DRM_DEBUG_DRIVER("Adding component %s\n", | 241 | DRM_DEBUG_DRIVER("Adding component %s\n", |
241 | of_node_full_name(node)); | 242 | of_node_full_name(node)); |
242 | component_match_add(dev, match, compare_of, node); | 243 | drm_of_component_match_add(dev, match, compare_of, node); |
243 | count++; | 244 | count++; |
244 | } | 245 | } |
245 | 246 | ||
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index 68e895021005..06a4c584f3cb 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/component.h> | 11 | #include <linux/component.h> |
12 | #include <linux/of_graph.h> | 12 | #include <linux/of_graph.h> |
13 | #include <drm/drm_of.h> | ||
13 | 14 | ||
14 | #include "tilcdc_drv.h" | 15 | #include "tilcdc_drv.h" |
15 | #include "tilcdc_external.h" | 16 | #include "tilcdc_external.h" |
@@ -160,7 +161,8 @@ int tilcdc_get_external_components(struct device *dev, | |||
160 | 161 | ||
161 | dev_dbg(dev, "Subdevice node '%s' found\n", node->name); | 162 | dev_dbg(dev, "Subdevice node '%s' found\n", node->name); |
162 | if (match) | 163 | if (match) |
163 | component_match_add(dev, match, dev_match_of, node); | 164 | drm_of_component_match_add(dev, match, dev_match_of, |
165 | node); | ||
164 | of_node_put(node); | 166 | of_node_put(node); |
165 | count++; | 167 | count++; |
166 | } | 168 | } |
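
The rockchip, sti, sun4i and tilcdc hunks all make the same one-line conversion: drm_of_component_match_add(), new in this pull, wraps component_match_add_release() so the match table holds its own reference on the device node, which makes the caller's immediate of_node_put() safe. A usage sketch (the demo name and child-node walk are illustrative, not from any of these drivers):

	#include <linux/component.h>
	#include <linux/of.h>
	#include <drm/drm_of.h>

	static void demo_add_components(struct device *dev,
					struct component_match **match,
					int (*compare)(struct device *, void *))
	{
		struct device_node *node;

		for_each_available_child_of_node(dev->of_node, node)
			/* Takes its own of_node reference; released with the match. */
			drm_of_component_match_add(dev, match, compare, node);
	}
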
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 31fcf11a2831..f6ff579e8918 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref) | |||
148 | BUG_ON(!list_empty(&bo->ddestroy)); | 148 | BUG_ON(!list_empty(&bo->ddestroy)); |
149 | ttm_tt_destroy(bo->ttm); | 149 | ttm_tt_destroy(bo->ttm); |
150 | atomic_dec(&bo->glob->bo_count); | 150 | atomic_dec(&bo->glob->bo_count); |
151 | fence_put(bo->moving); | 151 | dma_fence_put(bo->moving); |
152 | if (bo->resv == &bo->ttm_resv) | 152 | if (bo->resv == &bo->ttm_resv) |
153 | reservation_object_fini(&bo->ttm_resv); | 153 | reservation_object_fini(&bo->ttm_resv); |
154 | mutex_destroy(&bo->wu_mutex); | 154 | mutex_destroy(&bo->wu_mutex); |
@@ -426,20 +426,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) | |||
426 | static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) | 426 | static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) |
427 | { | 427 | { |
428 | struct reservation_object_list *fobj; | 428 | struct reservation_object_list *fobj; |
429 | struct fence *fence; | 429 | struct dma_fence *fence; |
430 | int i; | 430 | int i; |
431 | 431 | ||
432 | fobj = reservation_object_get_list(bo->resv); | 432 | fobj = reservation_object_get_list(bo->resv); |
433 | fence = reservation_object_get_excl(bo->resv); | 433 | fence = reservation_object_get_excl(bo->resv); |
434 | if (fence && !fence->ops->signaled) | 434 | if (fence && !fence->ops->signaled) |
435 | fence_enable_sw_signaling(fence); | 435 | dma_fence_enable_sw_signaling(fence); |
436 | 436 | ||
437 | for (i = 0; fobj && i < fobj->shared_count; ++i) { | 437 | for (i = 0; fobj && i < fobj->shared_count; ++i) { |
438 | fence = rcu_dereference_protected(fobj->shared[i], | 438 | fence = rcu_dereference_protected(fobj->shared[i], |
439 | reservation_object_held(bo->resv)); | 439 | reservation_object_held(bo->resv)); |
440 | 440 | ||
441 | if (!fence->ops->signaled) | 441 | if (!fence->ops->signaled) |
442 | fence_enable_sw_signaling(fence); | 442 | dma_fence_enable_sw_signaling(fence); |
443 | } | 443 | } |
444 | } | 444 | } |
445 | 445 | ||
@@ -801,11 +801,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, | |||
801 | struct ttm_mem_type_manager *man, | 801 | struct ttm_mem_type_manager *man, |
802 | struct ttm_mem_reg *mem) | 802 | struct ttm_mem_reg *mem) |
803 | { | 803 | { |
804 | struct fence *fence; | 804 | struct dma_fence *fence; |
805 | int ret; | 805 | int ret; |
806 | 806 | ||
807 | spin_lock(&man->move_lock); | 807 | spin_lock(&man->move_lock); |
808 | fence = fence_get(man->move); | 808 | fence = dma_fence_get(man->move); |
809 | spin_unlock(&man->move_lock); | 809 | spin_unlock(&man->move_lock); |
810 | 810 | ||
811 | if (fence) { | 811 | if (fence) { |
@@ -815,7 +815,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, | |||
815 | if (unlikely(ret)) | 815 | if (unlikely(ret)) |
816 | return ret; | 816 | return ret; |
817 | 817 | ||
818 | fence_put(bo->moving); | 818 | dma_fence_put(bo->moving); |
819 | bo->moving = fence; | 819 | bo->moving = fence; |
820 | } | 820 | } |
821 | 821 | ||
@@ -1295,7 +1295,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, | |||
1295 | { | 1295 | { |
1296 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | 1296 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
1297 | struct ttm_bo_global *glob = bdev->glob; | 1297 | struct ttm_bo_global *glob = bdev->glob; |
1298 | struct fence *fence; | 1298 | struct dma_fence *fence; |
1299 | int ret; | 1299 | int ret; |
1300 | 1300 | ||
1301 | /* | 1301 | /* |
@@ -1318,12 +1318,12 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, | |||
1318 | spin_unlock(&glob->lru_lock); | 1318 | spin_unlock(&glob->lru_lock); |
1319 | 1319 | ||
1320 | spin_lock(&man->move_lock); | 1320 | spin_lock(&man->move_lock); |
1321 | fence = fence_get(man->move); | 1321 | fence = dma_fence_get(man->move); |
1322 | spin_unlock(&man->move_lock); | 1322 | spin_unlock(&man->move_lock); |
1323 | 1323 | ||
1324 | if (fence) { | 1324 | if (fence) { |
1325 | ret = fence_wait(fence, false); | 1325 | ret = dma_fence_wait(fence, false); |
1326 | fence_put(fence); | 1326 | dma_fence_put(fence); |
1327 | if (ret) { | 1327 | if (ret) { |
1328 | if (allow_errors) { | 1328 | if (allow_errors) { |
1329 | return ret; | 1329 | return ret; |
@@ -1352,7 +1352,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) | |||
1352 | mem_type); | 1352 | mem_type); |
1353 | return ret; | 1353 | return ret; |
1354 | } | 1354 | } |
1355 | fence_put(man->move); | 1355 | dma_fence_put(man->move); |
1356 | 1356 | ||
1357 | man->use_type = false; | 1357 | man->use_type = false; |
1358 | man->has_type = false; | 1358 | man->has_type = false; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index bf6e21655c57..d0459b392e5e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -644,7 +644,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) | |||
644 | EXPORT_SYMBOL(ttm_bo_kunmap); | 644 | EXPORT_SYMBOL(ttm_bo_kunmap); |
645 | 645 | ||
646 | int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | 646 | int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, |
647 | struct fence *fence, | 647 | struct dma_fence *fence, |
648 | bool evict, | 648 | bool evict, |
649 | struct ttm_mem_reg *new_mem) | 649 | struct ttm_mem_reg *new_mem) |
650 | { | 650 | { |
@@ -674,8 +674,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
674 | * operation has completed. | 674 | * operation has completed. |
675 | */ | 675 | */ |
676 | 676 | ||
677 | fence_put(bo->moving); | 677 | dma_fence_put(bo->moving); |
678 | bo->moving = fence_get(fence); | 678 | bo->moving = dma_fence_get(fence); |
679 | 679 | ||
680 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); | 680 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); |
681 | if (ret) | 681 | if (ret) |
@@ -706,7 +706,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
706 | EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); | 706 | EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); |
707 | 707 | ||
708 | int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, | 708 | int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, |
709 | struct fence *fence, bool evict, | 709 | struct dma_fence *fence, bool evict, |
710 | struct ttm_mem_reg *new_mem) | 710 | struct ttm_mem_reg *new_mem) |
711 | { | 711 | { |
712 | struct ttm_bo_device *bdev = bo->bdev; | 712 | struct ttm_bo_device *bdev = bo->bdev; |
@@ -730,8 +730,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, | |||
730 | * operation has completed. | 730 | * operation has completed. |
731 | */ | 731 | */ |
732 | 732 | ||
733 | fence_put(bo->moving); | 733 | dma_fence_put(bo->moving); |
734 | bo->moving = fence_get(fence); | 734 | bo->moving = dma_fence_get(fence); |
735 | 735 | ||
736 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); | 736 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); |
737 | if (ret) | 737 | if (ret) |
@@ -761,16 +761,16 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, | |||
761 | */ | 761 | */ |
762 | 762 | ||
763 | spin_lock(&from->move_lock); | 763 | spin_lock(&from->move_lock); |
764 | if (!from->move || fence_is_later(fence, from->move)) { | 764 | if (!from->move || dma_fence_is_later(fence, from->move)) { |
765 | fence_put(from->move); | 765 | dma_fence_put(from->move); |
766 | from->move = fence_get(fence); | 766 | from->move = dma_fence_get(fence); |
767 | } | 767 | } |
768 | spin_unlock(&from->move_lock); | 768 | spin_unlock(&from->move_lock); |
769 | 769 | ||
770 | ttm_bo_free_old_node(bo); | 770 | ttm_bo_free_old_node(bo); |
771 | 771 | ||
772 | fence_put(bo->moving); | 772 | dma_fence_put(bo->moving); |
773 | bo->moving = fence_get(fence); | 773 | bo->moving = dma_fence_get(fence); |
774 | 774 | ||
775 | } else { | 775 | } else { |
776 | /** | 776 | /** |
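
ttm_bo_pipeline_move() above only replaces a manager's move fence when the new one is later on the same timeline; dma_fence_is_later() compares seqnos and is meaningful only for fences from the same context, which holds for a manager's move fences. A sketch of that update step (the lock and slot fields are assumptions, not TTM's layout):

	#include <linux/dma-fence.h>
	#include <linux/spinlock.h>

	static void demo_track_latest(spinlock_t *lock, struct dma_fence **slot,
				      struct dma_fence *fence)
	{
		spin_lock(lock);
		if (!*slot || dma_fence_is_later(fence, *slot)) {
			dma_fence_put(*slot);		/* dma_fence_put(NULL) is a no-op */
			*slot = dma_fence_get(fence);	/* take our own reference */
		}
		spin_unlock(lock);
	}
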
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index a6ed9d5e5167..4748aedc933a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | |||
54 | /* | 54 | /* |
55 | * Quick non-stalling check for idle. | 55 | * Quick non-stalling check for idle. |
56 | */ | 56 | */ |
57 | if (fence_is_signaled(bo->moving)) | 57 | if (dma_fence_is_signaled(bo->moving)) |
58 | goto out_clear; | 58 | goto out_clear; |
59 | 59 | ||
60 | /* | 60 | /* |
@@ -67,14 +67,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | |||
67 | goto out_unlock; | 67 | goto out_unlock; |
68 | 68 | ||
69 | up_read(&vma->vm_mm->mmap_sem); | 69 | up_read(&vma->vm_mm->mmap_sem); |
70 | (void) fence_wait(bo->moving, true); | 70 | (void) dma_fence_wait(bo->moving, true); |
71 | goto out_unlock; | 71 | goto out_unlock; |
72 | } | 72 | } |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * Ordinary wait. | 75 | * Ordinary wait. |
76 | */ | 76 | */ |
77 | ret = fence_wait(bo->moving, true); | 77 | ret = dma_fence_wait(bo->moving, true); |
78 | if (unlikely(ret != 0)) { | 78 | if (unlikely(ret != 0)) { |
79 | ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS : | 79 | ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS : |
80 | VM_FAULT_NOPAGE; | 80 | VM_FAULT_NOPAGE; |
@@ -82,7 +82,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | |||
82 | } | 82 | } |
83 | 83 | ||
84 | out_clear: | 84 | out_clear: |
85 | fence_put(bo->moving); | 85 | dma_fence_put(bo->moving); |
86 | bo->moving = NULL; | 86 | bo->moving = NULL; |
87 | 87 | ||
88 | out_unlock: | 88 | out_unlock: |
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index a80717b35dc6..d35bc491e8de 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c | |||
@@ -179,7 +179,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, | |||
179 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); | 179 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); |
180 | 180 | ||
181 | void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, | 181 | void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, |
182 | struct list_head *list, struct fence *fence) | 182 | struct list_head *list, |
183 | struct dma_fence *fence) | ||
183 | { | 184 | { |
184 | struct ttm_validate_buffer *entry; | 185 | struct ttm_validate_buffer *entry; |
185 | struct ttm_buffer_object *bo; | 186 | struct ttm_buffer_object *bo; |
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index 5c57c1ffa1f9..488909a21ed8 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c | |||
@@ -28,56 +28,57 @@ | |||
28 | #define VGEM_FENCE_TIMEOUT (10*HZ) | 28 | #define VGEM_FENCE_TIMEOUT (10*HZ) |
29 | 29 | ||
30 | struct vgem_fence { | 30 | struct vgem_fence { |
31 | struct fence base; | 31 | struct dma_fence base; |
32 | struct spinlock lock; | 32 | struct spinlock lock; |
33 | struct timer_list timer; | 33 | struct timer_list timer; |
34 | }; | 34 | }; |
35 | 35 | ||
36 | static const char *vgem_fence_get_driver_name(struct fence *fence) | 36 | static const char *vgem_fence_get_driver_name(struct dma_fence *fence) |
37 | { | 37 | { |
38 | return "vgem"; | 38 | return "vgem"; |
39 | } | 39 | } |
40 | 40 | ||
41 | static const char *vgem_fence_get_timeline_name(struct fence *fence) | 41 | static const char *vgem_fence_get_timeline_name(struct dma_fence *fence) |
42 | { | 42 | { |
43 | return "unbound"; | 43 | return "unbound"; |
44 | } | 44 | } |
45 | 45 | ||
46 | static bool vgem_fence_signaled(struct fence *fence) | 46 | static bool vgem_fence_signaled(struct dma_fence *fence) |
47 | { | 47 | { |
48 | return false; | 48 | return false; |
49 | } | 49 | } |
50 | 50 | ||
51 | static bool vgem_fence_enable_signaling(struct fence *fence) | 51 | static bool vgem_fence_enable_signaling(struct dma_fence *fence) |
52 | { | 52 | { |
53 | return true; | 53 | return true; |
54 | } | 54 | } |
55 | 55 | ||
56 | static void vgem_fence_release(struct fence *base) | 56 | static void vgem_fence_release(struct dma_fence *base) |
57 | { | 57 | { |
58 | struct vgem_fence *fence = container_of(base, typeof(*fence), base); | 58 | struct vgem_fence *fence = container_of(base, typeof(*fence), base); |
59 | 59 | ||
60 | del_timer_sync(&fence->timer); | 60 | del_timer_sync(&fence->timer); |
61 | fence_free(&fence->base); | 61 | dma_fence_free(&fence->base); |
62 | } | 62 | } |
63 | 63 | ||
64 | static void vgem_fence_value_str(struct fence *fence, char *str, int size) | 64 | static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size) |
65 | { | 65 | { |
66 | snprintf(str, size, "%u", fence->seqno); | 66 | snprintf(str, size, "%u", fence->seqno); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void vgem_fence_timeline_value_str(struct fence *fence, char *str, | 69 | static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str, |
70 | int size) | 70 | int size) |
71 | { | 71 | { |
72 | snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0); | 72 | snprintf(str, size, "%u", |
73 | dma_fence_is_signaled(fence) ? fence->seqno : 0); | ||
73 | } | 74 | } |
74 | 75 | ||
75 | static const struct fence_ops vgem_fence_ops = { | 76 | static const struct dma_fence_ops vgem_fence_ops = { |
76 | .get_driver_name = vgem_fence_get_driver_name, | 77 | .get_driver_name = vgem_fence_get_driver_name, |
77 | .get_timeline_name = vgem_fence_get_timeline_name, | 78 | .get_timeline_name = vgem_fence_get_timeline_name, |
78 | .enable_signaling = vgem_fence_enable_signaling, | 79 | .enable_signaling = vgem_fence_enable_signaling, |
79 | .signaled = vgem_fence_signaled, | 80 | .signaled = vgem_fence_signaled, |
80 | .wait = fence_default_wait, | 81 | .wait = dma_fence_default_wait, |
81 | .release = vgem_fence_release, | 82 | .release = vgem_fence_release, |
82 | 83 | ||
83 | .fence_value_str = vgem_fence_value_str, | 84 | .fence_value_str = vgem_fence_value_str, |
@@ -88,11 +89,11 @@ static void vgem_fence_timeout(unsigned long data) | |||
88 | { | 89 | { |
89 | struct vgem_fence *fence = (struct vgem_fence *)data; | 90 | struct vgem_fence *fence = (struct vgem_fence *)data; |
90 | 91 | ||
91 | fence_signal(&fence->base); | 92 | dma_fence_signal(&fence->base); |
92 | } | 93 | } |
93 | 94 | ||
94 | static struct fence *vgem_fence_create(struct vgem_file *vfile, | 95 | static struct dma_fence *vgem_fence_create(struct vgem_file *vfile, |
95 | unsigned int flags) | 96 | unsigned int flags) |
96 | { | 97 | { |
97 | struct vgem_fence *fence; | 98 | struct vgem_fence *fence; |
98 | 99 | ||
@@ -101,8 +102,8 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile, | |||
101 | return NULL; | 102 | return NULL; |
102 | 103 | ||
103 | spin_lock_init(&fence->lock); | 104 | spin_lock_init(&fence->lock); |
104 | fence_init(&fence->base, &vgem_fence_ops, &fence->lock, | 105 | dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock, |
105 | fence_context_alloc(1), 1); | 106 | dma_fence_context_alloc(1), 1); |
106 | 107 | ||
107 | setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence); | 108 | setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence); |
108 | 109 | ||
@@ -157,7 +158,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, | |||
157 | struct vgem_file *vfile = file->driver_priv; | 158 | struct vgem_file *vfile = file->driver_priv; |
158 | struct reservation_object *resv; | 159 | struct reservation_object *resv; |
159 | struct drm_gem_object *obj; | 160 | struct drm_gem_object *obj; |
160 | struct fence *fence; | 161 | struct dma_fence *fence; |
161 | int ret; | 162 | int ret; |
162 | 163 | ||
163 | if (arg->flags & ~VGEM_FENCE_WRITE) | 164 | if (arg->flags & ~VGEM_FENCE_WRITE) |
@@ -209,8 +210,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, | |||
209 | } | 210 | } |
210 | err_fence: | 211 | err_fence: |
211 | if (ret) { | 212 | if (ret) { |
212 | fence_signal(fence); | 213 | dma_fence_signal(fence); |
213 | fence_put(fence); | 214 | dma_fence_put(fence); |
214 | } | 215 | } |
215 | err: | 216 | err: |
216 | drm_gem_object_unreference_unlocked(obj); | 217 | drm_gem_object_unreference_unlocked(obj); |
@@ -239,7 +240,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev, | |||
239 | { | 240 | { |
240 | struct vgem_file *vfile = file->driver_priv; | 241 | struct vgem_file *vfile = file->driver_priv; |
241 | struct drm_vgem_fence_signal *arg = data; | 242 | struct drm_vgem_fence_signal *arg = data; |
242 | struct fence *fence; | 243 | struct dma_fence *fence; |
243 | int ret = 0; | 244 | int ret = 0; |
244 | 245 | ||
245 | if (arg->flags) | 246 | if (arg->flags) |
@@ -253,11 +254,11 @@ int vgem_fence_signal_ioctl(struct drm_device *dev, | |||
253 | if (IS_ERR(fence)) | 254 | if (IS_ERR(fence)) |
254 | return PTR_ERR(fence); | 255 | return PTR_ERR(fence); |
255 | 256 | ||
256 | if (fence_is_signaled(fence)) | 257 | if (dma_fence_is_signaled(fence)) |
257 | ret = -ETIMEDOUT; | 258 | ret = -ETIMEDOUT; |
258 | 259 | ||
259 | fence_signal(fence); | 260 | dma_fence_signal(fence); |
260 | fence_put(fence); | 261 | dma_fence_put(fence); |
261 | return ret; | 262 | return ret; |
262 | } | 263 | } |
263 | 264 | ||
@@ -271,8 +272,8 @@ int vgem_fence_open(struct vgem_file *vfile) | |||
271 | 272 | ||
272 | static int __vgem_fence_idr_fini(int id, void *p, void *data) | 273 | static int __vgem_fence_idr_fini(int id, void *p, void *data) |
273 | { | 274 | { |
274 | fence_signal(p); | 275 | dma_fence_signal(p); |
275 | fence_put(p); | 276 | dma_fence_put(p); |
276 | return 0; | 277 | return 0; |
277 | } | 278 | } |
278 | 279 | ||
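Taken together, the vgem hunks show the mechanical shape of the rename: the driver embeds a struct dma_fence, initializes it with dma_fence_init() against a context obtained from dma_fence_context_alloc(), and signals and releases it through the dma_fence_* entry points. A minimal sketch of that provider pattern; all my_* names are illustrative, not part of this series:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/*
 * Hypothetical minimal fence provider, mirroring the post-rename
 * vgem layout: fence embedded first, driver-owned lock next to it.
 */
struct my_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "my_driver";
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "my_timeline";
}

static bool my_enable_signaling(struct dma_fence *f)
{
	return true;	/* nothing to arm in this sketch */
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name	= my_get_driver_name,
	.get_timeline_name	= my_get_timeline_name,
	.enable_signaling	= my_enable_signaling,
	.wait			= dma_fence_default_wait,
};

/* context comes from dma_fence_context_alloc(), one per timeline */
static struct dma_fence *my_fence_create(u64 context, unsigned int seqno)
{
	struct my_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;

	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &my_fence_ops, &fence->lock,
		       context, seqno);
	return &fence->base;
}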
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index ae59080d63d1..ec1ebdcfe80b 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h | |||
@@ -82,7 +82,7 @@ struct virtio_gpu_fence_driver { | |||
82 | }; | 82 | }; |
83 | 83 | ||
84 | struct virtio_gpu_fence { | 84 | struct virtio_gpu_fence { |
85 | struct fence f; | 85 | struct dma_fence f; |
86 | struct virtio_gpu_fence_driver *drv; | 86 | struct virtio_gpu_fence_driver *drv; |
87 | struct list_head node; | 87 | struct list_head node; |
88 | uint64_t seq; | 88 | uint64_t seq; |
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index f3f70fa8a4c7..23353521f903 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c | |||
@@ -26,22 +26,22 @@ | |||
26 | #include <drm/drmP.h> | 26 | #include <drm/drmP.h> |
27 | #include "virtgpu_drv.h" | 27 | #include "virtgpu_drv.h" |
28 | 28 | ||
29 | static const char *virtio_get_driver_name(struct fence *f) | 29 | static const char *virtio_get_driver_name(struct dma_fence *f) |
30 | { | 30 | { |
31 | return "virtio_gpu"; | 31 | return "virtio_gpu"; |
32 | } | 32 | } |
33 | 33 | ||
34 | static const char *virtio_get_timeline_name(struct fence *f) | 34 | static const char *virtio_get_timeline_name(struct dma_fence *f) |
35 | { | 35 | { |
36 | return "controlq"; | 36 | return "controlq"; |
37 | } | 37 | } |
38 | 38 | ||
39 | static bool virtio_enable_signaling(struct fence *f) | 39 | static bool virtio_enable_signaling(struct dma_fence *f) |
40 | { | 40 | { |
41 | return true; | 41 | return true; |
42 | } | 42 | } |
43 | 43 | ||
44 | static bool virtio_signaled(struct fence *f) | 44 | static bool virtio_signaled(struct dma_fence *f) |
45 | { | 45 | { |
46 | struct virtio_gpu_fence *fence = to_virtio_fence(f); | 46 | struct virtio_gpu_fence *fence = to_virtio_fence(f); |
47 | 47 | ||
@@ -50,26 +50,26 @@ static bool virtio_signaled(struct fence *f) | |||
50 | return false; | 50 | return false; |
51 | } | 51 | } |
52 | 52 | ||
53 | static void virtio_fence_value_str(struct fence *f, char *str, int size) | 53 | static void virtio_fence_value_str(struct dma_fence *f, char *str, int size) |
54 | { | 54 | { |
55 | struct virtio_gpu_fence *fence = to_virtio_fence(f); | 55 | struct virtio_gpu_fence *fence = to_virtio_fence(f); |
56 | 56 | ||
57 | snprintf(str, size, "%llu", fence->seq); | 57 | snprintf(str, size, "%llu", fence->seq); |
58 | } | 58 | } |
59 | 59 | ||
60 | static void virtio_timeline_value_str(struct fence *f, char *str, int size) | 60 | static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size) |
61 | { | 61 | { |
62 | struct virtio_gpu_fence *fence = to_virtio_fence(f); | 62 | struct virtio_gpu_fence *fence = to_virtio_fence(f); |
63 | 63 | ||
64 | snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); | 64 | snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); |
65 | } | 65 | } |
66 | 66 | ||
67 | static const struct fence_ops virtio_fence_ops = { | 67 | static const struct dma_fence_ops virtio_fence_ops = { |
68 | .get_driver_name = virtio_get_driver_name, | 68 | .get_driver_name = virtio_get_driver_name, |
69 | .get_timeline_name = virtio_get_timeline_name, | 69 | .get_timeline_name = virtio_get_timeline_name, |
70 | .enable_signaling = virtio_enable_signaling, | 70 | .enable_signaling = virtio_enable_signaling, |
71 | .signaled = virtio_signaled, | 71 | .signaled = virtio_signaled, |
72 | .wait = fence_default_wait, | 72 | .wait = dma_fence_default_wait, |
73 | .fence_value_str = virtio_fence_value_str, | 73 | .fence_value_str = virtio_fence_value_str, |
74 | .timeline_value_str = virtio_timeline_value_str, | 74 | .timeline_value_str = virtio_timeline_value_str, |
75 | }; | 75 | }; |
@@ -88,9 +88,9 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, | |||
88 | spin_lock_irqsave(&drv->lock, irq_flags); | 88 | spin_lock_irqsave(&drv->lock, irq_flags); |
89 | (*fence)->drv = drv; | 89 | (*fence)->drv = drv; |
90 | (*fence)->seq = ++drv->sync_seq; | 90 | (*fence)->seq = ++drv->sync_seq; |
91 | fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock, | 91 | dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock, |
92 | drv->context, (*fence)->seq); | 92 | drv->context, (*fence)->seq); |
93 | fence_get(&(*fence)->f); | 93 | dma_fence_get(&(*fence)->f); |
94 | list_add_tail(&(*fence)->node, &drv->fences); | 94 | list_add_tail(&(*fence)->node, &drv->fences); |
95 | spin_unlock_irqrestore(&drv->lock, irq_flags); | 95 | spin_unlock_irqrestore(&drv->lock, irq_flags); |
96 | 96 | ||
@@ -111,9 +111,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev, | |||
111 | list_for_each_entry_safe(fence, tmp, &drv->fences, node) { | 111 | list_for_each_entry_safe(fence, tmp, &drv->fences, node) { |
112 | if (last_seq < fence->seq) | 112 | if (last_seq < fence->seq) |
113 | continue; | 113 | continue; |
114 | fence_signal_locked(&fence->f); | 114 | dma_fence_signal_locked(&fence->f); |
115 | list_del(&fence->node); | 115 | list_del(&fence->node); |
116 | fence_put(&fence->f); | 116 | dma_fence_put(&fence->f); |
117 | } | 117 | } |
118 | spin_unlock_irqrestore(&drv->lock, irq_flags); | 118 | spin_unlock_irqrestore(&drv->lock, irq_flags); |
119 | } | 119 | } |
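virtio signals its fences from the event-processing path while already holding drv->lock, which is also the lock each fence was initialized with, so it must use dma_fence_signal_locked(); code that does not hold the fence lock calls dma_fence_signal(), which takes it internally. A hedged sketch of the two paths (struct my_driver is hypothetical):

#include <linux/dma-fence.h>
#include <linux/spinlock.h>

/* Hypothetical driver state; its fences were initialized with
 * &drv->lock as their fence lock, as virtio's are. */
struct my_driver {
	spinlock_t lock;
};

/* Path that already holds the fence lock (e.g. an event or IRQ
 * handler walking a fence list): use the _locked variant. */
static void my_signal_locked(struct my_driver *drv, struct dma_fence *f)
{
	unsigned long flags;

	spin_lock_irqsave(&drv->lock, flags);
	dma_fence_signal_locked(f);
	spin_unlock_irqrestore(&drv->lock, flags);
}

/* Plain path: dma_fence_signal() acquires fence->lock itself. */
static void my_signal(struct dma_fence *f)
{
	dma_fence_signal(f);
}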
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 818478b4c4f0..61f3a963af95 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c | |||
@@ -172,7 +172,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, | |||
172 | /* fence the command bo */ | 172 | /* fence the command bo */ |
173 | virtio_gpu_unref_list(&validate_list); | 173 | virtio_gpu_unref_list(&validate_list); |
174 | drm_free_large(buflist); | 174 | drm_free_large(buflist); |
175 | fence_put(&fence->f); | 175 | dma_fence_put(&fence->f); |
176 | return 0; | 176 | return 0; |
177 | 177 | ||
178 | out_unresv: | 178 | out_unresv: |
@@ -298,7 +298,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, | |||
298 | drm_gem_object_release(obj); | 298 | drm_gem_object_release(obj); |
299 | if (vgdev->has_virgl_3d) { | 299 | if (vgdev->has_virgl_3d) { |
300 | virtio_gpu_unref_list(&validate_list); | 300 | virtio_gpu_unref_list(&validate_list); |
301 | fence_put(&fence->f); | 301 | dma_fence_put(&fence->f); |
302 | } | 302 | } |
303 | return ret; | 303 | return ret; |
304 | } | 304 | } |
@@ -309,13 +309,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, | |||
309 | 309 | ||
310 | if (vgdev->has_virgl_3d) { | 310 | if (vgdev->has_virgl_3d) { |
311 | virtio_gpu_unref_list(&validate_list); | 311 | virtio_gpu_unref_list(&validate_list); |
312 | fence_put(&fence->f); | 312 | dma_fence_put(&fence->f); |
313 | } | 313 | } |
314 | return 0; | 314 | return 0; |
315 | fail_unref: | 315 | fail_unref: |
316 | if (vgdev->has_virgl_3d) { | 316 | if (vgdev->has_virgl_3d) { |
317 | virtio_gpu_unref_list(&validate_list); | 317 | virtio_gpu_unref_list(&validate_list); |
318 | fence_put(&fence->f); | 318 | dma_fence_put(&fence->f); |
319 | } | 319 | } |
320 | //fail_obj: | 320 | //fail_obj: |
321 | // drm_gem_object_handle_unreference_unlocked(obj); | 321 | // drm_gem_object_handle_unreference_unlocked(obj); |
@@ -383,7 +383,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, | |||
383 | reservation_object_add_excl_fence(qobj->tbo.resv, | 383 | reservation_object_add_excl_fence(qobj->tbo.resv, |
384 | &fence->f); | 384 | &fence->f); |
385 | 385 | ||
386 | fence_put(&fence->f); | 386 | dma_fence_put(&fence->f); |
387 | out_unres: | 387 | out_unres: |
388 | virtio_gpu_object_unreserve(qobj); | 388 | virtio_gpu_object_unreserve(qobj); |
389 | out: | 389 | out: |
@@ -431,7 +431,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, | |||
431 | args->level, &box, &fence); | 431 | args->level, &box, &fence); |
432 | reservation_object_add_excl_fence(qobj->tbo.resv, | 432 | reservation_object_add_excl_fence(qobj->tbo.resv, |
433 | &fence->f); | 433 | &fence->f); |
434 | fence_put(&fence->f); | 434 | dma_fence_put(&fence->f); |
435 | } | 435 | } |
436 | 436 | ||
437 | out_unres: | 437 | out_unres: |
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 036b0fbae0fb..1235519853f4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c | |||
@@ -159,7 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) | |||
159 | virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func); | 159 | virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func); |
160 | virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func); | 160 | virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func); |
161 | 161 | ||
162 | vgdev->fence_drv.context = fence_context_alloc(1); | 162 | vgdev->fence_drv.context = dma_fence_context_alloc(1); |
163 | spin_lock_init(&vgdev->fence_drv.lock); | 163 | spin_lock_init(&vgdev->fence_drv.lock); |
164 | INIT_LIST_HEAD(&vgdev->fence_drv.fences); | 164 | INIT_LIST_HEAD(&vgdev->fence_drv.fences); |
165 | INIT_LIST_HEAD(&vgdev->cap_cache); | 165 | INIT_LIST_HEAD(&vgdev->cap_cache); |
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index ba28c0f6f28a..cb75f0663ba0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c | |||
@@ -152,7 +152,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, | |||
152 | if (!ret) { | 152 | if (!ret) { |
153 | reservation_object_add_excl_fence(bo->tbo.resv, | 153 | reservation_object_add_excl_fence(bo->tbo.resv, |
154 | &fence->f); | 154 | &fence->f); |
155 | fence_put(&fence->f); | 155 | dma_fence_put(&fence->f); |
156 | fence = NULL; | 156 | fence = NULL; |
157 | virtio_gpu_object_unreserve(bo); | 157 | virtio_gpu_object_unreserve(bo); |
158 | virtio_gpu_object_wait(bo, false); | 158 | virtio_gpu_object_wait(bo, false); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 26ac8e80a478..6541dd8b82dc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
@@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence) | |||
108 | * objects with actions attached to them. | 108 | * objects with actions attached to them. |
109 | */ | 109 | */ |
110 | 110 | ||
111 | static void vmw_fence_obj_destroy(struct fence *f) | 111 | static void vmw_fence_obj_destroy(struct dma_fence *f) |
112 | { | 112 | { |
113 | struct vmw_fence_obj *fence = | 113 | struct vmw_fence_obj *fence = |
114 | container_of(f, struct vmw_fence_obj, base); | 114 | container_of(f, struct vmw_fence_obj, base); |
@@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f) | |||
123 | fence->destroy(fence); | 123 | fence->destroy(fence); |
124 | } | 124 | } |
125 | 125 | ||
126 | static const char *vmw_fence_get_driver_name(struct fence *f) | 126 | static const char *vmw_fence_get_driver_name(struct dma_fence *f) |
127 | { | 127 | { |
128 | return "vmwgfx"; | 128 | return "vmwgfx"; |
129 | } | 129 | } |
130 | 130 | ||
131 | static const char *vmw_fence_get_timeline_name(struct fence *f) | 131 | static const char *vmw_fence_get_timeline_name(struct dma_fence *f) |
132 | { | 132 | { |
133 | return "svga"; | 133 | return "svga"; |
134 | } | 134 | } |
135 | 135 | ||
136 | static bool vmw_fence_enable_signaling(struct fence *f) | 136 | static bool vmw_fence_enable_signaling(struct dma_fence *f) |
137 | { | 137 | { |
138 | struct vmw_fence_obj *fence = | 138 | struct vmw_fence_obj *fence = |
139 | container_of(f, struct vmw_fence_obj, base); | 139 | container_of(f, struct vmw_fence_obj, base); |
@@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | struct vmwgfx_wait_cb { | 154 | struct vmwgfx_wait_cb { |
155 | struct fence_cb base; | 155 | struct dma_fence_cb base; |
156 | struct task_struct *task; | 156 | struct task_struct *task; |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static void | 159 | static void |
160 | vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb) | 160 | vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
161 | { | 161 | { |
162 | struct vmwgfx_wait_cb *wait = | 162 | struct vmwgfx_wait_cb *wait = |
163 | container_of(cb, struct vmwgfx_wait_cb, base); | 163 | container_of(cb, struct vmwgfx_wait_cb, base); |
@@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb) | |||
167 | 167 | ||
168 | static void __vmw_fences_update(struct vmw_fence_manager *fman); | 168 | static void __vmw_fences_update(struct vmw_fence_manager *fman); |
169 | 169 | ||
170 | static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout) | 170 | static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) |
171 | { | 171 | { |
172 | struct vmw_fence_obj *fence = | 172 | struct vmw_fence_obj *fence = |
173 | container_of(f, struct vmw_fence_obj, base); | 173 | container_of(f, struct vmw_fence_obj, base); |
@@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout) | |||
197 | 197 | ||
198 | while (ret > 0) { | 198 | while (ret > 0) { |
199 | __vmw_fences_update(fman); | 199 | __vmw_fences_update(fman); |
200 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags)) | 200 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) |
201 | break; | 201 | break; |
202 | 202 | ||
203 | if (intr) | 203 | if (intr) |
@@ -225,7 +225,7 @@ out: | |||
225 | return ret; | 225 | return ret; |
226 | } | 226 | } |
227 | 227 | ||
228 | static struct fence_ops vmw_fence_ops = { | 228 | static struct dma_fence_ops vmw_fence_ops = { |
229 | .get_driver_name = vmw_fence_get_driver_name, | 229 | .get_driver_name = vmw_fence_get_driver_name, |
230 | .get_timeline_name = vmw_fence_get_timeline_name, | 230 | .get_timeline_name = vmw_fence_get_timeline_name, |
231 | .enable_signaling = vmw_fence_enable_signaling, | 231 | .enable_signaling = vmw_fence_enable_signaling, |
@@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) | |||
298 | fman->event_fence_action_size = | 298 | fman->event_fence_action_size = |
299 | ttm_round_pot(sizeof(struct vmw_event_fence_action)); | 299 | ttm_round_pot(sizeof(struct vmw_event_fence_action)); |
300 | mutex_init(&fman->goal_irq_mutex); | 300 | mutex_init(&fman->goal_irq_mutex); |
301 | fman->ctx = fence_context_alloc(1); | 301 | fman->ctx = dma_fence_context_alloc(1); |
302 | 302 | ||
303 | return fman; | 303 | return fman; |
304 | } | 304 | } |
@@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman, | |||
326 | unsigned long irq_flags; | 326 | unsigned long irq_flags; |
327 | int ret = 0; | 327 | int ret = 0; |
328 | 328 | ||
329 | fence_init(&fence->base, &vmw_fence_ops, &fman->lock, | 329 | dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock, |
330 | fman->ctx, seqno); | 330 | fman->ctx, seqno); |
331 | INIT_LIST_HEAD(&fence->seq_passed_actions); | 331 | INIT_LIST_HEAD(&fence->seq_passed_actions); |
332 | fence->destroy = destroy; | 332 | fence->destroy = destroy; |
333 | 333 | ||
@@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) | |||
431 | u32 goal_seqno; | 431 | u32 goal_seqno; |
432 | u32 *fifo_mem; | 432 | u32 *fifo_mem; |
433 | 433 | ||
434 | if (fence_is_signaled_locked(&fence->base)) | 434 | if (dma_fence_is_signaled_locked(&fence->base)) |
435 | return false; | 435 | return false; |
436 | 436 | ||
437 | fifo_mem = fman->dev_priv->mmio_virt; | 437 | fifo_mem = fman->dev_priv->mmio_virt; |
@@ -459,7 +459,7 @@ rerun: | |||
459 | list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { | 459 | list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { |
460 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { | 460 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { |
461 | list_del_init(&fence->head); | 461 | list_del_init(&fence->head); |
462 | fence_signal_locked(&fence->base); | 462 | dma_fence_signal_locked(&fence->base); |
463 | INIT_LIST_HEAD(&action_list); | 463 | INIT_LIST_HEAD(&action_list); |
464 | list_splice_init(&fence->seq_passed_actions, | 464 | list_splice_init(&fence->seq_passed_actions, |
465 | &action_list); | 465 | &action_list); |
@@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) | |||
500 | { | 500 | { |
501 | struct vmw_fence_manager *fman = fman_from_fence(fence); | 501 | struct vmw_fence_manager *fman = fman_from_fence(fence); |
502 | 502 | ||
503 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) | 503 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) |
504 | return 1; | 504 | return 1; |
505 | 505 | ||
506 | vmw_fences_update(fman); | 506 | vmw_fences_update(fman); |
507 | 507 | ||
508 | return fence_is_signaled(&fence->base); | 508 | return dma_fence_is_signaled(&fence->base); |
509 | } | 509 | } |
510 | 510 | ||
511 | int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy, | 511 | int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy, |
512 | bool interruptible, unsigned long timeout) | 512 | bool interruptible, unsigned long timeout) |
513 | { | 513 | { |
514 | long ret = fence_wait_timeout(&fence->base, interruptible, timeout); | 514 | long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout); |
515 | 515 | ||
516 | if (likely(ret > 0)) | 516 | if (likely(ret > 0)) |
517 | return 0; | 517 | return 0; |
@@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence) | |||
530 | 530 | ||
531 | static void vmw_fence_destroy(struct vmw_fence_obj *fence) | 531 | static void vmw_fence_destroy(struct vmw_fence_obj *fence) |
532 | { | 532 | { |
533 | fence_free(&fence->base); | 533 | dma_fence_free(&fence->base); |
534 | } | 534 | } |
535 | 535 | ||
536 | int vmw_fence_create(struct vmw_fence_manager *fman, | 536 | int vmw_fence_create(struct vmw_fence_manager *fman, |
@@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) | |||
669 | struct vmw_fence_obj *fence = | 669 | struct vmw_fence_obj *fence = |
670 | list_entry(fman->fence_list.prev, struct vmw_fence_obj, | 670 | list_entry(fman->fence_list.prev, struct vmw_fence_obj, |
671 | head); | 671 | head); |
672 | fence_get(&fence->base); | 672 | dma_fence_get(&fence->base); |
673 | spin_unlock_irq(&fman->lock); | 673 | spin_unlock_irq(&fman->lock); |
674 | 674 | ||
675 | ret = vmw_fence_obj_wait(fence, false, false, | 675 | ret = vmw_fence_obj_wait(fence, false, false, |
@@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) | |||
677 | 677 | ||
678 | if (unlikely(ret != 0)) { | 678 | if (unlikely(ret != 0)) { |
679 | list_del_init(&fence->head); | 679 | list_del_init(&fence->head); |
680 | fence_signal(&fence->base); | 680 | dma_fence_signal(&fence->base); |
681 | INIT_LIST_HEAD(&action_list); | 681 | INIT_LIST_HEAD(&action_list); |
682 | list_splice_init(&fence->seq_passed_actions, | 682 | list_splice_init(&fence->seq_passed_actions, |
683 | &action_list); | 683 | &action_list); |
@@ -685,7 +685,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) | |||
685 | } | 685 | } |
686 | 686 | ||
687 | BUG_ON(!list_empty(&fence->head)); | 687 | BUG_ON(!list_empty(&fence->head)); |
688 | fence_put(&fence->base); | 688 | dma_fence_put(&fence->base); |
689 | spin_lock_irq(&fman->lock); | 689 | spin_lock_irq(&fman->lock); |
690 | } | 690 | } |
691 | spin_unlock_irq(&fman->lock); | 691 | spin_unlock_irq(&fman->lock); |
@@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, | |||
884 | spin_lock_irqsave(&fman->lock, irq_flags); | 884 | spin_lock_irqsave(&fman->lock, irq_flags); |
885 | 885 | ||
886 | fman->pending_actions[action->type]++; | 886 | fman->pending_actions[action->type]++; |
887 | if (fence_is_signaled_locked(&fence->base)) { | 887 | if (dma_fence_is_signaled_locked(&fence->base)) { |
888 | struct list_head action_list; | 888 | struct list_head action_list; |
889 | 889 | ||
890 | INIT_LIST_HEAD(&action_list); | 890 | INIT_LIST_HEAD(&action_list); |
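vmwgfx is one of the few drivers with a custom .wait implementation, built on a dma_fence_cb that wakes the sleeping task. A simplified, hypothetical version of that shape, without the interruptible and fence-goal handling of the real code:

#include <linux/dma-fence.h>
#include <linux/sched.h>

struct my_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void my_wait_cb_func(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct my_wait_cb *wait = container_of(cb, struct my_wait_cb, base);

	wake_up_process(wait->task);
}

static long my_wait(struct dma_fence *f, signed long timeout)
{
	struct my_wait_cb cb = { .task = current };
	long ret = timeout;

	/* returns -ENOENT (and installs nothing) if already signaled */
	if (dma_fence_add_callback(f, &cb.base, my_wait_cb_func))
		return timeout;

	set_current_state(TASK_UNINTERRUPTIBLE);
	while (ret > 0 && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
		ret = schedule_timeout(ret);
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	dma_fence_remove_callback(f, &cb.base);
	return ret;
}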
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index 83ae301ee141..d9d85aa6ed20 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | |||
@@ -27,7 +27,7 @@ | |||
27 | 27 | ||
28 | #ifndef _VMWGFX_FENCE_H_ | 28 | #ifndef _VMWGFX_FENCE_H_ |
29 | 29 | ||
30 | #include <linux/fence.h> | 30 | #include <linux/dma-fence.h> |
31 | 31 | ||
32 | #define VMW_FENCE_WAIT_TIMEOUT (5*HZ) | 32 | #define VMW_FENCE_WAIT_TIMEOUT (5*HZ) |
33 | 33 | ||
@@ -52,7 +52,7 @@ struct vmw_fence_action { | |||
52 | }; | 52 | }; |
53 | 53 | ||
54 | struct vmw_fence_obj { | 54 | struct vmw_fence_obj { |
55 | struct fence base; | 55 | struct dma_fence base; |
56 | 56 | ||
57 | struct list_head head; | 57 | struct list_head head; |
58 | struct list_head seq_passed_actions; | 58 | struct list_head seq_passed_actions; |
@@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p) | |||
71 | 71 | ||
72 | *fence_p = NULL; | 72 | *fence_p = NULL; |
73 | if (fence) | 73 | if (fence) |
74 | fence_put(&fence->base); | 74 | dma_fence_put(&fence->base); |
75 | } | 75 | } |
76 | 76 | ||
77 | static inline struct vmw_fence_obj * | 77 | static inline struct vmw_fence_obj * |
78 | vmw_fence_obj_reference(struct vmw_fence_obj *fence) | 78 | vmw_fence_obj_reference(struct vmw_fence_obj *fence) |
79 | { | 79 | { |
80 | if (fence) | 80 | if (fence) |
81 | fence_get(&fence->base); | 81 | dma_fence_get(&fence->base); |
82 | return fence; | 82 | return fence; |
83 | } | 83 | } |
84 | 84 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 1a85fb2d4dc6..8e86d6d4141b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo, | |||
1454 | if (fence == NULL) { | 1454 | if (fence == NULL) { |
1455 | vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | 1455 | vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); |
1456 | reservation_object_add_excl_fence(bo->resv, &fence->base); | 1456 | reservation_object_add_excl_fence(bo->resv, &fence->base); |
1457 | fence_put(&fence->base); | 1457 | dma_fence_put(&fence->base); |
1458 | } else | 1458 | } else |
1459 | reservation_object_add_excl_fence(bo->resv, &fence->base); | 1459 | reservation_object_add_excl_fence(bo->resv, &fence->base); |
1460 | } | 1460 | } |
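The virtio and vmwgfx call sites above share one idiom: with the reservation object locked, the fence is published as the exclusive (write) fence and the local reference is then dropped, since reservation_object_add_excl_fence() takes its own. As a small illustrative helper:

#include <linux/dma-fence.h>
#include <linux/reservation.h>

/* Caller must hold the reservation object's ww_mutex. After the
 * fence is installed as the exclusive fence, the reservation object
 * owns a reference, so the local one can be released. */
static void my_fence_buffer_for_write(struct reservation_object *resv,
				      struct dma_fence *fence)
{
	reservation_object_add_excl_fence(resv, fence);
	dma_fence_put(fence);
}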
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h new file mode 100644 index 000000000000..3629b2734db6 --- /dev/null +++ b/include/drm/bridge/mhl.h | |||
@@ -0,0 +1,291 @@ | |||
1 | /* | ||
2 | * Defines for Mobile High-Definition Link (MHL) interface | ||
3 | * | ||
4 | * Copyright (C) 2015, Samsung Electronics, Co., Ltd. | ||
5 | * Andrzej Hajda <a.hajda@samsung.com> | ||
6 | * | ||
7 | * Based on MHL driver for Android devices. | ||
8 | * Copyright (C) 2013-2014 Silicon Image, Inc. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #ifndef __MHL_H__ | ||
16 | #define __MHL_H__ | ||
17 | |||
18 | /* Device Capabilities Registers */ | ||
19 | enum { | ||
20 | MHL_DCAP_DEV_STATE, | ||
21 | MHL_DCAP_MHL_VERSION, | ||
22 | MHL_DCAP_CAT, | ||
23 | MHL_DCAP_ADOPTER_ID_H, | ||
24 | MHL_DCAP_ADOPTER_ID_L, | ||
25 | MHL_DCAP_VID_LINK_MODE, | ||
26 | MHL_DCAP_AUD_LINK_MODE, | ||
27 | MHL_DCAP_VIDEO_TYPE, | ||
28 | MHL_DCAP_LOG_DEV_MAP, | ||
29 | MHL_DCAP_BANDWIDTH, | ||
30 | MHL_DCAP_FEATURE_FLAG, | ||
31 | MHL_DCAP_DEVICE_ID_H, | ||
32 | MHL_DCAP_DEVICE_ID_L, | ||
33 | MHL_DCAP_SCRATCHPAD_SIZE, | ||
34 | MHL_DCAP_INT_STAT_SIZE, | ||
35 | MHL_DCAP_RESERVED, | ||
36 | MHL_DCAP_SIZE | ||
37 | }; | ||
38 | |||
39 | #define MHL_DCAP_CAT_SINK 0x01 | ||
40 | #define MHL_DCAP_CAT_SOURCE 0x02 | ||
41 | #define MHL_DCAP_CAT_POWER 0x10 | ||
42 | #define MHL_DCAP_CAT_PLIM(x) ((x) << 5) | ||
43 | |||
44 | #define MHL_DCAP_VID_LINK_RGB444 0x01 | ||
45 | #define MHL_DCAP_VID_LINK_YCBCR444 0x02 | ||
46 | #define MHL_DCAP_VID_LINK_YCBCR422 0x04 | ||
47 | #define MHL_DCAP_VID_LINK_PPIXEL 0x08 | ||
48 | #define MHL_DCAP_VID_LINK_ISLANDS 0x10 | ||
49 | #define MHL_DCAP_VID_LINK_VGA 0x20 | ||
50 | #define MHL_DCAP_VID_LINK_16BPP 0x40 | ||
51 | |||
52 | #define MHL_DCAP_AUD_LINK_2CH 0x01 | ||
53 | #define MHL_DCAP_AUD_LINK_8CH 0x02 | ||
54 | |||
55 | #define MHL_DCAP_VT_GRAPHICS 0x00 | ||
56 | #define MHL_DCAP_VT_PHOTO 0x02 | ||
57 | #define MHL_DCAP_VT_CINEMA 0x04 | ||
58 | #define MHL_DCAP_VT_GAMES 0x08 | ||
59 | #define MHL_DCAP_SUPP_VT 0x80 | ||
60 | |||
61 | #define MHL_DCAP_LD_DISPLAY 0x01 | ||
62 | #define MHL_DCAP_LD_VIDEO 0x02 | ||
63 | #define MHL_DCAP_LD_AUDIO 0x04 | ||
64 | #define MHL_DCAP_LD_MEDIA 0x08 | ||
65 | #define MHL_DCAP_LD_TUNER 0x10 | ||
66 | #define MHL_DCAP_LD_RECORD 0x20 | ||
67 | #define MHL_DCAP_LD_SPEAKER 0x40 | ||
68 | #define MHL_DCAP_LD_GUI 0x80 | ||
69 | #define MHL_DCAP_LD_ALL 0xFF | ||
70 | |||
71 | #define MHL_DCAP_FEATURE_RCP_SUPPORT 0x01 | ||
72 | #define MHL_DCAP_FEATURE_RAP_SUPPORT 0x02 | ||
73 | #define MHL_DCAP_FEATURE_SP_SUPPORT 0x04 | ||
74 | #define MHL_DCAP_FEATURE_UCP_SEND_SUPPORT 0x08 | ||
75 | #define MHL_DCAP_FEATURE_UCP_RECV_SUPPORT 0x10 | ||
76 | #define MHL_DCAP_FEATURE_RBP_SUPPORT 0x40 | ||
77 | |||
78 | /* Extended Device Capabilities Registers */ | ||
79 | enum { | ||
80 | MHL_XDC_ECBUS_SPEEDS, | ||
81 | MHL_XDC_TMDS_SPEEDS, | ||
82 | MHL_XDC_ECBUS_ROLES, | ||
83 | MHL_XDC_LOG_DEV_MAPX, | ||
84 | MHL_XDC_SIZE | ||
85 | }; | ||
86 | |||
87 | #define MHL_XDC_ECBUS_S_075 0x01 | ||
88 | #define MHL_XDC_ECBUS_S_8BIT 0x02 | ||
89 | #define MHL_XDC_ECBUS_S_12BIT 0x04 | ||
90 | #define MHL_XDC_ECBUS_D_150 0x10 | ||
91 | #define MHL_XDC_ECBUS_D_8BIT 0x20 | ||
92 | |||
93 | #define MHL_XDC_TMDS_000 0x00 | ||
94 | #define MHL_XDC_TMDS_150 0x01 | ||
95 | #define MHL_XDC_TMDS_300 0x02 | ||
96 | #define MHL_XDC_TMDS_600 0x04 | ||
97 | |||
98 | /* MHL_XDC_ECBUS_ROLES flags */ | ||
99 | #define MHL_XDC_DEV_HOST 0x01 | ||
100 | #define MHL_XDC_DEV_DEVICE 0x02 | ||
101 | #define MHL_XDC_DEV_CHARGER 0x04 | ||
102 | #define MHL_XDC_HID_HOST 0x08 | ||
103 | #define MHL_XDC_HID_DEVICE 0x10 | ||
104 | |||
105 | /* MHL_XDC_LOG_DEV_MAPX flags */ | ||
106 | #define MHL_XDC_LD_PHONE 0x01 | ||
107 | |||
108 | /* Device Status Registers */ | ||
109 | enum { | ||
110 | MHL_DST_CONNECTED_RDY, | ||
111 | MHL_DST_LINK_MODE, | ||
112 | MHL_DST_VERSION, | ||
113 | MHL_DST_SIZE | ||
114 | }; | ||
115 | |||
116 | /* Offset of DEVSTAT registers */ | ||
117 | #define MHL_DST_OFFSET 0x30 | ||
118 | #define MHL_DST_REG(name) (MHL_DST_OFFSET + MHL_DST_##name) | ||
119 | |||
120 | #define MHL_DST_CONN_DCAP_RDY 0x01 | ||
121 | #define MHL_DST_CONN_XDEVCAPP_SUPP 0x02 | ||
122 | #define MHL_DST_CONN_POW_STAT 0x04 | ||
123 | #define MHL_DST_CONN_PLIM_STAT_MASK 0x38 | ||
124 | |||
125 | #define MHL_DST_LM_CLK_MODE_MASK 0x07 | ||
126 | #define MHL_DST_LM_CLK_MODE_PACKED_PIXEL 0x02 | ||
127 | #define MHL_DST_LM_CLK_MODE_NORMAL 0x03 | ||
128 | #define MHL_DST_LM_PATH_EN_MASK 0x08 | ||
129 | #define MHL_DST_LM_PATH_ENABLED 0x08 | ||
130 | #define MHL_DST_LM_PATH_DISABLED 0x00 | ||
131 | #define MHL_DST_LM_MUTED_MASK 0x10 | ||
132 | |||
133 | /* Extended Device Status Registers */ | ||
134 | enum { | ||
135 | MHL_XDS_CURR_ECBUS_MODE, | ||
136 | MHL_XDS_AVLINK_MODE_STATUS, | ||
137 | MHL_XDS_AVLINK_MODE_CONTROL, | ||
138 | MHL_XDS_MULTI_SINK_STATUS, | ||
139 | MHL_XDS_SIZE | ||
140 | }; | ||
141 | |||
142 | /* Offset of XDEVSTAT registers */ | ||
143 | #define MHL_XDS_OFFSET 0x90 | ||
144 | #define MHL_XDS_REG(name) (MHL_XDS_OFFSET + MHL_XDS_##name) | ||
145 | |||
146 | /* MHL_XDS_REG_CURR_ECBUS_MODE flags */ | ||
147 | #define MHL_XDS_SLOT_MODE_8BIT 0x00 | ||
148 | #define MHL_XDS_SLOT_MODE_6BIT 0x01 | ||
149 | #define MHL_XDS_ECBUS_S 0x04 | ||
150 | #define MHL_XDS_ECBUS_D 0x08 | ||
151 | |||
152 | #define MHL_XDS_LINK_CLOCK_75MHZ 0x00 | ||
153 | #define MHL_XDS_LINK_CLOCK_150MHZ 0x10 | ||
154 | #define MHL_XDS_LINK_CLOCK_300MHZ 0x20 | ||
155 | #define MHL_XDS_LINK_CLOCK_600MHZ 0x30 | ||
156 | |||
157 | #define MHL_XDS_LINK_STATUS_NO_SIGNAL 0x00 | ||
158 | #define MHL_XDS_LINK_STATUS_CRU_LOCKED 0x01 | ||
159 | #define MHL_XDS_LINK_STATUS_TMDS_NORMAL 0x02 | ||
160 | #define MHL_XDS_LINK_STATUS_TMDS_RESERVED 0x03 | ||
161 | |||
162 | #define MHL_XDS_LINK_RATE_1_5_GBPS 0x00 | ||
163 | #define MHL_XDS_LINK_RATE_3_0_GBPS 0x01 | ||
164 | #define MHL_XDS_LINK_RATE_6_0_GBPS 0x02 | ||
165 | #define MHL_XDS_ATT_CAPABLE 0x08 | ||
166 | |||
167 | #define MHL_XDS_SINK_STATUS_1_HPD_LOW 0x00 | ||
168 | #define MHL_XDS_SINK_STATUS_1_HPD_HIGH 0x01 | ||
169 | #define MHL_XDS_SINK_STATUS_2_HPD_LOW 0x00 | ||
170 | #define MHL_XDS_SINK_STATUS_2_HPD_HIGH 0x04 | ||
171 | #define MHL_XDS_SINK_STATUS_3_HPD_LOW 0x00 | ||
172 | #define MHL_XDS_SINK_STATUS_3_HPD_HIGH 0x10 | ||
173 | #define MHL_XDS_SINK_STATUS_4_HPD_LOW 0x00 | ||
174 | #define MHL_XDS_SINK_STATUS_4_HPD_HIGH 0x40 | ||
175 | |||
176 | /* Interrupt Registers */ | ||
177 | enum { | ||
178 | MHL_INT_RCHANGE, | ||
179 | MHL_INT_DCHANGE, | ||
180 | MHL_INT_SIZE | ||
181 | }; | ||
182 | |||
183 | /* Offset of interrupt registers */ | ||
184 | #define MHL_INT_OFFSET 0x20 | ||
185 | #define MHL_INT_REG(name) (MHL_INT_OFFSET + MHL_INT_##name) | ||
186 | |||
187 | #define MHL_INT_RC_DCAP_CHG 0x01 | ||
188 | #define MHL_INT_RC_DSCR_CHG 0x02 | ||
189 | #define MHL_INT_RC_REQ_WRT 0x04 | ||
190 | #define MHL_INT_RC_GRT_WRT 0x08 | ||
191 | #define MHL_INT_RC_3D_REQ 0x10 | ||
192 | #define MHL_INT_RC_FEAT_REQ 0x20 | ||
193 | #define MHL_INT_RC_FEAT_COMPLETE 0x40 | ||
194 | |||
195 | #define MHL_INT_DC_EDID_CHG 0x02 | ||
196 | |||
197 | enum { | ||
198 | MHL_ACK = 0x33, /* Command or Data byte acknowledge */ | ||
199 | MHL_NACK = 0x34, /* Command or Data byte not acknowledge */ | ||
200 | MHL_ABORT = 0x35, /* Transaction abort */ | ||
201 | MHL_WRITE_STAT = 0xe0, /* Write one status register */ | ||
202 | MHL_SET_INT = 0x60, /* Write one interrupt register */ | ||
203 | MHL_READ_DEVCAP_REG = 0x61, /* Read one register */ | ||
204 | MHL_GET_STATE = 0x62, /* Read CBUS revision level from follower */ | ||
205 | MHL_GET_VENDOR_ID = 0x63, /* Read vendor ID value from follower */ | ||
206 | MHL_SET_HPD = 0x64, /* Set Hot Plug Detect in follower */ | ||
207 | MHL_CLR_HPD = 0x65, /* Clear Hot Plug Detect in follower */ | ||
208 | MHL_SET_CAP_ID = 0x66, /* Set Capture ID for downstream device */ | ||
209 | MHL_GET_CAP_ID = 0x67, /* Get Capture ID from downstream device */ | ||
210 | MHL_MSC_MSG = 0x68, /* VS command to send RCP sub-commands */ | ||
211 | MHL_GET_SC1_ERRORCODE = 0x69, /* Get Vendor-Specific error code */ | ||
212 | MHL_GET_DDC_ERRORCODE = 0x6A, /* Get DDC channel command error code */ | ||
213 | MHL_GET_MSC_ERRORCODE = 0x6B, /* Get MSC command error code */ | ||
214 | MHL_WRITE_BURST = 0x6C, /* Write 1-16 bytes to responder's scratchpad */ | ||
215 | MHL_GET_SC3_ERRORCODE = 0x6D, /* Get channel 3 command error code */ | ||
216 | MHL_WRITE_XSTAT = 0x70, /* Write one extended status register */ | ||
217 | MHL_READ_XDEVCAP_REG = 0x71, /* Read one extended devcap register */ | ||
218 | /* let the rest of these float, they are software specific */ | ||
219 | MHL_READ_EDID_BLOCK, | ||
220 | MHL_SEND_3D_REQ_OR_FEAT_REQ, | ||
221 | MHL_READ_DEVCAP, | ||
222 | MHL_READ_XDEVCAP | ||
223 | }; | ||
224 | |||
225 | /* MSC message types */ | ||
226 | enum { | ||
227 | MHL_MSC_MSG_RCP = 0x10, /* RCP sub-command */ | ||
228 | MHL_MSC_MSG_RCPK = 0x11, /* RCP Acknowledge sub-command */ | ||
229 | MHL_MSC_MSG_RCPE = 0x12, /* RCP Error sub-command */ | ||
230 | MHL_MSC_MSG_RAP = 0x20, /* Mode Change Warning sub-command */ | ||
231 | MHL_MSC_MSG_RAPK = 0x21, /* MCW Acknowledge sub-command */ | ||
232 | MHL_MSC_MSG_RBP = 0x22, /* Remote Button Protocol sub-command */ | ||
233 | MHL_MSC_MSG_RBPK = 0x23, /* RBP Acknowledge sub-command */ | ||
234 | MHL_MSC_MSG_RBPE = 0x24, /* RBP Error sub-command */ | ||
235 | MHL_MSC_MSG_UCP = 0x30, /* UCP sub-command */ | ||
236 | MHL_MSC_MSG_UCPK = 0x31, /* UCP Acknowledge sub-command */ | ||
237 | MHL_MSC_MSG_UCPE = 0x32, /* UCP Error sub-command */ | ||
238 | MHL_MSC_MSG_RUSB = 0x40, /* Request USB host role */ | ||
239 | MHL_MSC_MSG_RUSBK = 0x41, /* Acknowledge request for USB host role */ | ||
240 | MHL_MSC_MSG_RHID = 0x42, /* Request HID host role */ | ||
241 | MHL_MSC_MSG_RHIDK = 0x43, /* Acknowledge request for HID host role */ | ||
242 | MHL_MSC_MSG_ATT = 0x50, /* Request attention sub-command */ | ||
243 | MHL_MSC_MSG_ATTK = 0x51, /* ATT Acknowledge sub-command */ | ||
244 | MHL_MSC_MSG_BIST_TRIGGER = 0x60, | ||
245 | MHL_MSC_MSG_BIST_REQUEST_STAT = 0x61, | ||
246 | MHL_MSC_MSG_BIST_READY = 0x62, | ||
247 | MHL_MSC_MSG_BIST_STOP = 0x63, | ||
248 | }; | ||
249 | |||
250 | /* RAP action codes */ | ||
251 | #define MHL_RAP_POLL 0x00 /* Just do an ack */ | ||
252 | #define MHL_RAP_CONTENT_ON 0x10 /* Turn content stream ON */ | ||
253 | #define MHL_RAP_CONTENT_OFF 0x11 /* Turn content stream OFF */ | ||
254 | #define MHL_RAP_CBUS_MODE_DOWN 0x20 | ||
255 | #define MHL_RAP_CBUS_MODE_UP 0x21 | ||
256 | |||
257 | /* RAPK status codes */ | ||
258 | #define MHL_RAPK_NO_ERR 0x00 /* RAP action recognized & supported */ | ||
259 | #define MHL_RAPK_UNRECOGNIZED 0x01 /* Unknown RAP action code received */ | ||
260 | #define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */ | ||
261 | #define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */ | ||
262 | |||
263 | /* | ||
264 | * Error status codes for RCPE messages | ||
265 | */ | ||
266 | /* No error. (Not allowed in RCPE messages) */ | ||
267 | #define MHL_RCPE_STATUS_NO_ERROR 0x00 | ||
268 | /* Unsupported/unrecognized key code */ | ||
269 | #define MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 | ||
270 | /* Responder busy. Initiator may retry message */ | ||
271 | #define MHL_RCPE_STATUS_BUSY 0x02 | ||
272 | |||
273 | /* | ||
274 | * Error status codes for RBPE messages | ||
275 | */ | ||
276 | /* No error. (Not allowed in RBPE messages) */ | ||
277 | #define MHL_RBPE_STATUS_NO_ERROR 0x00 | ||
278 | /* Unsupported/unrecognized button code */ | ||
279 | #define MHL_RBPE_STATUS_INEFFECTIVE_BUTTON_CODE 0x01 | ||
280 | /* Responder busy. Initiator may retry message */ | ||
281 | #define MHL_RBPE_STATUS_BUSY 0x02 | ||
282 | |||
283 | /* | ||
284 | * Error status codes for UCPE messages | ||
285 | */ | ||
286 | /* No error. (Not allowed in UCPE messages) */ | ||
287 | #define MHL_UCPE_STATUS_NO_ERROR 0x00 | ||
288 | /* Unsupported/unrecognized key code */ | ||
289 | #define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 | ||
290 | |||
291 | #endif /* __MHL_H__ */ | ||
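In this register map, each enum indexes a register inside its block and the matching *_REG() macro adds the block's CBUS base offset. A hypothetical illustration of the resulting addresses:

#include <drm/bridge/mhl.h>

/* The enums are zero-based indices into their register block; the
 * *_REG() macros add the block base (0x30, 0x90, 0x20 respectively). */
static void mhl_reg_offsets_example(void)
{
	unsigned int r1 = MHL_DST_REG(LINK_MODE);	   /* 0x30 + 1 = 0x31 */
	unsigned int r2 = MHL_XDS_REG(AVLINK_MODE_STATUS); /* 0x90 + 1 = 0x91 */
	unsigned int r3 = MHL_INT_REG(RCHANGE);		   /* 0x20 + 0 = 0x20 */

	(void)r1; (void)r2; (void)r3;
}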
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 672644031bd5..e336e3901876 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -57,7 +57,7 @@ | |||
57 | #include <linux/types.h> | 57 | #include <linux/types.h> |
58 | #include <linux/vmalloc.h> | 58 | #include <linux/vmalloc.h> |
59 | #include <linux/workqueue.h> | 59 | #include <linux/workqueue.h> |
60 | #include <linux/fence.h> | 60 | #include <linux/dma-fence.h> |
61 | 61 | ||
62 | #include <asm/mman.h> | 62 | #include <asm/mman.h> |
63 | #include <asm/pgalloc.h> | 63 | #include <asm/pgalloc.h> |
@@ -362,7 +362,7 @@ struct drm_ioctl_desc { | |||
362 | struct drm_pending_event { | 362 | struct drm_pending_event { |
363 | struct completion *completion; | 363 | struct completion *completion; |
364 | struct drm_event *event; | 364 | struct drm_event *event; |
365 | struct fence *fence; | 365 | struct dma_fence *fence; |
366 | struct list_head link; | 366 | struct list_head link; |
367 | struct list_head pending_link; | 367 | struct list_head pending_link; |
368 | struct drm_file *file_priv; | 368 | struct drm_file *file_priv; |
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h index fd351924e1c5..13221cf9b3eb 100644 --- a/include/drm/drm_blend.h +++ b/include/drm/drm_blend.h | |||
@@ -52,8 +52,6 @@ static inline bool drm_rotation_90_or_270(unsigned int rotation) | |||
52 | return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270); | 52 | return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270); |
53 | } | 53 | } |
54 | 54 | ||
55 | struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, | ||
56 | unsigned int supported_rotations); | ||
57 | int drm_plane_create_rotation_property(struct drm_plane *plane, | 55 | int drm_plane_create_rotation_property(struct drm_plane *plane, |
58 | unsigned int rotation, | 56 | unsigned int rotation, |
59 | unsigned int supported_rotations); | 57 | unsigned int supported_rotations); |
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 284c1b3aec10..fa1aa214c8ea 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -54,7 +54,7 @@ struct drm_mode_set; | |||
54 | struct drm_file; | 54 | struct drm_file; |
55 | struct drm_clip_rect; | 55 | struct drm_clip_rect; |
56 | struct device_node; | 56 | struct device_node; |
57 | struct fence; | 57 | struct dma_fence; |
58 | struct edid; | 58 | struct edid; |
59 | 59 | ||
60 | static inline int64_t U642I64(uint64_t val) | 60 | static inline int64_t U642I64(uint64_t val) |
@@ -1156,11 +1156,6 @@ struct drm_mode_config { | |||
1156 | */ | 1156 | */ |
1157 | struct drm_property *plane_type_property; | 1157 | struct drm_property *plane_type_property; |
1158 | /** | 1158 | /** |
1159 | * @rotation_property: Optional property for planes or CRTCs to specifiy | ||
1160 | * rotation. | ||
1161 | */ | ||
1162 | struct drm_property *rotation_property; | ||
1163 | /** | ||
1164 | * @prop_src_x: Default atomic plane property for the plane source | 1159 | * @prop_src_x: Default atomic plane property for the plane source |
1165 | * position in the connected &drm_framebuffer. | 1160 | * position in the connected &drm_framebuffer. |
1166 | */ | 1161 | */ |
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 2a79882cb68e..55bbeb0ff594 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
@@ -690,6 +690,12 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) | |||
690 | dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; | 690 | dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; |
691 | } | 691 | } |
692 | 692 | ||
693 | static inline bool | ||
694 | drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) | ||
695 | { | ||
696 | return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT; | ||
697 | } | ||
698 | |||
693 | /* | 699 | /* |
694 | * DisplayPort AUX channel | 700 | * DisplayPort AUX channel |
695 | */ | 701 | */ |
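drm_dp_is_branch() factors out the DPCD test for a downstream-port-present device, so callers can distinguish branch devices (hubs, converters) from directly attached sinks. A hedged usage sketch:

#include <drm/drm_dp_helper.h>

/* Hypothetical caller: pick branch- vs sink-specific handling from
 * cached DPCD receiver capabilities. */
static bool my_handle_dp_device(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	if (drm_dp_is_branch(dpcd)) {
		/* a downstream port is present: hub/converter branch */
		return true;
	}
	/* directly attached sink, e.g. an eDP panel */
	return false;
}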
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index 3fd87b386ed7..26a64805cc15 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/of_graph.h> | 4 | #include <linux/of_graph.h> |
5 | 5 | ||
6 | struct component_master_ops; | 6 | struct component_master_ops; |
7 | struct component_match; | ||
7 | struct device; | 8 | struct device; |
8 | struct drm_device; | 9 | struct drm_device; |
9 | struct drm_encoder; | 10 | struct drm_encoder; |
@@ -12,6 +13,10 @@ struct device_node; | |||
12 | #ifdef CONFIG_OF | 13 | #ifdef CONFIG_OF |
13 | extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, | 14 | extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, |
14 | struct device_node *port); | 15 | struct device_node *port); |
16 | extern void drm_of_component_match_add(struct device *master, | ||
17 | struct component_match **matchptr, | ||
18 | int (*compare)(struct device *, void *), | ||
19 | struct device_node *node); | ||
15 | extern int drm_of_component_probe(struct device *dev, | 20 | extern int drm_of_component_probe(struct device *dev, |
16 | int (*compare_of)(struct device *, void *), | 21 | int (*compare_of)(struct device *, void *), |
17 | const struct component_master_ops *m_ops); | 22 | const struct component_master_ops *m_ops); |
@@ -25,6 +30,14 @@ static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, | |||
25 | return 0; | 30 | return 0; |
26 | } | 31 | } |
27 | 32 | ||
33 | static inline void | ||
34 | drm_of_component_match_add(struct device *master, | ||
35 | struct component_match **matchptr, | ||
36 | int (*compare)(struct device *, void *), | ||
37 | struct device_node *node) | ||
38 | { | ||
39 | } | ||
40 | |||
28 | static inline int | 41 | static inline int |
29 | drm_of_component_probe(struct device *dev, | 42 | drm_of_component_probe(struct device *dev, |
30 | int (*compare_of)(struct device *, void *), | 43 | int (*compare_of)(struct device *, void *), |
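drm_of_component_match_add() wraps component_match_add_release() so the device node stays referenced for the lifetime of the match, which is the point of the component-matching conversion in this pull. An illustrative master probe, where compare_of() and my_master_ops are placeholders:

#include <drm/drm_of.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/of.h>

static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static const struct component_master_ops my_master_ops;

static int my_master_probe(struct device *dev)
{
	struct component_match *match = NULL;
	struct device_node *child;

	/* the helper grabs its own reference on each node */
	for_each_available_child_of_node(dev->of_node, child)
		drm_of_component_match_add(dev, &match, compare_of, child);

	return component_master_add_with_match(dev, &my_master_ops, match);
}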
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 98b39d66eb32..c5e8a0df1623 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h | |||
@@ -59,7 +59,7 @@ struct drm_plane_state { | |||
59 | 59 | ||
60 | struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */ | 60 | struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */ |
61 | struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */ | 61 | struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */ |
62 | struct fence *fence; | 62 | struct dma_fence *fence; |
63 | 63 | ||
64 | /* Signed dest location allows it to be partially off screen */ | 64 | /* Signed dest location allows it to be partially off screen */ |
65 | int32_t crtc_x, crtc_y; | 65 | int32_t crtc_x, crtc_y; |
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index bb6a3357a817..652e45be97c8 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
@@ -211,7 +211,7 @@ struct ttm_buffer_object { | |||
211 | * Members protected by a bo reservation. | 211 | * Members protected by a bo reservation. |
212 | */ | 212 | */ |
213 | 213 | ||
214 | struct fence *moving; | 214 | struct dma_fence *moving; |
215 | 215 | ||
216 | struct drm_vma_offset_node vma_node; | 216 | struct drm_vma_offset_node vma_node; |
217 | 217 | ||
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index d3d83dfe89e2..cdbdb40eb5bd 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -303,7 +303,7 @@ struct ttm_mem_type_manager { | |||
303 | /* | 303 | /* |
304 | * Protected by @move_lock. | 304 | * Protected by @move_lock. |
305 | */ | 305 | */ |
306 | struct fence *move; | 306 | struct dma_fence *move; |
307 | }; | 307 | }; |
308 | 308 | ||
309 | /** | 309 | /** |
@@ -1039,7 +1039,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); | |||
1039 | */ | 1039 | */ |
1040 | 1040 | ||
1041 | extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | 1041 | extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, |
1042 | struct fence *fence, bool evict, | 1042 | struct dma_fence *fence, bool evict, |
1043 | struct ttm_mem_reg *new_mem); | 1043 | struct ttm_mem_reg *new_mem); |
1044 | 1044 | ||
1045 | /** | 1045 | /** |
@@ -1054,7 +1054,7 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
1054 | * immediately or hang it on a temporary buffer object. | 1054 | * immediately or hang it on a temporary buffer object. |
1055 | */ | 1055 | */ |
1056 | int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, | 1056 | int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, |
1057 | struct fence *fence, bool evict, | 1057 | struct dma_fence *fence, bool evict, |
1058 | struct ttm_mem_reg *new_mem); | 1058 | struct ttm_mem_reg *new_mem); |
1059 | 1059 | ||
1060 | /** | 1060 | /** |
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index b620c317c772..47f35b8e6d09 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h | |||
@@ -114,6 +114,6 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, | |||
114 | 114 | ||
115 | extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, | 115 | extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, |
116 | struct list_head *list, | 116 | struct list_head *list, |
117 | struct fence *fence); | 117 | struct dma_fence *fence); |
118 | 118 | ||
119 | #endif | 119 | #endif |
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index e0b0741ae671..8daeb3ce0016 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <linux/list.h> | 30 | #include <linux/list.h> |
31 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
32 | #include <linux/fs.h> | 32 | #include <linux/fs.h> |
33 | #include <linux/fence.h> | 33 | #include <linux/dma-fence.h> |
34 | #include <linux/wait.h> | 34 | #include <linux/wait.h> |
35 | 35 | ||
36 | struct device; | 36 | struct device; |
@@ -143,7 +143,7 @@ struct dma_buf { | |||
143 | wait_queue_head_t poll; | 143 | wait_queue_head_t poll; |
144 | 144 | ||
145 | struct dma_buf_poll_cb_t { | 145 | struct dma_buf_poll_cb_t { |
146 | struct fence_cb cb; | 146 | struct dma_fence_cb cb; |
147 | wait_queue_head_t *poll; | 147 | wait_queue_head_t *poll; |
148 | 148 | ||
149 | unsigned long active; | 149 | unsigned long active; |
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h new file mode 100644 index 000000000000..5900945f962d --- /dev/null +++ b/include/linux/dma-fence-array.h | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * fence-array: aggregates fences to be waited on together | ||
3 | * | ||
4 | * Copyright (C) 2016 Collabora Ltd | ||
5 | * Copyright (C) 2016 Advanced Micro Devices, Inc. | ||
6 | * Authors: | ||
7 | * Gustavo Padovan <gustavo@padovan.org> | ||
8 | * Christian König <christian.koenig@amd.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License version 2 as published by | ||
12 | * the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | */ | ||
19 | |||
20 | #ifndef __LINUX_DMA_FENCE_ARRAY_H | ||
21 | #define __LINUX_DMA_FENCE_ARRAY_H | ||
22 | |||
23 | #include <linux/dma-fence.h> | ||
24 | |||
25 | /** | ||
26 | * struct dma_fence_array_cb - callback helper for fence array | ||
27 | * @cb: fence callback structure for signaling | ||
28 | * @array: reference to the parent fence array object | ||
29 | */ | ||
30 | struct dma_fence_array_cb { | ||
31 | struct dma_fence_cb cb; | ||
32 | struct dma_fence_array *array; | ||
33 | }; | ||
34 | |||
35 | /** | ||
36 | * struct dma_fence_array - fence to represent an array of fences | ||
37 | * @base: fence base class | ||
38 | * @lock: spinlock for fence handling | ||
39 | * @num_fences: number of fences in the array | ||
40 | * @num_pending: fences in the array still pending | ||
41 | * @fences: array of the fences | ||
42 | */ | ||
43 | struct dma_fence_array { | ||
44 | struct dma_fence base; | ||
45 | |||
46 | spinlock_t lock; | ||
47 | unsigned num_fences; | ||
48 | atomic_t num_pending; | ||
49 | struct dma_fence **fences; | ||
50 | }; | ||
51 | |||
52 | extern const struct dma_fence_ops dma_fence_array_ops; | ||
53 | |||
54 | /** | ||
55 | * dma_fence_is_array - check if a fence is from the array subclass | ||
56 | * @fence: fence to test | ||
57 | * | ||
58 | * Return true if it is a dma_fence_array and false otherwise. | ||
59 | */ | ||
60 | static inline bool dma_fence_is_array(struct dma_fence *fence) | ||
61 | { | ||
62 | return fence->ops == &dma_fence_array_ops; | ||
63 | } | ||
64 | |||
65 | /** | ||
66 | * to_dma_fence_array - cast a fence to a dma_fence_array | ||
67 | * @fence: fence to cast to a dma_fence_array | ||
68 | * | ||
69 | * Returns NULL if the fence is not a dma_fence_array, | ||
70 | * or the dma_fence_array otherwise. | ||
71 | */ | ||
72 | static inline struct dma_fence_array * | ||
73 | to_dma_fence_array(struct dma_fence *fence) | ||
74 | { | ||
75 | if (fence->ops != &dma_fence_array_ops) | ||
76 | return NULL; | ||
77 | |||
78 | return container_of(fence, struct dma_fence_array, base); | ||
79 | } | ||
80 | |||
81 | struct dma_fence_array *dma_fence_array_create(int num_fences, | ||
82 | struct dma_fence **fences, | ||
83 | u64 context, unsigned seqno, | ||
84 | bool signal_on_any); | ||
85 | |||
86 | #endif /* __LINUX_DMA_FENCE_ARRAY_H */ | ||
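dma_fence_array_create() takes ownership of the passed fences[] storage and of one reference per fence, and its base fence signals once all of them do (or any of them, with signal_on_any). A hedged sketch combining two fences into one waitable object:

#include <linux/dma-fence-array.h>
#include <linux/slab.h>

/* Illustrative: build a single dma_fence a consumer can wait on that
 * completes when both a and b have signaled. */
static struct dma_fence *my_combine_two(struct dma_fence *a,
					struct dma_fence *b)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	/* signal_on_any = false: signal only when all fences signal */
	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1, false);
	if (!array) {
		dma_fence_put(a);
		dma_fence_put(b);
		kfree(fences);
		return NULL;
	}
	return &array->base;
}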
diff --git a/include/linux/fence.h b/include/linux/dma-fence.h index c9c5ba98c302..ba60c043a5d3 100644 --- a/include/linux/fence.h +++ b/include/linux/dma-fence.h | |||
@@ -18,8 +18,8 @@ | |||
18 | * more details. | 18 | * more details. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #ifndef __LINUX_FENCE_H | 21 | #ifndef __LINUX_DMA_FENCE_H |
22 | #define __LINUX_FENCE_H | 22 | #define __LINUX_DMA_FENCE_H |
23 | 23 | ||
24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/wait.h> | 25 | #include <linux/wait.h> |
@@ -30,48 +30,48 @@ | |||
30 | #include <linux/printk.h> | 30 | #include <linux/printk.h> |
31 | #include <linux/rcupdate.h> | 31 | #include <linux/rcupdate.h> |
32 | 32 | ||
33 | struct fence; | 33 | struct dma_fence; |
34 | struct fence_ops; | 34 | struct dma_fence_ops; |
35 | struct fence_cb; | 35 | struct dma_fence_cb; |
36 | 36 | ||
37 | /** | 37 | /** |
38 | * struct fence - software synchronization primitive | 38 | * struct dma_fence - software synchronization primitive |
39 | * @refcount: refcount for this fence | 39 | * @refcount: refcount for this fence |
40 | * @ops: fence_ops associated with this fence | 40 | * @ops: dma_fence_ops associated with this fence |
41 | * @rcu: used for releasing fence with kfree_rcu | 41 | * @rcu: used for releasing fence with kfree_rcu |
42 | * @cb_list: list of all callbacks to call | 42 | * @cb_list: list of all callbacks to call |
43 | * @lock: spin_lock_irqsave used for locking | 43 | * @lock: spin_lock_irqsave used for locking |
44 | * @context: execution context this fence belongs to, returned by | 44 | * @context: execution context this fence belongs to, returned by |
45 | * fence_context_alloc() | 45 | * dma_fence_context_alloc() |
46 | * @seqno: the sequence number of this fence inside the execution context, | 46 | * @seqno: the sequence number of this fence inside the execution context, |
47 | * can be compared to decide which fence would be signaled later. | 47 | * can be compared to decide which fence would be signaled later. |
48 | * @flags: A mask of FENCE_FLAG_* defined below | 48 | * @flags: A mask of DMA_FENCE_FLAG_* defined below |
49 | * @timestamp: Timestamp when the fence was signaled. | 49 | * @timestamp: Timestamp when the fence was signaled. |
50 | * @status: Optional, only valid if < 0, must be set before calling | 50 | * @status: Optional, only valid if < 0, must be set before calling |
51 | * fence_signal, indicates that the fence has completed with an error. | 51 | * dma_fence_signal, indicates that the fence has completed with an error. |
52 | * | 52 | * |
53 | * the flags member must be manipulated and read using the appropriate | 53 | * the flags member must be manipulated and read using the appropriate |
54 | * atomic ops (bit_*), so taking the spinlock will not be needed most | 54 | * atomic ops (bit_*), so taking the spinlock will not be needed most |
55 | * of the time. | 55 | * of the time. |
56 | * | 56 | * |
57 | * FENCE_FLAG_SIGNALED_BIT - fence is already signaled | 57 | * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled |
58 | * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* | 58 | * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called |
59 | * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the | 59 | * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the |
60 | * implementer of the fence for its own purposes. Can be used in different | 60 | * implementer of the fence for its own purposes. Can be used in different |
61 | * ways by different fence implementers, so do not rely on this. | 61 | * ways by different fence implementers, so do not rely on this. |
62 | * | 62 | * |
63 | * Since atomic bitops are used, this is not guaranteed to be the case. | 63 | * Since atomic bitops are used, this is not guaranteed to be the case. |
64 | * Particularly, if the bit was set, but fence_signal was called right | 64 | * Particularly, if the bit was set, but dma_fence_signal was called right |
65 | * before this bit was set, it would have been able to set the | 65 | * before this bit was set, it would have been able to set the |
66 | * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. | 66 | * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. |
67 | * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting | 67 | * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting |
68 | * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that | 68 | * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that |
69 | * after fence_signal was called, any enable_signaling call will have either | 69 | * after dma_fence_signal was called, any enable_signaling call will have either |
70 | * been completed, or never called at all. | 70 | * been completed, or never called at all. |
71 | */ | 71 | */ |
72 | struct fence { | 72 | struct dma_fence { |
73 | struct kref refcount; | 73 | struct kref refcount; |
74 | const struct fence_ops *ops; | 74 | const struct dma_fence_ops *ops; |
75 | struct rcu_head rcu; | 75 | struct rcu_head rcu; |
76 | struct list_head cb_list; | 76 | struct list_head cb_list; |
77 | spinlock_t *lock; | 77 | spinlock_t *lock; |
@@ -82,34 +82,35 @@ struct fence { | |||
82 | int status; | 82 | int status; |
83 | }; | 83 | }; |
84 | 84 | ||
85 | enum fence_flag_bits { | 85 | enum dma_fence_flag_bits { |
86 | FENCE_FLAG_SIGNALED_BIT, | 86 | DMA_FENCE_FLAG_SIGNALED_BIT, |
87 | FENCE_FLAG_ENABLE_SIGNAL_BIT, | 87 | DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, |
88 | FENCE_FLAG_USER_BITS, /* must always be last member */ | 88 | DMA_FENCE_FLAG_USER_BITS, /* must always be last member */ |
89 | }; | 89 | }; |
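As the comment above the struct notes, the flags member is manipulated with atomic bitops rather than under fence->lock, and DMA_FENCE_FLAG_USER_BITS marks where implementer-private bits begin. A minimal sketch of claiming such a bit; the MY_FENCE_POLL_BIT name and the helpers around it are hypothetical:

    #include <linux/dma-fence.h>

    /* first implementer-private bit */
    #define MY_FENCE_POLL_BIT	DMA_FENCE_FLAG_USER_BITS

    static void my_mark_polled(struct dma_fence *fence)
    {
            /* atomic bitop; per the comment above, no fence->lock needed */
            set_bit(MY_FENCE_POLL_BIT, &fence->flags);
    }

    static bool my_was_polled(struct dma_fence *fence)
    {
            return test_bit(MY_FENCE_POLL_BIT, &fence->flags);
    }

sync_file.h further down in this diff does exactly this, defining POLL_ENABLED as DMA_FENCE_FLAG_USER_BITS.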
90 | 90 | ||
91 | typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); | 91 | typedef void (*dma_fence_func_t)(struct dma_fence *fence, |
92 | struct dma_fence_cb *cb); | ||
92 | 93 | ||
93 | /** | 94 | /** |
94 | * struct fence_cb - callback for fence_add_callback | 95 | * struct dma_fence_cb - callback for dma_fence_add_callback |
95 | * @node: used by fence_add_callback to append this struct to fence::cb_list | 96 | * @node: used by dma_fence_add_callback to append this struct to fence::cb_list |
96 | * @func: fence_func_t to call | 97 | * @func: dma_fence_func_t to call |
97 | * | 98 | * |
98 | * This struct will be initialized by fence_add_callback, additional | 99 | * This struct will be initialized by dma_fence_add_callback, additional |
99 | * data can be passed along by embedding fence_cb in another struct. | 100 | * data can be passed along by embedding dma_fence_cb in another struct. |
100 | */ | 101 | */ |
101 | struct fence_cb { | 102 | struct dma_fence_cb { |
102 | struct list_head node; | 103 | struct list_head node; |
103 | fence_func_t func; | 104 | dma_fence_func_t func; |
104 | }; | 105 | }; |
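As the kernel-doc above says, additional data reaches a callback by embedding dma_fence_cb in a larger struct and recovering it with container_of(). A minimal sketch, assuming a hypothetical my_waiter wrapper that fires a completion when the fence signals:

    #include <linux/completion.h>
    #include <linux/dma-fence.h>

    struct my_waiter {
            struct dma_fence_cb cb;
            struct completion done;	/* example payload */
    };

    static void my_waiter_func(struct dma_fence *fence, struct dma_fence_cb *cb)
    {
            struct my_waiter *w = container_of(cb, struct my_waiter, cb);

            complete(&w->done);
    }

    /* registration: dma_fence_add_callback(fence, &w->cb, my_waiter_func);
     * returns -ENOENT if the fence had already signaled. */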
105 | 106 | ||
106 | /** | 107 | /** |
107 | * struct fence_ops - operations implemented for fence | 108 | * struct dma_fence_ops - operations implemented for fence |
108 | * @get_driver_name: returns the driver name. | 109 | * @get_driver_name: returns the driver name. |
109 | * @get_timeline_name: return the name of the context this fence belongs to. | 110 | * @get_timeline_name: return the name of the context this fence belongs to. |
110 | * @enable_signaling: enable software signaling of fence. | 111 | * @enable_signaling: enable software signaling of fence. |
111 | * @signaled: [optional] peek whether the fence is signaled, can be null. | 112 | * @signaled: [optional] peek whether the fence is signaled, can be null. |
112 | * @wait: custom wait implementation, or fence_default_wait. | 113 | * @wait: custom wait implementation, or dma_fence_default_wait. |
113 | * @release: [optional] called on destruction of fence, can be null | 114 | * @release: [optional] called on destruction of fence, can be null |
114 | * @fill_driver_data: [optional] callback to fill in free-form debug info | 115 | * @fill_driver_data: [optional] callback to fill in free-form debug info |
115 | * Returns amount of bytes filled, or -errno. | 116 | * Returns amount of bytes filled, or -errno. |
@@ -135,20 +136,20 @@ struct fence_cb { | |||
135 | * fence->status may be set in enable_signaling, but only when false is | 136 | * fence->status may be set in enable_signaling, but only when false is |
136 | * returned. | 137 | * returned. |
137 | * | 138 | * |
138 | * Calling fence_signal before enable_signaling is called allows | 139 | * Calling dma_fence_signal before enable_signaling is called allows |
139 | * for a tiny race window in which enable_signaling is called during, | 140 | * for a tiny race window in which enable_signaling is called during, |
140 | * before, or after fence_signal. To fight this, it is recommended | 141 | * before, or after dma_fence_signal. To fight this, it is recommended |
141 | * that before enable_signaling returns true an extra reference is | 142 | * that before enable_signaling returns true an extra reference is |
142 | * taken on the fence, to be released when the fence is signaled. | 143 | * taken on the fence, to be released when the fence is signaled. |
143 | * This will mean fence_signal will still be called twice, but | 144 | * This will mean dma_fence_signal will still be called twice, but |
144 | * the second time will be a noop since it was already signaled. | 145 | * the second time will be a noop since it was already signaled. |
145 | * | 146 | * |
146 | * Notes on signaled: | 147 | * Notes on signaled: |
147 | * May set fence->status if returning true. | 148 | * May set fence->status if returning true. |
148 | * | 149 | * |
149 | * Notes on wait: | 150 | * Notes on wait: |
150 | * Must not be NULL, set to fence_default_wait for default implementation. | 151 | * Must not be NULL, set to dma_fence_default_wait for default implementation. |
151 | * the fence_default_wait implementation should work for any fence, as long | 152 | * the dma_fence_default_wait implementation should work for any fence, as long |
152 | * as enable_signaling works correctly. | 153 | * as enable_signaling works correctly. |
153 | * | 154 | * |
154 | * Must return -ERESTARTSYS if the wait is intr = true and the wait was | 155 | * Must return -ERESTARTSYS if the wait is intr = true and the wait was |
@@ -163,42 +164,44 @@ struct fence_cb { | |||
163 | * If pointer is set to NULL, kfree will get called instead. | 164 | * If pointer is set to NULL, kfree will get called instead. |
164 | */ | 165 | */ |
165 | 166 | ||
166 | struct fence_ops { | 167 | struct dma_fence_ops { |
167 | const char * (*get_driver_name)(struct fence *fence); | 168 | const char * (*get_driver_name)(struct dma_fence *fence); |
168 | const char * (*get_timeline_name)(struct fence *fence); | 169 | const char * (*get_timeline_name)(struct dma_fence *fence); |
169 | bool (*enable_signaling)(struct fence *fence); | 170 | bool (*enable_signaling)(struct dma_fence *fence); |
170 | bool (*signaled)(struct fence *fence); | 171 | bool (*signaled)(struct dma_fence *fence); |
171 | signed long (*wait)(struct fence *fence, bool intr, signed long timeout); | 172 | signed long (*wait)(struct dma_fence *fence, |
172 | void (*release)(struct fence *fence); | 173 | bool intr, signed long timeout); |
173 | 174 | void (*release)(struct dma_fence *fence); | |
174 | int (*fill_driver_data)(struct fence *fence, void *data, int size); | 175 | |
175 | void (*fence_value_str)(struct fence *fence, char *str, int size); | 176 | int (*fill_driver_data)(struct dma_fence *fence, void *data, int size); |
176 | void (*timeline_value_str)(struct fence *fence, char *str, int size); | 177 | void (*fence_value_str)(struct dma_fence *fence, char *str, int size); |
178 | void (*timeline_value_str)(struct dma_fence *fence, | ||
179 | char *str, int size); | ||
177 | }; | 180 | }; |
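A minimal dma_fence_ops sketch following the notes above: enable_signaling takes the recommended extra reference (released once the fence is signaled), and wait is set to dma_fence_default_wait since it must not be NULL. All my_* names, including the hardware hook, are hypothetical:

    static const char *my_get_driver_name(struct dma_fence *fence)
    {
            return "my_driver";
    }

    static const char *my_get_timeline_name(struct dma_fence *fence)
    {
            return "my_ring";
    }

    static bool my_enable_signaling(struct dma_fence *fence)
    {
            /* extra reference, dropped after dma_fence_signal() */
            dma_fence_get(fence);
            my_hw_arm_completion_irq(fence);	/* hypothetical hw hook */
            return true;
    }

    static const struct dma_fence_ops my_fence_ops = {
            .get_driver_name	= my_get_driver_name,
            .get_timeline_name	= my_get_timeline_name,
            .enable_signaling	= my_enable_signaling,
            .wait			= dma_fence_default_wait,
    };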
178 | 181 | ||
179 | void fence_init(struct fence *fence, const struct fence_ops *ops, | 182 | void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, |
180 | spinlock_t *lock, u64 context, unsigned seqno); | 183 | spinlock_t *lock, u64 context, unsigned seqno); |
181 | 184 | ||
182 | void fence_release(struct kref *kref); | 185 | void dma_fence_release(struct kref *kref); |
183 | void fence_free(struct fence *fence); | 186 | void dma_fence_free(struct dma_fence *fence); |
184 | 187 | ||
185 | /** | 188 | /** |
186 | * fence_put - decreases refcount of the fence | 189 | * dma_fence_put - decreases refcount of the fence |
187 | * @fence: [in] fence to reduce refcount of | 190 | * @fence: [in] fence to reduce refcount of |
188 | */ | 191 | */ |
189 | static inline void fence_put(struct fence *fence) | 192 | static inline void dma_fence_put(struct dma_fence *fence) |
190 | { | 193 | { |
191 | if (fence) | 194 | if (fence) |
192 | kref_put(&fence->refcount, fence_release); | 195 | kref_put(&fence->refcount, dma_fence_release); |
193 | } | 196 | } |
194 | 197 | ||
195 | /** | 198 | /** |
196 | * fence_get - increases refcount of the fence | 199 | * dma_fence_get - increases refcount of the fence |
197 | * @fence: [in] fence to increase refcount of | 200 | * @fence: [in] fence to increase refcount of |
198 | * | 201 | * |
199 | * Returns the same fence, with refcount increased by 1. | 202 | * Returns the same fence, with refcount increased by 1. |
200 | */ | 203 | */ |
201 | static inline struct fence *fence_get(struct fence *fence) | 204 | static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) |
202 | { | 205 | { |
203 | if (fence) | 206 | if (fence) |
204 | kref_get(&fence->refcount); | 207 | kref_get(&fence->refcount); |
@@ -206,12 +209,13 @@ static inline struct fence *fence_get(struct fence *fence) | |||
206 | } | 209 | } |
207 | 210 | ||
208 | /** | 211 | /** |
209 | * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock | 212 | * dma_fence_get_rcu - get a fence from a reservation_object_list with |
213 | * rcu read lock | ||
210 | * @fence: [in] fence to increase refcount of | 214 | * @fence: [in] fence to increase refcount of |
211 | * | 215 | * |
212 | * Function returns NULL if no refcount could be obtained, or the fence. | 216 | * Function returns NULL if no refcount could be obtained, or the fence. |
213 | */ | 217 | */ |
214 | static inline struct fence *fence_get_rcu(struct fence *fence) | 218 | static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) |
215 | { | 219 | { |
216 | if (kref_get_unless_zero(&fence->refcount)) | 220 | if (kref_get_unless_zero(&fence->refcount)) |
217 | return fence; | 221 | return fence; |
@@ -220,7 +224,7 @@ static inline struct fence *fence_get_rcu(struct fence *fence) | |||
220 | } | 224 | } |
221 | 225 | ||
222 | /** | 226 | /** |
223 | * fence_get_rcu_safe - acquire a reference to an RCU tracked fence | 227 | * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence |
224 | * @fence: [in] pointer to fence to increase refcount of | 228 | * @fence: [in] pointer to fence to increase refcount of |
225 | * | 229 | * |
226 | * Function returns NULL if no refcount could be obtained, or the fence. | 230 | * Function returns NULL if no refcount could be obtained, or the fence. |
@@ -235,16 +239,17 @@ static inline struct fence *fence_get_rcu(struct fence *fence) | |||
235 | * | 239 | * |
236 | * The caller is required to hold the RCU read lock. | 240 | * The caller is required to hold the RCU read lock. |
237 | */ | 241 | */ |
238 | static inline struct fence *fence_get_rcu_safe(struct fence * __rcu *fencep) | 242 | static inline struct dma_fence * |
243 | dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) | ||
239 | { | 244 | { |
240 | do { | 245 | do { |
241 | struct fence *fence; | 246 | struct dma_fence *fence; |
242 | 247 | ||
243 | fence = rcu_dereference(*fencep); | 248 | fence = rcu_dereference(*fencep); |
244 | if (!fence || !fence_get_rcu(fence)) | 249 | if (!fence || !dma_fence_get_rcu(fence)) |
245 | return NULL; | 250 | return NULL; |
246 | 251 | ||
247 | /* The atomic_inc_not_zero() inside fence_get_rcu() | 252 | /* The atomic_inc_not_zero() inside dma_fence_get_rcu() |
248 | * provides a full memory barrier upon success (such as now). | 253 | * provides a full memory barrier upon success (such as now). |
249 | * This is paired with the write barrier from assigning | 254 | * This is paired with the write barrier from assigning |
250 | * to the __rcu protected fence pointer so that if that | 255 | * to the __rcu protected fence pointer so that if that |
@@ -261,37 +266,41 @@ static inline struct fence *fence_get_rcu_safe(struct fence * __rcu *fencep) | |||
261 | if (fence == rcu_access_pointer(*fencep)) | 266 | if (fence == rcu_access_pointer(*fencep)) |
262 | return rcu_pointer_handoff(fence); | 267 | return rcu_pointer_handoff(fence); |
263 | 268 | ||
264 | fence_put(fence); | 269 | dma_fence_put(fence); |
265 | } while (1); | 270 | } while (1); |
266 | } | 271 | } |
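A typical caller samples an __rcu fence pointer under the RCU read lock and ends up with either NULL or a properly referenced fence. A sketch against the fence_excl member of the reservation_object converted later in this diff; obj is assumed to be a struct reservation_object:

    struct dma_fence *fence;

    rcu_read_lock();
    fence = dma_fence_get_rcu_safe(&obj->fence_excl);
    rcu_read_unlock();

    if (fence) {
            /* ... use the fence ... */
            dma_fence_put(fence);
    }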
267 | 272 | ||
268 | int fence_signal(struct fence *fence); | 273 | int dma_fence_signal(struct dma_fence *fence); |
269 | int fence_signal_locked(struct fence *fence); | 274 | int dma_fence_signal_locked(struct dma_fence *fence); |
270 | signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); | 275 | signed long dma_fence_default_wait(struct dma_fence *fence, |
271 | int fence_add_callback(struct fence *fence, struct fence_cb *cb, | 276 | bool intr, signed long timeout); |
272 | fence_func_t func); | 277 | int dma_fence_add_callback(struct dma_fence *fence, |
273 | bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); | 278 | struct dma_fence_cb *cb, |
274 | void fence_enable_sw_signaling(struct fence *fence); | 279 | dma_fence_func_t func); |
280 | bool dma_fence_remove_callback(struct dma_fence *fence, | ||
281 | struct dma_fence_cb *cb); | ||
282 | void dma_fence_enable_sw_signaling(struct dma_fence *fence); | ||
275 | 283 | ||
276 | /** | 284 | /** |
277 | * fence_is_signaled_locked - Return an indication if the fence is signaled yet. | 285 | * dma_fence_is_signaled_locked - Return an indication if the fence |
286 | * is signaled yet. | ||
278 | * @fence: [in] the fence to check | 287 | * @fence: [in] the fence to check |
279 | * | 288 | * |
280 | * Returns true if the fence was already signaled, false if not. Since this | 289 | * Returns true if the fence was already signaled, false if not. Since this |
281 | * function doesn't enable signaling, it is not guaranteed to ever return | 290 | * function doesn't enable signaling, it is not guaranteed to ever return |
282 | * true if fence_add_callback, fence_wait or fence_enable_sw_signaling | 291 | * true if dma_fence_add_callback, dma_fence_wait or |
283 | * haven't been called before. | 292 | * dma_fence_enable_sw_signaling haven't been called before. |
284 | * | 293 | * |
285 | * This function requires fence->lock to be held. | 294 | * This function requires fence->lock to be held. |
286 | */ | 295 | */ |
287 | static inline bool | 296 | static inline bool |
288 | fence_is_signaled_locked(struct fence *fence) | 297 | dma_fence_is_signaled_locked(struct dma_fence *fence) |
289 | { | 298 | { |
290 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | 299 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
291 | return true; | 300 | return true; |
292 | 301 | ||
293 | if (fence->ops->signaled && fence->ops->signaled(fence)) { | 302 | if (fence->ops->signaled && fence->ops->signaled(fence)) { |
294 | fence_signal_locked(fence); | 303 | dma_fence_signal_locked(fence); |
295 | return true; | 304 | return true; |
296 | } | 305 | } |
297 | 306 | ||
@@ -299,27 +308,27 @@ fence_is_signaled_locked(struct fence *fence) | |||
299 | } | 308 | } |
300 | 309 | ||
301 | /** | 310 | /** |
302 | * fence_is_signaled - Return an indication if the fence is signaled yet. | 311 | * dma_fence_is_signaled - Return an indication if the fence is signaled yet. |
303 | * @fence: [in] the fence to check | 312 | * @fence: [in] the fence to check |
304 | * | 313 | * |
305 | * Returns true if the fence was already signaled, false if not. Since this | 314 | * Returns true if the fence was already signaled, false if not. Since this |
306 | * function doesn't enable signaling, it is not guaranteed to ever return | 315 | * function doesn't enable signaling, it is not guaranteed to ever return |
307 | * true if fence_add_callback, fence_wait or fence_enable_sw_signaling | 316 | * true if dma_fence_add_callback, dma_fence_wait or |
308 | * haven't been called before. | 317 | * dma_fence_enable_sw_signaling haven't been called before. |
309 | * | 318 | * |
310 | * It's recommended for seqno fences to call fence_signal when the | 319 | * It's recommended for seqno fences to call dma_fence_signal when the |
311 | * operation is complete, it makes it possible to prevent issues from | 320 | * operation is complete, it makes it possible to prevent issues from |
312 | * wraparound between time of issue and time of use by checking the return | 321 | * wraparound between time of issue and time of use by checking the return |
313 | * value of this function before calling hardware-specific wait instructions. | 322 | * value of this function before calling hardware-specific wait instructions. |
314 | */ | 323 | */ |
315 | static inline bool | 324 | static inline bool |
316 | fence_is_signaled(struct fence *fence) | 325 | dma_fence_is_signaled(struct dma_fence *fence) |
317 | { | 326 | { |
318 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | 327 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
319 | return true; | 328 | return true; |
320 | 329 | ||
321 | if (fence->ops->signaled && fence->ops->signaled(fence)) { | 330 | if (fence->ops->signaled && fence->ops->signaled(fence)) { |
322 | fence_signal(fence); | 331 | dma_fence_signal(fence); |
323 | return true; | 332 | return true; |
324 | } | 333 | } |
325 | 334 | ||
@@ -327,14 +336,15 @@ fence_is_signaled(struct fence *fence) | |||
327 | } | 336 | } |
328 | 337 | ||
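Per the wraparound note above, seqno-based users are expected to try the cheap software check before programming a hardware wait. A sketch; my_hw_emit_wait() and ring are hypothetical:

    if (!dma_fence_is_signaled(fence))
            my_hw_emit_wait(ring, fence);	/* hw waits for the seqno */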
329 | /** | 338 | /** |
330 | * fence_is_later - return if f1 is chronologically later than f2 | 339 | * dma_fence_is_later - return if f1 is chronologically later than f2 |
331 | * @f1: [in] the first fence from the same context | 340 | * @f1: [in] the first fence from the same context |
332 | * @f2: [in] the second fence from the same context | 341 | * @f2: [in] the second fence from the same context |
333 | * | 342 | * |
334 | * Returns true if f1 is chronologically later than f2. Both fences must be | 343 | * Returns true if f1 is chronologically later than f2. Both fences must be |
335 | * from the same context, since a seqno is not re-used across contexts. | 344 | * from the same context, since a seqno is not re-used across contexts. |
336 | */ | 345 | */ |
337 | static inline bool fence_is_later(struct fence *f1, struct fence *f2) | 346 | static inline bool dma_fence_is_later(struct dma_fence *f1, |
347 | struct dma_fence *f2) | ||
338 | { | 348 | { |
339 | if (WARN_ON(f1->context != f2->context)) | 349 | if (WARN_ON(f1->context != f2->context)) |
340 | return false; | 350 | return false; |
@@ -343,7 +353,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2) | |||
343 | } | 353 | } |
344 | 354 | ||
345 | /** | 355 | /** |
346 | * fence_later - return the chronologically later fence | 356 | * dma_fence_later - return the chronologically later fence |
347 | * @f1: [in] the first fence from the same context | 357 | * @f1: [in] the first fence from the same context |
348 | * @f2: [in] the second fence from the same context | 358 | * @f2: [in] the second fence from the same context |
349 | * | 359 | * |
@@ -351,28 +361,31 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2) | |||
351 | * signaled last. Both fences must be from the same context, since a seqno is | 361 | * signaled last. Both fences must be from the same context, since a seqno is |
352 | * not re-used across contexts. | 362 | * not re-used across contexts. |
353 | */ | 363 | */ |
354 | static inline struct fence *fence_later(struct fence *f1, struct fence *f2) | 364 | static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, |
365 | struct dma_fence *f2) | ||
355 | { | 366 | { |
356 | if (WARN_ON(f1->context != f2->context)) | 367 | if (WARN_ON(f1->context != f2->context)) |
357 | return NULL; | 368 | return NULL; |
358 | 369 | ||
359 | /* | 370 | /* |
360 | * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been | 371 | * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never |
361 | * set if enable_signaling wasn't called, and enabling that here is | 372 | * have been set if enable_signaling wasn't called, and enabling that |
362 | * overkill. | 373 | * here is overkill. |
363 | */ | 374 | */ |
364 | if (fence_is_later(f1, f2)) | 375 | if (dma_fence_is_later(f1, f2)) |
365 | return fence_is_signaled(f1) ? NULL : f1; | 376 | return dma_fence_is_signaled(f1) ? NULL : f1; |
366 | else | 377 | else |
367 | return fence_is_signaled(f2) ? NULL : f2; | 378 | return dma_fence_is_signaled(f2) ? NULL : f2; |
368 | } | 379 | } |
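Usage sketch: given two fences from the same context, wait only on whichever would signal last, with NULL meaning the later one has already signaled:

    struct dma_fence *later = dma_fence_later(f1, f2);

    if (later)
            dma_fence_wait(later, false);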
369 | 380 | ||
370 | signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); | 381 | signed long dma_fence_wait_timeout(struct dma_fence *, |
371 | signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, | ||
372 | bool intr, signed long timeout); | 382 | bool intr, signed long timeout); |
383 | signed long dma_fence_wait_any_timeout(struct dma_fence **fences, | ||
384 | uint32_t count, | ||
385 | bool intr, signed long timeout); | ||
373 | 386 | ||
374 | /** | 387 | /** |
375 | * fence_wait - sleep until the fence gets signaled | 388 | * dma_fence_wait - sleep until the fence gets signaled |
376 | * @fence: [in] the fence to wait on | 389 | * @fence: [in] the fence to wait on |
377 | * @intr: [in] if true, do an interruptible wait | 390 | * @intr: [in] if true, do an interruptible wait |
378 | * | 391 | * |
@@ -384,41 +397,41 @@ signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, | |||
384 | * directly or indirectly holds a reference to the fence, otherwise the | 397 | * directly or indirectly holds a reference to the fence, otherwise the |
385 | * fence might be freed before return, resulting in undefined behavior. | 398 | * fence might be freed before return, resulting in undefined behavior. |
386 | */ | 399 | */ |
387 | static inline signed long fence_wait(struct fence *fence, bool intr) | 400 | static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) |
388 | { | 401 | { |
389 | signed long ret; | 402 | signed long ret; |
390 | 403 | ||
391 | /* Since fence_wait_timeout cannot timeout with | 404 | /* Since dma_fence_wait_timeout cannot timeout with |
392 | * MAX_SCHEDULE_TIMEOUT, only valid return values are | 405 | * MAX_SCHEDULE_TIMEOUT, only valid return values are |
393 | * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. | 406 | * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. |
394 | */ | 407 | */ |
395 | ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); | 408 | ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); |
396 | 409 | ||
397 | return ret < 0 ? ret : 0; | 410 | return ret < 0 ? ret : 0; |
398 | } | 411 | } |
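Since dma_fence_wait() maps MAX_SCHEDULE_TIMEOUT to 0 as shown above, the only nonzero return an interruptible caller can see is -ERESTARTSYS, which it should propagate so the syscall restarts after signal handling. A sketch of the usual pattern:

    long ret;

    ret = dma_fence_wait(fence, true);	/* interruptible */
    if (ret)
            return ret;	/* -ERESTARTSYS */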
399 | 412 | ||
400 | u64 fence_context_alloc(unsigned num); | 413 | u64 dma_fence_context_alloc(unsigned num); |
401 | 414 | ||
402 | #define FENCE_TRACE(f, fmt, args...) \ | 415 | #define DMA_FENCE_TRACE(f, fmt, args...) \ |
403 | do { \ | 416 | do { \ |
404 | struct fence *__ff = (f); \ | 417 | struct dma_fence *__ff = (f); \ |
405 | if (IS_ENABLED(CONFIG_FENCE_TRACE)) \ | 418 | if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ |
406 | pr_info("f %llu#%u: " fmt, \ | 419 | pr_info("f %llu#%u: " fmt, \ |
407 | __ff->context, __ff->seqno, ##args); \ | 420 | __ff->context, __ff->seqno, ##args); \ |
408 | } while (0) | 421 | } while (0) |
409 | 422 | ||
410 | #define FENCE_WARN(f, fmt, args...) \ | 423 | #define DMA_FENCE_WARN(f, fmt, args...) \ |
411 | do { \ | 424 | do { \ |
412 | struct fence *__ff = (f); \ | 425 | struct dma_fence *__ff = (f); \ |
413 | pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ | 426 | pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ |
414 | ##args); \ | 427 | ##args); \ |
415 | } while (0) | 428 | } while (0) |
416 | 429 | ||
417 | #define FENCE_ERR(f, fmt, args...) \ | 430 | #define DMA_FENCE_ERR(f, fmt, args...) \ |
418 | do { \ | 431 | do { \ |
419 | struct fence *__ff = (f); \ | 432 | struct dma_fence *__ff = (f); \ |
420 | pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ | 433 | pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ |
421 | ##args); \ | 434 | ##args); \ |
422 | } while (0) | 435 | } while (0) |
423 | 436 | ||
424 | #endif /* __LINUX_FENCE_H */ | 437 | #endif /* __LINUX_DMA_FENCE_H */ |
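Putting the renamed API together, a minimal driver-side sketch: embed struct dma_fence in a job, allocate a context once with dma_fence_context_alloc(), initialize per submission, and signal from the completion path. Everything my_* (including my_fence_ops from the ops sketch above) is hypothetical, and the seqno increment is assumed to happen under the driver's submission lock:

    struct my_job {
            struct dma_fence base;
            /* ... driver bookkeeping ... */
    };

    static DEFINE_SPINLOCK(my_fence_lock);
    static u64 my_fence_context;	/* = dma_fence_context_alloc(1) at init */
    static unsigned my_seqno;

    static struct dma_fence *my_submit(struct my_job *job)
    {
            dma_fence_init(&job->base, &my_fence_ops, &my_fence_lock,
                           my_fence_context, ++my_seqno);
            dma_fence_get(&job->base);	/* hardware's reference */
            /* ... kick the hardware ... */
            return &job->base;
    }

    static void my_completion_irq(struct my_job *job)
    {
            dma_fence_signal(&job->base);
            dma_fence_put(&job->base);	/* drop hardware's reference */
    }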
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h deleted file mode 100644 index a44794e508df..000000000000 --- a/include/linux/fence-array.h +++ /dev/null | |||
@@ -1,83 +0,0 @@ | |||
1 | /* | ||
2 | * fence-array: aggregate fences to be waited together | ||
3 | * | ||
4 | * Copyright (C) 2016 Collabora Ltd | ||
5 | * Copyright (C) 2016 Advanced Micro Devices, Inc. | ||
6 | * Authors: | ||
7 | * Gustavo Padovan <gustavo@padovan.org> | ||
8 | * Christian König <christian.koenig@amd.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License version 2 as published by | ||
12 | * the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | */ | ||
19 | |||
20 | #ifndef __LINUX_FENCE_ARRAY_H | ||
21 | #define __LINUX_FENCE_ARRAY_H | ||
22 | |||
23 | #include <linux/fence.h> | ||
24 | |||
25 | /** | ||
26 | * struct fence_array_cb - callback helper for fence array | ||
27 | * @cb: fence callback structure for signaling | ||
28 | * @array: reference to the parent fence array object | ||
29 | */ | ||
30 | struct fence_array_cb { | ||
31 | struct fence_cb cb; | ||
32 | struct fence_array *array; | ||
33 | }; | ||
34 | |||
35 | /** | ||
36 | * struct fence_array - fence to represent an array of fences | ||
37 | * @base: fence base class | ||
38 | * @lock: spinlock for fence handling | ||
39 | * @num_fences: number of fences in the array | ||
40 | * @num_pending: fences in the array still pending | ||
41 | * @fences: array of the fences | ||
42 | */ | ||
43 | struct fence_array { | ||
44 | struct fence base; | ||
45 | |||
46 | spinlock_t lock; | ||
47 | unsigned num_fences; | ||
48 | atomic_t num_pending; | ||
49 | struct fence **fences; | ||
50 | }; | ||
51 | |||
52 | extern const struct fence_ops fence_array_ops; | ||
53 | |||
54 | /** | ||
56 | * fence_is_array - check if a fence is from the array subclass | ||
56 | * | ||
57 | * Return true if it is a fence_array and false otherwise. | ||
58 | */ | ||
59 | static inline bool fence_is_array(struct fence *fence) | ||
60 | { | ||
61 | return fence->ops == &fence_array_ops; | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * to_fence_array - cast a fence to a fence_array | ||
66 | * @fence: fence to cast to a fence_array | ||
67 | * | ||
68 | * Returns NULL if the fence is not a fence_array, | ||
69 | * or the fence_array otherwise. | ||
70 | */ | ||
71 | static inline struct fence_array *to_fence_array(struct fence *fence) | ||
72 | { | ||
73 | if (fence->ops != &fence_array_ops) | ||
74 | return NULL; | ||
75 | |||
76 | return container_of(fence, struct fence_array, base); | ||
77 | } | ||
78 | |||
79 | struct fence_array *fence_array_create(int num_fences, struct fence **fences, | ||
80 | u64 context, unsigned seqno, | ||
81 | bool signal_on_any); | ||
82 | |||
83 | #endif /* __LINUX_FENCE_ARRAY_H */ | ||
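The file is not gone, only renamed: the same API now lives in include/linux/dma-fence-array.h with dma_ prefixes, as the sync_file.h hunk below already includes it. A usage sketch under that assumption, mirroring the deleted fence_array_create() prototype above:

    #include <linux/dma-fence-array.h>

    struct dma_fence_array *array;

    /* signal_on_any=false: the array signals once all fences signal.
     * On success the array takes ownership of @fences and their refs. */
    array = dma_fence_array_create(num_fences, fences,
                                   dma_fence_context_alloc(1), 1, false);
    if (!array)
            return -ENOMEM;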
diff --git a/include/linux/reservation.h b/include/linux/reservation.h index b0f305e77b7f..2e313cca08f0 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #define _LINUX_RESERVATION_H | 40 | #define _LINUX_RESERVATION_H |
41 | 41 | ||
42 | #include <linux/ww_mutex.h> | 42 | #include <linux/ww_mutex.h> |
43 | #include <linux/fence.h> | 43 | #include <linux/dma-fence.h> |
44 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
45 | #include <linux/seqlock.h> | 45 | #include <linux/seqlock.h> |
46 | #include <linux/rcupdate.h> | 46 | #include <linux/rcupdate.h> |
@@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[]; | |||
59 | struct reservation_object_list { | 59 | struct reservation_object_list { |
60 | struct rcu_head rcu; | 60 | struct rcu_head rcu; |
61 | u32 shared_count, shared_max; | 61 | u32 shared_count, shared_max; |
62 | struct fence __rcu *shared[]; | 62 | struct dma_fence __rcu *shared[]; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | /** | 65 | /** |
@@ -74,7 +74,7 @@ struct reservation_object { | |||
74 | struct ww_mutex lock; | 74 | struct ww_mutex lock; |
75 | seqcount_t seq; | 75 | seqcount_t seq; |
76 | 76 | ||
77 | struct fence __rcu *fence_excl; | 77 | struct dma_fence __rcu *fence_excl; |
78 | struct reservation_object_list __rcu *fence; | 78 | struct reservation_object_list __rcu *fence; |
79 | struct reservation_object_list *staged; | 79 | struct reservation_object_list *staged; |
80 | }; | 80 | }; |
@@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj) | |||
107 | { | 107 | { |
108 | int i; | 108 | int i; |
109 | struct reservation_object_list *fobj; | 109 | struct reservation_object_list *fobj; |
110 | struct fence *excl; | 110 | struct dma_fence *excl; |
111 | 111 | ||
112 | /* | 112 | /* |
113 | * This object should be dead and all references must have | 113 | * This object should be dead and all references must have |
@@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj) | |||
115 | */ | 115 | */ |
116 | excl = rcu_dereference_protected(obj->fence_excl, 1); | 116 | excl = rcu_dereference_protected(obj->fence_excl, 1); |
117 | if (excl) | 117 | if (excl) |
118 | fence_put(excl); | 118 | dma_fence_put(excl); |
119 | 119 | ||
120 | fobj = rcu_dereference_protected(obj->fence, 1); | 120 | fobj = rcu_dereference_protected(obj->fence, 1); |
121 | if (fobj) { | 121 | if (fobj) { |
122 | for (i = 0; i < fobj->shared_count; ++i) | 122 | for (i = 0; i < fobj->shared_count; ++i) |
123 | fence_put(rcu_dereference_protected(fobj->shared[i], 1)); | 123 | dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1)); |
124 | 124 | ||
125 | kfree(fobj); | 125 | kfree(fobj); |
126 | } | 126 | } |
@@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj) | |||
155 | * RETURNS | 155 | * RETURNS |
156 | * The exclusive fence or NULL | 156 | * The exclusive fence or NULL |
157 | */ | 157 | */ |
158 | static inline struct fence * | 158 | static inline struct dma_fence * |
159 | reservation_object_get_excl(struct reservation_object *obj) | 159 | reservation_object_get_excl(struct reservation_object *obj) |
160 | { | 160 | { |
161 | return rcu_dereference_protected(obj->fence_excl, | 161 | return rcu_dereference_protected(obj->fence_excl, |
@@ -173,10 +173,10 @@ reservation_object_get_excl(struct reservation_object *obj) | |||
173 | * RETURNS | 173 | * RETURNS |
174 | * The exclusive fence or NULL if none | 174 | * The exclusive fence or NULL if none |
175 | */ | 175 | */ |
176 | static inline struct fence * | 176 | static inline struct dma_fence * |
177 | reservation_object_get_excl_rcu(struct reservation_object *obj) | 177 | reservation_object_get_excl_rcu(struct reservation_object *obj) |
178 | { | 178 | { |
179 | struct fence *fence; | 179 | struct dma_fence *fence; |
180 | unsigned seq; | 180 | unsigned seq; |
181 | retry: | 181 | retry: |
182 | seq = read_seqcount_begin(&obj->seq); | 182 | seq = read_seqcount_begin(&obj->seq); |
@@ -186,22 +186,22 @@ retry: | |||
186 | rcu_read_unlock(); | 186 | rcu_read_unlock(); |
187 | goto retry; | 187 | goto retry; |
188 | } | 188 | } |
189 | fence = fence_get(fence); | 189 | fence = dma_fence_get(fence); |
190 | rcu_read_unlock(); | 190 | rcu_read_unlock(); |
191 | return fence; | 191 | return fence; |
192 | } | 192 | } |
193 | 193 | ||
194 | int reservation_object_reserve_shared(struct reservation_object *obj); | 194 | int reservation_object_reserve_shared(struct reservation_object *obj); |
195 | void reservation_object_add_shared_fence(struct reservation_object *obj, | 195 | void reservation_object_add_shared_fence(struct reservation_object *obj, |
196 | struct fence *fence); | 196 | struct dma_fence *fence); |
197 | 197 | ||
198 | void reservation_object_add_excl_fence(struct reservation_object *obj, | 198 | void reservation_object_add_excl_fence(struct reservation_object *obj, |
199 | struct fence *fence); | 199 | struct dma_fence *fence); |
200 | 200 | ||
201 | int reservation_object_get_fences_rcu(struct reservation_object *obj, | 201 | int reservation_object_get_fences_rcu(struct reservation_object *obj, |
202 | struct fence **pfence_excl, | 202 | struct dma_fence **pfence_excl, |
203 | unsigned *pshared_count, | 203 | unsigned *pshared_count, |
204 | struct fence ***pshared); | 204 | struct dma_fence ***pshared); |
205 | 205 | ||
206 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, | 206 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, |
207 | bool wait_all, bool intr, | 207 | bool wait_all, bool intr, |
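A sketch of the implicit-sync pattern these converted helpers serve: before touching the buffer, wait on every fence attached to the reservation object. This assumes the usual final timeout parameter completes the prototype that is cut off above:

    long ret;

    ret = reservation_object_wait_timeout_rcu(obj, true /* wait_all */,
                                              true /* intr */,
                                              MAX_SCHEDULE_TIMEOUT);
    if (ret < 0)
            return ret;	/* e.g. -ERESTARTSYS */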
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h index a1ba6a5ccdd6..c58c535d12a8 100644 --- a/include/linux/seqno-fence.h +++ b/include/linux/seqno-fence.h | |||
@@ -20,7 +20,7 @@ | |||
20 | #ifndef __LINUX_SEQNO_FENCE_H | 20 | #ifndef __LINUX_SEQNO_FENCE_H |
21 | #define __LINUX_SEQNO_FENCE_H | 21 | #define __LINUX_SEQNO_FENCE_H |
22 | 22 | ||
23 | #include <linux/fence.h> | 23 | #include <linux/dma-fence.h> |
24 | #include <linux/dma-buf.h> | 24 | #include <linux/dma-buf.h> |
25 | 25 | ||
26 | enum seqno_fence_condition { | 26 | enum seqno_fence_condition { |
@@ -29,15 +29,15 @@ enum seqno_fence_condition { | |||
29 | }; | 29 | }; |
30 | 30 | ||
31 | struct seqno_fence { | 31 | struct seqno_fence { |
32 | struct fence base; | 32 | struct dma_fence base; |
33 | 33 | ||
34 | const struct fence_ops *ops; | 34 | const struct dma_fence_ops *ops; |
35 | struct dma_buf *sync_buf; | 35 | struct dma_buf *sync_buf; |
36 | uint32_t seqno_ofs; | 36 | uint32_t seqno_ofs; |
37 | enum seqno_fence_condition condition; | 37 | enum seqno_fence_condition condition; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | extern const struct fence_ops seqno_fence_ops; | 40 | extern const struct dma_fence_ops seqno_fence_ops; |
41 | 41 | ||
42 | /** | 42 | /** |
43 | * to_seqno_fence - cast a fence to a seqno_fence | 43 | * to_seqno_fence - cast a fence to a seqno_fence |
@@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops; | |||
47 | * or the seqno_fence otherwise. | 47 | * or the seqno_fence otherwise. |
48 | */ | 48 | */ |
49 | static inline struct seqno_fence * | 49 | static inline struct seqno_fence * |
50 | to_seqno_fence(struct fence *fence) | 50 | to_seqno_fence(struct dma_fence *fence) |
51 | { | 51 | { |
52 | if (fence->ops != &seqno_fence_ops) | 52 | if (fence->ops != &seqno_fence_ops) |
53 | return NULL; | 53 | return NULL; |
@@ -83,9 +83,9 @@ to_seqno_fence(struct fence *fence) | |||
83 | * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the | 83 | * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the |
84 | * device's vm can be expensive. | 84 | * device's vm can be expensive. |
85 | * | 85 | * |
86 | * It is recommended for creators of seqno_fence to call fence_signal | 86 | * It is recommended for creators of seqno_fence to call dma_fence_signal() |
87 | * before destruction. This will prevent possible issues from wraparound at | 87 | * before destruction. This will prevent possible issues from wraparound at |
88 | * time of issue vs time of check, since users can check fence_is_signaled | 88 | * time of issue vs time of check, since users can check dma_fence_is_signaled() |
89 | * before submitting instructions for the hardware to wait on the fence. | 89 | * before submitting instructions for the hardware to wait on the fence. |
90 | * However, when ops.enable_signaling is not called, it doesn't have to be | 90 | * However, when ops.enable_signaling is not called, it doesn't have to be |
91 | * done as soon as possible, just before there's any real danger of seqno | 91 | * done as soon as possible, just before there's any real danger of seqno |
@@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock, | |||
96 | struct dma_buf *sync_buf, uint32_t context, | 96 | struct dma_buf *sync_buf, uint32_t context, |
97 | uint32_t seqno_ofs, uint32_t seqno, | 97 | uint32_t seqno_ofs, uint32_t seqno, |
98 | enum seqno_fence_condition cond, | 98 | enum seqno_fence_condition cond, |
99 | const struct fence_ops *ops) | 99 | const struct dma_fence_ops *ops) |
100 | { | 100 | { |
101 | BUG_ON(!fence || !sync_buf || !ops); | 101 | BUG_ON(!fence || !sync_buf || !ops); |
102 | BUG_ON(!ops->wait || !ops->enable_signaling || | 102 | BUG_ON(!ops->wait || !ops->enable_signaling || |
103 | !ops->get_driver_name || !ops->get_timeline_name); | 103 | !ops->get_driver_name || !ops->get_timeline_name); |
104 | 104 | ||
105 | /* | 105 | /* |
106 | * ops is used in fence_init for get_driver_name, so needs to be | 106 | * ops is used in dma_fence_init for get_driver_name, so needs to be |
107 | * initialized first | 107 | * initialized first |
108 | */ | 108 | */ |
109 | fence->ops = ops; | 109 | fence->ops = ops; |
110 | fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); | 110 | dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); |
111 | get_dma_buf(sync_buf); | 111 | get_dma_buf(sync_buf); |
112 | fence->sync_buf = sync_buf; | 112 | fence->sync_buf = sync_buf; |
113 | fence->seqno_ofs = seqno_ofs; | 113 | fence->seqno_ofs = seqno_ofs; |
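A seqno_fence_init() call sketch matching the converted signature above; sync_buf, context, seqno_ofs and seqno come from the driver, my_fence_lock and my_fence_ops are the hypothetical names from earlier sketches, and the condition enum value is an assumption from the full header:

    struct seqno_fence *sf = kzalloc(sizeof(*sf), GFP_KERNEL);

    if (!sf)
            return -ENOMEM;

    seqno_fence_init(sf, &my_fence_lock, sync_buf, context,
                     seqno_ofs, seqno, SEQNO_FENCE_WAIT_GEQUAL,
                     &my_fence_ops);	/* takes its own ref on sync_buf */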
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index aa17ccfc2f57..3e3ab84fc4cd 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h | |||
@@ -18,8 +18,8 @@ | |||
18 | #include <linux/ktime.h> | 18 | #include <linux/ktime.h> |
19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/fence.h> | 21 | #include <linux/dma-fence.h> |
22 | #include <linux/fence-array.h> | 22 | #include <linux/dma-fence-array.h> |
23 | 23 | ||
24 | /** | 24 | /** |
25 | * struct sync_file - sync file to export to the userspace | 25 | * struct sync_file - sync file to export to the userspace |
@@ -41,13 +41,13 @@ struct sync_file { | |||
41 | 41 | ||
42 | wait_queue_head_t wq; | 42 | wait_queue_head_t wq; |
43 | 43 | ||
44 | struct fence *fence; | 44 | struct dma_fence *fence; |
45 | struct fence_cb cb; | 45 | struct dma_fence_cb cb; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | #define POLL_ENABLED FENCE_FLAG_USER_BITS | 48 | #define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS |
49 | 49 | ||
50 | struct sync_file *sync_file_create(struct fence *fence); | 50 | struct sync_file *sync_file_create(struct dma_fence *fence); |
51 | struct fence *sync_file_get_fence(int fd); | 51 | struct dma_fence *sync_file_get_fence(int fd); |
52 | 52 | ||
53 | #endif /* _LINUX_SYNC_H */ | 53 | #endif /* _LINUX_SYNC_H */ |
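With the rename in place, exporting a dma_fence to userspace as a sync file fd looks unchanged apart from the types. A sketch using the common fd-install pattern; the struct file member of sync_file is not shown in this hunk and is assumed:

    struct sync_file *sync;
    int fd = get_unused_fd_flags(O_CLOEXEC);

    if (fd < 0)
            return fd;

    sync = sync_file_create(fence);	/* takes its own fence reference */
    if (!sync) {
            put_unused_fd(fd);
            return -ENOMEM;
    }
    fd_install(fd, sync->file);
    return fd;
    /* another driver gets the fence back with sync_file_get_fence(fd) */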
diff --git a/include/trace/events/fence.h b/include/trace/events/dma_fence.h index d6dfa05ba322..1157cb4c3c6f 100644 --- a/include/trace/events/fence.h +++ b/include/trace/events/dma_fence.h | |||
@@ -1,17 +1,17 @@ | |||
1 | #undef TRACE_SYSTEM | 1 | #undef TRACE_SYSTEM |
2 | #define TRACE_SYSTEM fence | 2 | #define TRACE_SYSTEM dma_fence |
3 | 3 | ||
4 | #if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) | 4 | #if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) |
5 | #define _TRACE_FENCE_H | 5 | #define _TRACE_DMA_FENCE_H |
6 | 6 | ||
7 | #include <linux/tracepoint.h> | 7 | #include <linux/tracepoint.h> |
8 | 8 | ||
9 | struct fence; | 9 | struct dma_fence; |
10 | 10 | ||
11 | TRACE_EVENT(fence_annotate_wait_on, | 11 | TRACE_EVENT(dma_fence_annotate_wait_on, |
12 | 12 | ||
13 | /* fence: the fence waiting on f1, f1: the fence to be waited on. */ | 13 | /* fence: the fence waiting on f1, f1: the fence to be waited on. */ |
14 | TP_PROTO(struct fence *fence, struct fence *f1), | 14 | TP_PROTO(struct dma_fence *fence, struct dma_fence *f1), |
15 | 15 | ||
16 | TP_ARGS(fence, f1), | 16 | TP_ARGS(fence, f1), |
17 | 17 | ||
@@ -48,9 +48,9 @@ TRACE_EVENT(fence_annotate_wait_on, | |||
48 | __entry->waiting_context, __entry->waiting_seqno) | 48 | __entry->waiting_context, __entry->waiting_seqno) |
49 | ); | 49 | ); |
50 | 50 | ||
51 | DECLARE_EVENT_CLASS(fence, | 51 | DECLARE_EVENT_CLASS(dma_fence, |
52 | 52 | ||
53 | TP_PROTO(struct fence *fence), | 53 | TP_PROTO(struct dma_fence *fence), |
54 | 54 | ||
55 | TP_ARGS(fence), | 55 | TP_ARGS(fence), |
56 | 56 | ||
@@ -73,56 +73,56 @@ DECLARE_EVENT_CLASS(fence, | |||
73 | __entry->seqno) | 73 | __entry->seqno) |
74 | ); | 74 | ); |
75 | 75 | ||
76 | DEFINE_EVENT(fence, fence_emit, | 76 | DEFINE_EVENT(dma_fence, dma_fence_emit, |
77 | 77 | ||
78 | TP_PROTO(struct fence *fence), | 78 | TP_PROTO(struct dma_fence *fence), |
79 | 79 | ||
80 | TP_ARGS(fence) | 80 | TP_ARGS(fence) |
81 | ); | 81 | ); |
82 | 82 | ||
83 | DEFINE_EVENT(fence, fence_init, | 83 | DEFINE_EVENT(dma_fence, dma_fence_init, |
84 | 84 | ||
85 | TP_PROTO(struct fence *fence), | 85 | TP_PROTO(struct dma_fence *fence), |
86 | 86 | ||
87 | TP_ARGS(fence) | 87 | TP_ARGS(fence) |
88 | ); | 88 | ); |
89 | 89 | ||
90 | DEFINE_EVENT(fence, fence_destroy, | 90 | DEFINE_EVENT(dma_fence, dma_fence_destroy, |
91 | 91 | ||
92 | TP_PROTO(struct fence *fence), | 92 | TP_PROTO(struct dma_fence *fence), |
93 | 93 | ||
94 | TP_ARGS(fence) | 94 | TP_ARGS(fence) |
95 | ); | 95 | ); |
96 | 96 | ||
97 | DEFINE_EVENT(fence, fence_enable_signal, | 97 | DEFINE_EVENT(dma_fence, dma_fence_enable_signal, |
98 | 98 | ||
99 | TP_PROTO(struct fence *fence), | 99 | TP_PROTO(struct dma_fence *fence), |
100 | 100 | ||
101 | TP_ARGS(fence) | 101 | TP_ARGS(fence) |
102 | ); | 102 | ); |
103 | 103 | ||
104 | DEFINE_EVENT(fence, fence_signaled, | 104 | DEFINE_EVENT(dma_fence, dma_fence_signaled, |
105 | 105 | ||
106 | TP_PROTO(struct fence *fence), | 106 | TP_PROTO(struct dma_fence *fence), |
107 | 107 | ||
108 | TP_ARGS(fence) | 108 | TP_ARGS(fence) |
109 | ); | 109 | ); |
110 | 110 | ||
111 | DEFINE_EVENT(fence, fence_wait_start, | 111 | DEFINE_EVENT(dma_fence, dma_fence_wait_start, |
112 | 112 | ||
113 | TP_PROTO(struct fence *fence), | 113 | TP_PROTO(struct dma_fence *fence), |
114 | 114 | ||
115 | TP_ARGS(fence) | 115 | TP_ARGS(fence) |
116 | ); | 116 | ); |
117 | 117 | ||
118 | DEFINE_EVENT(fence, fence_wait_end, | 118 | DEFINE_EVENT(dma_fence, dma_fence_wait_end, |
119 | 119 | ||
120 | TP_PROTO(struct fence *fence), | 120 | TP_PROTO(struct dma_fence *fence), |
121 | 121 | ||
122 | TP_ARGS(fence) | 122 | TP_ARGS(fence) |
123 | ); | 123 | ); |
124 | 124 | ||
125 | #endif /* _TRACE_FENCE_H */ | 125 | #endif /* _TRACE_DMA_FENCE_H */ |
126 | 126 | ||
127 | /* This part must be outside protection */ | 127 | /* This part must be outside protection */ |
128 | #include <trace/define_trace.h> | 128 | #include <trace/define_trace.h> |