author      Daniel Vetter <daniel.vetter@ffwll.ch>   2016-10-28 03:14:08 -0400
committer   Daniel Vetter <daniel.vetter@ffwll.ch>   2016-10-28 03:14:08 -0400
commit      96583ddbec291929880edefa2141c06c63e16aa5 (patch)
tree        6ae6bd3aec381d7461a687e3314bdf949798c554
parent      1353ec3833360ffab479d17781493ead1d38a006 (diff)
parent      fb422950c6cd726fd36eb72a7cf84583440a18a2 (diff)
Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next-queued
Backmerge latest drm-next to pull in the s/fence/dma_fence/ rework,
needed before we merge more i915 fencing patches.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
310 files changed, 10032 insertions, 6107 deletions
diff --git a/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
new file mode 100644
index 000000000000..9409d9c6a260
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
@@ -0,0 +1,33 @@ | |||
1 | Silicon Image SiI8620 HDMI/MHL bridge bindings | ||
2 | |||
3 | Required properties: | ||
4 | - compatible: "sil,sii8620" | ||
5 | - reg: i2c address of the bridge | ||
6 | - cvcc10-supply: Digital Core Supply Voltage (1.0V) | ||
7 | - iovcc18-supply: I/O Supply Voltage (1.8V) | ||
8 | - interrupts, interrupt-parent: interrupt specifier of INT pin | ||
9 | - reset-gpios: gpio specifier of RESET pin | ||
10 | - clocks, clock-names: specification and name of "xtal" clock | ||
11 | - video interfaces: Device node can contain video interface port | ||
12 | node for HDMI encoder according to [1]. | ||
13 | |||
14 | [1]: Documentation/devicetree/bindings/media/video-interfaces.txt | ||
15 | |||
16 | Example: | ||
17 | sii8620@39 { | ||
18 | reg = <0x39>; | ||
19 | compatible = "sil,sii8620"; | ||
20 | cvcc10-supply = <&ldo36_reg>; | ||
21 | iovcc18-supply = <&ldo34_reg>; | ||
22 | interrupt-parent = <&gpf0>; | ||
23 | interrupts = <2 0>; | ||
24 | reset-gpio = <&gpv7 0 0>; | ||
25 | clocks = <&pmu_system_controller 0>; | ||
26 | clock-names = "xtal"; | ||
27 | |||
28 | port { | ||
29 | mhl_to_hdmi: endpoint { | ||
30 | remote-endpoint = <&hdmi_to_mhl>; | ||
31 | }; | ||
32 | }; | ||
33 | }; | ||
diff --git a/Documentation/sync_file.txt b/Documentation/sync_file.txt
index b63a68531afd..269681a6faec 100644
--- a/Documentation/sync_file.txt
+++ b/Documentation/sync_file.txt
@@ -6,7 +6,7 @@ | |||
6 | 6 | ||
7 | This document serves as a guide for device drivers writers on what the | 7 | This document serves as a guide for device drivers writers on what the |
8 | sync_file API is, and how drivers can support it. Sync file is the carrier of | 8 | sync_file API is, and how drivers can support it. Sync file is the carrier of |
9 | the fences(struct fence) that are needed to synchronize between drivers or | 9 | the fences(struct dma_fence) that are needed to synchronize between drivers or |
10 | across process boundaries. | 10 | across process boundaries. |
11 | 11 | ||
12 | The sync_file API is meant to be used to send and receive fence information | 12 | The sync_file API is meant to be used to send and receive fence information |
@@ -32,9 +32,9 @@ in-fences and out-fences | |||
32 | Sync files can go either to or from userspace. When a sync_file is sent from | 32 | Sync files can go either to or from userspace. When a sync_file is sent from |
33 | the driver to userspace we call the fences it contains 'out-fences'. They are | 33 | the driver to userspace we call the fences it contains 'out-fences'. They are |
34 | related to a buffer that the driver is processing or is going to process, so | 34 | related to a buffer that the driver is processing or is going to process, so |
35 | the driver creates an out-fence to be able to notify, through fence_signal(), | 35 | the driver creates an out-fence to be able to notify, through |
36 | when it has finished using (or processing) that buffer. Out-fences are fences | 36 | dma_fence_signal(), when it has finished using (or processing) that buffer. |
37 | that the driver creates. | 37 | Out-fences are fences that the driver creates. |
38 | 38 | ||
39 | On the other hand if the driver receives fence(s) through a sync_file from | 39 | On the other hand if the driver receives fence(s) through a sync_file from |
40 | userspace we call these fence(s) 'in-fences'. Receiveing in-fences means that | 40 | userspace we call these fence(s) 'in-fences'. Receiveing in-fences means that |
@@ -47,7 +47,7 @@ Creating Sync Files | |||
47 | When a driver needs to send an out-fence userspace it creates a sync_file. | 47 | When a driver needs to send an out-fence userspace it creates a sync_file. |
48 | 48 | ||
49 | Interface: | 49 | Interface: |
50 | struct sync_file *sync_file_create(struct fence *fence); | 50 | struct sync_file *sync_file_create(struct dma_fence *fence); |
51 | 51 | ||
52 | The caller pass the out-fence and gets back the sync_file. That is just the | 52 | The caller pass the out-fence and gets back the sync_file. That is just the |
53 | first step, next it needs to install an fd on sync_file->file. So it gets an | 53 | first step, next it needs to install an fd on sync_file->file. So it gets an |
@@ -72,11 +72,11 @@ of the Sync File to the kernel. The kernel can then retrieve the fences | |||
72 | from it. | 72 | from it. |
73 | 73 | ||
74 | Interface: | 74 | Interface: |
75 | struct fence *sync_file_get_fence(int fd); | 75 | struct dma_fence *sync_file_get_fence(int fd); |
76 | 76 | ||
77 | 77 | ||
78 | The returned reference is owned by the caller and must be disposed of | 78 | The returned reference is owned by the caller and must be disposed of |
79 | afterwards using fence_put(). In case of error, a NULL is returned instead. | 79 | afterwards using dma_fence_put(). In case of error, a NULL is returned instead. |
80 | 80 | ||
81 | References: | 81 | References: |
82 | [1] struct sync_file in include/linux/sync_file.h | 82 | [1] struct sync_file in include/linux/sync_file.h |
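For reference, the out-fence/in-fence flow documented above maps onto a short sketch like the one below, using the renamed sync_file_create() and sync_file_get_fence() interfaces. The fence my_fence, the helper names, and the abbreviated error handling are illustrative assumptions, not code from this commit.

	#include <linux/dma-fence.h>
	#include <linux/sync_file.h>
	#include <linux/file.h>
	#include <linux/fcntl.h>

	/* Sketch only: 'my_fence' is an assumed, driver-owned out-fence. */
	static int export_out_fence(struct dma_fence *my_fence)
	{
		struct sync_file *sync_file;
		int fd;

		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0)
			return fd;

		sync_file = sync_file_create(my_fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		return fd;		/* handed to userspace as the out-fence */
	}

	/* Importing an in-fence fd; the caller owns the returned reference
	 * and must dispose of it with dma_fence_put(). NULL on error. */
	static struct dma_fence *import_in_fence(int fd)
	{
		return sync_file_get_fence(fd);
	}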
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fdf44cac08e6..37bf25c6b4a6 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -248,11 +248,11 @@ config DMA_SHARED_BUFFER | |||
248 | APIs extension; the file's descriptor can then be passed on to other | 248 | APIs extension; the file's descriptor can then be passed on to other |
249 | driver. | 249 | driver. |
250 | 250 | ||
251 | config FENCE_TRACE | 251 | config DMA_FENCE_TRACE |
252 | bool "Enable verbose FENCE_TRACE messages" | 252 | bool "Enable verbose DMA_FENCE_TRACE messages" |
253 | depends on DMA_SHARED_BUFFER | 253 | depends on DMA_SHARED_BUFFER |
254 | help | 254 | help |
255 | Enable the FENCE_TRACE printks. This will add extra | 255 | Enable the DMA_FENCE_TRACE printks. This will add extra |
256 | spam to the console log, but will make it easier to diagnose | 256 | spam to the console log, but will make it easier to diagnose |
257 | lockup related problems for dma-buffers shared across multiple | 257 | lockup related problems for dma-buffers shared across multiple |
258 | devices. | 258 | devices. |
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 2585821b24ab..ed3b785bae37 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -7,7 +7,7 @@ config SYNC_FILE | |||
7 | select DMA_SHARED_BUFFER | 7 | select DMA_SHARED_BUFFER |
8 | ---help--- | 8 | ---help--- |
9 | The Sync File Framework adds explicit syncronization via | 9 | The Sync File Framework adds explicit syncronization via |
10 | userspace. It enables send/receive 'struct fence' objects to/from | 10 | userspace. It enables send/receive 'struct dma_fence' objects to/from |
11 | userspace via Sync File fds for synchronization between drivers via | 11 | userspace via Sync File fds for synchronization between drivers via |
12 | userspace components. It has been ported from Android. | 12 | userspace components. It has been ported from Android. |
13 | 13 | ||
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 210a10bfad2b..c33bf8863147 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,3 @@ | |||
1 | obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o | 1 | obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o |
2 | obj-$(CONFIG_SYNC_FILE) += sync_file.o | 2 | obj-$(CONFIG_SYNC_FILE) += sync_file.o |
3 | obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o | 3 | obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o |
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cf04d249a6a4..e72e64484131 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/fs.h> | 25 | #include <linux/fs.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/dma-buf.h> | 27 | #include <linux/dma-buf.h> |
28 | #include <linux/fence.h> | 28 | #include <linux/dma-fence.h> |
29 | #include <linux/anon_inodes.h> | 29 | #include <linux/anon_inodes.h> |
30 | #include <linux/export.h> | 30 | #include <linux/export.h> |
31 | #include <linux/debugfs.h> | 31 | #include <linux/debugfs.h> |
@@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) | |||
124 | return base + offset; | 124 | return base + offset; |
125 | } | 125 | } |
126 | 126 | ||
127 | static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb) | 127 | static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
128 | { | 128 | { |
129 | struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; | 129 | struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; |
130 | unsigned long flags; | 130 | unsigned long flags; |
@@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll) | |||
140 | struct dma_buf *dmabuf; | 140 | struct dma_buf *dmabuf; |
141 | struct reservation_object *resv; | 141 | struct reservation_object *resv; |
142 | struct reservation_object_list *fobj; | 142 | struct reservation_object_list *fobj; |
143 | struct fence *fence_excl; | 143 | struct dma_fence *fence_excl; |
144 | unsigned long events; | 144 | unsigned long events; |
145 | unsigned shared_count, seq; | 145 | unsigned shared_count, seq; |
146 | 146 | ||
@@ -187,20 +187,20 @@ retry: | |||
187 | spin_unlock_irq(&dmabuf->poll.lock); | 187 | spin_unlock_irq(&dmabuf->poll.lock); |
188 | 188 | ||
189 | if (events & pevents) { | 189 | if (events & pevents) { |
190 | if (!fence_get_rcu(fence_excl)) { | 190 | if (!dma_fence_get_rcu(fence_excl)) { |
191 | /* force a recheck */ | 191 | /* force a recheck */ |
192 | events &= ~pevents; | 192 | events &= ~pevents; |
193 | dma_buf_poll_cb(NULL, &dcb->cb); | 193 | dma_buf_poll_cb(NULL, &dcb->cb); |
194 | } else if (!fence_add_callback(fence_excl, &dcb->cb, | 194 | } else if (!dma_fence_add_callback(fence_excl, &dcb->cb, |
195 | dma_buf_poll_cb)) { | 195 | dma_buf_poll_cb)) { |
196 | events &= ~pevents; | 196 | events &= ~pevents; |
197 | fence_put(fence_excl); | 197 | dma_fence_put(fence_excl); |
198 | } else { | 198 | } else { |
199 | /* | 199 | /* |
200 | * No callback queued, wake up any additional | 200 | * No callback queued, wake up any additional |
201 | * waiters. | 201 | * waiters. |
202 | */ | 202 | */ |
203 | fence_put(fence_excl); | 203 | dma_fence_put(fence_excl); |
204 | dma_buf_poll_cb(NULL, &dcb->cb); | 204 | dma_buf_poll_cb(NULL, &dcb->cb); |
205 | } | 205 | } |
206 | } | 206 | } |
@@ -222,9 +222,9 @@ retry: | |||
222 | goto out; | 222 | goto out; |
223 | 223 | ||
224 | for (i = 0; i < shared_count; ++i) { | 224 | for (i = 0; i < shared_count; ++i) { |
225 | struct fence *fence = rcu_dereference(fobj->shared[i]); | 225 | struct dma_fence *fence = rcu_dereference(fobj->shared[i]); |
226 | 226 | ||
227 | if (!fence_get_rcu(fence)) { | 227 | if (!dma_fence_get_rcu(fence)) { |
228 | /* | 228 | /* |
229 | * fence refcount dropped to zero, this means | 229 | * fence refcount dropped to zero, this means |
230 | * that fobj has been freed | 230 | * that fobj has been freed |
@@ -235,13 +235,13 @@ retry: | |||
235 | dma_buf_poll_cb(NULL, &dcb->cb); | 235 | dma_buf_poll_cb(NULL, &dcb->cb); |
236 | break; | 236 | break; |
237 | } | 237 | } |
238 | if (!fence_add_callback(fence, &dcb->cb, | 238 | if (!dma_fence_add_callback(fence, &dcb->cb, |
239 | dma_buf_poll_cb)) { | 239 | dma_buf_poll_cb)) { |
240 | fence_put(fence); | 240 | dma_fence_put(fence); |
241 | events &= ~POLLOUT; | 241 | events &= ~POLLOUT; |
242 | break; | 242 | break; |
243 | } | 243 | } |
244 | fence_put(fence); | 244 | dma_fence_put(fence); |
245 | } | 245 | } |
246 | 246 | ||
247 | /* No callback queued, wake up any additional waiters. */ | 247 | /* No callback queued, wake up any additional waiters. */ |
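The poll path above combines the renamed reference and callback helpers. The same dma_fence_get_rcu() / dma_fence_add_callback() / dma_fence_put() pattern in isolation might look like this hedged sketch; the callback and helper names are hypothetical, and 'fence' is assumed to be an RCU-protected pointer read under rcu_read_lock().

	static void my_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		/* runs once the fence signals, possibly from irq context */
	}

	static void watch_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		if (!dma_fence_get_rcu(fence))
			return;	/* refcount already hit zero, force a recheck instead */

		if (dma_fence_add_callback(fence, cb, my_fence_cb)) {
			/* -ENOENT: already signaled, run the completion path directly */
			my_fence_cb(fence, cb);
		}

		/* per the sync_file documentation, the fence creator keeps the
		 * fence alive until it signals, so the temporary reference can
		 * be dropped here */
		dma_fence_put(fence);
	}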
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/dma-fence-array.c
index f1989fcaf354..67eb7c8fb88c 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * fence-array: aggregate fences to be waited together | 2 | * dma-fence-array: aggregate fences to be waited together |
3 | * | 3 | * |
4 | * Copyright (C) 2016 Collabora Ltd | 4 | * Copyright (C) 2016 Collabora Ltd |
5 | * Copyright (C) 2016 Advanced Micro Devices, Inc. | 5 | * Copyright (C) 2016 Advanced Micro Devices, Inc. |
@@ -19,35 +19,34 @@ | |||
19 | 19 | ||
20 | #include <linux/export.h> | 20 | #include <linux/export.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/fence-array.h> | 22 | #include <linux/dma-fence-array.h> |
23 | 23 | ||
24 | static void fence_array_cb_func(struct fence *f, struct fence_cb *cb); | 24 | static const char *dma_fence_array_get_driver_name(struct dma_fence *fence) |
25 | |||
26 | static const char *fence_array_get_driver_name(struct fence *fence) | ||
27 | { | 25 | { |
28 | return "fence_array"; | 26 | return "dma_fence_array"; |
29 | } | 27 | } |
30 | 28 | ||
31 | static const char *fence_array_get_timeline_name(struct fence *fence) | 29 | static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence) |
32 | { | 30 | { |
33 | return "unbound"; | 31 | return "unbound"; |
34 | } | 32 | } |
35 | 33 | ||
36 | static void fence_array_cb_func(struct fence *f, struct fence_cb *cb) | 34 | static void dma_fence_array_cb_func(struct dma_fence *f, |
35 | struct dma_fence_cb *cb) | ||
37 | { | 36 | { |
38 | struct fence_array_cb *array_cb = | 37 | struct dma_fence_array_cb *array_cb = |
39 | container_of(cb, struct fence_array_cb, cb); | 38 | container_of(cb, struct dma_fence_array_cb, cb); |
40 | struct fence_array *array = array_cb->array; | 39 | struct dma_fence_array *array = array_cb->array; |
41 | 40 | ||
42 | if (atomic_dec_and_test(&array->num_pending)) | 41 | if (atomic_dec_and_test(&array->num_pending)) |
43 | fence_signal(&array->base); | 42 | dma_fence_signal(&array->base); |
44 | fence_put(&array->base); | 43 | dma_fence_put(&array->base); |
45 | } | 44 | } |
46 | 45 | ||
47 | static bool fence_array_enable_signaling(struct fence *fence) | 46 | static bool dma_fence_array_enable_signaling(struct dma_fence *fence) |
48 | { | 47 | { |
49 | struct fence_array *array = to_fence_array(fence); | 48 | struct dma_fence_array *array = to_dma_fence_array(fence); |
50 | struct fence_array_cb *cb = (void *)(&array[1]); | 49 | struct dma_fence_array_cb *cb = (void *)(&array[1]); |
51 | unsigned i; | 50 | unsigned i; |
52 | 51 | ||
53 | for (i = 0; i < array->num_fences; ++i) { | 52 | for (i = 0; i < array->num_fences; ++i) { |
@@ -60,10 +59,10 @@ static bool fence_array_enable_signaling(struct fence *fence) | |||
60 | * until we signal the array as complete (but that is now | 59 | * until we signal the array as complete (but that is now |
61 | * insufficient). | 60 | * insufficient). |
62 | */ | 61 | */ |
63 | fence_get(&array->base); | 62 | dma_fence_get(&array->base); |
64 | if (fence_add_callback(array->fences[i], &cb[i].cb, | 63 | if (dma_fence_add_callback(array->fences[i], &cb[i].cb, |
65 | fence_array_cb_func)) { | 64 | dma_fence_array_cb_func)) { |
66 | fence_put(&array->base); | 65 | dma_fence_put(&array->base); |
67 | if (atomic_dec_and_test(&array->num_pending)) | 66 | if (atomic_dec_and_test(&array->num_pending)) |
68 | return false; | 67 | return false; |
69 | } | 68 | } |
@@ -72,69 +71,71 @@ static bool fence_array_enable_signaling(struct fence *fence) | |||
72 | return true; | 71 | return true; |
73 | } | 72 | } |
74 | 73 | ||
75 | static bool fence_array_signaled(struct fence *fence) | 74 | static bool dma_fence_array_signaled(struct dma_fence *fence) |
76 | { | 75 | { |
77 | struct fence_array *array = to_fence_array(fence); | 76 | struct dma_fence_array *array = to_dma_fence_array(fence); |
78 | 77 | ||
79 | return atomic_read(&array->num_pending) <= 0; | 78 | return atomic_read(&array->num_pending) <= 0; |
80 | } | 79 | } |
81 | 80 | ||
82 | static void fence_array_release(struct fence *fence) | 81 | static void dma_fence_array_release(struct dma_fence *fence) |
83 | { | 82 | { |
84 | struct fence_array *array = to_fence_array(fence); | 83 | struct dma_fence_array *array = to_dma_fence_array(fence); |
85 | unsigned i; | 84 | unsigned i; |
86 | 85 | ||
87 | for (i = 0; i < array->num_fences; ++i) | 86 | for (i = 0; i < array->num_fences; ++i) |
88 | fence_put(array->fences[i]); | 87 | dma_fence_put(array->fences[i]); |
89 | 88 | ||
90 | kfree(array->fences); | 89 | kfree(array->fences); |
91 | fence_free(fence); | 90 | dma_fence_free(fence); |
92 | } | 91 | } |
93 | 92 | ||
94 | const struct fence_ops fence_array_ops = { | 93 | const struct dma_fence_ops dma_fence_array_ops = { |
95 | .get_driver_name = fence_array_get_driver_name, | 94 | .get_driver_name = dma_fence_array_get_driver_name, |
96 | .get_timeline_name = fence_array_get_timeline_name, | 95 | .get_timeline_name = dma_fence_array_get_timeline_name, |
97 | .enable_signaling = fence_array_enable_signaling, | 96 | .enable_signaling = dma_fence_array_enable_signaling, |
98 | .signaled = fence_array_signaled, | 97 | .signaled = dma_fence_array_signaled, |
99 | .wait = fence_default_wait, | 98 | .wait = dma_fence_default_wait, |
100 | .release = fence_array_release, | 99 | .release = dma_fence_array_release, |
101 | }; | 100 | }; |
102 | EXPORT_SYMBOL(fence_array_ops); | 101 | EXPORT_SYMBOL(dma_fence_array_ops); |
103 | 102 | ||
104 | /** | 103 | /** |
105 | * fence_array_create - Create a custom fence array | 104 | * dma_fence_array_create - Create a custom fence array |
106 | * @num_fences: [in] number of fences to add in the array | 105 | * @num_fences: [in] number of fences to add in the array |
107 | * @fences: [in] array containing the fences | 106 | * @fences: [in] array containing the fences |
108 | * @context: [in] fence context to use | 107 | * @context: [in] fence context to use |
109 | * @seqno: [in] sequence number to use | 108 | * @seqno: [in] sequence number to use |
110 | * @signal_on_any: [in] signal on any fence in the array | 109 | * @signal_on_any: [in] signal on any fence in the array |
111 | * | 110 | * |
112 | * Allocate a fence_array object and initialize the base fence with fence_init(). | 111 | * Allocate a dma_fence_array object and initialize the base fence with |
112 | * dma_fence_init(). | ||
113 | * In case of error it returns NULL. | 113 | * In case of error it returns NULL. |
114 | * | 114 | * |
115 | * The caller should allocate the fences array with num_fences size | 115 | * The caller should allocate the fences array with num_fences size |
116 | * and fill it with the fences it wants to add to the object. Ownership of this | 116 | * and fill it with the fences it wants to add to the object. Ownership of this |
117 | * array is taken and fence_put() is used on each fence on release. | 117 | * array is taken and dma_fence_put() is used on each fence on release. |
118 | * | 118 | * |
119 | * If @signal_on_any is true the fence array signals if any fence in the array | 119 | * If @signal_on_any is true the fence array signals if any fence in the array |
120 | * signals, otherwise it signals when all fences in the array signal. | 120 | * signals, otherwise it signals when all fences in the array signal. |
121 | */ | 121 | */ |
122 | struct fence_array *fence_array_create(int num_fences, struct fence **fences, | 122 | struct dma_fence_array *dma_fence_array_create(int num_fences, |
123 | u64 context, unsigned seqno, | 123 | struct dma_fence **fences, |
124 | bool signal_on_any) | 124 | u64 context, unsigned seqno, |
125 | bool signal_on_any) | ||
125 | { | 126 | { |
126 | struct fence_array *array; | 127 | struct dma_fence_array *array; |
127 | size_t size = sizeof(*array); | 128 | size_t size = sizeof(*array); |
128 | 129 | ||
129 | /* Allocate the callback structures behind the array. */ | 130 | /* Allocate the callback structures behind the array. */ |
130 | size += num_fences * sizeof(struct fence_array_cb); | 131 | size += num_fences * sizeof(struct dma_fence_array_cb); |
131 | array = kzalloc(size, GFP_KERNEL); | 132 | array = kzalloc(size, GFP_KERNEL); |
132 | if (!array) | 133 | if (!array) |
133 | return NULL; | 134 | return NULL; |
134 | 135 | ||
135 | spin_lock_init(&array->lock); | 136 | spin_lock_init(&array->lock); |
136 | fence_init(&array->base, &fence_array_ops, &array->lock, | 137 | dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock, |
137 | context, seqno); | 138 | context, seqno); |
138 | 139 | ||
139 | array->num_fences = num_fences; | 140 | array->num_fences = num_fences; |
140 | atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences); | 141 | atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences); |
@@ -142,4 +143,4 @@ struct fence_array *fence_array_create(int num_fences, struct fence **fences, | |||
142 | 143 | ||
143 | return array; | 144 | return array; |
144 | } | 145 | } |
145 | EXPORT_SYMBOL(fence_array_create); | 146 | EXPORT_SYMBOL(dma_fence_array_create); |
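The kernel-doc for dma_fence_array_create() above implies a usage pattern along these lines: a hedged sketch that merges two already-referenced fences 'a' and 'b' into one fence which signals once both have signaled. The helper name is illustrative; ownership of the fences array and of the references passes to the dma_fence_array on success.

	#include <linux/dma-fence-array.h>
	#include <linux/slab.h>

	static struct dma_fence *merge_two_fences(struct dma_fence *a,
						  struct dma_fence *b)
	{
		struct dma_fence_array *array;
		struct dma_fence **fences;

		fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
		if (!fences)
			return NULL;

		fences[0] = a;
		fences[1] = b;

		/* signal_on_any = false: signal only when all fences signal */
		array = dma_fence_array_create(2, fences,
					       dma_fence_context_alloc(1), 1,
					       false);
		if (!array) {
			kfree(fences);
			return NULL;
		}

		return &array->base;
	}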
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/dma-fence.c
index 4d51f9e83fa8..3a7bf009c21c 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -21,13 +21,13 @@ | |||
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/export.h> | 22 | #include <linux/export.h> |
23 | #include <linux/atomic.h> | 23 | #include <linux/atomic.h> |
24 | #include <linux/fence.h> | 24 | #include <linux/dma-fence.h> |
25 | 25 | ||
26 | #define CREATE_TRACE_POINTS | 26 | #define CREATE_TRACE_POINTS |
27 | #include <trace/events/fence.h> | 27 | #include <trace/events/dma_fence.h> |
28 | 28 | ||
29 | EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on); | 29 | EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on); |
30 | EXPORT_TRACEPOINT_SYMBOL(fence_emit); | 30 | EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * fence context counter: each execution context should have its own | 33 | * fence context counter: each execution context should have its own |
@@ -35,39 +35,41 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit); | |||
35 | * context or not. One device can have multiple separate contexts, | 35 | * context or not. One device can have multiple separate contexts, |
36 | * and they're used if some engine can run independently of another. | 36 | * and they're used if some engine can run independently of another. |
37 | */ | 37 | */ |
38 | static atomic64_t fence_context_counter = ATOMIC64_INIT(0); | 38 | static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0); |
39 | 39 | ||
40 | /** | 40 | /** |
41 | * fence_context_alloc - allocate an array of fence contexts | 41 | * dma_fence_context_alloc - allocate an array of fence contexts |
42 | * @num: [in] amount of contexts to allocate | 42 | * @num: [in] amount of contexts to allocate |
43 | * | 43 | * |
44 | * This function will return the first index of the number of fences allocated. | 44 | * This function will return the first index of the number of fences allocated. |
45 | * The fence context is used for setting fence->context to a unique number. | 45 | * The fence context is used for setting fence->context to a unique number. |
46 | */ | 46 | */ |
47 | u64 fence_context_alloc(unsigned num) | 47 | u64 dma_fence_context_alloc(unsigned num) |
48 | { | 48 | { |
49 | BUG_ON(!num); | 49 | BUG_ON(!num); |
50 | return atomic64_add_return(num, &fence_context_counter) - num; | 50 | return atomic64_add_return(num, &dma_fence_context_counter) - num; |
51 | } | 51 | } |
52 | EXPORT_SYMBOL(fence_context_alloc); | 52 | EXPORT_SYMBOL(dma_fence_context_alloc); |
53 | 53 | ||
54 | /** | 54 | /** |
55 | * fence_signal_locked - signal completion of a fence | 55 | * dma_fence_signal_locked - signal completion of a fence |
56 | * @fence: the fence to signal | 56 | * @fence: the fence to signal |
57 | * | 57 | * |
58 | * Signal completion for software callbacks on a fence, this will unblock | 58 | * Signal completion for software callbacks on a fence, this will unblock |
59 | * fence_wait() calls and run all the callbacks added with | 59 | * dma_fence_wait() calls and run all the callbacks added with |
60 | * fence_add_callback(). Can be called multiple times, but since a fence | 60 | * dma_fence_add_callback(). Can be called multiple times, but since a fence |
61 | * can only go from unsignaled to signaled state, it will only be effective | 61 | * can only go from unsignaled to signaled state, it will only be effective |
62 | * the first time. | 62 | * the first time. |
63 | * | 63 | * |
64 | * Unlike fence_signal, this function must be called with fence->lock held. | 64 | * Unlike dma_fence_signal, this function must be called with fence->lock held. |
65 | */ | 65 | */ |
66 | int fence_signal_locked(struct fence *fence) | 66 | int dma_fence_signal_locked(struct dma_fence *fence) |
67 | { | 67 | { |
68 | struct fence_cb *cur, *tmp; | 68 | struct dma_fence_cb *cur, *tmp; |
69 | int ret = 0; | 69 | int ret = 0; |
70 | 70 | ||
71 | lockdep_assert_held(fence->lock); | ||
72 | |||
71 | if (WARN_ON(!fence)) | 73 | if (WARN_ON(!fence)) |
72 | return -EINVAL; | 74 | return -EINVAL; |
73 | 75 | ||
@@ -76,15 +78,15 @@ int fence_signal_locked(struct fence *fence) | |||
76 | smp_mb__before_atomic(); | 78 | smp_mb__before_atomic(); |
77 | } | 79 | } |
78 | 80 | ||
79 | if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { | 81 | if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { |
80 | ret = -EINVAL; | 82 | ret = -EINVAL; |
81 | 83 | ||
82 | /* | 84 | /* |
83 | * we might have raced with the unlocked fence_signal, | 85 | * we might have raced with the unlocked dma_fence_signal, |
84 | * still run through all callbacks | 86 | * still run through all callbacks |
85 | */ | 87 | */ |
86 | } else | 88 | } else |
87 | trace_fence_signaled(fence); | 89 | trace_dma_fence_signaled(fence); |
88 | 90 | ||
89 | list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { | 91 | list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { |
90 | list_del_init(&cur->node); | 92 | list_del_init(&cur->node); |
@@ -92,19 +94,19 @@ int fence_signal_locked(struct fence *fence) | |||
92 | } | 94 | } |
93 | return ret; | 95 | return ret; |
94 | } | 96 | } |
95 | EXPORT_SYMBOL(fence_signal_locked); | 97 | EXPORT_SYMBOL(dma_fence_signal_locked); |
96 | 98 | ||
97 | /** | 99 | /** |
98 | * fence_signal - signal completion of a fence | 100 | * dma_fence_signal - signal completion of a fence |
99 | * @fence: the fence to signal | 101 | * @fence: the fence to signal |
100 | * | 102 | * |
101 | * Signal completion for software callbacks on a fence, this will unblock | 103 | * Signal completion for software callbacks on a fence, this will unblock |
102 | * fence_wait() calls and run all the callbacks added with | 104 | * dma_fence_wait() calls and run all the callbacks added with |
103 | * fence_add_callback(). Can be called multiple times, but since a fence | 105 | * dma_fence_add_callback(). Can be called multiple times, but since a fence |
104 | * can only go from unsignaled to signaled state, it will only be effective | 106 | * can only go from unsignaled to signaled state, it will only be effective |
105 | * the first time. | 107 | * the first time. |
106 | */ | 108 | */ |
107 | int fence_signal(struct fence *fence) | 109 | int dma_fence_signal(struct dma_fence *fence) |
108 | { | 110 | { |
109 | unsigned long flags; | 111 | unsigned long flags; |
110 | 112 | ||
@@ -116,13 +118,13 @@ int fence_signal(struct fence *fence) | |||
116 | smp_mb__before_atomic(); | 118 | smp_mb__before_atomic(); |
117 | } | 119 | } |
118 | 120 | ||
119 | if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | 121 | if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
120 | return -EINVAL; | 122 | return -EINVAL; |
121 | 123 | ||
122 | trace_fence_signaled(fence); | 124 | trace_dma_fence_signaled(fence); |
123 | 125 | ||
124 | if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { | 126 | if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { |
125 | struct fence_cb *cur, *tmp; | 127 | struct dma_fence_cb *cur, *tmp; |
126 | 128 | ||
127 | spin_lock_irqsave(fence->lock, flags); | 129 | spin_lock_irqsave(fence->lock, flags); |
128 | list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { | 130 | list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { |
@@ -133,10 +135,10 @@ int fence_signal(struct fence *fence) | |||
133 | } | 135 | } |
134 | return 0; | 136 | return 0; |
135 | } | 137 | } |
136 | EXPORT_SYMBOL(fence_signal); | 138 | EXPORT_SYMBOL(dma_fence_signal); |
137 | 139 | ||
138 | /** | 140 | /** |
139 | * fence_wait_timeout - sleep until the fence gets signaled | 141 | * dma_fence_wait_timeout - sleep until the fence gets signaled |
140 | * or until timeout elapses | 142 | * or until timeout elapses |
141 | * @fence: [in] the fence to wait on | 143 | * @fence: [in] the fence to wait on |
142 | * @intr: [in] if true, do an interruptible wait | 144 | * @intr: [in] if true, do an interruptible wait |
@@ -152,7 +154,7 @@ EXPORT_SYMBOL(fence_signal); | |||
152 | * freed before return, resulting in undefined behavior. | 154 | * freed before return, resulting in undefined behavior. |
153 | */ | 155 | */ |
154 | signed long | 156 | signed long |
155 | fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) | 157 | dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) |
156 | { | 158 | { |
157 | signed long ret; | 159 | signed long ret; |
158 | 160 | ||
@@ -160,70 +162,71 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) | |||
160 | return -EINVAL; | 162 | return -EINVAL; |
161 | 163 | ||
162 | if (timeout == 0) | 164 | if (timeout == 0) |
163 | return fence_is_signaled(fence); | 165 | return dma_fence_is_signaled(fence); |
164 | 166 | ||
165 | trace_fence_wait_start(fence); | 167 | trace_dma_fence_wait_start(fence); |
166 | ret = fence->ops->wait(fence, intr, timeout); | 168 | ret = fence->ops->wait(fence, intr, timeout); |
167 | trace_fence_wait_end(fence); | 169 | trace_dma_fence_wait_end(fence); |
168 | return ret; | 170 | return ret; |
169 | } | 171 | } |
170 | EXPORT_SYMBOL(fence_wait_timeout); | 172 | EXPORT_SYMBOL(dma_fence_wait_timeout); |
171 | 173 | ||
172 | void fence_release(struct kref *kref) | 174 | void dma_fence_release(struct kref *kref) |
173 | { | 175 | { |
174 | struct fence *fence = | 176 | struct dma_fence *fence = |
175 | container_of(kref, struct fence, refcount); | 177 | container_of(kref, struct dma_fence, refcount); |
176 | 178 | ||
177 | trace_fence_destroy(fence); | 179 | trace_dma_fence_destroy(fence); |
178 | 180 | ||
179 | BUG_ON(!list_empty(&fence->cb_list)); | 181 | BUG_ON(!list_empty(&fence->cb_list)); |
180 | 182 | ||
181 | if (fence->ops->release) | 183 | if (fence->ops->release) |
182 | fence->ops->release(fence); | 184 | fence->ops->release(fence); |
183 | else | 185 | else |
184 | fence_free(fence); | 186 | dma_fence_free(fence); |
185 | } | 187 | } |
186 | EXPORT_SYMBOL(fence_release); | 188 | EXPORT_SYMBOL(dma_fence_release); |
187 | 189 | ||
188 | void fence_free(struct fence *fence) | 190 | void dma_fence_free(struct dma_fence *fence) |
189 | { | 191 | { |
190 | kfree_rcu(fence, rcu); | 192 | kfree_rcu(fence, rcu); |
191 | } | 193 | } |
192 | EXPORT_SYMBOL(fence_free); | 194 | EXPORT_SYMBOL(dma_fence_free); |
193 | 195 | ||
194 | /** | 196 | /** |
195 | * fence_enable_sw_signaling - enable signaling on fence | 197 | * dma_fence_enable_sw_signaling - enable signaling on fence |
196 | * @fence: [in] the fence to enable | 198 | * @fence: [in] the fence to enable |
197 | * | 199 | * |
198 | * this will request for sw signaling to be enabled, to make the fence | 200 | * this will request for sw signaling to be enabled, to make the fence |
199 | * complete as soon as possible | 201 | * complete as soon as possible |
200 | */ | 202 | */ |
201 | void fence_enable_sw_signaling(struct fence *fence) | 203 | void dma_fence_enable_sw_signaling(struct dma_fence *fence) |
202 | { | 204 | { |
203 | unsigned long flags; | 205 | unsigned long flags; |
204 | 206 | ||
205 | if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) && | 207 | if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, |
206 | !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { | 208 | &fence->flags) && |
207 | trace_fence_enable_signal(fence); | 209 | !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { |
210 | trace_dma_fence_enable_signal(fence); | ||
208 | 211 | ||
209 | spin_lock_irqsave(fence->lock, flags); | 212 | spin_lock_irqsave(fence->lock, flags); |
210 | 213 | ||
211 | if (!fence->ops->enable_signaling(fence)) | 214 | if (!fence->ops->enable_signaling(fence)) |
212 | fence_signal_locked(fence); | 215 | dma_fence_signal_locked(fence); |
213 | 216 | ||
214 | spin_unlock_irqrestore(fence->lock, flags); | 217 | spin_unlock_irqrestore(fence->lock, flags); |
215 | } | 218 | } |
216 | } | 219 | } |
217 | EXPORT_SYMBOL(fence_enable_sw_signaling); | 220 | EXPORT_SYMBOL(dma_fence_enable_sw_signaling); |
218 | 221 | ||
219 | /** | 222 | /** |
220 | * fence_add_callback - add a callback to be called when the fence | 223 | * dma_fence_add_callback - add a callback to be called when the fence |
221 | * is signaled | 224 | * is signaled |
222 | * @fence: [in] the fence to wait on | 225 | * @fence: [in] the fence to wait on |
223 | * @cb: [in] the callback to register | 226 | * @cb: [in] the callback to register |
224 | * @func: [in] the function to call | 227 | * @func: [in] the function to call |
225 | * | 228 | * |
226 | * cb will be initialized by fence_add_callback, no initialization | 229 | * cb will be initialized by dma_fence_add_callback, no initialization |
227 | * by the caller is required. Any number of callbacks can be registered | 230 | * by the caller is required. Any number of callbacks can be registered |
228 | * to a fence, but a callback can only be registered to one fence at a time. | 231 | * to a fence, but a callback can only be registered to one fence at a time. |
229 | * | 232 | * |
@@ -232,15 +235,15 @@ EXPORT_SYMBOL(fence_enable_sw_signaling); | |||
232 | * *not* call the callback) | 235 | * *not* call the callback) |
233 | * | 236 | * |
234 | * Add a software callback to the fence. Same restrictions apply to | 237 | * Add a software callback to the fence. Same restrictions apply to |
235 | * refcount as it does to fence_wait, however the caller doesn't need to | 238 | * refcount as it does to dma_fence_wait, however the caller doesn't need to |
236 | * keep a refcount to fence afterwards: when software access is enabled, | 239 | * keep a refcount to fence afterwards: when software access is enabled, |
237 | * the creator of the fence is required to keep the fence alive until | 240 | * the creator of the fence is required to keep the fence alive until |
238 | * after it signals with fence_signal. The callback itself can be called | 241 | * after it signals with dma_fence_signal. The callback itself can be called |
239 | * from irq context. | 242 | * from irq context. |
240 | * | 243 | * |
241 | */ | 244 | */ |
242 | int fence_add_callback(struct fence *fence, struct fence_cb *cb, | 245 | int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, |
243 | fence_func_t func) | 246 | dma_fence_func_t func) |
244 | { | 247 | { |
245 | unsigned long flags; | 248 | unsigned long flags; |
246 | int ret = 0; | 249 | int ret = 0; |
@@ -249,22 +252,23 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb, | |||
249 | if (WARN_ON(!fence || !func)) | 252 | if (WARN_ON(!fence || !func)) |
250 | return -EINVAL; | 253 | return -EINVAL; |
251 | 254 | ||
252 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { | 255 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { |
253 | INIT_LIST_HEAD(&cb->node); | 256 | INIT_LIST_HEAD(&cb->node); |
254 | return -ENOENT; | 257 | return -ENOENT; |
255 | } | 258 | } |
256 | 259 | ||
257 | spin_lock_irqsave(fence->lock, flags); | 260 | spin_lock_irqsave(fence->lock, flags); |
258 | 261 | ||
259 | was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); | 262 | was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, |
263 | &fence->flags); | ||
260 | 264 | ||
261 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | 265 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
262 | ret = -ENOENT; | 266 | ret = -ENOENT; |
263 | else if (!was_set) { | 267 | else if (!was_set) { |
264 | trace_fence_enable_signal(fence); | 268 | trace_dma_fence_enable_signal(fence); |
265 | 269 | ||
266 | if (!fence->ops->enable_signaling(fence)) { | 270 | if (!fence->ops->enable_signaling(fence)) { |
267 | fence_signal_locked(fence); | 271 | dma_fence_signal_locked(fence); |
268 | ret = -ENOENT; | 272 | ret = -ENOENT; |
269 | } | 273 | } |
270 | } | 274 | } |
@@ -278,10 +282,10 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb, | |||
278 | 282 | ||
279 | return ret; | 283 | return ret; |
280 | } | 284 | } |
281 | EXPORT_SYMBOL(fence_add_callback); | 285 | EXPORT_SYMBOL(dma_fence_add_callback); |
282 | 286 | ||
283 | /** | 287 | /** |
284 | * fence_remove_callback - remove a callback from the signaling list | 288 | * dma_fence_remove_callback - remove a callback from the signaling list |
285 | * @fence: [in] the fence to wait on | 289 | * @fence: [in] the fence to wait on |
286 | * @cb: [in] the callback to remove | 290 | * @cb: [in] the callback to remove |
287 | * | 291 | * |
@@ -296,7 +300,7 @@ EXPORT_SYMBOL(fence_add_callback); | |||
296 | * with a reference held to the fence. | 300 | * with a reference held to the fence. |
297 | */ | 301 | */ |
298 | bool | 302 | bool |
299 | fence_remove_callback(struct fence *fence, struct fence_cb *cb) | 303 | dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb) |
300 | { | 304 | { |
301 | unsigned long flags; | 305 | unsigned long flags; |
302 | bool ret; | 306 | bool ret; |
@@ -311,15 +315,15 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb) | |||
311 | 315 | ||
312 | return ret; | 316 | return ret; |
313 | } | 317 | } |
314 | EXPORT_SYMBOL(fence_remove_callback); | 318 | EXPORT_SYMBOL(dma_fence_remove_callback); |
315 | 319 | ||
316 | struct default_wait_cb { | 320 | struct default_wait_cb { |
317 | struct fence_cb base; | 321 | struct dma_fence_cb base; |
318 | struct task_struct *task; | 322 | struct task_struct *task; |
319 | }; | 323 | }; |
320 | 324 | ||
321 | static void | 325 | static void |
322 | fence_default_wait_cb(struct fence *fence, struct fence_cb *cb) | 326 | dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
323 | { | 327 | { |
324 | struct default_wait_cb *wait = | 328 | struct default_wait_cb *wait = |
325 | container_of(cb, struct default_wait_cb, base); | 329 | container_of(cb, struct default_wait_cb, base); |
@@ -328,7 +332,7 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb) | |||
328 | } | 332 | } |
329 | 333 | ||
330 | /** | 334 | /** |
331 | * fence_default_wait - default sleep until the fence gets signaled | 335 | * dma_fence_default_wait - default sleep until the fence gets signaled |
332 | * or until timeout elapses | 336 | * or until timeout elapses |
333 | * @fence: [in] the fence to wait on | 337 | * @fence: [in] the fence to wait on |
334 | * @intr: [in] if true, do an interruptible wait | 338 | * @intr: [in] if true, do an interruptible wait |
@@ -338,14 +342,14 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb) | |||
338 | * remaining timeout in jiffies on success. | 342 | * remaining timeout in jiffies on success. |
339 | */ | 343 | */ |
340 | signed long | 344 | signed long |
341 | fence_default_wait(struct fence *fence, bool intr, signed long timeout) | 345 | dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) |
342 | { | 346 | { |
343 | struct default_wait_cb cb; | 347 | struct default_wait_cb cb; |
344 | unsigned long flags; | 348 | unsigned long flags; |
345 | signed long ret = timeout; | 349 | signed long ret = timeout; |
346 | bool was_set; | 350 | bool was_set; |
347 | 351 | ||
348 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | 352 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
349 | return timeout; | 353 | return timeout; |
350 | 354 | ||
351 | spin_lock_irqsave(fence->lock, flags); | 355 | spin_lock_irqsave(fence->lock, flags); |
@@ -355,25 +359,26 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout) | |||
355 | goto out; | 359 | goto out; |
356 | } | 360 | } |
357 | 361 | ||
358 | was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); | 362 | was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, |
363 | &fence->flags); | ||
359 | 364 | ||
360 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | 365 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
361 | goto out; | 366 | goto out; |
362 | 367 | ||
363 | if (!was_set) { | 368 | if (!was_set) { |
364 | trace_fence_enable_signal(fence); | 369 | trace_dma_fence_enable_signal(fence); |
365 | 370 | ||
366 | if (!fence->ops->enable_signaling(fence)) { | 371 | if (!fence->ops->enable_signaling(fence)) { |
367 | fence_signal_locked(fence); | 372 | dma_fence_signal_locked(fence); |
368 | goto out; | 373 | goto out; |
369 | } | 374 | } |
370 | } | 375 | } |
371 | 376 | ||
372 | cb.base.func = fence_default_wait_cb; | 377 | cb.base.func = dma_fence_default_wait_cb; |
373 | cb.task = current; | 378 | cb.task = current; |
374 | list_add(&cb.base.node, &fence->cb_list); | 379 | list_add(&cb.base.node, &fence->cb_list); |
375 | 380 | ||
376 | while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { | 381 | while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { |
377 | if (intr) | 382 | if (intr) |
378 | __set_current_state(TASK_INTERRUPTIBLE); | 383 | __set_current_state(TASK_INTERRUPTIBLE); |
379 | else | 384 | else |
@@ -395,23 +400,23 @@ out: | |||
395 | spin_unlock_irqrestore(fence->lock, flags); | 400 | spin_unlock_irqrestore(fence->lock, flags); |
396 | return ret; | 401 | return ret; |
397 | } | 402 | } |
398 | EXPORT_SYMBOL(fence_default_wait); | 403 | EXPORT_SYMBOL(dma_fence_default_wait); |
399 | 404 | ||
400 | static bool | 405 | static bool |
401 | fence_test_signaled_any(struct fence **fences, uint32_t count) | 406 | dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count) |
402 | { | 407 | { |
403 | int i; | 408 | int i; |
404 | 409 | ||
405 | for (i = 0; i < count; ++i) { | 410 | for (i = 0; i < count; ++i) { |
406 | struct fence *fence = fences[i]; | 411 | struct dma_fence *fence = fences[i]; |
407 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | 412 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
408 | return true; | 413 | return true; |
409 | } | 414 | } |
410 | return false; | 415 | return false; |
411 | } | 416 | } |
412 | 417 | ||
413 | /** | 418 | /** |
414 | * fence_wait_any_timeout - sleep until any fence gets signaled | 419 | * dma_fence_wait_any_timeout - sleep until any fence gets signaled |
415 | * or until timeout elapses | 420 | * or until timeout elapses |
416 | * @fences: [in] array of fences to wait on | 421 | * @fences: [in] array of fences to wait on |
417 | * @count: [in] number of fences to wait on | 422 | * @count: [in] number of fences to wait on |
@@ -427,8 +432,8 @@ fence_test_signaled_any(struct fence **fences, uint32_t count) | |||
427 | * fence might be freed before return, resulting in undefined behavior. | 432 | * fence might be freed before return, resulting in undefined behavior. |
428 | */ | 433 | */ |
429 | signed long | 434 | signed long |
430 | fence_wait_any_timeout(struct fence **fences, uint32_t count, | 435 | dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, |
431 | bool intr, signed long timeout) | 436 | bool intr, signed long timeout) |
432 | { | 437 | { |
433 | struct default_wait_cb *cb; | 438 | struct default_wait_cb *cb; |
434 | signed long ret = timeout; | 439 | signed long ret = timeout; |
@@ -439,7 +444,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count, | |||
439 | 444 | ||
440 | if (timeout == 0) { | 445 | if (timeout == 0) { |
441 | for (i = 0; i < count; ++i) | 446 | for (i = 0; i < count; ++i) |
442 | if (fence_is_signaled(fences[i])) | 447 | if (dma_fence_is_signaled(fences[i])) |
443 | return 1; | 448 | return 1; |
444 | 449 | ||
445 | return 0; | 450 | return 0; |
@@ -452,16 +457,16 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count, | |||
452 | } | 457 | } |
453 | 458 | ||
454 | for (i = 0; i < count; ++i) { | 459 | for (i = 0; i < count; ++i) { |
455 | struct fence *fence = fences[i]; | 460 | struct dma_fence *fence = fences[i]; |
456 | 461 | ||
457 | if (fence->ops->wait != fence_default_wait) { | 462 | if (fence->ops->wait != dma_fence_default_wait) { |
458 | ret = -EINVAL; | 463 | ret = -EINVAL; |
459 | goto fence_rm_cb; | 464 | goto fence_rm_cb; |
460 | } | 465 | } |
461 | 466 | ||
462 | cb[i].task = current; | 467 | cb[i].task = current; |
463 | if (fence_add_callback(fence, &cb[i].base, | 468 | if (dma_fence_add_callback(fence, &cb[i].base, |
464 | fence_default_wait_cb)) { | 469 | dma_fence_default_wait_cb)) { |
465 | /* This fence is already signaled */ | 470 | /* This fence is already signaled */ |
466 | goto fence_rm_cb; | 471 | goto fence_rm_cb; |
467 | } | 472 | } |
@@ -473,7 +478,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count, | |||
473 | else | 478 | else |
474 | set_current_state(TASK_UNINTERRUPTIBLE); | 479 | set_current_state(TASK_UNINTERRUPTIBLE); |
475 | 480 | ||
476 | if (fence_test_signaled_any(fences, count)) | 481 | if (dma_fence_test_signaled_any(fences, count)) |
477 | break; | 482 | break; |
478 | 483 | ||
479 | ret = schedule_timeout(ret); | 484 | ret = schedule_timeout(ret); |
@@ -486,34 +491,34 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count, | |||
486 | 491 | ||
487 | fence_rm_cb: | 492 | fence_rm_cb: |
488 | while (i-- > 0) | 493 | while (i-- > 0) |
489 | fence_remove_callback(fences[i], &cb[i].base); | 494 | dma_fence_remove_callback(fences[i], &cb[i].base); |
490 | 495 | ||
491 | err_free_cb: | 496 | err_free_cb: |
492 | kfree(cb); | 497 | kfree(cb); |
493 | 498 | ||
494 | return ret; | 499 | return ret; |
495 | } | 500 | } |
496 | EXPORT_SYMBOL(fence_wait_any_timeout); | 501 | EXPORT_SYMBOL(dma_fence_wait_any_timeout); |
497 | 502 | ||
498 | /** | 503 | /** |
499 | * fence_init - Initialize a custom fence. | 504 | * dma_fence_init - Initialize a custom fence. |
500 | * @fence: [in] the fence to initialize | 505 | * @fence: [in] the fence to initialize |
501 | * @ops: [in] the fence_ops for operations on this fence | 506 | * @ops: [in] the dma_fence_ops for operations on this fence |
502 | * @lock: [in] the irqsafe spinlock to use for locking this fence | 507 | * @lock: [in] the irqsafe spinlock to use for locking this fence |
503 | * @context: [in] the execution context this fence is run on | 508 | * @context: [in] the execution context this fence is run on |
504 | * @seqno: [in] a linear increasing sequence number for this context | 509 | * @seqno: [in] a linear increasing sequence number for this context |
505 | * | 510 | * |
506 | * Initializes an allocated fence, the caller doesn't have to keep its | 511 | * Initializes an allocated fence, the caller doesn't have to keep its |
507 | * refcount after committing with this fence, but it will need to hold a | 512 | * refcount after committing with this fence, but it will need to hold a |
508 | * refcount again if fence_ops.enable_signaling gets called. This can | 513 | * refcount again if dma_fence_ops.enable_signaling gets called. This can |
509 | * be used for other implementing other types of fence. | 514 | * be used for other implementing other types of fence. |
510 | * | 515 | * |
511 | * context and seqno are used for easy comparison between fences, allowing | 516 | * context and seqno are used for easy comparison between fences, allowing |
512 | * to check which fence is later by simply using fence_later. | 517 | * to check which fence is later by simply using dma_fence_later. |
513 | */ | 518 | */ |
514 | void | 519 | void |
515 | fence_init(struct fence *fence, const struct fence_ops *ops, | 520 | dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, |
516 | spinlock_t *lock, u64 context, unsigned seqno) | 521 | spinlock_t *lock, u64 context, unsigned seqno) |
517 | { | 522 | { |
518 | BUG_ON(!lock); | 523 | BUG_ON(!lock); |
519 | BUG_ON(!ops || !ops->wait || !ops->enable_signaling || | 524 | BUG_ON(!ops || !ops->wait || !ops->enable_signaling || |
@@ -527,6 +532,6 @@ fence_init(struct fence *fence, const struct fence_ops *ops, | |||
527 | fence->seqno = seqno; | 532 | fence->seqno = seqno; |
528 | fence->flags = 0UL; | 533 | fence->flags = 0UL; |
529 | 534 | ||
530 | trace_fence_init(fence); | 535 | trace_dma_fence_init(fence); |
531 | } | 536 | } |
532 | EXPORT_SYMBOL(fence_init); | 537 | EXPORT_SYMBOL(dma_fence_init); |
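Taken together, the renamed core entry points (dma_fence_init(), dma_fence_default_wait(), dma_fence_signal(), dma_fence_context_alloc()) are enough to back a minimal driver-private fence. The sketch below is illustrative only, with made-up names, a trivial enable_signaling, and a per-fence spinlock; it is not code from this commit.

	#include <linux/dma-fence.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct my_fence {
		struct dma_fence base;
		spinlock_t lock;
	};

	static const char *my_get_driver_name(struct dma_fence *f)
	{
		return "my_driver";
	}

	static const char *my_get_timeline_name(struct dma_fence *f)
	{
		return "my_timeline";
	}

	static bool my_enable_signaling(struct dma_fence *f)
	{
		/* hook up the hardware/irq completion path here */
		return true;
	}

	static const struct dma_fence_ops my_fence_ops = {
		.get_driver_name	= my_get_driver_name,
		.get_timeline_name	= my_get_timeline_name,
		.enable_signaling	= my_enable_signaling,
		.wait			= dma_fence_default_wait,
	};

	static struct dma_fence *my_fence_create(u64 context, unsigned seqno)
	{
		struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;

		spin_lock_init(&f->lock);
		dma_fence_init(&f->base, &my_fence_ops, &f->lock, context, seqno);
		return &f->base;
	}

	/* completion path: dma_fence_signal(&f->base); dma_fence_put(&f->base); */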
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 82de59f7cbbd..7ed56f3edfb7 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared); | |||
102 | static void | 102 | static void |
103 | reservation_object_add_shared_inplace(struct reservation_object *obj, | 103 | reservation_object_add_shared_inplace(struct reservation_object *obj, |
104 | struct reservation_object_list *fobj, | 104 | struct reservation_object_list *fobj, |
105 | struct fence *fence) | 105 | struct dma_fence *fence) |
106 | { | 106 | { |
107 | u32 i; | 107 | u32 i; |
108 | 108 | ||
109 | fence_get(fence); | 109 | dma_fence_get(fence); |
110 | 110 | ||
111 | preempt_disable(); | 111 | preempt_disable(); |
112 | write_seqcount_begin(&obj->seq); | 112 | write_seqcount_begin(&obj->seq); |
113 | 113 | ||
114 | for (i = 0; i < fobj->shared_count; ++i) { | 114 | for (i = 0; i < fobj->shared_count; ++i) { |
115 | struct fence *old_fence; | 115 | struct dma_fence *old_fence; |
116 | 116 | ||
117 | old_fence = rcu_dereference_protected(fobj->shared[i], | 117 | old_fence = rcu_dereference_protected(fobj->shared[i], |
118 | reservation_object_held(obj)); | 118 | reservation_object_held(obj)); |
@@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, | |||
123 | write_seqcount_end(&obj->seq); | 123 | write_seqcount_end(&obj->seq); |
124 | preempt_enable(); | 124 | preempt_enable(); |
125 | 125 | ||
126 | fence_put(old_fence); | 126 | dma_fence_put(old_fence); |
127 | return; | 127 | return; |
128 | } | 128 | } |
129 | } | 129 | } |
@@ -143,12 +143,12 @@ static void | |||
143 | reservation_object_add_shared_replace(struct reservation_object *obj, | 143 | reservation_object_add_shared_replace(struct reservation_object *obj, |
144 | struct reservation_object_list *old, | 144 | struct reservation_object_list *old, |
145 | struct reservation_object_list *fobj, | 145 | struct reservation_object_list *fobj, |
146 | struct fence *fence) | 146 | struct dma_fence *fence) |
147 | { | 147 | { |
148 | unsigned i; | 148 | unsigned i; |
149 | struct fence *old_fence = NULL; | 149 | struct dma_fence *old_fence = NULL; |
150 | 150 | ||
151 | fence_get(fence); | 151 | dma_fence_get(fence); |
152 | 152 | ||
153 | if (!old) { | 153 | if (!old) { |
154 | RCU_INIT_POINTER(fobj->shared[0], fence); | 154 | RCU_INIT_POINTER(fobj->shared[0], fence); |
@@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj, | |||
165 | fobj->shared_count = old->shared_count; | 165 | fobj->shared_count = old->shared_count; |
166 | 166 | ||
167 | for (i = 0; i < old->shared_count; ++i) { | 167 | for (i = 0; i < old->shared_count; ++i) { |
168 | struct fence *check; | 168 | struct dma_fence *check; |
169 | 169 | ||
170 | check = rcu_dereference_protected(old->shared[i], | 170 | check = rcu_dereference_protected(old->shared[i], |
171 | reservation_object_held(obj)); | 171 | reservation_object_held(obj)); |
@@ -196,7 +196,7 @@ done: | |||
196 | kfree_rcu(old, rcu); | 196 | kfree_rcu(old, rcu); |
197 | 197 | ||
198 | if (old_fence) | 198 | if (old_fence) |
199 | fence_put(old_fence); | 199 | dma_fence_put(old_fence); |
200 | } | 200 | } |
201 | 201 | ||
202 | /** | 202 | /** |
@@ -208,7 +208,7 @@ done: | |||
208 | * reservation_object_reserve_shared() has been called. | 208 | * reservation_object_reserve_shared() has been called. |
209 | */ | 209 | */ |
210 | void reservation_object_add_shared_fence(struct reservation_object *obj, | 210 | void reservation_object_add_shared_fence(struct reservation_object *obj, |
211 | struct fence *fence) | 211 | struct dma_fence *fence) |
212 | { | 212 | { |
213 | struct reservation_object_list *old, *fobj = obj->staged; | 213 | struct reservation_object_list *old, *fobj = obj->staged; |
214 | 214 | ||
@@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence); | |||
231 | * Add a fence to the exclusive slot. The obj->lock must be held. | 231 | * Add a fence to the exclusive slot. The obj->lock must be held. |
232 | */ | 232 | */ |
233 | void reservation_object_add_excl_fence(struct reservation_object *obj, | 233 | void reservation_object_add_excl_fence(struct reservation_object *obj, |
234 | struct fence *fence) | 234 | struct dma_fence *fence) |
235 | { | 235 | { |
236 | struct fence *old_fence = reservation_object_get_excl(obj); | 236 | struct dma_fence *old_fence = reservation_object_get_excl(obj); |
237 | struct reservation_object_list *old; | 237 | struct reservation_object_list *old; |
238 | u32 i = 0; | 238 | u32 i = 0; |
239 | 239 | ||
@@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, | |||
242 | i = old->shared_count; | 242 | i = old->shared_count; |
243 | 243 | ||
244 | if (fence) | 244 | if (fence) |
245 | fence_get(fence); | 245 | dma_fence_get(fence); |
246 | 246 | ||
247 | preempt_disable(); | 247 | preempt_disable(); |
248 | write_seqcount_begin(&obj->seq); | 248 | write_seqcount_begin(&obj->seq); |
@@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, | |||
255 | 255 | ||
256 | /* inplace update, no shared fences */ | 256 | /* inplace update, no shared fences */ |
257 | while (i--) | 257 | while (i--) |
258 | fence_put(rcu_dereference_protected(old->shared[i], | 258 | dma_fence_put(rcu_dereference_protected(old->shared[i], |
259 | reservation_object_held(obj))); | 259 | reservation_object_held(obj))); |
260 | 260 | ||
261 | if (old_fence) | 261 | if (old_fence) |
262 | fence_put(old_fence); | 262 | dma_fence_put(old_fence); |
263 | } | 263 | } |
264 | EXPORT_SYMBOL(reservation_object_add_excl_fence); | 264 | EXPORT_SYMBOL(reservation_object_add_excl_fence); |
265 | 265 | ||
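As the comments above note, both reservation_object_add_shared_fence() and reservation_object_add_excl_fence() require obj->lock to be held. A hedged sketch of publishing a dma_fence as the new exclusive fence, assuming no ww_acquire_ctx is in use (helper name is illustrative):

	static void publish_excl_fence(struct reservation_object *obj,
				       struct dma_fence *fence)
	{
		/* without an acquire context, ww_mutex_lock() cannot deadlock-abort */
		if (ww_mutex_lock(&obj->lock, NULL))
			return;

		reservation_object_add_excl_fence(obj, fence);
		ww_mutex_unlock(&obj->lock);
	}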
@@ -276,12 +276,12 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence); | |||
276 | * Zero or -errno | 276 | * Zero or -errno |
277 | */ | 277 | */ |
278 | int reservation_object_get_fences_rcu(struct reservation_object *obj, | 278 | int reservation_object_get_fences_rcu(struct reservation_object *obj, |
279 | struct fence **pfence_excl, | 279 | struct dma_fence **pfence_excl, |
280 | unsigned *pshared_count, | 280 | unsigned *pshared_count, |
281 | struct fence ***pshared) | 281 | struct dma_fence ***pshared) |
282 | { | 282 | { |
283 | struct fence **shared = NULL; | 283 | struct dma_fence **shared = NULL; |
284 | struct fence *fence_excl; | 284 | struct dma_fence *fence_excl; |
285 | unsigned int shared_count; | 285 | unsigned int shared_count; |
286 | int ret = 1; | 286 | int ret = 1; |
287 | 287 | ||
@@ -296,12 +296,12 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, | |||
296 | seq = read_seqcount_begin(&obj->seq); | 296 | seq = read_seqcount_begin(&obj->seq); |
297 | 297 | ||
298 | fence_excl = rcu_dereference(obj->fence_excl); | 298 | fence_excl = rcu_dereference(obj->fence_excl); |
299 | if (fence_excl && !fence_get_rcu(fence_excl)) | 299 | if (fence_excl && !dma_fence_get_rcu(fence_excl)) |
300 | goto unlock; | 300 | goto unlock; |
301 | 301 | ||
302 | fobj = rcu_dereference(obj->fence); | 302 | fobj = rcu_dereference(obj->fence); |
303 | if (fobj) { | 303 | if (fobj) { |
304 | struct fence **nshared; | 304 | struct dma_fence **nshared; |
305 | size_t sz = sizeof(*shared) * fobj->shared_max; | 305 | size_t sz = sizeof(*shared) * fobj->shared_max; |
306 | 306 | ||
307 | nshared = krealloc(shared, sz, | 307 | nshared = krealloc(shared, sz, |
@@ -322,15 +322,15 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, | |||
322 | 322 | ||
323 | for (i = 0; i < shared_count; ++i) { | 323 | for (i = 0; i < shared_count; ++i) { |
324 | shared[i] = rcu_dereference(fobj->shared[i]); | 324 | shared[i] = rcu_dereference(fobj->shared[i]); |
325 | if (!fence_get_rcu(shared[i])) | 325 | if (!dma_fence_get_rcu(shared[i])) |
326 | break; | 326 | break; |
327 | } | 327 | } |
328 | } | 328 | } |
329 | 329 | ||
330 | if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { | 330 | if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { |
331 | while (i--) | 331 | while (i--) |
332 | fence_put(shared[i]); | 332 | dma_fence_put(shared[i]); |
333 | fence_put(fence_excl); | 333 | dma_fence_put(fence_excl); |
334 | goto unlock; | 334 | goto unlock; |
335 | } | 335 | } |
336 | 336 | ||
@@ -368,7 +368,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, | |||
368 | bool wait_all, bool intr, | 368 | bool wait_all, bool intr, |
369 | unsigned long timeout) | 369 | unsigned long timeout) |
370 | { | 370 | { |
371 | struct fence *fence; | 371 | struct dma_fence *fence; |
372 | unsigned seq, shared_count, i = 0; | 372 | unsigned seq, shared_count, i = 0; |
373 | long ret = timeout; | 373 | long ret = timeout; |
374 | 374 | ||
@@ -389,16 +389,17 @@ retry: | |||
389 | shared_count = fobj->shared_count; | 389 | shared_count = fobj->shared_count; |
390 | 390 | ||
391 | for (i = 0; i < shared_count; ++i) { | 391 | for (i = 0; i < shared_count; ++i) { |
392 | struct fence *lfence = rcu_dereference(fobj->shared[i]); | 392 | struct dma_fence *lfence = rcu_dereference(fobj->shared[i]); |
393 | 393 | ||
394 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) | 394 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, |
395 | &lfence->flags)) | ||
395 | continue; | 396 | continue; |
396 | 397 | ||
397 | if (!fence_get_rcu(lfence)) | 398 | if (!dma_fence_get_rcu(lfence)) |
398 | goto unlock_retry; | 399 | goto unlock_retry; |
399 | 400 | ||
400 | if (fence_is_signaled(lfence)) { | 401 | if (dma_fence_is_signaled(lfence)) { |
401 | fence_put(lfence); | 402 | dma_fence_put(lfence); |
402 | continue; | 403 | continue; |
403 | } | 404 | } |
404 | 405 | ||
@@ -408,15 +409,16 @@ retry: | |||
408 | } | 409 | } |
409 | 410 | ||
410 | if (!shared_count) { | 411 | if (!shared_count) { |
411 | struct fence *fence_excl = rcu_dereference(obj->fence_excl); | 412 | struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); |
412 | 413 | ||
413 | if (fence_excl && | 414 | if (fence_excl && |
414 | !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) { | 415 | !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, |
415 | if (!fence_get_rcu(fence_excl)) | 416 | &fence_excl->flags)) { |
417 | if (!dma_fence_get_rcu(fence_excl)) | ||
416 | goto unlock_retry; | 418 | goto unlock_retry; |
417 | 419 | ||
418 | if (fence_is_signaled(fence_excl)) | 420 | if (dma_fence_is_signaled(fence_excl)) |
419 | fence_put(fence_excl); | 421 | dma_fence_put(fence_excl); |
420 | else | 422 | else |
421 | fence = fence_excl; | 423 | fence = fence_excl; |
422 | } | 424 | } |
@@ -425,12 +427,12 @@ retry: | |||
425 | rcu_read_unlock(); | 427 | rcu_read_unlock(); |
426 | if (fence) { | 428 | if (fence) { |
427 | if (read_seqcount_retry(&obj->seq, seq)) { | 429 | if (read_seqcount_retry(&obj->seq, seq)) { |
428 | fence_put(fence); | 430 | dma_fence_put(fence); |
429 | goto retry; | 431 | goto retry; |
430 | } | 432 | } |
431 | 433 | ||
432 | ret = fence_wait_timeout(fence, intr, ret); | 434 | ret = dma_fence_wait_timeout(fence, intr, ret); |
433 | fence_put(fence); | 435 | dma_fence_put(fence); |
434 | if (ret > 0 && wait_all && (i + 1 < shared_count)) | 436 | if (ret > 0 && wait_all && (i + 1 < shared_count)) |
435 | goto retry; | 437 | goto retry; |
436 | } | 438 | } |
@@ -444,18 +446,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); | |||
444 | 446 | ||
445 | 447 | ||
446 | static inline int | 448 | static inline int |
447 | reservation_object_test_signaled_single(struct fence *passed_fence) | 449 | reservation_object_test_signaled_single(struct dma_fence *passed_fence) |
448 | { | 450 | { |
449 | struct fence *fence, *lfence = passed_fence; | 451 | struct dma_fence *fence, *lfence = passed_fence; |
450 | int ret = 1; | 452 | int ret = 1; |
451 | 453 | ||
452 | if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { | 454 | if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { |
453 | fence = fence_get_rcu(lfence); | 455 | fence = dma_fence_get_rcu(lfence); |
454 | if (!fence) | 456 | if (!fence) |
455 | return -1; | 457 | return -1; |
456 | 458 | ||
457 | ret = !!fence_is_signaled(fence); | 459 | ret = !!dma_fence_is_signaled(fence); |
458 | fence_put(fence); | 460 | dma_fence_put(fence); |
459 | } | 461 | } |
460 | return ret; | 462 | return ret; |
461 | } | 463 | } |
@@ -492,7 +494,7 @@ retry: | |||
492 | shared_count = fobj->shared_count; | 494 | shared_count = fobj->shared_count; |
493 | 495 | ||
494 | for (i = 0; i < shared_count; ++i) { | 496 | for (i = 0; i < shared_count; ++i) { |
495 | struct fence *fence = rcu_dereference(fobj->shared[i]); | 497 | struct dma_fence *fence = rcu_dereference(fobj->shared[i]); |
496 | 498 | ||
497 | ret = reservation_object_test_signaled_single(fence); | 499 | ret = reservation_object_test_signaled_single(fence); |
498 | if (ret < 0) | 500 | if (ret < 0) |
@@ -506,7 +508,7 @@ retry: | |||
506 | } | 508 | } |
507 | 509 | ||
508 | if (!shared_count) { | 510 | if (!shared_count) { |
509 | struct fence *fence_excl = rcu_dereference(obj->fence_excl); | 511 | struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); |
510 | 512 | ||
511 | if (fence_excl) { | 513 | if (fence_excl) { |
512 | ret = reservation_object_test_signaled_single( | 514 | ret = reservation_object_test_signaled_single( |
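The reservation.c hunks above are a mechanical rename; the calling convention of the RCU helpers is unchanged. As an illustrative aside (not part of this patch), a caller snapshotting the fences with the renamed helper would look roughly like the sketch below, assuming, as the krealloc above suggests, that the caller owns and frees the returned shared array:

	struct dma_fence *excl, **shared;
	unsigned int i, shared_count;
	int ret;

	ret = reservation_object_get_fences_rcu(obj, &excl, &shared_count, &shared);
	if (ret)
		return ret;

	/* ... inspect or wait on the snapshotted fences ... */

	for (i = 0; i < shared_count; ++i)
		dma_fence_put(shared[i]);
	kfree(shared);
	dma_fence_put(excl);	/* dma_fence_put() tolerates NULL, as the retry path above relies on */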
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c index 71127f8f1626..f47112a64763 100644 --- a/drivers/dma-buf/seqno-fence.c +++ b/drivers/dma-buf/seqno-fence.c | |||
@@ -21,35 +21,35 @@ | |||
21 | #include <linux/export.h> | 21 | #include <linux/export.h> |
22 | #include <linux/seqno-fence.h> | 22 | #include <linux/seqno-fence.h> |
23 | 23 | ||
24 | static const char *seqno_fence_get_driver_name(struct fence *fence) | 24 | static const char *seqno_fence_get_driver_name(struct dma_fence *fence) |
25 | { | 25 | { |
26 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | 26 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); |
27 | 27 | ||
28 | return seqno_fence->ops->get_driver_name(fence); | 28 | return seqno_fence->ops->get_driver_name(fence); |
29 | } | 29 | } |
30 | 30 | ||
31 | static const char *seqno_fence_get_timeline_name(struct fence *fence) | 31 | static const char *seqno_fence_get_timeline_name(struct dma_fence *fence) |
32 | { | 32 | { |
33 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | 33 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); |
34 | 34 | ||
35 | return seqno_fence->ops->get_timeline_name(fence); | 35 | return seqno_fence->ops->get_timeline_name(fence); |
36 | } | 36 | } |
37 | 37 | ||
38 | static bool seqno_enable_signaling(struct fence *fence) | 38 | static bool seqno_enable_signaling(struct dma_fence *fence) |
39 | { | 39 | { |
40 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | 40 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); |
41 | 41 | ||
42 | return seqno_fence->ops->enable_signaling(fence); | 42 | return seqno_fence->ops->enable_signaling(fence); |
43 | } | 43 | } |
44 | 44 | ||
45 | static bool seqno_signaled(struct fence *fence) | 45 | static bool seqno_signaled(struct dma_fence *fence) |
46 | { | 46 | { |
47 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | 47 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); |
48 | 48 | ||
49 | return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence); | 49 | return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence); |
50 | } | 50 | } |
51 | 51 | ||
52 | static void seqno_release(struct fence *fence) | 52 | static void seqno_release(struct dma_fence *fence) |
53 | { | 53 | { |
54 | struct seqno_fence *f = to_seqno_fence(fence); | 54 | struct seqno_fence *f = to_seqno_fence(fence); |
55 | 55 | ||
@@ -57,18 +57,18 @@ static void seqno_release(struct fence *fence) | |||
57 | if (f->ops->release) | 57 | if (f->ops->release) |
58 | f->ops->release(fence); | 58 | f->ops->release(fence); |
59 | else | 59 | else |
60 | fence_free(&f->base); | 60 | dma_fence_free(&f->base); |
61 | } | 61 | } |
62 | 62 | ||
63 | static signed long seqno_wait(struct fence *fence, bool intr, | 63 | static signed long seqno_wait(struct dma_fence *fence, bool intr, |
64 | signed long timeout) | 64 | signed long timeout) |
65 | { | 65 | { |
66 | struct seqno_fence *f = to_seqno_fence(fence); | 66 | struct seqno_fence *f = to_seqno_fence(fence); |
67 | 67 | ||
68 | return f->ops->wait(fence, intr, timeout); | 68 | return f->ops->wait(fence, intr, timeout); |
69 | } | 69 | } |
70 | 70 | ||
71 | const struct fence_ops seqno_fence_ops = { | 71 | const struct dma_fence_ops seqno_fence_ops = { |
72 | .get_driver_name = seqno_fence_get_driver_name, | 72 | .get_driver_name = seqno_fence_get_driver_name, |
73 | .get_timeline_name = seqno_fence_get_timeline_name, | 73 | .get_timeline_name = seqno_fence_get_timeline_name, |
74 | .enable_signaling = seqno_enable_signaling, | 74 | .enable_signaling = seqno_enable_signaling, |
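The seqno-fence wrappers keep their behaviour; only the callback signatures move from struct fence to struct dma_fence. Purely for illustration (the names here are invented, not from this patch), a driver-side ops table after this conversion takes the following shape, with dma_fence_default_wait usable as the wait callback just as in sw_sync below:

static const char *my_get_driver_name(struct dma_fence *fence)
{
	return "my_driver";
}

static const char *my_get_timeline_name(struct dma_fence *fence)
{
	return "my_timeline";
}

static bool my_enable_signaling(struct dma_fence *fence)
{
	/* in this sketch the signaling interrupt is assumed to be always armed */
	return true;
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name	= my_get_driver_name,
	.get_timeline_name	= my_get_timeline_name,
	.enable_signaling	= my_enable_signaling,
	.wait			= dma_fence_default_wait,
};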
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 62e8e6dc7953..82e0ca4dd0c1 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c | |||
@@ -68,9 +68,9 @@ struct sw_sync_create_fence_data { | |||
68 | 68 | ||
69 | #define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) | 69 | #define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) |
70 | 70 | ||
71 | static const struct fence_ops timeline_fence_ops; | 71 | static const struct dma_fence_ops timeline_fence_ops; |
72 | 72 | ||
73 | static inline struct sync_pt *fence_to_sync_pt(struct fence *fence) | 73 | static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence) |
74 | { | 74 | { |
75 | if (fence->ops != &timeline_fence_ops) | 75 | if (fence->ops != &timeline_fence_ops) |
76 | return NULL; | 76 | return NULL; |
@@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name) | |||
93 | return NULL; | 93 | return NULL; |
94 | 94 | ||
95 | kref_init(&obj->kref); | 95 | kref_init(&obj->kref); |
96 | obj->context = fence_context_alloc(1); | 96 | obj->context = dma_fence_context_alloc(1); |
97 | strlcpy(obj->name, name, sizeof(obj->name)); | 97 | strlcpy(obj->name, name, sizeof(obj->name)); |
98 | 98 | ||
99 | INIT_LIST_HEAD(&obj->child_list_head); | 99 | INIT_LIST_HEAD(&obj->child_list_head); |
@@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) | |||
146 | 146 | ||
147 | list_for_each_entry_safe(pt, next, &obj->active_list_head, | 147 | list_for_each_entry_safe(pt, next, &obj->active_list_head, |
148 | active_list) { | 148 | active_list) { |
149 | if (fence_is_signaled_locked(&pt->base)) | 149 | if (dma_fence_is_signaled_locked(&pt->base)) |
150 | list_del_init(&pt->active_list); | 150 | list_del_init(&pt->active_list); |
151 | } | 151 | } |
152 | 152 | ||
@@ -179,30 +179,30 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size, | |||
179 | 179 | ||
180 | spin_lock_irqsave(&obj->child_list_lock, flags); | 180 | spin_lock_irqsave(&obj->child_list_lock, flags); |
181 | sync_timeline_get(obj); | 181 | sync_timeline_get(obj); |
182 | fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock, | 182 | dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock, |
183 | obj->context, value); | 183 | obj->context, value); |
184 | list_add_tail(&pt->child_list, &obj->child_list_head); | 184 | list_add_tail(&pt->child_list, &obj->child_list_head); |
185 | INIT_LIST_HEAD(&pt->active_list); | 185 | INIT_LIST_HEAD(&pt->active_list); |
186 | spin_unlock_irqrestore(&obj->child_list_lock, flags); | 186 | spin_unlock_irqrestore(&obj->child_list_lock, flags); |
187 | return pt; | 187 | return pt; |
188 | } | 188 | } |
189 | 189 | ||
190 | static const char *timeline_fence_get_driver_name(struct fence *fence) | 190 | static const char *timeline_fence_get_driver_name(struct dma_fence *fence) |
191 | { | 191 | { |
192 | return "sw_sync"; | 192 | return "sw_sync"; |
193 | } | 193 | } |
194 | 194 | ||
195 | static const char *timeline_fence_get_timeline_name(struct fence *fence) | 195 | static const char *timeline_fence_get_timeline_name(struct dma_fence *fence) |
196 | { | 196 | { |
197 | struct sync_timeline *parent = fence_parent(fence); | 197 | struct sync_timeline *parent = dma_fence_parent(fence); |
198 | 198 | ||
199 | return parent->name; | 199 | return parent->name; |
200 | } | 200 | } |
201 | 201 | ||
202 | static void timeline_fence_release(struct fence *fence) | 202 | static void timeline_fence_release(struct dma_fence *fence) |
203 | { | 203 | { |
204 | struct sync_pt *pt = fence_to_sync_pt(fence); | 204 | struct sync_pt *pt = dma_fence_to_sync_pt(fence); |
205 | struct sync_timeline *parent = fence_parent(fence); | 205 | struct sync_timeline *parent = dma_fence_parent(fence); |
206 | unsigned long flags; | 206 | unsigned long flags; |
207 | 207 | ||
208 | spin_lock_irqsave(fence->lock, flags); | 208 | spin_lock_irqsave(fence->lock, flags); |
@@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence) | |||
212 | spin_unlock_irqrestore(fence->lock, flags); | 212 | spin_unlock_irqrestore(fence->lock, flags); |
213 | 213 | ||
214 | sync_timeline_put(parent); | 214 | sync_timeline_put(parent); |
215 | fence_free(fence); | 215 | dma_fence_free(fence); |
216 | } | 216 | } |
217 | 217 | ||
218 | static bool timeline_fence_signaled(struct fence *fence) | 218 | static bool timeline_fence_signaled(struct dma_fence *fence) |
219 | { | 219 | { |
220 | struct sync_timeline *parent = fence_parent(fence); | 220 | struct sync_timeline *parent = dma_fence_parent(fence); |
221 | 221 | ||
222 | return (fence->seqno > parent->value) ? false : true; | 222 | return (fence->seqno > parent->value) ? false : true; |
223 | } | 223 | } |
224 | 224 | ||
225 | static bool timeline_fence_enable_signaling(struct fence *fence) | 225 | static bool timeline_fence_enable_signaling(struct dma_fence *fence) |
226 | { | 226 | { |
227 | struct sync_pt *pt = fence_to_sync_pt(fence); | 227 | struct sync_pt *pt = dma_fence_to_sync_pt(fence); |
228 | struct sync_timeline *parent = fence_parent(fence); | 228 | struct sync_timeline *parent = dma_fence_parent(fence); |
229 | 229 | ||
230 | if (timeline_fence_signaled(fence)) | 230 | if (timeline_fence_signaled(fence)) |
231 | return false; | 231 | return false; |
@@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence) | |||
234 | return true; | 234 | return true; |
235 | } | 235 | } |
236 | 236 | ||
237 | static void timeline_fence_value_str(struct fence *fence, | 237 | static void timeline_fence_value_str(struct dma_fence *fence, |
238 | char *str, int size) | 238 | char *str, int size) |
239 | { | 239 | { |
240 | snprintf(str, size, "%d", fence->seqno); | 240 | snprintf(str, size, "%d", fence->seqno); |
241 | } | 241 | } |
242 | 242 | ||
243 | static void timeline_fence_timeline_value_str(struct fence *fence, | 243 | static void timeline_fence_timeline_value_str(struct dma_fence *fence, |
244 | char *str, int size) | 244 | char *str, int size) |
245 | { | 245 | { |
246 | struct sync_timeline *parent = fence_parent(fence); | 246 | struct sync_timeline *parent = dma_fence_parent(fence); |
247 | 247 | ||
248 | snprintf(str, size, "%d", parent->value); | 248 | snprintf(str, size, "%d", parent->value); |
249 | } | 249 | } |
250 | 250 | ||
251 | static const struct fence_ops timeline_fence_ops = { | 251 | static const struct dma_fence_ops timeline_fence_ops = { |
252 | .get_driver_name = timeline_fence_get_driver_name, | 252 | .get_driver_name = timeline_fence_get_driver_name, |
253 | .get_timeline_name = timeline_fence_get_timeline_name, | 253 | .get_timeline_name = timeline_fence_get_timeline_name, |
254 | .enable_signaling = timeline_fence_enable_signaling, | 254 | .enable_signaling = timeline_fence_enable_signaling, |
255 | .signaled = timeline_fence_signaled, | 255 | .signaled = timeline_fence_signaled, |
256 | .wait = fence_default_wait, | 256 | .wait = dma_fence_default_wait, |
257 | .release = timeline_fence_release, | 257 | .release = timeline_fence_release, |
258 | .fence_value_str = timeline_fence_value_str, | 258 | .fence_value_str = timeline_fence_value_str, |
259 | .timeline_value_str = timeline_fence_timeline_value_str, | 259 | .timeline_value_str = timeline_fence_timeline_value_str, |
@@ -317,7 +317,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj, | |||
317 | 317 | ||
318 | sync_file = sync_file_create(&pt->base); | 318 | sync_file = sync_file_create(&pt->base); |
319 | if (!sync_file) { | 319 | if (!sync_file) { |
320 | fence_put(&pt->base); | 320 | dma_fence_put(&pt->base); |
321 | err = -ENOMEM; | 321 | err = -ENOMEM; |
322 | goto err; | 322 | goto err; |
323 | } | 323 | } |
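For context on the sw_sync hunks above, the renamed initialisation path embeds a dma_fence in a driver structure and binds it to the timeline's lock and context. A condensed, illustrative fragment follows (allocation checks, child-list bookkeeping and error paths omitted; obj is a sync_timeline as created above):

	struct sync_pt *pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	struct sync_file *sync_file;

	/* bind the new fence to the timeline's lock and 64-bit context */
	dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
		       obj->context, 1);

	/* wrap it for userspace; on failure drop our reference, as above */
	sync_file = sync_file_create(&pt->base);
	if (!sync_file)
		dma_fence_put(&pt->base);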
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 2dd4c3db6caa..48b20e34fb6d 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c | |||
@@ -71,12 +71,13 @@ static const char *sync_status_str(int status) | |||
71 | return "error"; | 71 | return "error"; |
72 | } | 72 | } |
73 | 73 | ||
74 | static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show) | 74 | static void sync_print_fence(struct seq_file *s, |
75 | struct dma_fence *fence, bool show) | ||
75 | { | 76 | { |
76 | int status = 1; | 77 | int status = 1; |
77 | struct sync_timeline *parent = fence_parent(fence); | 78 | struct sync_timeline *parent = dma_fence_parent(fence); |
78 | 79 | ||
79 | if (fence_is_signaled_locked(fence)) | 80 | if (dma_fence_is_signaled_locked(fence)) |
80 | status = fence->status; | 81 | status = fence->status; |
81 | 82 | ||
82 | seq_printf(s, " %s%sfence %s", | 83 | seq_printf(s, " %s%sfence %s", |
@@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s, | |||
135 | int i; | 136 | int i; |
136 | 137 | ||
137 | seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name, | 138 | seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name, |
138 | sync_status_str(!fence_is_signaled(sync_file->fence))); | 139 | sync_status_str(!dma_fence_is_signaled(sync_file->fence))); |
139 | 140 | ||
140 | if (fence_is_array(sync_file->fence)) { | 141 | if (dma_fence_is_array(sync_file->fence)) { |
141 | struct fence_array *array = to_fence_array(sync_file->fence); | 142 | struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); |
142 | 143 | ||
143 | for (i = 0; i < array->num_fences; ++i) | 144 | for (i = 0; i < array->num_fences; ++i) |
144 | sync_print_fence(s, array->fences[i], true); | 145 | sync_print_fence(s, array->fences[i], true); |
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h index d269aa6783aa..26fe8b9907b3 100644 --- a/drivers/dma-buf/sync_debug.h +++ b/drivers/dma-buf/sync_debug.h | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/fence.h> | 18 | #include <linux/dma-fence.h> |
19 | 19 | ||
20 | #include <linux/sync_file.h> | 20 | #include <linux/sync_file.h> |
21 | #include <uapi/linux/sync_file.h> | 21 | #include <uapi/linux/sync_file.h> |
@@ -45,10 +45,9 @@ struct sync_timeline { | |||
45 | struct list_head sync_timeline_list; | 45 | struct list_head sync_timeline_list; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static inline struct sync_timeline *fence_parent(struct fence *fence) | 48 | static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence) |
49 | { | 49 | { |
50 | return container_of(fence->lock, struct sync_timeline, | 50 | return container_of(fence->lock, struct sync_timeline, child_list_lock); |
51 | child_list_lock); | ||
52 | } | 51 | } |
53 | 52 | ||
54 | /** | 53 | /** |
@@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence) | |||
58 | * @active_list: sync timeline active child's list | 57 | * @active_list: sync timeline active child's list |
59 | */ | 58 | */ |
60 | struct sync_pt { | 59 | struct sync_pt { |
61 | struct fence base; | 60 | struct dma_fence base; |
62 | struct list_head child_list; | 61 | struct list_head child_list; |
63 | struct list_head active_list; | 62 | struct list_head active_list; |
64 | }; | 63 | }; |
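The header change above shows the conversion pattern for embedded fences: the wrapper keeps a struct dma_fence as a member and recovers its parent timeline via container_of() on the fence's lock. The complementary accessor, a sketch of what dma_fence_to_sync_pt() in sw_sync.c resolves to for fences it owns, is simply:

static inline struct sync_pt *to_sync_pt(struct dma_fence *fence)
{
	/* valid only for fences created on a sw_sync timeline */
	return container_of(fence, struct sync_pt, base);
}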
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 235f8ac113cc..69d8ef98d34c 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c | |||
@@ -54,7 +54,7 @@ err: | |||
54 | return NULL; | 54 | return NULL; |
55 | } | 55 | } |
56 | 56 | ||
57 | static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) | 57 | static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb) |
58 | { | 58 | { |
59 | struct sync_file *sync_file; | 59 | struct sync_file *sync_file; |
60 | 60 | ||
@@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) | |||
71 | * takes ownership of @fence. The sync_file can be released with | 71 | * takes ownership of @fence. The sync_file can be released with |
72 | * fput(sync_file->file). Returns the sync_file or NULL in case of error. | 72 | * fput(sync_file->file). Returns the sync_file or NULL in case of error. |
73 | */ | 73 | */ |
74 | struct sync_file *sync_file_create(struct fence *fence) | 74 | struct sync_file *sync_file_create(struct dma_fence *fence) |
75 | { | 75 | { |
76 | struct sync_file *sync_file; | 76 | struct sync_file *sync_file; |
77 | 77 | ||
@@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence) | |||
79 | if (!sync_file) | 79 | if (!sync_file) |
80 | return NULL; | 80 | return NULL; |
81 | 81 | ||
82 | sync_file->fence = fence_get(fence); | 82 | sync_file->fence = dma_fence_get(fence); |
83 | 83 | ||
84 | snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", | 84 | snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", |
85 | fence->ops->get_driver_name(fence), | 85 | fence->ops->get_driver_name(fence), |
@@ -121,16 +121,16 @@ err: | |||
121 | * Ensures @fd references a valid sync_file and returns a fence that | 121 | * Ensures @fd references a valid sync_file and returns a fence that |
122 | * represents all fences in the sync_file. On error NULL is returned. | 122 | * represents all fences in the sync_file. On error NULL is returned. |
123 | */ | 123 | */ |
124 | struct fence *sync_file_get_fence(int fd) | 124 | struct dma_fence *sync_file_get_fence(int fd) |
125 | { | 125 | { |
126 | struct sync_file *sync_file; | 126 | struct sync_file *sync_file; |
127 | struct fence *fence; | 127 | struct dma_fence *fence; |
128 | 128 | ||
129 | sync_file = sync_file_fdget(fd); | 129 | sync_file = sync_file_fdget(fd); |
130 | if (!sync_file) | 130 | if (!sync_file) |
131 | return NULL; | 131 | return NULL; |
132 | 132 | ||
133 | fence = fence_get(sync_file->fence); | 133 | fence = dma_fence_get(sync_file->fence); |
134 | fput(sync_file->file); | 134 | fput(sync_file->file); |
135 | 135 | ||
136 | return fence; | 136 | return fence; |
@@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd) | |||
138 | EXPORT_SYMBOL(sync_file_get_fence); | 138 | EXPORT_SYMBOL(sync_file_get_fence); |
139 | 139 | ||
140 | static int sync_file_set_fence(struct sync_file *sync_file, | 140 | static int sync_file_set_fence(struct sync_file *sync_file, |
141 | struct fence **fences, int num_fences) | 141 | struct dma_fence **fences, int num_fences) |
142 | { | 142 | { |
143 | struct fence_array *array; | 143 | struct dma_fence_array *array; |
144 | 144 | ||
145 | /* | 145 | /* |
146 | * The references for the fences in the new sync_file are held | 146 | * The references for the fences in the new sync_file are held |
147 | * in add_fence() during the merge procedure, so for num_fences == 1 | 147 | * in add_fence() during the merge procedure, so for num_fences == 1 |
148 | * we already own a new reference to the fence. For num_fences > 1 | 148 | * we already own a new reference to the fence. For num_fences > 1 |
149 | * we own the reference of the fence_array creation. | 149 | * we own the reference of the dma_fence_array creation. |
150 | */ | 150 | */ |
151 | if (num_fences == 1) { | 151 | if (num_fences == 1) { |
152 | sync_file->fence = fences[0]; | 152 | sync_file->fence = fences[0]; |
153 | kfree(fences); | 153 | kfree(fences); |
154 | } else { | 154 | } else { |
155 | array = fence_array_create(num_fences, fences, | 155 | array = dma_fence_array_create(num_fences, fences, |
156 | fence_context_alloc(1), 1, false); | 156 | dma_fence_context_alloc(1), |
157 | 1, false); | ||
157 | if (!array) | 158 | if (!array) |
158 | return -ENOMEM; | 159 | return -ENOMEM; |
159 | 160 | ||
@@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file, | |||
163 | return 0; | 164 | return 0; |
164 | } | 165 | } |
165 | 166 | ||
166 | static struct fence **get_fences(struct sync_file *sync_file, int *num_fences) | 167 | static struct dma_fence **get_fences(struct sync_file *sync_file, |
168 | int *num_fences) | ||
167 | { | 169 | { |
168 | if (fence_is_array(sync_file->fence)) { | 170 | if (dma_fence_is_array(sync_file->fence)) { |
169 | struct fence_array *array = to_fence_array(sync_file->fence); | 171 | struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); |
170 | 172 | ||
171 | *num_fences = array->num_fences; | 173 | *num_fences = array->num_fences; |
172 | return array->fences; | 174 | return array->fences; |
@@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences) | |||
176 | return &sync_file->fence; | 178 | return &sync_file->fence; |
177 | } | 179 | } |
178 | 180 | ||
179 | static void add_fence(struct fence **fences, int *i, struct fence *fence) | 181 | static void add_fence(struct dma_fence **fences, |
182 | int *i, struct dma_fence *fence) | ||
180 | { | 183 | { |
181 | fences[*i] = fence; | 184 | fences[*i] = fence; |
182 | 185 | ||
183 | if (!fence_is_signaled(fence)) { | 186 | if (!dma_fence_is_signaled(fence)) { |
184 | fence_get(fence); | 187 | dma_fence_get(fence); |
185 | (*i)++; | 188 | (*i)++; |
186 | } | 189 | } |
187 | } | 190 | } |
@@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, | |||
200 | struct sync_file *b) | 203 | struct sync_file *b) |
201 | { | 204 | { |
202 | struct sync_file *sync_file; | 205 | struct sync_file *sync_file; |
203 | struct fence **fences, **nfences, **a_fences, **b_fences; | 206 | struct dma_fence **fences, **nfences, **a_fences, **b_fences; |
204 | int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; | 207 | int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; |
205 | 208 | ||
206 | sync_file = sync_file_alloc(); | 209 | sync_file = sync_file_alloc(); |
@@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, | |||
226 | * and sync_file_create, this is a reasonable assumption. | 229 | * and sync_file_create, this is a reasonable assumption. |
227 | */ | 230 | */ |
228 | for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { | 231 | for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { |
229 | struct fence *pt_a = a_fences[i_a]; | 232 | struct dma_fence *pt_a = a_fences[i_a]; |
230 | struct fence *pt_b = b_fences[i_b]; | 233 | struct dma_fence *pt_b = b_fences[i_b]; |
231 | 234 | ||
232 | if (pt_a->context < pt_b->context) { | 235 | if (pt_a->context < pt_b->context) { |
233 | add_fence(fences, &i, pt_a); | 236 | add_fence(fences, &i, pt_a); |
@@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, | |||
255 | add_fence(fences, &i, b_fences[i_b]); | 258 | add_fence(fences, &i, b_fences[i_b]); |
256 | 259 | ||
257 | if (i == 0) | 260 | if (i == 0) |
258 | fences[i++] = fence_get(a_fences[0]); | 261 | fences[i++] = dma_fence_get(a_fences[0]); |
259 | 262 | ||
260 | if (num_fences > i) { | 263 | if (num_fences > i) { |
261 | nfences = krealloc(fences, i * sizeof(*fences), | 264 | nfences = krealloc(fences, i * sizeof(*fences), |
@@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref) | |||
286 | kref); | 289 | kref); |
287 | 290 | ||
288 | if (test_bit(POLL_ENABLED, &sync_file->fence->flags)) | 291 | if (test_bit(POLL_ENABLED, &sync_file->fence->flags)) |
289 | fence_remove_callback(sync_file->fence, &sync_file->cb); | 292 | dma_fence_remove_callback(sync_file->fence, &sync_file->cb); |
290 | fence_put(sync_file->fence); | 293 | dma_fence_put(sync_file->fence); |
291 | kfree(sync_file); | 294 | kfree(sync_file); |
292 | } | 295 | } |
293 | 296 | ||
@@ -307,12 +310,12 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait) | |||
307 | 310 | ||
308 | if (!poll_does_not_wait(wait) && | 311 | if (!poll_does_not_wait(wait) && |
309 | !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { | 312 | !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { |
310 | if (fence_add_callback(sync_file->fence, &sync_file->cb, | 313 | if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, |
311 | fence_check_cb_func) < 0) | 314 | fence_check_cb_func) < 0) |
312 | wake_up_all(&sync_file->wq); | 315 | wake_up_all(&sync_file->wq); |
313 | } | 316 | } |
314 | 317 | ||
315 | return fence_is_signaled(sync_file->fence) ? POLLIN : 0; | 318 | return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0; |
316 | } | 319 | } |
317 | 320 | ||
318 | static long sync_file_ioctl_merge(struct sync_file *sync_file, | 321 | static long sync_file_ioctl_merge(struct sync_file *sync_file, |
@@ -370,14 +373,14 @@ err_put_fd: | |||
370 | return err; | 373 | return err; |
371 | } | 374 | } |
372 | 375 | ||
373 | static void sync_fill_fence_info(struct fence *fence, | 376 | static void sync_fill_fence_info(struct dma_fence *fence, |
374 | struct sync_fence_info *info) | 377 | struct sync_fence_info *info) |
375 | { | 378 | { |
376 | strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), | 379 | strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), |
377 | sizeof(info->obj_name)); | 380 | sizeof(info->obj_name)); |
378 | strlcpy(info->driver_name, fence->ops->get_driver_name(fence), | 381 | strlcpy(info->driver_name, fence->ops->get_driver_name(fence), |
379 | sizeof(info->driver_name)); | 382 | sizeof(info->driver_name)); |
380 | if (fence_is_signaled(fence)) | 383 | if (dma_fence_is_signaled(fence)) |
381 | info->status = fence->status >= 0 ? 1 : fence->status; | 384 | info->status = fence->status >= 0 ? 1 : fence->status; |
382 | else | 385 | else |
383 | info->status = 0; | 386 | info->status = 0; |
@@ -389,7 +392,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
389 | { | 392 | { |
390 | struct sync_file_info info; | 393 | struct sync_file_info info; |
391 | struct sync_fence_info *fence_info = NULL; | 394 | struct sync_fence_info *fence_info = NULL; |
392 | struct fence **fences; | 395 | struct dma_fence **fences; |
393 | __u32 size; | 396 | __u32 size; |
394 | int num_fences, ret, i; | 397 | int num_fences, ret, i; |
395 | 398 | ||
@@ -429,7 +432,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
429 | 432 | ||
430 | no_fences: | 433 | no_fences: |
431 | strlcpy(info.name, sync_file->name, sizeof(info.name)); | 434 | strlcpy(info.name, sync_file->name, sizeof(info.name)); |
432 | info.status = fence_is_signaled(sync_file->fence); | 435 | info.status = dma_fence_is_signaled(sync_file->fence); |
433 | info.num_fences = num_fences; | 436 | info.num_fences = num_fences; |
434 | 437 | ||
435 | if (copy_to_user((void __user *)arg, &info, sizeof(info))) | 438 | if (copy_to_user((void __user *)arg, &info, sizeof(info))) |
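Taken together, the sync_file changes above leave the driver-facing flow intact, only over struct dma_fence. An illustrative export/import round trip is sketched below (fd plumbing and error handling omitted; out_fence and in_fd are placeholders for this sketch, not names from the patch):

	/* export: wrap a driver fence for userspace; NULL on allocation failure */
	struct sync_file *out = sync_file_create(out_fence);

	/* import: take a reference on the fence behind an fd from userspace */
	struct dma_fence *in = sync_file_get_fence(in_fd);

	if (in) {
		/* block until the producer signals; <= 0 means interrupted or timed out */
		if (dma_fence_wait_timeout(in, true, MAX_SCHEDULE_TIMEOUT) <= 0)
			pr_debug("in-fence wait did not complete\n");
		dma_fence_put(in);
	}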
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 248a05d02917..41bd2bf28f4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile | |||
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ | |||
24 | atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ | 24 | atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ |
25 | amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ | 25 | amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ |
26 | amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ | 26 | amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ |
27 | amdgpu_gtt_mgr.o | 27 | amdgpu_gtt_mgr.o amdgpu_vram_mgr.o |
28 | 28 | ||
29 | # add asic specific block | 29 | # add asic specific block |
30 | amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ | 30 | amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ |
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h index b8d66670bb17..06192698bd96 100644 --- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h +++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h | |||
@@ -90,7 +90,6 @@ | |||
90 | #define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24 | 90 | #define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24 |
91 | #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25 | 91 | #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25 |
92 | #define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27 | 92 | #define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27 |
93 | #define ENCODER_OBJECT_ID_VIRTUAL 0x28 | ||
94 | 93 | ||
95 | #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF | 94 | #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF |
96 | 95 | ||
@@ -120,7 +119,6 @@ | |||
120 | #define CONNECTOR_OBJECT_ID_eDP 0x14 | 119 | #define CONNECTOR_OBJECT_ID_eDP 0x14 |
121 | #define CONNECTOR_OBJECT_ID_MXM 0x15 | 120 | #define CONNECTOR_OBJECT_ID_MXM 0x15 |
122 | #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 | 121 | #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 |
123 | #define CONNECTOR_OBJECT_ID_VIRTUAL 0x17 | ||
124 | 122 | ||
125 | /* deleted */ | 123 | /* deleted */ |
126 | 124 | ||
@@ -149,7 +147,6 @@ | |||
149 | #define GRAPH_OBJECT_ENUM_ID5 0x05 | 147 | #define GRAPH_OBJECT_ENUM_ID5 0x05 |
150 | #define GRAPH_OBJECT_ENUM_ID6 0x06 | 148 | #define GRAPH_OBJECT_ENUM_ID6 0x06 |
151 | #define GRAPH_OBJECT_ENUM_ID7 0x07 | 149 | #define GRAPH_OBJECT_ENUM_ID7 0x07 |
152 | #define GRAPH_OBJECT_ENUM_VIRTUAL 0x08 | ||
153 | 150 | ||
154 | /****************************************************/ | 151 | /****************************************************/ |
155 | /* Graphics Object ID Bit definition */ | 152 | /* Graphics Object ID Bit definition */ |
@@ -411,10 +408,6 @@ | |||
411 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 408 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
412 | ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT) | 409 | ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT) |
413 | 410 | ||
414 | #define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
415 | GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\ | ||
416 | ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT) | ||
417 | |||
418 | /****************************************************/ | 411 | /****************************************************/ |
419 | /* Connector Object ID definition - Shared with BIOS */ | 412 | /* Connector Object ID definition - Shared with BIOS */ |
420 | /****************************************************/ | 413 | /****************************************************/ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 039b57e4644c..2ec7b3baeec2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -34,7 +34,7 @@ | |||
34 | #include <linux/kref.h> | 34 | #include <linux/kref.h> |
35 | #include <linux/interval_tree.h> | 35 | #include <linux/interval_tree.h> |
36 | #include <linux/hashtable.h> | 36 | #include <linux/hashtable.h> |
37 | #include <linux/fence.h> | 37 | #include <linux/dma-fence.h> |
38 | 38 | ||
39 | #include <ttm/ttm_bo_api.h> | 39 | #include <ttm/ttm_bo_api.h> |
40 | #include <ttm/ttm_bo_driver.h> | 40 | #include <ttm/ttm_bo_driver.h> |
@@ -53,7 +53,11 @@ | |||
53 | #include "amdgpu_ucode.h" | 53 | #include "amdgpu_ucode.h" |
54 | #include "amdgpu_ttm.h" | 54 | #include "amdgpu_ttm.h" |
55 | #include "amdgpu_gds.h" | 55 | #include "amdgpu_gds.h" |
56 | #include "amdgpu_sync.h" | ||
57 | #include "amdgpu_ring.h" | ||
58 | #include "amdgpu_vm.h" | ||
56 | #include "amd_powerplay.h" | 59 | #include "amd_powerplay.h" |
60 | #include "amdgpu_dpm.h" | ||
57 | #include "amdgpu_acp.h" | 61 | #include "amdgpu_acp.h" |
58 | 62 | ||
59 | #include "gpu_scheduler.h" | 63 | #include "gpu_scheduler.h" |
@@ -97,6 +101,7 @@ extern char *amdgpu_disable_cu; | |||
97 | extern int amdgpu_sclk_deep_sleep_en; | 101 | extern int amdgpu_sclk_deep_sleep_en; |
98 | extern char *amdgpu_virtual_display; | 102 | extern char *amdgpu_virtual_display; |
99 | extern unsigned amdgpu_pp_feature_mask; | 103 | extern unsigned amdgpu_pp_feature_mask; |
104 | extern int amdgpu_vram_page_split; | ||
100 | 105 | ||
101 | #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 | 106 | #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 |
102 | #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ | 107 | #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ |
@@ -107,12 +112,6 @@ extern unsigned amdgpu_pp_feature_mask; | |||
107 | #define AMDGPUFB_CONN_LIMIT 4 | 112 | #define AMDGPUFB_CONN_LIMIT 4 |
108 | #define AMDGPU_BIOS_NUM_SCRATCH 8 | 113 | #define AMDGPU_BIOS_NUM_SCRATCH 8 |
109 | 114 | ||
110 | /* max number of rings */ | ||
111 | #define AMDGPU_MAX_RINGS 16 | ||
112 | #define AMDGPU_MAX_GFX_RINGS 1 | ||
113 | #define AMDGPU_MAX_COMPUTE_RINGS 8 | ||
114 | #define AMDGPU_MAX_VCE_RINGS 3 | ||
115 | |||
116 | /* max number of IP instances */ | 115 | /* max number of IP instances */ |
117 | #define AMDGPU_MAX_SDMA_INSTANCES 2 | 116 | #define AMDGPU_MAX_SDMA_INSTANCES 2 |
118 | 117 | ||
@@ -152,8 +151,6 @@ extern unsigned amdgpu_pp_feature_mask; | |||
152 | 151 | ||
153 | struct amdgpu_device; | 152 | struct amdgpu_device; |
154 | struct amdgpu_ib; | 153 | struct amdgpu_ib; |
155 | struct amdgpu_vm; | ||
156 | struct amdgpu_ring; | ||
157 | struct amdgpu_cs_parser; | 154 | struct amdgpu_cs_parser; |
158 | struct amdgpu_job; | 155 | struct amdgpu_job; |
159 | struct amdgpu_irq_src; | 156 | struct amdgpu_irq_src; |
@@ -198,21 +195,38 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev, | |||
198 | bool amdgpu_is_idle(struct amdgpu_device *adev, | 195 | bool amdgpu_is_idle(struct amdgpu_device *adev, |
199 | enum amd_ip_block_type block_type); | 196 | enum amd_ip_block_type block_type); |
200 | 197 | ||
198 | #define AMDGPU_MAX_IP_NUM 16 | ||
199 | |||
200 | struct amdgpu_ip_block_status { | ||
201 | bool valid; | ||
202 | bool sw; | ||
203 | bool hw; | ||
204 | bool late_initialized; | ||
205 | bool hang; | ||
206 | }; | ||
207 | |||
201 | struct amdgpu_ip_block_version { | 208 | struct amdgpu_ip_block_version { |
202 | enum amd_ip_block_type type; | 209 | const enum amd_ip_block_type type; |
203 | u32 major; | 210 | const u32 major; |
204 | u32 minor; | 211 | const u32 minor; |
205 | u32 rev; | 212 | const u32 rev; |
206 | const struct amd_ip_funcs *funcs; | 213 | const struct amd_ip_funcs *funcs; |
207 | }; | 214 | }; |
208 | 215 | ||
216 | struct amdgpu_ip_block { | ||
217 | struct amdgpu_ip_block_status status; | ||
218 | const struct amdgpu_ip_block_version *version; | ||
219 | }; | ||
220 | |||
209 | int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, | 221 | int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, |
210 | enum amd_ip_block_type type, | 222 | enum amd_ip_block_type type, |
211 | u32 major, u32 minor); | 223 | u32 major, u32 minor); |
212 | 224 | ||
213 | const struct amdgpu_ip_block_version * amdgpu_get_ip_block( | 225 | struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev, |
214 | struct amdgpu_device *adev, | 226 | enum amd_ip_block_type type); |
215 | enum amd_ip_block_type type); | 227 | |
228 | int amdgpu_ip_block_add(struct amdgpu_device *adev, | ||
229 | const struct amdgpu_ip_block_version *ip_block_version); | ||
216 | 230 | ||
217 | /* provided by hw blocks that can move/clear data. e.g., gfx or sdma */ | 231 | /* provided by hw blocks that can move/clear data. e.g., gfx or sdma */ |
218 | struct amdgpu_buffer_funcs { | 232 | struct amdgpu_buffer_funcs { |
@@ -286,47 +300,6 @@ struct amdgpu_ih_funcs { | |||
286 | void (*set_rptr)(struct amdgpu_device *adev); | 300 | void (*set_rptr)(struct amdgpu_device *adev); |
287 | }; | 301 | }; |
288 | 302 | ||
289 | /* provided by hw blocks that expose a ring buffer for commands */ | ||
290 | struct amdgpu_ring_funcs { | ||
291 | /* ring read/write ptr handling */ | ||
292 | u32 (*get_rptr)(struct amdgpu_ring *ring); | ||
293 | u32 (*get_wptr)(struct amdgpu_ring *ring); | ||
294 | void (*set_wptr)(struct amdgpu_ring *ring); | ||
295 | /* validating and patching of IBs */ | ||
296 | int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx); | ||
297 | /* command emit functions */ | ||
298 | void (*emit_ib)(struct amdgpu_ring *ring, | ||
299 | struct amdgpu_ib *ib, | ||
300 | unsigned vm_id, bool ctx_switch); | ||
301 | void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, | ||
302 | uint64_t seq, unsigned flags); | ||
303 | void (*emit_pipeline_sync)(struct amdgpu_ring *ring); | ||
304 | void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, | ||
305 | uint64_t pd_addr); | ||
306 | void (*emit_hdp_flush)(struct amdgpu_ring *ring); | ||
307 | void (*emit_hdp_invalidate)(struct amdgpu_ring *ring); | ||
308 | void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, | ||
309 | uint32_t gds_base, uint32_t gds_size, | ||
310 | uint32_t gws_base, uint32_t gws_size, | ||
311 | uint32_t oa_base, uint32_t oa_size); | ||
312 | /* testing functions */ | ||
313 | int (*test_ring)(struct amdgpu_ring *ring); | ||
314 | int (*test_ib)(struct amdgpu_ring *ring, long timeout); | ||
315 | /* insert NOP packets */ | ||
316 | void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); | ||
317 | /* pad the indirect buffer to the necessary number of dw */ | ||
318 | void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); | ||
319 | unsigned (*init_cond_exec)(struct amdgpu_ring *ring); | ||
320 | void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset); | ||
321 | /* note usage for clock and power gating */ | ||
322 | void (*begin_use)(struct amdgpu_ring *ring); | ||
323 | void (*end_use)(struct amdgpu_ring *ring); | ||
324 | void (*emit_switch_buffer) (struct amdgpu_ring *ring); | ||
325 | void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); | ||
326 | unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring); | ||
327 | unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring); | ||
328 | }; | ||
329 | |||
330 | /* | 303 | /* |
331 | * BIOS. | 304 | * BIOS. |
332 | */ | 305 | */ |
@@ -364,47 +337,6 @@ struct amdgpu_clock { | |||
364 | }; | 337 | }; |
365 | 338 | ||
366 | /* | 339 | /* |
367 | * Fences. | ||
368 | */ | ||
369 | struct amdgpu_fence_driver { | ||
370 | uint64_t gpu_addr; | ||
371 | volatile uint32_t *cpu_addr; | ||
372 | /* sync_seq is protected by ring emission lock */ | ||
373 | uint32_t sync_seq; | ||
374 | atomic_t last_seq; | ||
375 | bool initialized; | ||
376 | struct amdgpu_irq_src *irq_src; | ||
377 | unsigned irq_type; | ||
378 | struct timer_list fallback_timer; | ||
379 | unsigned num_fences_mask; | ||
380 | spinlock_t lock; | ||
381 | struct fence **fences; | ||
382 | }; | ||
383 | |||
384 | /* some special values for the owner field */ | ||
385 | #define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul) | ||
386 | #define AMDGPU_FENCE_OWNER_VM ((void*)1ul) | ||
387 | |||
388 | #define AMDGPU_FENCE_FLAG_64BIT (1 << 0) | ||
389 | #define AMDGPU_FENCE_FLAG_INT (1 << 1) | ||
390 | |||
391 | int amdgpu_fence_driver_init(struct amdgpu_device *adev); | ||
392 | void amdgpu_fence_driver_fini(struct amdgpu_device *adev); | ||
393 | void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); | ||
394 | |||
395 | int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, | ||
396 | unsigned num_hw_submission); | ||
397 | int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, | ||
398 | struct amdgpu_irq_src *irq_src, | ||
399 | unsigned irq_type); | ||
400 | void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); | ||
401 | void amdgpu_fence_driver_resume(struct amdgpu_device *adev); | ||
402 | int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence); | ||
403 | void amdgpu_fence_process(struct amdgpu_ring *ring); | ||
404 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); | ||
405 | unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); | ||
406 | |||
407 | /* | ||
408 | * BO. | 340 | * BO. |
409 | */ | 341 | */ |
410 | struct amdgpu_bo_list_entry { | 342 | struct amdgpu_bo_list_entry { |
@@ -427,7 +359,7 @@ struct amdgpu_bo_va_mapping { | |||
427 | struct amdgpu_bo_va { | 359 | struct amdgpu_bo_va { |
428 | /* protected by bo being reserved */ | 360 | /* protected by bo being reserved */ |
429 | struct list_head bo_list; | 361 | struct list_head bo_list; |
430 | struct fence *last_pt_update; | 362 | struct dma_fence *last_pt_update; |
431 | unsigned ref_count; | 363 | unsigned ref_count; |
432 | 364 | ||
433 | /* protected by vm mutex and spinlock */ | 365 | /* protected by vm mutex and spinlock */ |
@@ -464,7 +396,6 @@ struct amdgpu_bo { | |||
464 | */ | 396 | */ |
465 | struct list_head va; | 397 | struct list_head va; |
466 | /* Constant after initialization */ | 398 | /* Constant after initialization */ |
467 | struct amdgpu_device *adev; | ||
468 | struct drm_gem_object gem_base; | 399 | struct drm_gem_object gem_base; |
469 | struct amdgpu_bo *parent; | 400 | struct amdgpu_bo *parent; |
470 | struct amdgpu_bo *shadow; | 401 | struct amdgpu_bo *shadow; |
@@ -543,7 +474,7 @@ struct amdgpu_sa_bo { | |||
543 | struct amdgpu_sa_manager *manager; | 474 | struct amdgpu_sa_manager *manager; |
544 | unsigned soffset; | 475 | unsigned soffset; |
545 | unsigned eoffset; | 476 | unsigned eoffset; |
546 | struct fence *fence; | 477 | struct dma_fence *fence; |
547 | }; | 478 | }; |
548 | 479 | ||
549 | /* | 480 | /* |
@@ -561,27 +492,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, | |||
561 | int amdgpu_mode_dumb_mmap(struct drm_file *filp, | 492 | int amdgpu_mode_dumb_mmap(struct drm_file *filp, |
562 | struct drm_device *dev, | 493 | struct drm_device *dev, |
563 | uint32_t handle, uint64_t *offset_p); | 494 | uint32_t handle, uint64_t *offset_p); |
564 | /* | ||
565 | * Synchronization | ||
566 | */ | ||
567 | struct amdgpu_sync { | ||
568 | DECLARE_HASHTABLE(fences, 4); | ||
569 | struct fence *last_vm_update; | ||
570 | }; | ||
571 | |||
572 | void amdgpu_sync_create(struct amdgpu_sync *sync); | ||
573 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | ||
574 | struct fence *f); | ||
575 | int amdgpu_sync_resv(struct amdgpu_device *adev, | ||
576 | struct amdgpu_sync *sync, | ||
577 | struct reservation_object *resv, | ||
578 | void *owner); | ||
579 | struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | ||
580 | struct amdgpu_ring *ring); | ||
581 | struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); | ||
582 | void amdgpu_sync_free(struct amdgpu_sync *sync); | ||
583 | int amdgpu_sync_init(void); | ||
584 | void amdgpu_sync_fini(void); | ||
585 | int amdgpu_fence_slab_init(void); | 495 | int amdgpu_fence_slab_init(void); |
586 | void amdgpu_fence_slab_fini(void); | 496 | void amdgpu_fence_slab_fini(void); |
587 | 497 | ||
@@ -703,10 +613,10 @@ struct amdgpu_flip_work { | |||
703 | uint64_t base; | 613 | uint64_t base; |
704 | struct drm_pending_vblank_event *event; | 614 | struct drm_pending_vblank_event *event; |
705 | struct amdgpu_bo *old_abo; | 615 | struct amdgpu_bo *old_abo; |
706 | struct fence *excl; | 616 | struct dma_fence *excl; |
707 | unsigned shared_count; | 617 | unsigned shared_count; |
708 | struct fence **shared; | 618 | struct dma_fence **shared; |
709 | struct fence_cb cb; | 619 | struct dma_fence_cb cb; |
710 | bool async; | 620 | bool async; |
711 | }; | 621 | }; |
712 | 622 | ||
@@ -723,14 +633,6 @@ struct amdgpu_ib { | |||
723 | uint32_t flags; | 633 | uint32_t flags; |
724 | }; | 634 | }; |
725 | 635 | ||
726 | enum amdgpu_ring_type { | ||
727 | AMDGPU_RING_TYPE_GFX, | ||
728 | AMDGPU_RING_TYPE_COMPUTE, | ||
729 | AMDGPU_RING_TYPE_SDMA, | ||
730 | AMDGPU_RING_TYPE_UVD, | ||
731 | AMDGPU_RING_TYPE_VCE | ||
732 | }; | ||
733 | |||
734 | extern const struct amd_sched_backend_ops amdgpu_sched_ops; | 636 | extern const struct amd_sched_backend_ops amdgpu_sched_ops; |
735 | 637 | ||
736 | int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, | 638 | int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, |
@@ -742,214 +644,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job); | |||
742 | void amdgpu_job_free(struct amdgpu_job *job); | 644 | void amdgpu_job_free(struct amdgpu_job *job); |
743 | int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, | 645 | int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, |
744 | struct amd_sched_entity *entity, void *owner, | 646 | struct amd_sched_entity *entity, void *owner, |
745 | struct fence **f); | 647 | struct dma_fence **f); |
746 | |||
747 | struct amdgpu_ring { | ||
748 | struct amdgpu_device *adev; | ||
749 | const struct amdgpu_ring_funcs *funcs; | ||
750 | struct amdgpu_fence_driver fence_drv; | ||
751 | struct amd_gpu_scheduler sched; | ||
752 | |||
753 | struct amdgpu_bo *ring_obj; | ||
754 | volatile uint32_t *ring; | ||
755 | unsigned rptr_offs; | ||
756 | unsigned wptr; | ||
757 | unsigned wptr_old; | ||
758 | unsigned ring_size; | ||
759 | unsigned max_dw; | ||
760 | int count_dw; | ||
761 | uint64_t gpu_addr; | ||
762 | uint32_t align_mask; | ||
763 | uint32_t ptr_mask; | ||
764 | bool ready; | ||
765 | u32 nop; | ||
766 | u32 idx; | ||
767 | u32 me; | ||
768 | u32 pipe; | ||
769 | u32 queue; | ||
770 | struct amdgpu_bo *mqd_obj; | ||
771 | u32 doorbell_index; | ||
772 | bool use_doorbell; | ||
773 | unsigned wptr_offs; | ||
774 | unsigned fence_offs; | ||
775 | uint64_t current_ctx; | ||
776 | enum amdgpu_ring_type type; | ||
777 | char name[16]; | ||
778 | unsigned cond_exe_offs; | ||
779 | u64 cond_exe_gpu_addr; | ||
780 | volatile u32 *cond_exe_cpu_addr; | ||
781 | #if defined(CONFIG_DEBUG_FS) | ||
782 | struct dentry *ent; | ||
783 | #endif | ||
784 | }; | ||
785 | |||
786 | /* | ||
787 | * VM | ||
788 | */ | ||
789 | |||
790 | /* maximum number of VMIDs */ | ||
791 | #define AMDGPU_NUM_VM 16 | ||
792 | |||
793 | /* Maximum number of PTEs the hardware can write with one command */ | ||
794 | #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF | ||
795 | |||
796 | /* number of entries in page table */ | ||
797 | #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) | ||
798 | |||
799 | /* PTBs (Page Table Blocks) need to be aligned to 32K */ | ||
800 | #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 | ||
801 | |||
802 | /* LOG2 number of continuous pages for the fragment field */ | ||
803 | #define AMDGPU_LOG2_PAGES_PER_FRAG 4 | ||
804 | |||
805 | #define AMDGPU_PTE_VALID (1 << 0) | ||
806 | #define AMDGPU_PTE_SYSTEM (1 << 1) | ||
807 | #define AMDGPU_PTE_SNOOPED (1 << 2) | ||
808 | |||
809 | /* VI only */ | ||
810 | #define AMDGPU_PTE_EXECUTABLE (1 << 4) | ||
811 | |||
812 | #define AMDGPU_PTE_READABLE (1 << 5) | ||
813 | #define AMDGPU_PTE_WRITEABLE (1 << 6) | ||
814 | |||
815 | #define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7) | ||
816 | |||
817 | /* How to programm VM fault handling */ | ||
818 | #define AMDGPU_VM_FAULT_STOP_NEVER 0 | ||
819 | #define AMDGPU_VM_FAULT_STOP_FIRST 1 | ||
820 | #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 | ||
821 | |||
822 | struct amdgpu_vm_pt { | ||
823 | struct amdgpu_bo_list_entry entry; | ||
824 | uint64_t addr; | ||
825 | uint64_t shadow_addr; | ||
826 | }; | ||
827 | |||
828 | struct amdgpu_vm { | ||
829 | /* tree of virtual addresses mapped */ | ||
830 | struct rb_root va; | ||
831 | |||
832 | /* protecting invalidated */ | ||
833 | spinlock_t status_lock; | ||
834 | |||
835 | /* BOs moved, but not yet updated in the PT */ | ||
836 | struct list_head invalidated; | ||
837 | |||
838 | /* BOs cleared in the PT because of a move */ | ||
839 | struct list_head cleared; | ||
840 | |||
841 | /* BO mappings freed, but not yet updated in the PT */ | ||
842 | struct list_head freed; | ||
843 | |||
844 | /* contains the page directory */ | ||
845 | struct amdgpu_bo *page_directory; | ||
846 | unsigned max_pde_used; | ||
847 | struct fence *page_directory_fence; | ||
848 | uint64_t last_eviction_counter; | ||
849 | |||
850 | /* array of page tables, one for each page directory entry */ | ||
851 | struct amdgpu_vm_pt *page_tables; | ||
852 | |||
853 | /* for id and flush management per ring */ | ||
854 | struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS]; | ||
855 | |||
856 | /* protecting freed */ | ||
857 | spinlock_t freed_lock; | ||
858 | |||
859 | /* Scheduler entity for page table updates */ | ||
860 | struct amd_sched_entity entity; | ||
861 | |||
862 | /* client id */ | ||
863 | u64 client_id; | ||
864 | }; | ||
865 | |||
866 | struct amdgpu_vm_id { | ||
867 | struct list_head list; | ||
868 | struct fence *first; | ||
869 | struct amdgpu_sync active; | ||
870 | struct fence *last_flush; | ||
871 | atomic64_t owner; | ||
872 | |||
873 | uint64_t pd_gpu_addr; | ||
874 | /* last flushed PD/PT update */ | ||
875 | struct fence *flushed_updates; | ||
876 | |||
877 | uint32_t current_gpu_reset_count; | ||
878 | |||
879 | uint32_t gds_base; | ||
880 | uint32_t gds_size; | ||
881 | uint32_t gws_base; | ||
882 | uint32_t gws_size; | ||
883 | uint32_t oa_base; | ||
884 | uint32_t oa_size; | ||
885 | }; | ||
886 | |||
887 | struct amdgpu_vm_manager { | ||
888 | /* Handling of VMIDs */ | ||
889 | struct mutex lock; | ||
890 | unsigned num_ids; | ||
891 | struct list_head ids_lru; | ||
892 | struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; | ||
893 | |||
894 | /* Handling of VM fences */ | ||
895 | u64 fence_context; | ||
896 | unsigned seqno[AMDGPU_MAX_RINGS]; | ||
897 | |||
898 | uint32_t max_pfn; | ||
899 | /* vram base address for page table entry */ | ||
900 | u64 vram_base_offset; | ||
901 | /* is vm enabled? */ | ||
902 | bool enabled; | ||
903 | /* vm pte handling */ | ||
904 | const struct amdgpu_vm_pte_funcs *vm_pte_funcs; | ||
905 | struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; | ||
906 | unsigned vm_pte_num_rings; | ||
907 | atomic_t vm_pte_next_ring; | ||
908 | /* client id counter */ | ||
909 | atomic64_t client_counter; | ||
910 | }; | ||
911 | |||
912 | void amdgpu_vm_manager_init(struct amdgpu_device *adev); | ||
913 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev); | ||
914 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
915 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
916 | void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, | ||
917 | struct list_head *validated, | ||
918 | struct amdgpu_bo_list_entry *entry); | ||
919 | void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
920 | struct list_head *duplicates); | ||
921 | void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, | ||
922 | struct amdgpu_vm *vm); | ||
923 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | ||
924 | struct amdgpu_sync *sync, struct fence *fence, | ||
925 | struct amdgpu_job *job); | ||
926 | int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); | ||
927 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); | ||
928 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
929 | struct amdgpu_vm *vm); | ||
930 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | ||
931 | struct amdgpu_vm *vm); | ||
932 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
933 | struct amdgpu_sync *sync); | ||
934 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | ||
935 | struct amdgpu_bo_va *bo_va, | ||
936 | bool clear); | ||
937 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | ||
938 | struct amdgpu_bo *bo); | ||
939 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | ||
940 | struct amdgpu_bo *bo); | ||
941 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | ||
942 | struct amdgpu_vm *vm, | ||
943 | struct amdgpu_bo *bo); | ||
944 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | ||
945 | struct amdgpu_bo_va *bo_va, | ||
946 | uint64_t addr, uint64_t offset, | ||
947 | uint64_t size, uint32_t flags); | ||
948 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | ||
949 | struct amdgpu_bo_va *bo_va, | ||
950 | uint64_t addr); | ||
951 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | ||
952 | struct amdgpu_bo_va *bo_va); | ||
953 | 648 | ||
954 | /* | 649 | /* |
955 | * context related structures | 650 | * context related structures |
@@ -957,7 +652,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |||
957 | 652 | ||
958 | struct amdgpu_ctx_ring { | 653 | struct amdgpu_ctx_ring { |
959 | uint64_t sequence; | 654 | uint64_t sequence; |
960 | struct fence **fences; | 655 | struct dma_fence **fences; |
961 | struct amd_sched_entity entity; | 656 | struct amd_sched_entity entity; |
962 | }; | 657 | }; |
963 | 658 | ||
@@ -966,7 +661,7 @@ struct amdgpu_ctx { | |||
966 | struct amdgpu_device *adev; | 661 | struct amdgpu_device *adev; |
967 | unsigned reset_counter; | 662 | unsigned reset_counter; |
968 | spinlock_t ring_lock; | 663 | spinlock_t ring_lock; |
969 | struct fence **fences; | 664 | struct dma_fence **fences; |
970 | struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; | 665 | struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; |
971 | bool preamble_presented; | 666 | bool preamble_presented; |
972 | }; | 667 | }; |
@@ -982,8 +677,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); | |||
982 | int amdgpu_ctx_put(struct amdgpu_ctx *ctx); | 677 | int amdgpu_ctx_put(struct amdgpu_ctx *ctx); |
983 | 678 | ||
984 | uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, | 679 | uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, |
985 | struct fence *fence); | 680 | struct dma_fence *fence); |
986 | struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, | 681 | struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, |
987 | struct amdgpu_ring *ring, uint64_t seq); | 682 | struct amdgpu_ring *ring, uint64_t seq); |
988 | 683 | ||
989 | int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, | 684 | int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, |
@@ -1093,6 +788,16 @@ struct amdgpu_scratch { | |||
1093 | /* | 788 | /* |
1094 | * GFX configurations | 789 | * GFX configurations |
1095 | */ | 790 | */ |
791 | #define AMDGPU_GFX_MAX_SE 4 | ||
792 | #define AMDGPU_GFX_MAX_SH_PER_SE 2 | ||
793 | |||
794 | struct amdgpu_rb_config { | ||
795 | uint32_t rb_backend_disable; | ||
796 | uint32_t user_rb_backend_disable; | ||
797 | uint32_t raster_config; | ||
798 | uint32_t raster_config_1; | ||
799 | }; | ||
800 | |||
1096 | struct amdgpu_gca_config { | 801 | struct amdgpu_gca_config { |
1097 | unsigned max_shader_engines; | 802 | unsigned max_shader_engines; |
1098 | unsigned max_tile_pipes; | 803 | unsigned max_tile_pipes; |
@@ -1121,6 +826,8 @@ struct amdgpu_gca_config { | |||
1121 | 826 | ||
1122 | uint32_t tile_mode_array[32]; | 827 | uint32_t tile_mode_array[32]; |
1123 | uint32_t macrotile_mode_array[16]; | 828 | uint32_t macrotile_mode_array[16]; |
829 | |||
830 | struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE]; | ||
1124 | }; | 831 | }; |
1125 | 832 | ||
1126 | struct amdgpu_cu_info { | 833 | struct amdgpu_cu_info { |
@@ -1133,6 +840,7 @@ struct amdgpu_gfx_funcs { | |||
1133 | /* get the gpu clock counter */ | 840 | /* get the gpu clock counter */ |
1134 | uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); | 841 | uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); |
1135 | void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); | 842 | void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); |
843 | void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields); | ||
1136 | }; | 844 | }; |
1137 | 845 | ||
1138 | struct amdgpu_gfx { | 846 | struct amdgpu_gfx { |
@@ -1181,23 +889,13 @@ struct amdgpu_gfx { | |||
1181 | int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, | 889 | int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
1182 | unsigned size, struct amdgpu_ib *ib); | 890 | unsigned size, struct amdgpu_ib *ib); |
1183 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, | 891 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, |
1184 | struct fence *f); | 892 | struct dma_fence *f); |
1185 | int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | 893 | int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, |
1186 | struct amdgpu_ib *ib, struct fence *last_vm_update, | 894 | struct amdgpu_ib *ib, struct dma_fence *last_vm_update, |
1187 | struct amdgpu_job *job, struct fence **f); | 895 | struct amdgpu_job *job, struct dma_fence **f); |
1188 | int amdgpu_ib_pool_init(struct amdgpu_device *adev); | 896 | int amdgpu_ib_pool_init(struct amdgpu_device *adev); |
1189 | void amdgpu_ib_pool_fini(struct amdgpu_device *adev); | 897 | void amdgpu_ib_pool_fini(struct amdgpu_device *adev); |
1190 | int amdgpu_ib_ring_tests(struct amdgpu_device *adev); | 898 | int amdgpu_ib_ring_tests(struct amdgpu_device *adev); |
1191 | int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); | ||
1192 | void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); | ||
1193 | void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); | ||
1194 | void amdgpu_ring_commit(struct amdgpu_ring *ring); | ||
1195 | void amdgpu_ring_undo(struct amdgpu_ring *ring); | ||
1196 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | ||
1197 | unsigned ring_size, u32 nop, u32 align_mask, | ||
1198 | struct amdgpu_irq_src *irq_src, unsigned irq_type, | ||
1199 | enum amdgpu_ring_type ring_type); | ||
1200 | void amdgpu_ring_fini(struct amdgpu_ring *ring); | ||
1201 | 899 | ||
1202 | /* | 900 | /* |
1203 | * CS. | 901 | * CS. |
@@ -1225,7 +923,7 @@ struct amdgpu_cs_parser { | |||
1225 | struct amdgpu_bo_list *bo_list; | 923 | struct amdgpu_bo_list *bo_list; |
1226 | struct amdgpu_bo_list_entry vm_pd; | 924 | struct amdgpu_bo_list_entry vm_pd; |
1227 | struct list_head validated; | 925 | struct list_head validated; |
1228 | struct fence *fence; | 926 | struct dma_fence *fence; |
1229 | uint64_t bytes_moved_threshold; | 927 | uint64_t bytes_moved_threshold; |
1230 | uint64_t bytes_moved; | 928 | uint64_t bytes_moved; |
1231 | struct amdgpu_bo_list_entry *evictable; | 929 | struct amdgpu_bo_list_entry *evictable; |
@@ -1245,7 +943,7 @@ struct amdgpu_job { | |||
1245 | struct amdgpu_ring *ring; | 943 | struct amdgpu_ring *ring; |
1246 | struct amdgpu_sync sync; | 944 | struct amdgpu_sync sync; |
1247 | struct amdgpu_ib *ibs; | 945 | struct amdgpu_ib *ibs; |
1248 | struct fence *fence; /* the hw fence */ | 946 | struct dma_fence *fence; /* the hw fence */ |
1249 | uint32_t preamble_status; | 947 | uint32_t preamble_status; |
1250 | uint32_t num_ibs; | 948 | uint32_t num_ibs; |
1251 | void *owner; | 949 | void *owner; |
@@ -1294,354 +992,6 @@ struct amdgpu_wb { | |||
1294 | int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); | 992 | int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); |
1295 | void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); | 993 | void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); |
1296 | 994 | ||
1297 | |||
1298 | |||
1299 | enum amdgpu_int_thermal_type { | ||
1300 | THERMAL_TYPE_NONE, | ||
1301 | THERMAL_TYPE_EXTERNAL, | ||
1302 | THERMAL_TYPE_EXTERNAL_GPIO, | ||
1303 | THERMAL_TYPE_RV6XX, | ||
1304 | THERMAL_TYPE_RV770, | ||
1305 | THERMAL_TYPE_ADT7473_WITH_INTERNAL, | ||
1306 | THERMAL_TYPE_EVERGREEN, | ||
1307 | THERMAL_TYPE_SUMO, | ||
1308 | THERMAL_TYPE_NI, | ||
1309 | THERMAL_TYPE_SI, | ||
1310 | THERMAL_TYPE_EMC2103_WITH_INTERNAL, | ||
1311 | THERMAL_TYPE_CI, | ||
1312 | THERMAL_TYPE_KV, | ||
1313 | }; | ||
1314 | |||
1315 | enum amdgpu_dpm_auto_throttle_src { | ||
1316 | AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, | ||
1317 | AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL | ||
1318 | }; | ||
1319 | |||
1320 | enum amdgpu_dpm_event_src { | ||
1321 | AMDGPU_DPM_EVENT_SRC_ANALOG = 0, | ||
1322 | AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1, | ||
1323 | AMDGPU_DPM_EVENT_SRC_DIGITAL = 2, | ||
1324 | AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, | ||
1325 | AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 | ||
1326 | }; | ||
1327 | |||
1328 | #define AMDGPU_MAX_VCE_LEVELS 6 | ||
1329 | |||
1330 | enum amdgpu_vce_level { | ||
1331 | AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ | ||
1332 | AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ | ||
1333 | AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ | ||
1334 | AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ | ||
1335 | AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ | ||
1336 | AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ | ||
1337 | }; | ||
1338 | |||
1339 | struct amdgpu_ps { | ||
1340 | u32 caps; /* vbios flags */ | ||
1341 | u32 class; /* vbios flags */ | ||
1342 | u32 class2; /* vbios flags */ | ||
1343 | /* UVD clocks */ | ||
1344 | u32 vclk; | ||
1345 | u32 dclk; | ||
1346 | /* VCE clocks */ | ||
1347 | u32 evclk; | ||
1348 | u32 ecclk; | ||
1349 | bool vce_active; | ||
1350 | enum amdgpu_vce_level vce_level; | ||
1351 | /* asic priv */ | ||
1352 | void *ps_priv; | ||
1353 | }; | ||
1354 | |||
1355 | struct amdgpu_dpm_thermal { | ||
1356 | /* thermal interrupt work */ | ||
1357 | struct work_struct work; | ||
1358 | /* low temperature threshold */ | ||
1359 | int min_temp; | ||
1360 | /* high temperature threshold */ | ||
1361 | int max_temp; | ||
1362 | /* was last interrupt low to high or high to low */ | ||
1363 | bool high_to_low; | ||
1364 | /* interrupt source */ | ||
1365 | struct amdgpu_irq_src irq; | ||
1366 | }; | ||
1367 | |||
1368 | enum amdgpu_clk_action | ||
1369 | { | ||
1370 | AMDGPU_SCLK_UP = 1, | ||
1371 | AMDGPU_SCLK_DOWN | ||
1372 | }; | ||
1373 | |||
1374 | struct amdgpu_blacklist_clocks | ||
1375 | { | ||
1376 | u32 sclk; | ||
1377 | u32 mclk; | ||
1378 | enum amdgpu_clk_action action; | ||
1379 | }; | ||
1380 | |||
1381 | struct amdgpu_clock_and_voltage_limits { | ||
1382 | u32 sclk; | ||
1383 | u32 mclk; | ||
1384 | u16 vddc; | ||
1385 | u16 vddci; | ||
1386 | }; | ||
1387 | |||
1388 | struct amdgpu_clock_array { | ||
1389 | u32 count; | ||
1390 | u32 *values; | ||
1391 | }; | ||
1392 | |||
1393 | struct amdgpu_clock_voltage_dependency_entry { | ||
1394 | u32 clk; | ||
1395 | u16 v; | ||
1396 | }; | ||
1397 | |||
1398 | struct amdgpu_clock_voltage_dependency_table { | ||
1399 | u32 count; | ||
1400 | struct amdgpu_clock_voltage_dependency_entry *entries; | ||
1401 | }; | ||
1402 | |||
1403 | union amdgpu_cac_leakage_entry { | ||
1404 | struct { | ||
1405 | u16 vddc; | ||
1406 | u32 leakage; | ||
1407 | }; | ||
1408 | struct { | ||
1409 | u16 vddc1; | ||
1410 | u16 vddc2; | ||
1411 | u16 vddc3; | ||
1412 | }; | ||
1413 | }; | ||
1414 | |||
1415 | struct amdgpu_cac_leakage_table { | ||
1416 | u32 count; | ||
1417 | union amdgpu_cac_leakage_entry *entries; | ||
1418 | }; | ||
1419 | |||
1420 | struct amdgpu_phase_shedding_limits_entry { | ||
1421 | u16 voltage; | ||
1422 | u32 sclk; | ||
1423 | u32 mclk; | ||
1424 | }; | ||
1425 | |||
1426 | struct amdgpu_phase_shedding_limits_table { | ||
1427 | u32 count; | ||
1428 | struct amdgpu_phase_shedding_limits_entry *entries; | ||
1429 | }; | ||
1430 | |||
1431 | struct amdgpu_uvd_clock_voltage_dependency_entry { | ||
1432 | u32 vclk; | ||
1433 | u32 dclk; | ||
1434 | u16 v; | ||
1435 | }; | ||
1436 | |||
1437 | struct amdgpu_uvd_clock_voltage_dependency_table { | ||
1438 | u8 count; | ||
1439 | struct amdgpu_uvd_clock_voltage_dependency_entry *entries; | ||
1440 | }; | ||
1441 | |||
1442 | struct amdgpu_vce_clock_voltage_dependency_entry { | ||
1443 | u32 ecclk; | ||
1444 | u32 evclk; | ||
1445 | u16 v; | ||
1446 | }; | ||
1447 | |||
1448 | struct amdgpu_vce_clock_voltage_dependency_table { | ||
1449 | u8 count; | ||
1450 | struct amdgpu_vce_clock_voltage_dependency_entry *entries; | ||
1451 | }; | ||
1452 | |||
1453 | struct amdgpu_ppm_table { | ||
1454 | u8 ppm_design; | ||
1455 | u16 cpu_core_number; | ||
1456 | u32 platform_tdp; | ||
1457 | u32 small_ac_platform_tdp; | ||
1458 | u32 platform_tdc; | ||
1459 | u32 small_ac_platform_tdc; | ||
1460 | u32 apu_tdp; | ||
1461 | u32 dgpu_tdp; | ||
1462 | u32 dgpu_ulv_power; | ||
1463 | u32 tj_max; | ||
1464 | }; | ||
1465 | |||
1466 | struct amdgpu_cac_tdp_table { | ||
1467 | u16 tdp; | ||
1468 | u16 configurable_tdp; | ||
1469 | u16 tdc; | ||
1470 | u16 battery_power_limit; | ||
1471 | u16 small_power_limit; | ||
1472 | u16 low_cac_leakage; | ||
1473 | u16 high_cac_leakage; | ||
1474 | u16 maximum_power_delivery_limit; | ||
1475 | }; | ||
1476 | |||
1477 | struct amdgpu_dpm_dynamic_state { | ||
1478 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk; | ||
1479 | struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk; | ||
1480 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk; | ||
1481 | struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk; | ||
1482 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk; | ||
1483 | struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; | ||
1484 | struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table; | ||
1485 | struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table; | ||
1486 | struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table; | ||
1487 | struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk; | ||
1488 | struct amdgpu_clock_array valid_sclk_values; | ||
1489 | struct amdgpu_clock_array valid_mclk_values; | ||
1490 | struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc; | ||
1491 | struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac; | ||
1492 | u32 mclk_sclk_ratio; | ||
1493 | u32 sclk_mclk_delta; | ||
1494 | u16 vddc_vddci_delta; | ||
1495 | u16 min_vddc_for_pcie_gen2; | ||
1496 | struct amdgpu_cac_leakage_table cac_leakage_table; | ||
1497 | struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table; | ||
1498 | struct amdgpu_ppm_table *ppm_table; | ||
1499 | struct amdgpu_cac_tdp_table *cac_tdp_table; | ||
1500 | }; | ||
1501 | |||
1502 | struct amdgpu_dpm_fan { | ||
1503 | u16 t_min; | ||
1504 | u16 t_med; | ||
1505 | u16 t_high; | ||
1506 | u16 pwm_min; | ||
1507 | u16 pwm_med; | ||
1508 | u16 pwm_high; | ||
1509 | u8 t_hyst; | ||
1510 | u32 cycle_delay; | ||
1511 | u16 t_max; | ||
1512 | u8 control_mode; | ||
1513 | u16 default_max_fan_pwm; | ||
1514 | u16 default_fan_output_sensitivity; | ||
1515 | u16 fan_output_sensitivity; | ||
1516 | bool ucode_fan_control; | ||
1517 | }; | ||
1518 | |||
1519 | enum amdgpu_pcie_gen { | ||
1520 | AMDGPU_PCIE_GEN1 = 0, | ||
1521 | AMDGPU_PCIE_GEN2 = 1, | ||
1522 | AMDGPU_PCIE_GEN3 = 2, | ||
1523 | AMDGPU_PCIE_GEN_INVALID = 0xffff | ||
1524 | }; | ||
1525 | |||
1526 | enum amdgpu_dpm_forced_level { | ||
1527 | AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, | ||
1528 | AMDGPU_DPM_FORCED_LEVEL_LOW = 1, | ||
1529 | AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, | ||
1530 | AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, | ||
1531 | }; | ||
1532 | |||
1533 | struct amdgpu_vce_state { | ||
1534 | /* vce clocks */ | ||
1535 | u32 evclk; | ||
1536 | u32 ecclk; | ||
1537 | /* gpu clocks */ | ||
1538 | u32 sclk; | ||
1539 | u32 mclk; | ||
1540 | u8 clk_idx; | ||
1541 | u8 pstate; | ||
1542 | }; | ||
1543 | |||
1544 | struct amdgpu_dpm_funcs { | ||
1545 | int (*get_temperature)(struct amdgpu_device *adev); | ||
1546 | int (*pre_set_power_state)(struct amdgpu_device *adev); | ||
1547 | int (*set_power_state)(struct amdgpu_device *adev); | ||
1548 | void (*post_set_power_state)(struct amdgpu_device *adev); | ||
1549 | void (*display_configuration_changed)(struct amdgpu_device *adev); | ||
1550 | u32 (*get_sclk)(struct amdgpu_device *adev, bool low); | ||
1551 | u32 (*get_mclk)(struct amdgpu_device *adev, bool low); | ||
1552 | void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps); | ||
1553 | void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m); | ||
1554 | int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level); | ||
1555 | bool (*vblank_too_short)(struct amdgpu_device *adev); | ||
1556 | void (*powergate_uvd)(struct amdgpu_device *adev, bool gate); | ||
1557 | void (*powergate_vce)(struct amdgpu_device *adev, bool gate); | ||
1558 | void (*enable_bapm)(struct amdgpu_device *adev, bool enable); | ||
1559 | void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode); | ||
1560 | u32 (*get_fan_control_mode)(struct amdgpu_device *adev); | ||
1561 | int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); | ||
1562 | int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); | ||
1563 | int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask); | ||
1564 | int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf); | ||
1565 | int (*get_sclk_od)(struct amdgpu_device *adev); | ||
1566 | int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value); | ||
1567 | int (*get_mclk_od)(struct amdgpu_device *adev); | ||
1568 | int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value); | ||
1569 | }; | ||
1570 | |||
1571 | struct amdgpu_dpm { | ||
1572 | struct amdgpu_ps *ps; | ||
1573 | /* number of valid power states */ | ||
1574 | int num_ps; | ||
1575 | /* current power state that is active */ | ||
1576 | struct amdgpu_ps *current_ps; | ||
1577 | /* requested power state */ | ||
1578 | struct amdgpu_ps *requested_ps; | ||
1579 | /* boot up power state */ | ||
1580 | struct amdgpu_ps *boot_ps; | ||
1581 | /* default uvd power state */ | ||
1582 | struct amdgpu_ps *uvd_ps; | ||
1583 | /* vce requirements */ | ||
1584 | struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS]; | ||
1585 | enum amdgpu_vce_level vce_level; | ||
1586 | enum amd_pm_state_type state; | ||
1587 | enum amd_pm_state_type user_state; | ||
1588 | u32 platform_caps; | ||
1589 | u32 voltage_response_time; | ||
1590 | u32 backbias_response_time; | ||
1591 | void *priv; | ||
1592 | u32 new_active_crtcs; | ||
1593 | int new_active_crtc_count; | ||
1594 | u32 current_active_crtcs; | ||
1595 | int current_active_crtc_count; | ||
1596 | struct amdgpu_dpm_dynamic_state dyn_state; | ||
1597 | struct amdgpu_dpm_fan fan; | ||
1598 | u32 tdp_limit; | ||
1599 | u32 near_tdp_limit; | ||
1600 | u32 near_tdp_limit_adjusted; | ||
1601 | u32 sq_ramping_threshold; | ||
1602 | u32 cac_leakage; | ||
1603 | u16 tdp_od_limit; | ||
1604 | u32 tdp_adjustment; | ||
1605 | u16 load_line_slope; | ||
1606 | bool power_control; | ||
1607 | bool ac_power; | ||
1608 | /* special states active */ | ||
1609 | bool thermal_active; | ||
1610 | bool uvd_active; | ||
1611 | bool vce_active; | ||
1612 | /* thermal handling */ | ||
1613 | struct amdgpu_dpm_thermal thermal; | ||
1614 | /* forced levels */ | ||
1615 | enum amdgpu_dpm_forced_level forced_level; | ||
1616 | }; | ||
1617 | |||
1618 | struct amdgpu_pm { | ||
1619 | struct mutex mutex; | ||
1620 | u32 current_sclk; | ||
1621 | u32 current_mclk; | ||
1622 | u32 default_sclk; | ||
1623 | u32 default_mclk; | ||
1624 | struct amdgpu_i2c_chan *i2c_bus; | ||
1625 | /* internal thermal controller on rv6xx+ */ | ||
1626 | enum amdgpu_int_thermal_type int_thermal_type; | ||
1627 | struct device *int_hwmon_dev; | ||
1628 | /* fan control parameters */ | ||
1629 | bool no_fan; | ||
1630 | u8 fan_pulses_per_revolution; | ||
1631 | u8 fan_min_rpm; | ||
1632 | u8 fan_max_rpm; | ||
1633 | /* dpm */ | ||
1634 | bool dpm_enabled; | ||
1635 | bool sysfs_initialized; | ||
1636 | struct amdgpu_dpm dpm; | ||
1637 | const struct firmware *fw; /* SMC firmware */ | ||
1638 | uint32_t fw_version; | ||
1639 | const struct amdgpu_dpm_funcs *funcs; | ||
1640 | uint32_t pcie_gen_mask; | ||
1641 | uint32_t pcie_mlw_mask; | ||
1642 | struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */ | ||
1643 | }; | ||
1644 | |||
1645 | void amdgpu_get_pcie_info(struct amdgpu_device *adev); | 995 | void amdgpu_get_pcie_info(struct amdgpu_device *adev); |
1646 | 996 | ||
1647 | /* | 997 | /* |
@@ -1939,14 +1289,6 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); | |||
1939 | typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); | 1289 | typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); |
1940 | typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); | 1290 | typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); |
1941 | 1291 | ||
1942 | struct amdgpu_ip_block_status { | ||
1943 | bool valid; | ||
1944 | bool sw; | ||
1945 | bool hw; | ||
1946 | bool late_initialized; | ||
1947 | bool hang; | ||
1948 | }; | ||
1949 | |||
1950 | struct amdgpu_device { | 1292 | struct amdgpu_device { |
1951 | struct device *dev; | 1293 | struct device *dev; |
1952 | struct drm_device *ddev; | 1294 | struct drm_device *ddev; |
@@ -2102,9 +1444,8 @@ struct amdgpu_device { | |||
2102 | /* GDS */ | 1444 | /* GDS */ |
2103 | struct amdgpu_gds gds; | 1445 | struct amdgpu_gds gds; |
2104 | 1446 | ||
2105 | const struct amdgpu_ip_block_version *ip_blocks; | 1447 | struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM]; |
2106 | int num_ip_blocks; | 1448 | int num_ip_blocks; |
2107 | struct amdgpu_ip_block_status *ip_block_status; | ||
2108 | struct mutex mn_lock; | 1449 | struct mutex mn_lock; |
2109 | DECLARE_HASHTABLE(mn_hash, 7); | 1450 | DECLARE_HASHTABLE(mn_hash, 7); |
2110 | 1451 | ||
@@ -2127,6 +1468,11 @@ struct amdgpu_device { | |||
2127 | 1468 | ||
2128 | }; | 1469 | }; |
2129 | 1470 | ||
1471 | static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) | ||
1472 | { | ||
1473 | return container_of(bdev, struct amdgpu_device, mman.bdev); | ||
1474 | } | ||
1475 | |||
2130 | bool amdgpu_device_is_px(struct drm_device *dev); | 1476 | bool amdgpu_device_is_px(struct drm_device *dev); |
2131 | int amdgpu_device_init(struct amdgpu_device *adev, | 1477 | int amdgpu_device_init(struct amdgpu_device *adev, |
2132 | struct drm_device *ddev, | 1478 | struct drm_device *ddev, |
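The hunk above introduces the amdgpu_ttm_adev() container_of helper; the amdgpu_cs.c hunks further down in this diff switch callers from bo->adev to it. A minimal sketch of the new pattern — example_bytes_moved() is an invented name, not part of the patch:

static u64 example_bytes_moved(struct amdgpu_bo *bo)
{
	/* recover the device from the BO's TTM bdev via container_of() */
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	/* callers previously spelled this bo->adev->num_bytes_moved */
	return atomic64_read(&adev->num_bytes_moved);
}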
@@ -2278,8 +1624,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) | |||
2278 | #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) | 1624 | #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) |
2279 | #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) | 1625 | #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) |
2280 | #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) | 1626 | #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) |
2281 | #define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r)) | ||
2282 | #define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r)) | ||
2283 | #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) | 1627 | #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) |
2284 | #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) | 1628 | #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) |
2285 | #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) | 1629 | #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) |
@@ -2301,108 +1645,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) | |||
2301 | #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) | 1645 | #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) |
2302 | #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) | 1646 | #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) |
2303 | #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) | 1647 | #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) |
2304 | #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) | ||
2305 | #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) | ||
2306 | #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev)) | ||
2307 | #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) | ||
2308 | #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) | ||
2309 | #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) | ||
2310 | #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) | ||
2311 | #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) | 1648 | #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) |
2312 | #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) | 1649 | #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) |
2313 | |||
2314 | #define amdgpu_dpm_read_sensor(adev, idx, value) \ | ||
2315 | ((adev)->pp_enabled ? \ | ||
2316 | (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \ | ||
2317 | -EINVAL) | ||
2318 | |||
2319 | #define amdgpu_dpm_get_temperature(adev) \ | ||
2320 | ((adev)->pp_enabled ? \ | ||
2321 | (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ | ||
2322 | (adev)->pm.funcs->get_temperature((adev))) | ||
2323 | |||
2324 | #define amdgpu_dpm_set_fan_control_mode(adev, m) \ | ||
2325 | ((adev)->pp_enabled ? \ | ||
2326 | (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ | ||
2327 | (adev)->pm.funcs->set_fan_control_mode((adev), (m))) | ||
2328 | |||
2329 | #define amdgpu_dpm_get_fan_control_mode(adev) \ | ||
2330 | ((adev)->pp_enabled ? \ | ||
2331 | (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ | ||
2332 | (adev)->pm.funcs->get_fan_control_mode((adev))) | ||
2333 | |||
2334 | #define amdgpu_dpm_set_fan_speed_percent(adev, s) \ | ||
2335 | ((adev)->pp_enabled ? \ | ||
2336 | (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ | ||
2337 | (adev)->pm.funcs->set_fan_speed_percent((adev), (s))) | ||
2338 | |||
2339 | #define amdgpu_dpm_get_fan_speed_percent(adev, s) \ | ||
2340 | ((adev)->pp_enabled ? \ | ||
2341 | (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ | ||
2342 | (adev)->pm.funcs->get_fan_speed_percent((adev), (s))) | ||
2343 | |||
2344 | #define amdgpu_dpm_get_sclk(adev, l) \ | ||
2345 | ((adev)->pp_enabled ? \ | ||
2346 | (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ | ||
2347 | (adev)->pm.funcs->get_sclk((adev), (l))) | ||
2348 | |||
2349 | #define amdgpu_dpm_get_mclk(adev, l) \ | ||
2350 | ((adev)->pp_enabled ? \ | ||
2351 | (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ | ||
2352 | (adev)->pm.funcs->get_mclk((adev), (l))) | ||
2353 | |||
2354 | |||
2355 | #define amdgpu_dpm_force_performance_level(adev, l) \ | ||
2356 | ((adev)->pp_enabled ? \ | ||
2357 | (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ | ||
2358 | (adev)->pm.funcs->force_performance_level((adev), (l))) | ||
2359 | |||
2360 | #define amdgpu_dpm_powergate_uvd(adev, g) \ | ||
2361 | ((adev)->pp_enabled ? \ | ||
2362 | (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ | ||
2363 | (adev)->pm.funcs->powergate_uvd((adev), (g))) | ||
2364 | |||
2365 | #define amdgpu_dpm_powergate_vce(adev, g) \ | ||
2366 | ((adev)->pp_enabled ? \ | ||
2367 | (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ | ||
2368 | (adev)->pm.funcs->powergate_vce((adev), (g))) | ||
2369 | |||
2370 | #define amdgpu_dpm_get_current_power_state(adev) \ | ||
2371 | (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) | ||
2372 | |||
2373 | #define amdgpu_dpm_get_performance_level(adev) \ | ||
2374 | (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) | ||
2375 | |||
2376 | #define amdgpu_dpm_get_pp_num_states(adev, data) \ | ||
2377 | (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) | ||
2378 | |||
2379 | #define amdgpu_dpm_get_pp_table(adev, table) \ | ||
2380 | (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) | ||
2381 | |||
2382 | #define amdgpu_dpm_set_pp_table(adev, buf, size) \ | ||
2383 | (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) | ||
2384 | |||
2385 | #define amdgpu_dpm_print_clock_levels(adev, type, buf) \ | ||
2386 | (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) | ||
2387 | |||
2388 | #define amdgpu_dpm_force_clock_level(adev, type, level) \ | ||
2389 | (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) | ||
2390 | |||
2391 | #define amdgpu_dpm_get_sclk_od(adev) \ | ||
2392 | (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle) | ||
2393 | |||
2394 | #define amdgpu_dpm_set_sclk_od(adev, value) \ | ||
2395 | (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value) | ||
2396 | |||
2397 | #define amdgpu_dpm_get_mclk_od(adev) \ | ||
2398 | ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) | ||
2399 | |||
2400 | #define amdgpu_dpm_set_mclk_od(adev, value) \ | ||
2401 | ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) | ||
2402 | |||
2403 | #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ | ||
2404 | (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) | ||
2405 | |||
2406 | #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) | 1650 | #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) |
2407 | 1651 | ||
2408 | /* Common functions */ | 1652 | /* Common functions */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 892d60fb225b..2f9f96cc9f65 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | |||
@@ -265,14 +265,14 @@ static int acp_hw_init(void *handle) | |||
265 | 265 | ||
266 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 266 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
267 | 267 | ||
268 | const struct amdgpu_ip_block_version *ip_version = | 268 | const struct amdgpu_ip_block *ip_block = |
269 | amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP); | 269 | amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP); |
270 | 270 | ||
271 | if (!ip_version) | 271 | if (!ip_block) |
272 | return -EINVAL; | 272 | return -EINVAL; |
273 | 273 | ||
274 | r = amd_acp_hw_init(adev->acp.cgs_device, | 274 | r = amd_acp_hw_init(adev->acp.cgs_device, |
275 | ip_version->major, ip_version->minor); | 275 | ip_block->version->major, ip_block->version->minor); |
276 | /* -ENODEV means board uses AZ rather than ACP */ | 276 | /* -ENODEV means board uses AZ rather than ACP */ |
277 | if (r == -ENODEV) | 277 | if (r == -ENODEV) |
278 | return 0; | 278 | return 0; |
@@ -456,7 +456,7 @@ static int acp_set_powergating_state(void *handle, | |||
456 | return 0; | 456 | return 0; |
457 | } | 457 | } |
458 | 458 | ||
459 | const struct amd_ip_funcs acp_ip_funcs = { | 459 | static const struct amd_ip_funcs acp_ip_funcs = { |
460 | .name = "acp_ip", | 460 | .name = "acp_ip", |
461 | .early_init = acp_early_init, | 461 | .early_init = acp_early_init, |
462 | .late_init = NULL, | 462 | .late_init = NULL, |
@@ -472,3 +472,12 @@ const struct amd_ip_funcs acp_ip_funcs = { | |||
472 | .set_clockgating_state = acp_set_clockgating_state, | 472 | .set_clockgating_state = acp_set_clockgating_state, |
473 | .set_powergating_state = acp_set_powergating_state, | 473 | .set_powergating_state = acp_set_powergating_state, |
474 | }; | 474 | }; |
475 | |||
476 | const struct amdgpu_ip_block_version acp_ip_block = | ||
477 | { | ||
478 | .type = AMD_IP_BLOCK_TYPE_ACP, | ||
479 | .major = 2, | ||
480 | .minor = 2, | ||
481 | .rev = 0, | ||
482 | .funcs = &acp_ip_funcs, | ||
483 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h index 8a396313c86f..a288ce25c176 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h | |||
@@ -37,6 +37,6 @@ struct amdgpu_acp { | |||
37 | struct acp_pm_domain *acp_genpd; | 37 | struct acp_pm_domain *acp_genpd; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | extern const struct amd_ip_funcs acp_ip_funcs; | 40 | extern const struct amdgpu_ip_block_version acp_ip_block; |
41 | 41 | ||
42 | #endif /* __AMDGPU_ACP_H__ */ | 42 | #endif /* __AMDGPU_ACP_H__ */ |
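Taken together, the two ACP hunks show the shape of the IP-block rework visible throughout this diff: each block now exports a const struct amdgpu_ip_block_version (here acp_ip_block) instead of bare amd_ip_funcs, and consumers reach major/minor/funcs through the ->version pointer of the amdgpu_ip_block returned by amdgpu_get_ip_block(). A rough consumer-side sketch mirroring acp_hw_init() above — example_query_acp() is an invented name for illustration only:

static int example_query_acp(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block =
		amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	/* version data now sits behind ->version rather than in the block itself */
	return amd_acp_hw_init(adev->acp.cgs_device,
			       ip_block->version->major,
			       ip_block->version->minor);
}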
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 8e6bf548d689..56a86dd5789e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | |||
@@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, | |||
1115 | return 0; | 1115 | return 0; |
1116 | } | 1116 | } |
1117 | 1117 | ||
1118 | uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev) | ||
1119 | { | ||
1120 | GET_ENGINE_CLOCK_PS_ALLOCATION args; | ||
1121 | int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); | ||
1122 | |||
1123 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1124 | return le32_to_cpu(args.ulReturnEngineClock); | ||
1125 | } | ||
1126 | |||
1127 | uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev) | ||
1128 | { | ||
1129 | GET_MEMORY_CLOCK_PS_ALLOCATION args; | ||
1130 | int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); | ||
1131 | |||
1132 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1133 | return le32_to_cpu(args.ulReturnMemoryClock); | ||
1134 | } | ||
1135 | |||
1136 | void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev, | ||
1137 | uint32_t eng_clock) | ||
1138 | { | ||
1139 | SET_ENGINE_CLOCK_PS_ALLOCATION args; | ||
1140 | int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); | ||
1141 | |||
1142 | args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */ | ||
1143 | |||
1144 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1145 | } | ||
1146 | |||
1147 | void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev, | ||
1148 | uint32_t mem_clock) | ||
1149 | { | ||
1150 | SET_MEMORY_CLOCK_PS_ALLOCATION args; | ||
1151 | int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock); | ||
1152 | |||
1153 | if (adev->flags & AMD_IS_APU) | ||
1154 | return; | ||
1155 | |||
1156 | args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */ | ||
1157 | |||
1158 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1159 | } | ||
1160 | |||
1161 | void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, | 1118 | void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, |
1162 | u32 eng_clock, u32 mem_clock) | 1119 | u32 eng_clock, u32 mem_clock) |
1163 | { | 1120 | { |
@@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device * | |||
1256 | return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage); | 1213 | return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage); |
1257 | } | 1214 | } |
1258 | 1215 | ||
1259 | void amdgpu_atombios_set_voltage(struct amdgpu_device *adev, | ||
1260 | u16 voltage_level, | ||
1261 | u8 voltage_type) | ||
1262 | { | ||
1263 | union set_voltage args; | ||
1264 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); | ||
1265 | u8 frev, crev, volt_index = voltage_level; | ||
1266 | |||
1267 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
1268 | return; | ||
1269 | |||
1270 | /* 0xff01 is a flag rather than an actual voltage */ | ||
1271 | if (voltage_level == 0xff01) | ||
1272 | return; | ||
1273 | |||
1274 | switch (crev) { | ||
1275 | case 1: | ||
1276 | args.v1.ucVoltageType = voltage_type; | ||
1277 | args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; | ||
1278 | args.v1.ucVoltageIndex = volt_index; | ||
1279 | break; | ||
1280 | case 2: | ||
1281 | args.v2.ucVoltageType = voltage_type; | ||
1282 | args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; | ||
1283 | args.v2.usVoltageLevel = cpu_to_le16(voltage_level); | ||
1284 | break; | ||
1285 | case 3: | ||
1286 | args.v3.ucVoltageType = voltage_type; | ||
1287 | args.v3.ucVoltageMode = ATOM_SET_VOLTAGE; | ||
1288 | args.v3.usVoltageLevel = cpu_to_le16(voltage_level); | ||
1289 | break; | ||
1290 | default: | ||
1291 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
1292 | return; | ||
1293 | } | ||
1294 | |||
1295 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1296 | } | ||
1297 | |||
1298 | int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev, | 1216 | int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev, |
1299 | u16 *leakage_id) | 1217 | u16 *leakage_id) |
1300 | { | 1218 | { |
@@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev) | |||
1784 | WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]); | 1702 | WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]); |
1785 | } | 1703 | } |
1786 | 1704 | ||
1705 | void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, | ||
1706 | bool hung) | ||
1707 | { | ||
1708 | u32 tmp = RREG32(mmBIOS_SCRATCH_3); | ||
1709 | |||
1710 | if (hung) | ||
1711 | tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
1712 | else | ||
1713 | tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
1714 | |||
1715 | WREG32(mmBIOS_SCRATCH_3, tmp); | ||
1716 | } | ||
1717 | |||
1787 | /* Atom needs data in little endian format | 1718 | /* Atom needs data in little endian format |
1788 | * so swap as appropriate when copying data to | 1719 | * so swap as appropriate when copying data to |
1789 | * or from atom. Note that atom operates on | 1720 | * or from atom. Note that atom operates on |
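The new amdgpu_atombios_scratch_regs_engine_hung() helper simply sets or clears ATOM_S3_ASIC_GUI_ENGINE_HUNG in BIOS scratch register 3. How it gets wired up is not shown in this hunk; the following is a purely hypothetical sketch of a reset path that brackets the reset with it — do_example_asic_reset() is a placeholder, not a real function:

static int example_gpu_reset(struct amdgpu_device *adev)
{
	int r;

	/* advertise the hang to the VBIOS before resetting */
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
	r = do_example_asic_reset(adev);	/* placeholder */
	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
	return r;
}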
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 17356151db38..70e9acef5d9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | |||
@@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, | |||
163 | bool strobe_mode, | 163 | bool strobe_mode, |
164 | struct atom_mpll_param *mpll_param); | 164 | struct atom_mpll_param *mpll_param); |
165 | 165 | ||
166 | uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev); | ||
167 | uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev); | ||
168 | void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev, | ||
169 | uint32_t eng_clock); | ||
170 | void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev, | ||
171 | uint32_t mem_clock); | ||
172 | void amdgpu_atombios_set_voltage(struct amdgpu_device *adev, | ||
173 | u16 voltage_level, | ||
174 | u8 voltage_type); | ||
175 | |||
176 | void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, | 166 | void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, |
177 | u32 eng_clock, u32 mem_clock); | 167 | u32 eng_clock, u32 mem_clock); |
178 | 168 | ||
@@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock); | |||
206 | void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev); | 196 | void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev); |
207 | void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev); | 197 | void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev); |
208 | void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev); | 198 | void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev); |
199 | void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, | ||
200 | bool hung); | ||
209 | 201 | ||
210 | void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); | 202 | void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); |
211 | int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, | 203 | int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 345305235349..cc97eee93226 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | |||
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, | |||
33 | { | 33 | { |
34 | unsigned long start_jiffies; | 34 | unsigned long start_jiffies; |
35 | unsigned long end_jiffies; | 35 | unsigned long end_jiffies; |
36 | struct fence *fence = NULL; | 36 | struct dma_fence *fence = NULL; |
37 | int i, r; | 37 | int i, r; |
38 | 38 | ||
39 | start_jiffies = jiffies; | 39 | start_jiffies = jiffies; |
@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, | |||
43 | false); | 43 | false); |
44 | if (r) | 44 | if (r) |
45 | goto exit_do_move; | 45 | goto exit_do_move; |
46 | r = fence_wait(fence, false); | 46 | r = dma_fence_wait(fence, false); |
47 | if (r) | 47 | if (r) |
48 | goto exit_do_move; | 48 | goto exit_do_move; |
49 | fence_put(fence); | 49 | dma_fence_put(fence); |
50 | } | 50 | } |
51 | end_jiffies = jiffies; | 51 | end_jiffies = jiffies; |
52 | r = jiffies_to_msecs(end_jiffies - start_jiffies); | 52 | r = jiffies_to_msecs(end_jiffies - start_jiffies); |
53 | 53 | ||
54 | exit_do_move: | 54 | exit_do_move: |
55 | if (fence) | 55 | if (fence) |
56 | fence_put(fence); | 56 | dma_fence_put(fence); |
57 | return r; | 57 | return r; |
58 | } | 58 | } |
59 | 59 | ||
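The benchmark hunk above shows the mechanical struct fence to struct dma_fence rename that repeats throughout this diff: the type is renamed and the fence_*() calls gain a dma_ prefix with otherwise unchanged semantics. A minimal sketch of the converted wait-and-release idiom — example_wait_and_release() is an invented wrapper, not from the patch:

static int example_wait_and_release(struct dma_fence *fence)
{
	int r;

	if (!fence)
		return 0;

	/* fence_wait() -> dma_fence_wait(); second argument is 'interruptible' */
	r = dma_fence_wait(fence, false);

	/* fence_put() -> dma_fence_put(); drop our reference either way */
	dma_fence_put(fence);
	return r;
}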
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 7a8bfa34682f..017556ca22e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
@@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, | |||
146 | switch(type) { | 146 | switch(type) { |
147 | case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB: | 147 | case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB: |
148 | case CGS_GPU_MEM_TYPE__VISIBLE_FB: | 148 | case CGS_GPU_MEM_TYPE__VISIBLE_FB: |
149 | flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; | 149 | flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
150 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | ||
150 | domain = AMDGPU_GEM_DOMAIN_VRAM; | 151 | domain = AMDGPU_GEM_DOMAIN_VRAM; |
151 | if (max_offset > adev->mc.real_vram_size) | 152 | if (max_offset > adev->mc.real_vram_size) |
152 | return -EINVAL; | 153 | return -EINVAL; |
@@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, | |||
157 | break; | 158 | break; |
158 | case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: | 159 | case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: |
159 | case CGS_GPU_MEM_TYPE__INVISIBLE_FB: | 160 | case CGS_GPU_MEM_TYPE__INVISIBLE_FB: |
160 | flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS; | 161 | flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS | |
162 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | ||
161 | domain = AMDGPU_GEM_DOMAIN_VRAM; | 163 | domain = AMDGPU_GEM_DOMAIN_VRAM; |
162 | if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { | 164 | if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { |
163 | place.fpfn = | 165 | place.fpfn = |
@@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h | |||
240 | r = amdgpu_bo_reserve(obj, false); | 242 | r = amdgpu_bo_reserve(obj, false); |
241 | if (unlikely(r != 0)) | 243 | if (unlikely(r != 0)) |
242 | return r; | 244 | return r; |
243 | r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT, | 245 | r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains, |
244 | min_offset, max_offset, mcaddr); | 246 | min_offset, max_offset, mcaddr); |
245 | amdgpu_bo_unreserve(obj); | 247 | amdgpu_bo_unreserve(obj); |
246 | return r; | 248 | return r; |
@@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device, | |||
624 | int i, r = -1; | 626 | int i, r = -1; |
625 | 627 | ||
626 | for (i = 0; i < adev->num_ip_blocks; i++) { | 628 | for (i = 0; i < adev->num_ip_blocks; i++) { |
627 | if (!adev->ip_block_status[i].valid) | 629 | if (!adev->ip_blocks[i].status.valid) |
628 | continue; | 630 | continue; |
629 | 631 | ||
630 | if (adev->ip_blocks[i].type == block_type) { | 632 | if (adev->ip_blocks[i].version->type == block_type) { |
631 | r = adev->ip_blocks[i].funcs->set_clockgating_state( | 633 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state( |
632 | (void *)adev, | 634 | (void *)adev, |
633 | state); | 635 | state); |
634 | break; | 636 | break; |
@@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device, | |||
645 | int i, r = -1; | 647 | int i, r = -1; |
646 | 648 | ||
647 | for (i = 0; i < adev->num_ip_blocks; i++) { | 649 | for (i = 0; i < adev->num_ip_blocks; i++) { |
648 | if (!adev->ip_block_status[i].valid) | 650 | if (!adev->ip_blocks[i].status.valid) |
649 | continue; | 651 | continue; |
650 | 652 | ||
651 | if (adev->ip_blocks[i].type == block_type) { | 653 | if (adev->ip_blocks[i].version->type == block_type) { |
652 | r = adev->ip_blocks[i].funcs->set_powergating_state( | 654 | r = adev->ip_blocks[i].version->funcs->set_powergating_state( |
653 | (void *)adev, | 655 | (void *)adev, |
654 | state); | 656 | state); |
655 | break; | 657 | break; |
@@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) | |||
685 | result = AMDGPU_UCODE_ID_CP_MEC1; | 687 | result = AMDGPU_UCODE_ID_CP_MEC1; |
686 | break; | 688 | break; |
687 | case CGS_UCODE_ID_CP_MEC_JT2: | 689 | case CGS_UCODE_ID_CP_MEC_JT2: |
688 | if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11 | 690 | /* for VI. JT2 should be the same as JT1, because: |
689 | || adev->asic_type == CHIP_POLARIS10) | 691 | 1, MEC2 and MEC1 use exactly the same FW. |
690 | result = AMDGPU_UCODE_ID_CP_MEC2; | 692 | 2, JT2 is not patched but JT1 is. |
691 | else | 693 | */ |
694 | if (adev->asic_type >= CHIP_TOPAZ) | ||
692 | result = AMDGPU_UCODE_ID_CP_MEC1; | 695 | result = AMDGPU_UCODE_ID_CP_MEC1; |
696 | else | ||
697 | result = AMDGPU_UCODE_ID_CP_MEC2; | ||
693 | break; | 698 | break; |
694 | case CGS_UCODE_ID_RLC_G: | 699 | case CGS_UCODE_ID_RLC_G: |
695 | result = AMDGPU_UCODE_ID_RLC_G; | 700 | result = AMDGPU_UCODE_ID_RLC_G; |
696 | break; | 701 | break; |
702 | case CGS_UCODE_ID_STORAGE: | ||
703 | result = AMDGPU_UCODE_ID_STORAGE; | ||
704 | break; | ||
697 | default: | 705 | default: |
698 | DRM_ERROR("Firmware type not supported\n"); | 706 | DRM_ERROR("Firmware type not supported\n"); |
699 | } | 707 | } |
@@ -776,12 +784,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
776 | 784 | ||
777 | if ((type == CGS_UCODE_ID_CP_MEC_JT1) || | 785 | if ((type == CGS_UCODE_ID_CP_MEC_JT1) || |
778 | (type == CGS_UCODE_ID_CP_MEC_JT2)) { | 786 | (type == CGS_UCODE_ID_CP_MEC_JT2)) { |
779 | gpu_addr += le32_to_cpu(header->jt_offset) << 2; | 787 | gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE); |
780 | data_size = le32_to_cpu(header->jt_size) << 2; | 788 | data_size = le32_to_cpu(header->jt_size) << 2; |
781 | } | 789 | } |
782 | info->mc_addr = gpu_addr; | 790 | |
791 | info->kptr = ucode->kaddr; | ||
783 | info->image_size = data_size; | 792 | info->image_size = data_size; |
793 | info->mc_addr = gpu_addr; | ||
784 | info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); | 794 | info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); |
795 | |||
796 | if (CGS_UCODE_ID_CP_MEC == type) | ||
797 | info->image_size = (header->jt_offset) << 2; | ||
798 | |||
785 | info->fw_version = amdgpu_get_firmware_version(cgs_device, type); | 799 | info->fw_version = amdgpu_get_firmware_version(cgs_device, type); |
786 | info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); | 800 | info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); |
787 | } else { | 801 | } else { |
@@ -851,6 +865,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
851 | return 0; | 865 | return 0; |
852 | } | 866 | } |
853 | 867 | ||
868 | static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device) | ||
869 | { | ||
870 | CGS_FUNC_ADEV; | ||
871 | return amdgpu_sriov_vf(adev); | ||
872 | } | ||
873 | |||
854 | static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, | 874 | static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, |
855 | struct cgs_system_info *sys_info) | 875 | struct cgs_system_info *sys_info) |
856 | { | 876 | { |
@@ -1204,6 +1224,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { | |||
1204 | amdgpu_cgs_notify_dpm_enabled, | 1224 | amdgpu_cgs_notify_dpm_enabled, |
1205 | amdgpu_cgs_call_acpi_method, | 1225 | amdgpu_cgs_call_acpi_method, |
1206 | amdgpu_cgs_query_system_info, | 1226 | amdgpu_cgs_query_system_info, |
1227 | amdgpu_cgs_is_virtualization_enabled | ||
1207 | }; | 1228 | }; |
1208 | 1229 | ||
1209 | static const struct cgs_os_ops amdgpu_cgs_os_ops = { | 1230 | static const struct cgs_os_ops amdgpu_cgs_os_ops = { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index e3281d4e3e41..3af8ffb45b64 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | |||
@@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = { | |||
1517 | .force = amdgpu_connector_dvi_force, | 1517 | .force = amdgpu_connector_dvi_force, |
1518 | }; | 1518 | }; |
1519 | 1519 | ||
1520 | static struct drm_encoder * | ||
1521 | amdgpu_connector_virtual_encoder(struct drm_connector *connector) | ||
1522 | { | ||
1523 | int enc_id = connector->encoder_ids[0]; | ||
1524 | struct drm_encoder *encoder; | ||
1525 | int i; | ||
1526 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
1527 | if (connector->encoder_ids[i] == 0) | ||
1528 | break; | ||
1529 | |||
1530 | encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); | ||
1531 | if (!encoder) | ||
1532 | continue; | ||
1533 | |||
1534 | if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) | ||
1535 | return encoder; | ||
1536 | } | ||
1537 | |||
1538 | /* pick the first one */ | ||
1539 | if (enc_id) | ||
1540 | return drm_encoder_find(connector->dev, enc_id); | ||
1541 | return NULL; | ||
1542 | } | ||
1543 | |||
1544 | static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector) | ||
1545 | { | ||
1546 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); | ||
1547 | |||
1548 | if (encoder) { | ||
1549 | amdgpu_connector_add_common_modes(encoder, connector); | ||
1550 | } | ||
1551 | |||
1552 | return 0; | ||
1553 | } | ||
1554 | |||
1555 | static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector, | ||
1556 | struct drm_display_mode *mode) | ||
1557 | { | ||
1558 | return MODE_OK; | ||
1559 | } | ||
1560 | |||
1561 | static int | ||
1562 | amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode) | ||
1563 | { | ||
1564 | return 0; | ||
1565 | } | ||
1566 | |||
1567 | static enum drm_connector_status | ||
1568 | |||
1569 | amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force) | ||
1570 | { | ||
1571 | return connector_status_connected; | ||
1572 | } | ||
1573 | |||
1574 | static int | ||
1575 | amdgpu_connector_virtual_set_property(struct drm_connector *connector, | ||
1576 | struct drm_property *property, | ||
1577 | uint64_t val) | ||
1578 | { | ||
1579 | return 0; | ||
1580 | } | ||
1581 | |||
1582 | static void amdgpu_connector_virtual_force(struct drm_connector *connector) | ||
1583 | { | ||
1584 | return; | ||
1585 | } | ||
1586 | |||
1587 | static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = { | ||
1588 | .get_modes = amdgpu_connector_virtual_get_modes, | ||
1589 | .mode_valid = amdgpu_connector_virtual_mode_valid, | ||
1590 | .best_encoder = amdgpu_connector_virtual_encoder, | ||
1591 | }; | ||
1592 | |||
1593 | static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = { | ||
1594 | .dpms = amdgpu_connector_virtual_dpms, | ||
1595 | .detect = amdgpu_connector_virtual_detect, | ||
1596 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
1597 | .set_property = amdgpu_connector_virtual_set_property, | ||
1598 | .destroy = amdgpu_connector_destroy, | ||
1599 | .force = amdgpu_connector_virtual_force, | ||
1600 | }; | ||
1601 | |||
1602 | void | 1520 | void |
1603 | amdgpu_connector_add(struct amdgpu_device *adev, | 1521 | amdgpu_connector_add(struct amdgpu_device *adev, |
1604 | uint32_t connector_id, | 1522 | uint32_t connector_id, |
@@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev, | |||
1983 | connector->interlace_allowed = false; | 1901 | connector->interlace_allowed = false; |
1984 | connector->doublescan_allowed = false; | 1902 | connector->doublescan_allowed = false; |
1985 | break; | 1903 | break; |
1986 | case DRM_MODE_CONNECTOR_VIRTUAL: | ||
1987 | amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL); | ||
1988 | if (!amdgpu_dig_connector) | ||
1989 | goto failed; | ||
1990 | amdgpu_connector->con_priv = amdgpu_dig_connector; | ||
1991 | drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type); | ||
1992 | drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs); | ||
1993 | subpixel_order = SubPixelHorizontalRGB; | ||
1994 | connector->interlace_allowed = false; | ||
1995 | connector->doublescan_allowed = false; | ||
1996 | break; | ||
1997 | } | 1904 | } |
1998 | } | 1905 | } |
1999 | 1906 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b0f6e6957536..a024217896fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, | |||
355 | static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, | 355 | static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, |
356 | struct amdgpu_bo *bo) | 356 | struct amdgpu_bo *bo) |
357 | { | 357 | { |
358 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | ||
358 | u64 initial_bytes_moved; | 359 | u64 initial_bytes_moved; |
359 | uint32_t domain; | 360 | uint32_t domain; |
360 | int r; | 361 | int r; |
@@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, | |||
372 | 373 | ||
373 | retry: | 374 | retry: |
374 | amdgpu_ttm_placement_from_domain(bo, domain); | 375 | amdgpu_ttm_placement_from_domain(bo, domain); |
375 | initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); | 376 | initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); |
376 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | 377 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); |
377 | p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - | 378 | p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - |
378 | initial_bytes_moved; | 379 | initial_bytes_moved; |
379 | 380 | ||
380 | if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { | 381 | if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { |
@@ -387,9 +388,9 @@ retry: | |||
387 | 388 | ||
388 | /* Last resort, try to evict something from the current working set */ | 389 | /* Last resort, try to evict something from the current working set */ |
389 | static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, | 390 | static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, |
390 | struct amdgpu_bo_list_entry *lobj) | 391 | struct amdgpu_bo *validated) |
391 | { | 392 | { |
392 | uint32_t domain = lobj->robj->allowed_domains; | 393 | uint32_t domain = validated->allowed_domains; |
393 | int r; | 394 | int r; |
394 | 395 | ||
395 | if (!p->evictable) | 396 | if (!p->evictable) |
@@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, | |||
400 | 401 | ||
401 | struct amdgpu_bo_list_entry *candidate = p->evictable; | 402 | struct amdgpu_bo_list_entry *candidate = p->evictable; |
402 | struct amdgpu_bo *bo = candidate->robj; | 403 | struct amdgpu_bo *bo = candidate->robj; |
404 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | ||
403 | u64 initial_bytes_moved; | 405 | u64 initial_bytes_moved; |
404 | uint32_t other; | 406 | uint32_t other; |
405 | 407 | ||
406 | /* If we reached our current BO we can forget it */ | 408 | /* If we reached our current BO we can forget it */ |
407 | if (candidate == lobj) | 409 | if (candidate->robj == validated) |
408 | break; | 410 | break; |
409 | 411 | ||
410 | other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); | 412 | other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); |
@@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, | |||
420 | 422 | ||
421 | /* Good we can try to move this BO somewhere else */ | 423 | /* Good we can try to move this BO somewhere else */ |
422 | amdgpu_ttm_placement_from_domain(bo, other); | 424 | amdgpu_ttm_placement_from_domain(bo, other); |
423 | initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); | 425 | initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); |
424 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | 426 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); |
425 | p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - | 427 | p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - |
426 | initial_bytes_moved; | 428 | initial_bytes_moved; |
427 | 429 | ||
428 | if (unlikely(r)) | 430 | if (unlikely(r)) |
@@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, | |||
437 | return false; | 439 | return false; |
438 | } | 440 | } |
439 | 441 | ||
442 | static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo) | ||
443 | { | ||
444 | struct amdgpu_cs_parser *p = param; | ||
445 | int r; | ||
446 | |||
447 | do { | ||
448 | r = amdgpu_cs_bo_validate(p, bo); | ||
449 | } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo)); | ||
450 | if (r) | ||
451 | return r; | ||
452 | |||
453 | if (bo->shadow) | ||
454 | r = amdgpu_cs_bo_validate(p, bo); | ||
455 | |||
456 | return r; | ||
457 | } | ||
458 | |||
440 | static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, | 459 | static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, |
441 | struct list_head *validated) | 460 | struct list_head *validated) |
442 | { | 461 | { |
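The new amdgpu_cs_validate() packages the existing retry-until-evicted loop and the shadow-BO pass behind a plain (void *param, struct amdgpu_bo *bo) callback, so the same validation policy can be applied to buffers the CS parser never walks directly, such as VM page-table BOs. Reconstructed from the call site added later in this file, the walker's prototype is roughly the following (an assumption for illustration, not copied from the driver headers):

int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *param, struct amdgpu_bo *bo),
			      void *param);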
@@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, | |||
464 | if (p->evictable == lobj) | 483 | if (p->evictable == lobj) |
465 | p->evictable = NULL; | 484 | p->evictable = NULL; |
466 | 485 | ||
467 | do { | 486 | r = amdgpu_cs_validate(p, bo); |
468 | r = amdgpu_cs_bo_validate(p, bo); | ||
469 | } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj)); | ||
470 | if (r) | 487 | if (r) |
471 | return r; | 488 | return r; |
472 | 489 | ||
473 | if (bo->shadow) { | ||
474 | r = amdgpu_cs_bo_validate(p, bo); | ||
475 | if (r) | ||
476 | return r; | ||
477 | } | ||
478 | |||
479 | if (binding_userptr) { | 490 | if (binding_userptr) { |
480 | drm_free_large(lobj->user_pages); | 491 | drm_free_large(lobj->user_pages); |
481 | lobj->user_pages = NULL; | 492 | lobj->user_pages = NULL; |
@@ -593,14 +604,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, | |||
593 | list_splice(&need_pages, &p->validated); | 604 | list_splice(&need_pages, &p->validated); |
594 | } | 605 | } |
595 | 606 | ||
596 | amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates); | ||
597 | |||
598 | p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); | 607 | p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); |
599 | p->bytes_moved = 0; | 608 | p->bytes_moved = 0; |
600 | p->evictable = list_last_entry(&p->validated, | 609 | p->evictable = list_last_entry(&p->validated, |
601 | struct amdgpu_bo_list_entry, | 610 | struct amdgpu_bo_list_entry, |
602 | tv.head); | 611 | tv.head); |
603 | 612 | ||
613 | r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, | ||
614 | amdgpu_cs_validate, p); | ||
615 | if (r) { | ||
616 | DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n"); | ||
617 | goto error_validate; | ||
618 | } | ||
619 | |||
604 | r = amdgpu_cs_list_validate(p, &duplicates); | 620 | r = amdgpu_cs_list_validate(p, &duplicates); |
605 | if (r) { | 621 | if (r) { |
606 | DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n"); | 622 | DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n"); |
@@ -719,7 +735,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo | |||
719 | ttm_eu_backoff_reservation(&parser->ticket, | 735 | ttm_eu_backoff_reservation(&parser->ticket, |
720 | &parser->validated); | 736 | &parser->validated); |
721 | } | 737 | } |
722 | fence_put(parser->fence); | 738 | dma_fence_put(parser->fence); |
723 | 739 | ||
724 | if (parser->ctx) | 740 | if (parser->ctx) |
725 | amdgpu_ctx_put(parser->ctx); | 741 | amdgpu_ctx_put(parser->ctx); |
@@ -756,7 +772,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, | |||
756 | 772 | ||
757 | if (p->bo_list) { | 773 | if (p->bo_list) { |
758 | for (i = 0; i < p->bo_list->num_entries; i++) { | 774 | for (i = 0; i < p->bo_list->num_entries; i++) { |
759 | struct fence *f; | 775 | struct dma_fence *f; |
760 | 776 | ||
761 | /* ignore duplicates */ | 777 | /* ignore duplicates */ |
762 | bo = p->bo_list->array[i].robj; | 778 | bo = p->bo_list->array[i].robj; |
@@ -806,13 +822,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, | |||
806 | 822 | ||
807 | /* Only for UVD/VCE VM emulation */ | 823 | /* Only for UVD/VCE VM emulation */ |
808 | if (ring->funcs->parse_cs) { | 824 | if (ring->funcs->parse_cs) { |
809 | p->job->vm = NULL; | ||
810 | for (i = 0; i < p->job->num_ibs; i++) { | 825 | for (i = 0; i < p->job->num_ibs; i++) { |
811 | r = amdgpu_ring_parse_cs(ring, p, i); | 826 | r = amdgpu_ring_parse_cs(ring, p, i); |
812 | if (r) | 827 | if (r) |
813 | return r; | 828 | return r; |
814 | } | 829 | } |
815 | } else { | 830 | } |
831 | |||
832 | if (p->job->vm) { | ||
816 | p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); | 833 | p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); |
817 | 834 | ||
818 | r = amdgpu_bo_vm_update_pte(p, vm); | 835 | r = amdgpu_bo_vm_update_pte(p, vm); |
@@ -901,7 +918,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
901 | offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE; | 918 | offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE; |
902 | kptr += chunk_ib->va_start - offset; | 919 | kptr += chunk_ib->va_start - offset; |
903 | 920 | ||
904 | r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib); | 921 | r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib); |
905 | if (r) { | 922 | if (r) { |
906 | DRM_ERROR("Failed to get ib !\n"); | 923 | DRM_ERROR("Failed to get ib !\n"); |
907 | return r; | 924 | return r; |
@@ -916,9 +933,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
916 | return r; | 933 | return r; |
917 | } | 934 | } |
918 | 935 | ||
919 | ib->gpu_addr = chunk_ib->va_start; | ||
920 | } | 936 | } |
921 | 937 | ||
938 | ib->gpu_addr = chunk_ib->va_start; | ||
922 | ib->length_dw = chunk_ib->ib_bytes / 4; | 939 | ib->length_dw = chunk_ib->ib_bytes / 4; |
923 | ib->flags = chunk_ib->flags; | 940 | ib->flags = chunk_ib->flags; |
924 | j++; | 941 | j++; |
@@ -926,8 +943,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
926 | 943 | ||
927 | /* UVD & VCE fw doesn't support user fences */ | 944 | /* UVD & VCE fw doesn't support user fences */ |
928 | if (parser->job->uf_addr && ( | 945 | if (parser->job->uf_addr && ( |
929 | parser->job->ring->type == AMDGPU_RING_TYPE_UVD || | 946 | parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD || |
930 | parser->job->ring->type == AMDGPU_RING_TYPE_VCE)) | 947 | parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE)) |
931 | return -EINVAL; | 948 | return -EINVAL; |
932 | 949 | ||
933 | return 0; | 950 | return 0; |
@@ -956,7 +973,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, | |||
956 | for (j = 0; j < num_deps; ++j) { | 973 | for (j = 0; j < num_deps; ++j) { |
957 | struct amdgpu_ring *ring; | 974 | struct amdgpu_ring *ring; |
958 | struct amdgpu_ctx *ctx; | 975 | struct amdgpu_ctx *ctx; |
959 | struct fence *fence; | 976 | struct dma_fence *fence; |
960 | 977 | ||
961 | r = amdgpu_cs_get_ring(adev, deps[j].ip_type, | 978 | r = amdgpu_cs_get_ring(adev, deps[j].ip_type, |
962 | deps[j].ip_instance, | 979 | deps[j].ip_instance, |
@@ -978,7 +995,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, | |||
978 | } else if (fence) { | 995 | } else if (fence) { |
979 | r = amdgpu_sync_fence(adev, &p->job->sync, | 996 | r = amdgpu_sync_fence(adev, &p->job->sync, |
980 | fence); | 997 | fence); |
981 | fence_put(fence); | 998 | dma_fence_put(fence); |
982 | amdgpu_ctx_put(ctx); | 999 | amdgpu_ctx_put(ctx); |
983 | if (r) | 1000 | if (r) |
984 | return r; | 1001 | return r; |
@@ -1008,7 +1025,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
1008 | 1025 | ||
1009 | job->owner = p->filp; | 1026 | job->owner = p->filp; |
1010 | job->fence_ctx = entity->fence_context; | 1027 | job->fence_ctx = entity->fence_context; |
1011 | p->fence = fence_get(&job->base.s_fence->finished); | 1028 | p->fence = dma_fence_get(&job->base.s_fence->finished); |
1012 | cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); | 1029 | cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); |
1013 | job->uf_sequence = cs->out.handle; | 1030 | job->uf_sequence = cs->out.handle; |
1014 | amdgpu_job_free_resources(job); | 1031 | amdgpu_job_free_resources(job); |
@@ -1091,7 +1108,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, | |||
1091 | unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); | 1108 | unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); |
1092 | struct amdgpu_ring *ring = NULL; | 1109 | struct amdgpu_ring *ring = NULL; |
1093 | struct amdgpu_ctx *ctx; | 1110 | struct amdgpu_ctx *ctx; |
1094 | struct fence *fence; | 1111 | struct dma_fence *fence; |
1095 | long r; | 1112 | long r; |
1096 | 1113 | ||
1097 | r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, | 1114 | r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, |
@@ -1107,8 +1124,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, | |||
1107 | if (IS_ERR(fence)) | 1124 | if (IS_ERR(fence)) |
1108 | r = PTR_ERR(fence); | 1125 | r = PTR_ERR(fence); |
1109 | else if (fence) { | 1126 | else if (fence) { |
1110 | r = fence_wait_timeout(fence, true, timeout); | 1127 | r = dma_fence_wait_timeout(fence, true, timeout); |
1111 | fence_put(fence); | 1128 | dma_fence_put(fence); |
1112 | } else | 1129 | } else |
1113 | r = 1; | 1130 | r = 1; |
1114 | 1131 | ||
@@ -1195,6 +1212,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser) | |||
1195 | r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); | 1212 | r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); |
1196 | if (unlikely(r)) | 1213 | if (unlikely(r)) |
1197 | return r; | 1214 | return r; |
1215 | |||
1216 | if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) | ||
1217 | continue; | ||
1218 | |||
1219 | bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | ||
1220 | amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains); | ||
1221 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
1222 | if (unlikely(r)) | ||
1223 | return r; | ||
1198 | } | 1224 | } |
1199 | 1225 | ||
1200 | return 0; | 1226 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index a5e2fcbef0f0..400c66ba4c6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) | |||
35 | kref_init(&ctx->refcount); | 35 | kref_init(&ctx->refcount); |
36 | spin_lock_init(&ctx->ring_lock); | 36 | spin_lock_init(&ctx->ring_lock); |
37 | ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS, | 37 | ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS, |
38 | sizeof(struct fence*), GFP_KERNEL); | 38 | sizeof(struct dma_fence*), GFP_KERNEL); |
39 | if (!ctx->fences) | 39 | if (!ctx->fences) |
40 | return -ENOMEM; | 40 | return -ENOMEM; |
41 | 41 | ||
@@ -55,18 +55,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) | |||
55 | r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity, | 55 | r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity, |
56 | rq, amdgpu_sched_jobs); | 56 | rq, amdgpu_sched_jobs); |
57 | if (r) | 57 | if (r) |
58 | break; | 58 | goto failed; |
59 | } | 59 | } |
60 | 60 | ||
61 | if (i < adev->num_rings) { | ||
62 | for (j = 0; j < i; j++) | ||
63 | amd_sched_entity_fini(&adev->rings[j]->sched, | ||
64 | &ctx->rings[j].entity); | ||
65 | kfree(ctx->fences); | ||
66 | ctx->fences = NULL; | ||
67 | return r; | ||
68 | } | ||
69 | return 0; | 61 | return 0; |
62 | |||
63 | failed: | ||
64 | for (j = 0; j < i; j++) | ||
65 | amd_sched_entity_fini(&adev->rings[j]->sched, | ||
66 | &ctx->rings[j].entity); | ||
67 | kfree(ctx->fences); | ||
68 | ctx->fences = NULL; | ||
69 | return r; | ||
70 | } | 70 | } |
71 | 71 | ||
72 | static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) | 72 | static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) |
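The context-init rework above replaces the post-loop "if (i < adev->num_rings)" cleanup with a conventional goto-based unwind: the loop bails out to a label that tears down only the entities that were successfully initialised. The shape of that pattern, as a self-contained sketch with invented names:

#include <stdlib.h>

static int init_many(void **objs, int n)
{
	int i, j;

	for (i = 0; i < n; i++) {
		objs[i] = malloc(16);
		if (!objs[i])
			goto failed;	/* bail out as soon as one step fails */
	}
	return 0;

failed:
	for (j = 0; j < i; j++)		/* unwind only what was set up */
		free(objs[j]);
	return -1;
}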
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) | |||
79 | 79 | ||
80 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | 80 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) |
81 | for (j = 0; j < amdgpu_sched_jobs; ++j) | 81 | for (j = 0; j < amdgpu_sched_jobs; ++j) |
82 | fence_put(ctx->rings[i].fences[j]); | 82 | dma_fence_put(ctx->rings[i].fences[j]); |
83 | kfree(ctx->fences); | 83 | kfree(ctx->fences); |
84 | ctx->fences = NULL; | 84 | ctx->fences = NULL; |
85 | 85 | ||
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx) | |||
241 | } | 241 | } |
242 | 242 | ||
243 | uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, | 243 | uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, |
244 | struct fence *fence) | 244 | struct dma_fence *fence) |
245 | { | 245 | { |
246 | struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; | 246 | struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; |
247 | uint64_t seq = cring->sequence; | 247 | uint64_t seq = cring->sequence; |
248 | unsigned idx = 0; | 248 | unsigned idx = 0; |
249 | struct fence *other = NULL; | 249 | struct dma_fence *other = NULL; |
250 | 250 | ||
251 | idx = seq & (amdgpu_sched_jobs - 1); | 251 | idx = seq & (amdgpu_sched_jobs - 1); |
252 | other = cring->fences[idx]; | 252 | other = cring->fences[idx]; |
253 | if (other) { | 253 | if (other) { |
254 | signed long r; | 254 | signed long r; |
255 | r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); | 255 | r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); |
256 | if (r < 0) | 256 | if (r < 0) |
257 | DRM_ERROR("Error (%ld) waiting for fence!\n", r); | 257 | DRM_ERROR("Error (%ld) waiting for fence!\n", r); |
258 | } | 258 | } |
259 | 259 | ||
260 | fence_get(fence); | 260 | dma_fence_get(fence); |
261 | 261 | ||
262 | spin_lock(&ctx->ring_lock); | 262 | spin_lock(&ctx->ring_lock); |
263 | cring->fences[idx] = fence; | 263 | cring->fences[idx] = fence; |
264 | cring->sequence++; | 264 | cring->sequence++; |
265 | spin_unlock(&ctx->ring_lock); | 265 | spin_unlock(&ctx->ring_lock); |
266 | 266 | ||
267 | fence_put(other); | 267 | dma_fence_put(other); |
268 | 268 | ||
269 | return seq; | 269 | return seq; |
270 | } | 270 | } |
271 | 271 | ||
272 | struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, | 272 | struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, |
273 | struct amdgpu_ring *ring, uint64_t seq) | 273 | struct amdgpu_ring *ring, uint64_t seq) |
274 | { | 274 | { |
275 | struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; | 275 | struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; |
276 | struct fence *fence; | 276 | struct dma_fence *fence; |
277 | 277 | ||
278 | spin_lock(&ctx->ring_lock); | 278 | spin_lock(&ctx->ring_lock); |
279 | 279 | ||
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, | |||
288 | return NULL; | 288 | return NULL; |
289 | } | 289 | } |
290 | 290 | ||
291 | fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]); | 291 | fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]); |
292 | spin_unlock(&ctx->ring_lock); | 292 | spin_unlock(&ctx->ring_lock); |
293 | 293 | ||
294 | return fence; | 294 | return fence; |
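The fence changes in these amdgpu hunks follow the struct fence to struct dma_fence rename; the call sites keep their shape and only the identifiers change. The mapping, as it appears in the hunks here:

/* Renames visible in these hunks (old -> new); the call shapes are unchanged: */
/*   struct fence              -> struct dma_fence              */
/*   fence_get() / fence_put() -> dma_fence_get() / dma_fence_put() */
/*   fence_wait()              -> dma_fence_wait()              */
/*   fence_wait_timeout()      -> dma_fence_wait_timeout()      */
/*   fence_context_alloc()     -> dma_fence_context_alloc()     */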
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b4f4a9239069..6958d4af017f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) | |||
264 | if (adev->vram_scratch.robj == NULL) { | 264 | if (adev->vram_scratch.robj == NULL) { |
265 | r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, | 265 | r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, |
266 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, | 266 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, |
267 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 267 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
268 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
268 | NULL, NULL, &adev->vram_scratch.robj); | 269 | NULL, NULL, &adev->vram_scratch.robj); |
269 | if (r) { | 270 | if (r) { |
270 | return r; | 271 | return r; |
@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, | |||
442 | static void amdgpu_wb_fini(struct amdgpu_device *adev) | 443 | static void amdgpu_wb_fini(struct amdgpu_device *adev) |
443 | { | 444 | { |
444 | if (adev->wb.wb_obj) { | 445 | if (adev->wb.wb_obj) { |
445 | if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) { | 446 | amdgpu_bo_free_kernel(&adev->wb.wb_obj, |
446 | amdgpu_bo_kunmap(adev->wb.wb_obj); | 447 | &adev->wb.gpu_addr, |
447 | amdgpu_bo_unpin(adev->wb.wb_obj); | 448 | (void **)&adev->wb.wb); |
448 | amdgpu_bo_unreserve(adev->wb.wb_obj); | ||
449 | } | ||
450 | amdgpu_bo_unref(&adev->wb.wb_obj); | ||
451 | adev->wb.wb = NULL; | ||
452 | adev->wb.wb_obj = NULL; | 449 | adev->wb.wb_obj = NULL; |
453 | } | 450 | } |
454 | } | 451 | } |
@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) | |||
467 | int r; | 464 | int r; |
468 | 465 | ||
469 | if (adev->wb.wb_obj == NULL) { | 466 | if (adev->wb.wb_obj == NULL) { |
470 | r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, | 467 | r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4, |
471 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, | 468 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, |
472 | &adev->wb.wb_obj); | 469 | &adev->wb.wb_obj, &adev->wb.gpu_addr, |
470 | (void **)&adev->wb.wb); | ||
473 | if (r) { | 471 | if (r) { |
474 | dev_warn(adev->dev, "(%d) create WB bo failed\n", r); | 472 | dev_warn(adev->dev, "(%d) create WB bo failed\n", r); |
475 | return r; | 473 | return r; |
476 | } | 474 | } |
477 | r = amdgpu_bo_reserve(adev->wb.wb_obj, false); | ||
478 | if (unlikely(r != 0)) { | ||
479 | amdgpu_wb_fini(adev); | ||
480 | return r; | ||
481 | } | ||
482 | r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT, | ||
483 | &adev->wb.gpu_addr); | ||
484 | if (r) { | ||
485 | amdgpu_bo_unreserve(adev->wb.wb_obj); | ||
486 | dev_warn(adev->dev, "(%d) pin WB bo failed\n", r); | ||
487 | amdgpu_wb_fini(adev); | ||
488 | return r; | ||
489 | } | ||
490 | r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb); | ||
491 | amdgpu_bo_unreserve(adev->wb.wb_obj); | ||
492 | if (r) { | ||
493 | dev_warn(adev->dev, "(%d) map WB bo failed\n", r); | ||
494 | amdgpu_wb_fini(adev); | ||
495 | return r; | ||
496 | } | ||
497 | 475 | ||
498 | adev->wb.num_wb = AMDGPU_MAX_WB; | 476 | adev->wb.num_wb = AMDGPU_MAX_WB; |
499 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); | 477 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); |
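amdgpu_bo_create_kernel() and amdgpu_bo_free_kernel() fold the open-coded create/reserve/pin/kmap (and kunmap/unpin/unref) sequences removed above into single helpers. A sketch of what the create-side helper has to do, reconstructed from that removed sequence (the real helper lives elsewhere in the driver and may handle error unwinding and placement flags differently):

static int bo_create_kernel_sketch(struct amdgpu_device *adev,
				   unsigned long size, int align, u32 domain,
				   struct amdgpu_bo **bo_ptr, u64 *gpu_addr,
				   void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create(adev, size, align, true, domain, 0,
			     NULL, NULL, bo_ptr);
	if (r)
		return r;
	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r)
		return r;		/* error unwinding elided in this sketch */
	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (!r && cpu_addr)
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
	amdgpu_bo_unreserve(*bo_ptr);
	return r;
}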
@@ -1051,6 +1029,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) | |||
1051 | amdgpu_vm_block_size); | 1029 | amdgpu_vm_block_size); |
1052 | amdgpu_vm_block_size = 9; | 1030 | amdgpu_vm_block_size = 9; |
1053 | } | 1031 | } |
1032 | |||
1033 | if ((amdgpu_vram_page_split != -1 && amdgpu_vram_page_split < 16) || | ||
1034 | !amdgpu_check_pot_argument(amdgpu_vram_page_split)) { | ||
1035 | dev_warn(adev->dev, "invalid VRAM page split (%d)\n", | ||
1036 | amdgpu_vram_page_split); | ||
1037 | amdgpu_vram_page_split = 1024; | ||
1038 | } | ||
1054 | } | 1039 | } |
1055 | 1040 | ||
1056 | /** | 1041 | /** |
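The new check validates amdgpu_vram_page_split against a minimum of 16 (with -1 bypassing that minimum) and against amdgpu_check_pot_argument(), whose implementation is not shown in this hunk, falling back to 1024 on failure. A power-of-two test of the usual form would be (an assumption, for illustration only):

static bool is_power_of_two(long val)
{
	return val > 0 && (val & (val - 1)) == 0;
}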
@@ -1125,11 +1110,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev, | |||
1125 | int i, r = 0; | 1110 | int i, r = 0; |
1126 | 1111 | ||
1127 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1112 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1128 | if (!adev->ip_block_status[i].valid) | 1113 | if (!adev->ip_blocks[i].status.valid) |
1129 | continue; | 1114 | continue; |
1130 | if (adev->ip_blocks[i].type == block_type) { | 1115 | if (adev->ip_blocks[i].version->type == block_type) { |
1131 | r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, | 1116 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1132 | state); | 1117 | state); |
1133 | if (r) | 1118 | if (r) |
1134 | return r; | 1119 | return r; |
1135 | break; | 1120 | break; |
@@ -1145,11 +1130,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev, | |||
1145 | int i, r = 0; | 1130 | int i, r = 0; |
1146 | 1131 | ||
1147 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1132 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1148 | if (!adev->ip_block_status[i].valid) | 1133 | if (!adev->ip_blocks[i].status.valid) |
1149 | continue; | 1134 | continue; |
1150 | if (adev->ip_blocks[i].type == block_type) { | 1135 | if (adev->ip_blocks[i].version->type == block_type) { |
1151 | r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev, | 1136 | r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, |
1152 | state); | 1137 | state); |
1153 | if (r) | 1138 | if (r) |
1154 | return r; | 1139 | return r; |
1155 | break; | 1140 | break; |
@@ -1164,10 +1149,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev, | |||
1164 | int i, r; | 1149 | int i, r; |
1165 | 1150 | ||
1166 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1151 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1167 | if (!adev->ip_block_status[i].valid) | 1152 | if (!adev->ip_blocks[i].status.valid) |
1168 | continue; | 1153 | continue; |
1169 | if (adev->ip_blocks[i].type == block_type) { | 1154 | if (adev->ip_blocks[i].version->type == block_type) { |
1170 | r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev); | 1155 | r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev); |
1171 | if (r) | 1156 | if (r) |
1172 | return r; | 1157 | return r; |
1173 | break; | 1158 | break; |
@@ -1183,23 +1168,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev, | |||
1183 | int i; | 1168 | int i; |
1184 | 1169 | ||
1185 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1170 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1186 | if (!adev->ip_block_status[i].valid) | 1171 | if (!adev->ip_blocks[i].status.valid) |
1187 | continue; | 1172 | continue; |
1188 | if (adev->ip_blocks[i].type == block_type) | 1173 | if (adev->ip_blocks[i].version->type == block_type) |
1189 | return adev->ip_blocks[i].funcs->is_idle((void *)adev); | 1174 | return adev->ip_blocks[i].version->funcs->is_idle((void *)adev); |
1190 | } | 1175 | } |
1191 | return true; | 1176 | return true; |
1192 | 1177 | ||
1193 | } | 1178 | } |
1194 | 1179 | ||
1195 | const struct amdgpu_ip_block_version * amdgpu_get_ip_block( | 1180 | struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev, |
1196 | struct amdgpu_device *adev, | 1181 | enum amd_ip_block_type type) |
1197 | enum amd_ip_block_type type) | ||
1198 | { | 1182 | { |
1199 | int i; | 1183 | int i; |
1200 | 1184 | ||
1201 | for (i = 0; i < adev->num_ip_blocks; i++) | 1185 | for (i = 0; i < adev->num_ip_blocks; i++) |
1202 | if (adev->ip_blocks[i].type == type) | 1186 | if (adev->ip_blocks[i].version->type == type) |
1203 | return &adev->ip_blocks[i]; | 1187 | return &adev->ip_blocks[i]; |
1204 | 1188 | ||
1205 | return NULL; | 1189 | return NULL; |
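The separate ip_block_status array is gone: per-IP state now sits next to a pointer to the shared, const version descriptor inside struct amdgpu_ip_block, which is what amdgpu_get_ip_block() returns above. The layout implied by the accesses in these hunks is roughly the following (reconstructed from usage, not copied from the headers; ordering and any extra members are assumptions):

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
};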
@@ -1220,38 +1204,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, | |||
1220 | enum amd_ip_block_type type, | 1204 | enum amd_ip_block_type type, |
1221 | u32 major, u32 minor) | 1205 | u32 major, u32 minor) |
1222 | { | 1206 | { |
1223 | const struct amdgpu_ip_block_version *ip_block; | 1207 | struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type); |
1224 | ip_block = amdgpu_get_ip_block(adev, type); | ||
1225 | 1208 | ||
1226 | if (ip_block && ((ip_block->major > major) || | 1209 | if (ip_block && ((ip_block->version->major > major) || |
1227 | ((ip_block->major == major) && | 1210 | ((ip_block->version->major == major) && |
1228 | (ip_block->minor >= minor)))) | 1211 | (ip_block->version->minor >= minor)))) |
1229 | return 0; | 1212 | return 0; |
1230 | 1213 | ||
1231 | return 1; | 1214 | return 1; |
1232 | } | 1215 | } |
1233 | 1216 | ||
1234 | static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev) | 1217 | /** |
1218 | * amdgpu_ip_block_add | ||
1219 | * | ||
1220 | * @adev: amdgpu_device pointer | ||
1221 | * @ip_block_version: pointer to the IP to add | ||
1222 | * | ||
1223 | * Adds the IP block driver information to the collection of IPs | ||
1224 | * on the asic. | ||
1225 | */ | ||
1226 | int amdgpu_ip_block_add(struct amdgpu_device *adev, | ||
1227 | const struct amdgpu_ip_block_version *ip_block_version) | ||
1228 | { | ||
1229 | if (!ip_block_version) | ||
1230 | return -EINVAL; | ||
1231 | |||
1232 | adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; | ||
1233 | |||
1234 | return 0; | ||
1235 | } | ||
1236 | |||
1237 | static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) | ||
1235 | { | 1238 | { |
1236 | adev->enable_virtual_display = false; | 1239 | adev->enable_virtual_display = false; |
1237 | 1240 | ||
1238 | if (amdgpu_virtual_display) { | 1241 | if (amdgpu_virtual_display) { |
1239 | struct drm_device *ddev = adev->ddev; | 1242 | struct drm_device *ddev = adev->ddev; |
1240 | const char *pci_address_name = pci_name(ddev->pdev); | 1243 | const char *pci_address_name = pci_name(ddev->pdev); |
1241 | char *pciaddstr, *pciaddstr_tmp, *pciaddname; | 1244 | char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname; |
1242 | 1245 | ||
1243 | pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); | 1246 | pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); |
1244 | pciaddstr_tmp = pciaddstr; | 1247 | pciaddstr_tmp = pciaddstr; |
1245 | while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) { | 1248 | while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) { |
1249 | pciaddname = strsep(&pciaddname_tmp, ","); | ||
1246 | if (!strcmp(pci_address_name, pciaddname)) { | 1250 | if (!strcmp(pci_address_name, pciaddname)) { |
1251 | long num_crtc; | ||
1252 | int res = -1; | ||
1253 | |||
1247 | adev->enable_virtual_display = true; | 1254 | adev->enable_virtual_display = true; |
1255 | |||
1256 | if (pciaddname_tmp) | ||
1257 | res = kstrtol(pciaddname_tmp, 10, | ||
1258 | &num_crtc); | ||
1259 | |||
1260 | if (!res) { | ||
1261 | if (num_crtc < 1) | ||
1262 | num_crtc = 1; | ||
1263 | if (num_crtc > 6) | ||
1264 | num_crtc = 6; | ||
1265 | adev->mode_info.num_crtc = num_crtc; | ||
1266 | } else { | ||
1267 | adev->mode_info.num_crtc = 1; | ||
1268 | } | ||
1248 | break; | 1269 | break; |
1249 | } | 1270 | } |
1250 | } | 1271 | } |
1251 | 1272 | ||
1252 | DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n", | 1273 | DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", |
1253 | amdgpu_virtual_display, pci_address_name, | 1274 | amdgpu_virtual_display, pci_address_name, |
1254 | adev->enable_virtual_display); | 1275 | adev->enable_virtual_display, adev->mode_info.num_crtc); |
1255 | 1276 | ||
1256 | kfree(pciaddstr); | 1277 | kfree(pciaddstr); |
1257 | } | 1278 | } |
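With this change each entry of the virtual-display parameter may carry an optional CRTC count after a comma; entries are still separated by semicolons, and the count is clamped to the range 1-6 with 1 as the default. A self-contained userspace rewrite of that parsing, for illustration only (the string below is an invented example value, not a documented default):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "0000:26:00.0,2;0000:01:00.0";	/* example parameter value */
	char *cursor = buf, *entry, *addr;

	while ((entry = strsep(&cursor, ";"))) {
		long num_crtc = 1;			/* default when no count is given */

		addr = strsep(&entry, ",");
		if (entry) {				/* text after the comma, if any */
			num_crtc = strtol(entry, NULL, 10);
			if (num_crtc < 1)
				num_crtc = 1;
			if (num_crtc > 6)
				num_crtc = 6;
		}
		printf("%s -> %ld crtc(s)\n", addr, num_crtc);
	}
	return 0;
}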
@@ -1261,7 +1282,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev) | |||
1261 | { | 1282 | { |
1262 | int i, r; | 1283 | int i, r; |
1263 | 1284 | ||
1264 | amdgpu_whether_enable_virtual_display(adev); | 1285 | amdgpu_device_enable_virtual_display(adev); |
1265 | 1286 | ||
1266 | switch (adev->asic_type) { | 1287 | switch (adev->asic_type) { |
1267 | case CHIP_TOPAZ: | 1288 | case CHIP_TOPAZ: |
@@ -1313,33 +1334,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev) | |||
1313 | return -EINVAL; | 1334 | return -EINVAL; |
1314 | } | 1335 | } |
1315 | 1336 | ||
1316 | adev->ip_block_status = kcalloc(adev->num_ip_blocks, | ||
1317 | sizeof(struct amdgpu_ip_block_status), GFP_KERNEL); | ||
1318 | if (adev->ip_block_status == NULL) | ||
1319 | return -ENOMEM; | ||
1320 | |||
1321 | if (adev->ip_blocks == NULL) { | ||
1322 | DRM_ERROR("No IP blocks found!\n"); | ||
1323 | return r; | ||
1324 | } | ||
1325 | |||
1326 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1337 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1327 | if ((amdgpu_ip_block_mask & (1 << i)) == 0) { | 1338 | if ((amdgpu_ip_block_mask & (1 << i)) == 0) { |
1328 | DRM_ERROR("disabled ip block: %d\n", i); | 1339 | DRM_ERROR("disabled ip block: %d\n", i); |
1329 | adev->ip_block_status[i].valid = false; | 1340 | adev->ip_blocks[i].status.valid = false; |
1330 | } else { | 1341 | } else { |
1331 | if (adev->ip_blocks[i].funcs->early_init) { | 1342 | if (adev->ip_blocks[i].version->funcs->early_init) { |
1332 | r = adev->ip_blocks[i].funcs->early_init((void *)adev); | 1343 | r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); |
1333 | if (r == -ENOENT) { | 1344 | if (r == -ENOENT) { |
1334 | adev->ip_block_status[i].valid = false; | 1345 | adev->ip_blocks[i].status.valid = false; |
1335 | } else if (r) { | 1346 | } else if (r) { |
1336 | DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1347 | DRM_ERROR("early_init of IP block <%s> failed %d\n", |
1348 | adev->ip_blocks[i].version->funcs->name, r); | ||
1337 | return r; | 1349 | return r; |
1338 | } else { | 1350 | } else { |
1339 | adev->ip_block_status[i].valid = true; | 1351 | adev->ip_blocks[i].status.valid = true; |
1340 | } | 1352 | } |
1341 | } else { | 1353 | } else { |
1342 | adev->ip_block_status[i].valid = true; | 1354 | adev->ip_blocks[i].status.valid = true; |
1343 | } | 1355 | } |
1344 | } | 1356 | } |
1345 | } | 1357 | } |
@@ -1355,22 +1367,23 @@ static int amdgpu_init(struct amdgpu_device *adev) | |||
1355 | int i, r; | 1367 | int i, r; |
1356 | 1368 | ||
1357 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1369 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1358 | if (!adev->ip_block_status[i].valid) | 1370 | if (!adev->ip_blocks[i].status.valid) |
1359 | continue; | 1371 | continue; |
1360 | r = adev->ip_blocks[i].funcs->sw_init((void *)adev); | 1372 | r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); |
1361 | if (r) { | 1373 | if (r) { |
1362 | DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1374 | DRM_ERROR("sw_init of IP block <%s> failed %d\n", |
1375 | adev->ip_blocks[i].version->funcs->name, r); | ||
1363 | return r; | 1376 | return r; |
1364 | } | 1377 | } |
1365 | adev->ip_block_status[i].sw = true; | 1378 | adev->ip_blocks[i].status.sw = true; |
1366 | /* need to do gmc hw init early so we can allocate gpu mem */ | 1379 | /* need to do gmc hw init early so we can allocate gpu mem */ |
1367 | if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { | 1380 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { |
1368 | r = amdgpu_vram_scratch_init(adev); | 1381 | r = amdgpu_vram_scratch_init(adev); |
1369 | if (r) { | 1382 | if (r) { |
1370 | DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); | 1383 | DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); |
1371 | return r; | 1384 | return r; |
1372 | } | 1385 | } |
1373 | r = adev->ip_blocks[i].funcs->hw_init((void *)adev); | 1386 | r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); |
1374 | if (r) { | 1387 | if (r) { |
1375 | DRM_ERROR("hw_init %d failed %d\n", i, r); | 1388 | DRM_ERROR("hw_init %d failed %d\n", i, r); |
1376 | return r; | 1389 | return r; |
@@ -1380,22 +1393,23 @@ static int amdgpu_init(struct amdgpu_device *adev) | |||
1380 | DRM_ERROR("amdgpu_wb_init failed %d\n", r); | 1393 | DRM_ERROR("amdgpu_wb_init failed %d\n", r); |
1381 | return r; | 1394 | return r; |
1382 | } | 1395 | } |
1383 | adev->ip_block_status[i].hw = true; | 1396 | adev->ip_blocks[i].status.hw = true; |
1384 | } | 1397 | } |
1385 | } | 1398 | } |
1386 | 1399 | ||
1387 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1400 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1388 | if (!adev->ip_block_status[i].sw) | 1401 | if (!adev->ip_blocks[i].status.sw) |
1389 | continue; | 1402 | continue; |
1390 | /* gmc hw init is done early */ | 1403 | /* gmc hw init is done early */ |
1391 | if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) | 1404 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) |
1392 | continue; | 1405 | continue; |
1393 | r = adev->ip_blocks[i].funcs->hw_init((void *)adev); | 1406 | r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); |
1394 | if (r) { | 1407 | if (r) { |
1395 | DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1408 | DRM_ERROR("hw_init of IP block <%s> failed %d\n", |
1409 | adev->ip_blocks[i].version->funcs->name, r); | ||
1396 | return r; | 1410 | return r; |
1397 | } | 1411 | } |
1398 | adev->ip_block_status[i].hw = true; | 1412 | adev->ip_blocks[i].status.hw = true; |
1399 | } | 1413 | } |
1400 | 1414 | ||
1401 | return 0; | 1415 | return 0; |
@@ -1406,25 +1420,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev) | |||
1406 | int i = 0, r; | 1420 | int i = 0, r; |
1407 | 1421 | ||
1408 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1422 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1409 | if (!adev->ip_block_status[i].valid) | 1423 | if (!adev->ip_blocks[i].status.valid) |
1410 | continue; | 1424 | continue; |
1411 | if (adev->ip_blocks[i].funcs->late_init) { | 1425 | if (adev->ip_blocks[i].version->funcs->late_init) { |
1412 | r = adev->ip_blocks[i].funcs->late_init((void *)adev); | 1426 | r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); |
1413 | if (r) { | 1427 | if (r) { |
1414 | DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1428 | DRM_ERROR("late_init of IP block <%s> failed %d\n", |
1429 | adev->ip_blocks[i].version->funcs->name, r); | ||
1415 | return r; | 1430 | return r; |
1416 | } | 1431 | } |
1417 | adev->ip_block_status[i].late_initialized = true; | 1432 | adev->ip_blocks[i].status.late_initialized = true; |
1418 | } | 1433 | } |
1419 | /* skip CG for VCE/UVD, it's handled specially */ | 1434 | /* skip CG for VCE/UVD, it's handled specially */ |
1420 | if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD && | 1435 | if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && |
1421 | adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) { | 1436 | adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { |
1422 | /* enable clockgating to save power */ | 1437 | /* enable clockgating to save power */ |
1423 | r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, | 1438 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1424 | AMD_CG_STATE_GATE); | 1439 | AMD_CG_STATE_GATE); |
1425 | if (r) { | 1440 | if (r) { |
1426 | DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", | 1441 | DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", |
1427 | adev->ip_blocks[i].funcs->name, r); | 1442 | adev->ip_blocks[i].version->funcs->name, r); |
1428 | return r; | 1443 | return r; |
1429 | } | 1444 | } |
1430 | } | 1445 | } |
@@ -1439,68 +1454,71 @@ static int amdgpu_fini(struct amdgpu_device *adev) | |||
1439 | 1454 | ||
1440 | /* need to disable SMC first */ | 1455 | /* need to disable SMC first */ |
1441 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1456 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1442 | if (!adev->ip_block_status[i].hw) | 1457 | if (!adev->ip_blocks[i].status.hw) |
1443 | continue; | 1458 | continue; |
1444 | if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) { | 1459 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { |
1445 | /* ungate blocks before hw fini so that we can shutdown the blocks safely */ | 1460 | /* ungate blocks before hw fini so that we can shutdown the blocks safely */ |
1446 | r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, | 1461 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1447 | AMD_CG_STATE_UNGATE); | 1462 | AMD_CG_STATE_UNGATE); |
1448 | if (r) { | 1463 | if (r) { |
1449 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", | 1464 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", |
1450 | adev->ip_blocks[i].funcs->name, r); | 1465 | adev->ip_blocks[i].version->funcs->name, r); |
1451 | return r; | 1466 | return r; |
1452 | } | 1467 | } |
1453 | r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); | 1468 | r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); |
1454 | /* XXX handle errors */ | 1469 | /* XXX handle errors */ |
1455 | if (r) { | 1470 | if (r) { |
1456 | DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", | 1471 | DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", |
1457 | adev->ip_blocks[i].funcs->name, r); | 1472 | adev->ip_blocks[i].version->funcs->name, r); |
1458 | } | 1473 | } |
1459 | adev->ip_block_status[i].hw = false; | 1474 | adev->ip_blocks[i].status.hw = false; |
1460 | break; | 1475 | break; |
1461 | } | 1476 | } |
1462 | } | 1477 | } |
1463 | 1478 | ||
1464 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1479 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1465 | if (!adev->ip_block_status[i].hw) | 1480 | if (!adev->ip_blocks[i].status.hw) |
1466 | continue; | 1481 | continue; |
1467 | if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { | 1482 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { |
1468 | amdgpu_wb_fini(adev); | 1483 | amdgpu_wb_fini(adev); |
1469 | amdgpu_vram_scratch_fini(adev); | 1484 | amdgpu_vram_scratch_fini(adev); |
1470 | } | 1485 | } |
1471 | /* ungate blocks before hw fini so that we can shutdown the blocks safely */ | 1486 | /* ungate blocks before hw fini so that we can shutdown the blocks safely */ |
1472 | r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, | 1487 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1473 | AMD_CG_STATE_UNGATE); | 1488 | AMD_CG_STATE_UNGATE); |
1474 | if (r) { | 1489 | if (r) { |
1475 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1490 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", |
1491 | adev->ip_blocks[i].version->funcs->name, r); | ||
1476 | return r; | 1492 | return r; |
1477 | } | 1493 | } |
1478 | r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); | 1494 | r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); |
1479 | /* XXX handle errors */ | 1495 | /* XXX handle errors */ |
1480 | if (r) { | 1496 | if (r) { |
1481 | DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1497 | DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", |
1498 | adev->ip_blocks[i].version->funcs->name, r); | ||
1482 | } | 1499 | } |
1483 | adev->ip_block_status[i].hw = false; | 1500 | adev->ip_blocks[i].status.hw = false; |
1484 | } | 1501 | } |
1485 | 1502 | ||
1486 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1503 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1487 | if (!adev->ip_block_status[i].sw) | 1504 | if (!adev->ip_blocks[i].status.sw) |
1488 | continue; | 1505 | continue; |
1489 | r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); | 1506 | r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); |
1490 | /* XXX handle errors */ | 1507 | /* XXX handle errors */ |
1491 | if (r) { | 1508 | if (r) { |
1492 | DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1509 | DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", |
1510 | adev->ip_blocks[i].version->funcs->name, r); | ||
1493 | } | 1511 | } |
1494 | adev->ip_block_status[i].sw = false; | 1512 | adev->ip_blocks[i].status.sw = false; |
1495 | adev->ip_block_status[i].valid = false; | 1513 | adev->ip_blocks[i].status.valid = false; |
1496 | } | 1514 | } |
1497 | 1515 | ||
1498 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1516 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1499 | if (!adev->ip_block_status[i].late_initialized) | 1517 | if (!adev->ip_blocks[i].status.late_initialized) |
1500 | continue; | 1518 | continue; |
1501 | if (adev->ip_blocks[i].funcs->late_fini) | 1519 | if (adev->ip_blocks[i].version->funcs->late_fini) |
1502 | adev->ip_blocks[i].funcs->late_fini((void *)adev); | 1520 | adev->ip_blocks[i].version->funcs->late_fini((void *)adev); |
1503 | adev->ip_block_status[i].late_initialized = false; | 1521 | adev->ip_blocks[i].status.late_initialized = false; |
1504 | } | 1522 | } |
1505 | 1523 | ||
1506 | return 0; | 1524 | return 0; |
@@ -1518,21 +1536,23 @@ static int amdgpu_suspend(struct amdgpu_device *adev) | |||
1518 | } | 1536 | } |
1519 | 1537 | ||
1520 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1538 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1521 | if (!adev->ip_block_status[i].valid) | 1539 | if (!adev->ip_blocks[i].status.valid) |
1522 | continue; | 1540 | continue; |
1523 | /* ungate blocks so that suspend can properly shut them down */ | 1541 | /* ungate blocks so that suspend can properly shut them down */ |
1524 | if (i != AMD_IP_BLOCK_TYPE_SMC) { | 1542 | if (i != AMD_IP_BLOCK_TYPE_SMC) { |
1525 | r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, | 1543 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1526 | AMD_CG_STATE_UNGATE); | 1544 | AMD_CG_STATE_UNGATE); |
1527 | if (r) { | 1545 | if (r) { |
1528 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1546 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", |
1547 | adev->ip_blocks[i].version->funcs->name, r); | ||
1529 | } | 1548 | } |
1530 | } | 1549 | } |
1531 | /* XXX handle errors */ | 1550 | /* XXX handle errors */ |
1532 | r = adev->ip_blocks[i].funcs->suspend(adev); | 1551 | r = adev->ip_blocks[i].version->funcs->suspend(adev); |
1533 | /* XXX handle errors */ | 1552 | /* XXX handle errors */ |
1534 | if (r) { | 1553 | if (r) { |
1535 | DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1554 | DRM_ERROR("suspend of IP block <%s> failed %d\n", |
1555 | adev->ip_blocks[i].version->funcs->name, r); | ||
1536 | } | 1556 | } |
1537 | } | 1557 | } |
1538 | 1558 | ||
@@ -1544,11 +1564,12 @@ static int amdgpu_resume(struct amdgpu_device *adev) | |||
1544 | int i, r; | 1564 | int i, r; |
1545 | 1565 | ||
1546 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1566 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1547 | if (!adev->ip_block_status[i].valid) | 1567 | if (!adev->ip_blocks[i].status.valid) |
1548 | continue; | 1568 | continue; |
1549 | r = adev->ip_blocks[i].funcs->resume(adev); | 1569 | r = adev->ip_blocks[i].version->funcs->resume(adev); |
1550 | if (r) { | 1570 | if (r) { |
1551 | DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1571 | DRM_ERROR("resume of IP block <%s> failed %d\n", |
1572 | adev->ip_blocks[i].version->funcs->name, r); | ||
1552 | return r; | 1573 | return r; |
1553 | } | 1574 | } |
1554 | } | 1575 | } |
@@ -1599,7 +1620,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
1599 | adev->vm_manager.vm_pte_funcs = NULL; | 1620 | adev->vm_manager.vm_pte_funcs = NULL; |
1600 | adev->vm_manager.vm_pte_num_rings = 0; | 1621 | adev->vm_manager.vm_pte_num_rings = 0; |
1601 | adev->gart.gart_funcs = NULL; | 1622 | adev->gart.gart_funcs = NULL; |
1602 | adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); | 1623 | adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); |
1603 | 1624 | ||
1604 | adev->smc_rreg = &amdgpu_invalid_rreg; | 1625 | adev->smc_rreg = &amdgpu_invalid_rreg; |
1605 | adev->smc_wreg = &amdgpu_invalid_wreg; | 1626 | adev->smc_wreg = &amdgpu_invalid_wreg; |
@@ -1859,8 +1880,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) | |||
1859 | amdgpu_fence_driver_fini(adev); | 1880 | amdgpu_fence_driver_fini(adev); |
1860 | amdgpu_fbdev_fini(adev); | 1881 | amdgpu_fbdev_fini(adev); |
1861 | r = amdgpu_fini(adev); | 1882 | r = amdgpu_fini(adev); |
1862 | kfree(adev->ip_block_status); | ||
1863 | adev->ip_block_status = NULL; | ||
1864 | adev->accel_working = false; | 1883 | adev->accel_working = false; |
1865 | /* free i2c buses */ | 1884 | /* free i2c buses */ |
1866 | amdgpu_i2c_fini(adev); | 1885 | amdgpu_i2c_fini(adev); |
@@ -1956,7 +1975,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) | |||
1956 | 1975 | ||
1957 | r = amdgpu_suspend(adev); | 1976 | r = amdgpu_suspend(adev); |
1958 | 1977 | ||
1959 | /* evict remaining vram memory */ | 1978 | /* evict remaining vram memory |
1979 | * This second call to evict vram is to evict the gart page table | ||
1980 | * using the CPU. | ||
1981 | */ | ||
1960 | amdgpu_bo_evict_vram(adev); | 1982 | amdgpu_bo_evict_vram(adev); |
1961 | 1983 | ||
1962 | pci_save_state(dev->pdev); | 1984 | pci_save_state(dev->pdev); |
@@ -2096,13 +2118,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) | |||
2096 | bool asic_hang = false; | 2118 | bool asic_hang = false; |
2097 | 2119 | ||
2098 | for (i = 0; i < adev->num_ip_blocks; i++) { | 2120 | for (i = 0; i < adev->num_ip_blocks; i++) { |
2099 | if (!adev->ip_block_status[i].valid) | 2121 | if (!adev->ip_blocks[i].status.valid) |
2100 | continue; | 2122 | continue; |
2101 | if (adev->ip_blocks[i].funcs->check_soft_reset) | 2123 | if (adev->ip_blocks[i].version->funcs->check_soft_reset) |
2102 | adev->ip_block_status[i].hang = | 2124 | adev->ip_blocks[i].status.hang = |
2103 | adev->ip_blocks[i].funcs->check_soft_reset(adev); | 2125 | adev->ip_blocks[i].version->funcs->check_soft_reset(adev); |
2104 | if (adev->ip_block_status[i].hang) { | 2126 | if (adev->ip_blocks[i].status.hang) { |
2105 | DRM_INFO("IP block:%d is hang!\n", i); | 2127 | DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); |
2106 | asic_hang = true; | 2128 | asic_hang = true; |
2107 | } | 2129 | } |
2108 | } | 2130 | } |
@@ -2114,11 +2136,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) | |||
2114 | int i, r = 0; | 2136 | int i, r = 0; |
2115 | 2137 | ||
2116 | for (i = 0; i < adev->num_ip_blocks; i++) { | 2138 | for (i = 0; i < adev->num_ip_blocks; i++) { |
2117 | if (!adev->ip_block_status[i].valid) | 2139 | if (!adev->ip_blocks[i].status.valid) |
2118 | continue; | 2140 | continue; |
2119 | if (adev->ip_block_status[i].hang && | 2141 | if (adev->ip_blocks[i].status.hang && |
2120 | adev->ip_blocks[i].funcs->pre_soft_reset) { | 2142 | adev->ip_blocks[i].version->funcs->pre_soft_reset) { |
2121 | r = adev->ip_blocks[i].funcs->pre_soft_reset(adev); | 2143 | r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); |
2122 | if (r) | 2144 | if (r) |
2123 | return r; | 2145 | return r; |
2124 | } | 2146 | } |
@@ -2132,13 +2154,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev) | |||
2132 | int i; | 2154 | int i; |
2133 | 2155 | ||
2134 | for (i = 0; i < adev->num_ip_blocks; i++) { | 2156 | for (i = 0; i < adev->num_ip_blocks; i++) { |
2135 | if (!adev->ip_block_status[i].valid) | 2157 | if (!adev->ip_blocks[i].status.valid) |
2136 | continue; | 2158 | continue; |
2137 | if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) || | 2159 | if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || |
2138 | (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) || | 2160 | (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || |
2139 | (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) || | 2161 | (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || |
2140 | (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) { | 2162 | (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) { |
2141 | if (adev->ip_block_status[i].hang) { | 2163 | if (adev->ip_blocks[i].status.hang) { |
2142 | DRM_INFO("Some block need full reset!\n"); | 2164 | DRM_INFO("Some block need full reset!\n"); |
2143 | return true; | 2165 | return true; |
2144 | } | 2166 | } |
@@ -2152,11 +2174,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev) | |||
2152 | int i, r = 0; | 2174 | int i, r = 0; |
2153 | 2175 | ||
2154 | for (i = 0; i < adev->num_ip_blocks; i++) { | 2176 | for (i = 0; i < adev->num_ip_blocks; i++) { |
2155 | if (!adev->ip_block_status[i].valid) | 2177 | if (!adev->ip_blocks[i].status.valid) |
2156 | continue; | 2178 | continue; |
2157 | if (adev->ip_block_status[i].hang && | 2179 | if (adev->ip_blocks[i].status.hang && |
2158 | adev->ip_blocks[i].funcs->soft_reset) { | 2180 | adev->ip_blocks[i].version->funcs->soft_reset) { |
2159 | r = adev->ip_blocks[i].funcs->soft_reset(adev); | 2181 | r = adev->ip_blocks[i].version->funcs->soft_reset(adev); |
2160 | if (r) | 2182 | if (r) |
2161 | return r; | 2183 | return r; |
2162 | } | 2184 | } |
@@ -2170,11 +2192,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev) | |||
2170 | int i, r = 0; | 2192 | int i, r = 0; |
2171 | 2193 | ||
2172 | for (i = 0; i < adev->num_ip_blocks; i++) { | 2194 | for (i = 0; i < adev->num_ip_blocks; i++) { |
2173 | if (!adev->ip_block_status[i].valid) | 2195 | if (!adev->ip_blocks[i].status.valid) |
2174 | continue; | 2196 | continue; |
2175 | if (adev->ip_block_status[i].hang && | 2197 | if (adev->ip_blocks[i].status.hang && |
2176 | adev->ip_blocks[i].funcs->post_soft_reset) | 2198 | adev->ip_blocks[i].version->funcs->post_soft_reset) |
2177 | r = adev->ip_blocks[i].funcs->post_soft_reset(adev); | 2199 | r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); |
2178 | if (r) | 2200 | if (r) |
2179 | return r; | 2201 | return r; |
2180 | } | 2202 | } |
@@ -2193,7 +2215,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev) | |||
2193 | static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, | 2215 | static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, |
2194 | struct amdgpu_ring *ring, | 2216 | struct amdgpu_ring *ring, |
2195 | struct amdgpu_bo *bo, | 2217 | struct amdgpu_bo *bo, |
2196 | struct fence **fence) | 2218 | struct dma_fence **fence) |
2197 | { | 2219 | { |
2198 | uint32_t domain; | 2220 | uint32_t domain; |
2199 | int r; | 2221 | int r; |
@@ -2312,30 +2334,30 @@ retry: | |||
2312 | if (need_full_reset && amdgpu_need_backup(adev)) { | 2334 | if (need_full_reset && amdgpu_need_backup(adev)) { |
2313 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; | 2335 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
2314 | struct amdgpu_bo *bo, *tmp; | 2336 | struct amdgpu_bo *bo, *tmp; |
2315 | struct fence *fence = NULL, *next = NULL; | 2337 | struct dma_fence *fence = NULL, *next = NULL; |
2316 | 2338 | ||
2317 | DRM_INFO("recover vram bo from shadow\n"); | 2339 | DRM_INFO("recover vram bo from shadow\n"); |
2318 | mutex_lock(&adev->shadow_list_lock); | 2340 | mutex_lock(&adev->shadow_list_lock); |
2319 | list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { | 2341 | list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { |
2320 | amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); | 2342 | amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); |
2321 | if (fence) { | 2343 | if (fence) { |
2322 | r = fence_wait(fence, false); | 2344 | r = dma_fence_wait(fence, false); |
2323 | if (r) { | 2345 | if (r) { |
2324 | WARN(r, "recovery from shadow isn't completed\n"); | 2346 | WARN(r, "recovery from shadow isn't completed\n"); |
2325 | break; | 2347 | break; |
2326 | } | 2348 | } |
2327 | } | 2349 | } |
2328 | 2350 | ||
2329 | fence_put(fence); | 2351 | dma_fence_put(fence); |
2330 | fence = next; | 2352 | fence = next; |
2331 | } | 2353 | } |
2332 | mutex_unlock(&adev->shadow_list_lock); | 2354 | mutex_unlock(&adev->shadow_list_lock); |
2333 | if (fence) { | 2355 | if (fence) { |
2334 | r = fence_wait(fence, false); | 2356 | r = dma_fence_wait(fence, false); |
2335 | if (r) | 2357 | if (r) |
2335 | WARN(r, "recovery from shadow isn't completed\n"); | 2357 | WARN(r, "recovery from shadow isn't completed\n"); |
2337 | } | 2359 | } |
2338 | fence_put(fence); | 2360 | dma_fence_put(fence); |
2339 | } | 2361 | } |
2340 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | 2362 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
2341 | struct amdgpu_ring *ring = adev->rings[i]; | 2363 | struct amdgpu_ring *ring = adev->rings[i]; |
@@ -2531,6 +2553,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, | |||
2531 | se_bank = (*pos >> 24) & 0x3FF; | 2553 | se_bank = (*pos >> 24) & 0x3FF; |
2532 | sh_bank = (*pos >> 34) & 0x3FF; | 2554 | sh_bank = (*pos >> 34) & 0x3FF; |
2533 | instance_bank = (*pos >> 44) & 0x3FF; | 2555 | instance_bank = (*pos >> 44) & 0x3FF; |
2556 | |||
2557 | if (se_bank == 0x3FF) | ||
2558 | se_bank = 0xFFFFFFFF; | ||
2559 | if (sh_bank == 0x3FF) | ||
2560 | sh_bank = 0xFFFFFFFF; | ||
2561 | if (instance_bank == 0x3FF) | ||
2562 | instance_bank = 0xFFFFFFFF; | ||
2534 | use_bank = 1; | 2563 | use_bank = 1; |
2535 | } else { | 2564 | } else { |
2536 | use_bank = 0; | 2565 | use_bank = 0; |
@@ -2539,8 +2568,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, | |||
2539 | *pos &= 0x3FFFF; | 2568 | *pos &= 0x3FFFF; |
2540 | 2569 | ||
2541 | if (use_bank) { | 2570 | if (use_bank) { |
2542 | if (sh_bank >= adev->gfx.config.max_sh_per_se || | 2571 | if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || |
2543 | se_bank >= adev->gfx.config.max_shader_engines) | 2572 | (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) |
2544 | return -EINVAL; | 2573 | return -EINVAL; |
2545 | mutex_lock(&adev->grbm_idx_mutex); | 2574 | mutex_lock(&adev->grbm_idx_mutex); |
2546 | amdgpu_gfx_select_se_sh(adev, se_bank, | 2575 | amdgpu_gfx_select_se_sh(adev, se_bank, |
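The bank selection thus lives entirely in the upper bits of the debugfs file offset: bit 62 enables banked access, the SE, SH and instance indices sit at bits 24, 34 and 44, and a field value of 0x3FF now means broadcast (0xFFFFFFFF). A minimal userspace sketch of driving this interface, assuming the usual debugfs mount point, the amdgpu_regs name registered further below, and a purely illustrative register offset:

/* Sketch only: read one register with SE 0 / SH 0 / all instances selected. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t pos = 0x2000;			/* illustrative register byte offset, must fit in 0x3FFFF */
	uint32_t value;
	int fd;

	pos |= 1ULL << 62;			/* request banked access */
	pos |= (uint64_t)0 << 24;		/* se_bank */
	pos |= (uint64_t)0 << 34;		/* sh_bank */
	pos |= (uint64_t)0x3FF << 44;		/* instance_bank: 0x3FF -> broadcast */

	fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
	if (fd < 0)
		return 1;

	if (pread(fd, &value, sizeof(value), (off_t)pos) == sizeof(value))
		printf("reg = 0x%08x\n", value);

	close(fd);
	return 0;
}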
@@ -2587,10 +2616,45 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, | |||
2587 | struct amdgpu_device *adev = f->f_inode->i_private; | 2616 | struct amdgpu_device *adev = f->f_inode->i_private; |
2588 | ssize_t result = 0; | 2617 | ssize_t result = 0; |
2589 | int r; | 2618 | int r; |
2619 | bool pm_pg_lock, use_bank; | ||
2620 | unsigned instance_bank, sh_bank, se_bank; | ||
2590 | 2621 | ||
2591 | if (size & 0x3 || *pos & 0x3) | 2622 | if (size & 0x3 || *pos & 0x3) |
2592 | return -EINVAL; | 2623 | return -EINVAL; |
2593 | 2624 | ||
2625 | /* are we reading registers for which a PG lock is necessary? */ | ||
2626 | pm_pg_lock = (*pos >> 23) & 1; | ||
2627 | |||
2628 | if (*pos & (1ULL << 62)) { | ||
2629 | se_bank = (*pos >> 24) & 0x3FF; | ||
2630 | sh_bank = (*pos >> 34) & 0x3FF; | ||
2631 | instance_bank = (*pos >> 44) & 0x3FF; | ||
2632 | |||
2633 | if (se_bank == 0x3FF) | ||
2634 | se_bank = 0xFFFFFFFF; | ||
2635 | if (sh_bank == 0x3FF) | ||
2636 | sh_bank = 0xFFFFFFFF; | ||
2637 | if (instance_bank == 0x3FF) | ||
2638 | instance_bank = 0xFFFFFFFF; | ||
2639 | use_bank = 1; | ||
2640 | } else { | ||
2641 | use_bank = 0; | ||
2642 | } | ||
2643 | |||
2644 | *pos &= 0x3FFFF; | ||
2645 | |||
2646 | if (use_bank) { | ||
2647 | if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || | ||
2648 | (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) | ||
2649 | return -EINVAL; | ||
2650 | mutex_lock(&adev->grbm_idx_mutex); | ||
2651 | amdgpu_gfx_select_se_sh(adev, se_bank, | ||
2652 | sh_bank, instance_bank); | ||
2653 | } | ||
2654 | |||
2655 | if (pm_pg_lock) | ||
2656 | mutex_lock(&adev->pm.mutex); | ||
2657 | |||
2594 | while (size) { | 2658 | while (size) { |
2595 | uint32_t value; | 2659 | uint32_t value; |
2596 | 2660 | ||
@@ -2609,6 +2673,14 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, | |||
2609 | size -= 4; | 2673 | size -= 4; |
2610 | } | 2674 | } |
2611 | 2675 | ||
2676 | if (use_bank) { | ||
2677 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | ||
2678 | mutex_unlock(&adev->grbm_idx_mutex); | ||
2679 | } | ||
2680 | |||
2681 | if (pm_pg_lock) | ||
2682 | mutex_unlock(&adev->pm.mutex); | ||
2683 | |||
2612 | return result; | 2684 | return result; |
2613 | } | 2685 | } |
2614 | 2686 | ||
@@ -2871,6 +2943,56 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, | |||
2871 | return !r ? 4 : r; | 2943 | return !r ? 4 : r; |
2872 | } | 2944 | } |
2873 | 2945 | ||
2946 | static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, | ||
2947 | size_t size, loff_t *pos) | ||
2948 | { | ||
2949 | struct amdgpu_device *adev = f->f_inode->i_private; | ||
2950 | int r, x; | ||
2951 | ssize_t result=0; | ||
2952 | uint32_t offset, se, sh, cu, wave, simd, data[32]; | ||
2953 | |||
2954 | if (size & 3 || *pos & 3) | ||
2955 | return -EINVAL; | ||
2956 | |||
2957 | /* decode offset */ | ||
2958 | offset = (*pos & 0x7F); | ||
2959 | se = ((*pos >> 7) & 0xFF); | ||
2960 | sh = ((*pos >> 15) & 0xFF); | ||
2961 | cu = ((*pos >> 23) & 0xFF); | ||
2962 | wave = ((*pos >> 31) & 0xFF); | ||
2963 | simd = ((*pos >> 37) & 0xFF); | ||
2964 | |||
2965 | /* switch to the specific se/sh/cu */ | ||
2966 | mutex_lock(&adev->grbm_idx_mutex); | ||
2967 | amdgpu_gfx_select_se_sh(adev, se, sh, cu); | ||
2968 | |||
2969 | x = 0; | ||
2970 | if (adev->gfx.funcs->read_wave_data) | ||
2971 | adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); | ||
2972 | |||
2973 | amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); | ||
2974 | mutex_unlock(&adev->grbm_idx_mutex); | ||
2975 | |||
2976 | if (!x) | ||
2977 | return -EINVAL; | ||
2978 | |||
2979 | while (size && (offset < x * 4)) { | ||
2980 | uint32_t value; | ||
2981 | |||
2982 | value = data[offset >> 2]; | ||
2983 | r = put_user(value, (uint32_t *)buf); | ||
2984 | if (r) | ||
2985 | return r; | ||
2986 | |||
2987 | result += 4; | ||
2988 | buf += 4; | ||
2989 | offset += 4; | ||
2990 | size -= 4; | ||
2991 | } | ||
2992 | |||
2993 | return result; | ||
2994 | } | ||
2995 | |||
2874 | static const struct file_operations amdgpu_debugfs_regs_fops = { | 2996 | static const struct file_operations amdgpu_debugfs_regs_fops = { |
2875 | .owner = THIS_MODULE, | 2997 | .owner = THIS_MODULE, |
2876 | .read = amdgpu_debugfs_regs_read, | 2998 | .read = amdgpu_debugfs_regs_read, |
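The new amdgpu_wave file packs its selector more tightly: bits 0-6 carry the byte offset into the returned wave data (dword aligned), followed by the SE, SH, CU, wave and SIMD indices at bit positions 7, 15, 23, 31 and 37 as decoded above. A hedged helper sketch for composing such a file offset from userspace; the field widths mirror the decode, everything else is an assumption:

/* Sketch: build an amdgpu_wave file offset; a pread() at this offset then
 * returns up to 32 dwords of wave state starting at byte 'off'. */
static uint64_t amdgpu_wave_pos(unsigned off, unsigned se, unsigned sh,
				unsigned cu, unsigned wave, unsigned simd)
{
	return  (uint64_t)(off  & 0x7F)        |
		((uint64_t)(se   & 0xFF) << 7)  |
		((uint64_t)(sh   & 0xFF) << 15) |
		((uint64_t)(cu   & 0xFF) << 23) |
		((uint64_t)(wave & 0xFF) << 31) |
		((uint64_t)(simd & 0xFF) << 37);
}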
@@ -2908,6 +3030,12 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = { | |||
2908 | .llseek = default_llseek | 3030 | .llseek = default_llseek |
2909 | }; | 3031 | }; |
2910 | 3032 | ||
3033 | static const struct file_operations amdgpu_debugfs_wave_fops = { | ||
3034 | .owner = THIS_MODULE, | ||
3035 | .read = amdgpu_debugfs_wave_read, | ||
3036 | .llseek = default_llseek | ||
3037 | }; | ||
3038 | |||
2911 | static const struct file_operations *debugfs_regs[] = { | 3039 | static const struct file_operations *debugfs_regs[] = { |
2912 | &amdgpu_debugfs_regs_fops, | 3040 | &amdgpu_debugfs_regs_fops, |
2913 | &amdgpu_debugfs_regs_didt_fops, | 3041 | &amdgpu_debugfs_regs_didt_fops, |
@@ -2915,6 +3043,7 @@ static const struct file_operations *debugfs_regs[] = { | |||
2915 | &amdgpu_debugfs_regs_smc_fops, | 3043 | &amdgpu_debugfs_regs_smc_fops, |
2916 | &amdgpu_debugfs_gca_config_fops, | 3044 | &amdgpu_debugfs_gca_config_fops, |
2917 | &amdgpu_debugfs_sensors_fops, | 3045 | &amdgpu_debugfs_sensors_fops, |
3046 | &amdgpu_debugfs_wave_fops, | ||
2918 | }; | 3047 | }; |
2919 | 3048 | ||
2920 | static const char *debugfs_regs_names[] = { | 3049 | static const char *debugfs_regs_names[] = { |
@@ -2924,6 +3053,7 @@ static const char *debugfs_regs_names[] = { | |||
2924 | "amdgpu_regs_smc", | 3053 | "amdgpu_regs_smc", |
2925 | "amdgpu_gca_config", | 3054 | "amdgpu_gca_config", |
2926 | "amdgpu_sensors", | 3055 | "amdgpu_sensors", |
3056 | "amdgpu_wave", | ||
2927 | }; | 3057 | }; |
2928 | 3058 | ||
2929 | static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) | 3059 | static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 083e2b429872..741144fcc7bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
@@ -35,29 +35,29 @@ | |||
35 | #include <drm/drm_crtc_helper.h> | 35 | #include <drm/drm_crtc_helper.h> |
36 | #include <drm/drm_edid.h> | 36 | #include <drm/drm_edid.h> |
37 | 37 | ||
38 | static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb) | 38 | static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb) |
39 | { | 39 | { |
40 | struct amdgpu_flip_work *work = | 40 | struct amdgpu_flip_work *work = |
41 | container_of(cb, struct amdgpu_flip_work, cb); | 41 | container_of(cb, struct amdgpu_flip_work, cb); |
42 | 42 | ||
43 | fence_put(f); | 43 | dma_fence_put(f); |
44 | schedule_work(&work->flip_work.work); | 44 | schedule_work(&work->flip_work.work); |
45 | } | 45 | } |
46 | 46 | ||
47 | static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, | 47 | static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, |
48 | struct fence **f) | 48 | struct dma_fence **f) |
49 | { | 49 | { |
50 | struct fence *fence= *f; | 50 | struct dma_fence *fence= *f; |
51 | 51 | ||
52 | if (fence == NULL) | 52 | if (fence == NULL) |
53 | return false; | 53 | return false; |
54 | 54 | ||
55 | *f = NULL; | 55 | *f = NULL; |
56 | 56 | ||
57 | if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) | 57 | if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) |
58 | return true; | 58 | return true; |
59 | 59 | ||
60 | fence_put(fence); | 60 | dma_fence_put(fence); |
61 | return false; | 61 | return false; |
62 | } | 62 | } |
63 | 63 | ||
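This is the standard callback pattern under the renamed API: dma_fence_add_callback() returns 0 once the callback is armed and a negative error (typically -ENOENT when the fence has already signaled), in which case the caller drops its own reference. A generic, hedged sketch of the same idiom with placeholder names:

static void my_flip_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	dma_fence_put(f);	/* fence signaled: drop the reference, then kick deferred work */
}

static bool my_arm_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (!fence)
		return false;

	if (!dma_fence_add_callback(fence, cb, my_flip_cb))
		return true;	/* callback armed; it will fire when the fence signals */

	dma_fence_put(fence);	/* already signaled: release the reference now */
	return false;
}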
@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work) | |||
68 | struct amdgpu_flip_work *work = | 68 | struct amdgpu_flip_work *work = |
69 | container_of(delayed_work, struct amdgpu_flip_work, flip_work); | 69 | container_of(delayed_work, struct amdgpu_flip_work, flip_work); |
70 | struct amdgpu_device *adev = work->adev; | 70 | struct amdgpu_device *adev = work->adev; |
71 | struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id]; | 71 | struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id]; |
72 | 72 | ||
73 | struct drm_crtc *crtc = &amdgpuCrtc->base; | 73 | struct drm_crtc *crtc = &amdgpu_crtc->base; |
74 | unsigned long flags; | 74 | unsigned long flags; |
75 | unsigned i; | 75 | unsigned i; |
76 | int vpos, hpos; | 76 | int vpos, hpos; |
@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work) | |||
85 | /* Wait until we're out of the vertical blank period before the one | 85 | /* Wait until we're out of the vertical blank period before the one |
86 | * targeted by the flip | 86 | * targeted by the flip |
87 | */ | 87 | */ |
88 | if (amdgpuCrtc->enabled && | 88 | if (amdgpu_crtc->enabled && |
89 | (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, | 89 | (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, |
90 | &vpos, &hpos, NULL, NULL, | 90 | &vpos, &hpos, NULL, NULL, |
91 | &crtc->hwmode) | 91 | &crtc->hwmode) |
92 | & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == | 92 | & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == |
93 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && | 93 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && |
94 | (int)(work->target_vblank - | 94 | (int)(work->target_vblank - |
95 | amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) { | 95 | amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) { |
96 | schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000)); | 96 | schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000)); |
97 | return; | 97 | return; |
98 | } | 98 | } |
@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work) | |||
104 | adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async); | 104 | adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async); |
105 | 105 | ||
106 | /* Set the flip status */ | 106 | /* Set the flip status */ |
107 | amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; | 107 | amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED; |
108 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | 108 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
109 | 109 | ||
110 | 110 | ||
111 | DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n", | 111 | DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n", |
112 | amdgpuCrtc->crtc_id, amdgpuCrtc, work); | 112 | amdgpu_crtc->crtc_id, amdgpu_crtc, work); |
113 | 113 | ||
114 | } | 114 | } |
115 | 115 | ||
@@ -244,9 +244,9 @@ unreserve: | |||
244 | 244 | ||
245 | cleanup: | 245 | cleanup: |
246 | amdgpu_bo_unref(&work->old_abo); | 246 | amdgpu_bo_unref(&work->old_abo); |
247 | fence_put(work->excl); | 247 | dma_fence_put(work->excl); |
248 | for (i = 0; i < work->shared_count; ++i) | 248 | for (i = 0; i < work->shared_count; ++i) |
249 | fence_put(work->shared[i]); | 249 | dma_fence_put(work->shared[i]); |
250 | kfree(work->shared); | 250 | kfree(work->shared); |
251 | kfree(work); | 251 | kfree(work); |
252 | 252 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index 14f57d9915e3..6ca0333ca4c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | |||
@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) | |||
553 | entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) | 553 | entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) |
554 | ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); | 554 | ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); |
555 | } | 555 | } |
556 | for (i = 0; i < states->numEntries; i++) { | 556 | adev->pm.dpm.num_of_vce_states = |
557 | if (i >= AMDGPU_MAX_VCE_LEVELS) | 557 | states->numEntries > AMD_MAX_VCE_LEVELS ? |
558 | break; | 558 | AMD_MAX_VCE_LEVELS : states->numEntries; |
559 | for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { | ||
559 | vce_clk = (VCEClockInfo *) | 560 | vce_clk = (VCEClockInfo *) |
560 | ((u8 *)&array->entries[0] + | 561 | ((u8 *)&array->entries[0] + |
561 | (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); | 562 | (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); |
@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes) | |||
955 | 956 | ||
956 | return encoded_lanes[lanes]; | 957 | return encoded_lanes[lanes]; |
957 | } | 958 | } |
959 | |||
960 | struct amd_vce_state* | ||
961 | amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx) | ||
962 | { | ||
963 | if (idx < adev->pm.dpm.num_of_vce_states) | ||
964 | return &adev->pm.dpm.vce_states[idx]; | ||
965 | |||
966 | return NULL; | ||
967 | } | ||
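amdgpu_get_vce_clock_state() only bounds-checks the index, so callers must expect NULL once idx reaches num_of_vce_states. A hypothetical caller fragment, assuming adev is in scope and that struct amd_vce_state exposes evclk/ecclk:

unsigned idx;

for (idx = 0; idx < AMD_MAX_VCE_LEVELS; ++idx) {
	struct amd_vce_state *state = amdgpu_get_vce_clock_state(adev, idx);

	if (!state)
		break;
	DRM_DEBUG("VCE state %u: evclk %u ecclk %u\n", idx, state->evclk, state->ecclk);
}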
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 3738a96c2619..bd85e35998e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | |||
@@ -23,6 +23,446 @@ | |||
23 | #ifndef __AMDGPU_DPM_H__ | 23 | #ifndef __AMDGPU_DPM_H__ |
24 | #define __AMDGPU_DPM_H__ | 24 | #define __AMDGPU_DPM_H__ |
25 | 25 | ||
26 | enum amdgpu_int_thermal_type { | ||
27 | THERMAL_TYPE_NONE, | ||
28 | THERMAL_TYPE_EXTERNAL, | ||
29 | THERMAL_TYPE_EXTERNAL_GPIO, | ||
30 | THERMAL_TYPE_RV6XX, | ||
31 | THERMAL_TYPE_RV770, | ||
32 | THERMAL_TYPE_ADT7473_WITH_INTERNAL, | ||
33 | THERMAL_TYPE_EVERGREEN, | ||
34 | THERMAL_TYPE_SUMO, | ||
35 | THERMAL_TYPE_NI, | ||
36 | THERMAL_TYPE_SI, | ||
37 | THERMAL_TYPE_EMC2103_WITH_INTERNAL, | ||
38 | THERMAL_TYPE_CI, | ||
39 | THERMAL_TYPE_KV, | ||
40 | }; | ||
41 | |||
42 | enum amdgpu_dpm_auto_throttle_src { | ||
43 | AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, | ||
44 | AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL | ||
45 | }; | ||
46 | |||
47 | enum amdgpu_dpm_event_src { | ||
48 | AMDGPU_DPM_EVENT_SRC_ANALOG = 0, | ||
49 | AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1, | ||
50 | AMDGPU_DPM_EVENT_SRC_DIGITAL = 2, | ||
51 | AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, | ||
52 | AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 | ||
53 | }; | ||
54 | |||
55 | struct amdgpu_ps { | ||
56 | u32 caps; /* vbios flags */ | ||
57 | u32 class; /* vbios flags */ | ||
58 | u32 class2; /* vbios flags */ | ||
59 | /* UVD clocks */ | ||
60 | u32 vclk; | ||
61 | u32 dclk; | ||
62 | /* VCE clocks */ | ||
63 | u32 evclk; | ||
64 | u32 ecclk; | ||
65 | bool vce_active; | ||
66 | enum amd_vce_level vce_level; | ||
67 | /* asic priv */ | ||
68 | void *ps_priv; | ||
69 | }; | ||
70 | |||
71 | struct amdgpu_dpm_thermal { | ||
72 | /* thermal interrupt work */ | ||
73 | struct work_struct work; | ||
74 | /* low temperature threshold */ | ||
75 | int min_temp; | ||
76 | /* high temperature threshold */ | ||
77 | int max_temp; | ||
78 | /* was last interrupt low to high or high to low */ | ||
79 | bool high_to_low; | ||
80 | /* interrupt source */ | ||
81 | struct amdgpu_irq_src irq; | ||
82 | }; | ||
83 | |||
84 | enum amdgpu_clk_action | ||
85 | { | ||
86 | AMDGPU_SCLK_UP = 1, | ||
87 | AMDGPU_SCLK_DOWN | ||
88 | }; | ||
89 | |||
90 | struct amdgpu_blacklist_clocks | ||
91 | { | ||
92 | u32 sclk; | ||
93 | u32 mclk; | ||
94 | enum amdgpu_clk_action action; | ||
95 | }; | ||
96 | |||
97 | struct amdgpu_clock_and_voltage_limits { | ||
98 | u32 sclk; | ||
99 | u32 mclk; | ||
100 | u16 vddc; | ||
101 | u16 vddci; | ||
102 | }; | ||
103 | |||
104 | struct amdgpu_clock_array { | ||
105 | u32 count; | ||
106 | u32 *values; | ||
107 | }; | ||
108 | |||
109 | struct amdgpu_clock_voltage_dependency_entry { | ||
110 | u32 clk; | ||
111 | u16 v; | ||
112 | }; | ||
113 | |||
114 | struct amdgpu_clock_voltage_dependency_table { | ||
115 | u32 count; | ||
116 | struct amdgpu_clock_voltage_dependency_entry *entries; | ||
117 | }; | ||
118 | |||
119 | union amdgpu_cac_leakage_entry { | ||
120 | struct { | ||
121 | u16 vddc; | ||
122 | u32 leakage; | ||
123 | }; | ||
124 | struct { | ||
125 | u16 vddc1; | ||
126 | u16 vddc2; | ||
127 | u16 vddc3; | ||
128 | }; | ||
129 | }; | ||
130 | |||
131 | struct amdgpu_cac_leakage_table { | ||
132 | u32 count; | ||
133 | union amdgpu_cac_leakage_entry *entries; | ||
134 | }; | ||
135 | |||
136 | struct amdgpu_phase_shedding_limits_entry { | ||
137 | u16 voltage; | ||
138 | u32 sclk; | ||
139 | u32 mclk; | ||
140 | }; | ||
141 | |||
142 | struct amdgpu_phase_shedding_limits_table { | ||
143 | u32 count; | ||
144 | struct amdgpu_phase_shedding_limits_entry *entries; | ||
145 | }; | ||
146 | |||
147 | struct amdgpu_uvd_clock_voltage_dependency_entry { | ||
148 | u32 vclk; | ||
149 | u32 dclk; | ||
150 | u16 v; | ||
151 | }; | ||
152 | |||
153 | struct amdgpu_uvd_clock_voltage_dependency_table { | ||
154 | u8 count; | ||
155 | struct amdgpu_uvd_clock_voltage_dependency_entry *entries; | ||
156 | }; | ||
157 | |||
158 | struct amdgpu_vce_clock_voltage_dependency_entry { | ||
159 | u32 ecclk; | ||
160 | u32 evclk; | ||
161 | u16 v; | ||
162 | }; | ||
163 | |||
164 | struct amdgpu_vce_clock_voltage_dependency_table { | ||
165 | u8 count; | ||
166 | struct amdgpu_vce_clock_voltage_dependency_entry *entries; | ||
167 | }; | ||
168 | |||
169 | struct amdgpu_ppm_table { | ||
170 | u8 ppm_design; | ||
171 | u16 cpu_core_number; | ||
172 | u32 platform_tdp; | ||
173 | u32 small_ac_platform_tdp; | ||
174 | u32 platform_tdc; | ||
175 | u32 small_ac_platform_tdc; | ||
176 | u32 apu_tdp; | ||
177 | u32 dgpu_tdp; | ||
178 | u32 dgpu_ulv_power; | ||
179 | u32 tj_max; | ||
180 | }; | ||
181 | |||
182 | struct amdgpu_cac_tdp_table { | ||
183 | u16 tdp; | ||
184 | u16 configurable_tdp; | ||
185 | u16 tdc; | ||
186 | u16 battery_power_limit; | ||
187 | u16 small_power_limit; | ||
188 | u16 low_cac_leakage; | ||
189 | u16 high_cac_leakage; | ||
190 | u16 maximum_power_delivery_limit; | ||
191 | }; | ||
192 | |||
193 | struct amdgpu_dpm_dynamic_state { | ||
194 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk; | ||
195 | struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk; | ||
196 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk; | ||
197 | struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk; | ||
198 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk; | ||
199 | struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; | ||
200 | struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table; | ||
201 | struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table; | ||
202 | struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table; | ||
203 | struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk; | ||
204 | struct amdgpu_clock_array valid_sclk_values; | ||
205 | struct amdgpu_clock_array valid_mclk_values; | ||
206 | struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc; | ||
207 | struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac; | ||
208 | u32 mclk_sclk_ratio; | ||
209 | u32 sclk_mclk_delta; | ||
210 | u16 vddc_vddci_delta; | ||
211 | u16 min_vddc_for_pcie_gen2; | ||
212 | struct amdgpu_cac_leakage_table cac_leakage_table; | ||
213 | struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table; | ||
214 | struct amdgpu_ppm_table *ppm_table; | ||
215 | struct amdgpu_cac_tdp_table *cac_tdp_table; | ||
216 | }; | ||
217 | |||
218 | struct amdgpu_dpm_fan { | ||
219 | u16 t_min; | ||
220 | u16 t_med; | ||
221 | u16 t_high; | ||
222 | u16 pwm_min; | ||
223 | u16 pwm_med; | ||
224 | u16 pwm_high; | ||
225 | u8 t_hyst; | ||
226 | u32 cycle_delay; | ||
227 | u16 t_max; | ||
228 | u8 control_mode; | ||
229 | u16 default_max_fan_pwm; | ||
230 | u16 default_fan_output_sensitivity; | ||
231 | u16 fan_output_sensitivity; | ||
232 | bool ucode_fan_control; | ||
233 | }; | ||
234 | |||
235 | enum amdgpu_pcie_gen { | ||
236 | AMDGPU_PCIE_GEN1 = 0, | ||
237 | AMDGPU_PCIE_GEN2 = 1, | ||
238 | AMDGPU_PCIE_GEN3 = 2, | ||
239 | AMDGPU_PCIE_GEN_INVALID = 0xffff | ||
240 | }; | ||
241 | |||
242 | enum amdgpu_dpm_forced_level { | ||
243 | AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, | ||
244 | AMDGPU_DPM_FORCED_LEVEL_LOW = 1, | ||
245 | AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, | ||
246 | AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, | ||
247 | }; | ||
248 | |||
249 | struct amdgpu_dpm_funcs { | ||
250 | int (*get_temperature)(struct amdgpu_device *adev); | ||
251 | int (*pre_set_power_state)(struct amdgpu_device *adev); | ||
252 | int (*set_power_state)(struct amdgpu_device *adev); | ||
253 | void (*post_set_power_state)(struct amdgpu_device *adev); | ||
254 | void (*display_configuration_changed)(struct amdgpu_device *adev); | ||
255 | u32 (*get_sclk)(struct amdgpu_device *adev, bool low); | ||
256 | u32 (*get_mclk)(struct amdgpu_device *adev, bool low); | ||
257 | void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps); | ||
258 | void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m); | ||
259 | int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level); | ||
260 | bool (*vblank_too_short)(struct amdgpu_device *adev); | ||
261 | void (*powergate_uvd)(struct amdgpu_device *adev, bool gate); | ||
262 | void (*powergate_vce)(struct amdgpu_device *adev, bool gate); | ||
263 | void (*enable_bapm)(struct amdgpu_device *adev, bool enable); | ||
264 | void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode); | ||
265 | u32 (*get_fan_control_mode)(struct amdgpu_device *adev); | ||
266 | int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); | ||
267 | int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); | ||
268 | int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask); | ||
269 | int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf); | ||
270 | int (*get_sclk_od)(struct amdgpu_device *adev); | ||
271 | int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value); | ||
272 | int (*get_mclk_od)(struct amdgpu_device *adev); | ||
273 | int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value); | ||
274 | int (*check_state_equal)(struct amdgpu_device *adev, | ||
275 | struct amdgpu_ps *cps, | ||
276 | struct amdgpu_ps *rps, | ||
277 | bool *equal); | ||
278 | |||
279 | struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx); | ||
280 | }; | ||
281 | |||
282 | #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) | ||
283 | #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) | ||
284 | #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev)) | ||
285 | #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) | ||
286 | #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) | ||
287 | #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) | ||
288 | #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) | ||
289 | |||
290 | #define amdgpu_dpm_read_sensor(adev, idx, value) \ | ||
291 | ((adev)->pp_enabled ? \ | ||
292 | (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \ | ||
293 | -EINVAL) | ||
294 | |||
295 | #define amdgpu_dpm_get_temperature(adev) \ | ||
296 | ((adev)->pp_enabled ? \ | ||
297 | (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ | ||
298 | (adev)->pm.funcs->get_temperature((adev))) | ||
299 | |||
300 | #define amdgpu_dpm_set_fan_control_mode(adev, m) \ | ||
301 | ((adev)->pp_enabled ? \ | ||
302 | (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ | ||
303 | (adev)->pm.funcs->set_fan_control_mode((adev), (m))) | ||
304 | |||
305 | #define amdgpu_dpm_get_fan_control_mode(adev) \ | ||
306 | ((adev)->pp_enabled ? \ | ||
307 | (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ | ||
308 | (adev)->pm.funcs->get_fan_control_mode((adev))) | ||
309 | |||
310 | #define amdgpu_dpm_set_fan_speed_percent(adev, s) \ | ||
311 | ((adev)->pp_enabled ? \ | ||
312 | (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ | ||
313 | (adev)->pm.funcs->set_fan_speed_percent((adev), (s))) | ||
314 | |||
315 | #define amdgpu_dpm_get_fan_speed_percent(adev, s) \ | ||
316 | ((adev)->pp_enabled ? \ | ||
317 | (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ | ||
318 | (adev)->pm.funcs->get_fan_speed_percent((adev), (s))) | ||
319 | |||
320 | #define amdgpu_dpm_get_sclk(adev, l) \ | ||
321 | ((adev)->pp_enabled ? \ | ||
322 | (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ | ||
323 | (adev)->pm.funcs->get_sclk((adev), (l))) | ||
324 | |||
325 | #define amdgpu_dpm_get_mclk(adev, l) \ | ||
326 | ((adev)->pp_enabled ? \ | ||
327 | (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ | ||
328 | (adev)->pm.funcs->get_mclk((adev), (l))) | ||
329 | |||
330 | |||
331 | #define amdgpu_dpm_force_performance_level(adev, l) \ | ||
332 | ((adev)->pp_enabled ? \ | ||
333 | (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ | ||
334 | (adev)->pm.funcs->force_performance_level((adev), (l))) | ||
335 | |||
336 | #define amdgpu_dpm_powergate_uvd(adev, g) \ | ||
337 | ((adev)->pp_enabled ? \ | ||
338 | (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ | ||
339 | (adev)->pm.funcs->powergate_uvd((adev), (g))) | ||
340 | |||
341 | #define amdgpu_dpm_powergate_vce(adev, g) \ | ||
342 | ((adev)->pp_enabled ? \ | ||
343 | (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ | ||
344 | (adev)->pm.funcs->powergate_vce((adev), (g))) | ||
345 | |||
346 | #define amdgpu_dpm_get_current_power_state(adev) \ | ||
347 | (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) | ||
348 | |||
349 | #define amdgpu_dpm_get_performance_level(adev) \ | ||
350 | (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) | ||
351 | |||
352 | #define amdgpu_dpm_get_pp_num_states(adev, data) \ | ||
353 | (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) | ||
354 | |||
355 | #define amdgpu_dpm_get_pp_table(adev, table) \ | ||
356 | (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) | ||
357 | |||
358 | #define amdgpu_dpm_set_pp_table(adev, buf, size) \ | ||
359 | (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) | ||
360 | |||
361 | #define amdgpu_dpm_print_clock_levels(adev, type, buf) \ | ||
362 | (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) | ||
363 | |||
364 | #define amdgpu_dpm_force_clock_level(adev, type, level) \ | ||
365 | (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) | ||
366 | |||
367 | #define amdgpu_dpm_get_sclk_od(adev) \ | ||
368 | (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle) | ||
369 | |||
370 | #define amdgpu_dpm_set_sclk_od(adev, value) \ | ||
371 | (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value) | ||
372 | |||
373 | #define amdgpu_dpm_get_mclk_od(adev) \ | ||
374 | ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) | ||
375 | |||
376 | #define amdgpu_dpm_set_mclk_od(adev, value) \ | ||
377 | ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) | ||
378 | |||
379 | #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ | ||
380 | (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) | ||
381 | |||
382 | #define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal)) | ||
383 | |||
384 | #define amdgpu_dpm_get_vce_clock_state(adev, i) \ | ||
385 | ((adev)->pp_enabled ? \ | ||
386 | (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \ | ||
387 | (adev)->pm.funcs->get_vce_clock_state((adev), (i))) | ||
388 | |||
389 | struct amdgpu_dpm { | ||
390 | struct amdgpu_ps *ps; | ||
391 | /* number of valid power states */ | ||
392 | int num_ps; | ||
393 | /* current power state that is active */ | ||
394 | struct amdgpu_ps *current_ps; | ||
395 | /* requested power state */ | ||
396 | struct amdgpu_ps *requested_ps; | ||
397 | /* boot up power state */ | ||
398 | struct amdgpu_ps *boot_ps; | ||
399 | /* default uvd power state */ | ||
400 | struct amdgpu_ps *uvd_ps; | ||
401 | /* vce requirements */ | ||
402 | u32 num_of_vce_states; | ||
403 | struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS]; | ||
404 | enum amd_vce_level vce_level; | ||
405 | enum amd_pm_state_type state; | ||
406 | enum amd_pm_state_type user_state; | ||
407 | enum amd_pm_state_type last_state; | ||
408 | enum amd_pm_state_type last_user_state; | ||
409 | u32 platform_caps; | ||
410 | u32 voltage_response_time; | ||
411 | u32 backbias_response_time; | ||
412 | void *priv; | ||
413 | u32 new_active_crtcs; | ||
414 | int new_active_crtc_count; | ||
415 | u32 current_active_crtcs; | ||
416 | int current_active_crtc_count; | ||
417 | struct amdgpu_dpm_dynamic_state dyn_state; | ||
418 | struct amdgpu_dpm_fan fan; | ||
419 | u32 tdp_limit; | ||
420 | u32 near_tdp_limit; | ||
421 | u32 near_tdp_limit_adjusted; | ||
422 | u32 sq_ramping_threshold; | ||
423 | u32 cac_leakage; | ||
424 | u16 tdp_od_limit; | ||
425 | u32 tdp_adjustment; | ||
426 | u16 load_line_slope; | ||
427 | bool power_control; | ||
428 | bool ac_power; | ||
429 | /* special states active */ | ||
430 | bool thermal_active; | ||
431 | bool uvd_active; | ||
432 | bool vce_active; | ||
433 | /* thermal handling */ | ||
434 | struct amdgpu_dpm_thermal thermal; | ||
435 | /* forced levels */ | ||
436 | enum amdgpu_dpm_forced_level forced_level; | ||
437 | }; | ||
438 | |||
439 | struct amdgpu_pm { | ||
440 | struct mutex mutex; | ||
441 | u32 current_sclk; | ||
442 | u32 current_mclk; | ||
443 | u32 default_sclk; | ||
444 | u32 default_mclk; | ||
445 | struct amdgpu_i2c_chan *i2c_bus; | ||
446 | /* internal thermal controller on rv6xx+ */ | ||
447 | enum amdgpu_int_thermal_type int_thermal_type; | ||
448 | struct device *int_hwmon_dev; | ||
449 | /* fan control parameters */ | ||
450 | bool no_fan; | ||
451 | u8 fan_pulses_per_revolution; | ||
452 | u8 fan_min_rpm; | ||
453 | u8 fan_max_rpm; | ||
454 | /* dpm */ | ||
455 | bool dpm_enabled; | ||
456 | bool sysfs_initialized; | ||
457 | struct amdgpu_dpm dpm; | ||
458 | const struct firmware *fw; /* SMC firmware */ | ||
459 | uint32_t fw_version; | ||
460 | const struct amdgpu_dpm_funcs *funcs; | ||
461 | uint32_t pcie_gen_mask; | ||
462 | uint32_t pcie_mlw_mask; | ||
463 | struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */ | ||
464 | }; | ||
465 | |||
26 | #define R600_SSTU_DFLT 0 | 466 | #define R600_SSTU_DFLT 0 |
27 | #define R600_SST_DFLT 0x00C8 | 467 | #define R600_SST_DFLT 0x00C8 |
28 | 468 | ||
@@ -82,4 +522,7 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev, | |||
82 | u16 default_lanes); | 522 | u16 default_lanes); |
83 | u8 amdgpu_encode_pci_lane_width(u32 lanes); | 523 | u8 amdgpu_encode_pci_lane_width(u32 lanes); |
84 | 524 | ||
525 | struct amd_vce_state* | ||
526 | amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx); | ||
527 | |||
85 | #endif | 528 | #endif |
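Many of the amdgpu_dpm_* wrappers above share one dispatch rule: route through powerplay when pp_enabled is set, otherwise fall back to the legacy dpm callbacks. Open-coded, amdgpu_dpm_get_temperature() for instance expands to roughly:

int temp;

if (adev->pp_enabled)
	temp = adev->powerplay.pp_funcs->get_temperature(adev->powerplay.pp_handle);
else
	temp = adev->pm.funcs->get_temperature(adev);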
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 71ed27eb3dde..6bb4d9e9afe4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
@@ -58,9 +58,10 @@ | |||
58 | * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer. | 58 | * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer. |
59 | * - 3.7.0 - Add support for VCE clock list packet | 59 | * - 3.7.0 - Add support for VCE clock list packet |
60 | * - 3.8.0 - Add support raster config init in the kernel | 60 | * - 3.8.0 - Add support raster config init in the kernel |
61 | * - 3.9.0 - Add support for memory query info about VRAM and GTT. | ||
61 | */ | 62 | */ |
62 | #define KMS_DRIVER_MAJOR 3 | 63 | #define KMS_DRIVER_MAJOR 3 |
63 | #define KMS_DRIVER_MINOR 8 | 64 | #define KMS_DRIVER_MINOR 9 |
64 | #define KMS_DRIVER_PATCHLEVEL 0 | 65 | #define KMS_DRIVER_PATCHLEVEL 0 |
65 | 66 | ||
66 | int amdgpu_vram_limit = 0; | 67 | int amdgpu_vram_limit = 0; |
@@ -85,6 +86,7 @@ int amdgpu_vm_size = 64; | |||
85 | int amdgpu_vm_block_size = -1; | 86 | int amdgpu_vm_block_size = -1; |
86 | int amdgpu_vm_fault_stop = 0; | 87 | int amdgpu_vm_fault_stop = 0; |
87 | int amdgpu_vm_debug = 0; | 88 | int amdgpu_vm_debug = 0; |
89 | int amdgpu_vram_page_split = 1024; | ||
88 | int amdgpu_exp_hw_support = 0; | 90 | int amdgpu_exp_hw_support = 0; |
89 | int amdgpu_sched_jobs = 32; | 91 | int amdgpu_sched_jobs = 32; |
90 | int amdgpu_sched_hw_submission = 2; | 92 | int amdgpu_sched_hw_submission = 2; |
@@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444); | |||
165 | MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)"); | 167 | MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)"); |
166 | module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); | 168 | module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); |
167 | 169 | ||
170 | MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)"); | ||
171 | module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444); | ||
172 | |||
168 | MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); | 173 | MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); |
169 | module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); | 174 | module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); |
170 | 175 | ||
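The new limit is a load-time knob (mode 0444); purely as an illustration, it could be raised with something like amdgpu.vram_page_split=2048 on the kernel command line (or the equivalent modprobe option), with -1 disabling splitting as the description above states.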
@@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); | |||
201 | MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); | 206 | MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); |
202 | module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); | 207 | module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); |
203 | 208 | ||
204 | MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)"); | 209 | MODULE_PARM_DESC(virtual_display, |
210 | "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)"); | ||
205 | module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444); | 211 | module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444); |
206 | 212 | ||
207 | static const struct pci_device_id pciidlist[] = { | 213 | static const struct pci_device_id pciidlist[] = { |
@@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = { | |||
381 | {0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, | 387 | {0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, |
382 | /* fiji */ | 388 | /* fiji */ |
383 | {0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI}, | 389 | {0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI}, |
390 | {0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI}, | ||
384 | /* carrizo */ | 391 | /* carrizo */ |
385 | {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, | 392 | {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, |
386 | {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, | 393 | {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 8d01aa24d68a..38bdc2d300a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | |||
@@ -152,7 +152,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, | |||
152 | aligned_size = ALIGN(size, PAGE_SIZE); | 152 | aligned_size = ALIGN(size, PAGE_SIZE); |
153 | ret = amdgpu_gem_object_create(adev, aligned_size, 0, | 153 | ret = amdgpu_gem_object_create(adev, aligned_size, 0, |
154 | AMDGPU_GEM_DOMAIN_VRAM, | 154 | AMDGPU_GEM_DOMAIN_VRAM, |
155 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 155 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
156 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
156 | true, &gobj); | 157 | true, &gobj); |
157 | if (ret) { | 158 | if (ret) { |
158 | printk(KERN_ERR "failed to allocate framebuffer (%d)\n", | 159 | printk(KERN_ERR "failed to allocate framebuffer (%d)\n", |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 3a2e42f4b897..57552c79ec58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
@@ -48,7 +48,7 @@ | |||
48 | */ | 48 | */ |
49 | 49 | ||
50 | struct amdgpu_fence { | 50 | struct amdgpu_fence { |
51 | struct fence base; | 51 | struct dma_fence base; |
52 | 52 | ||
53 | /* RB, DMA, etc. */ | 53 | /* RB, DMA, etc. */ |
54 | struct amdgpu_ring *ring; | 54 | struct amdgpu_ring *ring; |
@@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void) | |||
73 | /* | 73 | /* |
74 | * Cast helper | 74 | * Cast helper |
75 | */ | 75 | */ |
76 | static const struct fence_ops amdgpu_fence_ops; | 76 | static const struct dma_fence_ops amdgpu_fence_ops; |
77 | static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f) | 77 | static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f) |
78 | { | 78 | { |
79 | struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); | 79 | struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); |
80 | 80 | ||
@@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) | |||
130 | * Emits a fence command on the requested ring (all asics). | 130 | * Emits a fence command on the requested ring (all asics). |
131 | * Returns 0 on success, -ENOMEM on failure. | 131 | * Returns 0 on success, -ENOMEM on failure. |
132 | */ | 132 | */ |
133 | int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) | 133 | int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f) |
134 | { | 134 | { |
135 | struct amdgpu_device *adev = ring->adev; | 135 | struct amdgpu_device *adev = ring->adev; |
136 | struct amdgpu_fence *fence; | 136 | struct amdgpu_fence *fence; |
137 | struct fence *old, **ptr; | 137 | struct dma_fence *old, **ptr; |
138 | uint32_t seq; | 138 | uint32_t seq; |
139 | 139 | ||
140 | fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); | 140 | fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); |
@@ -143,10 +143,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) | |||
143 | 143 | ||
144 | seq = ++ring->fence_drv.sync_seq; | 144 | seq = ++ring->fence_drv.sync_seq; |
145 | fence->ring = ring; | 145 | fence->ring = ring; |
146 | fence_init(&fence->base, &amdgpu_fence_ops, | 146 | dma_fence_init(&fence->base, &amdgpu_fence_ops, |
147 | &ring->fence_drv.lock, | 147 | &ring->fence_drv.lock, |
148 | adev->fence_context + ring->idx, | 148 | adev->fence_context + ring->idx, |
149 | seq); | 149 | seq); |
150 | amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, | 150 | amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, |
151 | seq, AMDGPU_FENCE_FLAG_INT); | 151 | seq, AMDGPU_FENCE_FLAG_INT); |
152 | 152 | ||
@@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) | |||
155 | * emitting the fence would mess up the hardware ring buffer. | 155 | * emitting the fence would mess up the hardware ring buffer. |
156 | */ | 156 | */ |
157 | old = rcu_dereference_protected(*ptr, 1); | 157 | old = rcu_dereference_protected(*ptr, 1); |
158 | if (old && !fence_is_signaled(old)) { | 158 | if (old && !dma_fence_is_signaled(old)) { |
159 | DRM_INFO("rcu slot is busy\n"); | 159 | DRM_INFO("rcu slot is busy\n"); |
160 | fence_wait(old, false); | 160 | dma_fence_wait(old, false); |
161 | } | 161 | } |
162 | 162 | ||
163 | rcu_assign_pointer(*ptr, fence_get(&fence->base)); | 163 | rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); |
164 | 164 | ||
165 | *f = &fence->base; | 165 | *f = &fence->base; |
166 | 166 | ||
@@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) | |||
211 | seq &= drv->num_fences_mask; | 211 | seq &= drv->num_fences_mask; |
212 | 212 | ||
213 | do { | 213 | do { |
214 | struct fence *fence, **ptr; | 214 | struct dma_fence *fence, **ptr; |
215 | 215 | ||
216 | ++last_seq; | 216 | ++last_seq; |
217 | last_seq &= drv->num_fences_mask; | 217 | last_seq &= drv->num_fences_mask; |
@@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) | |||
224 | if (!fence) | 224 | if (!fence) |
225 | continue; | 225 | continue; |
226 | 226 | ||
227 | r = fence_signal(fence); | 227 | r = dma_fence_signal(fence); |
228 | if (!r) | 228 | if (!r) |
229 | FENCE_TRACE(fence, "signaled from irq context\n"); | 229 | DMA_FENCE_TRACE(fence, "signaled from irq context\n"); |
230 | else | 230 | else |
231 | BUG(); | 231 | BUG(); |
232 | 232 | ||
233 | fence_put(fence); | 233 | dma_fence_put(fence); |
234 | } while (last_seq != seq); | 234 | } while (last_seq != seq); |
235 | } | 235 | } |
236 | 236 | ||
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg) | |||
260 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) | 260 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) |
261 | { | 261 | { |
262 | uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq); | 262 | uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq); |
263 | struct fence *fence, **ptr; | 263 | struct dma_fence *fence, **ptr; |
264 | int r; | 264 | int r; |
265 | 265 | ||
266 | if (!seq) | 266 | if (!seq) |
@@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) | |||
269 | ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; | 269 | ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; |
270 | rcu_read_lock(); | 270 | rcu_read_lock(); |
271 | fence = rcu_dereference(*ptr); | 271 | fence = rcu_dereference(*ptr); |
272 | if (!fence || !fence_get_rcu(fence)) { | 272 | if (!fence || !dma_fence_get_rcu(fence)) { |
273 | rcu_read_unlock(); | 273 | rcu_read_unlock(); |
274 | return 0; | 274 | return 0; |
275 | } | 275 | } |
276 | rcu_read_unlock(); | 276 | rcu_read_unlock(); |
277 | 277 | ||
278 | r = fence_wait(fence, false); | 278 | r = dma_fence_wait(fence, false); |
279 | fence_put(fence); | 279 | dma_fence_put(fence); |
280 | return r; | 280 | return r; |
281 | } | 281 | } |
282 | 282 | ||
@@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) | |||
452 | amd_sched_fini(&ring->sched); | 452 | amd_sched_fini(&ring->sched); |
453 | del_timer_sync(&ring->fence_drv.fallback_timer); | 453 | del_timer_sync(&ring->fence_drv.fallback_timer); |
454 | for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) | 454 | for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) |
455 | fence_put(ring->fence_drv.fences[j]); | 455 | dma_fence_put(ring->fence_drv.fences[j]); |
456 | kfree(ring->fence_drv.fences); | 456 | kfree(ring->fence_drv.fences); |
457 | ring->fence_drv.fences = NULL; | 457 | ring->fence_drv.fences = NULL; |
458 | ring->fence_drv.initialized = false; | 458 | ring->fence_drv.initialized = false; |
@@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev) | |||
541 | * Common fence implementation | 541 | * Common fence implementation |
542 | */ | 542 | */ |
543 | 543 | ||
544 | static const char *amdgpu_fence_get_driver_name(struct fence *fence) | 544 | static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence) |
545 | { | 545 | { |
546 | return "amdgpu"; | 546 | return "amdgpu"; |
547 | } | 547 | } |
548 | 548 | ||
549 | static const char *amdgpu_fence_get_timeline_name(struct fence *f) | 549 | static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) |
550 | { | 550 | { |
551 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | 551 | struct amdgpu_fence *fence = to_amdgpu_fence(f); |
552 | return (const char *)fence->ring->name; | 552 | return (const char *)fence->ring->name; |
@@ -560,7 +560,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f) | |||
560 | * to fence_queue that checks if this fence is signaled, and if so it | 560 | * to fence_queue that checks if this fence is signaled, and if so it |
561 | * signals the fence and removes itself. | 561 | * signals the fence and removes itself. |
562 | */ | 562 | */ |
563 | static bool amdgpu_fence_enable_signaling(struct fence *f) | 563 | static bool amdgpu_fence_enable_signaling(struct dma_fence *f) |
564 | { | 564 | { |
565 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | 565 | struct amdgpu_fence *fence = to_amdgpu_fence(f); |
566 | struct amdgpu_ring *ring = fence->ring; | 566 | struct amdgpu_ring *ring = fence->ring; |
@@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) | |||
568 | if (!timer_pending(&ring->fence_drv.fallback_timer)) | 568 | if (!timer_pending(&ring->fence_drv.fallback_timer)) |
569 | amdgpu_fence_schedule_fallback(ring); | 569 | amdgpu_fence_schedule_fallback(ring); |
570 | 570 | ||
571 | FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); | 571 | DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); |
572 | 572 | ||
573 | return true; | 573 | return true; |
574 | } | 574 | } |
@@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) | |||
582 | */ | 582 | */ |
583 | static void amdgpu_fence_free(struct rcu_head *rcu) | 583 | static void amdgpu_fence_free(struct rcu_head *rcu) |
584 | { | 584 | { |
585 | struct fence *f = container_of(rcu, struct fence, rcu); | 585 | struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); |
586 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | 586 | struct amdgpu_fence *fence = to_amdgpu_fence(f); |
587 | kmem_cache_free(amdgpu_fence_slab, fence); | 587 | kmem_cache_free(amdgpu_fence_slab, fence); |
588 | } | 588 | } |
@@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu) | |||
595 | * This function is called when the reference count becomes zero. | 595 | * This function is called when the reference count becomes zero. |
596 | * It just RCU schedules freeing up the fence. | 596 | * It just RCU schedules freeing up the fence. |
597 | */ | 597 | */ |
598 | static void amdgpu_fence_release(struct fence *f) | 598 | static void amdgpu_fence_release(struct dma_fence *f) |
599 | { | 599 | { |
600 | call_rcu(&f->rcu, amdgpu_fence_free); | 600 | call_rcu(&f->rcu, amdgpu_fence_free); |
601 | } | 601 | } |
602 | 602 | ||
603 | static const struct fence_ops amdgpu_fence_ops = { | 603 | static const struct dma_fence_ops amdgpu_fence_ops = { |
604 | .get_driver_name = amdgpu_fence_get_driver_name, | 604 | .get_driver_name = amdgpu_fence_get_driver_name, |
605 | .get_timeline_name = amdgpu_fence_get_timeline_name, | 605 | .get_timeline_name = amdgpu_fence_get_timeline_name, |
606 | .enable_signaling = amdgpu_fence_enable_signaling, | 606 | .enable_signaling = amdgpu_fence_enable_signaling, |
607 | .wait = fence_default_wait, | 607 | .wait = dma_fence_default_wait, |
608 | .release = amdgpu_fence_release, | 608 | .release = amdgpu_fence_release, |
609 | }; | 609 | }; |
610 | 610 | ||
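Taken together, the amdgpu_fence.c hunks are a mechanical s/fence/dma_fence/ of the driver fence implementation. A minimal skeleton under the new names, hedged: the locking, context and seqno management here are elided assumptions, not the amdgpu code.

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "mydrv";
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "mydrv.ring0";
}

static bool my_enable_signaling(struct dma_fence *f)
{
	/* arm whatever eventually calls dma_fence_signal(f) */
	return true;
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name	= my_get_driver_name,
	.get_timeline_name	= my_get_timeline_name,
	.enable_signaling	= my_enable_signaling,
	.wait			= dma_fence_default_wait,
};

/* in the submission path, with lock, context and seqno managed by the driver: */
dma_fence_init(&fence->base, &my_fence_ops, &lock, context, seqno);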
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 21a1242fc13b..964d2a946ed5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | |||
@@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) | |||
126 | if (adev->gart.robj == NULL) { | 126 | if (adev->gart.robj == NULL) { |
127 | r = amdgpu_bo_create(adev, adev->gart.table_size, | 127 | r = amdgpu_bo_create(adev, adev->gart.table_size, |
128 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, | 128 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, |
129 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 129 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
130 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
130 | NULL, NULL, &adev->gart.robj); | 131 | NULL, NULL, &adev->gart.robj); |
131 | if (r) { | 132 | if (r) { |
132 | return r; | 133 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 3ad0bf6ce3e4..cd62f6ffde2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev) | |||
116 | * Call from drm_gem_handle_create which appear in both new and open ioctl | 116 | * Call from drm_gem_handle_create which appear in both new and open ioctl |
117 | * case. | 117 | * case. |
118 | */ | 118 | */ |
119 | int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) | 119 | int amdgpu_gem_object_open(struct drm_gem_object *obj, |
120 | struct drm_file *file_priv) | ||
120 | { | 121 | { |
121 | struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj); | 122 | struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj); |
122 | struct amdgpu_device *adev = abo->adev; | 123 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
123 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; | 124 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; |
124 | struct amdgpu_vm *vm = &fpriv->vm; | 125 | struct amdgpu_vm *vm = &fpriv->vm; |
125 | struct amdgpu_bo_va *bo_va; | 126 | struct amdgpu_bo_va *bo_va; |
@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, | |||
142 | struct drm_file *file_priv) | 143 | struct drm_file *file_priv) |
143 | { | 144 | { |
144 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | 145 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); |
145 | struct amdgpu_device *adev = bo->adev; | 146 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
146 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; | 147 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; |
147 | struct amdgpu_vm *vm = &fpriv->vm; | 148 | struct amdgpu_vm *vm = &fpriv->vm; |
148 | 149 | ||
@@ -468,6 +469,16 @@ out: | |||
468 | return r; | 469 | return r; |
469 | } | 470 | } |
470 | 471 | ||
472 | static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo) | ||
473 | { | ||
474 | unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); | ||
475 | |||
476 | /* if anything is swapped out don't swap it in here, | ||
477 | just abort and wait for the next CS */ | ||
478 | |||
479 | return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0; | ||
480 | } | ||
481 | |||
471 | /** | 482 | /** |
472 | * amdgpu_gem_va_update_vm -update the bo_va in its VM | 483 | * amdgpu_gem_va_update_vm -update the bo_va in its VM |
473 | * | 484 | * |
@@ -478,7 +489,8 @@ out: | |||
478 | * vital here, so they are not reported back to userspace. | 489 | * vital here, so they are not reported back to userspace. |
479 | */ | 490 | */ |
480 | static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | 491 | static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, |
481 | struct amdgpu_bo_va *bo_va, uint32_t operation) | 492 | struct amdgpu_bo_va *bo_va, |
493 | uint32_t operation) | ||
482 | { | 494 | { |
483 | struct ttm_validate_buffer tv, *entry; | 495 | struct ttm_validate_buffer tv, *entry; |
484 | struct amdgpu_bo_list_entry vm_pd; | 496 | struct amdgpu_bo_list_entry vm_pd; |
@@ -501,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
501 | if (r) | 513 | if (r) |
502 | goto error_print; | 514 | goto error_print; |
503 | 515 | ||
504 | amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates); | ||
505 | list_for_each_entry(entry, &list, head) { | 516 | list_for_each_entry(entry, &list, head) { |
506 | domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); | 517 | domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); |
507 | /* if anything is swapped out don't swap it in here, | 518 | /* if anything is swapped out don't swap it in here, |
@@ -509,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
509 | if (domain == AMDGPU_GEM_DOMAIN_CPU) | 520 | if (domain == AMDGPU_GEM_DOMAIN_CPU) |
510 | goto error_unreserve; | 521 | goto error_unreserve; |
511 | } | 522 | } |
512 | list_for_each_entry(entry, &duplicates, head) { | 523 | r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check, |
513 | domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); | 524 | NULL); |
514 | /* if anything is swapped out don't swap it in here, | 525 | if (r) |
515 | just abort and wait for the next CS */ | 526 | goto error_unreserve; |
516 | if (domain == AMDGPU_GEM_DOMAIN_CPU) | ||
517 | goto error_unreserve; | ||
518 | } | ||
519 | 527 | ||
520 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); | 528 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); |
521 | if (r) | 529 | if (r) |
@@ -536,8 +544,6 @@ error_print: | |||
536 | DRM_ERROR("Couldn't update BO_VA (%d)\n", r); | 544 | DRM_ERROR("Couldn't update BO_VA (%d)\n", r); |
537 | } | 545 | } |
538 | 546 | ||
539 | |||
540 | |||
541 | int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | 547 | int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, |
542 | struct drm_file *filp) | 548 | struct drm_file *filp) |
543 | { | 549 | { |
@@ -547,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
547 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | 553 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
548 | struct amdgpu_bo *abo; | 554 | struct amdgpu_bo *abo; |
549 | struct amdgpu_bo_va *bo_va; | 555 | struct amdgpu_bo_va *bo_va; |
550 | struct ttm_validate_buffer tv, tv_pd; | 556 | struct amdgpu_bo_list_entry vm_pd; |
557 | struct ttm_validate_buffer tv; | ||
551 | struct ww_acquire_ctx ticket; | 558 | struct ww_acquire_ctx ticket; |
552 | struct list_head list, duplicates; | 559 | struct list_head list, duplicates; |
553 | uint32_t invalid_flags, va_flags = 0; | 560 | uint32_t invalid_flags, va_flags = 0; |
@@ -592,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
592 | tv.shared = true; | 599 | tv.shared = true; |
593 | list_add(&tv.head, &list); | 600 | list_add(&tv.head, &list); |
594 | 601 | ||
595 | tv_pd.bo = &fpriv->vm.page_directory->tbo; | 602 | amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); |
596 | tv_pd.shared = true; | ||
597 | list_add(&tv_pd.head, &list); | ||
598 | 603 | ||
599 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); | 604 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); |
600 | if (r) { | 605 | if (r) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index a074edd95c70..01a42b6a69a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | |||
@@ -24,6 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | #include <drm/drmP.h> | 25 | #include <drm/drmP.h> |
26 | #include "amdgpu.h" | 26 | #include "amdgpu.h" |
27 | #include "amdgpu_gfx.h" | ||
27 | 28 | ||
28 | /* | 29 | /* |
29 | * GPU scratch registers helpers function. | 30 | * GPU scratch registers helpers function. |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 51321e154c09..e02044086445 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | |||
@@ -27,6 +27,7 @@ | |||
27 | int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); | 27 | int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); |
28 | void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); | 28 | void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); |
29 | 29 | ||
30 | unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh); | 30 | void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, |
31 | unsigned max_sh); | ||
31 | 32 | ||
32 | #endif | 33 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index f86c84427778..3c634f02a3d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | |||
@@ -168,6 +168,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, | |||
168 | return -ENOMEM; | 168 | return -ENOMEM; |
169 | 169 | ||
170 | node->start = AMDGPU_BO_INVALID_OFFSET; | 170 | node->start = AMDGPU_BO_INVALID_OFFSET; |
171 | node->size = mem->num_pages; | ||
171 | mem->mm_node = node; | 172 | mem->mm_node = node; |
172 | 173 | ||
173 | if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) { | 174 | if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 6a6c86c9c169..216a9572d946 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
@@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, | |||
89 | * Free an IB (all asics). | 89 | * Free an IB (all asics). |
90 | */ | 90 | */ |
91 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, | 91 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, |
92 | struct fence *f) | 92 | struct dma_fence *f) |
93 | { | 93 | { |
94 | amdgpu_sa_bo_free(adev, &ib->sa_bo, f); | 94 | amdgpu_sa_bo_free(adev, &ib->sa_bo, f); |
95 | } | 95 | } |
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, | |||
116 | * to SI there was just a DE IB. | 116 | * to SI there was just a DE IB. |
117 | */ | 117 | */ |
118 | int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | 118 | int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, |
119 | struct amdgpu_ib *ibs, struct fence *last_vm_update, | 119 | struct amdgpu_ib *ibs, struct dma_fence *last_vm_update, |
120 | struct amdgpu_job *job, struct fence **f) | 120 | struct amdgpu_job *job, struct dma_fence **f) |
121 | { | 121 | { |
122 | struct amdgpu_device *adev = ring->adev; | 122 | struct amdgpu_device *adev = ring->adev; |
123 | struct amdgpu_ib *ib = &ibs[0]; | 123 | struct amdgpu_ib *ib = &ibs[0]; |
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
152 | return -EINVAL; | 152 | return -EINVAL; |
153 | } | 153 | } |
154 | 154 | ||
155 | alloc_size = amdgpu_ring_get_dma_frame_size(ring) + | 155 | alloc_size = ring->funcs->emit_frame_size + num_ibs * |
156 | num_ibs * amdgpu_ring_get_emit_ib_size(ring); | 156 | ring->funcs->emit_ib_size; |
157 | 157 | ||
158 | r = amdgpu_ring_alloc(ring, alloc_size); | 158 | r = amdgpu_ring_alloc(ring, alloc_size); |
159 | if (r) { | 159 | if (r) { |
@@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
161 | return r; | 161 | return r; |
162 | } | 162 | } |
163 | 163 | ||
164 | if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec) | 164 | if (ring->funcs->init_cond_exec) |
165 | patch_offset = amdgpu_ring_init_cond_exec(ring); | 165 | patch_offset = amdgpu_ring_init_cond_exec(ring); |
166 | 166 | ||
167 | if (vm) { | 167 | if (vm) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 8c5807994073..a0de6286c453 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | |||
@@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, | |||
81 | 81 | ||
82 | void amdgpu_job_free_resources(struct amdgpu_job *job) | 82 | void amdgpu_job_free_resources(struct amdgpu_job *job) |
83 | { | 83 | { |
84 | struct fence *f; | 84 | struct dma_fence *f; |
85 | unsigned i; | 85 | unsigned i; |
86 | 86 | ||
87 | /* use sched fence if available */ | 87 | /* use sched fence if available */ |
@@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job) | |||
95 | { | 95 | { |
96 | struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); | 96 | struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); |
97 | 97 | ||
98 | fence_put(job->fence); | 98 | dma_fence_put(job->fence); |
99 | amdgpu_sync_free(&job->sync); | 99 | amdgpu_sync_free(&job->sync); |
100 | kfree(job); | 100 | kfree(job); |
101 | } | 101 | } |
@@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job) | |||
104 | { | 104 | { |
105 | amdgpu_job_free_resources(job); | 105 | amdgpu_job_free_resources(job); |
106 | 106 | ||
107 | fence_put(job->fence); | 107 | dma_fence_put(job->fence); |
108 | amdgpu_sync_free(&job->sync); | 108 | amdgpu_sync_free(&job->sync); |
109 | kfree(job); | 109 | kfree(job); |
110 | } | 110 | } |
111 | 111 | ||
112 | int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, | 112 | int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, |
113 | struct amd_sched_entity *entity, void *owner, | 113 | struct amd_sched_entity *entity, void *owner, |
114 | struct fence **f) | 114 | struct dma_fence **f) |
115 | { | 115 | { |
116 | int r; | 116 | int r; |
117 | job->ring = ring; | 117 | job->ring = ring; |
@@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, | |||
125 | 125 | ||
126 | job->owner = owner; | 126 | job->owner = owner; |
127 | job->fence_ctx = entity->fence_context; | 127 | job->fence_ctx = entity->fence_context; |
128 | *f = fence_get(&job->base.s_fence->finished); | 128 | *f = dma_fence_get(&job->base.s_fence->finished); |
129 | amdgpu_job_free_resources(job); | 129 | amdgpu_job_free_resources(job); |
130 | amd_sched_entity_push_job(&job->base); | 130 | amd_sched_entity_push_job(&job->base); |
131 | 131 | ||
132 | return 0; | 132 | return 0; |
133 | } | 133 | } |
134 | 134 | ||
135 | static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) | 135 | static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) |
136 | { | 136 | { |
137 | struct amdgpu_job *job = to_amdgpu_job(sched_job); | 137 | struct amdgpu_job *job = to_amdgpu_job(sched_job); |
138 | struct amdgpu_vm *vm = job->vm; | 138 | struct amdgpu_vm *vm = job->vm; |
139 | 139 | ||
140 | struct fence *fence = amdgpu_sync_get_fence(&job->sync); | 140 | struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync); |
141 | 141 | ||
142 | if (fence == NULL && vm && !job->vm_id) { | 142 | if (fence == NULL && vm && !job->vm_id) { |
143 | struct amdgpu_ring *ring = job->ring; | 143 | struct amdgpu_ring *ring = job->ring; |
@@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) | |||
155 | return fence; | 155 | return fence; |
156 | } | 156 | } |
157 | 157 | ||
158 | static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) | 158 | static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) |
159 | { | 159 | { |
160 | struct fence *fence = NULL; | 160 | struct dma_fence *fence = NULL; |
161 | struct amdgpu_job *job; | 161 | struct amdgpu_job *job; |
162 | int r; | 162 | int r; |
163 | 163 | ||
@@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) | |||
176 | DRM_ERROR("Error scheduling IBs (%d)\n", r); | 176 | DRM_ERROR("Error scheduling IBs (%d)\n", r); |
177 | 177 | ||
178 | /* if gpu reset, hw fence will be replaced here */ | 178 | /* if gpu reset, hw fence will be replaced here */ |
179 | fence_put(job->fence); | 179 | dma_fence_put(job->fence); |
180 | job->fence = fence_get(fence); | 180 | job->fence = dma_fence_get(fence); |
181 | amdgpu_job_free_resources(job); | 181 | amdgpu_job_free_resources(job); |
182 | return fence; | 182 | return fence; |
183 | } | 183 | } |
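[Illustrative note, not part of the commit] Every hunk in amdgpu_job.c follows the same mechanical s/fence/dma_fence/ rename from the backmerged rework: only the type name, header and function prefixes change, the reference counting stays identical. A minimal sketch, assuming a finished fence taken from the scheduler:

	#include <linux/dma-fence.h>

	struct dma_fence *f;

	f = dma_fence_get(&job->base.s_fence->finished);   /* take a reference */
	/* ... hand the fence to the caller or wait on it ... */
	dma_fence_put(f);                                   /* drop the reference when done */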
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index c2c7fb140338..78392671046a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
@@ -306,10 +306,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
306 | } | 306 | } |
307 | 307 | ||
308 | for (i = 0; i < adev->num_ip_blocks; i++) { | 308 | for (i = 0; i < adev->num_ip_blocks; i++) { |
309 | if (adev->ip_blocks[i].type == type && | 309 | if (adev->ip_blocks[i].version->type == type && |
310 | adev->ip_block_status[i].valid) { | 310 | adev->ip_blocks[i].status.valid) { |
311 | ip.hw_ip_version_major = adev->ip_blocks[i].major; | 311 | ip.hw_ip_version_major = adev->ip_blocks[i].version->major; |
312 | ip.hw_ip_version_minor = adev->ip_blocks[i].minor; | 312 | ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor; |
313 | ip.capabilities_flags = 0; | 313 | ip.capabilities_flags = 0; |
314 | ip.available_rings = ring_mask; | 314 | ip.available_rings = ring_mask; |
315 | ip.ib_start_alignment = ib_start_alignment; | 315 | ip.ib_start_alignment = ib_start_alignment; |
@@ -345,8 +345,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
345 | } | 345 | } |
346 | 346 | ||
347 | for (i = 0; i < adev->num_ip_blocks; i++) | 347 | for (i = 0; i < adev->num_ip_blocks; i++) |
348 | if (adev->ip_blocks[i].type == type && | 348 | if (adev->ip_blocks[i].version->type == type && |
349 | adev->ip_block_status[i].valid && | 349 | adev->ip_blocks[i].status.valid && |
350 | count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) | 350 | count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) |
351 | count++; | 351 | count++; |
352 | 352 | ||
@@ -411,6 +411,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
411 | return copy_to_user(out, &vram_gtt, | 411 | return copy_to_user(out, &vram_gtt, |
412 | min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; | 412 | min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; |
413 | } | 413 | } |
414 | case AMDGPU_INFO_MEMORY: { | ||
415 | struct drm_amdgpu_memory_info mem; | ||
416 | |||
417 | memset(&mem, 0, sizeof(mem)); | ||
418 | mem.vram.total_heap_size = adev->mc.real_vram_size; | ||
419 | mem.vram.usable_heap_size = | ||
420 | adev->mc.real_vram_size - adev->vram_pin_size; | ||
421 | mem.vram.heap_usage = atomic64_read(&adev->vram_usage); | ||
422 | mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; | ||
423 | |||
424 | mem.cpu_accessible_vram.total_heap_size = | ||
425 | adev->mc.visible_vram_size; | ||
426 | mem.cpu_accessible_vram.usable_heap_size = | ||
427 | adev->mc.visible_vram_size - | ||
428 | (adev->vram_pin_size - adev->invisible_pin_size); | ||
429 | mem.cpu_accessible_vram.heap_usage = | ||
430 | atomic64_read(&adev->vram_vis_usage); | ||
431 | mem.cpu_accessible_vram.max_allocation = | ||
432 | mem.cpu_accessible_vram.usable_heap_size * 3 / 4; | ||
433 | |||
434 | mem.gtt.total_heap_size = adev->mc.gtt_size; | ||
435 | mem.gtt.usable_heap_size = | ||
436 | adev->mc.gtt_size - adev->gart_pin_size; | ||
437 | mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage); | ||
438 | mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; | ||
439 | |||
440 | return copy_to_user(out, &mem, | ||
441 | min((size_t)size, sizeof(mem))) | ||
442 | ? -EFAULT : 0; | ||
443 | } | ||
414 | case AMDGPU_INFO_READ_MMR_REG: { | 444 | case AMDGPU_INFO_READ_MMR_REG: { |
415 | unsigned n, alloc_size; | 445 | unsigned n, alloc_size; |
416 | uint32_t *regs; | 446 | uint32_t *regs; |
@@ -475,6 +505,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
475 | dev_info.ids_flags = 0; | 505 | dev_info.ids_flags = 0; |
476 | if (adev->flags & AMD_IS_APU) | 506 | if (adev->flags & AMD_IS_APU) |
477 | dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; | 507 | dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; |
508 | if (amdgpu_sriov_vf(adev)) | ||
509 | dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION; | ||
478 | dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; | 510 | dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; |
479 | dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; | 511 | dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; |
480 | dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); | 512 | dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); |
@@ -494,6 +526,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
494 | return copy_to_user(out, &dev_info, | 526 | return copy_to_user(out, &dev_info, |
495 | min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; | 527 | min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; |
496 | } | 528 | } |
529 | case AMDGPU_INFO_VCE_CLOCK_TABLE: { | ||
530 | unsigned i; | ||
531 | struct drm_amdgpu_info_vce_clock_table vce_clk_table = {}; | ||
532 | struct amd_vce_state *vce_state; | ||
533 | |||
534 | for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) { | ||
535 | vce_state = amdgpu_dpm_get_vce_clock_state(adev, i); | ||
536 | if (vce_state) { | ||
537 | vce_clk_table.entries[i].sclk = vce_state->sclk; | ||
538 | vce_clk_table.entries[i].mclk = vce_state->mclk; | ||
539 | vce_clk_table.entries[i].eclk = vce_state->evclk; | ||
540 | vce_clk_table.num_valid_entries++; | ||
541 | } | ||
542 | } | ||
543 | |||
544 | return copy_to_user(out, &vce_clk_table, | ||
545 | min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0; | ||
546 | } | ||
497 | default: | 547 | default: |
498 | DRM_DEBUG_KMS("Invalid request %d\n", info->query); | 548 | DRM_DEBUG_KMS("Invalid request %d\n", info->query); |
499 | return -EINVAL; | 549 | return -EINVAL; |
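[Illustrative note, not part of the commit] The new AMDGPU_INFO_MEMORY and AMDGPU_INFO_VCE_CLOCK_TABLE cases are reached through the existing generic info ioctl. A hedged userspace-side sketch of querying the memory heaps; struct names follow amdgpu_drm.h, error handling is omitted:

	struct drm_amdgpu_memory_info mem = {};
	struct drm_amdgpu_info request = {};

	request.query = AMDGPU_INFO_MEMORY;
	request.return_pointer = (uintptr_t)&mem;
	request.return_size = sizeof(mem);

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0)
		printf("usable VRAM: %llu bytes\n",
		       (unsigned long long)mem.vram.usable_heap_size);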
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 32fa7b7913f7..7ea3cacf9f9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | |||
@@ -285,7 +285,7 @@ free_rmn: | |||
285 | int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) | 285 | int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) |
286 | { | 286 | { |
287 | unsigned long end = addr + amdgpu_bo_size(bo) - 1; | 287 | unsigned long end = addr + amdgpu_bo_size(bo) - 1; |
288 | struct amdgpu_device *adev = bo->adev; | 288 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
289 | struct amdgpu_mn *rmn; | 289 | struct amdgpu_mn *rmn; |
290 | struct amdgpu_mn_node *node = NULL; | 290 | struct amdgpu_mn_node *node = NULL; |
291 | struct list_head bos; | 291 | struct list_head bos; |
@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) | |||
340 | */ | 340 | */ |
341 | void amdgpu_mn_unregister(struct amdgpu_bo *bo) | 341 | void amdgpu_mn_unregister(struct amdgpu_bo *bo) |
342 | { | 342 | { |
343 | struct amdgpu_device *adev = bo->adev; | 343 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
344 | struct amdgpu_mn *rmn; | 344 | struct amdgpu_mn *rmn; |
345 | struct list_head *head; | 345 | struct list_head *head; |
346 | 346 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 7b0eff7d060b..1e23334b07fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | |||
@@ -341,8 +341,6 @@ struct amdgpu_mode_info { | |||
341 | int num_dig; /* number of dig blocks */ | 341 | int num_dig; /* number of dig blocks */ |
342 | int disp_priority; | 342 | int disp_priority; |
343 | const struct amdgpu_display_funcs *funcs; | 343 | const struct amdgpu_display_funcs *funcs; |
344 | struct hrtimer vblank_timer; | ||
345 | enum amdgpu_interrupt_state vsync_timer_enabled; | ||
346 | }; | 344 | }; |
347 | 345 | ||
348 | #define AMDGPU_MAX_BL_LEVEL 0xFF | 346 | #define AMDGPU_MAX_BL_LEVEL 0xFF |
@@ -413,6 +411,9 @@ struct amdgpu_crtc { | |||
413 | u32 wm_high; | 411 | u32 wm_high; |
414 | u32 lb_vblank_lead_lines; | 412 | u32 lb_vblank_lead_lines; |
415 | struct drm_display_mode hw_mode; | 413 | struct drm_display_mode hw_mode; |
414 | /* for virtual dce */ | ||
415 | struct hrtimer vblank_timer; | ||
416 | enum amdgpu_interrupt_state vsync_timer_enabled; | ||
416 | }; | 417 | }; |
417 | 418 | ||
418 | struct amdgpu_encoder_atom_dig { | 419 | struct amdgpu_encoder_atom_dig { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index aa074fac0c7f..f0a0513ef4c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev, | |||
88 | 88 | ||
89 | static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) | 89 | static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) |
90 | { | 90 | { |
91 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); | ||
91 | struct amdgpu_bo *bo; | 92 | struct amdgpu_bo *bo; |
92 | 93 | ||
93 | bo = container_of(tbo, struct amdgpu_bo, tbo); | 94 | bo = container_of(tbo, struct amdgpu_bo, tbo); |
94 | 95 | ||
95 | amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL); | 96 | amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL); |
96 | 97 | ||
97 | drm_gem_object_release(&bo->gem_base); | 98 | drm_gem_object_release(&bo->gem_base); |
98 | amdgpu_bo_unref(&bo->parent); | 99 | amdgpu_bo_unref(&bo->parent); |
99 | if (!list_empty(&bo->shadow_list)) { | 100 | if (!list_empty(&bo->shadow_list)) { |
100 | mutex_lock(&bo->adev->shadow_list_lock); | 101 | mutex_lock(&adev->shadow_list_lock); |
101 | list_del_init(&bo->shadow_list); | 102 | list_del_init(&bo->shadow_list); |
102 | mutex_unlock(&bo->adev->shadow_list_lock); | 103 | mutex_unlock(&adev->shadow_list_lock); |
103 | } | 104 | } |
104 | kfree(bo->metadata); | 105 | kfree(bo->metadata); |
105 | kfree(bo); | 106 | kfree(bo); |
@@ -121,12 +122,17 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, | |||
121 | 122 | ||
122 | if (domain & AMDGPU_GEM_DOMAIN_VRAM) { | 123 | if (domain & AMDGPU_GEM_DOMAIN_VRAM) { |
123 | unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; | 124 | unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; |
125 | unsigned lpfn = 0; | ||
126 | |||
127 | /* This forces a reallocation if the flag wasn't set before */ | ||
128 | if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) | ||
129 | lpfn = adev->mc.real_vram_size >> PAGE_SHIFT; | ||
124 | 130 | ||
125 | if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS && | 131 | if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS && |
126 | !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && | 132 | !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && |
127 | adev->mc.visible_vram_size < adev->mc.real_vram_size) { | 133 | adev->mc.visible_vram_size < adev->mc.real_vram_size) { |
128 | places[c].fpfn = visible_pfn; | 134 | places[c].fpfn = visible_pfn; |
129 | places[c].lpfn = 0; | 135 | places[c].lpfn = lpfn; |
130 | places[c].flags = TTM_PL_FLAG_WC | | 136 | places[c].flags = TTM_PL_FLAG_WC | |
131 | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM | | 137 | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM | |
132 | TTM_PL_FLAG_TOPDOWN; | 138 | TTM_PL_FLAG_TOPDOWN; |
@@ -134,7 +140,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, | |||
134 | } | 140 | } |
135 | 141 | ||
136 | places[c].fpfn = 0; | 142 | places[c].fpfn = 0; |
137 | places[c].lpfn = 0; | 143 | places[c].lpfn = lpfn; |
138 | places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | | 144 | places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | |
139 | TTM_PL_FLAG_VRAM; | 145 | TTM_PL_FLAG_VRAM; |
140 | if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) | 146 | if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) |
@@ -205,8 +211,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, | |||
205 | 211 | ||
206 | void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain) | 212 | void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain) |
207 | { | 213 | { |
208 | amdgpu_ttm_placement_init(abo->adev, &abo->placement, | 214 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
209 | abo->placements, domain, abo->flags); | 215 | |
216 | amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements, | ||
217 | domain, abo->flags); | ||
210 | } | 218 | } |
211 | 219 | ||
212 | static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, | 220 | static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, |
@@ -245,7 +253,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, | |||
245 | int r; | 253 | int r; |
246 | 254 | ||
247 | r = amdgpu_bo_create(adev, size, align, true, domain, | 255 | r = amdgpu_bo_create(adev, size, align, true, domain, |
248 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 256 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
257 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
249 | NULL, NULL, bo_ptr); | 258 | NULL, NULL, bo_ptr); |
250 | if (r) { | 259 | if (r) { |
251 | dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); | 260 | dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); |
@@ -351,7 +360,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
351 | kfree(bo); | 360 | kfree(bo); |
352 | return r; | 361 | return r; |
353 | } | 362 | } |
354 | bo->adev = adev; | ||
355 | INIT_LIST_HEAD(&bo->shadow_list); | 363 | INIT_LIST_HEAD(&bo->shadow_list); |
356 | INIT_LIST_HEAD(&bo->va); | 364 | INIT_LIST_HEAD(&bo->va); |
357 | bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | | 365 | bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | |
@@ -383,7 +391,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
383 | 391 | ||
384 | if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && | 392 | if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && |
385 | bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { | 393 | bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { |
386 | struct fence *fence; | 394 | struct dma_fence *fence; |
387 | 395 | ||
388 | if (adev->mman.buffer_funcs_ring == NULL || | 396 | if (adev->mman.buffer_funcs_ring == NULL || |
389 | !adev->mman.buffer_funcs_ring->ready) { | 397 | !adev->mman.buffer_funcs_ring->ready) { |
@@ -403,9 +411,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
403 | amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); | 411 | amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); |
404 | amdgpu_bo_fence(bo, fence, false); | 412 | amdgpu_bo_fence(bo, fence, false); |
405 | amdgpu_bo_unreserve(bo); | 413 | amdgpu_bo_unreserve(bo); |
406 | fence_put(bo->tbo.moving); | 414 | dma_fence_put(bo->tbo.moving); |
407 | bo->tbo.moving = fence_get(fence); | 415 | bo->tbo.moving = dma_fence_get(fence); |
408 | fence_put(fence); | 416 | dma_fence_put(fence); |
409 | } | 417 | } |
410 | *bo_ptr = bo; | 418 | *bo_ptr = bo; |
411 | 419 | ||
@@ -491,7 +499,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, | |||
491 | struct amdgpu_ring *ring, | 499 | struct amdgpu_ring *ring, |
492 | struct amdgpu_bo *bo, | 500 | struct amdgpu_bo *bo, |
493 | struct reservation_object *resv, | 501 | struct reservation_object *resv, |
494 | struct fence **fence, | 502 | struct dma_fence **fence, |
495 | bool direct) | 503 | bool direct) |
496 | 504 | ||
497 | { | 505 | { |
@@ -523,7 +531,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, | |||
523 | struct amdgpu_ring *ring, | 531 | struct amdgpu_ring *ring, |
524 | struct amdgpu_bo *bo, | 532 | struct amdgpu_bo *bo, |
525 | struct reservation_object *resv, | 533 | struct reservation_object *resv, |
526 | struct fence **fence, | 534 | struct dma_fence **fence, |
527 | bool direct) | 535 | bool direct) |
528 | 536 | ||
529 | { | 537 | { |
@@ -616,6 +624,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | |||
616 | u64 min_offset, u64 max_offset, | 624 | u64 min_offset, u64 max_offset, |
617 | u64 *gpu_addr) | 625 | u64 *gpu_addr) |
618 | { | 626 | { |
627 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | ||
619 | int r, i; | 628 | int r, i; |
620 | unsigned fpfn, lpfn; | 629 | unsigned fpfn, lpfn; |
621 | 630 | ||
@@ -643,18 +652,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | |||
643 | 652 | ||
644 | return 0; | 653 | return 0; |
645 | } | 654 | } |
655 | |||
656 | bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | ||
646 | amdgpu_ttm_placement_from_domain(bo, domain); | 657 | amdgpu_ttm_placement_from_domain(bo, domain); |
647 | for (i = 0; i < bo->placement.num_placement; i++) { | 658 | for (i = 0; i < bo->placement.num_placement; i++) { |
648 | /* force to pin into visible video ram */ | 659 | /* force to pin into visible video ram */ |
649 | if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && | 660 | if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && |
650 | !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && | 661 | !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && |
651 | (!max_offset || max_offset > | 662 | (!max_offset || max_offset > |
652 | bo->adev->mc.visible_vram_size)) { | 663 | adev->mc.visible_vram_size)) { |
653 | if (WARN_ON_ONCE(min_offset > | 664 | if (WARN_ON_ONCE(min_offset > |
654 | bo->adev->mc.visible_vram_size)) | 665 | adev->mc.visible_vram_size)) |
655 | return -EINVAL; | 666 | return -EINVAL; |
656 | fpfn = min_offset >> PAGE_SHIFT; | 667 | fpfn = min_offset >> PAGE_SHIFT; |
657 | lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT; | 668 | lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; |
658 | } else { | 669 | } else { |
659 | fpfn = min_offset >> PAGE_SHIFT; | 670 | fpfn = min_offset >> PAGE_SHIFT; |
660 | lpfn = max_offset >> PAGE_SHIFT; | 671 | lpfn = max_offset >> PAGE_SHIFT; |
@@ -669,12 +680,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | |||
669 | 680 | ||
670 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 681 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
671 | if (unlikely(r)) { | 682 | if (unlikely(r)) { |
672 | dev_err(bo->adev->dev, "%p pin failed\n", bo); | 683 | dev_err(adev->dev, "%p pin failed\n", bo); |
673 | goto error; | 684 | goto error; |
674 | } | 685 | } |
675 | r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); | 686 | r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); |
676 | if (unlikely(r)) { | 687 | if (unlikely(r)) { |
677 | dev_err(bo->adev->dev, "%p bind failed\n", bo); | 688 | dev_err(adev->dev, "%p bind failed\n", bo); |
678 | goto error; | 689 | goto error; |
679 | } | 690 | } |
680 | 691 | ||
@@ -682,11 +693,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | |||
682 | if (gpu_addr != NULL) | 693 | if (gpu_addr != NULL) |
683 | *gpu_addr = amdgpu_bo_gpu_offset(bo); | 694 | *gpu_addr = amdgpu_bo_gpu_offset(bo); |
684 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) { | 695 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) { |
685 | bo->adev->vram_pin_size += amdgpu_bo_size(bo); | 696 | adev->vram_pin_size += amdgpu_bo_size(bo); |
686 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) | 697 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) |
687 | bo->adev->invisible_pin_size += amdgpu_bo_size(bo); | 698 | adev->invisible_pin_size += amdgpu_bo_size(bo); |
688 | } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { | 699 | } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { |
689 | bo->adev->gart_pin_size += amdgpu_bo_size(bo); | 700 | adev->gart_pin_size += amdgpu_bo_size(bo); |
690 | } | 701 | } |
691 | 702 | ||
692 | error: | 703 | error: |
@@ -700,10 +711,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr) | |||
700 | 711 | ||
701 | int amdgpu_bo_unpin(struct amdgpu_bo *bo) | 712 | int amdgpu_bo_unpin(struct amdgpu_bo *bo) |
702 | { | 713 | { |
714 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | ||
703 | int r, i; | 715 | int r, i; |
704 | 716 | ||
705 | if (!bo->pin_count) { | 717 | if (!bo->pin_count) { |
706 | dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo); | 718 | dev_warn(adev->dev, "%p unpin not necessary\n", bo); |
707 | return 0; | 719 | return 0; |
708 | } | 720 | } |
709 | bo->pin_count--; | 721 | bo->pin_count--; |
@@ -715,16 +727,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) | |||
715 | } | 727 | } |
716 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 728 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
717 | if (unlikely(r)) { | 729 | if (unlikely(r)) { |
718 | dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo); | 730 | dev_err(adev->dev, "%p validate failed for unpin\n", bo); |
719 | goto error; | 731 | goto error; |
720 | } | 732 | } |
721 | 733 | ||
722 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { | 734 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { |
723 | bo->adev->vram_pin_size -= amdgpu_bo_size(bo); | 735 | adev->vram_pin_size -= amdgpu_bo_size(bo); |
724 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) | 736 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) |
725 | bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); | 737 | adev->invisible_pin_size -= amdgpu_bo_size(bo); |
726 | } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { | 738 | } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { |
727 | bo->adev->gart_pin_size -= amdgpu_bo_size(bo); | 739 | adev->gart_pin_size -= amdgpu_bo_size(bo); |
728 | } | 740 | } |
729 | 741 | ||
730 | error: | 742 | error: |
@@ -849,6 +861,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, | |||
849 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, | 861 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, |
850 | struct ttm_mem_reg *new_mem) | 862 | struct ttm_mem_reg *new_mem) |
851 | { | 863 | { |
864 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); | ||
852 | struct amdgpu_bo *abo; | 865 | struct amdgpu_bo *abo; |
853 | struct ttm_mem_reg *old_mem = &bo->mem; | 866 | struct ttm_mem_reg *old_mem = &bo->mem; |
854 | 867 | ||
@@ -856,21 +869,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, | |||
856 | return; | 869 | return; |
857 | 870 | ||
858 | abo = container_of(bo, struct amdgpu_bo, tbo); | 871 | abo = container_of(bo, struct amdgpu_bo, tbo); |
859 | amdgpu_vm_bo_invalidate(abo->adev, abo); | 872 | amdgpu_vm_bo_invalidate(adev, abo); |
860 | 873 | ||
861 | /* update statistics */ | 874 | /* update statistics */ |
862 | if (!new_mem) | 875 | if (!new_mem) |
863 | return; | 876 | return; |
864 | 877 | ||
865 | /* move_notify is called before move happens */ | 878 | /* move_notify is called before move happens */ |
866 | amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem); | 879 | amdgpu_update_memory_usage(adev, &bo->mem, new_mem); |
867 | 880 | ||
868 | trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type); | 881 | trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type); |
869 | } | 882 | } |
870 | 883 | ||
871 | int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | 884 | int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) |
872 | { | 885 | { |
873 | struct amdgpu_device *adev; | 886 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); |
874 | struct amdgpu_bo *abo; | 887 | struct amdgpu_bo *abo; |
875 | unsigned long offset, size, lpfn; | 888 | unsigned long offset, size, lpfn; |
876 | int i, r; | 889 | int i, r; |
@@ -879,13 +892,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
879 | return 0; | 892 | return 0; |
880 | 893 | ||
881 | abo = container_of(bo, struct amdgpu_bo, tbo); | 894 | abo = container_of(bo, struct amdgpu_bo, tbo); |
882 | adev = abo->adev; | ||
883 | if (bo->mem.mem_type != TTM_PL_VRAM) | 895 | if (bo->mem.mem_type != TTM_PL_VRAM) |
884 | return 0; | 896 | return 0; |
885 | 897 | ||
886 | size = bo->mem.num_pages << PAGE_SHIFT; | 898 | size = bo->mem.num_pages << PAGE_SHIFT; |
887 | offset = bo->mem.start << PAGE_SHIFT; | 899 | offset = bo->mem.start << PAGE_SHIFT; |
888 | if ((offset + size) <= adev->mc.visible_vram_size) | 900 | /* TODO: figure out how to map scattered VRAM to the CPU */ |
901 | if ((offset + size) <= adev->mc.visible_vram_size && | ||
902 | (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) | ||
889 | return 0; | 903 | return 0; |
890 | 904 | ||
891 | /* Can't move a pinned BO to visible VRAM */ | 905 | /* Can't move a pinned BO to visible VRAM */ |
@@ -893,6 +907,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
893 | return -EINVAL; | 907 | return -EINVAL; |
894 | 908 | ||
895 | /* hurrah the memory is not visible ! */ | 909 | /* hurrah the memory is not visible ! */ |
910 | abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | ||
896 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); | 911 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); |
897 | lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; | 912 | lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; |
898 | for (i = 0; i < abo->placement.num_placement; i++) { | 913 | for (i = 0; i < abo->placement.num_placement; i++) { |
@@ -926,7 +941,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
926 | * @shared: true if fence should be added shared | 941 | * @shared: true if fence should be added shared |
927 | * | 942 | * |
928 | */ | 943 | */ |
929 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, | 944 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, |
930 | bool shared) | 945 | bool shared) |
931 | { | 946 | { |
932 | struct reservation_object *resv = bo->tbo.resv; | 947 | struct reservation_object *resv = bo->tbo.resv; |
@@ -954,6 +969,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) | |||
954 | WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) && | 969 | WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) && |
955 | !bo->pin_count); | 970 | !bo->pin_count); |
956 | WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); | 971 | WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); |
972 | WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM && | ||
973 | !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)); | ||
957 | 974 | ||
958 | return bo->tbo.offset; | 975 | return bo->tbo.offset; |
959 | } | 976 | } |
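[Illustrative note, not part of the commit] The amdgpu_object.c hunks drop the cached bo->adev pointer in favour of deriving the device from the TTM bo_device. Assuming amdgpu_ttm_adev() is a small container_of-style helper (its definition is not in this diff), the pattern is roughly:

	static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
	{
		/* mman.bdev is embedded in amdgpu_device, so walk back to the device */
		return container_of(bdev, struct amdgpu_device, mman.bdev);
	}

	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);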
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 8255034d73eb..5cbf59ec0f68 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | |||
@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type) | |||
71 | */ | 71 | */ |
72 | static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr) | 72 | static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr) |
73 | { | 73 | { |
74 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | ||
74 | int r; | 75 | int r; |
75 | 76 | ||
76 | r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); | 77 | r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); |
77 | if (unlikely(r != 0)) { | 78 | if (unlikely(r != 0)) { |
78 | if (r != -ERESTARTSYS) | 79 | if (r != -ERESTARTSYS) |
79 | dev_err(bo->adev->dev, "%p reserve failed\n", bo); | 80 | dev_err(adev->dev, "%p reserve failed\n", bo); |
80 | return r; | 81 | return r; |
81 | } | 82 | } |
82 | return 0; | 83 | return 0; |
@@ -156,19 +157,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, | |||
156 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, | 157 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, |
157 | struct ttm_mem_reg *new_mem); | 158 | struct ttm_mem_reg *new_mem); |
158 | int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | 159 | int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); |
159 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, | 160 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, |
160 | bool shared); | 161 | bool shared); |
161 | u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); | 162 | u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); |
162 | int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, | 163 | int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, |
163 | struct amdgpu_ring *ring, | 164 | struct amdgpu_ring *ring, |
164 | struct amdgpu_bo *bo, | 165 | struct amdgpu_bo *bo, |
165 | struct reservation_object *resv, | 166 | struct reservation_object *resv, |
166 | struct fence **fence, bool direct); | 167 | struct dma_fence **fence, bool direct); |
167 | int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, | 168 | int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, |
168 | struct amdgpu_ring *ring, | 169 | struct amdgpu_ring *ring, |
169 | struct amdgpu_bo *bo, | 170 | struct amdgpu_bo *bo, |
170 | struct reservation_object *resv, | 171 | struct reservation_object *resv, |
171 | struct fence **fence, | 172 | struct dma_fence **fence, |
172 | bool direct); | 173 | bool direct); |
173 | 174 | ||
174 | 175 | ||
@@ -200,7 +201,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, | |||
200 | unsigned size, unsigned align); | 201 | unsigned size, unsigned align); |
201 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, | 202 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, |
202 | struct amdgpu_sa_bo **sa_bo, | 203 | struct amdgpu_sa_bo **sa_bo, |
203 | struct fence *fence); | 204 | struct dma_fence *fence); |
204 | #if defined(CONFIG_DEBUG_FS) | 205 | #if defined(CONFIG_DEBUG_FS) |
205 | void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, | 206 | void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, |
206 | struct seq_file *m); | 207 | struct seq_file *m); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index accc908bdc88..274f3309aec9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
@@ -986,10 +986,10 @@ restart_search: | |||
986 | 986 | ||
987 | static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) | 987 | static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) |
988 | { | 988 | { |
989 | int i; | ||
990 | struct amdgpu_ps *ps; | 989 | struct amdgpu_ps *ps; |
991 | enum amd_pm_state_type dpm_state; | 990 | enum amd_pm_state_type dpm_state; |
992 | int ret; | 991 | int ret; |
992 | bool equal; | ||
993 | 993 | ||
994 | /* if dpm init failed */ | 994 | /* if dpm init failed */ |
995 | if (!adev->pm.dpm_enabled) | 995 | if (!adev->pm.dpm_enabled) |
@@ -1009,46 +1009,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) | |||
1009 | else | 1009 | else |
1010 | return; | 1010 | return; |
1011 | 1011 | ||
1012 | /* no need to reprogram if nothing changed unless we are on BTC+ */ | ||
1013 | if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) { | ||
1014 | /* vce just modifies an existing state so force a change */ | ||
1015 | if (ps->vce_active != adev->pm.dpm.vce_active) | ||
1016 | goto force; | ||
1017 | if (adev->flags & AMD_IS_APU) { | ||
1018 | /* for APUs if the num crtcs changed but state is the same, | ||
1019 | * all we need to do is update the display configuration. | ||
1020 | */ | ||
1021 | if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) { | ||
1022 | /* update display watermarks based on new power state */ | ||
1023 | amdgpu_display_bandwidth_update(adev); | ||
1024 | /* update displays */ | ||
1025 | amdgpu_dpm_display_configuration_changed(adev); | ||
1026 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; | ||
1027 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; | ||
1028 | } | ||
1029 | return; | ||
1030 | } else { | ||
1031 | /* for BTC+ if the num crtcs hasn't changed and state is the same, | ||
1032 | * nothing to do, if the num crtcs is > 1 and state is the same, | ||
1033 | * update display configuration. | ||
1034 | */ | ||
1035 | if (adev->pm.dpm.new_active_crtcs == | ||
1036 | adev->pm.dpm.current_active_crtcs) { | ||
1037 | return; | ||
1038 | } else if ((adev->pm.dpm.current_active_crtc_count > 1) && | ||
1039 | (adev->pm.dpm.new_active_crtc_count > 1)) { | ||
1040 | /* update display watermarks based on new power state */ | ||
1041 | amdgpu_display_bandwidth_update(adev); | ||
1042 | /* update displays */ | ||
1043 | amdgpu_dpm_display_configuration_changed(adev); | ||
1044 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; | ||
1045 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; | ||
1046 | return; | ||
1047 | } | ||
1048 | } | ||
1049 | } | ||
1050 | |||
1051 | force: | ||
1052 | if (amdgpu_dpm == 1) { | 1012 | if (amdgpu_dpm == 1) { |
1053 | printk("switching from power state:\n"); | 1013 | printk("switching from power state:\n"); |
1054 | amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); | 1014 | amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); |
@@ -1059,31 +1019,21 @@ force: | |||
1059 | /* update whether vce is active */ | 1019 | /* update whether vce is active */ |
1060 | ps->vce_active = adev->pm.dpm.vce_active; | 1020 | ps->vce_active = adev->pm.dpm.vce_active; |
1061 | 1021 | ||
1022 | amdgpu_dpm_display_configuration_changed(adev); | ||
1023 | |||
1062 | ret = amdgpu_dpm_pre_set_power_state(adev); | 1024 | ret = amdgpu_dpm_pre_set_power_state(adev); |
1063 | if (ret) | 1025 | if (ret) |
1064 | return; | 1026 | return; |
1065 | 1027 | ||
1066 | /* update display watermarks based on new power state */ | 1028 | if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))) |
1067 | amdgpu_display_bandwidth_update(adev); | 1029 | equal = false; |
1068 | 1030 | ||
1069 | /* wait for the rings to drain */ | 1031 | if (equal) |
1070 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | 1032 | return; |
1071 | struct amdgpu_ring *ring = adev->rings[i]; | ||
1072 | if (ring && ring->ready) | ||
1073 | amdgpu_fence_wait_empty(ring); | ||
1074 | } | ||
1075 | 1033 | ||
1076 | /* program the new power state */ | ||
1077 | amdgpu_dpm_set_power_state(adev); | 1034 | amdgpu_dpm_set_power_state(adev); |
1078 | |||
1079 | /* update current power state */ | ||
1080 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps; | ||
1081 | |||
1082 | amdgpu_dpm_post_set_power_state(adev); | 1035 | amdgpu_dpm_post_set_power_state(adev); |
1083 | 1036 | ||
1084 | /* update displays */ | ||
1085 | amdgpu_dpm_display_configuration_changed(adev); | ||
1086 | |||
1087 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; | 1037 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; |
1088 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; | 1038 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; |
1089 | 1039 | ||
@@ -1135,7 +1085,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) | |||
1135 | mutex_lock(&adev->pm.mutex); | 1085 | mutex_lock(&adev->pm.mutex); |
1136 | adev->pm.dpm.vce_active = true; | 1086 | adev->pm.dpm.vce_active = true; |
1137 | /* XXX select vce level based on ring/task */ | 1087 | /* XXX select vce level based on ring/task */ |
1138 | adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; | 1088 | adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; |
1139 | mutex_unlock(&adev->pm.mutex); | 1089 | mutex_unlock(&adev->pm.mutex); |
1140 | } else { | 1090 | } else { |
1141 | mutex_lock(&adev->pm.mutex); | 1091 | mutex_lock(&adev->pm.mutex); |
@@ -1276,20 +1226,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) | |||
1276 | struct drm_device *ddev = adev->ddev; | 1226 | struct drm_device *ddev = adev->ddev; |
1277 | struct drm_crtc *crtc; | 1227 | struct drm_crtc *crtc; |
1278 | struct amdgpu_crtc *amdgpu_crtc; | 1228 | struct amdgpu_crtc *amdgpu_crtc; |
1229 | int i = 0; | ||
1279 | 1230 | ||
1280 | if (!adev->pm.dpm_enabled) | 1231 | if (!adev->pm.dpm_enabled) |
1281 | return; | 1232 | return; |
1282 | 1233 | ||
1283 | if (adev->pp_enabled) { | 1234 | amdgpu_display_bandwidth_update(adev); |
1284 | int i = 0; | ||
1285 | 1235 | ||
1286 | amdgpu_display_bandwidth_update(adev); | 1236 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { |
1287 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | 1237 | struct amdgpu_ring *ring = adev->rings[i]; |
1288 | struct amdgpu_ring *ring = adev->rings[i]; | 1238 | if (ring && ring->ready) |
1289 | if (ring && ring->ready) | 1239 | amdgpu_fence_wait_empty(ring); |
1290 | amdgpu_fence_wait_empty(ring); | 1240 | } |
1291 | } | ||
1292 | 1241 | ||
1242 | if (adev->pp_enabled) { | ||
1293 | amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); | 1243 | amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); |
1294 | } else { | 1244 | } else { |
1295 | mutex_lock(&adev->pm.mutex); | 1245 | mutex_lock(&adev->pm.mutex); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 7532ff822aa7..fa6baf31a35d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | |||
@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle) | |||
299 | return ret; | 299 | return ret; |
300 | } | 300 | } |
301 | 301 | ||
302 | const struct amd_ip_funcs amdgpu_pp_ip_funcs = { | 302 | static const struct amd_ip_funcs amdgpu_pp_ip_funcs = { |
303 | .name = "amdgpu_powerplay", | 303 | .name = "amdgpu_powerplay", |
304 | .early_init = amdgpu_pp_early_init, | 304 | .early_init = amdgpu_pp_early_init, |
305 | .late_init = amdgpu_pp_late_init, | 305 | .late_init = amdgpu_pp_late_init, |
@@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = { | |||
316 | .set_clockgating_state = amdgpu_pp_set_clockgating_state, | 316 | .set_clockgating_state = amdgpu_pp_set_clockgating_state, |
317 | .set_powergating_state = amdgpu_pp_set_powergating_state, | 317 | .set_powergating_state = amdgpu_pp_set_powergating_state, |
318 | }; | 318 | }; |
319 | |||
320 | const struct amdgpu_ip_block_version amdgpu_pp_ip_block = | ||
321 | { | ||
322 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
323 | .major = 1, | ||
324 | .minor = 0, | ||
325 | .rev = 0, | ||
326 | .funcs = &amdgpu_pp_ip_funcs, | ||
327 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h index da5cf47cfd99..c0c4bfdcdb14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h | |||
@@ -23,11 +23,11 @@ | |||
23 | * | 23 | * |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #ifndef __AMDGPU_POPWERPLAY_H__ | 26 | #ifndef __AMDGPU_POWERPLAY_H__ |
27 | #define __AMDGPU_POPWERPLAY_H__ | 27 | #define __AMDGPU_POWERPLAY_H__ |
28 | 28 | ||
29 | #include "amd_shared.h" | 29 | #include "amd_shared.h" |
30 | 30 | ||
31 | extern const struct amd_ip_funcs amdgpu_pp_ip_funcs; | 31 | extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block; |
32 | 32 | ||
33 | #endif /* __AMDSOC_DM_H__ */ | 33 | #endif /* __AMDGPU_POWERPLAY_H__ */ |
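[Hypothetical consumer sketch, not taken from this commit] Instead of exporting the bare amd_ip_funcs table, powerplay now exports an amdgpu_ip_block_version descriptor that bundles the block type, version and function table, so ASIC setup code can reference one symbol; the exact registration helper may differ:

	const struct amdgpu_ip_block_version *pp = &amdgpu_pp_ip_block;

	if (pp->type == AMD_IP_BLOCK_TYPE_SMC)
		pp->funcs->early_init(adev);   /* dispatch through the bundled funcs */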
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3cb5e903cd62..4c992826d2d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) | |||
65 | { | 65 | { |
66 | /* Align requested size with padding so unlock_commit can | 66 | /* Align requested size with padding so unlock_commit can |
67 | * pad safely */ | 67 | * pad safely */ |
68 | ndw = (ndw + ring->align_mask) & ~ring->align_mask; | 68 | ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask; |
69 | 69 | ||
70 | /* Make sure we aren't trying to allocate more space | 70 | /* Make sure we aren't trying to allocate more space |
71 | * than the maximum for one submission | 71 | * than the maximum for one submission |
@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
94 | int i; | 94 | int i; |
95 | 95 | ||
96 | for (i = 0; i < count; i++) | 96 | for (i = 0; i < count; i++) |
97 | amdgpu_ring_write(ring, ring->nop); | 97 | amdgpu_ring_write(ring, ring->funcs->nop); |
98 | } | 98 | } |
99 | 99 | ||
100 | /** amdgpu_ring_generic_pad_ib - pad IB with NOP packets | 100 | /** amdgpu_ring_generic_pad_ib - pad IB with NOP packets |
@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
106 | */ | 106 | */ |
107 | void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) | 107 | void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) |
108 | { | 108 | { |
109 | while (ib->length_dw & ring->align_mask) | 109 | while (ib->length_dw & ring->funcs->align_mask) |
110 | ib->ptr[ib->length_dw++] = ring->nop; | 110 | ib->ptr[ib->length_dw++] = ring->funcs->nop; |
111 | } | 111 | } |
112 | 112 | ||
113 | /** | 113 | /** |
@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) | |||
125 | uint32_t count; | 125 | uint32_t count; |
126 | 126 | ||
127 | /* We pad to match fetch size */ | 127 | /* We pad to match fetch size */ |
128 | count = ring->align_mask + 1 - (ring->wptr & ring->align_mask); | 128 | count = ring->funcs->align_mask + 1 - |
129 | count %= ring->align_mask + 1; | 129 | (ring->wptr & ring->funcs->align_mask); |
130 | count %= ring->funcs->align_mask + 1; | ||
130 | ring->funcs->insert_nop(ring, count); | 131 | ring->funcs->insert_nop(ring, count); |
131 | 132 | ||
132 | mb(); | 133 | mb(); |
@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) | |||
163 | * Returns 0 on success, error on failure. | 164 | * Returns 0 on success, error on failure. |
164 | */ | 165 | */ |
165 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | 166 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, |
166 | unsigned max_dw, u32 nop, u32 align_mask, | 167 | unsigned max_dw, struct amdgpu_irq_src *irq_src, |
167 | struct amdgpu_irq_src *irq_src, unsigned irq_type, | 168 | unsigned irq_type) |
168 | enum amdgpu_ring_type ring_type) | ||
169 | { | 169 | { |
170 | int r; | 170 | int r; |
171 | 171 | ||
@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | |||
216 | 216 | ||
217 | ring->ring_size = roundup_pow_of_two(max_dw * 4 * | 217 | ring->ring_size = roundup_pow_of_two(max_dw * 4 * |
218 | amdgpu_sched_hw_submission); | 218 | amdgpu_sched_hw_submission); |
219 | ring->align_mask = align_mask; | ||
220 | ring->nop = nop; | ||
221 | ring->type = ring_type; | ||
222 | 219 | ||
223 | /* Allocate ring buffer */ | 220 | /* Allocate ring buffer */ |
224 | if (ring->ring_obj == NULL) { | 221 | if (ring->ring_obj == NULL) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h new file mode 100644 index 000000000000..f2ad49c8e85b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | |||
@@ -0,0 +1,185 @@ | |||
1 | /* | ||
2 | * Copyright 2016 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König | ||
23 | */ | ||
24 | #ifndef __AMDGPU_RING_H__ | ||
25 | #define __AMDGPU_RING_H__ | ||
26 | |||
27 | #include "gpu_scheduler.h" | ||
28 | |||
29 | /* max number of rings */ | ||
30 | #define AMDGPU_MAX_RINGS 16 | ||
31 | #define AMDGPU_MAX_GFX_RINGS 1 | ||
32 | #define AMDGPU_MAX_COMPUTE_RINGS 8 | ||
33 | #define AMDGPU_MAX_VCE_RINGS 3 | ||
34 | |||
35 | /* some special values for the owner field */ | ||
36 | #define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul) | ||
37 | #define AMDGPU_FENCE_OWNER_VM ((void*)1ul) | ||
38 | |||
39 | #define AMDGPU_FENCE_FLAG_64BIT (1 << 0) | ||
40 | #define AMDGPU_FENCE_FLAG_INT (1 << 1) | ||
41 | |||
42 | enum amdgpu_ring_type { | ||
43 | AMDGPU_RING_TYPE_GFX, | ||
44 | AMDGPU_RING_TYPE_COMPUTE, | ||
45 | AMDGPU_RING_TYPE_SDMA, | ||
46 | AMDGPU_RING_TYPE_UVD, | ||
47 | AMDGPU_RING_TYPE_VCE | ||
48 | }; | ||
49 | |||
50 | struct amdgpu_device; | ||
51 | struct amdgpu_ring; | ||
52 | struct amdgpu_ib; | ||
53 | struct amdgpu_cs_parser; | ||
54 | |||
55 | /* | ||
56 | * Fences. | ||
57 | */ | ||
58 | struct amdgpu_fence_driver { | ||
59 | uint64_t gpu_addr; | ||
60 | volatile uint32_t *cpu_addr; | ||
61 | /* sync_seq is protected by ring emission lock */ | ||
62 | uint32_t sync_seq; | ||
63 | atomic_t last_seq; | ||
64 | bool initialized; | ||
65 | struct amdgpu_irq_src *irq_src; | ||
66 | unsigned irq_type; | ||
67 | struct timer_list fallback_timer; | ||
68 | unsigned num_fences_mask; | ||
69 | spinlock_t lock; | ||
70 | struct dma_fence **fences; | ||
71 | }; | ||
72 | |||
73 | int amdgpu_fence_driver_init(struct amdgpu_device *adev); | ||
74 | void amdgpu_fence_driver_fini(struct amdgpu_device *adev); | ||
75 | void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); | ||
76 | |||
77 | int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, | ||
78 | unsigned num_hw_submission); | ||
79 | int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, | ||
80 | struct amdgpu_irq_src *irq_src, | ||
81 | unsigned irq_type); | ||
82 | void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); | ||
83 | void amdgpu_fence_driver_resume(struct amdgpu_device *adev); | ||
84 | int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence); | ||
85 | void amdgpu_fence_process(struct amdgpu_ring *ring); | ||
86 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); | ||
87 | unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); | ||
88 | |||
89 | /* | ||
90 | * Rings. | ||
91 | */ | ||
92 | |||
93 | /* provided by hw blocks that expose a ring buffer for commands */ | ||
94 | struct amdgpu_ring_funcs { | ||
95 | enum amdgpu_ring_type type; | ||
96 | uint32_t align_mask; | ||
97 | u32 nop; | ||
98 | |||
99 | /* ring read/write ptr handling */ | ||
100 | u32 (*get_rptr)(struct amdgpu_ring *ring); | ||
101 | u32 (*get_wptr)(struct amdgpu_ring *ring); | ||
102 | void (*set_wptr)(struct amdgpu_ring *ring); | ||
103 | /* validating and patching of IBs */ | ||
104 | int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx); | ||
105 | /* constants to calculate how many DW are needed for an emit */ | ||
106 | unsigned emit_frame_size; | ||
107 | unsigned emit_ib_size; | ||
108 | /* command emit functions */ | ||
109 | void (*emit_ib)(struct amdgpu_ring *ring, | ||
110 | struct amdgpu_ib *ib, | ||
111 | unsigned vm_id, bool ctx_switch); | ||
112 | void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, | ||
113 | uint64_t seq, unsigned flags); | ||
114 | void (*emit_pipeline_sync)(struct amdgpu_ring *ring); | ||
115 | void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, | ||
116 | uint64_t pd_addr); | ||
117 | void (*emit_hdp_flush)(struct amdgpu_ring *ring); | ||
118 | void (*emit_hdp_invalidate)(struct amdgpu_ring *ring); | ||
119 | void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, | ||
120 | uint32_t gds_base, uint32_t gds_size, | ||
121 | uint32_t gws_base, uint32_t gws_size, | ||
122 | uint32_t oa_base, uint32_t oa_size); | ||
123 | /* testing functions */ | ||
124 | int (*test_ring)(struct amdgpu_ring *ring); | ||
125 | int (*test_ib)(struct amdgpu_ring *ring, long timeout); | ||
126 | /* insert NOP packets */ | ||
127 | void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); | ||
128 | /* pad the indirect buffer to the necessary number of dw */ | ||
129 | void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); | ||
130 | unsigned (*init_cond_exec)(struct amdgpu_ring *ring); | ||
131 | void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset); | ||
132 | /* note usage for clock and power gating */ | ||
133 | void (*begin_use)(struct amdgpu_ring *ring); | ||
134 | void (*end_use)(struct amdgpu_ring *ring); | ||
135 | void (*emit_switch_buffer) (struct amdgpu_ring *ring); | ||
136 | void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); | ||
137 | }; | ||
138 | |||
139 | struct amdgpu_ring { | ||
140 | struct amdgpu_device *adev; | ||
141 | const struct amdgpu_ring_funcs *funcs; | ||
142 | struct amdgpu_fence_driver fence_drv; | ||
143 | struct amd_gpu_scheduler sched; | ||
144 | |||
145 | struct amdgpu_bo *ring_obj; | ||
146 | volatile uint32_t *ring; | ||
147 | unsigned rptr_offs; | ||
148 | unsigned wptr; | ||
149 | unsigned wptr_old; | ||
150 | unsigned ring_size; | ||
151 | unsigned max_dw; | ||
152 | int count_dw; | ||
153 | uint64_t gpu_addr; | ||
154 | uint32_t ptr_mask; | ||
155 | bool ready; | ||
156 | u32 idx; | ||
157 | u32 me; | ||
158 | u32 pipe; | ||
159 | u32 queue; | ||
160 | struct amdgpu_bo *mqd_obj; | ||
161 | u32 doorbell_index; | ||
162 | bool use_doorbell; | ||
163 | unsigned wptr_offs; | ||
164 | unsigned fence_offs; | ||
165 | uint64_t current_ctx; | ||
166 | char name[16]; | ||
167 | unsigned cond_exe_offs; | ||
168 | u64 cond_exe_gpu_addr; | ||
169 | volatile u32 *cond_exe_cpu_addr; | ||
170 | #if defined(CONFIG_DEBUG_FS) | ||
171 | struct dentry *ent; | ||
172 | #endif | ||
173 | }; | ||
174 | |||
175 | int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); | ||
176 | void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); | ||
177 | void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); | ||
178 | void amdgpu_ring_commit(struct amdgpu_ring *ring); | ||
179 | void amdgpu_ring_undo(struct amdgpu_ring *ring); | ||
180 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | ||
181 | unsigned ring_size, struct amdgpu_irq_src *irq_src, | ||
182 | unsigned irq_type); | ||
183 | void amdgpu_ring_fini(struct amdgpu_ring *ring); | ||
184 | |||
185 | #endif | ||
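For orientation only (not part of the patch): a minimal sketch of how a caller might drive the ring and fence-driver API declared in this header after the dma_fence rename. All functions used are the ones declared above; error handling is trimmed and the dword counts are arbitrary.

	/* Hedged illustration, not from this commit. */
	static int example_submit_nop(struct amdgpu_ring *ring)
	{
		struct dma_fence *fence;
		int r;

		r = amdgpu_ring_alloc(ring, 16);	/* reserve space for up to 16 dwords */
		if (r)
			return r;

		amdgpu_ring_insert_nop(ring, 8);	/* emit a few NOP packets as payload */

		r = amdgpu_fence_emit(ring, &fence);	/* fence the submission */
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}

		amdgpu_ring_commit(ring);		/* publish the new write pointer */

		r = dma_fence_wait(fence, false);	/* block until the GPU has executed it */
		dma_fence_put(fence);
		return r;
	}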
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index d8af37a845f4..fd26c4b8d793 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo) | |||
147 | } | 147 | } |
148 | list_del_init(&sa_bo->olist); | 148 | list_del_init(&sa_bo->olist); |
149 | list_del_init(&sa_bo->flist); | 149 | list_del_init(&sa_bo->flist); |
150 | fence_put(sa_bo->fence); | 150 | dma_fence_put(sa_bo->fence); |
151 | kfree(sa_bo); | 151 | kfree(sa_bo); |
152 | } | 152 | } |
153 | 153 | ||
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager) | |||
161 | sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); | 161 | sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); |
162 | list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { | 162 | list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { |
163 | if (sa_bo->fence == NULL || | 163 | if (sa_bo->fence == NULL || |
164 | !fence_is_signaled(sa_bo->fence)) { | 164 | !dma_fence_is_signaled(sa_bo->fence)) { |
165 | return; | 165 | return; |
166 | } | 166 | } |
167 | amdgpu_sa_bo_remove_locked(sa_bo); | 167 | amdgpu_sa_bo_remove_locked(sa_bo); |
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager, | |||
244 | } | 244 | } |
245 | 245 | ||
246 | static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, | 246 | static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, |
247 | struct fence **fences, | 247 | struct dma_fence **fences, |
248 | unsigned *tries) | 248 | unsigned *tries) |
249 | { | 249 | { |
250 | struct amdgpu_sa_bo *best_bo = NULL; | 250 | struct amdgpu_sa_bo *best_bo = NULL; |
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, | |||
272 | sa_bo = list_first_entry(&sa_manager->flist[i], | 272 | sa_bo = list_first_entry(&sa_manager->flist[i], |
273 | struct amdgpu_sa_bo, flist); | 273 | struct amdgpu_sa_bo, flist); |
274 | 274 | ||
275 | if (!fence_is_signaled(sa_bo->fence)) { | 275 | if (!dma_fence_is_signaled(sa_bo->fence)) { |
276 | fences[i] = sa_bo->fence; | 276 | fences[i] = sa_bo->fence; |
277 | continue; | 277 | continue; |
278 | } | 278 | } |
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, | |||
314 | struct amdgpu_sa_bo **sa_bo, | 314 | struct amdgpu_sa_bo **sa_bo, |
315 | unsigned size, unsigned align) | 315 | unsigned size, unsigned align) |
316 | { | 316 | { |
317 | struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; | 317 | struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; |
318 | unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS]; | 318 | unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS]; |
319 | unsigned count; | 319 | unsigned count; |
320 | int i, r; | 320 | int i, r; |
@@ -356,14 +356,14 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, | |||
356 | 356 | ||
357 | for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) | 357 | for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) |
358 | if (fences[i]) | 358 | if (fences[i]) |
359 | fences[count++] = fence_get(fences[i]); | 359 | fences[count++] = dma_fence_get(fences[i]); |
360 | 360 | ||
361 | if (count) { | 361 | if (count) { |
362 | spin_unlock(&sa_manager->wq.lock); | 362 | spin_unlock(&sa_manager->wq.lock); |
363 | t = fence_wait_any_timeout(fences, count, false, | 363 | t = dma_fence_wait_any_timeout(fences, count, false, |
364 | MAX_SCHEDULE_TIMEOUT); | 364 | MAX_SCHEDULE_TIMEOUT); |
365 | for (i = 0; i < count; ++i) | 365 | for (i = 0; i < count; ++i) |
366 | fence_put(fences[i]); | 366 | dma_fence_put(fences[i]); |
367 | 367 | ||
368 | r = (t > 0) ? 0 : t; | 368 | r = (t > 0) ? 0 : t; |
369 | spin_lock(&sa_manager->wq.lock); | 369 | spin_lock(&sa_manager->wq.lock); |
@@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, | |||
384 | } | 384 | } |
385 | 385 | ||
386 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, | 386 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, |
387 | struct fence *fence) | 387 | struct dma_fence *fence) |
388 | { | 388 | { |
389 | struct amdgpu_sa_manager *sa_manager; | 389 | struct amdgpu_sa_manager *sa_manager; |
390 | 390 | ||
@@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, | |||
394 | 394 | ||
395 | sa_manager = (*sa_bo)->manager; | 395 | sa_manager = (*sa_bo)->manager; |
396 | spin_lock(&sa_manager->wq.lock); | 396 | spin_lock(&sa_manager->wq.lock); |
397 | if (fence && !fence_is_signaled(fence)) { | 397 | if (fence && !dma_fence_is_signaled(fence)) { |
398 | uint32_t idx; | 398 | uint32_t idx; |
399 | 399 | ||
400 | (*sa_bo)->fence = fence_get(fence); | 400 | (*sa_bo)->fence = dma_fence_get(fence); |
401 | idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS; | 401 | idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS; |
402 | list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]); | 402 | list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]); |
403 | } else { | 403 | } else { |
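The conversion above leaves the sub-allocator's wait-for-any idiom unchanged apart from the dma_fence_* names. A hedged sketch of that idiom, mirroring the four-argument dma_fence_wait_any_timeout() call in amdgpu_sa_bo_new(); the per-context bookkeeping and the wait-queue lock are omitted for brevity.

	/* Hedged illustration, not from this commit. */
	static int example_wait_any(struct dma_fence **fences, unsigned count)
	{
		signed long t;
		unsigned i;

		/* hold a reference on each fence for the duration of the wait */
		for (i = 0; i < count; ++i)
			dma_fence_get(fences[i]);

		t = dma_fence_wait_any_timeout(fences, count, false,
					       MAX_SCHEDULE_TIMEOUT);

		for (i = 0; i < count; ++i)
			dma_fence_put(fences[i]);

		/* positive return means a fence signaled; otherwise propagate the error */
		return (t > 0) ? 0 : t;
	}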
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 5c8d3022fb87..ed814e6d0207 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | |||
@@ -34,7 +34,7 @@ | |||
34 | 34 | ||
35 | struct amdgpu_sync_entry { | 35 | struct amdgpu_sync_entry { |
36 | struct hlist_node node; | 36 | struct hlist_node node; |
37 | struct fence *fence; | 37 | struct dma_fence *fence; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static struct kmem_cache *amdgpu_sync_slab; | 40 | static struct kmem_cache *amdgpu_sync_slab; |
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync) | |||
60 | * | 60 | * |
61 | * Test if the fence was issued by us. | 61 | * Test if the fence was issued by us. |
62 | */ | 62 | */ |
63 | static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f) | 63 | static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, |
64 | struct dma_fence *f) | ||
64 | { | 65 | { |
65 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); | 66 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); |
66 | 67 | ||
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f) | |||
81 | * | 82 | * |
82 | * Extract who originally created the fence. | 83 | * Extract who originally created the fence. |
83 | */ | 84 | */ |
84 | static void *amdgpu_sync_get_owner(struct fence *f) | 85 | static void *amdgpu_sync_get_owner(struct dma_fence *f) |
85 | { | 86 | { |
86 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); | 87 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); |
87 | 88 | ||
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f) | |||
99 | * | 100 | * |
100 | * Either keep the existing fence or the new one, depending on which one is later. | 101 | * Either keep the existing fence or the new one, depending on which one is later. |
101 | */ | 102 | */ |
102 | static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence) | 103 | static void amdgpu_sync_keep_later(struct dma_fence **keep, |
104 | struct dma_fence *fence) | ||
103 | { | 105 | { |
104 | if (*keep && fence_is_later(*keep, fence)) | 106 | if (*keep && dma_fence_is_later(*keep, fence)) |
105 | return; | 107 | return; |
106 | 108 | ||
107 | fence_put(*keep); | 109 | dma_fence_put(*keep); |
108 | *keep = fence_get(fence); | 110 | *keep = dma_fence_get(fence); |
109 | } | 111 | } |
110 | 112 | ||
111 | /** | 113 | /** |
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence) | |||
117 | * Tries to add the fence to an existing hash entry. Returns true when an entry | 119 | * Tries to add the fence to an existing hash entry. Returns true when an entry |
118 | * was found, false otherwise. | 120 | * was found, false otherwise. |
119 | */ | 121 | */ |
120 | static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f) | 122 | static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f) |
121 | { | 123 | { |
122 | struct amdgpu_sync_entry *e; | 124 | struct amdgpu_sync_entry *e; |
123 | 125 | ||
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f) | |||
139 | * | 141 | * |
140 | */ | 142 | */ |
141 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | 143 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, |
142 | struct fence *f) | 144 | struct dma_fence *f) |
143 | { | 145 | { |
144 | struct amdgpu_sync_entry *e; | 146 | struct amdgpu_sync_entry *e; |
145 | 147 | ||
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | |||
158 | return -ENOMEM; | 160 | return -ENOMEM; |
159 | 161 | ||
160 | hash_add(sync->fences, &e->node, f->context); | 162 | hash_add(sync->fences, &e->node, f->context); |
161 | e->fence = fence_get(f); | 163 | e->fence = dma_fence_get(f); |
162 | return 0; | 164 | return 0; |
163 | } | 165 | } |
164 | 166 | ||
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, | |||
177 | void *owner) | 179 | void *owner) |
178 | { | 180 | { |
179 | struct reservation_object_list *flist; | 181 | struct reservation_object_list *flist; |
180 | struct fence *f; | 182 | struct dma_fence *f; |
181 | void *fence_owner; | 183 | void *fence_owner; |
182 | unsigned i; | 184 | unsigned i; |
183 | int r = 0; | 185 | int r = 0; |
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, | |||
231 | * Returns the next fence not signaled yet without removing it from the sync | 233 | * Returns the next fence not signaled yet without removing it from the sync |
232 | * object. | 234 | * object. |
233 | */ | 235 | */ |
234 | struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | 236 | struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, |
235 | struct amdgpu_ring *ring) | 237 | struct amdgpu_ring *ring) |
236 | { | 238 | { |
237 | struct amdgpu_sync_entry *e; | 239 | struct amdgpu_sync_entry *e; |
238 | struct hlist_node *tmp; | 240 | struct hlist_node *tmp; |
239 | int i; | 241 | int i; |
240 | 242 | ||
241 | hash_for_each_safe(sync->fences, i, tmp, e, node) { | 243 | hash_for_each_safe(sync->fences, i, tmp, e, node) { |
242 | struct fence *f = e->fence; | 244 | struct dma_fence *f = e->fence; |
243 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); | 245 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); |
244 | 246 | ||
245 | if (ring && s_fence) { | 247 | if (ring && s_fence) { |
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | |||
247 | * when they are scheduled. | 249 | * when they are scheduled. |
248 | */ | 250 | */ |
249 | if (s_fence->sched == &ring->sched) { | 251 | if (s_fence->sched == &ring->sched) { |
250 | if (fence_is_signaled(&s_fence->scheduled)) | 252 | if (dma_fence_is_signaled(&s_fence->scheduled)) |
251 | continue; | 253 | continue; |
252 | 254 | ||
253 | return &s_fence->scheduled; | 255 | return &s_fence->scheduled; |
254 | } | 256 | } |
255 | } | 257 | } |
256 | 258 | ||
257 | if (fence_is_signaled(f)) { | 259 | if (dma_fence_is_signaled(f)) { |
258 | hash_del(&e->node); | 260 | hash_del(&e->node); |
259 | fence_put(f); | 261 | dma_fence_put(f); |
260 | kmem_cache_free(amdgpu_sync_slab, e); | 262 | kmem_cache_free(amdgpu_sync_slab, e); |
261 | continue; | 263 | continue; |
262 | } | 264 | } |
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | |||
274 | * | 276 | * |
275 | * Gets and removes the next fence from the sync object that is not yet signaled. | 277 | * Gets and removes the next fence from the sync object that is not yet signaled. |
276 | */ | 278 | */ |
277 | struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) | 279 | struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) |
278 | { | 280 | { |
279 | struct amdgpu_sync_entry *e; | 281 | struct amdgpu_sync_entry *e; |
280 | struct hlist_node *tmp; | 282 | struct hlist_node *tmp; |
281 | struct fence *f; | 283 | struct dma_fence *f; |
282 | int i; | 284 | int i; |
283 | 285 | ||
284 | hash_for_each_safe(sync->fences, i, tmp, e, node) { | 286 | hash_for_each_safe(sync->fences, i, tmp, e, node) { |
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) | |||
288 | hash_del(&e->node); | 290 | hash_del(&e->node); |
289 | kmem_cache_free(amdgpu_sync_slab, e); | 291 | kmem_cache_free(amdgpu_sync_slab, e); |
290 | 292 | ||
291 | if (!fence_is_signaled(f)) | 293 | if (!dma_fence_is_signaled(f)) |
292 | return f; | 294 | return f; |
293 | 295 | ||
294 | fence_put(f); | 296 | dma_fence_put(f); |
295 | } | 297 | } |
296 | return NULL; | 298 | return NULL; |
297 | } | 299 | } |
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync) | |||
311 | 313 | ||
312 | hash_for_each_safe(sync->fences, i, tmp, e, node) { | 314 | hash_for_each_safe(sync->fences, i, tmp, e, node) { |
313 | hash_del(&e->node); | 315 | hash_del(&e->node); |
314 | fence_put(e->fence); | 316 | dma_fence_put(e->fence); |
315 | kmem_cache_free(amdgpu_sync_slab, e); | 317 | kmem_cache_free(amdgpu_sync_slab, e); |
316 | } | 318 | } |
317 | 319 | ||
318 | fence_put(sync->last_vm_update); | 320 | dma_fence_put(sync->last_vm_update); |
319 | } | 321 | } |
320 | 322 | ||
321 | /** | 323 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h new file mode 100644 index 000000000000..605be266e07f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright 2016 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König | ||
23 | */ | ||
24 | #ifndef __AMDGPU_SYNC_H__ | ||
25 | #define __AMDGPU_SYNC_H__ | ||
26 | |||
27 | #include <linux/hashtable.h> | ||
28 | |||
29 | struct dma_fence; | ||
30 | struct reservation_object; | ||
31 | struct amdgpu_device; | ||
32 | struct amdgpu_ring; | ||
33 | |||
34 | /* | ||
35 | * Container for fences used to sync command submissions. | ||
36 | */ | ||
37 | struct amdgpu_sync { | ||
38 | DECLARE_HASHTABLE(fences, 4); | ||
39 | struct dma_fence *last_vm_update; | ||
40 | }; | ||
41 | |||
42 | void amdgpu_sync_create(struct amdgpu_sync *sync); | ||
43 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | ||
44 | struct dma_fence *f); | ||
45 | int amdgpu_sync_resv(struct amdgpu_device *adev, | ||
46 | struct amdgpu_sync *sync, | ||
47 | struct reservation_object *resv, | ||
48 | void *owner); | ||
49 | struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | ||
50 | struct amdgpu_ring *ring); | ||
51 | struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); | ||
52 | void amdgpu_sync_free(struct amdgpu_sync *sync); | ||
53 | int amdgpu_sync_init(void); | ||
54 | void amdgpu_sync_fini(void); | ||
55 | |||
56 | #endif | ||
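A hedged usage sketch for the container declared above (illustration only, not from the patch): collect the fences attached to a reservation object, then drain the container and wait on each unsignaled fence. 'adev', 'resv' and 'owner' are assumed to be supplied by the caller.

	/* Hedged illustration, not from this commit. */
	static int example_wait_deps(struct amdgpu_device *adev,
				     struct reservation_object *resv, void *owner)
	{
		struct amdgpu_sync sync;
		struct dma_fence *f;
		int r = 0;

		amdgpu_sync_create(&sync);

		/* pull in every fence currently attached to the reservation object */
		r = amdgpu_sync_resv(adev, &sync, resv, owner);
		if (r)
			goto out;

		/* amdgpu_sync_get_fence() hands back a reference; drop it after waiting */
		while ((f = amdgpu_sync_get_fence(&sync)) != NULL) {
			r = dma_fence_wait(f, false);
			dma_fence_put(f);
			if (r)
				break;
		}

	out:
		amdgpu_sync_free(&sync);
		return r;
	}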
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index b827c75e95de..e05a24325eeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | |||
@@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) | |||
78 | void *gtt_map, *vram_map; | 78 | void *gtt_map, *vram_map; |
79 | void **gtt_start, **gtt_end; | 79 | void **gtt_start, **gtt_end; |
80 | void **vram_start, **vram_end; | 80 | void **vram_start, **vram_end; |
81 | struct fence *fence = NULL; | 81 | struct dma_fence *fence = NULL; |
82 | 82 | ||
83 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, | 83 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, |
84 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, | 84 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, |
@@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) | |||
118 | goto out_lclean_unpin; | 118 | goto out_lclean_unpin; |
119 | } | 119 | } |
120 | 120 | ||
121 | r = fence_wait(fence, false); | 121 | r = dma_fence_wait(fence, false); |
122 | if (r) { | 122 | if (r) { |
123 | DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); | 123 | DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); |
124 | goto out_lclean_unpin; | 124 | goto out_lclean_unpin; |
125 | } | 125 | } |
126 | 126 | ||
127 | fence_put(fence); | 127 | dma_fence_put(fence); |
128 | 128 | ||
129 | r = amdgpu_bo_kmap(vram_obj, &vram_map); | 129 | r = amdgpu_bo_kmap(vram_obj, &vram_map); |
130 | if (r) { | 130 | if (r) { |
@@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) | |||
163 | goto out_lclean_unpin; | 163 | goto out_lclean_unpin; |
164 | } | 164 | } |
165 | 165 | ||
166 | r = fence_wait(fence, false); | 166 | r = dma_fence_wait(fence, false); |
167 | if (r) { | 167 | if (r) { |
168 | DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); | 168 | DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); |
169 | goto out_lclean_unpin; | 169 | goto out_lclean_unpin; |
170 | } | 170 | } |
171 | 171 | ||
172 | fence_put(fence); | 172 | dma_fence_put(fence); |
173 | 173 | ||
174 | r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); | 174 | r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); |
175 | if (r) { | 175 | if (r) { |
@@ -216,7 +216,7 @@ out_lclean: | |||
216 | amdgpu_bo_unref(&gtt_obj[i]); | 216 | amdgpu_bo_unref(&gtt_obj[i]); |
217 | } | 217 | } |
218 | if (fence) | 218 | if (fence) |
219 | fence_put(fence); | 219 | dma_fence_put(fence); |
220 | break; | 220 | break; |
221 | } | 221 | } |
222 | 222 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 067e5e683bb3..bb964a8ff938 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | |||
@@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, | |||
104 | __field(struct amdgpu_device *, adev) | 104 | __field(struct amdgpu_device *, adev) |
105 | __field(struct amd_sched_job *, sched_job) | 105 | __field(struct amd_sched_job *, sched_job) |
106 | __field(struct amdgpu_ib *, ib) | 106 | __field(struct amdgpu_ib *, ib) |
107 | __field(struct fence *, fence) | 107 | __field(struct dma_fence *, fence) |
108 | __field(char *, ring_name) | 108 | __field(char *, ring_name) |
109 | __field(u32, num_ibs) | 109 | __field(u32, num_ibs) |
110 | ), | 110 | ), |
@@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job, | |||
129 | __field(struct amdgpu_device *, adev) | 129 | __field(struct amdgpu_device *, adev) |
130 | __field(struct amd_sched_job *, sched_job) | 130 | __field(struct amd_sched_job *, sched_job) |
131 | __field(struct amdgpu_ib *, ib) | 131 | __field(struct amdgpu_ib *, ib) |
132 | __field(struct fence *, fence) | 132 | __field(struct dma_fence *, fence) |
133 | __field(char *, ring_name) | 133 | __field(char *, ring_name) |
134 | __field(u32, num_ibs) | 134 | __field(u32, num_ibs) |
135 | ), | 135 | ), |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dcaf691f56b5..1821c05484d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -51,16 +51,6 @@ | |||
51 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); | 51 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); |
52 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); | 52 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); |
53 | 53 | ||
54 | static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev) | ||
55 | { | ||
56 | struct amdgpu_mman *mman; | ||
57 | struct amdgpu_device *adev; | ||
58 | |||
59 | mman = container_of(bdev, struct amdgpu_mman, bdev); | ||
60 | adev = container_of(mman, struct amdgpu_device, mman); | ||
61 | return adev; | ||
62 | } | ||
63 | |||
64 | 54 | ||
65 | /* | 55 | /* |
66 | * Global memory. | 56 | * Global memory. |
@@ -150,7 +140,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
150 | { | 140 | { |
151 | struct amdgpu_device *adev; | 141 | struct amdgpu_device *adev; |
152 | 142 | ||
153 | adev = amdgpu_get_adev(bdev); | 143 | adev = amdgpu_ttm_adev(bdev); |
154 | 144 | ||
155 | switch (type) { | 145 | switch (type) { |
156 | case TTM_PL_SYSTEM: | 146 | case TTM_PL_SYSTEM: |
@@ -168,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
168 | break; | 158 | break; |
169 | case TTM_PL_VRAM: | 159 | case TTM_PL_VRAM: |
170 | /* "On-card" video ram */ | 160 | /* "On-card" video ram */ |
171 | man->func = &ttm_bo_manager_func; | 161 | man->func = &amdgpu_vram_mgr_func; |
172 | man->gpu_offset = adev->mc.vram_start; | 162 | man->gpu_offset = adev->mc.vram_start; |
173 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | 163 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
174 | TTM_MEMTYPE_FLAG_MAPPABLE; | 164 | TTM_MEMTYPE_FLAG_MAPPABLE; |
@@ -195,6 +185,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
195 | static void amdgpu_evict_flags(struct ttm_buffer_object *bo, | 185 | static void amdgpu_evict_flags(struct ttm_buffer_object *bo, |
196 | struct ttm_placement *placement) | 186 | struct ttm_placement *placement) |
197 | { | 187 | { |
188 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); | ||
198 | struct amdgpu_bo *abo; | 189 | struct amdgpu_bo *abo; |
199 | static struct ttm_place placements = { | 190 | static struct ttm_place placements = { |
200 | .fpfn = 0, | 191 | .fpfn = 0, |
@@ -213,7 +204,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, | |||
213 | abo = container_of(bo, struct amdgpu_bo, tbo); | 204 | abo = container_of(bo, struct amdgpu_bo, tbo); |
214 | switch (bo->mem.mem_type) { | 205 | switch (bo->mem.mem_type) { |
215 | case TTM_PL_VRAM: | 206 | case TTM_PL_VRAM: |
216 | if (abo->adev->mman.buffer_funcs_ring->ready == false) { | 207 | if (adev->mman.buffer_funcs_ring->ready == false) { |
217 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); | 208 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); |
218 | } else { | 209 | } else { |
219 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); | 210 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); |
@@ -229,7 +220,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, | |||
229 | * allocating address space for the BO. | 220 | * allocating address space for the BO. |
230 | */ | 221 | */ |
231 | abo->placements[i].lpfn = | 222 | abo->placements[i].lpfn = |
232 | abo->adev->mc.gtt_size >> PAGE_SHIFT; | 223 | adev->mc.gtt_size >> PAGE_SHIFT; |
233 | } | 224 | } |
234 | } | 225 | } |
235 | break; | 226 | break; |
@@ -260,63 +251,115 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo, | |||
260 | new_mem->mm_node = NULL; | 251 | new_mem->mm_node = NULL; |
261 | } | 252 | } |
262 | 253 | ||
263 | static int amdgpu_move_blit(struct ttm_buffer_object *bo, | 254 | static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo, |
264 | bool evict, bool no_wait_gpu, | 255 | struct drm_mm_node *mm_node, |
265 | struct ttm_mem_reg *new_mem, | 256 | struct ttm_mem_reg *mem, |
266 | struct ttm_mem_reg *old_mem) | 257 | uint64_t *addr) |
267 | { | 258 | { |
268 | struct amdgpu_device *adev; | ||
269 | struct amdgpu_ring *ring; | ||
270 | uint64_t old_start, new_start; | ||
271 | struct fence *fence; | ||
272 | int r; | 259 | int r; |
273 | 260 | ||
274 | adev = amdgpu_get_adev(bo->bdev); | 261 | switch (mem->mem_type) { |
275 | ring = adev->mman.buffer_funcs_ring; | ||
276 | |||
277 | switch (old_mem->mem_type) { | ||
278 | case TTM_PL_TT: | 262 | case TTM_PL_TT: |
279 | r = amdgpu_ttm_bind(bo, old_mem); | 263 | r = amdgpu_ttm_bind(bo, mem); |
280 | if (r) | 264 | if (r) |
281 | return r; | 265 | return r; |
282 | 266 | ||
283 | case TTM_PL_VRAM: | 267 | case TTM_PL_VRAM: |
284 | old_start = (u64)old_mem->start << PAGE_SHIFT; | 268 | *addr = mm_node->start << PAGE_SHIFT; |
285 | old_start += bo->bdev->man[old_mem->mem_type].gpu_offset; | 269 | *addr += bo->bdev->man[mem->mem_type].gpu_offset; |
286 | break; | 270 | break; |
287 | default: | 271 | default: |
288 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | 272 | DRM_ERROR("Unknown placement %d\n", mem->mem_type); |
289 | return -EINVAL; | 273 | return -EINVAL; |
290 | } | 274 | } |
291 | switch (new_mem->mem_type) { | ||
292 | case TTM_PL_TT: | ||
293 | r = amdgpu_ttm_bind(bo, new_mem); | ||
294 | if (r) | ||
295 | return r; | ||
296 | 275 | ||
297 | case TTM_PL_VRAM: | 276 | return 0; |
298 | new_start = (u64)new_mem->start << PAGE_SHIFT; | 277 | } |
299 | new_start += bo->bdev->man[new_mem->mem_type].gpu_offset; | 278 | |
300 | break; | 279 | static int amdgpu_move_blit(struct ttm_buffer_object *bo, |
301 | default: | 280 | bool evict, bool no_wait_gpu, |
302 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | 281 | struct ttm_mem_reg *new_mem, |
303 | return -EINVAL; | 282 | struct ttm_mem_reg *old_mem) |
304 | } | 283 | { |
284 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); | ||
285 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; | ||
286 | |||
287 | struct drm_mm_node *old_mm, *new_mm; | ||
288 | uint64_t old_start, old_size, new_start, new_size; | ||
289 | unsigned long num_pages; | ||
290 | struct dma_fence *fence = NULL; | ||
291 | int r; | ||
292 | |||
293 | BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0); | ||
294 | |||
305 | if (!ring->ready) { | 295 | if (!ring->ready) { |
306 | DRM_ERROR("Trying to move memory with ring turned off.\n"); | 296 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
307 | return -EINVAL; | 297 | return -EINVAL; |
308 | } | 298 | } |
309 | 299 | ||
310 | BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0); | 300 | old_mm = old_mem->mm_node; |
301 | r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start); | ||
302 | if (r) | ||
303 | return r; | ||
304 | old_size = old_mm->size; | ||
305 | |||
311 | 306 | ||
312 | r = amdgpu_copy_buffer(ring, old_start, new_start, | 307 | new_mm = new_mem->mm_node; |
313 | new_mem->num_pages * PAGE_SIZE, /* bytes */ | 308 | r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start); |
314 | bo->resv, &fence, false); | ||
315 | if (r) | 309 | if (r) |
316 | return r; | 310 | return r; |
311 | new_size = new_mm->size; | ||
312 | |||
313 | num_pages = new_mem->num_pages; | ||
314 | while (num_pages) { | ||
315 | unsigned long cur_pages = min(old_size, new_size); | ||
316 | struct dma_fence *next; | ||
317 | |||
318 | r = amdgpu_copy_buffer(ring, old_start, new_start, | ||
319 | cur_pages * PAGE_SIZE, | ||
320 | bo->resv, &next, false); | ||
321 | if (r) | ||
322 | goto error; | ||
323 | |||
324 | dma_fence_put(fence); | ||
325 | fence = next; | ||
326 | |||
327 | num_pages -= cur_pages; | ||
328 | if (!num_pages) | ||
329 | break; | ||
330 | |||
331 | old_size -= cur_pages; | ||
332 | if (!old_size) { | ||
333 | r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem, | ||
334 | &old_start); | ||
335 | if (r) | ||
336 | goto error; | ||
337 | old_size = old_mm->size; | ||
338 | } else { | ||
339 | old_start += cur_pages * PAGE_SIZE; | ||
340 | } | ||
341 | |||
342 | new_size -= cur_pages; | ||
343 | if (!new_size) { | ||
344 | r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem, | ||
345 | &new_start); | ||
346 | if (r) | ||
347 | goto error; | ||
348 | |||
349 | new_size = new_mm->size; | ||
350 | } else { | ||
351 | new_start += cur_pages * PAGE_SIZE; | ||
352 | } | ||
353 | } | ||
317 | 354 | ||
318 | r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); | 355 | r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); |
319 | fence_put(fence); | 356 | dma_fence_put(fence); |
357 | return r; | ||
358 | |||
359 | error: | ||
360 | if (fence) | ||
361 | dma_fence_wait(fence, false); | ||
362 | dma_fence_put(fence); | ||
320 | return r; | 363 | return r; |
321 | } | 364 | } |
322 | 365 | ||
@@ -332,7 +375,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, | |||
332 | struct ttm_placement placement; | 375 | struct ttm_placement placement; |
333 | int r; | 376 | int r; |
334 | 377 | ||
335 | adev = amdgpu_get_adev(bo->bdev); | 378 | adev = amdgpu_ttm_adev(bo->bdev); |
336 | tmp_mem = *new_mem; | 379 | tmp_mem = *new_mem; |
337 | tmp_mem.mm_node = NULL; | 380 | tmp_mem.mm_node = NULL; |
338 | placement.num_placement = 1; | 381 | placement.num_placement = 1; |
@@ -379,7 +422,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, | |||
379 | struct ttm_place placements; | 422 | struct ttm_place placements; |
380 | int r; | 423 | int r; |
381 | 424 | ||
382 | adev = amdgpu_get_adev(bo->bdev); | 425 | adev = amdgpu_ttm_adev(bo->bdev); |
383 | tmp_mem = *new_mem; | 426 | tmp_mem = *new_mem; |
384 | tmp_mem.mm_node = NULL; | 427 | tmp_mem.mm_node = NULL; |
385 | placement.num_placement = 1; | 428 | placement.num_placement = 1; |
@@ -422,7 +465,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, | |||
422 | if (WARN_ON_ONCE(abo->pin_count > 0)) | 465 | if (WARN_ON_ONCE(abo->pin_count > 0)) |
423 | return -EINVAL; | 466 | return -EINVAL; |
424 | 467 | ||
425 | adev = amdgpu_get_adev(bo->bdev); | 468 | adev = amdgpu_ttm_adev(bo->bdev); |
426 | 469 | ||
427 | /* remember the eviction */ | 470 | /* remember the eviction */ |
428 | if (evict) | 471 | if (evict) |
@@ -475,7 +518,7 @@ memcpy: | |||
475 | static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 518 | static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
476 | { | 519 | { |
477 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | 520 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
478 | struct amdgpu_device *adev = amdgpu_get_adev(bdev); | 521 | struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); |
479 | 522 | ||
480 | mem->bus.addr = NULL; | 523 | mem->bus.addr = NULL; |
481 | mem->bus.offset = 0; | 524 | mem->bus.offset = 0; |
@@ -607,7 +650,7 @@ release_pages: | |||
607 | /* prepare the sg table with the user pages */ | 650 | /* prepare the sg table with the user pages */ |
608 | static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) | 651 | static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) |
609 | { | 652 | { |
610 | struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev); | 653 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
611 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | 654 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
612 | unsigned nents; | 655 | unsigned nents; |
613 | int r; | 656 | int r; |
@@ -639,7 +682,7 @@ release_sg: | |||
639 | 682 | ||
640 | static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) | 683 | static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) |
641 | { | 684 | { |
642 | struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev); | 685 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
643 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | 686 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
644 | struct sg_page_iter sg_iter; | 687 | struct sg_page_iter sg_iter; |
645 | 688 | ||
@@ -799,7 +842,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, | |||
799 | struct amdgpu_device *adev; | 842 | struct amdgpu_device *adev; |
800 | struct amdgpu_ttm_tt *gtt; | 843 | struct amdgpu_ttm_tt *gtt; |
801 | 844 | ||
802 | adev = amdgpu_get_adev(bdev); | 845 | adev = amdgpu_ttm_adev(bdev); |
803 | 846 | ||
804 | gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); | 847 | gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); |
805 | if (gtt == NULL) { | 848 | if (gtt == NULL) { |
@@ -843,7 +886,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) | |||
843 | return 0; | 886 | return 0; |
844 | } | 887 | } |
845 | 888 | ||
846 | adev = amdgpu_get_adev(ttm->bdev); | 889 | adev = amdgpu_ttm_adev(ttm->bdev); |
847 | 890 | ||
848 | #ifdef CONFIG_SWIOTLB | 891 | #ifdef CONFIG_SWIOTLB |
849 | if (swiotlb_nr_tbl()) { | 892 | if (swiotlb_nr_tbl()) { |
@@ -889,7 +932,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) | |||
889 | if (slave) | 932 | if (slave) |
890 | return; | 933 | return; |
891 | 934 | ||
892 | adev = amdgpu_get_adev(ttm->bdev); | 935 | adev = amdgpu_ttm_adev(ttm->bdev); |
893 | 936 | ||
894 | #ifdef CONFIG_SWIOTLB | 937 | #ifdef CONFIG_SWIOTLB |
895 | if (swiotlb_nr_tbl()) { | 938 | if (swiotlb_nr_tbl()) { |
@@ -1012,7 +1055,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, | |||
1012 | 1055 | ||
1013 | static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo) | 1056 | static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo) |
1014 | { | 1057 | { |
1015 | struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev); | 1058 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); |
1016 | unsigned i, j; | 1059 | unsigned i, j; |
1017 | 1060 | ||
1018 | for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) { | 1061 | for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) { |
@@ -1029,7 +1072,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo) | |||
1029 | 1072 | ||
1030 | static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo) | 1073 | static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo) |
1031 | { | 1074 | { |
1032 | struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev); | 1075 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); |
1033 | unsigned log2_size = min(ilog2(tbo->num_pages), | 1076 | unsigned log2_size = min(ilog2(tbo->num_pages), |
1034 | AMDGPU_TTM_LRU_SIZE - 1); | 1077 | AMDGPU_TTM_LRU_SIZE - 1); |
1035 | 1078 | ||
@@ -1060,12 +1103,37 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo) | |||
1060 | return res; | 1103 | return res; |
1061 | } | 1104 | } |
1062 | 1105 | ||
1106 | static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, | ||
1107 | const struct ttm_place *place) | ||
1108 | { | ||
1109 | if (bo->mem.mem_type == TTM_PL_VRAM && | ||
1110 | bo->mem.start == AMDGPU_BO_INVALID_OFFSET) { | ||
1111 | unsigned long num_pages = bo->mem.num_pages; | ||
1112 | struct drm_mm_node *node = bo->mem.mm_node; | ||
1113 | |||
1114 | /* Check each drm MM node individually */ | ||
1115 | while (num_pages) { | ||
1116 | if (place->fpfn < (node->start + node->size) && | ||
1117 | !(place->lpfn && place->lpfn <= node->start)) | ||
1118 | return true; | ||
1119 | |||
1120 | num_pages -= node->size; | ||
1121 | ++node; | ||
1122 | } | ||
1123 | |||
1124 | return false; | ||
1125 | } | ||
1126 | |||
1127 | return ttm_bo_eviction_valuable(bo, place); | ||
1128 | } | ||
1129 | |||
1063 | static struct ttm_bo_driver amdgpu_bo_driver = { | 1130 | static struct ttm_bo_driver amdgpu_bo_driver = { |
1064 | .ttm_tt_create = &amdgpu_ttm_tt_create, | 1131 | .ttm_tt_create = &amdgpu_ttm_tt_create, |
1065 | .ttm_tt_populate = &amdgpu_ttm_tt_populate, | 1132 | .ttm_tt_populate = &amdgpu_ttm_tt_populate, |
1066 | .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, | 1133 | .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, |
1067 | .invalidate_caches = &amdgpu_invalidate_caches, | 1134 | .invalidate_caches = &amdgpu_invalidate_caches, |
1068 | .init_mem_type = &amdgpu_init_mem_type, | 1135 | .init_mem_type = &amdgpu_init_mem_type, |
1136 | .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, | ||
1069 | .evict_flags = &amdgpu_evict_flags, | 1137 | .evict_flags = &amdgpu_evict_flags, |
1070 | .move = &amdgpu_bo_move, | 1138 | .move = &amdgpu_bo_move, |
1071 | .verify_access = &amdgpu_verify_access, | 1139 | .verify_access = &amdgpu_verify_access, |
@@ -1119,7 +1187,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) | |||
1119 | 1187 | ||
1120 | r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, | 1188 | r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, |
1121 | AMDGPU_GEM_DOMAIN_VRAM, | 1189 | AMDGPU_GEM_DOMAIN_VRAM, |
1122 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 1190 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1191 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1123 | NULL, NULL, &adev->stollen_vga_memory); | 1192 | NULL, NULL, &adev->stollen_vga_memory); |
1124 | if (r) { | 1193 | if (r) { |
1125 | return r; | 1194 | return r; |
@@ -1247,7 +1316,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, | |||
1247 | uint64_t dst_offset, | 1316 | uint64_t dst_offset, |
1248 | uint32_t byte_count, | 1317 | uint32_t byte_count, |
1249 | struct reservation_object *resv, | 1318 | struct reservation_object *resv, |
1250 | struct fence **fence, bool direct_submit) | 1319 | struct dma_fence **fence, bool direct_submit) |
1251 | { | 1320 | { |
1252 | struct amdgpu_device *adev = ring->adev; | 1321 | struct amdgpu_device *adev = ring->adev; |
1253 | struct amdgpu_job *job; | 1322 | struct amdgpu_job *job; |
@@ -1294,7 +1363,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, | |||
1294 | if (direct_submit) { | 1363 | if (direct_submit) { |
1295 | r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, | 1364 | r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, |
1296 | NULL, NULL, fence); | 1365 | NULL, NULL, fence); |
1297 | job->fence = fence_get(*fence); | 1366 | job->fence = dma_fence_get(*fence); |
1298 | if (r) | 1367 | if (r) |
1299 | DRM_ERROR("Error scheduling IBs (%d)\n", r); | 1368 | DRM_ERROR("Error scheduling IBs (%d)\n", r); |
1300 | amdgpu_job_free(job); | 1369 | amdgpu_job_free(job); |
@@ -1315,9 +1384,9 @@ error_free: | |||
1315 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, | 1384 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
1316 | uint32_t src_data, | 1385 | uint32_t src_data, |
1317 | struct reservation_object *resv, | 1386 | struct reservation_object *resv, |
1318 | struct fence **fence) | 1387 | struct dma_fence **fence) |
1319 | { | 1388 | { |
1320 | struct amdgpu_device *adev = bo->adev; | 1389 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
1321 | struct amdgpu_job *job; | 1390 | struct amdgpu_job *job; |
1322 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; | 1391 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
1323 | 1392 | ||
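The reworked amdgpu_move_blit() above copies one drm_mm node chunk at a time and keeps only the newest fence, since copies submitted to the same ring complete in order; only the last fence needs to reach ttm_bo_pipeline_move(), and the error path simply waits on whatever was submitted so far. A simplified sketch of that fence-chaining idiom, with illustrative parameters in place of the ttm_mem_reg bookkeeping:

	/* Hedged illustration, not from this commit. */
	static int example_chunked_copy(struct amdgpu_ring *ring,
					uint64_t src, uint64_t dst,
					unsigned long bytes, unsigned long chunk,
					struct reservation_object *resv,
					struct dma_fence **last)
	{
		struct dma_fence *fence = NULL;
		int r = 0;

		while (bytes) {
			unsigned long cur = min(bytes, chunk);
			struct dma_fence *next;

			r = amdgpu_copy_buffer(ring, src, dst, cur, resv, &next, false);
			if (r)
				goto error;

			/* copies on one ring retire in order, so keep only the newest fence */
			dma_fence_put(fence);
			fence = next;

			src += cur;
			dst += cur;
			bytes -= cur;
		}

		*last = fence;		/* caller consumes the final fence */
		return 0;

	error:
		if (fence)
			dma_fence_wait(fence, false);
		dma_fence_put(fence);
		return r;
	}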
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 9812c805326c..98ee384f0fca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | |||
@@ -66,6 +66,7 @@ struct amdgpu_mman { | |||
66 | }; | 66 | }; |
67 | 67 | ||
68 | extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; | 68 | extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; |
69 | extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; | ||
69 | 70 | ||
70 | int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, | 71 | int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, |
71 | struct ttm_buffer_object *tbo, | 72 | struct ttm_buffer_object *tbo, |
@@ -77,11 +78,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, | |||
77 | uint64_t dst_offset, | 78 | uint64_t dst_offset, |
78 | uint32_t byte_count, | 79 | uint32_t byte_count, |
79 | struct reservation_object *resv, | 80 | struct reservation_object *resv, |
80 | struct fence **fence, bool direct_submit); | 81 | struct dma_fence **fence, bool direct_submit); |
81 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, | 82 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
82 | uint32_t src_data, | 83 | uint32_t src_data, |
83 | struct reservation_object *resv, | 84 | struct reservation_object *resv, |
84 | struct fence **fence); | 85 | struct dma_fence **fence); |
85 | 86 | ||
86 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); | 87 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); |
87 | bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); | 88 | bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index cb3d252f3c78..0f0b38191fac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | |||
@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode, | |||
228 | ucode->mc_addr = mc_addr; | 228 | ucode->mc_addr = mc_addr; |
229 | ucode->kaddr = kptr; | 229 | ucode->kaddr = kptr; |
230 | 230 | ||
231 | if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE) | ||
232 | return 0; | ||
233 | |||
231 | header = (const struct common_firmware_header *)ucode->fw->data; | 234 | header = (const struct common_firmware_header *)ucode->fw->data; |
232 | memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + | 235 | memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + |
233 | le32_to_cpu(header->ucode_array_offset_bytes)), | 236 | le32_to_cpu(header->ucode_array_offset_bytes)), |
@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode, | |||
236 | return 0; | 239 | return 0; |
237 | } | 240 | } |
238 | 241 | ||
242 | static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, | ||
243 | uint64_t mc_addr, void *kptr) | ||
244 | { | ||
245 | const struct gfx_firmware_header_v1_0 *header = NULL; | ||
246 | const struct common_firmware_header *comm_hdr = NULL; | ||
247 | uint8_t* src_addr = NULL; | ||
248 | uint8_t* dst_addr = NULL; | ||
249 | |||
250 | if (NULL == ucode->fw) | ||
251 | return 0; | ||
252 | |||
253 | comm_hdr = (const struct common_firmware_header *)ucode->fw->data; | ||
254 | header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; | ||
255 | dst_addr = ucode->kaddr + | ||
256 | ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes), | ||
257 | PAGE_SIZE); | ||
258 | src_addr = (uint8_t *)ucode->fw->data + | ||
259 | le32_to_cpu(comm_hdr->ucode_array_offset_bytes) + | ||
260 | (le32_to_cpu(header->jt_offset) * 4); | ||
261 | memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4); | ||
262 | |||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | |||
239 | int amdgpu_ucode_init_bo(struct amdgpu_device *adev) | 267 | int amdgpu_ucode_init_bo(struct amdgpu_device *adev) |
240 | { | 268 | { |
241 | struct amdgpu_bo **bo = &adev->firmware.fw_buf; | 269 | struct amdgpu_bo **bo = &adev->firmware.fw_buf; |
@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) | |||
247 | const struct common_firmware_header *header = NULL; | 275 | const struct common_firmware_header *header = NULL; |
248 | 276 | ||
249 | err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, | 277 | err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, |
250 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo); | 278 | amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, |
279 | 0, NULL, NULL, bo); | ||
251 | if (err) { | 280 | if (err) { |
252 | dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); | 281 | dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); |
253 | goto failed; | 282 | goto failed; |
@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) | |||
259 | goto failed_reserve; | 288 | goto failed_reserve; |
260 | } | 289 | } |
261 | 290 | ||
262 | err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr); | 291 | err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, |
292 | &fw_mc_addr); | ||
263 | if (err) { | 293 | if (err) { |
264 | dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err); | 294 | dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err); |
265 | goto failed_pin; | 295 | goto failed_pin; |
@@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) | |||
279 | header = (const struct common_firmware_header *)ucode->fw->data; | 309 | header = (const struct common_firmware_header *)ucode->fw->data; |
280 | amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset, | 310 | amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset, |
281 | fw_buf_ptr + fw_offset); | 311 | fw_buf_ptr + fw_offset); |
312 | if (i == AMDGPU_UCODE_ID_CP_MEC1) { | ||
313 | const struct gfx_firmware_header_v1_0 *cp_hdr; | ||
314 | cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; | ||
315 | amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset, | ||
316 | fw_buf_ptr + fw_offset); | ||
317 | fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE); | ||
318 | } | ||
282 | fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | 319 | fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); |
283 | } | 320 | } |
284 | } | 321 | } |
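With the MEC jump-table patching added above, the firmware buffer slot for CP MEC1 effectively holds the page-aligned microcode followed by the page-aligned jump table, which is why fw_offset is advanced a second time for that ucode. A small hedged helper expressing that layout, assuming the usual gfx_firmware_header_v1_0 fields:

	/* Hedged illustration, not from this commit. */
	static unsigned long example_mec_slot_size(const struct gfx_firmware_header_v1_0 *hdr)
	{
		unsigned long size;

		/* page-aligned microcode image */
		size = ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes), PAGE_SIZE);
		/* plus the page-aligned jump table; jt_size counts dwords */
		size += ALIGN(le32_to_cpu(hdr->jt_size) << 2, PAGE_SIZE);

		return size;
	}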
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index e468be4e28fa..a8a4230729f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | |||
@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID { | |||
130 | AMDGPU_UCODE_ID_CP_MEC1, | 130 | AMDGPU_UCODE_ID_CP_MEC1, |
131 | AMDGPU_UCODE_ID_CP_MEC2, | 131 | AMDGPU_UCODE_ID_CP_MEC2, |
132 | AMDGPU_UCODE_ID_RLC_G, | 132 | AMDGPU_UCODE_ID_RLC_G, |
133 | AMDGPU_UCODE_ID_STORAGE, | ||
133 | AMDGPU_UCODE_ID_MAXIMUM, | 134 | AMDGPU_UCODE_ID_MAXIMUM, |
134 | }; | 135 | }; |
135 | 136 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e3281cacc586..fb270c7e7171 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) | |||
333 | for (i = 0; i < adev->uvd.max_handles; ++i) { | 333 | for (i = 0; i < adev->uvd.max_handles; ++i) { |
334 | uint32_t handle = atomic_read(&adev->uvd.handles[i]); | 334 | uint32_t handle = atomic_read(&adev->uvd.handles[i]); |
335 | if (handle != 0 && adev->uvd.filp[i] == filp) { | 335 | if (handle != 0 && adev->uvd.filp[i] == filp) { |
336 | struct fence *fence; | 336 | struct dma_fence *fence; |
337 | 337 | ||
338 | r = amdgpu_uvd_get_destroy_msg(ring, handle, | 338 | r = amdgpu_uvd_get_destroy_msg(ring, handle, |
339 | false, &fence); | 339 | false, &fence); |
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) | |||
342 | continue; | 342 | continue; |
343 | } | 343 | } |
344 | 344 | ||
345 | fence_wait(fence, false); | 345 | dma_fence_wait(fence, false); |
346 | fence_put(fence); | 346 | dma_fence_put(fence); |
347 | 347 | ||
348 | adev->uvd.filp[i] = NULL; | 348 | adev->uvd.filp[i] = NULL; |
349 | atomic_set(&adev->uvd.handles[i], 0); | 349 | atomic_set(&adev->uvd.handles[i], 0); |
@@ -876,6 +876,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) | |||
876 | struct amdgpu_ib *ib = &parser->job->ibs[ib_idx]; | 876 | struct amdgpu_ib *ib = &parser->job->ibs[ib_idx]; |
877 | int r; | 877 | int r; |
878 | 878 | ||
879 | parser->job->vm = NULL; | ||
880 | ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); | ||
881 | |||
879 | if (ib->length_dw % 16) { | 882 | if (ib->length_dw % 16) { |
880 | DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", | 883 | DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", |
881 | ib->length_dw); | 884 | ib->length_dw); |
@@ -909,14 +912,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) | |||
909 | } | 912 | } |
910 | 913 | ||
911 | static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, | 914 | static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, |
912 | bool direct, struct fence **fence) | 915 | bool direct, struct dma_fence **fence) |
913 | { | 916 | { |
914 | struct ttm_validate_buffer tv; | 917 | struct ttm_validate_buffer tv; |
915 | struct ww_acquire_ctx ticket; | 918 | struct ww_acquire_ctx ticket; |
916 | struct list_head head; | 919 | struct list_head head; |
917 | struct amdgpu_job *job; | 920 | struct amdgpu_job *job; |
918 | struct amdgpu_ib *ib; | 921 | struct amdgpu_ib *ib; |
919 | struct fence *f = NULL; | 922 | struct dma_fence *f = NULL; |
920 | struct amdgpu_device *adev = ring->adev; | 923 | struct amdgpu_device *adev = ring->adev; |
921 | uint64_t addr; | 924 | uint64_t addr; |
922 | int i, r; | 925 | int i, r; |
@@ -931,7 +934,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, | |||
931 | if (r) | 934 | if (r) |
932 | return r; | 935 | return r; |
933 | 936 | ||
934 | if (!bo->adev->uvd.address_64_bit) { | 937 | if (!ring->adev->uvd.address_64_bit) { |
935 | amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); | 938 | amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); |
936 | amdgpu_uvd_force_into_uvd_segment(bo); | 939 | amdgpu_uvd_force_into_uvd_segment(bo); |
937 | } | 940 | } |
@@ -960,7 +963,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, | |||
960 | 963 | ||
961 | if (direct) { | 964 | if (direct) { |
962 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); | 965 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); |
963 | job->fence = fence_get(f); | 966 | job->fence = dma_fence_get(f); |
964 | if (r) | 967 | if (r) |
965 | goto err_free; | 968 | goto err_free; |
966 | 969 | ||
@@ -975,9 +978,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, | |||
975 | ttm_eu_fence_buffer_objects(&ticket, &head, f); | 978 | ttm_eu_fence_buffer_objects(&ticket, &head, f); |
976 | 979 | ||
977 | if (fence) | 980 | if (fence) |
978 | *fence = fence_get(f); | 981 | *fence = dma_fence_get(f); |
979 | amdgpu_bo_unref(&bo); | 982 | amdgpu_bo_unref(&bo); |
980 | fence_put(f); | 983 | dma_fence_put(f); |
981 | 984 | ||
982 | return 0; | 985 | return 0; |
983 | 986 | ||
@@ -993,7 +996,7 @@ err: | |||
993 | crash the vcpu so just try to emit a dummy create/destroy msg to | 996 | crash the vcpu so just try to emit a dummy create/destroy msg to |
994 | avoid this */ | 997 | avoid this */ |
995 | int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | 998 | int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, |
996 | struct fence **fence) | 999 | struct dma_fence **fence) |
997 | { | 1000 | { |
998 | struct amdgpu_device *adev = ring->adev; | 1001 | struct amdgpu_device *adev = ring->adev; |
999 | struct amdgpu_bo *bo; | 1002 | struct amdgpu_bo *bo; |
@@ -1002,7 +1005,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
1002 | 1005 | ||
1003 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, | 1006 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, |
1004 | AMDGPU_GEM_DOMAIN_VRAM, | 1007 | AMDGPU_GEM_DOMAIN_VRAM, |
1005 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 1008 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1009 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1006 | NULL, NULL, &bo); | 1010 | NULL, NULL, &bo); |
1007 | if (r) | 1011 | if (r) |
1008 | return r; | 1012 | return r; |
@@ -1042,7 +1046,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
1042 | } | 1046 | } |
1043 | 1047 | ||
1044 | int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | 1048 | int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, |
1045 | bool direct, struct fence **fence) | 1049 | bool direct, struct dma_fence **fence) |
1046 | { | 1050 | { |
1047 | struct amdgpu_device *adev = ring->adev; | 1051 | struct amdgpu_device *adev = ring->adev; |
1048 | struct amdgpu_bo *bo; | 1052 | struct amdgpu_bo *bo; |
@@ -1051,7 +1055,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
1051 | 1055 | ||
1052 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, | 1056 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, |
1053 | AMDGPU_GEM_DOMAIN_VRAM, | 1057 | AMDGPU_GEM_DOMAIN_VRAM, |
1054 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 1058 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1059 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1055 | NULL, NULL, &bo); | 1060 | NULL, NULL, &bo); |
1056 | if (r) | 1061 | if (r) |
1057 | return r; | 1062 | return r; |
@@ -1128,7 +1133,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) | |||
1128 | */ | 1133 | */ |
1129 | int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) | 1134 | int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) |
1130 | { | 1135 | { |
1131 | struct fence *fence; | 1136 | struct dma_fence *fence; |
1132 | long r; | 1137 | long r; |
1133 | 1138 | ||
1134 | r = amdgpu_uvd_get_create_msg(ring, 1, NULL); | 1139 | r = amdgpu_uvd_get_create_msg(ring, 1, NULL); |
@@ -1143,7 +1148,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1143 | goto error; | 1148 | goto error; |
1144 | } | 1149 | } |
1145 | 1150 | ||
1146 | r = fence_wait_timeout(fence, false, timeout); | 1151 | r = dma_fence_wait_timeout(fence, false, timeout); |
1147 | if (r == 0) { | 1152 | if (r == 0) { |
1148 | DRM_ERROR("amdgpu: IB test timed out.\n"); | 1153 | DRM_ERROR("amdgpu: IB test timed out.\n"); |
1149 | r = -ETIMEDOUT; | 1154 | r = -ETIMEDOUT; |
@@ -1154,7 +1159,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1154 | r = 0; | 1159 | r = 0; |
1155 | } | 1160 | } |
1156 | 1161 | ||
1157 | fence_put(fence); | 1162 | dma_fence_put(fence); |
1158 | 1163 | ||
1159 | error: | 1164 | error: |
1160 | return r; | 1165 | return r; |
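The UVD hunks above are typical of the whole fence -> dma_fence backmerge: every call keeps its semantics and only gains the dma_ prefix. Purely as a hedged sketch of the resulting driver-side lifecycle (not code from this patch; wait_on_ring_fence() is an invented helper name), the get/wait/put pattern now reads:

#include <linux/dma-fence.h>
#include <linux/errno.h>

/* Loosely modelled on the amdgpu_uvd_ring_test_ib() hunk above; the extra
 * get/put only makes the helper safe to call with a borrowed pointer. */
static long wait_on_ring_fence(struct dma_fence *f, long timeout)
{
	long r;

	if (!f)
		return -EINVAL;

	dma_fence_get(f);			/* reference held across the wait */
	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0)				/* 0 means the wait timed out */
		r = -ETIMEDOUT;
	else if (r > 0)				/* >0 is the remaining timeout */
		r = 0;
	dma_fence_put(f);

	return r;
}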
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index c850009602d1..6249ba1bde2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | |||
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev); | |||
29 | int amdgpu_uvd_suspend(struct amdgpu_device *adev); | 29 | int amdgpu_uvd_suspend(struct amdgpu_device *adev); |
30 | int amdgpu_uvd_resume(struct amdgpu_device *adev); | 30 | int amdgpu_uvd_resume(struct amdgpu_device *adev); |
31 | int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | 31 | int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, |
32 | struct fence **fence); | 32 | struct dma_fence **fence); |
33 | int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | 33 | int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, |
34 | bool direct, struct fence **fence); | 34 | bool direct, struct dma_fence **fence); |
35 | void amdgpu_uvd_free_handles(struct amdgpu_device *adev, | 35 | void amdgpu_uvd_free_handles(struct amdgpu_device *adev, |
36 | struct drm_file *filp); | 36 | struct drm_file *filp); |
37 | int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); | 37 | int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 7fe8fd884f06..69b66b9e7f57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) | |||
157 | 157 | ||
158 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, | 158 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, |
159 | AMDGPU_GEM_DOMAIN_VRAM, | 159 | AMDGPU_GEM_DOMAIN_VRAM, |
160 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 160 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
161 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
161 | NULL, NULL, &adev->vce.vcpu_bo); | 162 | NULL, NULL, &adev->vce.vcpu_bo); |
162 | if (r) { | 163 | if (r) { |
163 | dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); | 164 | dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); |
@@ -395,12 +396,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) | |||
395 | * Open up a stream for HW test | 396 | * Open up a stream for HW test |
396 | */ | 397 | */ |
397 | int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | 398 | int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, |
398 | struct fence **fence) | 399 | struct dma_fence **fence) |
399 | { | 400 | { |
400 | const unsigned ib_size_dw = 1024; | 401 | const unsigned ib_size_dw = 1024; |
401 | struct amdgpu_job *job; | 402 | struct amdgpu_job *job; |
402 | struct amdgpu_ib *ib; | 403 | struct amdgpu_ib *ib; |
403 | struct fence *f = NULL; | 404 | struct dma_fence *f = NULL; |
404 | uint64_t dummy; | 405 | uint64_t dummy; |
405 | int i, r; | 406 | int i, r; |
406 | 407 | ||
@@ -450,14 +451,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
450 | ib->ptr[i] = 0x0; | 451 | ib->ptr[i] = 0x0; |
451 | 452 | ||
452 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); | 453 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); |
453 | job->fence = fence_get(f); | 454 | job->fence = dma_fence_get(f); |
454 | if (r) | 455 | if (r) |
455 | goto err; | 456 | goto err; |
456 | 457 | ||
457 | amdgpu_job_free(job); | 458 | amdgpu_job_free(job); |
458 | if (fence) | 459 | if (fence) |
459 | *fence = fence_get(f); | 460 | *fence = dma_fence_get(f); |
460 | fence_put(f); | 461 | dma_fence_put(f); |
461 | return 0; | 462 | return 0; |
462 | 463 | ||
463 | err: | 464 | err: |
@@ -476,12 +477,12 @@ err: | |||
476 | * Close up a stream for HW test or if userspace failed to do so | 477 | * Close up a stream for HW test or if userspace failed to do so |
477 | */ | 478 | */ |
478 | int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | 479 | int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, |
479 | bool direct, struct fence **fence) | 480 | bool direct, struct dma_fence **fence) |
480 | { | 481 | { |
481 | const unsigned ib_size_dw = 1024; | 482 | const unsigned ib_size_dw = 1024; |
482 | struct amdgpu_job *job; | 483 | struct amdgpu_job *job; |
483 | struct amdgpu_ib *ib; | 484 | struct amdgpu_ib *ib; |
484 | struct fence *f = NULL; | 485 | struct dma_fence *f = NULL; |
485 | int i, r; | 486 | int i, r; |
486 | 487 | ||
487 | r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); | 488 | r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); |
@@ -513,7 +514,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
513 | 514 | ||
514 | if (direct) { | 515 | if (direct) { |
515 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); | 516 | r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); |
516 | job->fence = fence_get(f); | 517 | job->fence = dma_fence_get(f); |
517 | if (r) | 518 | if (r) |
518 | goto err; | 519 | goto err; |
519 | 520 | ||
@@ -526,8 +527,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
526 | } | 527 | } |
527 | 528 | ||
528 | if (fence) | 529 | if (fence) |
529 | *fence = fence_get(f); | 530 | *fence = dma_fence_get(f); |
530 | fence_put(f); | 531 | dma_fence_put(f); |
531 | return 0; | 532 | return 0; |
532 | 533 | ||
533 | err: | 534 | err: |
@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) | |||
641 | uint32_t *size = &tmp; | 642 | uint32_t *size = &tmp; |
642 | int i, r, idx = 0; | 643 | int i, r, idx = 0; |
643 | 644 | ||
645 | p->job->vm = NULL; | ||
646 | ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); | ||
647 | |||
644 | r = amdgpu_cs_sysvm_access_required(p); | 648 | r = amdgpu_cs_sysvm_access_required(p); |
645 | if (r) | 649 | if (r) |
646 | return r; | 650 | return r; |
@@ -788,6 +792,96 @@ out: | |||
788 | } | 792 | } |
789 | 793 | ||
790 | /** | 794 | /** |
795 | * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode | ||
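 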
796 | * | ||
797 | * @p: parser context | ||
798 | * | ||
799 | */ | ||
800 | int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx) | ||
801 | { | ||
802 | struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; | ||
803 | int session_idx = -1; | ||
804 | uint32_t destroyed = 0; | ||
805 | uint32_t created = 0; | ||
806 | uint32_t allocated = 0; | ||
807 | uint32_t tmp, handle = 0; | ||
808 | int i, r = 0, idx = 0; | ||
809 | |||
810 | while (idx < ib->length_dw) { | ||
811 | uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx); | ||
812 | uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1); | ||
813 | |||
814 | if ((len < 8) || (len & 3)) { | ||
815 | DRM_ERROR("invalid VCE command length (%d)!\n", len); | ||
816 | r = -EINVAL; | ||
817 | goto out; | ||
818 | } | ||
819 | |||
820 | switch (cmd) { | ||
821 | case 0x00000001: /* session */ | ||
822 | handle = amdgpu_get_ib_value(p, ib_idx, idx + 2); | ||
823 | session_idx = amdgpu_vce_validate_handle(p, handle, | ||
824 | &allocated); | ||
825 | if (session_idx < 0) { | ||
826 | r = session_idx; | ||
827 | goto out; | ||
828 | } | ||
829 | break; | ||
830 | |||
831 | case 0x01000001: /* create */ | ||
832 | created |= 1 << session_idx; | ||
833 | if (destroyed & (1 << session_idx)) { | ||
834 | destroyed &= ~(1 << session_idx); | ||
835 | allocated |= 1 << session_idx; | ||
836 | |||
837 | } else if (!(allocated & (1 << session_idx))) { | ||
838 | DRM_ERROR("Handle already in use!\n"); | ||
839 | r = -EINVAL; | ||
840 | goto out; | ||
841 | } | ||
842 | |||
843 | break; | ||
844 | |||
845 | case 0x02000001: /* destroy */ | ||
846 | destroyed |= 1 << session_idx; | ||
847 | break; | ||
848 | |||
849 | default: | ||
850 | break; | ||
851 | } | ||
852 | |||
853 | if (session_idx == -1) { | ||
854 | DRM_ERROR("no session command at start of IB\n"); | ||
855 | r = -EINVAL; | ||
856 | goto out; | ||
857 | } | ||
858 | |||
859 | idx += len / 4; | ||
860 | } | ||
861 | |||
862 | if (allocated & ~created) { | ||
863 | DRM_ERROR("New session without create command!\n"); | ||
864 | r = -ENOENT; | ||
865 | } | ||
866 | |||
867 | out: | ||
868 | if (!r) { | ||
869 | /* No error, free all destroyed handle slots */ | ||
870 | tmp = destroyed; | ||
871 | amdgpu_ib_free(p->adev, ib, NULL); | ||
872 | } else { | ||
873 | /* Error during parsing, free all allocated handle slots */ | ||
874 | tmp = allocated; | ||
875 | } | ||
876 | |||
877 | for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) | ||
878 | if (tmp & (1 << i)) | ||
879 | atomic_set(&p->adev->vce.handles[i], 0); | ||
880 | |||
881 | return r; | ||
882 | } | ||
883 | |||
884 | /** | ||
791 | * amdgpu_vce_ring_emit_ib - execute indirect buffer | 885 | * amdgpu_vce_ring_emit_ib - execute indirect buffer |
792 | * | 886 | * |
793 | * @ring: engine to use | 887 | * @ring: engine to use |
@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | |||
823 | amdgpu_ring_write(ring, VCE_CMD_END); | 917 | amdgpu_ring_write(ring, VCE_CMD_END); |
824 | } | 918 | } |
825 | 919 | ||
826 | unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
827 | { | ||
828 | return | ||
829 | 4; /* amdgpu_vce_ring_emit_ib */ | ||
830 | } | ||
831 | |||
832 | unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
833 | { | ||
834 | return | ||
835 | 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */ | ||
836 | } | ||
837 | |||
838 | /** | 920 | /** |
839 | * amdgpu_vce_ring_test_ring - test if VCE ring is working | 921 | * amdgpu_vce_ring_test_ring - test if VCE ring is working |
840 | * | 922 | * |
@@ -883,7 +965,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) | |||
883 | */ | 965 | */ |
884 | int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) | 966 | int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) |
885 | { | 967 | { |
886 | struct fence *fence = NULL; | 968 | struct dma_fence *fence = NULL; |
887 | long r; | 969 | long r; |
888 | 970 | ||
889 | /* skip vce ring1/2 ib test for now, since it's not reliable */ | 971 | /* skip vce ring1/2 ib test for now, since it's not reliable */ |
@@ -902,7 +984,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
902 | goto error; | 984 | goto error; |
903 | } | 985 | } |
904 | 986 | ||
905 | r = fence_wait_timeout(fence, false, timeout); | 987 | r = dma_fence_wait_timeout(fence, false, timeout); |
906 | if (r == 0) { | 988 | if (r == 0) { |
907 | DRM_ERROR("amdgpu: IB test timed out.\n"); | 989 | DRM_ERROR("amdgpu: IB test timed out.\n"); |
908 | r = -ETIMEDOUT; | 990 | r = -ETIMEDOUT; |
@@ -913,6 +995,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
913 | r = 0; | 995 | r = 0; |
914 | } | 996 | } |
915 | error: | 997 | error: |
916 | fence_put(fence); | 998 | dma_fence_put(fence); |
917 | return r; | 999 | return r; |
918 | } | 1000 | } |
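For readers new to the session bookkeeping in the added amdgpu_vce_ring_parse_cs_vm(), here is a simplified standalone illustration of how the created/destroyed/allocated bitmasks interact (plain user-space C, session index invented; in the real parser the allocated bit is set when the handle is first validated, and a destroy that follows a create in the same IB turns the slot back into "allocated" so it is released again):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t created = 0, destroyed = 0, allocated = 0;
	int session_idx = 2;		/* slot picked for the handle */

	/* session command: a new handle takes a slot -> allocated */
	allocated |= 1u << session_idx;

	/* create command for that session */
	created |= 1u << session_idx;

	/* destroy command later in the same IB */
	destroyed |= 1u << session_idx;

	/* a slot that is allocated but never created would be an error */
	if (allocated & ~created)
		printf("new session without create command!\n");

	/* on success every destroyed slot is released again */
	for (int i = 0; i < 16; ++i)
		if (destroyed & (1u << i))
			printf("releasing VCE handle slot %d\n", i);

	return 0;
}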
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 12729d2852df..d98041f7508d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | |||
@@ -29,11 +29,12 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev); | |||
29 | int amdgpu_vce_suspend(struct amdgpu_device *adev); | 29 | int amdgpu_vce_suspend(struct amdgpu_device *adev); |
30 | int amdgpu_vce_resume(struct amdgpu_device *adev); | 30 | int amdgpu_vce_resume(struct amdgpu_device *adev); |
31 | int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | 31 | int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, |
32 | struct fence **fence); | 32 | struct dma_fence **fence); |
33 | int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | 33 | int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, |
34 | bool direct, struct fence **fence); | 34 | bool direct, struct dma_fence **fence); |
35 | void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); | 35 | void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); |
36 | int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); | 36 | int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); |
37 | int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); | ||
37 | void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, | 38 | void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, |
38 | unsigned vm_id, bool ctx_switch); | 39 | unsigned vm_id, bool ctx_switch); |
39 | void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | 40 | void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 06f24322e7c3..e480263387e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -25,7 +25,7 @@ | |||
25 | * Alex Deucher | 25 | * Alex Deucher |
26 | * Jerome Glisse | 26 | * Jerome Glisse |
27 | */ | 27 | */ |
28 | #include <linux/fence-array.h> | 28 | #include <linux/dma-fence-array.h> |
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include <drm/amdgpu_drm.h> | 30 | #include <drm/amdgpu_drm.h> |
31 | #include "amdgpu.h" | 31 | #include "amdgpu.h" |
@@ -116,38 +116,43 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, | |||
116 | } | 116 | } |
117 | 117 | ||
118 | /** | 118 | /** |
119 | * amdgpu_vm_get_bos - add the vm BOs to a duplicates list | 119 | * amdgpu_vm_validate_pt_bos - validate the page table BOs |
120 | * | 120 | * |
121 | * @adev: amdgpu device pointer | 121 | * @adev: amdgpu device pointer |
122 | * @vm: vm providing the BOs | 122 | * @vm: vm providing the BOs |
123 | * @duplicates: head of duplicates list | 123 | * @validate: callback to do the validation |
124 | * @param: parameter for the validation callback | ||
124 | * | 125 | * |
125 | * Add the page directory to the BO duplicates list | 126 | * Validate the page table BOs on command submission if necessary. |
126 | * for command submission. | ||
127 | */ | 127 | */ |
128 | void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, | 128 | int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
129 | struct list_head *duplicates) | 129 | int (*validate)(void *p, struct amdgpu_bo *bo), |
130 | void *param) | ||
130 | { | 131 | { |
131 | uint64_t num_evictions; | 132 | uint64_t num_evictions; |
132 | unsigned i; | 133 | unsigned i; |
134 | int r; | ||
133 | 135 | ||
134 | /* We only need to validate the page tables | 136 | /* We only need to validate the page tables |
135 | * if they aren't already valid. | 137 | * if they aren't already valid. |
136 | */ | 138 | */ |
137 | num_evictions = atomic64_read(&adev->num_evictions); | 139 | num_evictions = atomic64_read(&adev->num_evictions); |
138 | if (num_evictions == vm->last_eviction_counter) | 140 | if (num_evictions == vm->last_eviction_counter) |
139 | return; | 141 | return 0; |
140 | 142 | ||
141 | /* add the vm page table to the list */ | 143 | /* add the vm page table to the list */ |
142 | for (i = 0; i <= vm->max_pde_used; ++i) { | 144 | for (i = 0; i <= vm->max_pde_used; ++i) { |
143 | struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; | 145 | struct amdgpu_bo *bo = vm->page_tables[i].bo; |
144 | 146 | ||
145 | if (!entry->robj) | 147 | if (!bo) |
146 | continue; | 148 | continue; |
147 | 149 | ||
148 | list_add(&entry->tv.head, duplicates); | 150 | r = validate(param, bo); |
151 | if (r) | ||
152 | return r; | ||
149 | } | 153 | } |
150 | 154 | ||
155 | return 0; | ||
151 | } | 156 | } |
152 | 157 | ||
153 | /** | 158 | /** |
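The refactor above replaces "collect the page-table BOs into a duplicates list" with "run a caller-supplied callback over each page-table BO, stopping at the first failure". A self-contained sketch of that callback-over-table pattern (the struct and function names here are illustrative stand-ins, not the real amdgpu types):

#include <stdio.h>

struct bo { int id; };

/* Walk the table and hand every entry to the caller's callback. */
static int validate_all(struct bo *table, int n,
			int (*validate)(void *param, struct bo *bo),
			void *param)
{
	for (int i = 0; i < n; ++i) {
		int r = validate(param, &table[i]);
		if (r)
			return r;	/* propagate the first failure */
	}
	return 0;
}

static int print_validate(void *param, struct bo *bo)
{
	printf("%s bo %d\n", (const char *)param, bo->id);
	return 0;
}

int main(void)
{
	struct bo pts[3] = { {0}, {1}, {2} };

	return validate_all(pts, 3, print_validate, (void *)"validating");
}

The benefit mirrored by the patch: the VM code no longer needs to know how the CS path validates buffers, it only needs the callback and an opaque parameter.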
@@ -166,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, | |||
166 | 171 | ||
167 | spin_lock(&glob->lru_lock); | 172 | spin_lock(&glob->lru_lock); |
168 | for (i = 0; i <= vm->max_pde_used; ++i) { | 173 | for (i = 0; i <= vm->max_pde_used; ++i) { |
169 | struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; | 174 | struct amdgpu_bo *bo = vm->page_tables[i].bo; |
170 | 175 | ||
171 | if (!entry->robj) | 176 | if (!bo) |
172 | continue; | 177 | continue; |
173 | 178 | ||
174 | ttm_bo_move_to_lru_tail(&entry->robj->tbo); | 179 | ttm_bo_move_to_lru_tail(&bo->tbo); |
175 | } | 180 | } |
176 | spin_unlock(&glob->lru_lock); | 181 | spin_unlock(&glob->lru_lock); |
177 | } | 182 | } |
@@ -194,14 +199,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev, | |||
194 | * Allocate an id for the vm, adding fences to the sync obj as necessary. | 199 | * Allocate an id for the vm, adding fences to the sync obj as necessary. |
195 | */ | 200 | */ |
196 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | 201 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, |
197 | struct amdgpu_sync *sync, struct fence *fence, | 202 | struct amdgpu_sync *sync, struct dma_fence *fence, |
198 | struct amdgpu_job *job) | 203 | struct amdgpu_job *job) |
199 | { | 204 | { |
200 | struct amdgpu_device *adev = ring->adev; | 205 | struct amdgpu_device *adev = ring->adev; |
201 | uint64_t fence_context = adev->fence_context + ring->idx; | 206 | uint64_t fence_context = adev->fence_context + ring->idx; |
202 | struct fence *updates = sync->last_vm_update; | 207 | struct dma_fence *updates = sync->last_vm_update; |
203 | struct amdgpu_vm_id *id, *idle; | 208 | struct amdgpu_vm_id *id, *idle; |
204 | struct fence **fences; | 209 | struct dma_fence **fences; |
205 | unsigned i; | 210 | unsigned i; |
206 | int r = 0; | 211 | int r = 0; |
207 | 212 | ||
@@ -225,17 +230,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
225 | if (&idle->list == &adev->vm_manager.ids_lru) { | 230 | if (&idle->list == &adev->vm_manager.ids_lru) { |
226 | u64 fence_context = adev->vm_manager.fence_context + ring->idx; | 231 | u64 fence_context = adev->vm_manager.fence_context + ring->idx; |
227 | unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; | 232 | unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; |
228 | struct fence_array *array; | 233 | struct dma_fence_array *array; |
229 | unsigned j; | 234 | unsigned j; |
230 | 235 | ||
231 | for (j = 0; j < i; ++j) | 236 | for (j = 0; j < i; ++j) |
232 | fence_get(fences[j]); | 237 | dma_fence_get(fences[j]); |
233 | 238 | ||
234 | array = fence_array_create(i, fences, fence_context, | 239 | array = dma_fence_array_create(i, fences, fence_context, |
235 | seqno, true); | 240 | seqno, true); |
236 | if (!array) { | 241 | if (!array) { |
237 | for (j = 0; j < i; ++j) | 242 | for (j = 0; j < i; ++j) |
238 | fence_put(fences[j]); | 243 | dma_fence_put(fences[j]); |
239 | kfree(fences); | 244 | kfree(fences); |
240 | r = -ENOMEM; | 245 | r = -ENOMEM; |
241 | goto error; | 246 | goto error; |
@@ -243,7 +248,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
243 | 248 | ||
244 | 249 | ||
245 | r = amdgpu_sync_fence(ring->adev, sync, &array->base); | 250 | r = amdgpu_sync_fence(ring->adev, sync, &array->base); |
246 | fence_put(&array->base); | 251 | dma_fence_put(&array->base); |
247 | if (r) | 252 | if (r) |
248 | goto error; | 253 | goto error; |
249 | 254 | ||
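The hunk above follows the usual dma_fence_array ownership rules: each collected fence gets an extra reference, dma_fence_array_create() then takes ownership of both the references and the fences[] array, and on allocation failure the caller has to unwind by hand. A hedged kernel-style sketch of that pattern (combine_fences() is a made-up helper, not part of this patch):

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static struct dma_fence *combine_fences(struct dma_fence **fences,
					unsigned int count,
					u64 context, unsigned int seqno)
{
	struct dma_fence_array *array;
	unsigned int i;

	for (i = 0; i < count; ++i)
		dma_fence_get(fences[i]);

	/* signal_on_any = true: the array signals with the first fence */
	array = dma_fence_array_create(count, fences, context, seqno, true);
	if (!array) {
		/* the array never took ownership, drop everything ourselves */
		for (i = 0; i < count; ++i)
			dma_fence_put(fences[i]);
		kfree(fences);
		return NULL;
	}

	return &array->base;	/* usable like any other dma_fence */
}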
@@ -257,7 +262,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
257 | /* Check if we can use a VMID already assigned to this VM */ | 262 | /* Check if we can use a VMID already assigned to this VM */ |
258 | i = ring->idx; | 263 | i = ring->idx; |
259 | do { | 264 | do { |
260 | struct fence *flushed; | 265 | struct dma_fence *flushed; |
261 | 266 | ||
262 | id = vm->ids[i++]; | 267 | id = vm->ids[i++]; |
263 | if (i == AMDGPU_MAX_RINGS) | 268 | if (i == AMDGPU_MAX_RINGS) |
@@ -279,12 +284,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
279 | continue; | 284 | continue; |
280 | 285 | ||
281 | if (id->last_flush->context != fence_context && | 286 | if (id->last_flush->context != fence_context && |
282 | !fence_is_signaled(id->last_flush)) | 287 | !dma_fence_is_signaled(id->last_flush)) |
283 | continue; | 288 | continue; |
284 | 289 | ||
285 | flushed = id->flushed_updates; | 290 | flushed = id->flushed_updates; |
286 | if (updates && | 291 | if (updates && |
287 | (!flushed || fence_is_later(updates, flushed))) | 292 | (!flushed || dma_fence_is_later(updates, flushed))) |
288 | continue; | 293 | continue; |
289 | 294 | ||
290 | /* Good we can use this VMID. Remember this submission as | 295 | /* Good we can use this VMID. Remember this submission as |
@@ -315,14 +320,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
315 | if (r) | 320 | if (r) |
316 | goto error; | 321 | goto error; |
317 | 322 | ||
318 | fence_put(id->first); | 323 | dma_fence_put(id->first); |
319 | id->first = fence_get(fence); | 324 | id->first = dma_fence_get(fence); |
320 | 325 | ||
321 | fence_put(id->last_flush); | 326 | dma_fence_put(id->last_flush); |
322 | id->last_flush = NULL; | 327 | id->last_flush = NULL; |
323 | 328 | ||
324 | fence_put(id->flushed_updates); | 329 | dma_fence_put(id->flushed_updates); |
325 | id->flushed_updates = fence_get(updates); | 330 | id->flushed_updates = dma_fence_get(updates); |
326 | 331 | ||
327 | id->pd_gpu_addr = job->vm_pd_addr; | 332 | id->pd_gpu_addr = job->vm_pd_addr; |
328 | id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); | 333 | id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); |
@@ -341,9 +346,9 @@ error: | |||
341 | static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) | 346 | static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) |
342 | { | 347 | { |
343 | struct amdgpu_device *adev = ring->adev; | 348 | struct amdgpu_device *adev = ring->adev; |
344 | const struct amdgpu_ip_block_version *ip_block; | 349 | const struct amdgpu_ip_block *ip_block; |
345 | 350 | ||
346 | if (ring->type != AMDGPU_RING_TYPE_COMPUTE) | 351 | if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) |
347 | /* only compute rings */ | 352 | /* only compute rings */ |
348 | return false; | 353 | return false; |
349 | 354 | ||
@@ -351,10 +356,10 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) | |||
351 | if (!ip_block) | 356 | if (!ip_block) |
352 | return false; | 357 | return false; |
353 | 358 | ||
354 | if (ip_block->major <= 7) { | 359 | if (ip_block->version->major <= 7) { |
355 | /* gfx7 has no workaround */ | 360 | /* gfx7 has no workaround */ |
356 | return true; | 361 | return true; |
357 | } else if (ip_block->major == 8) { | 362 | } else if (ip_block->version->major == 8) { |
358 | if (adev->gfx.mec_fw_version >= 673) | 363 | if (adev->gfx.mec_fw_version >= 673) |
359 | /* gfx8 is fixed in MEC firmware 673 */ | 364 | /* gfx8 is fixed in MEC firmware 673 */ |
360 | return false; | 365 | return false; |
@@ -393,7 +398,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) | |||
393 | 398 | ||
394 | if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || | 399 | if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || |
395 | amdgpu_vm_is_gpu_reset(adev, id))) { | 400 | amdgpu_vm_is_gpu_reset(adev, id))) { |
396 | struct fence *fence; | 401 | struct dma_fence *fence; |
397 | 402 | ||
398 | trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); | 403 | trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); |
399 | amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); | 404 | amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); |
@@ -403,7 +408,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) | |||
403 | return r; | 408 | return r; |
404 | 409 | ||
405 | mutex_lock(&adev->vm_manager.lock); | 410 | mutex_lock(&adev->vm_manager.lock); |
406 | fence_put(id->last_flush); | 411 | dma_fence_put(id->last_flush); |
407 | id->last_flush = fence; | 412 | id->last_flush = fence; |
408 | mutex_unlock(&adev->vm_manager.lock); | 413 | mutex_unlock(&adev->vm_manager.lock); |
409 | } | 414 | } |
@@ -537,7 +542,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
537 | struct amdgpu_bo *bo) | 542 | struct amdgpu_bo *bo) |
538 | { | 543 | { |
539 | struct amdgpu_ring *ring; | 544 | struct amdgpu_ring *ring; |
540 | struct fence *fence = NULL; | 545 | struct dma_fence *fence = NULL; |
541 | struct amdgpu_job *job; | 546 | struct amdgpu_job *job; |
542 | struct amdgpu_pte_update_params params; | 547 | struct amdgpu_pte_update_params params; |
543 | unsigned entries; | 548 | unsigned entries; |
@@ -578,7 +583,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
578 | goto error_free; | 583 | goto error_free; |
579 | 584 | ||
580 | amdgpu_bo_fence(bo, fence, true); | 585 | amdgpu_bo_fence(bo, fence, true); |
581 | fence_put(fence); | 586 | dma_fence_put(fence); |
582 | return 0; | 587 | return 0; |
583 | 588 | ||
584 | error_free: | 589 | error_free: |
@@ -612,32 +617,35 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) | |||
612 | return result; | 617 | return result; |
613 | } | 618 | } |
614 | 619 | ||
615 | static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, | 620 | /* |
616 | struct amdgpu_vm *vm, | 621 | * amdgpu_vm_update_page_directory - make sure that page directory is valid |
617 | bool shadow) | 622 | * |
623 | * @adev: amdgpu_device pointer | ||
624 | * @vm: requested vm | ||
625 | * @start: start of GPU address range | ||
626 | * @end: end of GPU address range | ||
627 | * | ||
628 | * Allocates new page tables if necessary | ||
629 | * and updates the page directory. | ||
630 | * Returns 0 for success, error for failure. | ||
631 | */ | ||
632 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
633 | struct amdgpu_vm *vm) | ||
618 | { | 634 | { |
635 | struct amdgpu_bo *shadow; | ||
619 | struct amdgpu_ring *ring; | 636 | struct amdgpu_ring *ring; |
620 | struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow : | 637 | uint64_t pd_addr, shadow_addr; |
621 | vm->page_directory; | ||
622 | uint64_t pd_addr; | ||
623 | uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; | 638 | uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; |
624 | uint64_t last_pde = ~0, last_pt = ~0; | 639 | uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0; |
625 | unsigned count = 0, pt_idx, ndw; | 640 | unsigned count = 0, pt_idx, ndw; |
626 | struct amdgpu_job *job; | 641 | struct amdgpu_job *job; |
627 | struct amdgpu_pte_update_params params; | 642 | struct amdgpu_pte_update_params params; |
628 | struct fence *fence = NULL; | 643 | struct dma_fence *fence = NULL; |
629 | 644 | ||
630 | int r; | 645 | int r; |
631 | 646 | ||
632 | if (!pd) | ||
633 | return 0; | ||
634 | |||
635 | r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem); | ||
636 | if (r) | ||
637 | return r; | ||
638 | |||
639 | pd_addr = amdgpu_bo_gpu_offset(pd); | ||
640 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); | 647 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); |
648 | shadow = vm->page_directory->shadow; | ||
641 | 649 | ||
642 | /* padding, etc. */ | 650 | /* padding, etc. */ |
643 | ndw = 64; | 651 | ndw = 64; |
@@ -645,6 +653,17 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, | |||
645 | /* assume the worst case */ | 653 | /* assume the worst case */ |
646 | ndw += vm->max_pde_used * 6; | 654 | ndw += vm->max_pde_used * 6; |
647 | 655 | ||
656 | pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); | ||
657 | if (shadow) { | ||
658 | r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); | ||
659 | if (r) | ||
660 | return r; | ||
661 | shadow_addr = amdgpu_bo_gpu_offset(shadow); | ||
662 | ndw *= 2; | ||
663 | } else { | ||
664 | shadow_addr = 0; | ||
665 | } | ||
666 | |||
648 | r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); | 667 | r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); |
649 | if (r) | 668 | if (r) |
650 | return r; | 669 | return r; |
@@ -655,30 +674,26 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, | |||
655 | 674 | ||
656 | /* walk over the address space and update the page directory */ | 675 | /* walk over the address space and update the page directory */ |
657 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { | 676 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { |
658 | struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj; | 677 | struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; |
659 | uint64_t pde, pt; | 678 | uint64_t pde, pt; |
660 | 679 | ||
661 | if (bo == NULL) | 680 | if (bo == NULL) |
662 | continue; | 681 | continue; |
663 | 682 | ||
664 | if (bo->shadow) { | 683 | if (bo->shadow) { |
665 | struct amdgpu_bo *shadow = bo->shadow; | 684 | struct amdgpu_bo *pt_shadow = bo->shadow; |
666 | 685 | ||
667 | r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); | 686 | r = amdgpu_ttm_bind(&pt_shadow->tbo, |
687 | &pt_shadow->tbo.mem); | ||
668 | if (r) | 688 | if (r) |
669 | return r; | 689 | return r; |
670 | } | 690 | } |
671 | 691 | ||
672 | pt = amdgpu_bo_gpu_offset(bo); | 692 | pt = amdgpu_bo_gpu_offset(bo); |
673 | if (!shadow) { | 693 | if (vm->page_tables[pt_idx].addr == pt) |
674 | if (vm->page_tables[pt_idx].addr == pt) | 694 | continue; |
675 | continue; | 695 | |
676 | vm->page_tables[pt_idx].addr = pt; | 696 | vm->page_tables[pt_idx].addr = pt; |
677 | } else { | ||
678 | if (vm->page_tables[pt_idx].shadow_addr == pt) | ||
679 | continue; | ||
680 | vm->page_tables[pt_idx].shadow_addr = pt; | ||
681 | } | ||
682 | 697 | ||
683 | pde = pd_addr + pt_idx * 8; | 698 | pde = pd_addr + pt_idx * 8; |
684 | if (((last_pde + 8 * count) != pde) || | 699 | if (((last_pde + 8 * count) != pde) || |
@@ -686,6 +701,13 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, | |||
686 | (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { | 701 | (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { |
687 | 702 | ||
688 | if (count) { | 703 | if (count) { |
704 | if (shadow) | ||
705 | amdgpu_vm_do_set_ptes(¶ms, | ||
706 | last_shadow, | ||
707 | last_pt, count, | ||
708 | incr, | ||
709 | AMDGPU_PTE_VALID); | ||
710 | |||
689 | amdgpu_vm_do_set_ptes(¶ms, last_pde, | 711 | amdgpu_vm_do_set_ptes(¶ms, last_pde, |
690 | last_pt, count, incr, | 712 | last_pt, count, incr, |
691 | AMDGPU_PTE_VALID); | 713 | AMDGPU_PTE_VALID); |
@@ -693,34 +715,44 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, | |||
693 | 715 | ||
694 | count = 1; | 716 | count = 1; |
695 | last_pde = pde; | 717 | last_pde = pde; |
718 | last_shadow = shadow_addr + pt_idx * 8; | ||
696 | last_pt = pt; | 719 | last_pt = pt; |
697 | } else { | 720 | } else { |
698 | ++count; | 721 | ++count; |
699 | } | 722 | } |
700 | } | 723 | } |
701 | 724 | ||
702 | if (count) | 725 | if (count) { |
726 | if (vm->page_directory->shadow) | ||
727 | amdgpu_vm_do_set_ptes(¶ms, last_shadow, last_pt, | ||
728 | count, incr, AMDGPU_PTE_VALID); | ||
729 | |||
703 | amdgpu_vm_do_set_ptes(¶ms, last_pde, last_pt, | 730 | amdgpu_vm_do_set_ptes(¶ms, last_pde, last_pt, |
704 | count, incr, AMDGPU_PTE_VALID); | 731 | count, incr, AMDGPU_PTE_VALID); |
732 | } | ||
705 | 733 | ||
706 | if (params.ib->length_dw != 0) { | 734 | if (params.ib->length_dw == 0) { |
707 | amdgpu_ring_pad_ib(ring, params.ib); | 735 | amdgpu_job_free(job); |
708 | amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, | 736 | return 0; |
737 | } | ||
738 | |||
739 | amdgpu_ring_pad_ib(ring, params.ib); | ||
740 | amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, | ||
741 | AMDGPU_FENCE_OWNER_VM); | ||
742 | if (shadow) | ||
743 | amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv, | ||
709 | AMDGPU_FENCE_OWNER_VM); | 744 | AMDGPU_FENCE_OWNER_VM); |
710 | WARN_ON(params.ib->length_dw > ndw); | ||
711 | r = amdgpu_job_submit(job, ring, &vm->entity, | ||
712 | AMDGPU_FENCE_OWNER_VM, &fence); | ||
713 | if (r) | ||
714 | goto error_free; | ||
715 | 745 | ||
716 | amdgpu_bo_fence(pd, fence, true); | 746 | WARN_ON(params.ib->length_dw > ndw); |
717 | fence_put(vm->page_directory_fence); | 747 | r = amdgpu_job_submit(job, ring, &vm->entity, |
718 | vm->page_directory_fence = fence_get(fence); | 748 | AMDGPU_FENCE_OWNER_VM, &fence); |
719 | fence_put(fence); | 749 | if (r) |
750 | goto error_free; | ||
720 | 751 | ||
721 | } else { | 752 | amdgpu_bo_fence(vm->page_directory, fence, true); |
722 | amdgpu_job_free(job); | 753 | dma_fence_put(vm->page_directory_fence); |
723 | } | 754 | vm->page_directory_fence = dma_fence_get(fence); |
755 | dma_fence_put(fence); | ||
724 | 756 | ||
725 | return 0; | 757 | return 0; |
726 | 758 | ||
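The shadow handling added above keeps the existing batching scheme: runs of contiguous page-directory entries that point at contiguous page tables are accumulated in count/last_pde/last_pt and flushed with a single amdgpu_vm_do_set_ptes() call (now once for the shadow and once for the real directory). A standalone illustration of that run-coalescing idea (plain C, made-up addresses):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* page tables backing PDE 0..5; entries 2..4 happen to be contiguous */
	uint64_t pt_addr[6] = { 0x1000, 0x9000, 0x3000, 0x4000, 0x5000, 0xf000 };
	uint64_t last_pde = ~0ull, last_pt = ~0ull;
	unsigned count = 0, incr = 0x1000;

	for (unsigned idx = 0; idx < 6; ++idx) {
		uint64_t pde = idx * 8;		/* byte offset of the PDE */
		uint64_t pt = pt_addr[idx];

		if (last_pde + 8 * count != pde ||
		    last_pt + count * incr != pt) {
			if (count)
				printf("write %u PDEs starting at +0x%llx\n",
				       count, (unsigned long long)last_pde);
			count = 1;		/* start a new run */
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;		/* extend the current run */
		}
	}
	if (count)
		printf("write %u PDEs starting at +0x%llx\n",
		       count, (unsigned long long)last_pde);
	return 0;
}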
@@ -729,29 +761,6 @@ error_free: | |||
729 | return r; | 761 | return r; |
730 | } | 762 | } |
731 | 763 | ||
732 | /* | ||
733 | * amdgpu_vm_update_pdes - make sure that page directory is valid | ||
734 | * | ||
735 | * @adev: amdgpu_device pointer | ||
736 | * @vm: requested vm | ||
737 | * @start: start of GPU address range | ||
738 | * @end: end of GPU address range | ||
739 | * | ||
740 | * Allocates new page tables if necessary | ||
741 | * and updates the page directory. | ||
742 | * Returns 0 for success, error for failure. | ||
743 | */ | ||
744 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
745 | struct amdgpu_vm *vm) | ||
746 | { | ||
747 | int r; | ||
748 | |||
749 | r = amdgpu_vm_update_pd_or_shadow(adev, vm, true); | ||
750 | if (r) | ||
751 | return r; | ||
752 | return amdgpu_vm_update_pd_or_shadow(adev, vm, false); | ||
753 | } | ||
754 | |||
755 | /** | 764 | /** |
756 | * amdgpu_vm_update_ptes - make sure that page tables are valid | 765 | * amdgpu_vm_update_ptes - make sure that page tables are valid |
757 | * | 766 | * |
@@ -781,11 +790,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, | |||
781 | /* initialize the variables */ | 790 | /* initialize the variables */ |
782 | addr = start; | 791 | addr = start; |
783 | pt_idx = addr >> amdgpu_vm_block_size; | 792 | pt_idx = addr >> amdgpu_vm_block_size; |
784 | pt = vm->page_tables[pt_idx].entry.robj; | 793 | pt = vm->page_tables[pt_idx].bo; |
785 | if (params->shadow) { | 794 | if (params->shadow) { |
786 | if (!pt->shadow) | 795 | if (!pt->shadow) |
787 | return; | 796 | return; |
788 | pt = vm->page_tables[pt_idx].entry.robj->shadow; | 797 | pt = pt->shadow; |
789 | } | 798 | } |
790 | if ((addr & ~mask) == (end & ~mask)) | 799 | if ((addr & ~mask) == (end & ~mask)) |
791 | nptes = end - addr; | 800 | nptes = end - addr; |
@@ -804,11 +813,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, | |||
804 | /* walk over the address space and update the page tables */ | 813 | /* walk over the address space and update the page tables */ |
805 | while (addr < end) { | 814 | while (addr < end) { |
806 | pt_idx = addr >> amdgpu_vm_block_size; | 815 | pt_idx = addr >> amdgpu_vm_block_size; |
807 | pt = vm->page_tables[pt_idx].entry.robj; | 816 | pt = vm->page_tables[pt_idx].bo; |
808 | if (params->shadow) { | 817 | if (params->shadow) { |
809 | if (!pt->shadow) | 818 | if (!pt->shadow) |
810 | return; | 819 | return; |
811 | pt = vm->page_tables[pt_idx].entry.robj->shadow; | 820 | pt = pt->shadow; |
812 | } | 821 | } |
813 | 822 | ||
814 | if ((addr & ~mask) == (end & ~mask)) | 823 | if ((addr & ~mask) == (end & ~mask)) |
@@ -929,20 +938,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, | |||
929 | * Returns 0 for success, -EINVAL for failure. | 938 | * Returns 0 for success, -EINVAL for failure. |
930 | */ | 939 | */ |
931 | static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | 940 | static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, |
932 | struct fence *exclusive, | 941 | struct dma_fence *exclusive, |
933 | uint64_t src, | 942 | uint64_t src, |
934 | dma_addr_t *pages_addr, | 943 | dma_addr_t *pages_addr, |
935 | struct amdgpu_vm *vm, | 944 | struct amdgpu_vm *vm, |
936 | uint64_t start, uint64_t last, | 945 | uint64_t start, uint64_t last, |
937 | uint32_t flags, uint64_t addr, | 946 | uint32_t flags, uint64_t addr, |
938 | struct fence **fence) | 947 | struct dma_fence **fence) |
939 | { | 948 | { |
940 | struct amdgpu_ring *ring; | 949 | struct amdgpu_ring *ring; |
941 | void *owner = AMDGPU_FENCE_OWNER_VM; | 950 | void *owner = AMDGPU_FENCE_OWNER_VM; |
942 | unsigned nptes, ncmds, ndw; | 951 | unsigned nptes, ncmds, ndw; |
943 | struct amdgpu_job *job; | 952 | struct amdgpu_job *job; |
944 | struct amdgpu_pte_update_params params; | 953 | struct amdgpu_pte_update_params params; |
945 | struct fence *f = NULL; | 954 | struct dma_fence *f = NULL; |
946 | int r; | 955 | int r; |
947 | 956 | ||
948 | memset(¶ms, 0, sizeof(params)); | 957 | memset(¶ms, 0, sizeof(params)); |
@@ -1045,10 +1054,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |||
1045 | 1054 | ||
1046 | amdgpu_bo_fence(vm->page_directory, f, true); | 1055 | amdgpu_bo_fence(vm->page_directory, f, true); |
1047 | if (fence) { | 1056 | if (fence) { |
1048 | fence_put(*fence); | 1057 | dma_fence_put(*fence); |
1049 | *fence = fence_get(f); | 1058 | *fence = dma_fence_get(f); |
1050 | } | 1059 | } |
1051 | fence_put(f); | 1060 | dma_fence_put(f); |
1052 | return 0; | 1061 | return 0; |
1053 | 1062 | ||
1054 | error_free: | 1063 | error_free: |
@@ -1065,8 +1074,8 @@ error_free: | |||
1065 | * @pages_addr: DMA addresses to use for mapping | 1074 | * @pages_addr: DMA addresses to use for mapping |
1066 | * @vm: requested vm | 1075 | * @vm: requested vm |
1067 | * @mapping: mapped range and flags to use for the update | 1076 | * @mapping: mapped range and flags to use for the update |
1068 | * @addr: addr to set the area to | ||
1069 | * @flags: HW flags for the mapping | 1077 | * @flags: HW flags for the mapping |
1078 | * @nodes: array of drm_mm_nodes with the MC addresses | ||
1070 | * @fence: optional resulting fence | 1079 | * @fence: optional resulting fence |
1071 | * | 1080 | * |
1072 | * Split the mapping into smaller chunks so that each update fits | 1081 | * Split the mapping into smaller chunks so that each update fits |
@@ -1074,17 +1083,16 @@ error_free: | |||
1074 | * Returns 0 for success, -EINVAL for failure. | 1083 | * Returns 0 for success, -EINVAL for failure. |
1075 | */ | 1084 | */ |
1076 | static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, | 1085 | static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, |
1077 | struct fence *exclusive, | 1086 | struct dma_fence *exclusive, |
1078 | uint32_t gtt_flags, | 1087 | uint32_t gtt_flags, |
1079 | dma_addr_t *pages_addr, | 1088 | dma_addr_t *pages_addr, |
1080 | struct amdgpu_vm *vm, | 1089 | struct amdgpu_vm *vm, |
1081 | struct amdgpu_bo_va_mapping *mapping, | 1090 | struct amdgpu_bo_va_mapping *mapping, |
1082 | uint32_t flags, uint64_t addr, | 1091 | uint32_t flags, |
1083 | struct fence **fence) | 1092 | struct drm_mm_node *nodes, |
1093 | struct dma_fence **fence) | ||
1084 | { | 1094 | { |
1085 | const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE; | 1095 | uint64_t pfn, src = 0, start = mapping->it.start; |
1086 | |||
1087 | uint64_t src = 0, start = mapping->it.start; | ||
1088 | int r; | 1096 | int r; |
1089 | 1097 | ||
1090 | /* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here | 1098 | /* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here |
@@ -1097,23 +1105,40 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, | |||
1097 | 1105 | ||
1098 | trace_amdgpu_vm_bo_update(mapping); | 1106 | trace_amdgpu_vm_bo_update(mapping); |
1099 | 1107 | ||
1100 | if (pages_addr) { | 1108 | pfn = mapping->offset >> PAGE_SHIFT; |
1101 | if (flags == gtt_flags) | 1109 | if (nodes) { |
1102 | src = adev->gart.table_addr + (addr >> 12) * 8; | 1110 | while (pfn >= nodes->size) { |
1103 | addr = 0; | 1111 | pfn -= nodes->size; |
1112 | ++nodes; | ||
1113 | } | ||
1104 | } | 1114 | } |
1105 | addr += mapping->offset; | ||
1106 | 1115 | ||
1107 | if (!pages_addr || src) | 1116 | do { |
1108 | return amdgpu_vm_bo_update_mapping(adev, exclusive, | 1117 | uint64_t max_entries; |
1109 | src, pages_addr, vm, | 1118 | uint64_t addr, last; |
1110 | start, mapping->it.last, | 1119 | |
1111 | flags, addr, fence); | 1120 | if (nodes) { |
1121 | addr = nodes->start << PAGE_SHIFT; | ||
1122 | max_entries = (nodes->size - pfn) * | ||
1123 | (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); | ||
1124 | } else { | ||
1125 | addr = 0; | ||
1126 | max_entries = S64_MAX; | ||
1127 | } | ||
1112 | 1128 | ||
1113 | while (start != mapping->it.last + 1) { | 1129 | if (pages_addr) { |
1114 | uint64_t last; | 1130 | if (flags == gtt_flags) |
1131 | src = adev->gart.table_addr + | ||
1132 | (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8; | ||
1133 | else | ||
1134 | max_entries = min(max_entries, 16ull * 1024ull); | ||
1135 | addr = 0; | ||
1136 | } else if (flags & AMDGPU_PTE_VALID) { | ||
1137 | addr += adev->vm_manager.vram_base_offset; | ||
1138 | } | ||
1139 | addr += pfn << PAGE_SHIFT; | ||
1115 | 1140 | ||
1116 | last = min((uint64_t)mapping->it.last, start + max_size - 1); | 1141 | last = min((uint64_t)mapping->it.last, start + max_entries - 1); |
1117 | r = amdgpu_vm_bo_update_mapping(adev, exclusive, | 1142 | r = amdgpu_vm_bo_update_mapping(adev, exclusive, |
1118 | src, pages_addr, vm, | 1143 | src, pages_addr, vm, |
1119 | start, last, flags, addr, | 1144 | start, last, flags, addr, |
@@ -1121,9 +1146,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, | |||
1121 | if (r) | 1146 | if (r) |
1122 | return r; | 1147 | return r; |
1123 | 1148 | ||
1149 | pfn += last - start + 1; | ||
1150 | if (nodes && nodes->size == pfn) { | ||
1151 | pfn = 0; | ||
1152 | ++nodes; | ||
1153 | } | ||
1124 | start = last + 1; | 1154 | start = last + 1; |
1125 | addr += max_size * AMDGPU_GPU_PAGE_SIZE; | 1155 | |
1126 | } | 1156 | } while (unlikely(start != mapping->it.last + 1)); |
1127 | 1157 | ||
1128 | return 0; | 1158 | return 0; |
1129 | } | 1159 | } |
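Because a VRAM BO can now be backed by several drm_mm nodes, amdgpu_vm_bo_split_mapping() first skips the nodes fully covered by the mapping offset and then emits one update per node. A standalone, simplified illustration of that address arithmetic (sizes in pages, all values invented; "struct node" is only a stand-in for drm_mm_node):

#include <stdio.h>
#include <stdint.h>

struct node { uint64_t start, size; };	/* like drm_mm_node, in pages */

int main(void)
{
	struct node nodes[] = { { 0x1000, 64 }, { 0x5000, 128 }, { 0x9000, 32 } };
	unsigned n = sizeof(nodes) / sizeof(nodes[0]), i = 0;
	uint64_t pfn = 80;		/* mapping starts 80 pages into the BO */
	uint64_t remaining = 100;	/* pages left to map */

	/* skip nodes fully covered by the offset: 80 >= 64 -> second node */
	while (pfn >= nodes[i].size) {
		pfn -= nodes[i].size;
		++i;
	}

	/* one "update" per node until the mapping is exhausted */
	while (remaining && i < n) {
		uint64_t chunk = nodes[i].size - pfn;

		if (chunk > remaining)
			chunk = remaining;
		printf("map %llu pages at page address 0x%llx\n",
		       (unsigned long long)chunk,
		       (unsigned long long)(nodes[i].start + pfn));
		remaining -= chunk;
		pfn = 0;
		++i;
	}
	return 0;
}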
@@ -1147,40 +1177,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |||
1147 | dma_addr_t *pages_addr = NULL; | 1177 | dma_addr_t *pages_addr = NULL; |
1148 | uint32_t gtt_flags, flags; | 1178 | uint32_t gtt_flags, flags; |
1149 | struct ttm_mem_reg *mem; | 1179 | struct ttm_mem_reg *mem; |
1150 | struct fence *exclusive; | 1180 | struct drm_mm_node *nodes; |
1151 | uint64_t addr; | 1181 | struct dma_fence *exclusive; |
1152 | int r; | 1182 | int r; |
1153 | 1183 | ||
1154 | if (clear) { | 1184 | if (clear) { |
1155 | mem = NULL; | 1185 | mem = NULL; |
1156 | addr = 0; | 1186 | nodes = NULL; |
1157 | exclusive = NULL; | 1187 | exclusive = NULL; |
1158 | } else { | 1188 | } else { |
1159 | struct ttm_dma_tt *ttm; | 1189 | struct ttm_dma_tt *ttm; |
1160 | 1190 | ||
1161 | mem = &bo_va->bo->tbo.mem; | 1191 | mem = &bo_va->bo->tbo.mem; |
1162 | addr = (u64)mem->start << PAGE_SHIFT; | 1192 | nodes = mem->mm_node; |
1163 | switch (mem->mem_type) { | 1193 | if (mem->mem_type == TTM_PL_TT) { |
1164 | case TTM_PL_TT: | ||
1165 | ttm = container_of(bo_va->bo->tbo.ttm, struct | 1194 | ttm = container_of(bo_va->bo->tbo.ttm, struct |
1166 | ttm_dma_tt, ttm); | 1195 | ttm_dma_tt, ttm); |
1167 | pages_addr = ttm->dma_address; | 1196 | pages_addr = ttm->dma_address; |
1168 | break; | ||
1169 | |||
1170 | case TTM_PL_VRAM: | ||
1171 | addr += adev->vm_manager.vram_base_offset; | ||
1172 | break; | ||
1173 | |||
1174 | default: | ||
1175 | break; | ||
1176 | } | 1197 | } |
1177 | |||
1178 | exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); | 1198 | exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); |
1179 | } | 1199 | } |
1180 | 1200 | ||
1181 | flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); | 1201 | flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); |
1182 | gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && | 1202 | gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && |
1183 | adev == bo_va->bo->adev) ? flags : 0; | 1203 | adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0; |
1184 | 1204 | ||
1185 | spin_lock(&vm->status_lock); | 1205 | spin_lock(&vm->status_lock); |
1186 | if (!list_empty(&bo_va->vm_status)) | 1206 | if (!list_empty(&bo_va->vm_status)) |
@@ -1190,7 +1210,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |||
1190 | list_for_each_entry(mapping, &bo_va->invalids, list) { | 1210 | list_for_each_entry(mapping, &bo_va->invalids, list) { |
1191 | r = amdgpu_vm_bo_split_mapping(adev, exclusive, | 1211 | r = amdgpu_vm_bo_split_mapping(adev, exclusive, |
1192 | gtt_flags, pages_addr, vm, | 1212 | gtt_flags, pages_addr, vm, |
1193 | mapping, flags, addr, | 1213 | mapping, flags, nodes, |
1194 | &bo_va->last_pt_update); | 1214 | &bo_va->last_pt_update); |
1195 | if (r) | 1215 | if (r) |
1196 | return r; | 1216 | return r; |
@@ -1405,18 +1425,17 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
1405 | /* walk over the address space and allocate the page tables */ | 1425 | /* walk over the address space and allocate the page tables */ |
1406 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { | 1426 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { |
1407 | struct reservation_object *resv = vm->page_directory->tbo.resv; | 1427 | struct reservation_object *resv = vm->page_directory->tbo.resv; |
1408 | struct amdgpu_bo_list_entry *entry; | ||
1409 | struct amdgpu_bo *pt; | 1428 | struct amdgpu_bo *pt; |
1410 | 1429 | ||
1411 | entry = &vm->page_tables[pt_idx].entry; | 1430 | if (vm->page_tables[pt_idx].bo) |
1412 | if (entry->robj) | ||
1413 | continue; | 1431 | continue; |
1414 | 1432 | ||
1415 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, | 1433 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, |
1416 | AMDGPU_GPU_PAGE_SIZE, true, | 1434 | AMDGPU_GPU_PAGE_SIZE, true, |
1417 | AMDGPU_GEM_DOMAIN_VRAM, | 1435 | AMDGPU_GEM_DOMAIN_VRAM, |
1418 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | | 1436 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | |
1419 | AMDGPU_GEM_CREATE_SHADOW, | 1437 | AMDGPU_GEM_CREATE_SHADOW | |
1438 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1420 | NULL, resv, &pt); | 1439 | NULL, resv, &pt); |
1421 | if (r) | 1440 | if (r) |
1422 | goto error_free; | 1441 | goto error_free; |
@@ -1442,11 +1461,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
1442 | } | 1461 | } |
1443 | } | 1462 | } |
1444 | 1463 | ||
1445 | entry->robj = pt; | 1464 | vm->page_tables[pt_idx].bo = pt; |
1446 | entry->priority = 0; | ||
1447 | entry->tv.bo = &entry->robj->tbo; | ||
1448 | entry->tv.shared = true; | ||
1449 | entry->user_pages = NULL; | ||
1450 | vm->page_tables[pt_idx].addr = 0; | 1465 | vm->page_tables[pt_idx].addr = 0; |
1451 | } | 1466 | } |
1452 | 1467 | ||
@@ -1547,7 +1562,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |||
1547 | kfree(mapping); | 1562 | kfree(mapping); |
1548 | } | 1563 | } |
1549 | 1564 | ||
1550 | fence_put(bo_va->last_pt_update); | 1565 | dma_fence_put(bo_va->last_pt_update); |
1551 | kfree(bo_va); | 1566 | kfree(bo_va); |
1552 | } | 1567 | } |
1553 | 1568 | ||
@@ -1626,7 +1641,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1626 | r = amdgpu_bo_create(adev, pd_size, align, true, | 1641 | r = amdgpu_bo_create(adev, pd_size, align, true, |
1627 | AMDGPU_GEM_DOMAIN_VRAM, | 1642 | AMDGPU_GEM_DOMAIN_VRAM, |
1628 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | | 1643 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | |
1629 | AMDGPU_GEM_CREATE_SHADOW, | 1644 | AMDGPU_GEM_CREATE_SHADOW | |
1645 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1630 | NULL, NULL, &vm->page_directory); | 1646 | NULL, NULL, &vm->page_directory); |
1631 | if (r) | 1647 | if (r) |
1632 | goto error_free_sched_entity; | 1648 | goto error_free_sched_entity; |
@@ -1697,7 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1697 | } | 1713 | } |
1698 | 1714 | ||
1699 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) { | 1715 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) { |
1700 | struct amdgpu_bo *pt = vm->page_tables[i].entry.robj; | 1716 | struct amdgpu_bo *pt = vm->page_tables[i].bo; |
1701 | 1717 | ||
1702 | if (!pt) | 1718 | if (!pt) |
1703 | continue; | 1719 | continue; |
@@ -1709,7 +1725,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1709 | 1725 | ||
1710 | amdgpu_bo_unref(&vm->page_directory->shadow); | 1726 | amdgpu_bo_unref(&vm->page_directory->shadow); |
1711 | amdgpu_bo_unref(&vm->page_directory); | 1727 | amdgpu_bo_unref(&vm->page_directory); |
1712 | fence_put(vm->page_directory_fence); | 1728 | dma_fence_put(vm->page_directory_fence); |
1713 | } | 1729 | } |
1714 | 1730 | ||
1715 | /** | 1731 | /** |
@@ -1733,7 +1749,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) | |||
1733 | &adev->vm_manager.ids_lru); | 1749 | &adev->vm_manager.ids_lru); |
1734 | } | 1750 | } |
1735 | 1751 | ||
1736 | adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); | 1752 | adev->vm_manager.fence_context = |
1753 | dma_fence_context_alloc(AMDGPU_MAX_RINGS); | ||
1737 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | 1754 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) |
1738 | adev->vm_manager.seqno[i] = 0; | 1755 | adev->vm_manager.seqno[i] = 0; |
1739 | 1756 | ||
@@ -1755,8 +1772,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) | |||
1755 | for (i = 0; i < AMDGPU_NUM_VM; ++i) { | 1772 | for (i = 0; i < AMDGPU_NUM_VM; ++i) { |
1756 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; | 1773 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; |
1757 | 1774 | ||
1758 | fence_put(adev->vm_manager.ids[i].first); | 1775 | dma_fence_put(adev->vm_manager.ids[i].first); |
1759 | amdgpu_sync_free(&adev->vm_manager.ids[i].active); | 1776 | amdgpu_sync_free(&adev->vm_manager.ids[i].active); |
1760 | fence_put(id->flushed_updates); | 1777 | dma_fence_put(id->flushed_updates); |
1761 | } | 1778 | } |
1762 | } | 1779 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h new file mode 100644 index 000000000000..adbc2f5e5c7f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |||
@@ -0,0 +1,205 @@ | |||
1 | /* | ||
2 | * Copyright 2016 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König | ||
23 | */ | ||
24 | #ifndef __AMDGPU_VM_H__ | ||
25 | #define __AMDGPU_VM_H__ | ||
26 | |||
27 | #include <linux/rbtree.h> | ||
28 | |||
29 | #include "gpu_scheduler.h" | ||
30 | #include "amdgpu_sync.h" | ||
31 | #include "amdgpu_ring.h" | ||
32 | |||
33 | struct amdgpu_bo_va; | ||
34 | struct amdgpu_job; | ||
35 | struct amdgpu_bo_list_entry; | ||
36 | |||
37 | /* | ||
38 | * GPUVM handling | ||
39 | */ | ||
40 | |||
41 | /* maximum number of VMIDs */ | ||
42 | #define AMDGPU_NUM_VM 16 | ||
43 | |||
44 | /* Maximum number of PTEs the hardware can write with one command */ | ||
45 | #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF | ||
46 | |||
47 | /* number of entries in page table */ | ||
48 | #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) | ||
49 | |||
50 | /* PTBs (Page Table Blocks) need to be aligned to 32K */ | ||
51 | #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 | ||
52 | |||
53 | /* LOG2 number of continuous pages for the fragment field */ | ||
54 | #define AMDGPU_LOG2_PAGES_PER_FRAG 4 | ||
55 | |||
56 | #define AMDGPU_PTE_VALID (1 << 0) | ||
57 | #define AMDGPU_PTE_SYSTEM (1 << 1) | ||
58 | #define AMDGPU_PTE_SNOOPED (1 << 2) | ||
59 | |||
60 | /* VI only */ | ||
61 | #define AMDGPU_PTE_EXECUTABLE (1 << 4) | ||
62 | |||
63 | #define AMDGPU_PTE_READABLE (1 << 5) | ||
64 | #define AMDGPU_PTE_WRITEABLE (1 << 6) | ||
65 | |||
66 | #define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7) | ||
67 | |||
68 | /* How to program VM fault handling */ | ||
69 | #define AMDGPU_VM_FAULT_STOP_NEVER 0 | ||
70 | #define AMDGPU_VM_FAULT_STOP_FIRST 1 | ||
71 | #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 | ||
72 | |||
73 | struct amdgpu_vm_pt { | ||
74 | struct amdgpu_bo *bo; | ||
75 | uint64_t addr; | ||
76 | }; | ||
77 | |||
78 | struct amdgpu_vm { | ||
79 | /* tree of virtual addresses mapped */ | ||
80 | struct rb_root va; | ||
81 | |||
82 | /* protecting invalidated */ | ||
83 | spinlock_t status_lock; | ||
84 | |||
85 | /* BOs moved, but not yet updated in the PT */ | ||
86 | struct list_head invalidated; | ||
87 | |||
88 | /* BOs cleared in the PT because of a move */ | ||
89 | struct list_head cleared; | ||
90 | |||
91 | /* BO mappings freed, but not yet updated in the PT */ | ||
92 | struct list_head freed; | ||
93 | |||
94 | /* contains the page directory */ | ||
95 | struct amdgpu_bo *page_directory; | ||
96 | unsigned max_pde_used; | ||
97 | struct dma_fence *page_directory_fence; | ||
98 | uint64_t last_eviction_counter; | ||
99 | |||
100 | /* array of page tables, one for each page directory entry */ | ||
101 | struct amdgpu_vm_pt *page_tables; | ||
102 | |||
103 | /* for id and flush management per ring */ | ||
104 | struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS]; | ||
105 | |||
106 | /* protecting freed */ | ||
107 | spinlock_t freed_lock; | ||
108 | |||
109 | /* Scheduler entity for page table updates */ | ||
110 | struct amd_sched_entity entity; | ||
111 | |||
112 | /* client id */ | ||
113 | u64 client_id; | ||
114 | }; | ||
115 | |||
116 | struct amdgpu_vm_id { | ||
117 | struct list_head list; | ||
118 | struct dma_fence *first; | ||
119 | struct amdgpu_sync active; | ||
120 | struct dma_fence *last_flush; | ||
121 | atomic64_t owner; | ||
122 | |||
123 | uint64_t pd_gpu_addr; | ||
124 | /* last flushed PD/PT update */ | ||
125 | struct dma_fence *flushed_updates; | ||
126 | |||
127 | uint32_t current_gpu_reset_count; | ||
128 | |||
129 | uint32_t gds_base; | ||
130 | uint32_t gds_size; | ||
131 | uint32_t gws_base; | ||
132 | uint32_t gws_size; | ||
133 | uint32_t oa_base; | ||
134 | uint32_t oa_size; | ||
135 | }; | ||
136 | |||
137 | struct amdgpu_vm_manager { | ||
138 | /* Handling of VMIDs */ | ||
139 | struct mutex lock; | ||
140 | unsigned num_ids; | ||
141 | struct list_head ids_lru; | ||
142 | struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; | ||
143 | |||
144 | /* Handling of VM fences */ | ||
145 | u64 fence_context; | ||
146 | unsigned seqno[AMDGPU_MAX_RINGS]; | ||
147 | |||
148 | uint32_t max_pfn; | ||
149 | /* vram base address for page table entry */ | ||
150 | u64 vram_base_offset; | ||
151 | /* is vm enabled? */ | ||
152 | bool enabled; | ||
153 | /* vm pte handling */ | ||
154 | const struct amdgpu_vm_pte_funcs *vm_pte_funcs; | ||
155 | struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; | ||
156 | unsigned vm_pte_num_rings; | ||
157 | atomic_t vm_pte_next_ring; | ||
158 | /* client id counter */ | ||
159 | atomic64_t client_counter; | ||
160 | }; | ||
161 | |||
162 | void amdgpu_vm_manager_init(struct amdgpu_device *adev); | ||
163 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev); | ||
164 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
165 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
166 | void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, | ||
167 | struct list_head *validated, | ||
168 | struct amdgpu_bo_list_entry *entry); | ||
169 | int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
170 | int (*callback)(void *p, struct amdgpu_bo *bo), | ||
171 | void *param); | ||
172 | void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, | ||
173 | struct amdgpu_vm *vm); | ||
174 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | ||
175 | struct amdgpu_sync *sync, struct dma_fence *fence, | ||
176 | struct amdgpu_job *job); | ||
177 | int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); | ||
178 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); | ||
179 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
180 | struct amdgpu_vm *vm); | ||
181 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | ||
182 | struct amdgpu_vm *vm); | ||
183 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
184 | struct amdgpu_sync *sync); | ||
185 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | ||
186 | struct amdgpu_bo_va *bo_va, | ||
187 | bool clear); | ||
188 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | ||
189 | struct amdgpu_bo *bo); | ||
190 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | ||
191 | struct amdgpu_bo *bo); | ||
192 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | ||
193 | struct amdgpu_vm *vm, | ||
194 | struct amdgpu_bo *bo); | ||
195 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | ||
196 | struct amdgpu_bo_va *bo_va, | ||
197 | uint64_t addr, uint64_t offset, | ||
198 | uint64_t size, uint32_t flags); | ||
199 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | ||
200 | struct amdgpu_bo_va *bo_va, | ||
201 | uint64_t addr); | ||
202 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | ||
203 | struct amdgpu_bo_va *bo_va); | ||
204 | |||
205 | #endif | ||
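The header above is the whole per-VM interface the rest of amdgpu programs against. As an illustrative sketch only — built purely from the prototypes above, with locking, BO reservation and most error handling omitted, and with adev/vm/bo/addr/size assumed to exist in the caller's context as hypothetical, page-aligned values — a mapping's lifecycle runs roughly:

	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_vm_init(adev, vm);            /* allocate the page directory */
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_add(adev, vm, bo);  /* start tracking this BO in the VM */

	/* map 'size' bytes of the BO at GPU address 'addr' (illustrative flags) */
	r = amdgpu_vm_bo_map(adev, bo_va, addr, 0, size,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);

	r = amdgpu_vm_bo_update(adev, bo_va, false);  /* actually write the PTEs */

	/* ... command submission uses the mapping ... */

	amdgpu_vm_bo_unmap(adev, bo_va, addr);   /* queues the range for clearing (vm->freed) */
	amdgpu_vm_bo_rmv(adev, bo_va);
	amdgpu_vm_fini(adev, vm);                /* tears the page tables down again */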
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c new file mode 100644 index 000000000000..180eed7c8bca --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | |||
@@ -0,0 +1,222 @@ | |||
1 | /* | ||
2 | * Copyright 2016 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König | ||
23 | */ | ||
24 | |||
25 | #include <drm/drmP.h> | ||
26 | #include "amdgpu.h" | ||
27 | |||
28 | struct amdgpu_vram_mgr { | ||
29 | struct drm_mm mm; | ||
30 | spinlock_t lock; | ||
31 | }; | ||
32 | |||
33 | /** | ||
34 | * amdgpu_vram_mgr_init - init VRAM manager and DRM MM | ||
35 | * | ||
36 | * @man: TTM memory type manager | ||
37 | * @p_size: maximum size of VRAM | ||
38 | * | ||
39 | * Allocate and initialize the VRAM manager. | ||
40 | */ | ||
41 | static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man, | ||
42 | unsigned long p_size) | ||
43 | { | ||
44 | struct amdgpu_vram_mgr *mgr; | ||
45 | |||
46 | mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); | ||
47 | if (!mgr) | ||
48 | return -ENOMEM; | ||
49 | |||
50 | drm_mm_init(&mgr->mm, 0, p_size); | ||
51 | spin_lock_init(&mgr->lock); | ||
52 | man->priv = mgr; | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * amdgpu_vram_mgr_fini - free and destroy VRAM manager | ||
58 | * | ||
59 | * @man: TTM memory type manager | ||
60 | * | ||
61 | * Destroy and free the VRAM manager, returns -EBUSY if ranges are still | ||
62 | * allocated inside it. | ||
63 | */ | ||
64 | static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man) | ||
65 | { | ||
66 | struct amdgpu_vram_mgr *mgr = man->priv; | ||
67 | |||
68 | spin_lock(&mgr->lock); | ||
69 | if (!drm_mm_clean(&mgr->mm)) { | ||
70 | spin_unlock(&mgr->lock); | ||
71 | return -EBUSY; | ||
72 | } | ||
73 | |||
74 | drm_mm_takedown(&mgr->mm); | ||
75 | spin_unlock(&mgr->lock); | ||
76 | kfree(mgr); | ||
77 | man->priv = NULL; | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | /** | ||
82 | * amdgpu_vram_mgr_new - allocate new ranges | ||
83 | * | ||
84 | * @man: TTM memory type manager | ||
85 | * @tbo: TTM BO we need this range for | ||
86 | * @place: placement flags and restrictions | ||
87 | * @mem: the resulting mem object | ||
88 | * | ||
89 | * Allocate VRAM for the given BO. | ||
90 | */ | ||
91 | static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, | ||
92 | struct ttm_buffer_object *tbo, | ||
93 | const struct ttm_place *place, | ||
94 | struct ttm_mem_reg *mem) | ||
95 | { | ||
96 | struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo); | ||
97 | struct amdgpu_vram_mgr *mgr = man->priv; | ||
98 | struct drm_mm *mm = &mgr->mm; | ||
99 | struct drm_mm_node *nodes; | ||
100 | enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT; | ||
101 | enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT; | ||
102 | unsigned long lpfn, num_nodes, pages_per_node, pages_left; | ||
103 | unsigned i; | ||
104 | int r; | ||
105 | |||
106 | lpfn = place->lpfn; | ||
107 | if (!lpfn) | ||
108 | lpfn = man->size; | ||
109 | |||
110 | if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS || | ||
111 | amdgpu_vram_page_split == -1) { | ||
112 | pages_per_node = ~0ul; | ||
113 | num_nodes = 1; | ||
114 | } else { | ||
115 | pages_per_node = max((uint32_t)amdgpu_vram_page_split, | ||
116 | mem->page_alignment); | ||
117 | num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); | ||
118 | } | ||
119 | |||
120 | nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL); | ||
121 | if (!nodes) | ||
122 | return -ENOMEM; | ||
123 | |||
124 | if (place->flags & TTM_PL_FLAG_TOPDOWN) { | ||
125 | sflags = DRM_MM_SEARCH_BELOW; | ||
126 | aflags = DRM_MM_CREATE_TOP; | ||
127 | } | ||
128 | |||
129 | pages_left = mem->num_pages; | ||
130 | |||
131 | spin_lock(&mgr->lock); | ||
132 | for (i = 0; i < num_nodes; ++i) { | ||
133 | unsigned long pages = min(pages_left, pages_per_node); | ||
134 | uint32_t alignment = mem->page_alignment; | ||
135 | |||
136 | if (pages == pages_per_node) | ||
137 | alignment = pages_per_node; | ||
138 | else | ||
139 | sflags |= DRM_MM_SEARCH_BEST; | ||
140 | |||
141 | r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages, | ||
142 | alignment, 0, | ||
143 | place->fpfn, lpfn, | ||
144 | sflags, aflags); | ||
145 | if (unlikely(r)) | ||
146 | goto error; | ||
147 | |||
148 | pages_left -= pages; | ||
149 | } | ||
150 | spin_unlock(&mgr->lock); | ||
151 | |||
152 | mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET; | ||
153 | mem->mm_node = nodes; | ||
154 | |||
155 | return 0; | ||
156 | |||
157 | error: | ||
158 | while (i--) | ||
159 | drm_mm_remove_node(&nodes[i]); | ||
160 | spin_unlock(&mgr->lock); | ||
161 | |||
162 | kfree(nodes); | ||
163 | return r == -ENOSPC ? 0 : r; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * amdgpu_vram_mgr_del - free ranges | ||
168 | * | ||
169 | * @man: TTM memory type manager | ||
172 | * @mem: TTM memory object | ||
173 | * | ||
174 | * Free the allocated VRAM again. | ||
175 | */ | ||
176 | static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, | ||
177 | struct ttm_mem_reg *mem) | ||
178 | { | ||
179 | struct amdgpu_vram_mgr *mgr = man->priv; | ||
180 | struct drm_mm_node *nodes = mem->mm_node; | ||
181 | unsigned pages = mem->num_pages; | ||
182 | |||
183 | if (!mem->mm_node) | ||
184 | return; | ||
185 | |||
186 | spin_lock(&mgr->lock); | ||
187 | while (pages) { | ||
188 | pages -= nodes->size; | ||
189 | drm_mm_remove_node(nodes); | ||
190 | ++nodes; | ||
191 | } | ||
192 | spin_unlock(&mgr->lock); | ||
193 | |||
194 | kfree(mem->mm_node); | ||
195 | mem->mm_node = NULL; | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * amdgpu_vram_mgr_debug - dump VRAM table | ||
200 | * | ||
201 | * @man: TTM memory type manager | ||
202 | * @prefix: text prefix | ||
203 | * | ||
204 | * Dump the table content using printk. | ||
205 | */ | ||
206 | static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, | ||
207 | const char *prefix) | ||
208 | { | ||
209 | struct amdgpu_vram_mgr *mgr = man->priv; | ||
210 | |||
211 | spin_lock(&mgr->lock); | ||
212 | drm_mm_debug_table(&mgr->mm, prefix); | ||
213 | spin_unlock(&mgr->lock); | ||
214 | } | ||
215 | |||
216 | const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { | ||
217 | amdgpu_vram_mgr_init, | ||
218 | amdgpu_vram_mgr_fini, | ||
219 | amdgpu_vram_mgr_new, | ||
220 | amdgpu_vram_mgr_del, | ||
221 | amdgpu_vram_mgr_debug | ||
222 | }; | ||
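To make the splitting in amdgpu_vram_mgr_new() concrete (numbers invented for illustration, not taken from the patch): with amdgpu_vram_page_split at 512, a non-contiguous BO of 1300 pages whose page_alignment is smaller than 512 gets pages_per_node = 512 and num_nodes = DIV_ROUND_UP(1300, 512) = 3, so the loop carves out nodes of 512, 512 and 276 pages. Only the last, smaller node drops the pages_per_node alignment and searches with DRM_MM_SEARCH_BEST, and because more than one node is used, mem->start is reported as AMDGPU_BO_INVALID_OFFSET rather than a single linear offset.

The function table just above uses positional initializers, so its meaning depends on the field order of struct ttm_mem_type_manager_func. Written with designated initializers — a sketch that assumes the TTM callback names of this kernel generation (init, takedown, get_node, put_node, debug) — it reads:

	const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
		.init     = amdgpu_vram_mgr_init,   /* set up the drm_mm covering VRAM */
		.takedown = amdgpu_vram_mgr_fini,   /* tear it down again */
		.get_node = amdgpu_vram_mgr_new,    /* allocate ranges for a BO */
		.put_node = amdgpu_vram_mgr_del,    /* free those ranges */
		.debug    = amdgpu_vram_mgr_debug,  /* dump the allocator state */
	};

TTM then calls these through the memory type manager; the driver presumably wires the table up when the VRAM domain is initialised, along the lines of man->func = &amdgpu_vram_mgr_func;.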
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index f7d236f95e74..8c9bc75a9c2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | #include "atom-bits.h" | 32 | #include "atom-bits.h" |
33 | #include "atombios_encoders.h" | 33 | #include "atombios_encoders.h" |
34 | #include "atombios_crtc.h" | ||
34 | #include "amdgpu_atombios.h" | 35 | #include "amdgpu_atombios.h" |
35 | #include "amdgpu_pll.h" | 36 | #include "amdgpu_pll.h" |
36 | #include "amdgpu_connectors.h" | 37 | #include "amdgpu_connectors.h" |
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 1d8c375a3561..e9b1964d4e61 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
@@ -887,9 +887,6 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) | |||
887 | { | 887 | { |
888 | struct ci_power_info *pi = ci_get_pi(adev); | 888 | struct ci_power_info *pi = ci_get_pi(adev); |
889 | 889 | ||
890 | if (pi->uvd_power_gated == gate) | ||
891 | return; | ||
892 | |||
893 | pi->uvd_power_gated = gate; | 890 | pi->uvd_power_gated = gate; |
894 | 891 | ||
895 | ci_update_uvd_dpm(adev, gate); | 892 | ci_update_uvd_dpm(adev, gate); |
@@ -960,6 +957,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev, | |||
960 | sclk = ps->performance_levels[0].sclk; | 957 | sclk = ps->performance_levels[0].sclk; |
961 | } | 958 | } |
962 | 959 | ||
960 | if (adev->pm.pm_display_cfg.min_core_set_clock > sclk) | ||
961 | sclk = adev->pm.pm_display_cfg.min_core_set_clock; | ||
962 | |||
963 | if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk) | ||
964 | mclk = adev->pm.pm_display_cfg.min_mem_set_clock; | ||
965 | |||
963 | if (rps->vce_active) { | 966 | if (rps->vce_active) { |
964 | if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) | 967 | if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) |
965 | sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; | 968 | sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; |
@@ -2201,6 +2204,11 @@ static int ci_upload_firmware(struct amdgpu_device *adev) | |||
2201 | struct ci_power_info *pi = ci_get_pi(adev); | 2204 | struct ci_power_info *pi = ci_get_pi(adev); |
2202 | int i, ret; | 2205 | int i, ret; |
2203 | 2206 | ||
2207 | if (amdgpu_ci_is_smc_running(adev)) { | ||
2208 | DRM_INFO("smc is running, no need to load smc firmware\n"); | ||
2209 | return 0; | ||
2210 | } | ||
2211 | |||
2204 | for (i = 0; i < adev->usec_timeout; i++) { | 2212 | for (i = 0; i < adev->usec_timeout; i++) { |
2205 | if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK) | 2213 | if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK) |
2206 | break; | 2214 | break; |
@@ -4190,8 +4198,15 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate) | |||
4190 | { | 4198 | { |
4191 | struct ci_power_info *pi = ci_get_pi(adev); | 4199 | struct ci_power_info *pi = ci_get_pi(adev); |
4192 | u32 tmp; | 4200 | u32 tmp; |
4201 | int ret = 0; | ||
4193 | 4202 | ||
4194 | if (!gate) { | 4203 | if (!gate) { |
4204 | /* turn the clocks on when decoding */ | ||
4205 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, | ||
4206 | AMD_CG_STATE_UNGATE); | ||
4207 | if (ret) | ||
4208 | return ret; | ||
4209 | |||
4195 | if (pi->caps_uvd_dpm || | 4210 | if (pi->caps_uvd_dpm || |
4196 | (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0)) | 4211 | (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0)) |
4197 | pi->smc_state_table.UvdBootLevel = 0; | 4212 | pi->smc_state_table.UvdBootLevel = 0; |
@@ -4203,9 +4218,17 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate) | |||
4203 | tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK; | 4218 | tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK; |
4204 | tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT); | 4219 | tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT); |
4205 | WREG32_SMC(ixDPM_TABLE_475, tmp); | 4220 | WREG32_SMC(ixDPM_TABLE_475, tmp); |
4221 | ret = ci_enable_uvd_dpm(adev, true); | ||
4222 | } else { | ||
4223 | ret = ci_enable_uvd_dpm(adev, false); | ||
4224 | if (ret) | ||
4225 | return ret; | ||
4226 | |||
4227 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, | ||
4228 | AMD_CG_STATE_GATE); | ||
4206 | } | 4229 | } |
4207 | 4230 | ||
4208 | return ci_enable_uvd_dpm(adev, !gate); | 4231 | return ret; |
4209 | } | 4232 | } |
4210 | 4233 | ||
4211 | static u8 ci_get_vce_boot_level(struct amdgpu_device *adev) | 4234 | static u8 ci_get_vce_boot_level(struct amdgpu_device *adev) |
@@ -4247,13 +4270,12 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev, | |||
4247 | 4270 | ||
4248 | ret = ci_enable_vce_dpm(adev, true); | 4271 | ret = ci_enable_vce_dpm(adev, true); |
4249 | } else { | 4272 | } else { |
4273 | ret = ci_enable_vce_dpm(adev, false); | ||
4274 | if (ret) | ||
4275 | return ret; | ||
4250 | /* turn the clocks off when not encoding */ | 4276 | /* turn the clocks off when not encoding */ |
4251 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | 4277 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, |
4252 | AMD_CG_STATE_GATE); | 4278 | AMD_CG_STATE_GATE); |
4253 | if (ret) | ||
4254 | return ret; | ||
4255 | |||
4256 | ret = ci_enable_vce_dpm(adev, false); | ||
4257 | } | 4279 | } |
4258 | } | 4280 | } |
4259 | return ret; | 4281 | return ret; |
@@ -5219,6 +5241,7 @@ static void ci_update_current_ps(struct amdgpu_device *adev, | |||
5219 | pi->current_rps = *rps; | 5241 | pi->current_rps = *rps; |
5220 | pi->current_ps = *new_ps; | 5242 | pi->current_ps = *new_ps; |
5221 | pi->current_rps.ps_priv = &pi->current_ps; | 5243 | pi->current_rps.ps_priv = &pi->current_ps; |
5244 | adev->pm.dpm.current_ps = &pi->current_rps; | ||
5222 | } | 5245 | } |
5223 | 5246 | ||
5224 | static void ci_update_requested_ps(struct amdgpu_device *adev, | 5247 | static void ci_update_requested_ps(struct amdgpu_device *adev, |
@@ -5230,6 +5253,7 @@ static void ci_update_requested_ps(struct amdgpu_device *adev, | |||
5230 | pi->requested_rps = *rps; | 5253 | pi->requested_rps = *rps; |
5231 | pi->requested_ps = *new_ps; | 5254 | pi->requested_ps = *new_ps; |
5232 | pi->requested_rps.ps_priv = &pi->requested_ps; | 5255 | pi->requested_rps.ps_priv = &pi->requested_ps; |
5256 | adev->pm.dpm.requested_ps = &pi->requested_rps; | ||
5233 | } | 5257 | } |
5234 | 5258 | ||
5235 | static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev) | 5259 | static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev) |
@@ -5267,8 +5291,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev) | |||
5267 | struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; | 5291 | struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; |
5268 | int ret; | 5292 | int ret; |
5269 | 5293 | ||
5270 | if (amdgpu_ci_is_smc_running(adev)) | ||
5271 | return -EINVAL; | ||
5272 | if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { | 5294 | if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { |
5273 | ci_enable_voltage_control(adev); | 5295 | ci_enable_voltage_control(adev); |
5274 | ret = ci_construct_voltage_tables(adev); | 5296 | ret = ci_construct_voltage_tables(adev); |
@@ -5689,7 +5711,7 @@ static int ci_parse_power_table(struct amdgpu_device *adev) | |||
5689 | adev->pm.dpm.num_ps = state_array->ucNumEntries; | 5711 | adev->pm.dpm.num_ps = state_array->ucNumEntries; |
5690 | 5712 | ||
5691 | /* fill in the vce power states */ | 5713 | /* fill in the vce power states */ |
5692 | for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { | 5714 | for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { |
5693 | u32 sclk, mclk; | 5715 | u32 sclk, mclk; |
5694 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; | 5716 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; |
5695 | clock_info = (union pplib_clock_info *) | 5717 | clock_info = (union pplib_clock_info *) |
@@ -6094,6 +6116,56 @@ static void ci_dpm_print_power_state(struct amdgpu_device *adev, | |||
6094 | amdgpu_dpm_print_ps_status(adev, rps); | 6116 | amdgpu_dpm_print_ps_status(adev, rps); |
6095 | } | 6117 | } |
6096 | 6118 | ||
6119 | static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1, | ||
6120 | const struct ci_pl *ci_cpl2) | ||
6121 | { | ||
6122 | return ((ci_cpl1->mclk == ci_cpl2->mclk) && | ||
6123 | (ci_cpl1->sclk == ci_cpl2->sclk) && | ||
6124 | (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) && | ||
6125 | (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane)); | ||
6126 | } | ||
6127 | |||
6128 | static int ci_check_state_equal(struct amdgpu_device *adev, | ||
6129 | struct amdgpu_ps *cps, | ||
6130 | struct amdgpu_ps *rps, | ||
6131 | bool *equal) | ||
6132 | { | ||
6133 | struct ci_ps *ci_cps; | ||
6134 | struct ci_ps *ci_rps; | ||
6135 | int i; | ||
6136 | |||
6137 | if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) | ||
6138 | return -EINVAL; | ||
6139 | |||
6140 | ci_cps = ci_get_ps(cps); | ||
6141 | ci_rps = ci_get_ps(rps); | ||
6142 | |||
6143 | if (ci_cps == NULL) { | ||
6144 | *equal = false; | ||
6145 | return 0; | ||
6146 | } | ||
6147 | |||
6148 | if (ci_cps->performance_level_count != ci_rps->performance_level_count) { | ||
6149 | |||
6150 | *equal = false; | ||
6151 | return 0; | ||
6152 | } | ||
6153 | |||
6154 | for (i = 0; i < ci_cps->performance_level_count; i++) { | ||
6155 | if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]), | ||
6156 | &(ci_rps->performance_levels[i]))) { | ||
6157 | *equal = false; | ||
6158 | return 0; | ||
6159 | } | ||
6160 | } | ||
6161 | |||
6162 | /* If all performance levels are the same, try to use the UVD clocks to break the tie. */ | ||
6163 | *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); | ||
6164 | *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); | ||
6165 | |||
6166 | return 0; | ||
6167 | } | ||
6168 | |||
6097 | static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low) | 6169 | static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low) |
6098 | { | 6170 | { |
6099 | struct ci_power_info *pi = ci_get_pi(adev); | 6171 | struct ci_power_info *pi = ci_get_pi(adev); |
@@ -6287,12 +6359,19 @@ static int ci_dpm_suspend(void *handle) | |||
6287 | 6359 | ||
6288 | if (adev->pm.dpm_enabled) { | 6360 | if (adev->pm.dpm_enabled) { |
6289 | mutex_lock(&adev->pm.mutex); | 6361 | mutex_lock(&adev->pm.mutex); |
6290 | /* disable dpm */ | 6362 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
6291 | ci_dpm_disable(adev); | 6363 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); |
6292 | /* reset the power state */ | 6364 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
6293 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; | 6365 | AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); |
6366 | adev->pm.dpm.last_user_state = adev->pm.dpm.user_state; | ||
6367 | adev->pm.dpm.last_state = adev->pm.dpm.state; | ||
6368 | adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT; | ||
6369 | adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT; | ||
6294 | mutex_unlock(&adev->pm.mutex); | 6370 | mutex_unlock(&adev->pm.mutex); |
6371 | amdgpu_pm_compute_clocks(adev); | ||
6372 | |||
6295 | } | 6373 | } |
6374 | |||
6296 | return 0; | 6375 | return 0; |
6297 | } | 6376 | } |
6298 | 6377 | ||
@@ -6310,6 +6389,8 @@ static int ci_dpm_resume(void *handle) | |||
6310 | adev->pm.dpm_enabled = false; | 6389 | adev->pm.dpm_enabled = false; |
6311 | else | 6390 | else |
6312 | adev->pm.dpm_enabled = true; | 6391 | adev->pm.dpm_enabled = true; |
6392 | adev->pm.dpm.user_state = adev->pm.dpm.last_user_state; | ||
6393 | adev->pm.dpm.state = adev->pm.dpm.last_state; | ||
6313 | mutex_unlock(&adev->pm.mutex); | 6394 | mutex_unlock(&adev->pm.mutex); |
6314 | if (adev->pm.dpm_enabled) | 6395 | if (adev->pm.dpm_enabled) |
6315 | amdgpu_pm_compute_clocks(adev); | 6396 | amdgpu_pm_compute_clocks(adev); |
@@ -6644,6 +6725,8 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = { | |||
6644 | .set_sclk_od = ci_dpm_set_sclk_od, | 6725 | .set_sclk_od = ci_dpm_set_sclk_od, |
6645 | .get_mclk_od = ci_dpm_get_mclk_od, | 6726 | .get_mclk_od = ci_dpm_get_mclk_od, |
6646 | .set_mclk_od = ci_dpm_set_mclk_od, | 6727 | .set_mclk_od = ci_dpm_set_mclk_od, |
6728 | .check_state_equal = ci_check_state_equal, | ||
6729 | .get_vce_clock_state = amdgpu_get_vce_clock_state, | ||
6647 | }; | 6730 | }; |
6648 | 6731 | ||
6649 | static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) | 6732 | static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) |
@@ -6662,3 +6745,12 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev) | |||
6662 | adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; | 6745 | adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; |
6663 | adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs; | 6746 | adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs; |
6664 | } | 6747 | } |
6748 | |||
6749 | const struct amdgpu_ip_block_version ci_dpm_ip_block = | ||
6750 | { | ||
6751 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
6752 | .major = 7, | ||
6753 | .minor = 0, | ||
6754 | .rev = 0, | ||
6755 | .funcs = &ci_dpm_ip_funcs, | ||
6756 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index a845b6a93b79..302df85893ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c | |||
@@ -1189,18 +1189,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) | |||
1189 | return r; | 1189 | return r; |
1190 | } | 1190 | } |
1191 | 1191 | ||
1192 | static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) | ||
1193 | { | ||
1194 | u32 tmp = RREG32(mmBIOS_SCRATCH_3); | ||
1195 | |||
1196 | if (hung) | ||
1197 | tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
1198 | else | ||
1199 | tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
1200 | |||
1201 | WREG32(mmBIOS_SCRATCH_3, tmp); | ||
1202 | } | ||
1203 | |||
1204 | /** | 1192 | /** |
1205 | * cik_asic_reset - soft reset GPU | 1193 | * cik_asic_reset - soft reset GPU |
1206 | * | 1194 | * |
@@ -1213,11 +1201,12 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu | |||
1213 | static int cik_asic_reset(struct amdgpu_device *adev) | 1201 | static int cik_asic_reset(struct amdgpu_device *adev) |
1214 | { | 1202 | { |
1215 | int r; | 1203 | int r; |
1216 | cik_set_bios_scratch_engine_hung(adev, true); | 1204 | |
1205 | amdgpu_atombios_scratch_regs_engine_hung(adev, true); | ||
1217 | 1206 | ||
1218 | r = cik_gpu_pci_config_reset(adev); | 1207 | r = cik_gpu_pci_config_reset(adev); |
1219 | 1208 | ||
1220 | cik_set_bios_scratch_engine_hung(adev, false); | 1209 | amdgpu_atombios_scratch_regs_engine_hung(adev, false); |
1221 | 1210 | ||
1222 | return r; | 1211 | return r; |
1223 | } | 1212 | } |
@@ -1641,745 +1630,6 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev) | |||
1641 | adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; | 1630 | adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; |
1642 | } | 1631 | } |
1643 | 1632 | ||
1644 | static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = | ||
1645 | { | ||
1646 | /* ORDER MATTERS! */ | ||
1647 | { | ||
1648 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1649 | .major = 1, | ||
1650 | .minor = 0, | ||
1651 | .rev = 0, | ||
1652 | .funcs = &cik_common_ip_funcs, | ||
1653 | }, | ||
1654 | { | ||
1655 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1656 | .major = 7, | ||
1657 | .minor = 0, | ||
1658 | .rev = 0, | ||
1659 | .funcs = &gmc_v7_0_ip_funcs, | ||
1660 | }, | ||
1661 | { | ||
1662 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1663 | .major = 2, | ||
1664 | .minor = 0, | ||
1665 | .rev = 0, | ||
1666 | .funcs = &cik_ih_ip_funcs, | ||
1667 | }, | ||
1668 | { | ||
1669 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1670 | .major = 7, | ||
1671 | .minor = 0, | ||
1672 | .rev = 0, | ||
1673 | .funcs = &amdgpu_pp_ip_funcs, | ||
1674 | }, | ||
1675 | { | ||
1676 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1677 | .major = 8, | ||
1678 | .minor = 2, | ||
1679 | .rev = 0, | ||
1680 | .funcs = &dce_v8_0_ip_funcs, | ||
1681 | }, | ||
1682 | { | ||
1683 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1684 | .major = 7, | ||
1685 | .minor = 2, | ||
1686 | .rev = 0, | ||
1687 | .funcs = &gfx_v7_0_ip_funcs, | ||
1688 | }, | ||
1689 | { | ||
1690 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1691 | .major = 2, | ||
1692 | .minor = 0, | ||
1693 | .rev = 0, | ||
1694 | .funcs = &cik_sdma_ip_funcs, | ||
1695 | }, | ||
1696 | { | ||
1697 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1698 | .major = 4, | ||
1699 | .minor = 2, | ||
1700 | .rev = 0, | ||
1701 | .funcs = &uvd_v4_2_ip_funcs, | ||
1702 | }, | ||
1703 | { | ||
1704 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1705 | .major = 2, | ||
1706 | .minor = 0, | ||
1707 | .rev = 0, | ||
1708 | .funcs = &vce_v2_0_ip_funcs, | ||
1709 | }, | ||
1710 | }; | ||
1711 | |||
1712 | static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] = | ||
1713 | { | ||
1714 | /* ORDER MATTERS! */ | ||
1715 | { | ||
1716 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1717 | .major = 1, | ||
1718 | .minor = 0, | ||
1719 | .rev = 0, | ||
1720 | .funcs = &cik_common_ip_funcs, | ||
1721 | }, | ||
1722 | { | ||
1723 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1724 | .major = 7, | ||
1725 | .minor = 0, | ||
1726 | .rev = 0, | ||
1727 | .funcs = &gmc_v7_0_ip_funcs, | ||
1728 | }, | ||
1729 | { | ||
1730 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1731 | .major = 2, | ||
1732 | .minor = 0, | ||
1733 | .rev = 0, | ||
1734 | .funcs = &cik_ih_ip_funcs, | ||
1735 | }, | ||
1736 | { | ||
1737 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1738 | .major = 7, | ||
1739 | .minor = 0, | ||
1740 | .rev = 0, | ||
1741 | .funcs = &amdgpu_pp_ip_funcs, | ||
1742 | }, | ||
1743 | { | ||
1744 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1745 | .major = 8, | ||
1746 | .minor = 2, | ||
1747 | .rev = 0, | ||
1748 | .funcs = &dce_virtual_ip_funcs, | ||
1749 | }, | ||
1750 | { | ||
1751 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1752 | .major = 7, | ||
1753 | .minor = 2, | ||
1754 | .rev = 0, | ||
1755 | .funcs = &gfx_v7_0_ip_funcs, | ||
1756 | }, | ||
1757 | { | ||
1758 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1759 | .major = 2, | ||
1760 | .minor = 0, | ||
1761 | .rev = 0, | ||
1762 | .funcs = &cik_sdma_ip_funcs, | ||
1763 | }, | ||
1764 | { | ||
1765 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1766 | .major = 4, | ||
1767 | .minor = 2, | ||
1768 | .rev = 0, | ||
1769 | .funcs = &uvd_v4_2_ip_funcs, | ||
1770 | }, | ||
1771 | { | ||
1772 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1773 | .major = 2, | ||
1774 | .minor = 0, | ||
1775 | .rev = 0, | ||
1776 | .funcs = &vce_v2_0_ip_funcs, | ||
1777 | }, | ||
1778 | }; | ||
1779 | |||
1780 | static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = | ||
1781 | { | ||
1782 | /* ORDER MATTERS! */ | ||
1783 | { | ||
1784 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1785 | .major = 1, | ||
1786 | .minor = 0, | ||
1787 | .rev = 0, | ||
1788 | .funcs = &cik_common_ip_funcs, | ||
1789 | }, | ||
1790 | { | ||
1791 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1792 | .major = 7, | ||
1793 | .minor = 0, | ||
1794 | .rev = 0, | ||
1795 | .funcs = &gmc_v7_0_ip_funcs, | ||
1796 | }, | ||
1797 | { | ||
1798 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1799 | .major = 2, | ||
1800 | .minor = 0, | ||
1801 | .rev = 0, | ||
1802 | .funcs = &cik_ih_ip_funcs, | ||
1803 | }, | ||
1804 | { | ||
1805 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1806 | .major = 7, | ||
1807 | .minor = 0, | ||
1808 | .rev = 0, | ||
1809 | .funcs = &amdgpu_pp_ip_funcs, | ||
1810 | }, | ||
1811 | { | ||
1812 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1813 | .major = 8, | ||
1814 | .minor = 5, | ||
1815 | .rev = 0, | ||
1816 | .funcs = &dce_v8_0_ip_funcs, | ||
1817 | }, | ||
1818 | { | ||
1819 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1820 | .major = 7, | ||
1821 | .minor = 3, | ||
1822 | .rev = 0, | ||
1823 | .funcs = &gfx_v7_0_ip_funcs, | ||
1824 | }, | ||
1825 | { | ||
1826 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1827 | .major = 2, | ||
1828 | .minor = 0, | ||
1829 | .rev = 0, | ||
1830 | .funcs = &cik_sdma_ip_funcs, | ||
1831 | }, | ||
1832 | { | ||
1833 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1834 | .major = 4, | ||
1835 | .minor = 2, | ||
1836 | .rev = 0, | ||
1837 | .funcs = &uvd_v4_2_ip_funcs, | ||
1838 | }, | ||
1839 | { | ||
1840 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1841 | .major = 2, | ||
1842 | .minor = 0, | ||
1843 | .rev = 0, | ||
1844 | .funcs = &vce_v2_0_ip_funcs, | ||
1845 | }, | ||
1846 | }; | ||
1847 | |||
1848 | static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] = | ||
1849 | { | ||
1850 | /* ORDER MATTERS! */ | ||
1851 | { | ||
1852 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1853 | .major = 1, | ||
1854 | .minor = 0, | ||
1855 | .rev = 0, | ||
1856 | .funcs = &cik_common_ip_funcs, | ||
1857 | }, | ||
1858 | { | ||
1859 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1860 | .major = 7, | ||
1861 | .minor = 0, | ||
1862 | .rev = 0, | ||
1863 | .funcs = &gmc_v7_0_ip_funcs, | ||
1864 | }, | ||
1865 | { | ||
1866 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1867 | .major = 2, | ||
1868 | .minor = 0, | ||
1869 | .rev = 0, | ||
1870 | .funcs = &cik_ih_ip_funcs, | ||
1871 | }, | ||
1872 | { | ||
1873 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1874 | .major = 7, | ||
1875 | .minor = 0, | ||
1876 | .rev = 0, | ||
1877 | .funcs = &amdgpu_pp_ip_funcs, | ||
1878 | }, | ||
1879 | { | ||
1880 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1881 | .major = 8, | ||
1882 | .minor = 5, | ||
1883 | .rev = 0, | ||
1884 | .funcs = &dce_virtual_ip_funcs, | ||
1885 | }, | ||
1886 | { | ||
1887 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1888 | .major = 7, | ||
1889 | .minor = 3, | ||
1890 | .rev = 0, | ||
1891 | .funcs = &gfx_v7_0_ip_funcs, | ||
1892 | }, | ||
1893 | { | ||
1894 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1895 | .major = 2, | ||
1896 | .minor = 0, | ||
1897 | .rev = 0, | ||
1898 | .funcs = &cik_sdma_ip_funcs, | ||
1899 | }, | ||
1900 | { | ||
1901 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1902 | .major = 4, | ||
1903 | .minor = 2, | ||
1904 | .rev = 0, | ||
1905 | .funcs = &uvd_v4_2_ip_funcs, | ||
1906 | }, | ||
1907 | { | ||
1908 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1909 | .major = 2, | ||
1910 | .minor = 0, | ||
1911 | .rev = 0, | ||
1912 | .funcs = &vce_v2_0_ip_funcs, | ||
1913 | }, | ||
1914 | }; | ||
1915 | |||
1916 | static const struct amdgpu_ip_block_version kabini_ip_blocks[] = | ||
1917 | { | ||
1918 | /* ORDER MATTERS! */ | ||
1919 | { | ||
1920 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1921 | .major = 1, | ||
1922 | .minor = 0, | ||
1923 | .rev = 0, | ||
1924 | .funcs = &cik_common_ip_funcs, | ||
1925 | }, | ||
1926 | { | ||
1927 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1928 | .major = 7, | ||
1929 | .minor = 0, | ||
1930 | .rev = 0, | ||
1931 | .funcs = &gmc_v7_0_ip_funcs, | ||
1932 | }, | ||
1933 | { | ||
1934 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1935 | .major = 2, | ||
1936 | .minor = 0, | ||
1937 | .rev = 0, | ||
1938 | .funcs = &cik_ih_ip_funcs, | ||
1939 | }, | ||
1940 | { | ||
1941 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1942 | .major = 7, | ||
1943 | .minor = 0, | ||
1944 | .rev = 0, | ||
1945 | .funcs = &amdgpu_pp_ip_funcs, | ||
1946 | }, | ||
1947 | { | ||
1948 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1949 | .major = 8, | ||
1950 | .minor = 3, | ||
1951 | .rev = 0, | ||
1952 | .funcs = &dce_v8_0_ip_funcs, | ||
1953 | }, | ||
1954 | { | ||
1955 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1956 | .major = 7, | ||
1957 | .minor = 2, | ||
1958 | .rev = 0, | ||
1959 | .funcs = &gfx_v7_0_ip_funcs, | ||
1960 | }, | ||
1961 | { | ||
1962 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1963 | .major = 2, | ||
1964 | .minor = 0, | ||
1965 | .rev = 0, | ||
1966 | .funcs = &cik_sdma_ip_funcs, | ||
1967 | }, | ||
1968 | { | ||
1969 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1970 | .major = 4, | ||
1971 | .minor = 2, | ||
1972 | .rev = 0, | ||
1973 | .funcs = &uvd_v4_2_ip_funcs, | ||
1974 | }, | ||
1975 | { | ||
1976 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1977 | .major = 2, | ||
1978 | .minor = 0, | ||
1979 | .rev = 0, | ||
1980 | .funcs = &vce_v2_0_ip_funcs, | ||
1981 | }, | ||
1982 | }; | ||
1983 | |||
1984 | static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] = | ||
1985 | { | ||
1986 | /* ORDER MATTERS! */ | ||
1987 | { | ||
1988 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1989 | .major = 1, | ||
1990 | .minor = 0, | ||
1991 | .rev = 0, | ||
1992 | .funcs = &cik_common_ip_funcs, | ||
1993 | }, | ||
1994 | { | ||
1995 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1996 | .major = 7, | ||
1997 | .minor = 0, | ||
1998 | .rev = 0, | ||
1999 | .funcs = &gmc_v7_0_ip_funcs, | ||
2000 | }, | ||
2001 | { | ||
2002 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
2003 | .major = 2, | ||
2004 | .minor = 0, | ||
2005 | .rev = 0, | ||
2006 | .funcs = &cik_ih_ip_funcs, | ||
2007 | }, | ||
2008 | { | ||
2009 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2010 | .major = 7, | ||
2011 | .minor = 0, | ||
2012 | .rev = 0, | ||
2013 | .funcs = &amdgpu_pp_ip_funcs, | ||
2014 | }, | ||
2015 | { | ||
2016 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
2017 | .major = 8, | ||
2018 | .minor = 3, | ||
2019 | .rev = 0, | ||
2020 | .funcs = &dce_virtual_ip_funcs, | ||
2021 | }, | ||
2022 | { | ||
2023 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
2024 | .major = 7, | ||
2025 | .minor = 2, | ||
2026 | .rev = 0, | ||
2027 | .funcs = &gfx_v7_0_ip_funcs, | ||
2028 | }, | ||
2029 | { | ||
2030 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
2031 | .major = 2, | ||
2032 | .minor = 0, | ||
2033 | .rev = 0, | ||
2034 | .funcs = &cik_sdma_ip_funcs, | ||
2035 | }, | ||
2036 | { | ||
2037 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
2038 | .major = 4, | ||
2039 | .minor = 2, | ||
2040 | .rev = 0, | ||
2041 | .funcs = &uvd_v4_2_ip_funcs, | ||
2042 | }, | ||
2043 | { | ||
2044 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
2045 | .major = 2, | ||
2046 | .minor = 0, | ||
2047 | .rev = 0, | ||
2048 | .funcs = &vce_v2_0_ip_funcs, | ||
2049 | }, | ||
2050 | }; | ||
2051 | |||
2052 | static const struct amdgpu_ip_block_version mullins_ip_blocks[] = | ||
2053 | { | ||
2054 | /* ORDER MATTERS! */ | ||
2055 | { | ||
2056 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
2057 | .major = 1, | ||
2058 | .minor = 0, | ||
2059 | .rev = 0, | ||
2060 | .funcs = &cik_common_ip_funcs, | ||
2061 | }, | ||
2062 | { | ||
2063 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
2064 | .major = 7, | ||
2065 | .minor = 0, | ||
2066 | .rev = 0, | ||
2067 | .funcs = &gmc_v7_0_ip_funcs, | ||
2068 | }, | ||
2069 | { | ||
2070 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
2071 | .major = 2, | ||
2072 | .minor = 0, | ||
2073 | .rev = 0, | ||
2074 | .funcs = &cik_ih_ip_funcs, | ||
2075 | }, | ||
2076 | { | ||
2077 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2078 | .major = 7, | ||
2079 | .minor = 0, | ||
2080 | .rev = 0, | ||
2081 | .funcs = &amdgpu_pp_ip_funcs, | ||
2082 | }, | ||
2083 | { | ||
2084 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
2085 | .major = 8, | ||
2086 | .minor = 3, | ||
2087 | .rev = 0, | ||
2088 | .funcs = &dce_v8_0_ip_funcs, | ||
2089 | }, | ||
2090 | { | ||
2091 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
2092 | .major = 7, | ||
2093 | .minor = 2, | ||
2094 | .rev = 0, | ||
2095 | .funcs = &gfx_v7_0_ip_funcs, | ||
2096 | }, | ||
2097 | { | ||
2098 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
2099 | .major = 2, | ||
2100 | .minor = 0, | ||
2101 | .rev = 0, | ||
2102 | .funcs = &cik_sdma_ip_funcs, | ||
2103 | }, | ||
2104 | { | ||
2105 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
2106 | .major = 4, | ||
2107 | .minor = 2, | ||
2108 | .rev = 0, | ||
2109 | .funcs = &uvd_v4_2_ip_funcs, | ||
2110 | }, | ||
2111 | { | ||
2112 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
2113 | .major = 2, | ||
2114 | .minor = 0, | ||
2115 | .rev = 0, | ||
2116 | .funcs = &vce_v2_0_ip_funcs, | ||
2117 | }, | ||
2118 | }; | ||
2119 | |||
2120 | static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] = | ||
2121 | { | ||
2122 | /* ORDER MATTERS! */ | ||
2123 | { | ||
2124 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
2125 | .major = 1, | ||
2126 | .minor = 0, | ||
2127 | .rev = 0, | ||
2128 | .funcs = &cik_common_ip_funcs, | ||
2129 | }, | ||
2130 | { | ||
2131 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
2132 | .major = 7, | ||
2133 | .minor = 0, | ||
2134 | .rev = 0, | ||
2135 | .funcs = &gmc_v7_0_ip_funcs, | ||
2136 | }, | ||
2137 | { | ||
2138 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
2139 | .major = 2, | ||
2140 | .minor = 0, | ||
2141 | .rev = 0, | ||
2142 | .funcs = &cik_ih_ip_funcs, | ||
2143 | }, | ||
2144 | { | ||
2145 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2146 | .major = 7, | ||
2147 | .minor = 0, | ||
2148 | .rev = 0, | ||
2149 | .funcs = &amdgpu_pp_ip_funcs, | ||
2150 | }, | ||
2151 | { | ||
2152 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
2153 | .major = 8, | ||
2154 | .minor = 3, | ||
2155 | .rev = 0, | ||
2156 | .funcs = &dce_virtual_ip_funcs, | ||
2157 | }, | ||
2158 | { | ||
2159 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
2160 | .major = 7, | ||
2161 | .minor = 2, | ||
2162 | .rev = 0, | ||
2163 | .funcs = &gfx_v7_0_ip_funcs, | ||
2164 | }, | ||
2165 | { | ||
2166 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
2167 | .major = 2, | ||
2168 | .minor = 0, | ||
2169 | .rev = 0, | ||
2170 | .funcs = &cik_sdma_ip_funcs, | ||
2171 | }, | ||
2172 | { | ||
2173 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
2174 | .major = 4, | ||
2175 | .minor = 2, | ||
2176 | .rev = 0, | ||
2177 | .funcs = &uvd_v4_2_ip_funcs, | ||
2178 | }, | ||
2179 | { | ||
2180 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
2181 | .major = 2, | ||
2182 | .minor = 0, | ||
2183 | .rev = 0, | ||
2184 | .funcs = &vce_v2_0_ip_funcs, | ||
2185 | }, | ||
2186 | }; | ||
2187 | |||
2188 | static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = | ||
2189 | { | ||
2190 | /* ORDER MATTERS! */ | ||
2191 | { | ||
2192 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
2193 | .major = 1, | ||
2194 | .minor = 0, | ||
2195 | .rev = 0, | ||
2196 | .funcs = &cik_common_ip_funcs, | ||
2197 | }, | ||
2198 | { | ||
2199 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
2200 | .major = 7, | ||
2201 | .minor = 0, | ||
2202 | .rev = 0, | ||
2203 | .funcs = &gmc_v7_0_ip_funcs, | ||
2204 | }, | ||
2205 | { | ||
2206 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
2207 | .major = 2, | ||
2208 | .minor = 0, | ||
2209 | .rev = 0, | ||
2210 | .funcs = &cik_ih_ip_funcs, | ||
2211 | }, | ||
2212 | { | ||
2213 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2214 | .major = 7, | ||
2215 | .minor = 0, | ||
2216 | .rev = 0, | ||
2217 | .funcs = &amdgpu_pp_ip_funcs, | ||
2218 | }, | ||
2219 | { | ||
2220 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
2221 | .major = 8, | ||
2222 | .minor = 1, | ||
2223 | .rev = 0, | ||
2224 | .funcs = &dce_v8_0_ip_funcs, | ||
2225 | }, | ||
2226 | { | ||
2227 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
2228 | .major = 7, | ||
2229 | .minor = 1, | ||
2230 | .rev = 0, | ||
2231 | .funcs = &gfx_v7_0_ip_funcs, | ||
2232 | }, | ||
2233 | { | ||
2234 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
2235 | .major = 2, | ||
2236 | .minor = 0, | ||
2237 | .rev = 0, | ||
2238 | .funcs = &cik_sdma_ip_funcs, | ||
2239 | }, | ||
2240 | { | ||
2241 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
2242 | .major = 4, | ||
2243 | .minor = 2, | ||
2244 | .rev = 0, | ||
2245 | .funcs = &uvd_v4_2_ip_funcs, | ||
2246 | }, | ||
2247 | { | ||
2248 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
2249 | .major = 2, | ||
2250 | .minor = 0, | ||
2251 | .rev = 0, | ||
2252 | .funcs = &vce_v2_0_ip_funcs, | ||
2253 | }, | ||
2254 | }; | ||
2255 | |||
2256 | static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] = | ||
2257 | { | ||
2258 | /* ORDER MATTERS! */ | ||
2259 | { | ||
2260 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
2261 | .major = 1, | ||
2262 | .minor = 0, | ||
2263 | .rev = 0, | ||
2264 | .funcs = &cik_common_ip_funcs, | ||
2265 | }, | ||
2266 | { | ||
2267 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
2268 | .major = 7, | ||
2269 | .minor = 0, | ||
2270 | .rev = 0, | ||
2271 | .funcs = &gmc_v7_0_ip_funcs, | ||
2272 | }, | ||
2273 | { | ||
2274 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
2275 | .major = 2, | ||
2276 | .minor = 0, | ||
2277 | .rev = 0, | ||
2278 | .funcs = &cik_ih_ip_funcs, | ||
2279 | }, | ||
2280 | { | ||
2281 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2282 | .major = 7, | ||
2283 | .minor = 0, | ||
2284 | .rev = 0, | ||
2285 | .funcs = &amdgpu_pp_ip_funcs, | ||
2286 | }, | ||
2287 | { | ||
2288 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
2289 | .major = 8, | ||
2290 | .minor = 1, | ||
2291 | .rev = 0, | ||
2292 | .funcs = &dce_virtual_ip_funcs, | ||
2293 | }, | ||
2294 | { | ||
2295 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
2296 | .major = 7, | ||
2297 | .minor = 1, | ||
2298 | .rev = 0, | ||
2299 | .funcs = &gfx_v7_0_ip_funcs, | ||
2300 | }, | ||
2301 | { | ||
2302 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
2303 | .major = 2, | ||
2304 | .minor = 0, | ||
2305 | .rev = 0, | ||
2306 | .funcs = &cik_sdma_ip_funcs, | ||
2307 | }, | ||
2308 | { | ||
2309 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
2310 | .major = 4, | ||
2311 | .minor = 2, | ||
2312 | .rev = 0, | ||
2313 | .funcs = &uvd_v4_2_ip_funcs, | ||
2314 | }, | ||
2315 | { | ||
2316 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
2317 | .major = 2, | ||
2318 | .minor = 0, | ||
2319 | .rev = 0, | ||
2320 | .funcs = &vce_v2_0_ip_funcs, | ||
2321 | }, | ||
2322 | }; | ||
2323 | |||
2324 | int cik_set_ip_blocks(struct amdgpu_device *adev) | ||
2325 | { | ||
2326 | if (adev->enable_virtual_display) { | ||
2327 | switch (adev->asic_type) { | ||
2328 | case CHIP_BONAIRE: | ||
2329 | adev->ip_blocks = bonaire_ip_blocks_vd; | ||
2330 | adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd); | ||
2331 | break; | ||
2332 | case CHIP_HAWAII: | ||
2333 | adev->ip_blocks = hawaii_ip_blocks_vd; | ||
2334 | adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd); | ||
2335 | break; | ||
2336 | case CHIP_KAVERI: | ||
2337 | adev->ip_blocks = kaveri_ip_blocks_vd; | ||
2338 | adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd); | ||
2339 | break; | ||
2340 | case CHIP_KABINI: | ||
2341 | adev->ip_blocks = kabini_ip_blocks_vd; | ||
2342 | adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd); | ||
2343 | break; | ||
2344 | case CHIP_MULLINS: | ||
2345 | adev->ip_blocks = mullins_ip_blocks_vd; | ||
2346 | adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd); | ||
2347 | break; | ||
2348 | default: | ||
2349 | /* FIXME: not supported yet */ | ||
2350 | return -EINVAL; | ||
2351 | } | ||
2352 | } else { | ||
2353 | switch (adev->asic_type) { | ||
2354 | case CHIP_BONAIRE: | ||
2355 | adev->ip_blocks = bonaire_ip_blocks; | ||
2356 | adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks); | ||
2357 | break; | ||
2358 | case CHIP_HAWAII: | ||
2359 | adev->ip_blocks = hawaii_ip_blocks; | ||
2360 | adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks); | ||
2361 | break; | ||
2362 | case CHIP_KAVERI: | ||
2363 | adev->ip_blocks = kaveri_ip_blocks; | ||
2364 | adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks); | ||
2365 | break; | ||
2366 | case CHIP_KABINI: | ||
2367 | adev->ip_blocks = kabini_ip_blocks; | ||
2368 | adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks); | ||
2369 | break; | ||
2370 | case CHIP_MULLINS: | ||
2371 | adev->ip_blocks = mullins_ip_blocks; | ||
2372 | adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks); | ||
2373 | break; | ||
2374 | default: | ||
2375 | /* FIXME: not supported yet */ | ||
2376 | return -EINVAL; | ||
2377 | } | ||
2378 | } | ||
2379 | |||
2380 | return 0; | ||
2381 | } | ||
2382 | |||
2383 | static const struct amdgpu_asic_funcs cik_asic_funcs = | 1633 | static const struct amdgpu_asic_funcs cik_asic_funcs = |
2384 | { | 1634 | { |
2385 | .read_disabled_bios = &cik_read_disabled_bios, | 1635 | .read_disabled_bios = &cik_read_disabled_bios, |
@@ -2612,7 +1862,7 @@ static int cik_common_set_powergating_state(void *handle, | |||
2612 | return 0; | 1862 | return 0; |
2613 | } | 1863 | } |
2614 | 1864 | ||
2615 | const struct amd_ip_funcs cik_common_ip_funcs = { | 1865 | static const struct amd_ip_funcs cik_common_ip_funcs = { |
2616 | .name = "cik_common", | 1866 | .name = "cik_common", |
2617 | .early_init = cik_common_early_init, | 1867 | .early_init = cik_common_early_init, |
2618 | .late_init = NULL, | 1868 | .late_init = NULL, |
@@ -2628,3 +1878,79 @@ const struct amd_ip_funcs cik_common_ip_funcs = { | |||
2628 | .set_clockgating_state = cik_common_set_clockgating_state, | 1878 | .set_clockgating_state = cik_common_set_clockgating_state, |
2629 | .set_powergating_state = cik_common_set_powergating_state, | 1879 | .set_powergating_state = cik_common_set_powergating_state, |
2630 | }; | 1880 | }; |
1881 | |||
1882 | static const struct amdgpu_ip_block_version cik_common_ip_block = | ||
1883 | { | ||
1884 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1885 | .major = 1, | ||
1886 | .minor = 0, | ||
1887 | .rev = 0, | ||
1888 | .funcs = &cik_common_ip_funcs, | ||
1889 | }; | ||
1890 | |||
1891 | int cik_set_ip_blocks(struct amdgpu_device *adev) | ||
1892 | { | ||
1893 | switch (adev->asic_type) { | ||
1894 | case CHIP_BONAIRE: | ||
1895 | amdgpu_ip_block_add(adev, &cik_common_ip_block); | ||
1896 | amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); | ||
1897 | amdgpu_ip_block_add(adev, &cik_ih_ip_block); | ||
1898 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1899 | if (adev->enable_virtual_display) | ||
1900 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1901 | else | ||
1902 | amdgpu_ip_block_add(adev, &dce_v8_2_ip_block); | ||
1903 | amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block); | ||
1904 | amdgpu_ip_block_add(adev, &cik_sdma_ip_block); | ||
1905 | amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); | ||
1906 | amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); | ||
1907 | break; | ||
1908 | case CHIP_HAWAII: | ||
1909 | amdgpu_ip_block_add(adev, &cik_common_ip_block); | ||
1910 | amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); | ||
1911 | amdgpu_ip_block_add(adev, &cik_ih_ip_block); | ||
1912 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1913 | if (adev->enable_virtual_display) | ||
1914 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1915 | else | ||
1916 | amdgpu_ip_block_add(adev, &dce_v8_5_ip_block); | ||
1917 | amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block); | ||
1918 | amdgpu_ip_block_add(adev, &cik_sdma_ip_block); | ||
1919 | amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); | ||
1920 | amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); | ||
1921 | break; | ||
1922 | case CHIP_KAVERI: | ||
1923 | amdgpu_ip_block_add(adev, &cik_common_ip_block); | ||
1924 | amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); | ||
1925 | amdgpu_ip_block_add(adev, &cik_ih_ip_block); | ||
1926 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1927 | if (adev->enable_virtual_display) | ||
1928 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1929 | else | ||
1930 | amdgpu_ip_block_add(adev, &dce_v8_1_ip_block); | ||
1931 | amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block); | ||
1932 | amdgpu_ip_block_add(adev, &cik_sdma_ip_block); | ||
1933 | amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); | ||
1934 | amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); | ||
1935 | break; | ||
1936 | case CHIP_KABINI: | ||
1937 | case CHIP_MULLINS: | ||
1938 | amdgpu_ip_block_add(adev, &cik_common_ip_block); | ||
1939 | amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); | ||
1940 | amdgpu_ip_block_add(adev, &cik_ih_ip_block); | ||
1941 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1942 | if (adev->enable_virtual_display) | ||
1943 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1944 | else | ||
1945 | amdgpu_ip_block_add(adev, &dce_v8_3_ip_block); | ||
1946 | amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block); | ||
1947 | amdgpu_ip_block_add(adev, &cik_sdma_ip_block); | ||
1948 | amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); | ||
1949 | amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); | ||
1950 | break; | ||
1951 | default: | ||
1952 | /* FIXME: not supported yet */ | ||
1953 | return -EINVAL; | ||
1954 | } | ||
1955 | return 0; | ||
1956 | } | ||
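Note the structural change in cik.c: the removed per-ASIC arrays are replaced by building the IP block list at runtime, one amdgpu_ip_block_add() call per block, in initialisation order. A minimal sketch of what such a helper has to do (the exact amdgpu_device field layout is an assumption here, not shown in this hunk):

	int amdgpu_ip_block_add(struct amdgpu_device *adev,
				const struct amdgpu_ip_block_version *ip_block_version)
	{
		if (!ip_block_version)
			return -EINVAL;

		/* order matters: blocks are later initialised in the order added */
		adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

		return 0;
	}

The virtual-display decision, previously duplicated across whole *_ip_blocks_vd arrays, collapses into the single dce_virtual_ip_block vs. dce_v8_x_ip_block branch per chip.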
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h index 5ebd2d7a0327..c4989f51ecef 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.h +++ b/drivers/gpu/drm/amd/amdgpu/cik.h | |||
@@ -24,8 +24,6 @@ | |||
24 | #ifndef __CIK_H__ | 24 | #ifndef __CIK_H__ |
25 | #define __CIK_H__ | 25 | #define __CIK_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs cik_common_ip_funcs; | ||
28 | |||
29 | void cik_srbm_select(struct amdgpu_device *adev, | 27 | void cik_srbm_select(struct amdgpu_device *adev, |
30 | u32 me, u32 pipe, u32 queue, u32 vmid); | 28 | u32 me, u32 pipe, u32 queue, u32 vmid); |
31 | int cik_set_ip_blocks(struct amdgpu_device *adev); | 29 | int cik_set_ip_blocks(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index be3d6f79a864..319b32cdea84 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c | |||
@@ -413,7 +413,7 @@ static int cik_ih_set_powergating_state(void *handle, | |||
413 | return 0; | 413 | return 0; |
414 | } | 414 | } |
415 | 415 | ||
416 | const struct amd_ip_funcs cik_ih_ip_funcs = { | 416 | static const struct amd_ip_funcs cik_ih_ip_funcs = { |
417 | .name = "cik_ih", | 417 | .name = "cik_ih", |
418 | .early_init = cik_ih_early_init, | 418 | .early_init = cik_ih_early_init, |
419 | .late_init = NULL, | 419 | .late_init = NULL, |
@@ -441,3 +441,12 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev) | |||
441 | if (adev->irq.ih_funcs == NULL) | 441 | if (adev->irq.ih_funcs == NULL) |
442 | adev->irq.ih_funcs = &cik_ih_funcs; | 442 | adev->irq.ih_funcs = &cik_ih_funcs; |
443 | } | 443 | } |
444 | |||
445 | const struct amdgpu_ip_block_version cik_ih_ip_block = | ||
446 | { | ||
447 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
448 | .major = 2, | ||
449 | .minor = 0, | ||
450 | .rev = 0, | ||
451 | .funcs = &cik_ih_ip_funcs, | ||
452 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h index 6b0f375ec244..1d9ddee2868e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __CIK_IH_H__ | 24 | #ifndef __CIK_IH_H__ |
25 | #define __CIK_IH_H__ | 25 | #define __CIK_IH_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs cik_ih_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version cik_ih_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index cb952acc7133..4c34dbc7a254 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
@@ -206,10 +206,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
206 | 206 | ||
207 | for (i = 0; i < count; i++) | 207 | for (i = 0; i < count; i++) |
208 | if (sdma && sdma->burst_nop && (i == 0)) | 208 | if (sdma && sdma->burst_nop && (i == 0)) |
209 | amdgpu_ring_write(ring, ring->nop | | 209 | amdgpu_ring_write(ring, ring->funcs->nop | |
210 | SDMA_NOP_COUNT(count - 1)); | 210 | SDMA_NOP_COUNT(count - 1)); |
211 | else | 211 | else |
212 | amdgpu_ring_write(ring, ring->nop); | 212 | amdgpu_ring_write(ring, ring->funcs->nop); |
213 | } | 213 | } |
214 | 214 | ||
215 | /** | 215 | /** |
@@ -622,7 +622,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
622 | { | 622 | { |
623 | struct amdgpu_device *adev = ring->adev; | 623 | struct amdgpu_device *adev = ring->adev; |
624 | struct amdgpu_ib ib; | 624 | struct amdgpu_ib ib; |
625 | struct fence *f = NULL; | 625 | struct dma_fence *f = NULL; |
626 | unsigned index; | 626 | unsigned index; |
627 | u32 tmp = 0; | 627 | u32 tmp = 0; |
628 | u64 gpu_addr; | 628 | u64 gpu_addr; |
@@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
655 | if (r) | 655 | if (r) |
656 | goto err1; | 656 | goto err1; |
657 | 657 | ||
658 | r = fence_wait_timeout(f, false, timeout); | 658 | r = dma_fence_wait_timeout(f, false, timeout); |
659 | if (r == 0) { | 659 | if (r == 0) { |
660 | DRM_ERROR("amdgpu: IB test timed out\n"); | 660 | DRM_ERROR("amdgpu: IB test timed out\n"); |
661 | r = -ETIMEDOUT; | 661 | r = -ETIMEDOUT; |
@@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
675 | 675 | ||
676 | err1: | 676 | err1: |
677 | amdgpu_ib_free(adev, &ib, NULL); | 677 | amdgpu_ib_free(adev, &ib, NULL); |
678 | fence_put(f); | 678 | dma_fence_put(f); |
679 | err0: | 679 | err0: |
680 | amdgpu_wb_free(adev, index); | 680 | amdgpu_wb_free(adev, index); |
681 | return r; | 681 | return r; |
@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
848 | amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ | 848 | amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ |
849 | } | 849 | } |
850 | 850 | ||
851 | static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
852 | { | ||
853 | return | ||
854 | 7 + 4; /* cik_sdma_ring_emit_ib */ | ||
855 | } | ||
856 | |||
857 | static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
858 | { | ||
859 | return | ||
860 | 6 + /* cik_sdma_ring_emit_hdp_flush */ | ||
861 | 3 + /* cik_sdma_ring_emit_hdp_invalidate */ | ||
862 | 6 + /* cik_sdma_ring_emit_pipeline_sync */ | ||
863 | 12 + /* cik_sdma_ring_emit_vm_flush */ | ||
864 | 9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */ | ||
865 | } | ||
866 | |||
867 | static void cik_enable_sdma_mgcg(struct amdgpu_device *adev, | 851 | static void cik_enable_sdma_mgcg(struct amdgpu_device *adev, |
868 | bool enable) | 852 | bool enable) |
869 | { | 853 | { |
@@ -959,11 +943,10 @@ static int cik_sdma_sw_init(void *handle) | |||
959 | ring->ring_obj = NULL; | 943 | ring->ring_obj = NULL; |
960 | sprintf(ring->name, "sdma%d", i); | 944 | sprintf(ring->name, "sdma%d", i); |
961 | r = amdgpu_ring_init(adev, ring, 1024, | 945 | r = amdgpu_ring_init(adev, ring, 1024, |
962 | SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf, | ||
963 | &adev->sdma.trap_irq, | 946 | &adev->sdma.trap_irq, |
964 | (i == 0) ? | 947 | (i == 0) ? |
965 | AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1, | 948 | AMDGPU_SDMA_IRQ_TRAP0 : |
966 | AMDGPU_RING_TYPE_SDMA); | 949 | AMDGPU_SDMA_IRQ_TRAP1); |
967 | if (r) | 950 | if (r) |
968 | return r; | 951 | return r; |
969 | } | 952 | } |
@@ -1207,7 +1190,7 @@ static int cik_sdma_set_powergating_state(void *handle, | |||
1207 | return 0; | 1190 | return 0; |
1208 | } | 1191 | } |
1209 | 1192 | ||
1210 | const struct amd_ip_funcs cik_sdma_ip_funcs = { | 1193 | static const struct amd_ip_funcs cik_sdma_ip_funcs = { |
1211 | .name = "cik_sdma", | 1194 | .name = "cik_sdma", |
1212 | .early_init = cik_sdma_early_init, | 1195 | .early_init = cik_sdma_early_init, |
1213 | .late_init = NULL, | 1196 | .late_init = NULL, |
@@ -1225,10 +1208,19 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = { | |||
1225 | }; | 1208 | }; |
1226 | 1209 | ||
1227 | static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { | 1210 | static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { |
1211 | .type = AMDGPU_RING_TYPE_SDMA, | ||
1212 | .align_mask = 0xf, | ||
1213 | .nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), | ||
1228 | .get_rptr = cik_sdma_ring_get_rptr, | 1214 | .get_rptr = cik_sdma_ring_get_rptr, |
1229 | .get_wptr = cik_sdma_ring_get_wptr, | 1215 | .get_wptr = cik_sdma_ring_get_wptr, |
1230 | .set_wptr = cik_sdma_ring_set_wptr, | 1216 | .set_wptr = cik_sdma_ring_set_wptr, |
1231 | .parse_cs = NULL, | 1217 | .emit_frame_size = |
1218 | 6 + /* cik_sdma_ring_emit_hdp_flush */ | ||
1219 | 3 + /* cik_sdma_ring_emit_hdp_invalidate */ | ||
1220 | 6 + /* cik_sdma_ring_emit_pipeline_sync */ | ||
1221 | 12 + /* cik_sdma_ring_emit_vm_flush */ | ||
1222 | 9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */ | ||
1223 | .emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */ | ||
1232 | .emit_ib = cik_sdma_ring_emit_ib, | 1224 | .emit_ib = cik_sdma_ring_emit_ib, |
1233 | .emit_fence = cik_sdma_ring_emit_fence, | 1225 | .emit_fence = cik_sdma_ring_emit_fence, |
1234 | .emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync, | 1226 | .emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync, |
@@ -1239,8 +1231,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { | |||
1239 | .test_ib = cik_sdma_ring_test_ib, | 1231 | .test_ib = cik_sdma_ring_test_ib, |
1240 | .insert_nop = cik_sdma_ring_insert_nop, | 1232 | .insert_nop = cik_sdma_ring_insert_nop, |
1241 | .pad_ib = cik_sdma_ring_pad_ib, | 1233 | .pad_ib = cik_sdma_ring_pad_ib, |
1242 | .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size, | ||
1243 | .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size, | ||
1244 | }; | 1234 | }; |
1245 | 1235 | ||
1246 | static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) | 1236 | static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) |
@@ -1352,3 +1342,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) | |||
1352 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; | 1342 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; |
1353 | } | 1343 | } |
1354 | } | 1344 | } |
1345 | |||
1346 | const struct amdgpu_ip_block_version cik_sdma_ip_block = | ||
1347 | { | ||
1348 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1349 | .major = 2, | ||
1350 | .minor = 0, | ||
1351 | .rev = 0, | ||
1352 | .funcs = &cik_sdma_ip_funcs, | ||
1353 | }; | ||
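The cik_sdma.c hunks above fold the per-call get_emit_ib_size()/get_dma_frame_size() callbacks into constant .emit_ib_size/.emit_frame_size fields of the static amdgpu_ring_funcs table, and export the engine through an amdgpu_ip_block_version descriptor instead of a bare amd_ip_funcs pointer. The following is only an illustrative, standalone sketch of that descriptor-table pattern; the struct and field names are simplified stand-ins, not the real amdgpu definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel's amd_ip_funcs / amdgpu_ip_block_version. */
struct ip_funcs {
	const char *name;
	int (*hw_init)(void *handle);
};

struct ip_block_version {
	int type;                      /* e.g. SDMA, DCE, IH */
	unsigned major, minor, rev;
	const struct ip_funcs *funcs;
};

static int demo_sdma_hw_init(void *handle)
{
	(void)handle;
	printf("sdma hw_init\n");
	return 0;
}

/* File-scope, read-only descriptors, mirroring the shape of cik_sdma_ip_block. */
static const struct ip_funcs demo_sdma_funcs = {
	.name = "demo_sdma",
	.hw_init = demo_sdma_hw_init,
};

static const struct ip_block_version demo_sdma_block = {
	.type = 1, .major = 2, .minor = 0, .rev = 0,
	.funcs = &demo_sdma_funcs,
};

int main(void)
{
	/* A driver core would iterate registered blocks and call through funcs. */
	printf("%s v%u.%u.%u\n", demo_sdma_block.funcs->name,
	       demo_sdma_block.major, demo_sdma_block.minor, demo_sdma_block.rev);
	return demo_sdma_block.funcs->hw_init(NULL);
}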
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h index 027727c677b8..a4a8fe01410b 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __CIK_SDMA_H__ | 24 | #ifndef __CIK_SDMA_H__ |
25 | #define __CIK_SDMA_H__ | 25 | #define __CIK_SDMA_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs cik_sdma_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version cik_sdma_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index 8659852aea9e..6cbd913fd12e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h | |||
@@ -43,6 +43,14 @@ | |||
43 | #define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c) | 43 | #define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c) |
44 | #define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c) | 44 | #define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c) |
45 | 45 | ||
46 | /* hpd instance offsets */ | ||
47 | #define HPD0_REGISTER_OFFSET (0x1807 - 0x1807) | ||
48 | #define HPD1_REGISTER_OFFSET (0x180a - 0x1807) | ||
49 | #define HPD2_REGISTER_OFFSET (0x180d - 0x1807) | ||
50 | #define HPD3_REGISTER_OFFSET (0x1810 - 0x1807) | ||
51 | #define HPD4_REGISTER_OFFSET (0x1813 - 0x1807) | ||
52 | #define HPD5_REGISTER_OFFSET (0x1816 - 0x1807) | ||
53 | |||
46 | #define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 | 54 | #define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 |
47 | #define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003 | 55 | #define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003 |
48 | 56 | ||
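The new HPDn_REGISTER_OFFSET macros in cikd.h encode each hot-plug-detect instance as a constant delta from the HPD0 base register, so the DCE code further down can address any instance as base + hpd_offsets[n]. A tiny sketch of the same delta arithmetic, with made-up register indices rather than the real mmDC_HPDn_* values, just to show the macros collapse to plain compile-time constants:

#include <stdio.h>

/* Hypothetical register indices standing in for the real HPD register bases. */
#define DEMO_HPD0_BASE 0x1807
#define DEMO_HPD1_BASE 0x180a

/* Same shape as HPD1_REGISTER_OFFSET: a delta from the first instance. */
#define DEMO_HPD0_OFFSET (DEMO_HPD0_BASE - DEMO_HPD0_BASE)   /* 0x0 */
#define DEMO_HPD1_OFFSET (DEMO_HPD1_BASE - DEMO_HPD0_BASE)   /* 0x3 */

int main(void)
{
	static const unsigned offsets[] = { DEMO_HPD0_OFFSET, DEMO_HPD1_OFFSET };
	unsigned hpd;

	/* One indexed access replaces a per-instance register name. */
	for (hpd = 0; hpd < 2; hpd++)
		printf("HPD%u status register = 0x%x\n",
		       hpd, DEMO_HPD0_BASE + offsets[hpd]);
	return 0;
}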
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 3c082e143730..352b5fad5a06 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c | |||
@@ -1250,7 +1250,8 @@ static void cz_update_current_ps(struct amdgpu_device *adev, | |||
1250 | 1250 | ||
1251 | pi->current_ps = *ps; | 1251 | pi->current_ps = *ps; |
1252 | pi->current_rps = *rps; | 1252 | pi->current_rps = *rps; |
1253 | pi->current_rps.ps_priv = ps; | 1253 | pi->current_rps.ps_priv = &pi->current_ps; |
1254 | adev->pm.dpm.current_ps = &pi->current_rps; | ||
1254 | 1255 | ||
1255 | } | 1256 | } |
1256 | 1257 | ||
@@ -1262,7 +1263,8 @@ static void cz_update_requested_ps(struct amdgpu_device *adev, | |||
1262 | 1263 | ||
1263 | pi->requested_ps = *ps; | 1264 | pi->requested_ps = *ps; |
1264 | pi->requested_rps = *rps; | 1265 | pi->requested_rps = *rps; |
1265 | pi->requested_rps.ps_priv = ps; | 1266 | pi->requested_rps.ps_priv = &pi->requested_ps; |
1267 | adev->pm.dpm.requested_ps = &pi->requested_rps; | ||
1266 | 1268 | ||
1267 | } | 1269 | } |
1268 | 1270 | ||
@@ -2257,6 +2259,18 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) | |||
2257 | } | 2259 | } |
2258 | } | 2260 | } |
2259 | 2261 | ||
2262 | static int cz_check_state_equal(struct amdgpu_device *adev, | ||
2263 | struct amdgpu_ps *cps, | ||
2264 | struct amdgpu_ps *rps, | ||
2265 | bool *equal) | ||
2266 | { | ||
2267 | if (equal == NULL) | ||
2268 | return -EINVAL; | ||
2269 | |||
2270 | *equal = false; | ||
2271 | return 0; | ||
2272 | } | ||
2273 | |||
2260 | const struct amd_ip_funcs cz_dpm_ip_funcs = { | 2274 | const struct amd_ip_funcs cz_dpm_ip_funcs = { |
2261 | .name = "cz_dpm", | 2275 | .name = "cz_dpm", |
2262 | .early_init = cz_dpm_early_init, | 2276 | .early_init = cz_dpm_early_init, |
@@ -2289,6 +2303,7 @@ static const struct amdgpu_dpm_funcs cz_dpm_funcs = { | |||
2289 | .vblank_too_short = NULL, | 2303 | .vblank_too_short = NULL, |
2290 | .powergate_uvd = cz_dpm_powergate_uvd, | 2304 | .powergate_uvd = cz_dpm_powergate_uvd, |
2291 | .powergate_vce = cz_dpm_powergate_vce, | 2305 | .powergate_vce = cz_dpm_powergate_vce, |
2306 | .check_state_equal = cz_check_state_equal, | ||
2292 | }; | 2307 | }; |
2293 | 2308 | ||
2294 | static void cz_dpm_set_funcs(struct amdgpu_device *adev) | 2309 | static void cz_dpm_set_funcs(struct amdgpu_device *adev) |
@@ -2296,3 +2311,12 @@ static void cz_dpm_set_funcs(struct amdgpu_device *adev) | |||
2296 | if (NULL == adev->pm.funcs) | 2311 | if (NULL == adev->pm.funcs) |
2297 | adev->pm.funcs = &cz_dpm_funcs; | 2312 | adev->pm.funcs = &cz_dpm_funcs; |
2298 | } | 2313 | } |
2314 | |||
2315 | const struct amdgpu_ip_block_version cz_dpm_ip_block = | ||
2316 | { | ||
2317 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2318 | .major = 8, | ||
2319 | .minor = 0, | ||
2320 | .rev = 0, | ||
2321 | .funcs = &cz_dpm_ip_funcs, | ||
2322 | }; | ||
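Two things change in cz_dpm.c: cz_check_state_equal is wired in as the new check_state_equal callback (conservatively reporting states as never equal), and the ps_priv back-pointers now reference the driver's own embedded copies (&pi->current_ps / &pi->requested_ps) rather than the source object that was just copied from. Below is a small standalone sketch of that second fix, with hypothetical struct names, showing why pointing at the owned copy matters once the source goes away; it is an illustration of the pattern, not the driver code itself.

#include <stdio.h>
#include <string.h>

struct demo_ps  { int sclk; };
struct demo_rps { void *ps_priv; };
struct demo_pi  { struct demo_ps current_ps; struct demo_rps current_rps; };

/* Mirrors the shape of cz_update_current_ps(): copy both structs, then fix up
 * ps_priv so it refers to the copy we own, not the caller's short-lived object. */
static void demo_update_current(struct demo_pi *pi,
				const struct demo_ps *ps, const struct demo_rps *rps)
{
	pi->current_ps = *ps;
	pi->current_rps = *rps;
	pi->current_rps.ps_priv = &pi->current_ps;   /* not the caller's ps */
}

int main(void)
{
	struct demo_pi pi;
	struct demo_ps tmp_ps = { .sclk = 800 };
	struct demo_rps tmp_rps = { .ps_priv = &tmp_ps };

	demo_update_current(&pi, &tmp_ps, &tmp_rps);
	memset(&tmp_ps, 0, sizeof(tmp_ps));          /* caller's copy disappears */

	/* Still valid, because ps_priv points into pi itself. */
	printf("sclk = %d\n", ((struct demo_ps *)pi.current_rps.ps_priv)->sclk);
	return 0;
}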
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 3d23a70b6432..fe7cbb24da7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c | |||
@@ -394,7 +394,7 @@ static int cz_ih_set_powergating_state(void *handle, | |||
394 | return 0; | 394 | return 0; |
395 | } | 395 | } |
396 | 396 | ||
397 | const struct amd_ip_funcs cz_ih_ip_funcs = { | 397 | static const struct amd_ip_funcs cz_ih_ip_funcs = { |
398 | .name = "cz_ih", | 398 | .name = "cz_ih", |
399 | .early_init = cz_ih_early_init, | 399 | .early_init = cz_ih_early_init, |
400 | .late_init = NULL, | 400 | .late_init = NULL, |
@@ -423,3 +423,11 @@ static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev) | |||
423 | adev->irq.ih_funcs = &cz_ih_funcs; | 423 | adev->irq.ih_funcs = &cz_ih_funcs; |
424 | } | 424 | } |
425 | 425 | ||
426 | const struct amdgpu_ip_block_version cz_ih_ip_block = | ||
427 | { | ||
428 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
429 | .major = 3, | ||
430 | .minor = 0, | ||
431 | .rev = 0, | ||
432 | .funcs = &cz_ih_ip_funcs, | ||
433 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h index fc4057a2ecb9..14be7753221b 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __CZ_IH_H__ | 24 | #ifndef __CZ_IH_H__ |
25 | #define __CZ_IH_H__ | 25 | #define __CZ_IH_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs cz_ih_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version cz_ih_ip_block; |
28 | 28 | ||
29 | #endif /* __CZ_IH_H__ */ | 29 | #endif /* __CZ_IH_H__ */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 4108c686aa7c..199d3f7235d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "atombios_encoders.h" | 31 | #include "atombios_encoders.h" |
32 | #include "amdgpu_pll.h" | 32 | #include "amdgpu_pll.h" |
33 | #include "amdgpu_connectors.h" | 33 | #include "amdgpu_connectors.h" |
34 | #include "dce_v10_0.h" | ||
34 | 35 | ||
35 | #include "dce/dce_10_0_d.h" | 36 | #include "dce/dce_10_0_d.h" |
36 | #include "dce/dce_10_0_sh_mask.h" | 37 | #include "dce/dce_10_0_sh_mask.h" |
@@ -330,33 +331,12 @@ static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, | |||
330 | static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev, | 331 | static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev, |
331 | enum amdgpu_hpd_id hpd) | 332 | enum amdgpu_hpd_id hpd) |
332 | { | 333 | { |
333 | int idx; | ||
334 | bool connected = false; | 334 | bool connected = false; |
335 | 335 | ||
336 | switch (hpd) { | 336 | if (hpd >= adev->mode_info.num_hpd) |
337 | case AMDGPU_HPD_1: | ||
338 | idx = 0; | ||
339 | break; | ||
340 | case AMDGPU_HPD_2: | ||
341 | idx = 1; | ||
342 | break; | ||
343 | case AMDGPU_HPD_3: | ||
344 | idx = 2; | ||
345 | break; | ||
346 | case AMDGPU_HPD_4: | ||
347 | idx = 3; | ||
348 | break; | ||
349 | case AMDGPU_HPD_5: | ||
350 | idx = 4; | ||
351 | break; | ||
352 | case AMDGPU_HPD_6: | ||
353 | idx = 5; | ||
354 | break; | ||
355 | default: | ||
356 | return connected; | 337 | return connected; |
357 | } | ||
358 | 338 | ||
359 | if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) & | 339 | if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) & |
360 | DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) | 340 | DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) |
361 | connected = true; | 341 | connected = true; |
362 | 342 | ||
@@ -376,37 +356,16 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev, | |||
376 | { | 356 | { |
377 | u32 tmp; | 357 | u32 tmp; |
378 | bool connected = dce_v10_0_hpd_sense(adev, hpd); | 358 | bool connected = dce_v10_0_hpd_sense(adev, hpd); |
379 | int idx; | ||
380 | 359 | ||
381 | switch (hpd) { | 360 | if (hpd >= adev->mode_info.num_hpd) |
382 | case AMDGPU_HPD_1: | ||
383 | idx = 0; | ||
384 | break; | ||
385 | case AMDGPU_HPD_2: | ||
386 | idx = 1; | ||
387 | break; | ||
388 | case AMDGPU_HPD_3: | ||
389 | idx = 2; | ||
390 | break; | ||
391 | case AMDGPU_HPD_4: | ||
392 | idx = 3; | ||
393 | break; | ||
394 | case AMDGPU_HPD_5: | ||
395 | idx = 4; | ||
396 | break; | ||
397 | case AMDGPU_HPD_6: | ||
398 | idx = 5; | ||
399 | break; | ||
400 | default: | ||
401 | return; | 361 | return; |
402 | } | ||
403 | 362 | ||
404 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); | 363 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); |
405 | if (connected) | 364 | if (connected) |
406 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); | 365 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); |
407 | else | 366 | else |
408 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); | 367 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); |
409 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); | 368 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); |
410 | } | 369 | } |
411 | 370 | ||
412 | /** | 371 | /** |
@@ -422,33 +381,12 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) | |||
422 | struct drm_device *dev = adev->ddev; | 381 | struct drm_device *dev = adev->ddev; |
423 | struct drm_connector *connector; | 382 | struct drm_connector *connector; |
424 | u32 tmp; | 383 | u32 tmp; |
425 | int idx; | ||
426 | 384 | ||
427 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 385 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
428 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 386 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
429 | 387 | ||
430 | switch (amdgpu_connector->hpd.hpd) { | 388 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
431 | case AMDGPU_HPD_1: | ||
432 | idx = 0; | ||
433 | break; | ||
434 | case AMDGPU_HPD_2: | ||
435 | idx = 1; | ||
436 | break; | ||
437 | case AMDGPU_HPD_3: | ||
438 | idx = 2; | ||
439 | break; | ||
440 | case AMDGPU_HPD_4: | ||
441 | idx = 3; | ||
442 | break; | ||
443 | case AMDGPU_HPD_5: | ||
444 | idx = 4; | ||
445 | break; | ||
446 | case AMDGPU_HPD_6: | ||
447 | idx = 5; | ||
448 | break; | ||
449 | default: | ||
450 | continue; | 389 | continue; |
451 | } | ||
452 | 390 | ||
453 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || | 391 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
454 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { | 392 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
@@ -457,24 +395,24 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) | |||
457 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 | 395 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
458 | * also avoid interrupt storms during dpms. | 396 | * also avoid interrupt storms during dpms. |
459 | */ | 397 | */ |
460 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); | 398 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
461 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); | 399 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); |
462 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); | 400 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
463 | continue; | 401 | continue; |
464 | } | 402 | } |
465 | 403 | ||
466 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | 404 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
467 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); | 405 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); |
468 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | 406 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
469 | 407 | ||
470 | tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]); | 408 | tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
471 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | 409 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, |
472 | DC_HPD_CONNECT_INT_DELAY, | 410 | DC_HPD_CONNECT_INT_DELAY, |
473 | AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); | 411 | AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); |
474 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | 412 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, |
475 | DC_HPD_DISCONNECT_INT_DELAY, | 413 | DC_HPD_DISCONNECT_INT_DELAY, |
476 | AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); | 414 | AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); |
477 | WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp); | 415 | WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
478 | 416 | ||
479 | dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); | 417 | dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); |
480 | amdgpu_irq_get(adev, &adev->hpd_irq, | 418 | amdgpu_irq_get(adev, &adev->hpd_irq, |
@@ -495,37 +433,16 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) | |||
495 | struct drm_device *dev = adev->ddev; | 433 | struct drm_device *dev = adev->ddev; |
496 | struct drm_connector *connector; | 434 | struct drm_connector *connector; |
497 | u32 tmp; | 435 | u32 tmp; |
498 | int idx; | ||
499 | 436 | ||
500 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 437 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
501 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 438 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
502 | 439 | ||
503 | switch (amdgpu_connector->hpd.hpd) { | 440 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
504 | case AMDGPU_HPD_1: | ||
505 | idx = 0; | ||
506 | break; | ||
507 | case AMDGPU_HPD_2: | ||
508 | idx = 1; | ||
509 | break; | ||
510 | case AMDGPU_HPD_3: | ||
511 | idx = 2; | ||
512 | break; | ||
513 | case AMDGPU_HPD_4: | ||
514 | idx = 3; | ||
515 | break; | ||
516 | case AMDGPU_HPD_5: | ||
517 | idx = 4; | ||
518 | break; | ||
519 | case AMDGPU_HPD_6: | ||
520 | idx = 5; | ||
521 | break; | ||
522 | default: | ||
523 | continue; | 441 | continue; |
524 | } | ||
525 | 442 | ||
526 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | 443 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
527 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); | 444 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); |
528 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | 445 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
529 | 446 | ||
530 | amdgpu_irq_put(adev, &adev->hpd_irq, | 447 | amdgpu_irq_put(adev, &adev->hpd_irq, |
531 | amdgpu_connector->hpd.hpd); | 448 | amdgpu_connector->hpd.hpd); |
@@ -3554,7 +3471,7 @@ static int dce_v10_0_set_powergating_state(void *handle, | |||
3554 | return 0; | 3471 | return 0; |
3555 | } | 3472 | } |
3556 | 3473 | ||
3557 | const struct amd_ip_funcs dce_v10_0_ip_funcs = { | 3474 | static const struct amd_ip_funcs dce_v10_0_ip_funcs = { |
3558 | .name = "dce_v10_0", | 3475 | .name = "dce_v10_0", |
3559 | .early_init = dce_v10_0_early_init, | 3476 | .early_init = dce_v10_0_early_init, |
3560 | .late_init = NULL, | 3477 | .late_init = NULL, |
@@ -3885,3 +3802,21 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev) | |||
3885 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; | 3802 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; |
3886 | adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs; | 3803 | adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs; |
3887 | } | 3804 | } |
3805 | |||
3806 | const struct amdgpu_ip_block_version dce_v10_0_ip_block = | ||
3807 | { | ||
3808 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3809 | .major = 10, | ||
3810 | .minor = 0, | ||
3811 | .rev = 0, | ||
3812 | .funcs = &dce_v10_0_ip_funcs, | ||
3813 | }; | ||
3814 | |||
3815 | const struct amdgpu_ip_block_version dce_v10_1_ip_block = | ||
3816 | { | ||
3817 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3818 | .major = 10, | ||
3819 | .minor = 1, | ||
3820 | .rev = 0, | ||
3821 | .funcs = &dce_v10_0_ip_funcs, | ||
3822 | }; | ||
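The dce_v10_0.c hunks replace the six-way switch that mapped AMDGPU_HPD_n onto an array index with a single bounds check against adev->mode_info.num_hpd, after which the enum value itself indexes hpd_offsets[]. The same refactor is repeated for dce_v11_0, dce_v6_0 and dce_v8_0 below. A minimal standalone sketch of the pattern, using a placeholder register array instead of RREG32() so it compiles on its own:

#include <stdio.h>
#include <stdbool.h>

#define DEMO_NUM_HPD   6
#define DEMO_SENSE_BIT 0x1u

/* Placeholder register file and accessor standing in for the MMIO helpers. */
static unsigned demo_regs[DEMO_NUM_HPD] = { 1, 0, 1, 0, 0, 1 };
static const unsigned hpd_offsets[DEMO_NUM_HPD] = { 0, 1, 2, 3, 4, 5 };

static unsigned demo_rreg32(unsigned reg) { return demo_regs[reg]; }

/* Before: switch (hpd) { case HPD_1: idx = 0; ... }.
 * After: reject out-of-range ids, then index the offset table directly. */
static bool demo_hpd_sense(unsigned hpd)
{
	if (hpd >= DEMO_NUM_HPD)
		return false;
	return (demo_rreg32(0 /* status base */ + hpd_offsets[hpd]) & DEMO_SENSE_BIT) != 0;
}

int main(void)
{
	unsigned hpd;

	for (hpd = 0; hpd < DEMO_NUM_HPD + 1; hpd++)   /* last id is out of range */
		printf("hpd %u connected: %d\n", hpd, demo_hpd_sense(hpd));
	return 0;
}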
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h index e3dc04d293e4..7a0747789f1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h | |||
@@ -24,7 +24,9 @@ | |||
24 | #ifndef __DCE_V10_0_H__ | 24 | #ifndef __DCE_V10_0_H__ |
25 | #define __DCE_V10_0_H__ | 25 | #define __DCE_V10_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs dce_v10_0_ip_funcs; | 27 | |
28 | extern const struct amdgpu_ip_block_version dce_v10_0_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version dce_v10_1_ip_block; | ||
28 | 30 | ||
29 | void dce_v10_0_disable_dce(struct amdgpu_device *adev); | 31 | void dce_v10_0_disable_dce(struct amdgpu_device *adev); |
30 | 32 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index f264b8f17ad1..ecd000e35981 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "atombios_encoders.h" | 31 | #include "atombios_encoders.h" |
32 | #include "amdgpu_pll.h" | 32 | #include "amdgpu_pll.h" |
33 | #include "amdgpu_connectors.h" | 33 | #include "amdgpu_connectors.h" |
34 | #include "dce_v11_0.h" | ||
34 | 35 | ||
35 | #include "dce/dce_11_0_d.h" | 36 | #include "dce/dce_11_0_d.h" |
36 | #include "dce/dce_11_0_sh_mask.h" | 37 | #include "dce/dce_11_0_sh_mask.h" |
@@ -346,33 +347,12 @@ static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, | |||
346 | static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev, | 347 | static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev, |
347 | enum amdgpu_hpd_id hpd) | 348 | enum amdgpu_hpd_id hpd) |
348 | { | 349 | { |
349 | int idx; | ||
350 | bool connected = false; | 350 | bool connected = false; |
351 | 351 | ||
352 | switch (hpd) { | 352 | if (hpd >= adev->mode_info.num_hpd) |
353 | case AMDGPU_HPD_1: | ||
354 | idx = 0; | ||
355 | break; | ||
356 | case AMDGPU_HPD_2: | ||
357 | idx = 1; | ||
358 | break; | ||
359 | case AMDGPU_HPD_3: | ||
360 | idx = 2; | ||
361 | break; | ||
362 | case AMDGPU_HPD_4: | ||
363 | idx = 3; | ||
364 | break; | ||
365 | case AMDGPU_HPD_5: | ||
366 | idx = 4; | ||
367 | break; | ||
368 | case AMDGPU_HPD_6: | ||
369 | idx = 5; | ||
370 | break; | ||
371 | default: | ||
372 | return connected; | 353 | return connected; |
373 | } | ||
374 | 354 | ||
375 | if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) & | 355 | if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) & |
376 | DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) | 356 | DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) |
377 | connected = true; | 357 | connected = true; |
378 | 358 | ||
@@ -392,37 +372,16 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev, | |||
392 | { | 372 | { |
393 | u32 tmp; | 373 | u32 tmp; |
394 | bool connected = dce_v11_0_hpd_sense(adev, hpd); | 374 | bool connected = dce_v11_0_hpd_sense(adev, hpd); |
395 | int idx; | ||
396 | 375 | ||
397 | switch (hpd) { | 376 | if (hpd >= adev->mode_info.num_hpd) |
398 | case AMDGPU_HPD_1: | ||
399 | idx = 0; | ||
400 | break; | ||
401 | case AMDGPU_HPD_2: | ||
402 | idx = 1; | ||
403 | break; | ||
404 | case AMDGPU_HPD_3: | ||
405 | idx = 2; | ||
406 | break; | ||
407 | case AMDGPU_HPD_4: | ||
408 | idx = 3; | ||
409 | break; | ||
410 | case AMDGPU_HPD_5: | ||
411 | idx = 4; | ||
412 | break; | ||
413 | case AMDGPU_HPD_6: | ||
414 | idx = 5; | ||
415 | break; | ||
416 | default: | ||
417 | return; | 377 | return; |
418 | } | ||
419 | 378 | ||
420 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); | 379 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); |
421 | if (connected) | 380 | if (connected) |
422 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); | 381 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); |
423 | else | 382 | else |
424 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); | 383 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); |
425 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); | 384 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); |
426 | } | 385 | } |
427 | 386 | ||
428 | /** | 387 | /** |
@@ -438,33 +397,12 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) | |||
438 | struct drm_device *dev = adev->ddev; | 397 | struct drm_device *dev = adev->ddev; |
439 | struct drm_connector *connector; | 398 | struct drm_connector *connector; |
440 | u32 tmp; | 399 | u32 tmp; |
441 | int idx; | ||
442 | 400 | ||
443 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 401 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
444 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 402 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
445 | 403 | ||
446 | switch (amdgpu_connector->hpd.hpd) { | 404 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
447 | case AMDGPU_HPD_1: | ||
448 | idx = 0; | ||
449 | break; | ||
450 | case AMDGPU_HPD_2: | ||
451 | idx = 1; | ||
452 | break; | ||
453 | case AMDGPU_HPD_3: | ||
454 | idx = 2; | ||
455 | break; | ||
456 | case AMDGPU_HPD_4: | ||
457 | idx = 3; | ||
458 | break; | ||
459 | case AMDGPU_HPD_5: | ||
460 | idx = 4; | ||
461 | break; | ||
462 | case AMDGPU_HPD_6: | ||
463 | idx = 5; | ||
464 | break; | ||
465 | default: | ||
466 | continue; | 405 | continue; |
467 | } | ||
468 | 406 | ||
469 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || | 407 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
470 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { | 408 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
@@ -473,24 +411,24 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) | |||
473 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 | 411 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
474 | * also avoid interrupt storms during dpms. | 412 | * also avoid interrupt storms during dpms. |
475 | */ | 413 | */ |
476 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); | 414 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
477 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); | 415 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); |
478 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); | 416 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
479 | continue; | 417 | continue; |
480 | } | 418 | } |
481 | 419 | ||
482 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | 420 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
483 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); | 421 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); |
484 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | 422 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
485 | 423 | ||
486 | tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]); | 424 | tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
487 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | 425 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, |
488 | DC_HPD_CONNECT_INT_DELAY, | 426 | DC_HPD_CONNECT_INT_DELAY, |
489 | AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); | 427 | AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); |
490 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | 428 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, |
491 | DC_HPD_DISCONNECT_INT_DELAY, | 429 | DC_HPD_DISCONNECT_INT_DELAY, |
492 | AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); | 430 | AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); |
493 | WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp); | 431 | WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
494 | 432 | ||
495 | dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); | 433 | dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); |
496 | amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); | 434 | amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); |
@@ -510,37 +448,16 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) | |||
510 | struct drm_device *dev = adev->ddev; | 448 | struct drm_device *dev = adev->ddev; |
511 | struct drm_connector *connector; | 449 | struct drm_connector *connector; |
512 | u32 tmp; | 450 | u32 tmp; |
513 | int idx; | ||
514 | 451 | ||
515 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 452 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
516 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 453 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
517 | 454 | ||
518 | switch (amdgpu_connector->hpd.hpd) { | 455 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
519 | case AMDGPU_HPD_1: | ||
520 | idx = 0; | ||
521 | break; | ||
522 | case AMDGPU_HPD_2: | ||
523 | idx = 1; | ||
524 | break; | ||
525 | case AMDGPU_HPD_3: | ||
526 | idx = 2; | ||
527 | break; | ||
528 | case AMDGPU_HPD_4: | ||
529 | idx = 3; | ||
530 | break; | ||
531 | case AMDGPU_HPD_5: | ||
532 | idx = 4; | ||
533 | break; | ||
534 | case AMDGPU_HPD_6: | ||
535 | idx = 5; | ||
536 | break; | ||
537 | default: | ||
538 | continue; | 456 | continue; |
539 | } | ||
540 | 457 | ||
541 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | 458 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
542 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); | 459 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); |
543 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | 460 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
544 | 461 | ||
545 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); | 462 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); |
546 | } | 463 | } |
@@ -3611,7 +3528,7 @@ static int dce_v11_0_set_powergating_state(void *handle, | |||
3611 | return 0; | 3528 | return 0; |
3612 | } | 3529 | } |
3613 | 3530 | ||
3614 | const struct amd_ip_funcs dce_v11_0_ip_funcs = { | 3531 | static const struct amd_ip_funcs dce_v11_0_ip_funcs = { |
3615 | .name = "dce_v11_0", | 3532 | .name = "dce_v11_0", |
3616 | .early_init = dce_v11_0_early_init, | 3533 | .early_init = dce_v11_0_early_init, |
3617 | .late_init = NULL, | 3534 | .late_init = NULL, |
@@ -3941,3 +3858,21 @@ static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev) | |||
3941 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; | 3858 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; |
3942 | adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs; | 3859 | adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs; |
3943 | } | 3860 | } |
3861 | |||
3862 | const struct amdgpu_ip_block_version dce_v11_0_ip_block = | ||
3863 | { | ||
3864 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3865 | .major = 11, | ||
3866 | .minor = 0, | ||
3867 | .rev = 0, | ||
3868 | .funcs = &dce_v11_0_ip_funcs, | ||
3869 | }; | ||
3870 | |||
3871 | const struct amdgpu_ip_block_version dce_v11_2_ip_block = | ||
3872 | { | ||
3873 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3874 | .major = 11, | ||
3875 | .minor = 2, | ||
3876 | .rev = 0, | ||
3877 | .funcs = &dce_v11_0_ip_funcs, | ||
3878 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h index 1f58a65ba2ef..0d878ca3acba 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h | |||
@@ -24,7 +24,8 @@ | |||
24 | #ifndef __DCE_V11_0_H__ | 24 | #ifndef __DCE_V11_0_H__ |
25 | #define __DCE_V11_0_H__ | 25 | #define __DCE_V11_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs dce_v11_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version dce_v11_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version dce_v11_2_ip_block; | ||
28 | 29 | ||
29 | void dce_v11_0_disable_dce(struct amdgpu_device *adev); | 30 | void dce_v11_0_disable_dce(struct amdgpu_device *adev); |
30 | 31 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index b948d6cb1399..44547f951d92 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | |||
@@ -46,6 +46,16 @@ static const u32 crtc_offsets[6] = | |||
46 | SI_CRTC5_REGISTER_OFFSET | 46 | SI_CRTC5_REGISTER_OFFSET |
47 | }; | 47 | }; |
48 | 48 | ||
49 | static const u32 hpd_offsets[] = | ||
50 | { | ||
51 | DC_HPD1_INT_STATUS - DC_HPD1_INT_STATUS, | ||
52 | DC_HPD2_INT_STATUS - DC_HPD1_INT_STATUS, | ||
53 | DC_HPD3_INT_STATUS - DC_HPD1_INT_STATUS, | ||
54 | DC_HPD4_INT_STATUS - DC_HPD1_INT_STATUS, | ||
55 | DC_HPD5_INT_STATUS - DC_HPD1_INT_STATUS, | ||
56 | DC_HPD6_INT_STATUS - DC_HPD1_INT_STATUS, | ||
57 | }; | ||
58 | |||
49 | static const uint32_t dig_offsets[] = { | 59 | static const uint32_t dig_offsets[] = { |
50 | SI_CRTC0_REGISTER_OFFSET, | 60 | SI_CRTC0_REGISTER_OFFSET, |
51 | SI_CRTC1_REGISTER_OFFSET, | 61 | SI_CRTC1_REGISTER_OFFSET, |
@@ -94,15 +104,6 @@ static const struct { | |||
94 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK | 104 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK |
95 | } }; | 105 | } }; |
96 | 106 | ||
97 | static const uint32_t hpd_int_control_offsets[6] = { | ||
98 | DC_HPD1_INT_CONTROL, | ||
99 | DC_HPD2_INT_CONTROL, | ||
100 | DC_HPD3_INT_CONTROL, | ||
101 | DC_HPD4_INT_CONTROL, | ||
102 | DC_HPD5_INT_CONTROL, | ||
103 | DC_HPD6_INT_CONTROL, | ||
104 | }; | ||
105 | |||
106 | static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev, | 107 | static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev, |
107 | u32 block_offset, u32 reg) | 108 | u32 block_offset, u32 reg) |
108 | { | 109 | { |
@@ -257,34 +258,11 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev, | |||
257 | { | 258 | { |
258 | bool connected = false; | 259 | bool connected = false; |
259 | 260 | ||
260 | switch (hpd) { | 261 | if (hpd >= adev->mode_info.num_hpd) |
261 | case AMDGPU_HPD_1: | 262 | return connected; |
262 | if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) | 263 | |
263 | connected = true; | 264 | if (RREG32(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPDx_SENSE) |
264 | break; | 265 | connected = true; |
265 | case AMDGPU_HPD_2: | ||
266 | if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) | ||
267 | connected = true; | ||
268 | break; | ||
269 | case AMDGPU_HPD_3: | ||
270 | if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) | ||
271 | connected = true; | ||
272 | break; | ||
273 | case AMDGPU_HPD_4: | ||
274 | if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) | ||
275 | connected = true; | ||
276 | break; | ||
277 | case AMDGPU_HPD_5: | ||
278 | if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) | ||
279 | connected = true; | ||
280 | break; | ||
281 | case AMDGPU_HPD_6: | ||
282 | if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) | ||
283 | connected = true; | ||
284 | break; | ||
285 | default: | ||
286 | break; | ||
287 | } | ||
288 | 266 | ||
289 | return connected; | 267 | return connected; |
290 | } | 268 | } |
@@ -303,58 +281,15 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev, | |||
303 | u32 tmp; | 281 | u32 tmp; |
304 | bool connected = dce_v6_0_hpd_sense(adev, hpd); | 282 | bool connected = dce_v6_0_hpd_sense(adev, hpd); |
305 | 283 | ||
306 | switch (hpd) { | 284 | if (hpd >= adev->mode_info.num_hpd) |
307 | case AMDGPU_HPD_1: | 285 | return; |
308 | tmp = RREG32(DC_HPD1_INT_CONTROL); | 286 | |
309 | if (connected) | 287 | tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]); |
310 | tmp &= ~DC_HPDx_INT_POLARITY; | 288 | if (connected) |
311 | else | 289 | tmp &= ~DC_HPDx_INT_POLARITY; |
312 | tmp |= DC_HPDx_INT_POLARITY; | 290 | else |
313 | WREG32(DC_HPD1_INT_CONTROL, tmp); | 291 | tmp |= DC_HPDx_INT_POLARITY; |
314 | break; | 292 | WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); |
315 | case AMDGPU_HPD_2: | ||
316 | tmp = RREG32(DC_HPD2_INT_CONTROL); | ||
317 | if (connected) | ||
318 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
319 | else | ||
320 | tmp |= DC_HPDx_INT_POLARITY; | ||
321 | WREG32(DC_HPD2_INT_CONTROL, tmp); | ||
322 | break; | ||
323 | case AMDGPU_HPD_3: | ||
324 | tmp = RREG32(DC_HPD3_INT_CONTROL); | ||
325 | if (connected) | ||
326 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
327 | else | ||
328 | tmp |= DC_HPDx_INT_POLARITY; | ||
329 | WREG32(DC_HPD3_INT_CONTROL, tmp); | ||
330 | break; | ||
331 | case AMDGPU_HPD_4: | ||
332 | tmp = RREG32(DC_HPD4_INT_CONTROL); | ||
333 | if (connected) | ||
334 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
335 | else | ||
336 | tmp |= DC_HPDx_INT_POLARITY; | ||
337 | WREG32(DC_HPD4_INT_CONTROL, tmp); | ||
338 | break; | ||
339 | case AMDGPU_HPD_5: | ||
340 | tmp = RREG32(DC_HPD5_INT_CONTROL); | ||
341 | if (connected) | ||
342 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
343 | else | ||
344 | tmp |= DC_HPDx_INT_POLARITY; | ||
345 | WREG32(DC_HPD5_INT_CONTROL, tmp); | ||
346 | break; | ||
347 | case AMDGPU_HPD_6: | ||
348 | tmp = RREG32(DC_HPD6_INT_CONTROL); | ||
349 | if (connected) | ||
350 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
351 | else | ||
352 | tmp |= DC_HPDx_INT_POLARITY; | ||
353 | WREG32(DC_HPD6_INT_CONTROL, tmp); | ||
354 | break; | ||
355 | default: | ||
356 | break; | ||
357 | } | ||
358 | } | 293 | } |
359 | 294 | ||
360 | /** | 295 | /** |
@@ -369,34 +304,17 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) | |||
369 | { | 304 | { |
370 | struct drm_device *dev = adev->ddev; | 305 | struct drm_device *dev = adev->ddev; |
371 | struct drm_connector *connector; | 306 | struct drm_connector *connector; |
372 | u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | | 307 | u32 tmp; |
373 | DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN; | ||
374 | 308 | ||
375 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 309 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
376 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 310 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
377 | 311 | ||
378 | switch (amdgpu_connector->hpd.hpd) { | 312 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
379 | case AMDGPU_HPD_1: | 313 | continue; |
380 | WREG32(DC_HPD1_CONTROL, tmp); | 314 | |
381 | break; | 315 | tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
382 | case AMDGPU_HPD_2: | 316 | tmp |= DC_HPDx_EN; |
383 | WREG32(DC_HPD2_CONTROL, tmp); | 317 | WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
384 | break; | ||
385 | case AMDGPU_HPD_3: | ||
386 | WREG32(DC_HPD3_CONTROL, tmp); | ||
387 | break; | ||
388 | case AMDGPU_HPD_4: | ||
389 | WREG32(DC_HPD4_CONTROL, tmp); | ||
390 | break; | ||
391 | case AMDGPU_HPD_5: | ||
392 | WREG32(DC_HPD5_CONTROL, tmp); | ||
393 | break; | ||
394 | case AMDGPU_HPD_6: | ||
395 | WREG32(DC_HPD6_CONTROL, tmp); | ||
396 | break; | ||
397 | default: | ||
398 | break; | ||
399 | } | ||
400 | 318 | ||
401 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || | 319 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
402 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { | 320 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
@@ -405,34 +323,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) | |||
405 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 | 323 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
406 | * also avoid interrupt storms during dpms. | 324 | * also avoid interrupt storms during dpms. |
407 | */ | 325 | */ |
408 | u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; | 326 | tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
409 | 327 | tmp &= ~DC_HPDx_INT_EN; | |
410 | switch (amdgpu_connector->hpd.hpd) { | 328 | WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
411 | case AMDGPU_HPD_1: | ||
412 | dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL; | ||
413 | break; | ||
414 | case AMDGPU_HPD_2: | ||
415 | dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL; | ||
416 | break; | ||
417 | case AMDGPU_HPD_3: | ||
418 | dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL; | ||
419 | break; | ||
420 | case AMDGPU_HPD_4: | ||
421 | dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL; | ||
422 | break; | ||
423 | case AMDGPU_HPD_5: | ||
424 | dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL; | ||
425 | break; | ||
426 | case AMDGPU_HPD_6: | ||
427 | dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL; | ||
428 | break; | ||
429 | default: | ||
430 | continue; | ||
431 | } | ||
432 | |||
433 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | ||
434 | dc_hpd_int_cntl &= ~DC_HPDx_INT_EN; | ||
435 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | ||
436 | continue; | 329 | continue; |
437 | } | 330 | } |
438 | 331 | ||
@@ -454,32 +347,18 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev) | |||
454 | { | 347 | { |
455 | struct drm_device *dev = adev->ddev; | 348 | struct drm_device *dev = adev->ddev; |
456 | struct drm_connector *connector; | 349 | struct drm_connector *connector; |
350 | u32 tmp; | ||
457 | 351 | ||
458 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 352 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
459 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 353 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
460 | 354 | ||
461 | switch (amdgpu_connector->hpd.hpd) { | 355 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
462 | case AMDGPU_HPD_1: | 356 | continue; |
463 | WREG32(DC_HPD1_CONTROL, 0); | 357 | |
464 | break; | 358 | tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
465 | case AMDGPU_HPD_2: | 359 | tmp &= ~DC_HPDx_EN; |
466 | WREG32(DC_HPD2_CONTROL, 0); | 360 | WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0); |
467 | break; | 361 | |
468 | case AMDGPU_HPD_3: | ||
469 | WREG32(DC_HPD3_CONTROL, 0); | ||
470 | break; | ||
471 | case AMDGPU_HPD_4: | ||
472 | WREG32(DC_HPD4_CONTROL, 0); | ||
473 | break; | ||
474 | case AMDGPU_HPD_5: | ||
475 | WREG32(DC_HPD5_CONTROL, 0); | ||
476 | break; | ||
477 | case AMDGPU_HPD_6: | ||
478 | WREG32(DC_HPD6_CONTROL, 0); | ||
479 | break; | ||
480 | default: | ||
481 | break; | ||
482 | } | ||
483 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); | 362 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); |
484 | } | 363 | } |
485 | } | 364 | } |
@@ -611,12 +490,55 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev, | |||
611 | static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev, | 490 | static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev, |
612 | bool render) | 491 | bool render) |
613 | { | 492 | { |
614 | if (!render) | 493 | if (!render) |
615 | WREG32(R_000300_VGA_RENDER_CONTROL, | 494 | WREG32(R_000300_VGA_RENDER_CONTROL, |
616 | RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); | 495 | RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); |
617 | 496 | ||
618 | } | 497 | } |
619 | 498 | ||
499 | static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev) | ||
500 | { | ||
501 | int num_crtc = 0; | ||
502 | |||
503 | switch (adev->asic_type) { | ||
504 | case CHIP_TAHITI: | ||
505 | case CHIP_PITCAIRN: | ||
506 | case CHIP_VERDE: | ||
507 | num_crtc = 6; | ||
508 | break; | ||
509 | case CHIP_OLAND: | ||
510 | num_crtc = 2; | ||
511 | break; | ||
512 | default: | ||
513 | num_crtc = 0; | ||
514 | } | ||
515 | return num_crtc; | ||
516 | } | ||
517 | |||
518 | void dce_v6_0_disable_dce(struct amdgpu_device *adev) | ||
519 | { | ||
520 | /*Disable VGA render and enabled crtc, if has DCE engine*/ | ||
521 | if (amdgpu_atombios_has_dce_engine_info(adev)) { | ||
522 | u32 tmp; | ||
523 | int crtc_enabled, i; | ||
524 | |||
525 | dce_v6_0_set_vga_render_state(adev, false); | ||
526 | |||
527 | /*Disable crtc*/ | ||
528 | for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) { | ||
529 | crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & | ||
530 | EVERGREEN_CRTC_MASTER_EN; | ||
531 | if (crtc_enabled) { | ||
532 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); | ||
533 | tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); | ||
534 | tmp &= ~EVERGREEN_CRTC_MASTER_EN; | ||
535 | WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); | ||
536 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); | ||
537 | } | ||
538 | } | ||
539 | } | ||
540 | } | ||
541 | |||
620 | static void dce_v6_0_program_fmt(struct drm_encoder *encoder) | 542 | static void dce_v6_0_program_fmt(struct drm_encoder *encoder) |
621 | { | 543 | { |
622 | 544 | ||
@@ -2338,21 +2260,20 @@ static int dce_v6_0_early_init(void *handle) | |||
2338 | dce_v6_0_set_display_funcs(adev); | 2260 | dce_v6_0_set_display_funcs(adev); |
2339 | dce_v6_0_set_irq_funcs(adev); | 2261 | dce_v6_0_set_irq_funcs(adev); |
2340 | 2262 | ||
2263 | adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev); | ||
2264 | |||
2341 | switch (adev->asic_type) { | 2265 | switch (adev->asic_type) { |
2342 | case CHIP_TAHITI: | 2266 | case CHIP_TAHITI: |
2343 | case CHIP_PITCAIRN: | 2267 | case CHIP_PITCAIRN: |
2344 | case CHIP_VERDE: | 2268 | case CHIP_VERDE: |
2345 | adev->mode_info.num_crtc = 6; | ||
2346 | adev->mode_info.num_hpd = 6; | 2269 | adev->mode_info.num_hpd = 6; |
2347 | adev->mode_info.num_dig = 6; | 2270 | adev->mode_info.num_dig = 6; |
2348 | break; | 2271 | break; |
2349 | case CHIP_OLAND: | 2272 | case CHIP_OLAND: |
2350 | adev->mode_info.num_crtc = 2; | ||
2351 | adev->mode_info.num_hpd = 2; | 2273 | adev->mode_info.num_hpd = 2; |
2352 | adev->mode_info.num_dig = 2; | 2274 | adev->mode_info.num_dig = 2; |
2353 | break; | 2275 | break; |
2354 | default: | 2276 | default: |
2355 | /* FIXME: not supported yet */ | ||
2356 | return -EINVAL; | 2277 | return -EINVAL; |
2357 | } | 2278 | } |
2358 | 2279 | ||
@@ -2588,42 +2509,23 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev, | |||
2588 | unsigned type, | 2509 | unsigned type, |
2589 | enum amdgpu_interrupt_state state) | 2510 | enum amdgpu_interrupt_state state) |
2590 | { | 2511 | { |
2591 | u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; | 2512 | u32 dc_hpd_int_cntl; |
2592 | 2513 | ||
2593 | switch (type) { | 2514 | if (type >= adev->mode_info.num_hpd) { |
2594 | case AMDGPU_HPD_1: | ||
2595 | dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL; | ||
2596 | break; | ||
2597 | case AMDGPU_HPD_2: | ||
2598 | dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL; | ||
2599 | break; | ||
2600 | case AMDGPU_HPD_3: | ||
2601 | dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL; | ||
2602 | break; | ||
2603 | case AMDGPU_HPD_4: | ||
2604 | dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL; | ||
2605 | break; | ||
2606 | case AMDGPU_HPD_5: | ||
2607 | dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL; | ||
2608 | break; | ||
2609 | case AMDGPU_HPD_6: | ||
2610 | dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL; | ||
2611 | break; | ||
2612 | default: | ||
2613 | DRM_DEBUG("invalid hdp %d\n", type); | 2515 | DRM_DEBUG("invalid hdp %d\n", type); |
2614 | return 0; | 2516 | return 0; |
2615 | } | 2517 | } |
2616 | 2518 | ||
2617 | switch (state) { | 2519 | switch (state) { |
2618 | case AMDGPU_IRQ_STATE_DISABLE: | 2520 | case AMDGPU_IRQ_STATE_DISABLE: |
2619 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | 2521 | dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]); |
2620 | dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); | 2522 | dc_hpd_int_cntl &= ~DC_HPDx_INT_EN; |
2621 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | 2523 | WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); |
2622 | break; | 2524 | break; |
2623 | case AMDGPU_IRQ_STATE_ENABLE: | 2525 | case AMDGPU_IRQ_STATE_ENABLE: |
2624 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | 2526 | dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]); |
2625 | dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); | 2527 | dc_hpd_int_cntl |= DC_HPDx_INT_EN; |
2626 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | 2528 | WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); |
2627 | break; | 2529 | break; |
2628 | default: | 2530 | default: |
2629 | break; | 2531 | break; |
@@ -2796,7 +2698,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev, | |||
2796 | struct amdgpu_irq_src *source, | 2698 | struct amdgpu_irq_src *source, |
2797 | struct amdgpu_iv_entry *entry) | 2699 | struct amdgpu_iv_entry *entry) |
2798 | { | 2700 | { |
2799 | uint32_t disp_int, mask, int_control, tmp; | 2701 | uint32_t disp_int, mask, tmp; |
2800 | unsigned hpd; | 2702 | unsigned hpd; |
2801 | 2703 | ||
2802 | if (entry->src_data >= adev->mode_info.num_hpd) { | 2704 | if (entry->src_data >= adev->mode_info.num_hpd) { |
@@ -2807,12 +2709,11 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev, | |||
2807 | hpd = entry->src_data; | 2709 | hpd = entry->src_data; |
2808 | disp_int = RREG32(interrupt_status_offsets[hpd].reg); | 2710 | disp_int = RREG32(interrupt_status_offsets[hpd].reg); |
2809 | mask = interrupt_status_offsets[hpd].hpd; | 2711 | mask = interrupt_status_offsets[hpd].hpd; |
2810 | int_control = hpd_int_control_offsets[hpd]; | ||
2811 | 2712 | ||
2812 | if (disp_int & mask) { | 2713 | if (disp_int & mask) { |
2813 | tmp = RREG32(int_control); | 2714 | tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]); |
2814 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; | 2715 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; |
2815 | WREG32(int_control, tmp); | 2716 | WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); |
2816 | schedule_work(&adev->hotplug_work); | 2717 | schedule_work(&adev->hotplug_work); |
2817 | DRM_INFO("IH: HPD%d\n", hpd + 1); | 2718 | DRM_INFO("IH: HPD%d\n", hpd + 1); |
2818 | } | 2719 | } |
@@ -2833,7 +2734,7 @@ static int dce_v6_0_set_powergating_state(void *handle, | |||
2833 | return 0; | 2734 | return 0; |
2834 | } | 2735 | } |
2835 | 2736 | ||
2836 | const struct amd_ip_funcs dce_v6_0_ip_funcs = { | 2737 | static const struct amd_ip_funcs dce_v6_0_ip_funcs = { |
2837 | .name = "dce_v6_0", | 2738 | .name = "dce_v6_0", |
2838 | .early_init = dce_v6_0_early_init, | 2739 | .early_init = dce_v6_0_early_init, |
2839 | .late_init = NULL, | 2740 | .late_init = NULL, |
@@ -3174,3 +3075,21 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev) | |||
3174 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; | 3075 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; |
3175 | adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs; | 3076 | adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs; |
3176 | } | 3077 | } |
3078 | |||
3079 | const struct amdgpu_ip_block_version dce_v6_0_ip_block = | ||
3080 | { | ||
3081 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3082 | .major = 6, | ||
3083 | .minor = 0, | ||
3084 | .rev = 0, | ||
3085 | .funcs = &dce_v6_0_ip_funcs, | ||
3086 | }; | ||
3087 | |||
3088 | const struct amdgpu_ip_block_version dce_v6_4_ip_block = | ||
3089 | { | ||
3090 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3091 | .major = 6, | ||
3092 | .minor = 4, | ||
3093 | .rev = 0, | ||
3094 | .funcs = &dce_v6_0_ip_funcs, | ||
3095 | }; | ||
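dce_v6_0.c also gains a dce_v6_0_get_num_crtc() helper so the CRTC count is derived from the ASIC type in one place (early_init and the new dce_v6_0_disable_dce() both rely on it), and it exports two amdgpu_ip_block_version descriptors (6.0 and 6.4) that share the same dce_v6_0_ip_funcs and differ only in version numbers. A hedged sketch of that shared-funcs arrangement, with invented chip ids and simplified structs:

#include <stdio.h>

enum demo_chip { DEMO_CHIP_TAHITI, DEMO_CHIP_OLAND, DEMO_CHIP_UNKNOWN };

struct demo_funcs    { const char *name; };
struct demo_ip_block { unsigned major, minor; const struct demo_funcs *funcs; };

/* Same shape as dce_v6_0_get_num_crtc(): one switch owns the per-ASIC count. */
static int demo_get_num_crtc(enum demo_chip chip)
{
	switch (chip) {
	case DEMO_CHIP_TAHITI: return 6;
	case DEMO_CHIP_OLAND:  return 2;
	default:               return 0;
	}
}

static const struct demo_funcs demo_dce_funcs = { .name = "demo_dce" };

/* Two versioned descriptors reuse one implementation, as dce_v6_0/dce_v6_4 do. */
static const struct demo_ip_block demo_dce_v6_0 = { 6, 0, &demo_dce_funcs };
static const struct demo_ip_block demo_dce_v6_4 = { 6, 4, &demo_dce_funcs };

int main(void)
{
	printf("%s v%u.%u, %d crtcs\n", demo_dce_v6_0.funcs->name,
	       demo_dce_v6_0.major, demo_dce_v6_0.minor,
	       demo_get_num_crtc(DEMO_CHIP_TAHITI));
	printf("%s v%u.%u, %d crtcs\n", demo_dce_v6_4.funcs->name,
	       demo_dce_v6_4.major, demo_dce_v6_4.minor,
	       demo_get_num_crtc(DEMO_CHIP_OLAND));
	return 0;
}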
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h index 6a5528105bb6..7b546b596de1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h | |||
@@ -24,6 +24,9 @@ | |||
24 | #ifndef __DCE_V6_0_H__ | 24 | #ifndef __DCE_V6_0_H__ |
25 | #define __DCE_V6_0_H__ | 25 | #define __DCE_V6_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs dce_v6_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version dce_v6_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version dce_v6_4_ip_block; | ||
29 | |||
30 | void dce_v6_0_disable_dce(struct amdgpu_device *adev); | ||
28 | 31 | ||
29 | #endif | 32 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 5966166ec94c..979aedf4b74d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "atombios_encoders.h" | 31 | #include "atombios_encoders.h" |
32 | #include "amdgpu_pll.h" | 32 | #include "amdgpu_pll.h" |
33 | #include "amdgpu_connectors.h" | 33 | #include "amdgpu_connectors.h" |
34 | #include "dce_v8_0.h" | ||
34 | 35 | ||
35 | #include "dce/dce_8_0_d.h" | 36 | #include "dce/dce_8_0_d.h" |
36 | #include "dce/dce_8_0_sh_mask.h" | 37 | #include "dce/dce_8_0_sh_mask.h" |
@@ -56,6 +57,16 @@ static const u32 crtc_offsets[6] = | |||
56 | CRTC5_REGISTER_OFFSET | 57 | CRTC5_REGISTER_OFFSET |
57 | }; | 58 | }; |
58 | 59 | ||
60 | static const u32 hpd_offsets[] = | ||
61 | { | ||
62 | HPD0_REGISTER_OFFSET, | ||
63 | HPD1_REGISTER_OFFSET, | ||
64 | HPD2_REGISTER_OFFSET, | ||
65 | HPD3_REGISTER_OFFSET, | ||
66 | HPD4_REGISTER_OFFSET, | ||
67 | HPD5_REGISTER_OFFSET | ||
68 | }; | ||
69 | |||
59 | static const uint32_t dig_offsets[] = { | 70 | static const uint32_t dig_offsets[] = { |
60 | CRTC0_REGISTER_OFFSET, | 71 | CRTC0_REGISTER_OFFSET, |
61 | CRTC1_REGISTER_OFFSET, | 72 | CRTC1_REGISTER_OFFSET, |
@@ -104,15 +115,6 @@ static const struct { | |||
104 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK | 115 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK |
105 | } }; | 116 | } }; |
106 | 117 | ||
107 | static const uint32_t hpd_int_control_offsets[6] = { | ||
108 | mmDC_HPD1_INT_CONTROL, | ||
109 | mmDC_HPD2_INT_CONTROL, | ||
110 | mmDC_HPD3_INT_CONTROL, | ||
111 | mmDC_HPD4_INT_CONTROL, | ||
112 | mmDC_HPD5_INT_CONTROL, | ||
113 | mmDC_HPD6_INT_CONTROL, | ||
114 | }; | ||
115 | |||
116 | static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev, | 118 | static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev, |
117 | u32 block_offset, u32 reg) | 119 | u32 block_offset, u32 reg) |
118 | { | 120 | { |
@@ -278,34 +280,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev, | |||
278 | { | 280 | { |
279 | bool connected = false; | 281 | bool connected = false; |
280 | 282 | ||
281 | switch (hpd) { | 283 | if (hpd >= adev->mode_info.num_hpd) |
282 | case AMDGPU_HPD_1: | 284 | return connected; |
283 | if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK) | 285 | |
284 | connected = true; | 286 | if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & |
285 | break; | 287 | DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK) |
286 | case AMDGPU_HPD_2: | 288 | connected = true; |
287 | if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK) | ||
288 | connected = true; | ||
289 | break; | ||
290 | case AMDGPU_HPD_3: | ||
291 | if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK) | ||
292 | connected = true; | ||
293 | break; | ||
294 | case AMDGPU_HPD_4: | ||
295 | if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK) | ||
296 | connected = true; | ||
297 | break; | ||
298 | case AMDGPU_HPD_5: | ||
299 | if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK) | ||
300 | connected = true; | ||
301 | break; | ||
302 | case AMDGPU_HPD_6: | ||
303 | if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK) | ||
304 | connected = true; | ||
305 | break; | ||
306 | default: | ||
307 | break; | ||
308 | } | ||
309 | 289 | ||
310 | return connected; | 290 | return connected; |
311 | } | 291 | } |
@@ -324,58 +304,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev, | |||
324 | u32 tmp; | 304 | u32 tmp; |
325 | bool connected = dce_v8_0_hpd_sense(adev, hpd); | 305 | bool connected = dce_v8_0_hpd_sense(adev, hpd); |
326 | 306 | ||
327 | switch (hpd) { | 307 | if (hpd >= adev->mode_info.num_hpd) |
328 | case AMDGPU_HPD_1: | 308 | return; |
329 | tmp = RREG32(mmDC_HPD1_INT_CONTROL); | 309 | |
330 | if (connected) | 310 | tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); |
331 | tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; | 311 | if (connected) |
332 | else | 312 | tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; |
333 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; | 313 | else |
334 | WREG32(mmDC_HPD1_INT_CONTROL, tmp); | 314 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; |
335 | break; | 315 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); |
336 | case AMDGPU_HPD_2: | ||
337 | tmp = RREG32(mmDC_HPD2_INT_CONTROL); | ||
338 | if (connected) | ||
339 | tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK; | ||
340 | else | ||
341 | tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK; | ||
342 | WREG32(mmDC_HPD2_INT_CONTROL, tmp); | ||
343 | break; | ||
344 | case AMDGPU_HPD_3: | ||
345 | tmp = RREG32(mmDC_HPD3_INT_CONTROL); | ||
346 | if (connected) | ||
347 | tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK; | ||
348 | else | ||
349 | tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK; | ||
350 | WREG32(mmDC_HPD3_INT_CONTROL, tmp); | ||
351 | break; | ||
352 | case AMDGPU_HPD_4: | ||
353 | tmp = RREG32(mmDC_HPD4_INT_CONTROL); | ||
354 | if (connected) | ||
355 | tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK; | ||
356 | else | ||
357 | tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK; | ||
358 | WREG32(mmDC_HPD4_INT_CONTROL, tmp); | ||
359 | break; | ||
360 | case AMDGPU_HPD_5: | ||
361 | tmp = RREG32(mmDC_HPD5_INT_CONTROL); | ||
362 | if (connected) | ||
363 | tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK; | ||
364 | else | ||
365 | tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK; | ||
366 | WREG32(mmDC_HPD5_INT_CONTROL, tmp); | ||
367 | break; | ||
368 | case AMDGPU_HPD_6: | ||
369 | tmp = RREG32(mmDC_HPD6_INT_CONTROL); | ||
370 | if (connected) | ||
371 | tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK; | ||
372 | else | ||
373 | tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK; | ||
374 | WREG32(mmDC_HPD6_INT_CONTROL, tmp); | ||
375 | break; | ||
376 | default: | ||
377 | break; | ||
378 | } | ||
379 | } | 316 | } |
380 | 317 | ||
381 | /** | 318 | /** |
@@ -390,35 +327,17 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) | |||
390 | { | 327 | { |
391 | struct drm_device *dev = adev->ddev; | 328 | struct drm_device *dev = adev->ddev; |
392 | struct drm_connector *connector; | 329 | struct drm_connector *connector; |
393 | u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) | | 330 | u32 tmp; |
394 | (0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) | | ||
395 | DC_HPD1_CONTROL__DC_HPD1_EN_MASK; | ||
396 | 331 | ||
397 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 332 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
398 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 333 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
399 | 334 | ||
400 | switch (amdgpu_connector->hpd.hpd) { | 335 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
401 | case AMDGPU_HPD_1: | 336 | continue; |
402 | WREG32(mmDC_HPD1_CONTROL, tmp); | 337 | |
403 | break; | 338 | tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
404 | case AMDGPU_HPD_2: | 339 | tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK; |
405 | WREG32(mmDC_HPD2_CONTROL, tmp); | 340 | WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
406 | break; | ||
407 | case AMDGPU_HPD_3: | ||
408 | WREG32(mmDC_HPD3_CONTROL, tmp); | ||
409 | break; | ||
410 | case AMDGPU_HPD_4: | ||
411 | WREG32(mmDC_HPD4_CONTROL, tmp); | ||
412 | break; | ||
413 | case AMDGPU_HPD_5: | ||
414 | WREG32(mmDC_HPD5_CONTROL, tmp); | ||
415 | break; | ||
416 | case AMDGPU_HPD_6: | ||
417 | WREG32(mmDC_HPD6_CONTROL, tmp); | ||
418 | break; | ||
419 | default: | ||
420 | break; | ||
421 | } | ||
422 | 341 | ||
423 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || | 342 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
424 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { | 343 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
@@ -427,34 +346,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) | |||
427 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 | 346 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
428 | * also avoid interrupt storms during dpms. | 347 | * also avoid interrupt storms during dpms. |
429 | */ | 348 | */ |
430 | u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; | 349 | tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
431 | 350 | tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; | |
432 | switch (amdgpu_connector->hpd.hpd) { | 351 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
433 | case AMDGPU_HPD_1: | ||
434 | dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL; | ||
435 | break; | ||
436 | case AMDGPU_HPD_2: | ||
437 | dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL; | ||
438 | break; | ||
439 | case AMDGPU_HPD_3: | ||
440 | dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL; | ||
441 | break; | ||
442 | case AMDGPU_HPD_4: | ||
443 | dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL; | ||
444 | break; | ||
445 | case AMDGPU_HPD_5: | ||
446 | dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL; | ||
447 | break; | ||
448 | case AMDGPU_HPD_6: | ||
449 | dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL; | ||
450 | break; | ||
451 | default: | ||
452 | continue; | ||
453 | } | ||
454 | |||
455 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | ||
456 | dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; | ||
457 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | ||
458 | continue; | 352 | continue; |
459 | } | 353 | } |
460 | 354 | ||
@@ -475,32 +369,18 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev) | |||
475 | { | 369 | { |
476 | struct drm_device *dev = adev->ddev; | 370 | struct drm_device *dev = adev->ddev; |
477 | struct drm_connector *connector; | 371 | struct drm_connector *connector; |
372 | u32 tmp; | ||
478 | 373 | ||
479 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 374 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
480 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 375 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
481 | 376 | ||
482 | switch (amdgpu_connector->hpd.hpd) { | 377 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
483 | case AMDGPU_HPD_1: | 378 | continue; |
484 | WREG32(mmDC_HPD1_CONTROL, 0); | 379 | |
485 | break; | 380 | tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
486 | case AMDGPU_HPD_2: | 381 | tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK; |
487 | WREG32(mmDC_HPD2_CONTROL, 0); | 382 | WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0); |
488 | break; | 383 | |
489 | case AMDGPU_HPD_3: | ||
490 | WREG32(mmDC_HPD3_CONTROL, 0); | ||
491 | break; | ||
492 | case AMDGPU_HPD_4: | ||
493 | WREG32(mmDC_HPD4_CONTROL, 0); | ||
494 | break; | ||
495 | case AMDGPU_HPD_5: | ||
496 | WREG32(mmDC_HPD5_CONTROL, 0); | ||
497 | break; | ||
498 | case AMDGPU_HPD_6: | ||
499 | WREG32(mmDC_HPD6_CONTROL, 0); | ||
500 | break; | ||
501 | default: | ||
502 | break; | ||
503 | } | ||
504 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); | 384 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); |
505 | } | 385 | } |
506 | } | 386 | } |
@@ -3204,42 +3084,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev, | |||
3204 | unsigned type, | 3084 | unsigned type, |
3205 | enum amdgpu_interrupt_state state) | 3085 | enum amdgpu_interrupt_state state) |
3206 | { | 3086 | { |
3207 | u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; | 3087 | u32 dc_hpd_int_cntl; |
3208 | 3088 | ||
3209 | switch (type) { | 3089 | if (type >= adev->mode_info.num_hpd) { |
3210 | case AMDGPU_HPD_1: | ||
3211 | dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL; | ||
3212 | break; | ||
3213 | case AMDGPU_HPD_2: | ||
3214 | dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL; | ||
3215 | break; | ||
3216 | case AMDGPU_HPD_3: | ||
3217 | dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL; | ||
3218 | break; | ||
3219 | case AMDGPU_HPD_4: | ||
3220 | dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL; | ||
3221 | break; | ||
3222 | case AMDGPU_HPD_5: | ||
3223 | dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL; | ||
3224 | break; | ||
3225 | case AMDGPU_HPD_6: | ||
3226 | dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL; | ||
3227 | break; | ||
3228 | default: | ||
3229 | DRM_DEBUG("invalid hdp %d\n", type); | 3090 | DRM_DEBUG("invalid hdp %d\n", type); |
3230 | return 0; | 3091 | return 0; |
3231 | } | 3092 | } |
3232 | 3093 | ||
3233 | switch (state) { | 3094 | switch (state) { |
3234 | case AMDGPU_IRQ_STATE_DISABLE: | 3095 | case AMDGPU_IRQ_STATE_DISABLE: |
3235 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | 3096 | dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); |
3236 | dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; | 3097 | dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; |
3237 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | 3098 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); |
3238 | break; | 3099 | break; |
3239 | case AMDGPU_IRQ_STATE_ENABLE: | 3100 | case AMDGPU_IRQ_STATE_ENABLE: |
3240 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | 3101 | dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); |
3241 | dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; | 3102 | dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; |
3242 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | 3103 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); |
3243 | break; | 3104 | break; |
3244 | default: | 3105 | default: |
3245 | break; | 3106 | break; |
@@ -3412,7 +3273,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, | |||
3412 | struct amdgpu_irq_src *source, | 3273 | struct amdgpu_irq_src *source, |
3413 | struct amdgpu_iv_entry *entry) | 3274 | struct amdgpu_iv_entry *entry) |
3414 | { | 3275 | { |
3415 | uint32_t disp_int, mask, int_control, tmp; | 3276 | uint32_t disp_int, mask, tmp; |
3416 | unsigned hpd; | 3277 | unsigned hpd; |
3417 | 3278 | ||
3418 | if (entry->src_data >= adev->mode_info.num_hpd) { | 3279 | if (entry->src_data >= adev->mode_info.num_hpd) { |
@@ -3423,12 +3284,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, | |||
3423 | hpd = entry->src_data; | 3284 | hpd = entry->src_data; |
3424 | disp_int = RREG32(interrupt_status_offsets[hpd].reg); | 3285 | disp_int = RREG32(interrupt_status_offsets[hpd].reg); |
3425 | mask = interrupt_status_offsets[hpd].hpd; | 3286 | mask = interrupt_status_offsets[hpd].hpd; |
3426 | int_control = hpd_int_control_offsets[hpd]; | ||
3427 | 3287 | ||
3428 | if (disp_int & mask) { | 3288 | if (disp_int & mask) { |
3429 | tmp = RREG32(int_control); | 3289 | tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); |
3430 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; | 3290 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; |
3431 | WREG32(int_control, tmp); | 3291 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); |
3432 | schedule_work(&adev->hotplug_work); | 3292 | schedule_work(&adev->hotplug_work); |
3433 | DRM_DEBUG("IH: HPD%d\n", hpd + 1); | 3293 | DRM_DEBUG("IH: HPD%d\n", hpd + 1); |
3434 | } | 3294 | } |
@@ -3449,7 +3309,7 @@ static int dce_v8_0_set_powergating_state(void *handle, | |||
3449 | return 0; | 3309 | return 0; |
3450 | } | 3310 | } |
3451 | 3311 | ||
3452 | const struct amd_ip_funcs dce_v8_0_ip_funcs = { | 3312 | static const struct amd_ip_funcs dce_v8_0_ip_funcs = { |
3453 | .name = "dce_v8_0", | 3313 | .name = "dce_v8_0", |
3454 | .early_init = dce_v8_0_early_init, | 3314 | .early_init = dce_v8_0_early_init, |
3455 | .late_init = NULL, | 3315 | .late_init = NULL, |
@@ -3779,3 +3639,48 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev) | |||
3779 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; | 3639 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; |
3780 | adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs; | 3640 | adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs; |
3781 | } | 3641 | } |
3642 | |||
3643 | const struct amdgpu_ip_block_version dce_v8_0_ip_block = | ||
3644 | { | ||
3645 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3646 | .major = 8, | ||
3647 | .minor = 0, | ||
3648 | .rev = 0, | ||
3649 | .funcs = &dce_v8_0_ip_funcs, | ||
3650 | }; | ||
3651 | |||
3652 | const struct amdgpu_ip_block_version dce_v8_1_ip_block = | ||
3653 | { | ||
3654 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3655 | .major = 8, | ||
3656 | .minor = 1, | ||
3657 | .rev = 0, | ||
3658 | .funcs = &dce_v8_0_ip_funcs, | ||
3659 | }; | ||
3660 | |||
3661 | const struct amdgpu_ip_block_version dce_v8_2_ip_block = | ||
3662 | { | ||
3663 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3664 | .major = 8, | ||
3665 | .minor = 2, | ||
3666 | .rev = 0, | ||
3667 | .funcs = &dce_v8_0_ip_funcs, | ||
3668 | }; | ||
3669 | |||
3670 | const struct amdgpu_ip_block_version dce_v8_3_ip_block = | ||
3671 | { | ||
3672 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3673 | .major = 8, | ||
3674 | .minor = 3, | ||
3675 | .rev = 0, | ||
3676 | .funcs = &dce_v8_0_ip_funcs, | ||
3677 | }; | ||
3678 | |||
3679 | const struct amdgpu_ip_block_version dce_v8_5_ip_block = | ||
3680 | { | ||
3681 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3682 | .major = 8, | ||
3683 | .minor = 5, | ||
3684 | .rev = 0, | ||
3685 | .funcs = &dce_v8_0_ip_funcs, | ||
3686 | }; | ||
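The dce_v8_0.c hunks above all apply the same transformation: every six-way switch over AMDGPU_HPD_1..AMDGPU_HPD_6 becomes a bounds check against adev->mode_info.num_hpd followed by an access to the HPD1 register plus a per-pin entry of hpd_offsets[] (that table is defined earlier in the file, outside the quoted hunks). A minimal sketch of the resulting shape, using only symbols that appear in the hunks; the function name is illustrative, not part of the patch:

	static bool hpd_sense_sketch(struct amdgpu_device *adev, unsigned hpd)
	{
		if (hpd >= adev->mode_info.num_hpd)
			return false;	/* pin not wired up on this ASIC */

		/* HPD2..HPD6 registers sit at fixed offsets from the HPD1 ones */
		return RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
		       DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK;
	}

The same base-plus-offset addressing is reused for mmDC_HPD1_CONTROL and mmDC_HPD1_INT_CONTROL in hpd_init/hpd_fini and in the interrupt paths, which is what lets the hpd_int_control_offsets[] table be deleted.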
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h index 7d0770c3a49b..13b802dd946a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h | |||
@@ -24,7 +24,11 @@ | |||
24 | #ifndef __DCE_V8_0_H__ | 24 | #ifndef __DCE_V8_0_H__ |
25 | #define __DCE_V8_0_H__ | 25 | #define __DCE_V8_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs dce_v8_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version dce_v8_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version dce_v8_1_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version dce_v8_2_ip_block; | ||
30 | extern const struct amdgpu_ip_block_version dce_v8_3_ip_block; | ||
31 | extern const struct amdgpu_ip_block_version dce_v8_5_ip_block; | ||
28 | 32 | ||
29 | void dce_v8_0_disable_dce(struct amdgpu_device *adev); | 33 | void dce_v8_0_disable_dce(struct amdgpu_device *adev); |
30 | 34 | ||
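dce_v8_0.h now exports per-revision amdgpu_ip_block_version descriptors (8.0 through 8.5, all sharing dce_v8_0_ip_funcs, which becomes static) instead of the raw amd_ip_funcs table. The consumers are the SoC init files (cik.c and friends), which are not part of the hunks quoted here; presumably each ASIC registers its matching descriptor with a helper along the following lines (helper name assumed from the surrounding series, not shown in this diff):

	/* sketch only: amdgpu_ip_block_add() is assumed, not quoted in these hunks */
	amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);	/* descriptor picked per ASIC */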
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index c2bd9f045532..cc85676a68d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c | |||
@@ -27,6 +27,9 @@ | |||
27 | #include "atom.h" | 27 | #include "atom.h" |
28 | #include "amdgpu_pll.h" | 28 | #include "amdgpu_pll.h" |
29 | #include "amdgpu_connectors.h" | 29 | #include "amdgpu_connectors.h" |
30 | #ifdef CONFIG_DRM_AMDGPU_SI | ||
31 | #include "dce_v6_0.h" | ||
32 | #endif | ||
30 | #ifdef CONFIG_DRM_AMDGPU_CIK | 33 | #ifdef CONFIG_DRM_AMDGPU_CIK |
31 | #include "dce_v8_0.h" | 34 | #include "dce_v8_0.h" |
32 | #endif | 35 | #endif |
@@ -34,11 +37,13 @@ | |||
34 | #include "dce_v11_0.h" | 37 | #include "dce_v11_0.h" |
35 | #include "dce_virtual.h" | 38 | #include "dce_virtual.h" |
36 | 39 | ||
40 | #define DCE_VIRTUAL_VBLANK_PERIOD 16666666 | ||
41 | |||
42 | |||
37 | static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); | 43 | static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); |
38 | static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); | 44 | static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); |
39 | static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, | 45 | static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, |
40 | struct amdgpu_irq_src *source, | 46 | int index); |
41 | struct amdgpu_iv_entry *entry); | ||
42 | 47 | ||
43 | /** | 48 | /** |
44 | * dce_virtual_vblank_wait - vblank wait asic callback. | 49 | * dce_virtual_vblank_wait - vblank wait asic callback. |
@@ -99,6 +104,14 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev, | |||
99 | struct amdgpu_mode_mc_save *save) | 104 | struct amdgpu_mode_mc_save *save) |
100 | { | 105 | { |
101 | switch (adev->asic_type) { | 106 | switch (adev->asic_type) { |
107 | #ifdef CONFIG_DRM_AMDGPU_SI | ||
108 | case CHIP_TAHITI: | ||
109 | case CHIP_PITCAIRN: | ||
110 | case CHIP_VERDE: | ||
111 | case CHIP_OLAND: | ||
112 | dce_v6_0_disable_dce(adev); | ||
113 | break; | ||
114 | #endif | ||
102 | #ifdef CONFIG_DRM_AMDGPU_CIK | 115 | #ifdef CONFIG_DRM_AMDGPU_CIK |
103 | case CHIP_BONAIRE: | 116 | case CHIP_BONAIRE: |
104 | case CHIP_HAWAII: | 117 | case CHIP_HAWAII: |
@@ -119,6 +132,9 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev, | |||
119 | dce_v11_0_disable_dce(adev); | 132 | dce_v11_0_disable_dce(adev); |
120 | break; | 133 | break; |
121 | case CHIP_TOPAZ: | 134 | case CHIP_TOPAZ: |
135 | #ifdef CONFIG_DRM_AMDGPU_SI | ||
136 | case CHIP_HAINAN: | ||
137 | #endif | ||
122 | /* no DCE */ | 138 | /* no DCE */ |
123 | return; | 139 | return; |
124 | default: | 140 | default: |
@@ -195,10 +211,9 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
195 | switch (mode) { | 211 | switch (mode) { |
196 | case DRM_MODE_DPMS_ON: | 212 | case DRM_MODE_DPMS_ON: |
197 | amdgpu_crtc->enabled = true; | 213 | amdgpu_crtc->enabled = true; |
198 | /* Make sure VBLANK and PFLIP interrupts are still enabled */ | 214 | /* Make sure VBLANK interrupts are still enabled */ |
199 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); | 215 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); |
200 | amdgpu_irq_update(adev, &adev->crtc_irq, type); | 216 | amdgpu_irq_update(adev, &adev->crtc_irq, type); |
201 | amdgpu_irq_update(adev, &adev->pageflip_irq, type); | ||
202 | drm_vblank_on(dev, amdgpu_crtc->crtc_id); | 217 | drm_vblank_on(dev, amdgpu_crtc->crtc_id); |
203 | break; | 218 | break; |
204 | case DRM_MODE_DPMS_STANDBY: | 219 | case DRM_MODE_DPMS_STANDBY: |
@@ -264,24 +279,6 @@ static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc, | |||
264 | const struct drm_display_mode *mode, | 279 | const struct drm_display_mode *mode, |
265 | struct drm_display_mode *adjusted_mode) | 280 | struct drm_display_mode *adjusted_mode) |
266 | { | 281 | { |
267 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
268 | struct drm_device *dev = crtc->dev; | ||
269 | struct drm_encoder *encoder; | ||
270 | |||
271 | /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ | ||
272 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
273 | if (encoder->crtc == crtc) { | ||
274 | amdgpu_crtc->encoder = encoder; | ||
275 | amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); | ||
276 | break; | ||
277 | } | ||
278 | } | ||
279 | if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { | ||
280 | amdgpu_crtc->encoder = NULL; | ||
281 | amdgpu_crtc->connector = NULL; | ||
282 | return false; | ||
283 | } | ||
284 | |||
285 | return true; | 282 | return true; |
286 | } | 283 | } |
287 | 284 | ||
@@ -341,6 +338,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index) | |||
341 | amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; | 338 | amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; |
342 | amdgpu_crtc->encoder = NULL; | 339 | amdgpu_crtc->encoder = NULL; |
343 | amdgpu_crtc->connector = NULL; | 340 | amdgpu_crtc->connector = NULL; |
341 | amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; | ||
344 | drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs); | 342 | drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs); |
345 | 343 | ||
346 | return 0; | 344 | return 0; |
@@ -350,48 +348,128 @@ static int dce_virtual_early_init(void *handle) | |||
350 | { | 348 | { |
351 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 349 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
352 | 350 | ||
353 | adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; | ||
354 | dce_virtual_set_display_funcs(adev); | 351 | dce_virtual_set_display_funcs(adev); |
355 | dce_virtual_set_irq_funcs(adev); | 352 | dce_virtual_set_irq_funcs(adev); |
356 | 353 | ||
357 | adev->mode_info.num_crtc = 1; | ||
358 | adev->mode_info.num_hpd = 1; | 354 | adev->mode_info.num_hpd = 1; |
359 | adev->mode_info.num_dig = 1; | 355 | adev->mode_info.num_dig = 1; |
360 | return 0; | 356 | return 0; |
361 | } | 357 | } |
362 | 358 | ||
363 | static bool dce_virtual_get_connector_info(struct amdgpu_device *adev) | 359 | static struct drm_encoder * |
360 | dce_virtual_encoder(struct drm_connector *connector) | ||
364 | { | 361 | { |
365 | struct amdgpu_i2c_bus_rec ddc_bus; | 362 | int enc_id = connector->encoder_ids[0]; |
366 | struct amdgpu_router router; | 363 | struct drm_encoder *encoder; |
367 | struct amdgpu_hpd hpd; | 364 | int i; |
368 | 365 | ||
369 | /* look up gpio for ddc, hpd */ | 366 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
370 | ddc_bus.valid = false; | 367 | if (connector->encoder_ids[i] == 0) |
371 | hpd.hpd = AMDGPU_HPD_NONE; | 368 | break; |
372 | /* needed for aux chan transactions */ | ||
373 | ddc_bus.hpd = hpd.hpd; | ||
374 | 369 | ||
375 | memset(&router, 0, sizeof(router)); | 370 | encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); |
376 | router.ddc_valid = false; | 371 | if (!encoder) |
377 | router.cd_valid = false; | 372 | continue; |
378 | amdgpu_display_add_connector(adev, | ||
379 | 0, | ||
380 | ATOM_DEVICE_CRT1_SUPPORT, | ||
381 | DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus, | ||
382 | CONNECTOR_OBJECT_ID_VIRTUAL, | ||
383 | &hpd, | ||
384 | &router); | ||
385 | 373 | ||
386 | amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL, | 374 | if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) |
387 | ATOM_DEVICE_CRT1_SUPPORT, | 375 | return encoder; |
388 | 0); | 376 | } |
389 | 377 | ||
390 | amdgpu_link_encoder_connector(adev->ddev); | 378 | /* pick the first one */ |
379 | if (enc_id) | ||
380 | return drm_encoder_find(connector->dev, enc_id); | ||
381 | return NULL; | ||
382 | } | ||
383 | |||
384 | static int dce_virtual_get_modes(struct drm_connector *connector) | ||
385 | { | ||
386 | struct drm_device *dev = connector->dev; | ||
387 | struct drm_display_mode *mode = NULL; | ||
388 | unsigned i; | ||
389 | static const struct mode_size { | ||
390 | int w; | ||
391 | int h; | ||
392 | } common_modes[17] = { | ||
393 | { 640, 480}, | ||
394 | { 720, 480}, | ||
395 | { 800, 600}, | ||
396 | { 848, 480}, | ||
397 | {1024, 768}, | ||
398 | {1152, 768}, | ||
399 | {1280, 720}, | ||
400 | {1280, 800}, | ||
401 | {1280, 854}, | ||
402 | {1280, 960}, | ||
403 | {1280, 1024}, | ||
404 | {1440, 900}, | ||
405 | {1400, 1050}, | ||
406 | {1680, 1050}, | ||
407 | {1600, 1200}, | ||
408 | {1920, 1080}, | ||
409 | {1920, 1200} | ||
410 | }; | ||
411 | |||
412 | for (i = 0; i < 17; i++) { | ||
413 | mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); | ||
414 | drm_mode_probed_add(connector, mode); | ||
415 | } | ||
391 | 416 | ||
392 | return true; | 417 | return 0; |
418 | } | ||
419 | |||
420 | static int dce_virtual_mode_valid(struct drm_connector *connector, | ||
421 | struct drm_display_mode *mode) | ||
422 | { | ||
423 | return MODE_OK; | ||
424 | } | ||
425 | |||
426 | static int | ||
427 | dce_virtual_dpms(struct drm_connector *connector, int mode) | ||
428 | { | ||
429 | return 0; | ||
393 | } | 430 | } |
394 | 431 | ||
432 | static enum drm_connector_status | ||
433 | dce_virtual_detect(struct drm_connector *connector, bool force) | ||
434 | { | ||
435 | return connector_status_connected; | ||
436 | } | ||
437 | |||
438 | static int | ||
439 | dce_virtual_set_property(struct drm_connector *connector, | ||
440 | struct drm_property *property, | ||
441 | uint64_t val) | ||
442 | { | ||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | static void dce_virtual_destroy(struct drm_connector *connector) | ||
447 | { | ||
448 | drm_connector_unregister(connector); | ||
449 | drm_connector_cleanup(connector); | ||
450 | kfree(connector); | ||
451 | } | ||
452 | |||
453 | static void dce_virtual_force(struct drm_connector *connector) | ||
454 | { | ||
455 | return; | ||
456 | } | ||
457 | |||
458 | static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = { | ||
459 | .get_modes = dce_virtual_get_modes, | ||
460 | .mode_valid = dce_virtual_mode_valid, | ||
461 | .best_encoder = dce_virtual_encoder, | ||
462 | }; | ||
463 | |||
464 | static const struct drm_connector_funcs dce_virtual_connector_funcs = { | ||
465 | .dpms = dce_virtual_dpms, | ||
466 | .detect = dce_virtual_detect, | ||
467 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
468 | .set_property = dce_virtual_set_property, | ||
469 | .destroy = dce_virtual_destroy, | ||
470 | .force = dce_virtual_force, | ||
471 | }; | ||
472 | |||
395 | static int dce_virtual_sw_init(void *handle) | 473 | static int dce_virtual_sw_init(void *handle) |
396 | { | 474 | { |
397 | int r, i; | 475 | int r, i; |
@@ -420,16 +498,16 @@ static int dce_virtual_sw_init(void *handle) | |||
420 | adev->ddev->mode_config.max_width = 16384; | 498 | adev->ddev->mode_config.max_width = 16384; |
421 | adev->ddev->mode_config.max_height = 16384; | 499 | adev->ddev->mode_config.max_height = 16384; |
422 | 500 | ||
423 | /* allocate crtcs */ | 501 | /* allocate crtcs, encoders, connectors */ |
424 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | 502 | for (i = 0; i < adev->mode_info.num_crtc; i++) { |
425 | r = dce_virtual_crtc_init(adev, i); | 503 | r = dce_virtual_crtc_init(adev, i); |
426 | if (r) | 504 | if (r) |
427 | return r; | 505 | return r; |
506 | r = dce_virtual_connector_encoder_init(adev, i); | ||
507 | if (r) | ||
508 | return r; | ||
428 | } | 509 | } |
429 | 510 | ||
430 | dce_virtual_get_connector_info(adev); | ||
431 | amdgpu_print_display_setup(adev->ddev); | ||
432 | |||
433 | drm_kms_helper_poll_init(adev->ddev); | 511 | drm_kms_helper_poll_init(adev->ddev); |
434 | 512 | ||
435 | adev->mode_info.mode_config_initialized = true; | 513 | adev->mode_info.mode_config_initialized = true; |
@@ -496,7 +574,7 @@ static int dce_virtual_set_powergating_state(void *handle, | |||
496 | return 0; | 574 | return 0; |
497 | } | 575 | } |
498 | 576 | ||
499 | const struct amd_ip_funcs dce_virtual_ip_funcs = { | 577 | static const struct amd_ip_funcs dce_virtual_ip_funcs = { |
500 | .name = "dce_virtual", | 578 | .name = "dce_virtual", |
501 | .early_init = dce_virtual_early_init, | 579 | .early_init = dce_virtual_early_init, |
502 | .late_init = NULL, | 580 | .late_init = NULL, |
@@ -526,8 +604,8 @@ static void dce_virtual_encoder_commit(struct drm_encoder *encoder) | |||
526 | 604 | ||
527 | static void | 605 | static void |
528 | dce_virtual_encoder_mode_set(struct drm_encoder *encoder, | 606 | dce_virtual_encoder_mode_set(struct drm_encoder *encoder, |
529 | struct drm_display_mode *mode, | 607 | struct drm_display_mode *mode, |
530 | struct drm_display_mode *adjusted_mode) | 608 | struct drm_display_mode *adjusted_mode) |
531 | { | 609 | { |
532 | return; | 610 | return; |
533 | } | 611 | } |
@@ -547,10 +625,6 @@ static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder, | |||
547 | const struct drm_display_mode *mode, | 625 | const struct drm_display_mode *mode, |
548 | struct drm_display_mode *adjusted_mode) | 626 | struct drm_display_mode *adjusted_mode) |
549 | { | 627 | { |
550 | |||
551 | /* set the active encoder to connector routing */ | ||
552 | amdgpu_encoder_set_active_device(encoder); | ||
553 | |||
554 | return true; | 628 | return true; |
555 | } | 629 | } |
556 | 630 | ||
@@ -576,45 +650,40 @@ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { | |||
576 | .destroy = dce_virtual_encoder_destroy, | 650 | .destroy = dce_virtual_encoder_destroy, |
577 | }; | 651 | }; |
578 | 652 | ||
579 | static void dce_virtual_encoder_add(struct amdgpu_device *adev, | 653 | static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, |
580 | uint32_t encoder_enum, | 654 | int index) |
581 | uint32_t supported_device, | ||
582 | u16 caps) | ||
583 | { | 655 | { |
584 | struct drm_device *dev = adev->ddev; | ||
585 | struct drm_encoder *encoder; | 656 | struct drm_encoder *encoder; |
586 | struct amdgpu_encoder *amdgpu_encoder; | 657 | struct drm_connector *connector; |
587 | 658 | ||
588 | /* see if we already added it */ | 659 | /* add a new encoder */ |
589 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 660 | encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL); |
590 | amdgpu_encoder = to_amdgpu_encoder(encoder); | 661 | if (!encoder) |
591 | if (amdgpu_encoder->encoder_enum == encoder_enum) { | 662 | return -ENOMEM; |
592 | amdgpu_encoder->devices |= supported_device; | 663 | encoder->possible_crtcs = 1 << index; |
593 | return; | 664 | drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs, |
594 | } | 665 | DRM_MODE_ENCODER_VIRTUAL, NULL); |
666 | drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs); | ||
595 | 667 | ||
668 | connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL); | ||
669 | if (!connector) { | ||
670 | kfree(encoder); | ||
671 | return -ENOMEM; | ||
596 | } | 672 | } |
597 | 673 | ||
598 | /* add a new one */ | 674 | /* add a new connector */ |
599 | amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); | 675 | drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs, |
600 | if (!amdgpu_encoder) | 676 | DRM_MODE_CONNECTOR_VIRTUAL); |
601 | return; | 677 | drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs); |
678 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
679 | connector->interlace_allowed = false; | ||
680 | connector->doublescan_allowed = false; | ||
681 | drm_connector_register(connector); | ||
602 | 682 | ||
603 | encoder = &amdgpu_encoder->base; | 683 | /* link them */ |
604 | encoder->possible_crtcs = 0x1; | 684 | drm_mode_connector_attach_encoder(connector, encoder); |
605 | amdgpu_encoder->enc_priv = NULL; | 685 | |
606 | amdgpu_encoder->encoder_enum = encoder_enum; | 686 | return 0; |
607 | amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
608 | amdgpu_encoder->devices = supported_device; | ||
609 | amdgpu_encoder->rmx_type = RMX_OFF; | ||
610 | amdgpu_encoder->underscan_type = UNDERSCAN_OFF; | ||
611 | amdgpu_encoder->is_ext_encoder = false; | ||
612 | amdgpu_encoder->caps = caps; | ||
613 | |||
614 | drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs, | ||
615 | DRM_MODE_ENCODER_VIRTUAL, NULL); | ||
616 | drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs); | ||
617 | DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id); | ||
618 | } | 687 | } |
619 | 688 | ||
620 | static const struct amdgpu_display_funcs dce_virtual_display_funcs = { | 689 | static const struct amdgpu_display_funcs dce_virtual_display_funcs = { |
@@ -630,8 +699,8 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = { | |||
630 | .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg, | 699 | .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg, |
631 | .page_flip = &dce_virtual_page_flip, | 700 | .page_flip = &dce_virtual_page_flip, |
632 | .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, | 701 | .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, |
633 | .add_encoder = &dce_virtual_encoder_add, | 702 | .add_encoder = NULL, |
634 | .add_connector = &amdgpu_connector_add, | 703 | .add_connector = NULL, |
635 | .stop_mc_access = &dce_virtual_stop_mc_access, | 704 | .stop_mc_access = &dce_virtual_stop_mc_access, |
636 | .resume_mc_access = &dce_virtual_resume_mc_access, | 705 | .resume_mc_access = &dce_virtual_resume_mc_access, |
637 | }; | 706 | }; |
@@ -642,107 +711,13 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) | |||
642 | adev->mode_info.funcs = &dce_virtual_display_funcs; | 711 | adev->mode_info.funcs = &dce_virtual_display_funcs; |
643 | } | 712 | } |
644 | 713 | ||
645 | static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer) | 714 | static int dce_virtual_pageflip(struct amdgpu_device *adev, |
646 | { | 715 | unsigned crtc_id) |
647 | struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer); | ||
648 | struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info); | ||
649 | unsigned crtc = 0; | ||
650 | drm_handle_vblank(adev->ddev, crtc); | ||
651 | dce_virtual_pageflip_irq(adev, NULL, NULL); | ||
652 | hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); | ||
653 | return HRTIMER_NORESTART; | ||
654 | } | ||
655 | |||
656 | static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, | ||
657 | int crtc, | ||
658 | enum amdgpu_interrupt_state state) | ||
659 | { | ||
660 | if (crtc >= adev->mode_info.num_crtc) { | ||
661 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
662 | return; | ||
663 | } | ||
664 | |||
665 | if (state && !adev->mode_info.vsync_timer_enabled) { | ||
666 | DRM_DEBUG("Enable software vsync timer\n"); | ||
667 | hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
668 | hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD)); | ||
669 | adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle; | ||
670 | hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); | ||
671 | } else if (!state && adev->mode_info.vsync_timer_enabled) { | ||
672 | DRM_DEBUG("Disable software vsync timer\n"); | ||
673 | hrtimer_cancel(&adev->mode_info.vblank_timer); | ||
674 | } | ||
675 | |||
676 | adev->mode_info.vsync_timer_enabled = state; | ||
677 | DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state); | ||
678 | } | ||
679 | |||
680 | |||
681 | static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev, | ||
682 | struct amdgpu_irq_src *source, | ||
683 | unsigned type, | ||
684 | enum amdgpu_interrupt_state state) | ||
685 | { | ||
686 | switch (type) { | ||
687 | case AMDGPU_CRTC_IRQ_VBLANK1: | ||
688 | dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state); | ||
689 | break; | ||
690 | default: | ||
691 | break; | ||
692 | } | ||
693 | return 0; | ||
694 | } | ||
695 | |||
696 | static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev, | ||
697 | int crtc) | ||
698 | { | ||
699 | if (crtc >= adev->mode_info.num_crtc) { | ||
700 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
701 | return; | ||
702 | } | ||
703 | } | ||
704 | |||
705 | static int dce_virtual_crtc_irq(struct amdgpu_device *adev, | ||
706 | struct amdgpu_irq_src *source, | ||
707 | struct amdgpu_iv_entry *entry) | ||
708 | { | ||
709 | unsigned crtc = 0; | ||
710 | unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1; | ||
711 | |||
712 | dce_virtual_crtc_vblank_int_ack(adev, crtc); | ||
713 | |||
714 | if (amdgpu_irq_enabled(adev, source, irq_type)) { | ||
715 | drm_handle_vblank(adev->ddev, crtc); | ||
716 | } | ||
717 | dce_virtual_pageflip_irq(adev, NULL, NULL); | ||
718 | DRM_DEBUG("IH: D%d vblank\n", crtc + 1); | ||
719 | return 0; | ||
720 | } | ||
721 | |||
722 | static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev, | ||
723 | struct amdgpu_irq_src *src, | ||
724 | unsigned type, | ||
725 | enum amdgpu_interrupt_state state) | ||
726 | { | ||
727 | if (type >= adev->mode_info.num_crtc) { | ||
728 | DRM_ERROR("invalid pageflip crtc %d\n", type); | ||
729 | return -EINVAL; | ||
730 | } | ||
731 | DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state); | ||
732 | |||
733 | return 0; | ||
734 | } | ||
735 | |||
736 | static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, | ||
737 | struct amdgpu_irq_src *source, | ||
738 | struct amdgpu_iv_entry *entry) | ||
739 | { | 716 | { |
740 | unsigned long flags; | 717 | unsigned long flags; |
741 | unsigned crtc_id = 0; | ||
742 | struct amdgpu_crtc *amdgpu_crtc; | 718 | struct amdgpu_crtc *amdgpu_crtc; |
743 | struct amdgpu_flip_work *works; | 719 | struct amdgpu_flip_work *works; |
744 | 720 | ||
745 | crtc_id = 0; | ||
746 | amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; | 721 | amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; |
747 | 722 | ||
748 | if (crtc_id >= adev->mode_info.num_crtc) { | 723 | if (crtc_id >= adev->mode_info.num_crtc) { |
@@ -781,22 +756,79 @@ static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, | |||
781 | return 0; | 756 | return 0; |
782 | } | 757 | } |
783 | 758 | ||
759 | static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer) | ||
760 | { | ||
761 | struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer, | ||
762 | struct amdgpu_crtc, vblank_timer); | ||
763 | struct drm_device *ddev = amdgpu_crtc->base.dev; | ||
764 | struct amdgpu_device *adev = ddev->dev_private; | ||
765 | |||
766 | drm_handle_vblank(ddev, amdgpu_crtc->crtc_id); | ||
767 | dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id); | ||
768 | hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), | ||
769 | HRTIMER_MODE_REL); | ||
770 | |||
771 | return HRTIMER_NORESTART; | ||
772 | } | ||
773 | |||
774 | static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, | ||
775 | int crtc, | ||
776 | enum amdgpu_interrupt_state state) | ||
777 | { | ||
778 | if (crtc >= adev->mode_info.num_crtc) { | ||
779 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
780 | return; | ||
781 | } | ||
782 | |||
783 | if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) { | ||
784 | DRM_DEBUG("Enable software vsync timer\n"); | ||
785 | hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer, | ||
786 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
787 | hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer, | ||
788 | ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD)); | ||
789 | adev->mode_info.crtcs[crtc]->vblank_timer.function = | ||
790 | dce_virtual_vblank_timer_handle; | ||
791 | hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer, | ||
792 | ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); | ||
793 | } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) { | ||
794 | DRM_DEBUG("Disable software vsync timer\n"); | ||
795 | hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer); | ||
796 | } | ||
797 | |||
798 | adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state; | ||
799 | DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state); | ||
800 | } | ||
801 | |||
802 | |||
803 | static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev, | ||
804 | struct amdgpu_irq_src *source, | ||
805 | unsigned type, | ||
806 | enum amdgpu_interrupt_state state) | ||
807 | { | ||
808 | if (type > AMDGPU_CRTC_IRQ_VBLANK6) | ||
809 | return -EINVAL; | ||
810 | |||
811 | dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state); | ||
812 | |||
813 | return 0; | ||
814 | } | ||
815 | |||
784 | static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { | 816 | static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { |
785 | .set = dce_virtual_set_crtc_irq_state, | 817 | .set = dce_virtual_set_crtc_irq_state, |
786 | .process = dce_virtual_crtc_irq, | 818 | .process = NULL, |
787 | }; | ||
788 | |||
789 | static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = { | ||
790 | .set = dce_virtual_set_pageflip_irq_state, | ||
791 | .process = dce_virtual_pageflip_irq, | ||
792 | }; | 819 | }; |
793 | 820 | ||
794 | static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) | 821 | static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) |
795 | { | 822 | { |
796 | adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; | 823 | adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; |
797 | adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; | 824 | adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; |
798 | |||
799 | adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; | ||
800 | adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs; | ||
801 | } | 825 | } |
802 | 826 | ||
827 | const struct amdgpu_ip_block_version dce_virtual_ip_block = | ||
828 | { | ||
829 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
830 | .major = 1, | ||
831 | .minor = 0, | ||
832 | .rev = 0, | ||
833 | .funcs = &dce_virtual_ip_funcs, | ||
834 | }; | ||
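In dce_virtual.c the fake display path stops going through ATOM-style connector/encoder bookkeeping and the pageflip IRQ source: each virtual CRTC now owns its own hrtimer-based software vsync (vsync_timer_enabled and vblank_timer move from adev->mode_info into struct amdgpu_crtc), and the timer handler drives both vblank and flip completion directly. A condensed sketch of that timer loop, matching the added code above; the handler re-arms the timer by hand, which is why it can return HRTIMER_NORESTART:

	static enum hrtimer_restart vblank_timer_sketch(struct hrtimer *t)
	{
		struct amdgpu_crtc *crtc = container_of(t, struct amdgpu_crtc, vblank_timer);
		struct drm_device *ddev = crtc->base.dev;

		drm_handle_vblank(ddev, crtc->crtc_id);			/* fake vblank event */
		dce_virtual_pageflip(ddev->dev_private, crtc->crtc_id);	/* complete a pending flip */
		hrtimer_start(t, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
			      HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}

DCE_VIRTUAL_VBLANK_PERIOD is 16666666 ns, i.e. a ~60 Hz refresh, and is now private to dce_virtual.c (see the dce_virtual.h hunk below).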
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h index e239243f6ebc..ed422012c8c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h | |||
@@ -24,8 +24,7 @@ | |||
24 | #ifndef __DCE_VIRTUAL_H__ | 24 | #ifndef __DCE_VIRTUAL_H__ |
25 | #define __DCE_VIRTUAL_H__ | 25 | #define __DCE_VIRTUAL_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs dce_virtual_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version dce_virtual_ip_block; |
28 | #define DCE_VIRTUAL_VBLANK_PERIOD 16666666 | ||
29 | 28 | ||
30 | #endif | 29 | #endif |
31 | 30 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 40abb6b81c09..21c086e02e7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | |||
@@ -1522,7 +1522,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1522 | { | 1522 | { |
1523 | struct amdgpu_device *adev = ring->adev; | 1523 | struct amdgpu_device *adev = ring->adev; |
1524 | struct amdgpu_ib ib; | 1524 | struct amdgpu_ib ib; |
1525 | struct fence *f = NULL; | 1525 | struct dma_fence *f = NULL; |
1526 | uint32_t scratch; | 1526 | uint32_t scratch; |
1527 | uint32_t tmp = 0; | 1527 | uint32_t tmp = 0; |
1528 | long r; | 1528 | long r; |
@@ -1548,7 +1548,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1548 | if (r) | 1548 | if (r) |
1549 | goto err2; | 1549 | goto err2; |
1550 | 1550 | ||
1551 | r = fence_wait_timeout(f, false, timeout); | 1551 | r = dma_fence_wait_timeout(f, false, timeout); |
1552 | if (r == 0) { | 1552 | if (r == 0) { |
1553 | DRM_ERROR("amdgpu: IB test timed out\n"); | 1553 | DRM_ERROR("amdgpu: IB test timed out\n"); |
1554 | r = -ETIMEDOUT; | 1554 | r = -ETIMEDOUT; |
@@ -1569,7 +1569,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1569 | 1569 | ||
1570 | err2: | 1570 | err2: |
1571 | amdgpu_ib_free(adev, &ib, NULL); | 1571 | amdgpu_ib_free(adev, &ib, NULL); |
1572 | fence_put(f); | 1572 | dma_fence_put(f); |
1573 | err1: | 1573 | err1: |
1574 | amdgpu_gfx_scratch_free(adev, scratch); | 1574 | amdgpu_gfx_scratch_free(adev, scratch); |
1575 | return r; | 1575 | return r; |
@@ -1940,7 +1940,7 @@ static int gfx_v6_0_cp_resume(struct amdgpu_device *adev) | |||
1940 | 1940 | ||
1941 | static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | 1941 | static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) |
1942 | { | 1942 | { |
1943 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 1943 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
1944 | uint32_t seq = ring->fence_drv.sync_seq; | 1944 | uint32_t seq = ring->fence_drv.sync_seq; |
1945 | uint64_t addr = ring->fence_drv.gpu_addr; | 1945 | uint64_t addr = ring->fence_drv.gpu_addr; |
1946 | 1946 | ||
@@ -1966,7 +1966,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | |||
1966 | static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | 1966 | static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, |
1967 | unsigned vm_id, uint64_t pd_addr) | 1967 | unsigned vm_id, uint64_t pd_addr) |
1968 | { | 1968 | { |
1969 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 1969 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
1970 | 1970 | ||
1971 | /* write new base address */ | 1971 | /* write new base address */ |
1972 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 1972 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
@@ -2814,33 +2814,6 @@ static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) | |||
2814 | amdgpu_ring_write(ring, 0); | 2814 | amdgpu_ring_write(ring, 0); |
2815 | } | 2815 | } |
2816 | 2816 | ||
2817 | static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
2818 | { | ||
2819 | return | ||
2820 | 6; /* gfx_v6_0_ring_emit_ib */ | ||
2821 | } | ||
2822 | |||
2823 | static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring) | ||
2824 | { | ||
2825 | return | ||
2826 | 5 + /* gfx_v6_0_ring_emit_hdp_flush */ | ||
2827 | 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */ | ||
2828 | 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ | ||
2829 | 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */ | ||
2830 | 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */ | ||
2831 | 3; /* gfx_v6_ring_emit_cntxcntl */ | ||
2832 | } | ||
2833 | |||
2834 | static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring) | ||
2835 | { | ||
2836 | return | ||
2837 | 5 + /* gfx_v6_0_ring_emit_hdp_flush */ | ||
2838 | 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */ | ||
2839 | 7 + /* gfx_v6_0_ring_emit_pipeline_sync */ | ||
2840 | 17 + /* gfx_v6_0_ring_emit_vm_flush */ | ||
2841 | 14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ | ||
2842 | } | ||
2843 | |||
2844 | static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = { | 2817 | static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = { |
2845 | .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter, | 2818 | .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter, |
2846 | .select_se_sh = &gfx_v6_0_select_se_sh, | 2819 | .select_se_sh = &gfx_v6_0_select_se_sh, |
@@ -2896,9 +2869,7 @@ static int gfx_v6_0_sw_init(void *handle) | |||
2896 | ring->ring_obj = NULL; | 2869 | ring->ring_obj = NULL; |
2897 | sprintf(ring->name, "gfx"); | 2870 | sprintf(ring->name, "gfx"); |
2898 | r = amdgpu_ring_init(adev, ring, 1024, | 2871 | r = amdgpu_ring_init(adev, ring, 1024, |
2899 | 0x80000000, 0xf, | 2872 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP); |
2900 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, | ||
2901 | AMDGPU_RING_TYPE_GFX); | ||
2902 | if (r) | 2873 | if (r) |
2903 | return r; | 2874 | return r; |
2904 | } | 2875 | } |
@@ -2920,9 +2891,7 @@ static int gfx_v6_0_sw_init(void *handle) | |||
2920 | sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); | 2891 | sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); |
2921 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; | 2892 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; |
2922 | r = amdgpu_ring_init(adev, ring, 1024, | 2893 | r = amdgpu_ring_init(adev, ring, 1024, |
2923 | 0x80000000, 0xf, | 2894 | &adev->gfx.eop_irq, irq_type); |
2924 | &adev->gfx.eop_irq, irq_type, | ||
2925 | AMDGPU_RING_TYPE_COMPUTE); | ||
2926 | if (r) | 2895 | if (r) |
2927 | return r; | 2896 | return r; |
2928 | } | 2897 | } |
@@ -3237,7 +3206,7 @@ static int gfx_v6_0_set_powergating_state(void *handle, | |||
3237 | return 0; | 3206 | return 0; |
3238 | } | 3207 | } |
3239 | 3208 | ||
3240 | const struct amd_ip_funcs gfx_v6_0_ip_funcs = { | 3209 | static const struct amd_ip_funcs gfx_v6_0_ip_funcs = { |
3241 | .name = "gfx_v6_0", | 3210 | .name = "gfx_v6_0", |
3242 | .early_init = gfx_v6_0_early_init, | 3211 | .early_init = gfx_v6_0_early_init, |
3243 | .late_init = NULL, | 3212 | .late_init = NULL, |
@@ -3255,10 +3224,20 @@ const struct amd_ip_funcs gfx_v6_0_ip_funcs = { | |||
3255 | }; | 3224 | }; |
3256 | 3225 | ||
3257 | static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { | 3226 | static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { |
3227 | .type = AMDGPU_RING_TYPE_GFX, | ||
3228 | .align_mask = 0xff, | ||
3229 | .nop = 0x80000000, | ||
3258 | .get_rptr = gfx_v6_0_ring_get_rptr, | 3230 | .get_rptr = gfx_v6_0_ring_get_rptr, |
3259 | .get_wptr = gfx_v6_0_ring_get_wptr, | 3231 | .get_wptr = gfx_v6_0_ring_get_wptr, |
3260 | .set_wptr = gfx_v6_0_ring_set_wptr_gfx, | 3232 | .set_wptr = gfx_v6_0_ring_set_wptr_gfx, |
3261 | .parse_cs = NULL, | 3233 | .emit_frame_size = |
3234 | 5 + /* gfx_v6_0_ring_emit_hdp_flush */ | ||
3235 | 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */ | ||
3236 | 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ | ||
3237 | 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */ | ||
3238 | 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */ | ||
3239 | 3, /* gfx_v6_ring_emit_cntxcntl */ | ||
3240 | .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */ | ||
3262 | .emit_ib = gfx_v6_0_ring_emit_ib, | 3241 | .emit_ib = gfx_v6_0_ring_emit_ib, |
3263 | .emit_fence = gfx_v6_0_ring_emit_fence, | 3242 | .emit_fence = gfx_v6_0_ring_emit_fence, |
3264 | .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync, | 3243 | .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync, |
@@ -3269,15 +3248,22 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { | |||
3269 | .test_ib = gfx_v6_0_ring_test_ib, | 3248 | .test_ib = gfx_v6_0_ring_test_ib, |
3270 | .insert_nop = amdgpu_ring_insert_nop, | 3249 | .insert_nop = amdgpu_ring_insert_nop, |
3271 | .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl, | 3250 | .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl, |
3272 | .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size, | ||
3273 | .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx, | ||
3274 | }; | 3251 | }; |
3275 | 3252 | ||
3276 | static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = { | 3253 | static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = { |
3254 | .type = AMDGPU_RING_TYPE_COMPUTE, | ||
3255 | .align_mask = 0xff, | ||
3256 | .nop = 0x80000000, | ||
3277 | .get_rptr = gfx_v6_0_ring_get_rptr, | 3257 | .get_rptr = gfx_v6_0_ring_get_rptr, |
3278 | .get_wptr = gfx_v6_0_ring_get_wptr, | 3258 | .get_wptr = gfx_v6_0_ring_get_wptr, |
3279 | .set_wptr = gfx_v6_0_ring_set_wptr_compute, | 3259 | .set_wptr = gfx_v6_0_ring_set_wptr_compute, |
3280 | .parse_cs = NULL, | 3260 | .emit_frame_size = |
3261 | 5 + /* gfx_v6_0_ring_emit_hdp_flush */ | ||
3262 | 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */ | ||
3263 | 7 + /* gfx_v6_0_ring_emit_pipeline_sync */ | ||
3264 | 17 + /* gfx_v6_0_ring_emit_vm_flush */ | ||
3265 | 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ | ||
3266 | .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */ | ||
3281 | .emit_ib = gfx_v6_0_ring_emit_ib, | 3267 | .emit_ib = gfx_v6_0_ring_emit_ib, |
3282 | .emit_fence = gfx_v6_0_ring_emit_fence, | 3268 | .emit_fence = gfx_v6_0_ring_emit_fence, |
3283 | .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync, | 3269 | .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync, |
@@ -3287,8 +3273,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = { | |||
3287 | .test_ring = gfx_v6_0_ring_test_ring, | 3273 | .test_ring = gfx_v6_0_ring_test_ring, |
3288 | .test_ib = gfx_v6_0_ring_test_ib, | 3274 | .test_ib = gfx_v6_0_ring_test_ib, |
3289 | .insert_nop = amdgpu_ring_insert_nop, | 3275 | .insert_nop = amdgpu_ring_insert_nop, |
3290 | .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size, | ||
3291 | .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute, | ||
3292 | }; | 3276 | }; |
3293 | 3277 | ||
3294 | static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev) | 3278 | static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -3360,3 +3344,12 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) | |||
3360 | cu_info->number = active_cu_number; | 3344 | cu_info->number = active_cu_number; |
3361 | cu_info->ao_cu_mask = ao_cu_mask; | 3345 | cu_info->ao_cu_mask = ao_cu_mask; |
3362 | } | 3346 | } |
3347 | |||
3348 | const struct amdgpu_ip_block_version gfx_v6_0_ip_block = | ||
3349 | { | ||
3350 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
3351 | .major = 6, | ||
3352 | .minor = 0, | ||
3353 | .rev = 0, | ||
3354 | .funcs = &gfx_v6_0_ip_funcs, | ||
3355 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h index b9657e72b248..ced6fc42f688 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __GFX_V6_0_H__ | 24 | #ifndef __GFX_V6_0_H__ |
25 | #define __GFX_V6_0_H__ | 25 | #define __GFX_V6_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gfx_v6_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gfx_v6_0_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
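The gfx_v6_0 ring rework follows the series-wide pattern that shows up again in gfx_v7_0.c below: per-ring constants move out of the amdgpu_ring_init() arguments and out of the get_emit_ib_size()/get_dma_frame_size() callbacks into fields of struct amdgpu_ring_funcs, and runtime type checks switch from ring->type to ring->funcs->type. A trimmed sketch of what a converted table carries (values taken from the gfx ring hunks above; the table name is illustrative):

	static const struct amdgpu_ring_funcs sketch_ring_funcs_gfx = {
		.type		= AMDGPU_RING_TYPE_GFX,
		.align_mask	= 0xff,
		.nop		= 0x80000000,		/* gfx/compute NOP packet */
		/* worst-case dwords per frame, summed from the emit helpers */
		.emit_frame_size = 5 + 5 + (14 + 14 + 14) + (7 + 4) + (17 + 6) + 3,
		.emit_ib_size	= 6,			/* gfx_v6_0_ring_emit_ib */
		/* get_rptr/get_wptr/emit_* hooks unchanged from before */
	};

With the sizes known statically, amdgpu_ring_init() shrinks to (adev, ring, ring_size, irq_src, irq_type), as the sw_init hunks show.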
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 71116da9e782..5b631fd1a879 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -2077,9 +2077,9 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) | |||
2077 | static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) | 2077 | static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) |
2078 | { | 2078 | { |
2079 | u32 ref_and_mask; | 2079 | u32 ref_and_mask; |
2080 | int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1; | 2080 | int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1; |
2081 | 2081 | ||
2082 | if (ring->type == AMDGPU_RING_TYPE_COMPUTE) { | 2082 | if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { |
2083 | switch (ring->me) { | 2083 | switch (ring->me) { |
2084 | case 1: | 2084 | case 1: |
2085 | ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; | 2085 | ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; |
@@ -2286,7 +2286,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
2286 | { | 2286 | { |
2287 | struct amdgpu_device *adev = ring->adev; | 2287 | struct amdgpu_device *adev = ring->adev; |
2288 | struct amdgpu_ib ib; | 2288 | struct amdgpu_ib ib; |
2289 | struct fence *f = NULL; | 2289 | struct dma_fence *f = NULL; |
2290 | uint32_t scratch; | 2290 | uint32_t scratch; |
2291 | uint32_t tmp = 0; | 2291 | uint32_t tmp = 0; |
2292 | long r; | 2292 | long r; |
@@ -2312,7 +2312,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
2312 | if (r) | 2312 | if (r) |
2313 | goto err2; | 2313 | goto err2; |
2314 | 2314 | ||
2315 | r = fence_wait_timeout(f, false, timeout); | 2315 | r = dma_fence_wait_timeout(f, false, timeout); |
2316 | if (r == 0) { | 2316 | if (r == 0) { |
2317 | DRM_ERROR("amdgpu: IB test timed out\n"); | 2317 | DRM_ERROR("amdgpu: IB test timed out\n"); |
2318 | r = -ETIMEDOUT; | 2318 | r = -ETIMEDOUT; |
@@ -2333,7 +2333,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
2333 | 2333 | ||
2334 | err2: | 2334 | err2: |
2335 | amdgpu_ib_free(adev, &ib, NULL); | 2335 | amdgpu_ib_free(adev, &ib, NULL); |
2336 | fence_put(f); | 2336 | dma_fence_put(f); |
2337 | err1: | 2337 | err1: |
2338 | amdgpu_gfx_scratch_free(adev, scratch); | 2338 | amdgpu_gfx_scratch_free(adev, scratch); |
2339 | return r; | 2339 | return r; |
@@ -3222,7 +3222,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) | |||
3222 | */ | 3222 | */ |
3223 | static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | 3223 | static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) |
3224 | { | 3224 | { |
3225 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 3225 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
3226 | uint32_t seq = ring->fence_drv.sync_seq; | 3226 | uint32_t seq = ring->fence_drv.sync_seq; |
3227 | uint64_t addr = ring->fence_drv.gpu_addr; | 3227 | uint64_t addr = ring->fence_drv.gpu_addr; |
3228 | 3228 | ||
@@ -3262,7 +3262,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | |||
3262 | static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | 3262 | static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, |
3263 | unsigned vm_id, uint64_t pd_addr) | 3263 | unsigned vm_id, uint64_t pd_addr) |
3264 | { | 3264 | { |
3265 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 3265 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
3266 | 3266 | ||
3267 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 3267 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
3268 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | | 3268 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
@@ -3391,7 +3391,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3391 | if (adev->gfx.rlc.save_restore_obj == NULL) { | 3391 | if (adev->gfx.rlc.save_restore_obj == NULL) { |
3392 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 3392 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, |
3393 | AMDGPU_GEM_DOMAIN_VRAM, | 3393 | AMDGPU_GEM_DOMAIN_VRAM, |
3394 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 3394 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
3395 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
3395 | NULL, NULL, | 3396 | NULL, NULL, |
3396 | &adev->gfx.rlc.save_restore_obj); | 3397 | &adev->gfx.rlc.save_restore_obj); |
3397 | if (r) { | 3398 | if (r) { |
@@ -3435,7 +3436,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3435 | if (adev->gfx.rlc.clear_state_obj == NULL) { | 3436 | if (adev->gfx.rlc.clear_state_obj == NULL) { |
3436 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 3437 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, |
3437 | AMDGPU_GEM_DOMAIN_VRAM, | 3438 | AMDGPU_GEM_DOMAIN_VRAM, |
3438 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 3439 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
3440 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
3439 | NULL, NULL, | 3441 | NULL, NULL, |
3440 | &adev->gfx.rlc.clear_state_obj); | 3442 | &adev->gfx.rlc.clear_state_obj); |
3441 | if (r) { | 3443 | if (r) { |
@@ -3475,7 +3477,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3475 | if (adev->gfx.rlc.cp_table_obj == NULL) { | 3477 | if (adev->gfx.rlc.cp_table_obj == NULL) { |
3476 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, | 3478 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, |
3477 | AMDGPU_GEM_DOMAIN_VRAM, | 3479 | AMDGPU_GEM_DOMAIN_VRAM, |
3478 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 3480 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
3481 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
3479 | NULL, NULL, | 3482 | NULL, NULL, |
3480 | &adev->gfx.rlc.cp_table_obj); | 3483 | &adev->gfx.rlc.cp_table_obj); |
3481 | if (r) { | 3484 | if (r) { |
@@ -4354,44 +4357,40 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, | |||
4354 | amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); | 4357 | amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); |
4355 | } | 4358 | } |
4356 | 4359 | ||
4357 | static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring) | 4360 | static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) |
4358 | { | 4361 | { |
4359 | return | 4362 | WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13)); |
4360 | 4; /* gfx_v7_0_ring_emit_ib_gfx */ | 4363 | return RREG32(mmSQ_IND_DATA); |
4361 | } | 4364 | } |
4362 | 4365 | ||
4363 | static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring) | 4366 | static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) |
4364 | { | 4367 | { |
4365 | return | 4368 | /* type 0 wave data */ |
4366 | 20 + /* gfx_v7_0_ring_emit_gds_switch */ | 4369 | dst[(*no_fields)++] = 0; |
4367 | 7 + /* gfx_v7_0_ring_emit_hdp_flush */ | 4370 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS); |
4368 | 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ | 4371 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO); |
4369 | 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */ | 4372 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI); |
4370 | 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */ | 4373 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO); |
4371 | 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */ | 4374 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI); |
4372 | 3; /* gfx_v7_ring_emit_cntxcntl */ | 4375 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID); |
4373 | } | 4376 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0); |
4374 | 4377 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1); | |
4375 | static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring) | 4378 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC); |
4376 | { | 4379 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC); |
4377 | return | 4380 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS); |
4378 | 4; /* gfx_v7_0_ring_emit_ib_compute */ | 4381 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); |
4379 | } | 4382 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO); |
4380 | 4383 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI); | |
4381 | static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring) | 4384 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO); |
4382 | { | 4385 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI); |
4383 | return | 4386 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); |
4384 | 20 + /* gfx_v7_0_ring_emit_gds_switch */ | 4387 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); |
4385 | 7 + /* gfx_v7_0_ring_emit_hdp_flush */ | ||
4386 | 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ | ||
4387 | 7 + /* gfx_v7_0_ring_emit_pipeline_sync */ | ||
4388 | 17 + /* gfx_v7_0_ring_emit_vm_flush */ | ||
4389 | 7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */ | ||
4390 | } | 4388 | } |
4391 | 4389 | ||
4392 | static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { | 4390 | static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { |
4393 | .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, | 4391 | .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, |
4394 | .select_se_sh = &gfx_v7_0_select_se_sh, | 4392 | .select_se_sh = &gfx_v7_0_select_se_sh, |
4393 | .read_wave_data = &gfx_v7_0_read_wave_data, | ||
4395 | }; | 4394 | }; |
4396 | 4395 | ||
4397 | static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { | 4396 | static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { |
@@ -4643,9 +4642,7 @@ static int gfx_v7_0_sw_init(void *handle) | |||
4643 | ring->ring_obj = NULL; | 4642 | ring->ring_obj = NULL; |
4644 | sprintf(ring->name, "gfx"); | 4643 | sprintf(ring->name, "gfx"); |
4645 | r = amdgpu_ring_init(adev, ring, 1024, | 4644 | r = amdgpu_ring_init(adev, ring, 1024, |
4646 | PACKET3(PACKET3_NOP, 0x3FFF), 0xf, | 4645 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP); |
4647 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, | ||
4648 | AMDGPU_RING_TYPE_GFX); | ||
4649 | if (r) | 4646 | if (r) |
4650 | return r; | 4647 | return r; |
4651 | } | 4648 | } |
@@ -4670,9 +4667,7 @@ static int gfx_v7_0_sw_init(void *handle) | |||
4670 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; | 4667 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; |
4671 | /* type-2 packets are deprecated on MEC, use type-3 instead */ | 4668 | /* type-2 packets are deprecated on MEC, use type-3 instead */ |
4672 | r = amdgpu_ring_init(adev, ring, 1024, | 4669 | r = amdgpu_ring_init(adev, ring, 1024, |
4673 | PACKET3(PACKET3_NOP, 0x3FFF), 0xf, | 4670 | &adev->gfx.eop_irq, irq_type); |
4674 | &adev->gfx.eop_irq, irq_type, | ||
4675 | AMDGPU_RING_TYPE_COMPUTE); | ||
4676 | if (r) | 4671 | if (r) |
4677 | return r; | 4672 | return r; |
4678 | } | 4673 | } |
@@ -5123,7 +5118,7 @@ static int gfx_v7_0_set_powergating_state(void *handle, | |||
5123 | return 0; | 5118 | return 0; |
5124 | } | 5119 | } |
5125 | 5120 | ||
5126 | const struct amd_ip_funcs gfx_v7_0_ip_funcs = { | 5121 | static const struct amd_ip_funcs gfx_v7_0_ip_funcs = { |
5127 | .name = "gfx_v7_0", | 5122 | .name = "gfx_v7_0", |
5128 | .early_init = gfx_v7_0_early_init, | 5123 | .early_init = gfx_v7_0_early_init, |
5129 | .late_init = gfx_v7_0_late_init, | 5124 | .late_init = gfx_v7_0_late_init, |
@@ -5141,10 +5136,21 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = { | |||
5141 | }; | 5136 | }; |
5142 | 5137 | ||
5143 | static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { | 5138 | static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { |
5139 | .type = AMDGPU_RING_TYPE_GFX, | ||
5140 | .align_mask = 0xff, | ||
5141 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | ||
5144 | .get_rptr = gfx_v7_0_ring_get_rptr, | 5142 | .get_rptr = gfx_v7_0_ring_get_rptr, |
5145 | .get_wptr = gfx_v7_0_ring_get_wptr_gfx, | 5143 | .get_wptr = gfx_v7_0_ring_get_wptr_gfx, |
5146 | .set_wptr = gfx_v7_0_ring_set_wptr_gfx, | 5144 | .set_wptr = gfx_v7_0_ring_set_wptr_gfx, |
5147 | .parse_cs = NULL, | 5145 | .emit_frame_size = |
5146 | 20 + /* gfx_v7_0_ring_emit_gds_switch */ | ||
5147 | 7 + /* gfx_v7_0_ring_emit_hdp_flush */ | ||
5148 | 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ | ||
5149 | 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */ | ||
5150 | 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */ | ||
5151 | 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */ | ||
5152 | 3, /* gfx_v7_ring_emit_cntxcntl */ | ||
5153 | .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */ | ||
5148 | .emit_ib = gfx_v7_0_ring_emit_ib_gfx, | 5154 | .emit_ib = gfx_v7_0_ring_emit_ib_gfx, |
5149 | .emit_fence = gfx_v7_0_ring_emit_fence_gfx, | 5155 | .emit_fence = gfx_v7_0_ring_emit_fence_gfx, |
5150 | .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync, | 5156 | .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync, |
@@ -5157,15 +5163,23 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { | |||
5157 | .insert_nop = amdgpu_ring_insert_nop, | 5163 | .insert_nop = amdgpu_ring_insert_nop, |
5158 | .pad_ib = amdgpu_ring_generic_pad_ib, | 5164 | .pad_ib = amdgpu_ring_generic_pad_ib, |
5159 | .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl, | 5165 | .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl, |
5160 | .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx, | ||
5161 | .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx, | ||
5162 | }; | 5166 | }; |
5163 | 5167 | ||
5164 | static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { | 5168 | static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { |
5169 | .type = AMDGPU_RING_TYPE_COMPUTE, | ||
5170 | .align_mask = 0xff, | ||
5171 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | ||
5165 | .get_rptr = gfx_v7_0_ring_get_rptr, | 5172 | .get_rptr = gfx_v7_0_ring_get_rptr, |
5166 | .get_wptr = gfx_v7_0_ring_get_wptr_compute, | 5173 | .get_wptr = gfx_v7_0_ring_get_wptr_compute, |
5167 | .set_wptr = gfx_v7_0_ring_set_wptr_compute, | 5174 | .set_wptr = gfx_v7_0_ring_set_wptr_compute, |
5168 | .parse_cs = NULL, | 5175 | .emit_frame_size = |
5176 | 20 + /* gfx_v7_0_ring_emit_gds_switch */ | ||
5177 | 7 + /* gfx_v7_0_ring_emit_hdp_flush */ | ||
5178 | 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ | ||
5179 | 7 + /* gfx_v7_0_ring_emit_pipeline_sync */ | ||
5180 | 17 + /* gfx_v7_0_ring_emit_vm_flush */ | ||
5181 | 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */ | ||
5182 | .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */ | ||
5169 | .emit_ib = gfx_v7_0_ring_emit_ib_compute, | 5183 | .emit_ib = gfx_v7_0_ring_emit_ib_compute, |
5170 | .emit_fence = gfx_v7_0_ring_emit_fence_compute, | 5184 | .emit_fence = gfx_v7_0_ring_emit_fence_compute, |
5171 | .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync, | 5185 | .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync, |
@@ -5177,8 +5191,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { | |||
5177 | .test_ib = gfx_v7_0_ring_test_ib, | 5191 | .test_ib = gfx_v7_0_ring_test_ib, |
5178 | .insert_nop = amdgpu_ring_insert_nop, | 5192 | .insert_nop = amdgpu_ring_insert_nop, |
5179 | .pad_ib = amdgpu_ring_generic_pad_ib, | 5193 | .pad_ib = amdgpu_ring_generic_pad_ib, |
5180 | .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute, | ||
5181 | .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute, | ||
5182 | }; | 5194 | }; |
5183 | 5195 | ||
5184 | static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) | 5196 | static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -5289,3 +5301,39 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) | |||
5289 | cu_info->number = active_cu_number; | 5301 | cu_info->number = active_cu_number; |
5290 | cu_info->ao_cu_mask = ao_cu_mask; | 5302 | cu_info->ao_cu_mask = ao_cu_mask; |
5291 | } | 5303 | } |
5304 | |||
5305 | const struct amdgpu_ip_block_version gfx_v7_0_ip_block = | ||
5306 | { | ||
5307 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
5308 | .major = 7, | ||
5309 | .minor = 0, | ||
5310 | .rev = 0, | ||
5311 | .funcs = &gfx_v7_0_ip_funcs, | ||
5312 | }; | ||
5313 | |||
5314 | const struct amdgpu_ip_block_version gfx_v7_1_ip_block = | ||
5315 | { | ||
5316 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
5317 | .major = 7, | ||
5318 | .minor = 1, | ||
5319 | .rev = 0, | ||
5320 | .funcs = &gfx_v7_0_ip_funcs, | ||
5321 | }; | ||
5322 | |||
5323 | const struct amdgpu_ip_block_version gfx_v7_2_ip_block = | ||
5324 | { | ||
5325 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
5326 | .major = 7, | ||
5327 | .minor = 2, | ||
5328 | .rev = 0, | ||
5329 | .funcs = &gfx_v7_0_ip_funcs, | ||
5330 | }; | ||
5331 | |||
5332 | const struct amdgpu_ip_block_version gfx_v7_3_ip_block = | ||
5333 | { | ||
5334 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
5335 | .major = 7, | ||
5336 | .minor = 3, | ||
5337 | .rev = 0, | ||
5338 | .funcs = &gfx_v7_0_ip_funcs, | ||
5339 | }; | ||
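
Note on the gfx_v7_0.c hunks above: per-ring constants (ring type, alignment mask, NOP packet) and the worst-case dword counts move out of the runtime callbacks get_emit_ib_size()/get_dma_frame_size() and into the static amdgpu_ring_funcs table as emit_frame_size/emit_ib_size, and the IP block is now exported as per-minor-revision amdgpu_ip_block_version structs sharing one amd_ip_funcs. The following is a minimal, self-contained sketch of how such static sizes can be used to reserve ring space for a frame; the struct layout and helper here are simplified stand-ins, not the real amdgpu API.

#include <stdio.h>

/* Simplified stand-ins for the driver structs (not the real amdgpu types). */
struct ring_funcs {
	unsigned type;
	unsigned align_mask;
	unsigned nop;             /* NOP packet used for padding */
	unsigned emit_frame_size; /* worst-case dwords for per-frame packets */
	unsigned emit_ib_size;    /* worst-case dwords per indirect buffer */
};

struct ring {
	const struct ring_funcs *funcs;
};

/* Dwords to reserve for one frame that carries num_ibs indirect buffers. */
static unsigned frame_dwords(const struct ring *ring, unsigned num_ibs)
{
	return ring->funcs->emit_frame_size +
	       ring->funcs->emit_ib_size * num_ibs;
}

int main(void)
{
	static const struct ring_funcs gfx_funcs = {
		.type = 0,
		.align_mask = 0xff,
		.nop = 0xffff1000,  /* placeholder packet value */
		.emit_frame_size = 20 + 7 + 5 + 36 + 11 + 23 + 3,
		.emit_ib_size = 4,
	};
	struct ring ring = { .funcs = &gfx_funcs };

	printf("reserve %u dwords for a 2-IB frame\n", frame_dwords(&ring, 2));
	return 0;
}

Keeping these values as constants in the funcs table lets the submission path compute the reservation without an indirect call per ring, which appears to be the point of dropping the two size callbacks.
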
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h index 94e3ea147c26..2f5164cc0e53 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h | |||
@@ -24,6 +24,9 @@ | |||
24 | #ifndef __GFX_V7_0_H__ | 24 | #ifndef __GFX_V7_0_H__ |
25 | #define __GFX_V7_0_H__ | 25 | #define __GFX_V7_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gfx_v7_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gfx_v7_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block; | ||
30 | extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block; | ||
28 | 31 | ||
29 | #endif | 32 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ee6a48a09214..86a7ca5d8511 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -798,7 +798,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
798 | { | 798 | { |
799 | struct amdgpu_device *adev = ring->adev; | 799 | struct amdgpu_device *adev = ring->adev; |
800 | struct amdgpu_ib ib; | 800 | struct amdgpu_ib ib; |
801 | struct fence *f = NULL; | 801 | struct dma_fence *f = NULL; |
802 | uint32_t scratch; | 802 | uint32_t scratch; |
803 | uint32_t tmp = 0; | 803 | uint32_t tmp = 0; |
804 | long r; | 804 | long r; |
@@ -824,7 +824,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
824 | if (r) | 824 | if (r) |
825 | goto err2; | 825 | goto err2; |
826 | 826 | ||
827 | r = fence_wait_timeout(f, false, timeout); | 827 | r = dma_fence_wait_timeout(f, false, timeout); |
828 | if (r == 0) { | 828 | if (r == 0) { |
829 | DRM_ERROR("amdgpu: IB test timed out.\n"); | 829 | DRM_ERROR("amdgpu: IB test timed out.\n"); |
830 | r = -ETIMEDOUT; | 830 | r = -ETIMEDOUT; |
@@ -844,7 +844,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
844 | } | 844 | } |
845 | err2: | 845 | err2: |
846 | amdgpu_ib_free(adev, &ib, NULL); | 846 | amdgpu_ib_free(adev, &ib, NULL); |
847 | fence_put(f); | 847 | dma_fence_put(f); |
848 | err1: | 848 | err1: |
849 | amdgpu_gfx_scratch_free(adev, scratch); | 849 | amdgpu_gfx_scratch_free(adev, scratch); |
850 | return r; | 850 | return r; |
@@ -1058,6 +1058,19 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) | |||
1058 | adev->firmware.fw_size += | 1058 | adev->firmware.fw_size += |
1059 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | 1059 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); |
1060 | 1060 | ||
1061 | /* we need account JT in */ | ||
1062 | cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | ||
1063 | adev->firmware.fw_size += | ||
1064 | ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE); | ||
1065 | |||
1066 | if (amdgpu_sriov_vf(adev)) { | ||
1067 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE]; | ||
1068 | info->ucode_id = AMDGPU_UCODE_ID_STORAGE; | ||
1069 | info->fw = adev->gfx.mec_fw; | ||
1070 | adev->firmware.fw_size += | ||
1071 | ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE); | ||
1072 | } | ||
1073 | |||
1061 | if (adev->gfx.mec2_fw) { | 1074 | if (adev->gfx.mec2_fw) { |
1062 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2]; | 1075 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2]; |
1063 | info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2; | 1076 | info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2; |
@@ -1127,34 +1140,8 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, | |||
1127 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | 1140 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2)); |
1128 | buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - | 1141 | buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - |
1129 | PACKET3_SET_CONTEXT_REG_START); | 1142 | PACKET3_SET_CONTEXT_REG_START); |
1130 | switch (adev->asic_type) { | 1143 | buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config); |
1131 | case CHIP_TONGA: | 1144 | buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1); |
1132 | case CHIP_POLARIS10: | ||
1133 | buffer[count++] = cpu_to_le32(0x16000012); | ||
1134 | buffer[count++] = cpu_to_le32(0x0000002A); | ||
1135 | break; | ||
1136 | case CHIP_POLARIS11: | ||
1137 | buffer[count++] = cpu_to_le32(0x16000012); | ||
1138 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1139 | break; | ||
1140 | case CHIP_FIJI: | ||
1141 | buffer[count++] = cpu_to_le32(0x3a00161a); | ||
1142 | buffer[count++] = cpu_to_le32(0x0000002e); | ||
1143 | break; | ||
1144 | case CHIP_TOPAZ: | ||
1145 | case CHIP_CARRIZO: | ||
1146 | buffer[count++] = cpu_to_le32(0x00000002); | ||
1147 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1148 | break; | ||
1149 | case CHIP_STONEY: | ||
1150 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1151 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1152 | break; | ||
1153 | default: | ||
1154 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1155 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1156 | break; | ||
1157 | } | ||
1158 | 1145 | ||
1159 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); | 1146 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); |
1160 | buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); | 1147 | buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); |
@@ -1273,7 +1260,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | |||
1273 | if (adev->gfx.rlc.clear_state_obj == NULL) { | 1260 | if (adev->gfx.rlc.clear_state_obj == NULL) { |
1274 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 1261 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, |
1275 | AMDGPU_GEM_DOMAIN_VRAM, | 1262 | AMDGPU_GEM_DOMAIN_VRAM, |
1276 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 1263 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1264 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1277 | NULL, NULL, | 1265 | NULL, NULL, |
1278 | &adev->gfx.rlc.clear_state_obj); | 1266 | &adev->gfx.rlc.clear_state_obj); |
1279 | if (r) { | 1267 | if (r) { |
@@ -1315,7 +1303,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | |||
1315 | if (adev->gfx.rlc.cp_table_obj == NULL) { | 1303 | if (adev->gfx.rlc.cp_table_obj == NULL) { |
1316 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, | 1304 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, |
1317 | AMDGPU_GEM_DOMAIN_VRAM, | 1305 | AMDGPU_GEM_DOMAIN_VRAM, |
1318 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 1306 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1307 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1319 | NULL, NULL, | 1308 | NULL, NULL, |
1320 | &adev->gfx.rlc.cp_table_obj); | 1309 | &adev->gfx.rlc.cp_table_obj); |
1321 | if (r) { | 1310 | if (r) { |
@@ -1575,7 +1564,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) | |||
1575 | { | 1564 | { |
1576 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; | 1565 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; |
1577 | struct amdgpu_ib ib; | 1566 | struct amdgpu_ib ib; |
1578 | struct fence *f = NULL; | 1567 | struct dma_fence *f = NULL; |
1579 | int r, i; | 1568 | int r, i; |
1580 | u32 tmp; | 1569 | u32 tmp; |
1581 | unsigned total_size, vgpr_offset, sgpr_offset; | 1570 | unsigned total_size, vgpr_offset, sgpr_offset; |
@@ -1708,7 +1697,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) | |||
1708 | } | 1697 | } |
1709 | 1698 | ||
1710 | /* wait for the GPU to finish processing the IB */ | 1699 | /* wait for the GPU to finish processing the IB */ |
1711 | r = fence_wait(f, false); | 1700 | r = dma_fence_wait(f, false); |
1712 | if (r) { | 1701 | if (r) { |
1713 | DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); | 1702 | DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); |
1714 | goto fail; | 1703 | goto fail; |
@@ -1729,7 +1718,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) | |||
1729 | 1718 | ||
1730 | fail: | 1719 | fail: |
1731 | amdgpu_ib_free(adev, &ib, NULL); | 1720 | amdgpu_ib_free(adev, &ib, NULL); |
1732 | fence_put(f); | 1721 | dma_fence_put(f); |
1733 | 1722 | ||
1734 | return r; | 1723 | return r; |
1735 | } | 1724 | } |
@@ -2045,10 +2034,8 @@ static int gfx_v8_0_sw_init(void *handle) | |||
2045 | ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0; | 2034 | ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0; |
2046 | } | 2035 | } |
2047 | 2036 | ||
2048 | r = amdgpu_ring_init(adev, ring, 1024, | 2037 | r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, |
2049 | PACKET3(PACKET3_NOP, 0x3FFF), 0xf, | 2038 | AMDGPU_CP_IRQ_GFX_EOP); |
2050 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, | ||
2051 | AMDGPU_RING_TYPE_GFX); | ||
2052 | if (r) | 2039 | if (r) |
2053 | return r; | 2040 | return r; |
2054 | } | 2041 | } |
@@ -2072,10 +2059,8 @@ static int gfx_v8_0_sw_init(void *handle) | |||
2072 | sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); | 2059 | sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); |
2073 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; | 2060 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; |
2074 | /* type-2 packets are deprecated on MEC, use type-3 instead */ | 2061 | /* type-2 packets are deprecated on MEC, use type-3 instead */ |
2075 | r = amdgpu_ring_init(adev, ring, 1024, | 2062 | r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, |
2076 | PACKET3(PACKET3_NOP, 0x3FFF), 0xf, | 2063 | irq_type); |
2077 | &adev->gfx.eop_irq, irq_type, | ||
2078 | AMDGPU_RING_TYPE_COMPUTE); | ||
2079 | if (r) | 2064 | if (r) |
2080 | return r; | 2065 | return r; |
2081 | } | 2066 | } |
@@ -3679,6 +3664,21 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) | |||
3679 | num_rb_pipes); | 3664 | num_rb_pipes); |
3680 | } | 3665 | } |
3681 | 3666 | ||
3667 | /* cache the values for userspace */ | ||
3668 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { | ||
3669 | for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { | ||
3670 | gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); | ||
3671 | adev->gfx.config.rb_config[i][j].rb_backend_disable = | ||
3672 | RREG32(mmCC_RB_BACKEND_DISABLE); | ||
3673 | adev->gfx.config.rb_config[i][j].user_rb_backend_disable = | ||
3674 | RREG32(mmGC_USER_RB_BACKEND_DISABLE); | ||
3675 | adev->gfx.config.rb_config[i][j].raster_config = | ||
3676 | RREG32(mmPA_SC_RASTER_CONFIG); | ||
3677 | adev->gfx.config.rb_config[i][j].raster_config_1 = | ||
3678 | RREG32(mmPA_SC_RASTER_CONFIG_1); | ||
3679 | } | ||
3680 | } | ||
3681 | gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | ||
3682 | mutex_unlock(&adev->grbm_idx_mutex); | 3682 | mutex_unlock(&adev->grbm_idx_mutex); |
3683 | } | 3683 | } |
3684 | 3684 | ||
@@ -4331,7 +4331,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) | |||
4331 | struct amdgpu_ring *ring; | 4331 | struct amdgpu_ring *ring; |
4332 | u32 tmp; | 4332 | u32 tmp; |
4333 | u32 rb_bufsz; | 4333 | u32 rb_bufsz; |
4334 | u64 rb_addr, rptr_addr; | 4334 | u64 rb_addr, rptr_addr, wptr_gpu_addr; |
4335 | int r; | 4335 | int r; |
4336 | 4336 | ||
4337 | /* Set the write pointer delay */ | 4337 | /* Set the write pointer delay */ |
@@ -4362,6 +4362,9 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) | |||
4362 | WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); | 4362 | WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); |
4363 | WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); | 4363 | WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); |
4364 | 4364 | ||
4365 | wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); | ||
4366 | WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr)); | ||
4367 | WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr)); | ||
4365 | mdelay(1); | 4368 | mdelay(1); |
4366 | WREG32(mmCP_RB0_CNTL, tmp); | 4369 | WREG32(mmCP_RB0_CNTL, tmp); |
4367 | 4370 | ||
@@ -5438,9 +5441,41 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring, | |||
5438 | amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); | 5441 | amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); |
5439 | } | 5442 | } |
5440 | 5443 | ||
5444 | static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) | ||
5445 | { | ||
5446 | WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13)); | ||
5447 | return RREG32(mmSQ_IND_DATA); | ||
5448 | } | ||
5449 | |||
5450 | static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) | ||
5451 | { | ||
5452 | /* type 0 wave data */ | ||
5453 | dst[(*no_fields)++] = 0; | ||
5454 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS); | ||
5455 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO); | ||
5456 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI); | ||
5457 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO); | ||
5458 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI); | ||
5459 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID); | ||
5460 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0); | ||
5461 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1); | ||
5462 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC); | ||
5463 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC); | ||
5464 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS); | ||
5465 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); | ||
5466 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO); | ||
5467 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI); | ||
5468 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO); | ||
5469 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI); | ||
5470 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); | ||
5471 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); | ||
5472 | } | ||
5473 | |||
5474 | |||
5441 | static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = { | 5475 | static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = { |
5442 | .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, | 5476 | .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, |
5443 | .select_se_sh = &gfx_v8_0_select_se_sh, | 5477 | .select_se_sh = &gfx_v8_0_select_se_sh, |
5478 | .read_wave_data = &gfx_v8_0_read_wave_data, | ||
5444 | }; | 5479 | }; |
5445 | 5480 | ||
5446 | static int gfx_v8_0_early_init(void *handle) | 5481 | static int gfx_v8_0_early_init(void *handle) |
@@ -6120,7 +6155,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) | |||
6120 | { | 6155 | { |
6121 | u32 ref_and_mask, reg_mem_engine; | 6156 | u32 ref_and_mask, reg_mem_engine; |
6122 | 6157 | ||
6123 | if (ring->type == AMDGPU_RING_TYPE_COMPUTE) { | 6158 | if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { |
6124 | switch (ring->me) { | 6159 | switch (ring->me) { |
6125 | case 1: | 6160 | case 1: |
6126 | ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; | 6161 | ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; |
@@ -6222,7 +6257,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, | |||
6222 | 6257 | ||
6223 | static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | 6258 | static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) |
6224 | { | 6259 | { |
6225 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 6260 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
6226 | uint32_t seq = ring->fence_drv.sync_seq; | 6261 | uint32_t seq = ring->fence_drv.sync_seq; |
6227 | uint64_t addr = ring->fence_drv.gpu_addr; | 6262 | uint64_t addr = ring->fence_drv.gpu_addr; |
6228 | 6263 | ||
@@ -6240,11 +6275,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | |||
6240 | static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | 6275 | static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, |
6241 | unsigned vm_id, uint64_t pd_addr) | 6276 | unsigned vm_id, uint64_t pd_addr) |
6242 | { | 6277 | { |
6243 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 6278 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
6244 | |||
6245 | /* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */ | ||
6246 | if (usepfp) | ||
6247 | amdgpu_ring_insert_nop(ring, 128); | ||
6248 | 6279 | ||
6249 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 6280 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
6250 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | | 6281 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
@@ -6360,42 +6391,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) | |||
6360 | amdgpu_ring_write(ring, 0); | 6391 | amdgpu_ring_write(ring, 0); |
6361 | } | 6392 | } |
6362 | 6393 | ||
6363 | static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring) | ||
6364 | { | ||
6365 | return | ||
6366 | 4; /* gfx_v8_0_ring_emit_ib_gfx */ | ||
6367 | } | ||
6368 | |||
6369 | static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring) | ||
6370 | { | ||
6371 | return | ||
6372 | 20 + /* gfx_v8_0_ring_emit_gds_switch */ | ||
6373 | 7 + /* gfx_v8_0_ring_emit_hdp_flush */ | ||
6374 | 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ | ||
6375 | 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */ | ||
6376 | 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ | ||
6377 | 256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */ | ||
6378 | 2 + /* gfx_v8_ring_emit_sb */ | ||
6379 | 3; /* gfx_v8_ring_emit_cntxcntl */ | ||
6380 | } | ||
6381 | |||
6382 | static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring) | ||
6383 | { | ||
6384 | return | ||
6385 | 4; /* gfx_v8_0_ring_emit_ib_compute */ | ||
6386 | } | ||
6387 | |||
6388 | static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring) | ||
6389 | { | ||
6390 | return | ||
6391 | 20 + /* gfx_v8_0_ring_emit_gds_switch */ | ||
6392 | 7 + /* gfx_v8_0_ring_emit_hdp_flush */ | ||
6393 | 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ | ||
6394 | 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ | ||
6395 | 17 + /* gfx_v8_0_ring_emit_vm_flush */ | ||
6396 | 7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */ | ||
6397 | } | ||
6398 | |||
6399 | static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, | 6394 | static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, |
6400 | enum amdgpu_interrupt_state state) | 6395 | enum amdgpu_interrupt_state state) |
6401 | { | 6396 | { |
@@ -6541,7 +6536,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev, | |||
6541 | return 0; | 6536 | return 0; |
6542 | } | 6537 | } |
6543 | 6538 | ||
6544 | const struct amd_ip_funcs gfx_v8_0_ip_funcs = { | 6539 | static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { |
6545 | .name = "gfx_v8_0", | 6540 | .name = "gfx_v8_0", |
6546 | .early_init = gfx_v8_0_early_init, | 6541 | .early_init = gfx_v8_0_early_init, |
6547 | .late_init = gfx_v8_0_late_init, | 6542 | .late_init = gfx_v8_0_late_init, |
@@ -6562,10 +6557,22 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = { | |||
6562 | }; | 6557 | }; |
6563 | 6558 | ||
6564 | static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { | 6559 | static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { |
6560 | .type = AMDGPU_RING_TYPE_GFX, | ||
6561 | .align_mask = 0xff, | ||
6562 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | ||
6565 | .get_rptr = gfx_v8_0_ring_get_rptr, | 6563 | .get_rptr = gfx_v8_0_ring_get_rptr, |
6566 | .get_wptr = gfx_v8_0_ring_get_wptr_gfx, | 6564 | .get_wptr = gfx_v8_0_ring_get_wptr_gfx, |
6567 | .set_wptr = gfx_v8_0_ring_set_wptr_gfx, | 6565 | .set_wptr = gfx_v8_0_ring_set_wptr_gfx, |
6568 | .parse_cs = NULL, | 6566 | .emit_frame_size = |
6567 | 20 + /* gfx_v8_0_ring_emit_gds_switch */ | ||
6568 | 7 + /* gfx_v8_0_ring_emit_hdp_flush */ | ||
6569 | 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ | ||
6570 | 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */ | ||
6571 | 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ | ||
6572 | 128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */ | ||
6573 | 2 + /* gfx_v8_ring_emit_sb */ | ||
6574 | 3, /* gfx_v8_ring_emit_cntxcntl */ | ||
6575 | .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ | ||
6569 | .emit_ib = gfx_v8_0_ring_emit_ib_gfx, | 6576 | .emit_ib = gfx_v8_0_ring_emit_ib_gfx, |
6570 | .emit_fence = gfx_v8_0_ring_emit_fence_gfx, | 6577 | .emit_fence = gfx_v8_0_ring_emit_fence_gfx, |
6571 | .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync, | 6578 | .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync, |
@@ -6579,15 +6586,23 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { | |||
6579 | .pad_ib = amdgpu_ring_generic_pad_ib, | 6586 | .pad_ib = amdgpu_ring_generic_pad_ib, |
6580 | .emit_switch_buffer = gfx_v8_ring_emit_sb, | 6587 | .emit_switch_buffer = gfx_v8_ring_emit_sb, |
6581 | .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl, | 6588 | .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl, |
6582 | .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx, | ||
6583 | .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx, | ||
6584 | }; | 6589 | }; |
6585 | 6590 | ||
6586 | static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { | 6591 | static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { |
6592 | .type = AMDGPU_RING_TYPE_COMPUTE, | ||
6593 | .align_mask = 0xff, | ||
6594 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | ||
6587 | .get_rptr = gfx_v8_0_ring_get_rptr, | 6595 | .get_rptr = gfx_v8_0_ring_get_rptr, |
6588 | .get_wptr = gfx_v8_0_ring_get_wptr_compute, | 6596 | .get_wptr = gfx_v8_0_ring_get_wptr_compute, |
6589 | .set_wptr = gfx_v8_0_ring_set_wptr_compute, | 6597 | .set_wptr = gfx_v8_0_ring_set_wptr_compute, |
6590 | .parse_cs = NULL, | 6598 | .emit_frame_size = |
6599 | 20 + /* gfx_v8_0_ring_emit_gds_switch */ | ||
6600 | 7 + /* gfx_v8_0_ring_emit_hdp_flush */ | ||
6601 | 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ | ||
6602 | 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ | ||
6603 | 17 + /* gfx_v8_0_ring_emit_vm_flush */ | ||
6604 | 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */ | ||
6605 | .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ | ||
6591 | .emit_ib = gfx_v8_0_ring_emit_ib_compute, | 6606 | .emit_ib = gfx_v8_0_ring_emit_ib_compute, |
6592 | .emit_fence = gfx_v8_0_ring_emit_fence_compute, | 6607 | .emit_fence = gfx_v8_0_ring_emit_fence_compute, |
6593 | .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync, | 6608 | .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync, |
@@ -6599,8 +6614,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { | |||
6599 | .test_ib = gfx_v8_0_ring_test_ib, | 6614 | .test_ib = gfx_v8_0_ring_test_ib, |
6600 | .insert_nop = amdgpu_ring_insert_nop, | 6615 | .insert_nop = amdgpu_ring_insert_nop, |
6601 | .pad_ib = amdgpu_ring_generic_pad_ib, | 6616 | .pad_ib = amdgpu_ring_generic_pad_ib, |
6602 | .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute, | ||
6603 | .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute, | ||
6604 | }; | 6617 | }; |
6605 | 6618 | ||
6606 | static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) | 6619 | static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -6753,3 +6766,21 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) | |||
6753 | cu_info->number = active_cu_number; | 6766 | cu_info->number = active_cu_number; |
6754 | cu_info->ao_cu_mask = ao_cu_mask; | 6767 | cu_info->ao_cu_mask = ao_cu_mask; |
6755 | } | 6768 | } |
6769 | |||
6770 | const struct amdgpu_ip_block_version gfx_v8_0_ip_block = | ||
6771 | { | ||
6772 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
6773 | .major = 8, | ||
6774 | .minor = 0, | ||
6775 | .rev = 0, | ||
6776 | .funcs = &gfx_v8_0_ip_funcs, | ||
6777 | }; | ||
6778 | |||
6779 | const struct amdgpu_ip_block_version gfx_v8_1_ip_block = | ||
6780 | { | ||
6781 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
6782 | .major = 8, | ||
6783 | .minor = 1, | ||
6784 | .rev = 0, | ||
6785 | .funcs = &gfx_v8_0_ip_funcs, | ||
6786 | }; | ||
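
The gfx_v8_0.c hunks add the same wave_read_ind()/read_wave_data() helpers as gfx_v7_0.c: an index/data pair of registers is used to read SQ wave state indirectly. Below is a rough, self-contained illustration of that pattern; the MMIO accessors are mocked so it runs standalone, and only the field packing mirrors the helper in the hunks above.

#include <stdint.h>
#include <stdio.h>

/*
 * Mock MMIO accessors so the sketch is self-contained; in the driver these
 * are the real WREG32()/RREG32() register writes and reads.
 */
static uint32_t sq_ind_index;

static void WREG32(const char *reg, uint32_t val)
{
	(void)reg;
	sq_ind_index = val;
}

static uint32_t RREG32(const char *reg)
{
	(void)reg;
	/* Pretend the hardware returns the selected address, for demonstration. */
	return sq_ind_index >> 16;
}

/* Same field packing as the wave_read_ind() helper added in the hunks above. */
static uint32_t wave_read_ind(uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32("mmSQ_IND_INDEX",
	       (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
	return RREG32("mmSQ_IND_DATA");
}

int main(void)
{
	/* Read a (pretend) wave status register for SIMD 1, wave 3. */
	printf("0x%x\n", wave_read_ind(1, 3, 0x12));
	return 0;
}
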
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h index ebed1f829297..788cc3ab584b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #ifndef __GFX_V8_0_H__ | 24 | #ifndef __GFX_V8_0_H__ |
25 | #define __GFX_V8_0_H__ | 25 | #define __GFX_V8_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gfx_v8_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version gfx_v8_1_ip_block; | ||
28 | 29 | ||
29 | #endif | 30 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index b13c8aaec078..1940d36bc304 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
@@ -1030,7 +1030,7 @@ static int gmc_v6_0_set_powergating_state(void *handle, | |||
1030 | return 0; | 1030 | return 0; |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | const struct amd_ip_funcs gmc_v6_0_ip_funcs = { | 1033 | static const struct amd_ip_funcs gmc_v6_0_ip_funcs = { |
1034 | .name = "gmc_v6_0", | 1034 | .name = "gmc_v6_0", |
1035 | .early_init = gmc_v6_0_early_init, | 1035 | .early_init = gmc_v6_0_early_init, |
1036 | .late_init = gmc_v6_0_late_init, | 1036 | .late_init = gmc_v6_0_late_init, |
@@ -1069,3 +1069,11 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev) | |||
1069 | adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs; | 1069 | adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs; |
1070 | } | 1070 | } |
1071 | 1071 | ||
1072 | const struct amdgpu_ip_block_version gmc_v6_0_ip_block = | ||
1073 | { | ||
1074 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1075 | .major = 6, | ||
1076 | .minor = 0, | ||
1077 | .rev = 0, | ||
1078 | .funcs = &gmc_v6_0_ip_funcs, | ||
1079 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h index 42c4fc676cd4..ed2f64dec47a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __GMC_V6_0_H__ | 24 | #ifndef __GMC_V6_0_H__ |
25 | #define __GMC_V6_0_H__ | 25 | #define __GMC_V6_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gmc_v6_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gmc_v6_0_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index aa0c4b964621..3a25f72980c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
@@ -1235,7 +1235,7 @@ static int gmc_v7_0_set_powergating_state(void *handle, | |||
1235 | return 0; | 1235 | return 0; |
1236 | } | 1236 | } |
1237 | 1237 | ||
1238 | const struct amd_ip_funcs gmc_v7_0_ip_funcs = { | 1238 | static const struct amd_ip_funcs gmc_v7_0_ip_funcs = { |
1239 | .name = "gmc_v7_0", | 1239 | .name = "gmc_v7_0", |
1240 | .early_init = gmc_v7_0_early_init, | 1240 | .early_init = gmc_v7_0_early_init, |
1241 | .late_init = gmc_v7_0_late_init, | 1241 | .late_init = gmc_v7_0_late_init, |
@@ -1273,3 +1273,21 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev) | |||
1273 | adev->mc.vm_fault.num_types = 1; | 1273 | adev->mc.vm_fault.num_types = 1; |
1274 | adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs; | 1274 | adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs; |
1275 | } | 1275 | } |
1276 | |||
1277 | const struct amdgpu_ip_block_version gmc_v7_0_ip_block = | ||
1278 | { | ||
1279 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1280 | .major = 7, | ||
1281 | .minor = 0, | ||
1282 | .rev = 0, | ||
1283 | .funcs = &gmc_v7_0_ip_funcs, | ||
1284 | }; | ||
1285 | |||
1286 | const struct amdgpu_ip_block_version gmc_v7_4_ip_block = | ||
1287 | { | ||
1288 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1289 | .major = 7, | ||
1290 | .minor = 4, | ||
1291 | .rev = 0, | ||
1292 | .funcs = &gmc_v7_0_ip_funcs, | ||
1293 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h index 0b386b5d2f7a..ebce2966c1c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #ifndef __GMC_V7_0_H__ | 24 | #ifndef __GMC_V7_0_H__ |
25 | #define __GMC_V7_0_H__ | 25 | #define __GMC_V7_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gmc_v7_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gmc_v7_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version gmc_v7_4_ip_block; | ||
28 | 29 | ||
29 | #endif | 30 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index c22ef140a542..74d7cc3f7e8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -1436,7 +1436,7 @@ static int gmc_v8_0_set_powergating_state(void *handle, | |||
1436 | return 0; | 1436 | return 0; |
1437 | } | 1437 | } |
1438 | 1438 | ||
1439 | const struct amd_ip_funcs gmc_v8_0_ip_funcs = { | 1439 | static const struct amd_ip_funcs gmc_v8_0_ip_funcs = { |
1440 | .name = "gmc_v8_0", | 1440 | .name = "gmc_v8_0", |
1441 | .early_init = gmc_v8_0_early_init, | 1441 | .early_init = gmc_v8_0_early_init, |
1442 | .late_init = gmc_v8_0_late_init, | 1442 | .late_init = gmc_v8_0_late_init, |
@@ -1477,3 +1477,30 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev) | |||
1477 | adev->mc.vm_fault.num_types = 1; | 1477 | adev->mc.vm_fault.num_types = 1; |
1478 | adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs; | 1478 | adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs; |
1479 | } | 1479 | } |
1480 | |||
1481 | const struct amdgpu_ip_block_version gmc_v8_0_ip_block = | ||
1482 | { | ||
1483 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1484 | .major = 8, | ||
1485 | .minor = 0, | ||
1486 | .rev = 0, | ||
1487 | .funcs = &gmc_v8_0_ip_funcs, | ||
1488 | }; | ||
1489 | |||
1490 | const struct amdgpu_ip_block_version gmc_v8_1_ip_block = | ||
1491 | { | ||
1492 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1493 | .major = 8, | ||
1494 | .minor = 1, | ||
1495 | .rev = 0, | ||
1496 | .funcs = &gmc_v8_0_ip_funcs, | ||
1497 | }; | ||
1498 | |||
1499 | const struct amdgpu_ip_block_version gmc_v8_5_ip_block = | ||
1500 | { | ||
1501 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1502 | .major = 8, | ||
1503 | .minor = 5, | ||
1504 | .rev = 0, | ||
1505 | .funcs = &gmc_v8_0_ip_funcs, | ||
1506 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h index fc5001a8119d..19b8a8aed204 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #ifndef __GMC_V8_0_H__ | 24 | #ifndef __GMC_V8_0_H__ |
25 | #define __GMC_V8_0_H__ | 25 | #define __GMC_V8_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gmc_v8_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gmc_v8_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version gmc_v8_1_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version gmc_v8_5_ip_block; | ||
28 | 30 | ||
29 | #endif | 31 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 3b8906ce3511..ac21bb7bc0f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c | |||
@@ -392,7 +392,7 @@ static int iceland_ih_set_powergating_state(void *handle, | |||
392 | return 0; | 392 | return 0; |
393 | } | 393 | } |
394 | 394 | ||
395 | const struct amd_ip_funcs iceland_ih_ip_funcs = { | 395 | static const struct amd_ip_funcs iceland_ih_ip_funcs = { |
396 | .name = "iceland_ih", | 396 | .name = "iceland_ih", |
397 | .early_init = iceland_ih_early_init, | 397 | .early_init = iceland_ih_early_init, |
398 | .late_init = NULL, | 398 | .late_init = NULL, |
@@ -421,3 +421,11 @@ static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev) | |||
421 | adev->irq.ih_funcs = &iceland_ih_funcs; | 421 | adev->irq.ih_funcs = &iceland_ih_funcs; |
422 | } | 422 | } |
423 | 423 | ||
424 | const struct amdgpu_ip_block_version iceland_ih_ip_block = | ||
425 | { | ||
426 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
427 | .major = 2, | ||
428 | .minor = 4, | ||
429 | .rev = 0, | ||
430 | .funcs = &iceland_ih_ip_funcs, | ||
431 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h index 57558cddfbcb..3235f4277548 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __ICELAND_IH_H__ | 24 | #ifndef __ICELAND_IH_H__ |
25 | #define __ICELAND_IH_H__ | 25 | #define __ICELAND_IH_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs iceland_ih_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version iceland_ih_ip_block; |
28 | 28 | ||
29 | #endif /* __ICELAND_IH_H__ */ | 29 | #endif /* __ICELAND_IH_H__ */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index f8618a3881a8..b6f2e50636a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
@@ -2796,7 +2796,7 @@ static int kv_parse_power_table(struct amdgpu_device *adev) | |||
2796 | adev->pm.dpm.num_ps = state_array->ucNumEntries; | 2796 | adev->pm.dpm.num_ps = state_array->ucNumEntries; |
2797 | 2797 | ||
2798 | /* fill in the vce power states */ | 2798 | /* fill in the vce power states */ |
2799 | for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { | 2799 | for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { |
2800 | u32 sclk; | 2800 | u32 sclk; |
2801 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; | 2801 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; |
2802 | clock_info = (union pplib_clock_info *) | 2802 | clock_info = (union pplib_clock_info *) |
@@ -3243,6 +3243,18 @@ static int kv_dpm_set_powergating_state(void *handle, | |||
3243 | return 0; | 3243 | return 0; |
3244 | } | 3244 | } |
3245 | 3245 | ||
3246 | static int kv_check_state_equal(struct amdgpu_device *adev, | ||
3247 | struct amdgpu_ps *cps, | ||
3248 | struct amdgpu_ps *rps, | ||
3249 | bool *equal) | ||
3250 | { | ||
3251 | if (equal == NULL) | ||
3252 | return -EINVAL; | ||
3253 | |||
3254 | *equal = false; | ||
3255 | return 0; | ||
3256 | } | ||
3257 | |||
3246 | const struct amd_ip_funcs kv_dpm_ip_funcs = { | 3258 | const struct amd_ip_funcs kv_dpm_ip_funcs = { |
3247 | .name = "kv_dpm", | 3259 | .name = "kv_dpm", |
3248 | .early_init = kv_dpm_early_init, | 3260 | .early_init = kv_dpm_early_init, |
@@ -3273,6 +3285,8 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = { | |||
3273 | .force_performance_level = &kv_dpm_force_performance_level, | 3285 | .force_performance_level = &kv_dpm_force_performance_level, |
3274 | .powergate_uvd = &kv_dpm_powergate_uvd, | 3286 | .powergate_uvd = &kv_dpm_powergate_uvd, |
3275 | .enable_bapm = &kv_dpm_enable_bapm, | 3287 | .enable_bapm = &kv_dpm_enable_bapm, |
3288 | .get_vce_clock_state = amdgpu_get_vce_clock_state, | ||
3289 | .check_state_equal = kv_check_state_equal, | ||
3276 | }; | 3290 | }; |
3277 | 3291 | ||
3278 | static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev) | 3292 | static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev) |
@@ -3291,3 +3305,12 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) | |||
3291 | adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; | 3305 | adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; |
3292 | adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; | 3306 | adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; |
3293 | } | 3307 | } |
3308 | |||
3309 | const struct amdgpu_ip_block_version kv_dpm_ip_block = | ||
3310 | { | ||
3311 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
3312 | .major = 7, | ||
3313 | .minor = 0, | ||
3314 | .rev = 0, | ||
3315 | .funcs = &kv_dpm_ip_funcs, | ||
3316 | }; | ||
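
The kv_dpm.c hunk wires two new callbacks into the dpm funcs table; kv_check_state_equal() deliberately reports "not equal" so KV always performs a full state transition, and it must tolerate a NULL out-pointer. A small sketch of how a caller would typically consume such a callback follows; the types and caller are illustrative stand-ins, not the real amdgpu dpm code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ps { int sclk; };           /* stand-in for struct amdgpu_ps */

/* Mirrors the contract of the kv_check_state_equal() callback above. */
static int check_state_equal(struct ps *cur, struct ps *req, bool *equal)
{
	if (equal == NULL)
		return -22;        /* -EINVAL */
	(void)cur;
	(void)req;
	*equal = false;            /* KV: always force a full state switch */
	return 0;
}

int main(void)
{
	struct ps cur = { 300 }, req = { 600 };
	bool equal;

	/* Typical caller: skip the power-state switch only when states match. */
	if (check_state_equal(&cur, &req, &equal) == 0 && equal)
		printf("states equal, nothing to do\n");
	else
		printf("performing power state transition\n");
	return 0;
}
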
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 565dab3c7218..e81aa4682760 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
@@ -232,10 +232,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
232 | 232 | ||
233 | for (i = 0; i < count; i++) | 233 | for (i = 0; i < count; i++) |
234 | if (sdma && sdma->burst_nop && (i == 0)) | 234 | if (sdma && sdma->burst_nop && (i == 0)) |
235 | amdgpu_ring_write(ring, ring->nop | | 235 | amdgpu_ring_write(ring, ring->funcs->nop | |
236 | SDMA_PKT_NOP_HEADER_COUNT(count - 1)); | 236 | SDMA_PKT_NOP_HEADER_COUNT(count - 1)); |
237 | else | 237 | else |
238 | amdgpu_ring_write(ring, ring->nop); | 238 | amdgpu_ring_write(ring, ring->funcs->nop); |
239 | } | 239 | } |
240 | 240 | ||
241 | /** | 241 | /** |
@@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
668 | { | 668 | { |
669 | struct amdgpu_device *adev = ring->adev; | 669 | struct amdgpu_device *adev = ring->adev; |
670 | struct amdgpu_ib ib; | 670 | struct amdgpu_ib ib; |
671 | struct fence *f = NULL; | 671 | struct dma_fence *f = NULL; |
672 | unsigned index; | 672 | unsigned index; |
673 | u32 tmp = 0; | 673 | u32 tmp = 0; |
674 | u64 gpu_addr; | 674 | u64 gpu_addr; |
@@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
705 | if (r) | 705 | if (r) |
706 | goto err1; | 706 | goto err1; |
707 | 707 | ||
708 | r = fence_wait_timeout(f, false, timeout); | 708 | r = dma_fence_wait_timeout(f, false, timeout); |
709 | if (r == 0) { | 709 | if (r == 0) { |
710 | DRM_ERROR("amdgpu: IB test timed out\n"); | 710 | DRM_ERROR("amdgpu: IB test timed out\n"); |
711 | r = -ETIMEDOUT; | 711 | r = -ETIMEDOUT; |
@@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
725 | 725 | ||
726 | err1: | 726 | err1: |
727 | amdgpu_ib_free(adev, &ib, NULL); | 727 | amdgpu_ib_free(adev, &ib, NULL); |
728 | fence_put(f); | 728 | dma_fence_put(f); |
729 | err0: | 729 | err0: |
730 | amdgpu_wb_free(adev, index); | 730 | amdgpu_wb_free(adev, index); |
731 | return r; | 731 | return r; |
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
902 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ | 902 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ |
903 | } | 903 | } |
904 | 904 | ||
905 | static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
906 | { | ||
907 | return | ||
908 | 7 + 6; /* sdma_v2_4_ring_emit_ib */ | ||
909 | } | ||
910 | |||
911 | static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
912 | { | ||
913 | return | ||
914 | 6 + /* sdma_v2_4_ring_emit_hdp_flush */ | ||
915 | 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */ | ||
916 | 6 + /* sdma_v2_4_ring_emit_pipeline_sync */ | ||
917 | 12 + /* sdma_v2_4_ring_emit_vm_flush */ | ||
918 | 10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */ | ||
919 | } | ||
920 | |||
921 | static int sdma_v2_4_early_init(void *handle) | 905 | static int sdma_v2_4_early_init(void *handle) |
922 | { | 906 | { |
923 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 907 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -965,11 +949,10 @@ static int sdma_v2_4_sw_init(void *handle) | |||
965 | ring->use_doorbell = false; | 949 | ring->use_doorbell = false; |
966 | sprintf(ring->name, "sdma%d", i); | 950 | sprintf(ring->name, "sdma%d", i); |
967 | r = amdgpu_ring_init(adev, ring, 1024, | 951 | r = amdgpu_ring_init(adev, ring, 1024, |
968 | SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, | ||
969 | &adev->sdma.trap_irq, | 952 | &adev->sdma.trap_irq, |
970 | (i == 0) ? | 953 | (i == 0) ? |
971 | AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1, | 954 | AMDGPU_SDMA_IRQ_TRAP0 : |
972 | AMDGPU_RING_TYPE_SDMA); | 955 | AMDGPU_SDMA_IRQ_TRAP1); |
973 | if (r) | 956 | if (r) |
974 | return r; | 957 | return r; |
975 | } | 958 | } |
@@ -1204,7 +1187,7 @@ static int sdma_v2_4_set_powergating_state(void *handle, | |||
1204 | return 0; | 1187 | return 0; |
1205 | } | 1188 | } |
1206 | 1189 | ||
1207 | const struct amd_ip_funcs sdma_v2_4_ip_funcs = { | 1190 | static const struct amd_ip_funcs sdma_v2_4_ip_funcs = { |
1208 | .name = "sdma_v2_4", | 1191 | .name = "sdma_v2_4", |
1209 | .early_init = sdma_v2_4_early_init, | 1192 | .early_init = sdma_v2_4_early_init, |
1210 | .late_init = NULL, | 1193 | .late_init = NULL, |
@@ -1222,10 +1205,19 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = { | |||
1222 | }; | 1205 | }; |
1223 | 1206 | ||
1224 | static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { | 1207 | static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { |
1208 | .type = AMDGPU_RING_TYPE_SDMA, | ||
1209 | .align_mask = 0xf, | ||
1210 | .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), | ||
1225 | .get_rptr = sdma_v2_4_ring_get_rptr, | 1211 | .get_rptr = sdma_v2_4_ring_get_rptr, |
1226 | .get_wptr = sdma_v2_4_ring_get_wptr, | 1212 | .get_wptr = sdma_v2_4_ring_get_wptr, |
1227 | .set_wptr = sdma_v2_4_ring_set_wptr, | 1213 | .set_wptr = sdma_v2_4_ring_set_wptr, |
1228 | .parse_cs = NULL, | 1214 | .emit_frame_size = |
1215 | 6 + /* sdma_v2_4_ring_emit_hdp_flush */ | ||
1216 | 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */ | ||
1217 | 6 + /* sdma_v2_4_ring_emit_pipeline_sync */ | ||
1218 | 12 + /* sdma_v2_4_ring_emit_vm_flush */ | ||
1219 | 10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */ | ||
1220 | .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */ | ||
1229 | .emit_ib = sdma_v2_4_ring_emit_ib, | 1221 | .emit_ib = sdma_v2_4_ring_emit_ib, |
1230 | .emit_fence = sdma_v2_4_ring_emit_fence, | 1222 | .emit_fence = sdma_v2_4_ring_emit_fence, |
1231 | .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync, | 1223 | .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync, |
@@ -1236,8 +1228,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { | |||
1236 | .test_ib = sdma_v2_4_ring_test_ib, | 1228 | .test_ib = sdma_v2_4_ring_test_ib, |
1237 | .insert_nop = sdma_v2_4_ring_insert_nop, | 1229 | .insert_nop = sdma_v2_4_ring_insert_nop, |
1238 | .pad_ib = sdma_v2_4_ring_pad_ib, | 1230 | .pad_ib = sdma_v2_4_ring_pad_ib, |
1239 | .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size, | ||
1240 | .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size, | ||
1241 | }; | 1231 | }; |
1242 | 1232 | ||
1243 | static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) | 1233 | static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) |
@@ -1350,3 +1340,12 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) | |||
1350 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; | 1340 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; |
1351 | } | 1341 | } |
1352 | } | 1342 | } |
1343 | |||
1344 | const struct amdgpu_ip_block_version sdma_v2_4_ip_block = | ||
1345 | { | ||
1346 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1347 | .major = 2, | ||
1348 | .minor = 4, | ||
1349 | .rev = 0, | ||
1350 | .funcs = &sdma_v2_4_ip_funcs, | ||
1351 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h index 07349f5ee10f..28b433729216 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __SDMA_V2_4_H__ | 24 | #ifndef __SDMA_V2_4_H__ |
25 | #define __SDMA_V2_4_H__ | 25 | #define __SDMA_V2_4_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs sdma_v2_4_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version sdma_v2_4_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
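The sdma_v2_4 hunks above capture two related API moves in this backmerge: the NOP packet, alignment mask and ring type migrate from the amdgpu_ring_init() arguments into the static amdgpu_ring_funcs table, and the per-ring get_emit_ib_size()/get_dma_frame_size() callbacks collapse into the constant .emit_ib_size/.emit_frame_size fields. A minimal sketch of how common ring code can size a submission from those constants follows; the helper name, the rounding step and the amdgpu_ring_alloc() call are illustrative assumptions, not code from this commit — only the new ring_funcs fields are taken from the hunks above.

#include "amdgpu.h"

/* Sketch only: size a frame from the constant ring_funcs fields instead of
 * calling back into the IP-specific code.  amdgpu_ring_alloc() is assumed
 * to take (ring, number-of-dwords) as in the surrounding tree.
 */
static int example_reserve_frame(struct amdgpu_ring *ring, unsigned int num_ibs)
{
	/* fixed per-frame overhead plus one IB packet per scheduled IB */
	unsigned int ndw = ring->funcs->emit_frame_size +
			   num_ibs * ring->funcs->emit_ib_size;

	/* pad to the ring alignment, which now also lives in ring_funcs */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	return amdgpu_ring_alloc(ring, ndw);
}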
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index a9d10941fb53..77f146587c60 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
@@ -392,10 +392,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
392 | 392 | ||
393 | for (i = 0; i < count; i++) | 393 | for (i = 0; i < count; i++) |
394 | if (sdma && sdma->burst_nop && (i == 0)) | 394 | if (sdma && sdma->burst_nop && (i == 0)) |
395 | amdgpu_ring_write(ring, ring->nop | | 395 | amdgpu_ring_write(ring, ring->funcs->nop | |
396 | SDMA_PKT_NOP_HEADER_COUNT(count - 1)); | 396 | SDMA_PKT_NOP_HEADER_COUNT(count - 1)); |
397 | else | 397 | else |
398 | amdgpu_ring_write(ring, ring->nop); | 398 | amdgpu_ring_write(ring, ring->funcs->nop); |
399 | } | 399 | } |
400 | 400 | ||
401 | /** | 401 | /** |
@@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
871 | { | 871 | { |
872 | struct amdgpu_device *adev = ring->adev; | 872 | struct amdgpu_device *adev = ring->adev; |
873 | struct amdgpu_ib ib; | 873 | struct amdgpu_ib ib; |
874 | struct fence *f = NULL; | 874 | struct dma_fence *f = NULL; |
875 | unsigned index; | 875 | unsigned index; |
876 | u32 tmp = 0; | 876 | u32 tmp = 0; |
877 | u64 gpu_addr; | 877 | u64 gpu_addr; |
@@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
908 | if (r) | 908 | if (r) |
909 | goto err1; | 909 | goto err1; |
910 | 910 | ||
911 | r = fence_wait_timeout(f, false, timeout); | 911 | r = dma_fence_wait_timeout(f, false, timeout); |
912 | if (r == 0) { | 912 | if (r == 0) { |
913 | DRM_ERROR("amdgpu: IB test timed out\n"); | 913 | DRM_ERROR("amdgpu: IB test timed out\n"); |
914 | r = -ETIMEDOUT; | 914 | r = -ETIMEDOUT; |
@@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
927 | } | 927 | } |
928 | err1: | 928 | err1: |
929 | amdgpu_ib_free(adev, &ib, NULL); | 929 | amdgpu_ib_free(adev, &ib, NULL); |
930 | fence_put(f); | 930 | dma_fence_put(f); |
931 | err0: | 931 | err0: |
932 | amdgpu_wb_free(adev, index); | 932 | amdgpu_wb_free(adev, index); |
933 | return r; | 933 | return r; |
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
1104 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ | 1104 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ |
1105 | } | 1105 | } |
1106 | 1106 | ||
1107 | static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
1108 | { | ||
1109 | return | ||
1110 | 7 + 6; /* sdma_v3_0_ring_emit_ib */ | ||
1111 | } | ||
1112 | |||
1113 | static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
1114 | { | ||
1115 | return | ||
1116 | 6 + /* sdma_v3_0_ring_emit_hdp_flush */ | ||
1117 | 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */ | ||
1118 | 6 + /* sdma_v3_0_ring_emit_pipeline_sync */ | ||
1119 | 12 + /* sdma_v3_0_ring_emit_vm_flush */ | ||
1120 | 10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */ | ||
1121 | } | ||
1122 | |||
1123 | static int sdma_v3_0_early_init(void *handle) | 1107 | static int sdma_v3_0_early_init(void *handle) |
1124 | { | 1108 | { |
1125 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1109 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -1177,11 +1161,10 @@ static int sdma_v3_0_sw_init(void *handle) | |||
1177 | 1161 | ||
1178 | sprintf(ring->name, "sdma%d", i); | 1162 | sprintf(ring->name, "sdma%d", i); |
1179 | r = amdgpu_ring_init(adev, ring, 1024, | 1163 | r = amdgpu_ring_init(adev, ring, 1024, |
1180 | SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, | ||
1181 | &adev->sdma.trap_irq, | 1164 | &adev->sdma.trap_irq, |
1182 | (i == 0) ? | 1165 | (i == 0) ? |
1183 | AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1, | 1166 | AMDGPU_SDMA_IRQ_TRAP0 : |
1184 | AMDGPU_RING_TYPE_SDMA); | 1167 | AMDGPU_SDMA_IRQ_TRAP1); |
1185 | if (r) | 1168 | if (r) |
1186 | return r; | 1169 | return r; |
1187 | } | 1170 | } |
@@ -1544,7 +1527,7 @@ static int sdma_v3_0_set_powergating_state(void *handle, | |||
1544 | return 0; | 1527 | return 0; |
1545 | } | 1528 | } |
1546 | 1529 | ||
1547 | const struct amd_ip_funcs sdma_v3_0_ip_funcs = { | 1530 | static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { |
1548 | .name = "sdma_v3_0", | 1531 | .name = "sdma_v3_0", |
1549 | .early_init = sdma_v3_0_early_init, | 1532 | .early_init = sdma_v3_0_early_init, |
1550 | .late_init = NULL, | 1533 | .late_init = NULL, |
@@ -1565,10 +1548,19 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = { | |||
1565 | }; | 1548 | }; |
1566 | 1549 | ||
1567 | static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { | 1550 | static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { |
1551 | .type = AMDGPU_RING_TYPE_SDMA, | ||
1552 | .align_mask = 0xf, | ||
1553 | .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), | ||
1568 | .get_rptr = sdma_v3_0_ring_get_rptr, | 1554 | .get_rptr = sdma_v3_0_ring_get_rptr, |
1569 | .get_wptr = sdma_v3_0_ring_get_wptr, | 1555 | .get_wptr = sdma_v3_0_ring_get_wptr, |
1570 | .set_wptr = sdma_v3_0_ring_set_wptr, | 1556 | .set_wptr = sdma_v3_0_ring_set_wptr, |
1571 | .parse_cs = NULL, | 1557 | .emit_frame_size = |
1558 | 6 + /* sdma_v3_0_ring_emit_hdp_flush */ | ||
1559 | 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */ | ||
1560 | 6 + /* sdma_v3_0_ring_emit_pipeline_sync */ | ||
1561 | 12 + /* sdma_v3_0_ring_emit_vm_flush */ | ||
1562 | 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */ | ||
1563 | .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */ | ||
1572 | .emit_ib = sdma_v3_0_ring_emit_ib, | 1564 | .emit_ib = sdma_v3_0_ring_emit_ib, |
1573 | .emit_fence = sdma_v3_0_ring_emit_fence, | 1565 | .emit_fence = sdma_v3_0_ring_emit_fence, |
1574 | .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync, | 1566 | .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync, |
@@ -1579,8 +1571,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { | |||
1579 | .test_ib = sdma_v3_0_ring_test_ib, | 1571 | .test_ib = sdma_v3_0_ring_test_ib, |
1580 | .insert_nop = sdma_v3_0_ring_insert_nop, | 1572 | .insert_nop = sdma_v3_0_ring_insert_nop, |
1581 | .pad_ib = sdma_v3_0_ring_pad_ib, | 1573 | .pad_ib = sdma_v3_0_ring_pad_ib, |
1582 | .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size, | ||
1583 | .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size, | ||
1584 | }; | 1574 | }; |
1585 | 1575 | ||
1586 | static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) | 1576 | static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -1693,3 +1683,21 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) | |||
1693 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; | 1683 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; |
1694 | } | 1684 | } |
1695 | } | 1685 | } |
1686 | |||
1687 | const struct amdgpu_ip_block_version sdma_v3_0_ip_block = | ||
1688 | { | ||
1689 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1690 | .major = 3, | ||
1691 | .minor = 0, | ||
1692 | .rev = 0, | ||
1693 | .funcs = &sdma_v3_0_ip_funcs, | ||
1694 | }; | ||
1695 | |||
1696 | const struct amdgpu_ip_block_version sdma_v3_1_ip_block = | ||
1697 | { | ||
1698 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1699 | .major = 3, | ||
1700 | .minor = 1, | ||
1701 | .rev = 0, | ||
1702 | .funcs = &sdma_v3_0_ip_funcs, | ||
1703 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h index 0cb9698a3054..7aa223d35f1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #ifndef __SDMA_V3_0_H__ | 24 | #ifndef __SDMA_V3_0_H__ |
25 | #define __SDMA_V3_0_H__ | 25 | #define __SDMA_V3_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs sdma_v3_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version sdma_v3_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version sdma_v3_1_ip_block; | ||
28 | 29 | ||
29 | #endif | 30 | #endif |
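Alongside the ring rework, the sdma_v3_0 IB test above switches to the renamed fence API pulled in by this backmerge: struct fence becomes struct dma_fence, and fence_wait_timeout()/fence_put() become dma_fence_wait_timeout()/dma_fence_put(). A short sketch of the wait-and-release pattern those IB tests use, with the return-value mapping made explicit; the helper name is illustrative only.

#include <linux/dma-fence.h>
#include <linux/errno.h>

/* Sketch of the renamed API: wait on a fence with a timeout, map the
 * result to an errno, and drop the caller's reference.
 */
static long example_wait_and_release(struct dma_fence *f, long timeout)
{
	long r = dma_fence_wait_timeout(f, false, timeout);

	if (r == 0)		/* timed out */
		r = -ETIMEDOUT;
	else if (r > 0)		/* signalled with time to spare */
		r = 0;
				/* r < 0 is passed through as an error */
	dma_fence_put(f);
	return r;
}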
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index dc9511c5ecb8..3ed8ad8725b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include "si_dma.h" | 39 | #include "si_dma.h" |
40 | #include "dce_v6_0.h" | 40 | #include "dce_v6_0.h" |
41 | #include "si.h" | 41 | #include "si.h" |
42 | #include "dce_virtual.h" | ||
42 | 43 | ||
43 | static const u32 tahiti_golden_registers[] = | 44 | static const u32 tahiti_golden_registers[] = |
44 | { | 45 | { |
@@ -905,7 +906,7 @@ static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | |||
905 | spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); | 906 | spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); |
906 | } | 907 | } |
907 | 908 | ||
908 | u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg) | 909 | static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg) |
909 | { | 910 | { |
910 | unsigned long flags; | 911 | unsigned long flags; |
911 | u32 r; | 912 | u32 r; |
@@ -918,7 +919,7 @@ u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg) | |||
918 | return r; | 919 | return r; |
919 | } | 920 | } |
920 | 921 | ||
921 | void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | 922 | static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
922 | { | 923 | { |
923 | unsigned long flags; | 924 | unsigned long flags; |
924 | 925 | ||
@@ -1811,7 +1812,7 @@ static int si_common_set_powergating_state(void *handle, | |||
1811 | return 0; | 1812 | return 0; |
1812 | } | 1813 | } |
1813 | 1814 | ||
1814 | const struct amd_ip_funcs si_common_ip_funcs = { | 1815 | static const struct amd_ip_funcs si_common_ip_funcs = { |
1815 | .name = "si_common", | 1816 | .name = "si_common", |
1816 | .early_init = si_common_early_init, | 1817 | .early_init = si_common_early_init, |
1817 | .late_init = NULL, | 1818 | .late_init = NULL, |
@@ -1828,119 +1829,13 @@ const struct amd_ip_funcs si_common_ip_funcs = { | |||
1828 | .set_powergating_state = si_common_set_powergating_state, | 1829 | .set_powergating_state = si_common_set_powergating_state, |
1829 | }; | 1830 | }; |
1830 | 1831 | ||
1831 | static const struct amdgpu_ip_block_version verde_ip_blocks[] = | 1832 | static const struct amdgpu_ip_block_version si_common_ip_block = |
1832 | { | 1833 | { |
1833 | { | 1834 | .type = AMD_IP_BLOCK_TYPE_COMMON, |
1834 | .type = AMD_IP_BLOCK_TYPE_COMMON, | 1835 | .major = 1, |
1835 | .major = 1, | 1836 | .minor = 0, |
1836 | .minor = 0, | 1837 | .rev = 0, |
1837 | .rev = 0, | 1838 | .funcs = &si_common_ip_funcs, |
1838 | .funcs = &si_common_ip_funcs, | ||
1839 | }, | ||
1840 | { | ||
1841 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1842 | .major = 6, | ||
1843 | .minor = 0, | ||
1844 | .rev = 0, | ||
1845 | .funcs = &gmc_v6_0_ip_funcs, | ||
1846 | }, | ||
1847 | { | ||
1848 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1849 | .major = 1, | ||
1850 | .minor = 0, | ||
1851 | .rev = 0, | ||
1852 | .funcs = &si_ih_ip_funcs, | ||
1853 | }, | ||
1854 | { | ||
1855 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1856 | .major = 6, | ||
1857 | .minor = 0, | ||
1858 | .rev = 0, | ||
1859 | .funcs = &amdgpu_pp_ip_funcs, | ||
1860 | }, | ||
1861 | { | ||
1862 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1863 | .major = 6, | ||
1864 | .minor = 0, | ||
1865 | .rev = 0, | ||
1866 | .funcs = &dce_v6_0_ip_funcs, | ||
1867 | }, | ||
1868 | { | ||
1869 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1870 | .major = 6, | ||
1871 | .minor = 0, | ||
1872 | .rev = 0, | ||
1873 | .funcs = &gfx_v6_0_ip_funcs, | ||
1874 | }, | ||
1875 | { | ||
1876 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1877 | .major = 1, | ||
1878 | .minor = 0, | ||
1879 | .rev = 0, | ||
1880 | .funcs = &si_dma_ip_funcs, | ||
1881 | }, | ||
1882 | /* { | ||
1883 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1884 | .major = 3, | ||
1885 | .minor = 1, | ||
1886 | .rev = 0, | ||
1887 | .funcs = &si_null_ip_funcs, | ||
1888 | }, | ||
1889 | { | ||
1890 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1891 | .major = 1, | ||
1892 | .minor = 0, | ||
1893 | .rev = 0, | ||
1894 | .funcs = &si_null_ip_funcs, | ||
1895 | }, | ||
1896 | */ | ||
1897 | }; | ||
1898 | |||
1899 | |||
1900 | static const struct amdgpu_ip_block_version hainan_ip_blocks[] = | ||
1901 | { | ||
1902 | { | ||
1903 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1904 | .major = 1, | ||
1905 | .minor = 0, | ||
1906 | .rev = 0, | ||
1907 | .funcs = &si_common_ip_funcs, | ||
1908 | }, | ||
1909 | { | ||
1910 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1911 | .major = 6, | ||
1912 | .minor = 0, | ||
1913 | .rev = 0, | ||
1914 | .funcs = &gmc_v6_0_ip_funcs, | ||
1915 | }, | ||
1916 | { | ||
1917 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1918 | .major = 1, | ||
1919 | .minor = 0, | ||
1920 | .rev = 0, | ||
1921 | .funcs = &si_ih_ip_funcs, | ||
1922 | }, | ||
1923 | { | ||
1924 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1925 | .major = 6, | ||
1926 | .minor = 0, | ||
1927 | .rev = 0, | ||
1928 | .funcs = &amdgpu_pp_ip_funcs, | ||
1929 | }, | ||
1930 | { | ||
1931 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1932 | .major = 6, | ||
1933 | .minor = 0, | ||
1934 | .rev = 0, | ||
1935 | .funcs = &gfx_v6_0_ip_funcs, | ||
1936 | }, | ||
1937 | { | ||
1938 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1939 | .major = 1, | ||
1940 | .minor = 0, | ||
1941 | .rev = 0, | ||
1942 | .funcs = &si_dma_ip_funcs, | ||
1943 | }, | ||
1944 | }; | 1839 | }; |
1945 | 1840 | ||
1946 | int si_set_ip_blocks(struct amdgpu_device *adev) | 1841 | int si_set_ip_blocks(struct amdgpu_device *adev) |
@@ -1949,13 +1844,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev) | |||
1949 | case CHIP_VERDE: | 1844 | case CHIP_VERDE: |
1950 | case CHIP_TAHITI: | 1845 | case CHIP_TAHITI: |
1951 | case CHIP_PITCAIRN: | 1846 | case CHIP_PITCAIRN: |
1847 | amdgpu_ip_block_add(adev, &si_common_ip_block); | ||
1848 | amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block); | ||
1849 | amdgpu_ip_block_add(adev, &si_ih_ip_block); | ||
1850 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1851 | if (adev->enable_virtual_display) | ||
1852 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1853 | else | ||
1854 | amdgpu_ip_block_add(adev, &dce_v6_0_ip_block); | ||
1855 | amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block); | ||
1856 | amdgpu_ip_block_add(adev, &si_dma_ip_block); | ||
1857 | /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */ | ||
1858 | /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */ | ||
1859 | break; | ||
1952 | case CHIP_OLAND: | 1860 | case CHIP_OLAND: |
1953 | adev->ip_blocks = verde_ip_blocks; | 1861 | amdgpu_ip_block_add(adev, &si_common_ip_block); |
1954 | adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks); | 1862 | amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block); |
1863 | amdgpu_ip_block_add(adev, &si_ih_ip_block); | ||
1864 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1865 | if (adev->enable_virtual_display) | ||
1866 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1867 | else | ||
1868 | amdgpu_ip_block_add(adev, &dce_v6_4_ip_block); | ||
1869 | amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block); | ||
1870 | amdgpu_ip_block_add(adev, &si_dma_ip_block); | ||
1871 | /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */ | ||
1872 | /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */ | ||
1955 | break; | 1873 | break; |
1956 | case CHIP_HAINAN: | 1874 | case CHIP_HAINAN: |
1957 | adev->ip_blocks = hainan_ip_blocks; | 1875 | amdgpu_ip_block_add(adev, &si_common_ip_block); |
1958 | adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks); | 1876 | amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block); |
1877 | amdgpu_ip_block_add(adev, &si_ih_ip_block); | ||
1878 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1879 | if (adev->enable_virtual_display) | ||
1880 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1881 | amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block); | ||
1882 | amdgpu_ip_block_add(adev, &si_dma_ip_block); | ||
1959 | break; | 1883 | break; |
1960 | default: | 1884 | default: |
1961 | BUG(); | 1885 | BUG(); |
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h index 959d7b63e0e5..589225080c24 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.h +++ b/drivers/gpu/drm/amd/amdgpu/si.h | |||
@@ -24,8 +24,6 @@ | |||
24 | #ifndef __SI_H__ | 24 | #ifndef __SI_H__ |
25 | #define __SI_H__ | 25 | #define __SI_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs si_common_ip_funcs; | ||
28 | |||
29 | void si_srbm_select(struct amdgpu_device *adev, | 27 | void si_srbm_select(struct amdgpu_device *adev, |
30 | u32 me, u32 pipe, u32 queue, u32 vmid); | 28 | u32 me, u32 pipe, u32 queue, u32 vmid); |
31 | int si_set_ip_blocks(struct amdgpu_device *adev); | 29 | int si_set_ip_blocks(struct amdgpu_device *adev); |
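In si.c the per-ASIC static ip_blocks arrays are dropped in favour of building the list at runtime with amdgpu_ip_block_add(), which also makes the virtual-display substitution (dce_virtual_ip_block instead of dce_v6_0_ip_block) a one-line conditional. The helper itself is not part of these hunks; below is a hedged sketch of what it plausibly does — the array-of-structs layout of adev->ip_blocks and the AMDGPU_MAX_IP_NUM bound are assumptions for illustration, only the function name and call signature come from the diff.

/* Hedged sketch of amdgpu_ip_block_add(): append a versioned block
 * descriptor to the device's list.  Container layout is assumed here.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!adev || !ip_block_version)
		return -EINVAL;

	if (adev->num_ip_blocks >= AMDGPU_MAX_IP_NUM)	/* assumed bound */
		return -ENOSPC;

	adev->ip_blocks[adev->num_ip_blocks++] = *ip_block_version;

	return 0;
}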
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index de358193a8f9..3dd552ae0b59 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c | |||
@@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
274 | { | 274 | { |
275 | struct amdgpu_device *adev = ring->adev; | 275 | struct amdgpu_device *adev = ring->adev; |
276 | struct amdgpu_ib ib; | 276 | struct amdgpu_ib ib; |
277 | struct fence *f = NULL; | 277 | struct dma_fence *f = NULL; |
278 | unsigned index; | 278 | unsigned index; |
279 | u32 tmp = 0; | 279 | u32 tmp = 0; |
280 | u64 gpu_addr; | 280 | u64 gpu_addr; |
@@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
305 | if (r) | 305 | if (r) |
306 | goto err1; | 306 | goto err1; |
307 | 307 | ||
308 | r = fence_wait_timeout(f, false, timeout); | 308 | r = dma_fence_wait_timeout(f, false, timeout); |
309 | if (r == 0) { | 309 | if (r == 0) { |
310 | DRM_ERROR("amdgpu: IB test timed out\n"); | 310 | DRM_ERROR("amdgpu: IB test timed out\n"); |
311 | r = -ETIMEDOUT; | 311 | r = -ETIMEDOUT; |
@@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
325 | 325 | ||
326 | err1: | 326 | err1: |
327 | amdgpu_ib_free(adev, &ib, NULL); | 327 | amdgpu_ib_free(adev, &ib, NULL); |
328 | fence_put(f); | 328 | dma_fence_put(f); |
329 | err0: | 329 | err0: |
330 | amdgpu_wb_free(adev, index); | 330 | amdgpu_wb_free(adev, index); |
331 | return r; | 331 | return r; |
@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
495 | amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ | 495 | amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ |
496 | } | 496 | } |
497 | 497 | ||
498 | static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
499 | { | ||
500 | return | ||
501 | 7 + 3; /* si_dma_ring_emit_ib */ | ||
502 | } | ||
503 | |||
504 | static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
505 | { | ||
506 | return | ||
507 | 3 + /* si_dma_ring_emit_hdp_flush */ | ||
508 | 3 + /* si_dma_ring_emit_hdp_invalidate */ | ||
509 | 6 + /* si_dma_ring_emit_pipeline_sync */ | ||
510 | 12 + /* si_dma_ring_emit_vm_flush */ | ||
511 | 9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */ | ||
512 | } | ||
513 | |||
514 | static int si_dma_early_init(void *handle) | 498 | static int si_dma_early_init(void *handle) |
515 | { | 499 | { |
516 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 500 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -547,11 +531,10 @@ static int si_dma_sw_init(void *handle) | |||
547 | ring->use_doorbell = false; | 531 | ring->use_doorbell = false; |
548 | sprintf(ring->name, "sdma%d", i); | 532 | sprintf(ring->name, "sdma%d", i); |
549 | r = amdgpu_ring_init(adev, ring, 1024, | 533 | r = amdgpu_ring_init(adev, ring, 1024, |
550 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf, | ||
551 | &adev->sdma.trap_irq, | 534 | &adev->sdma.trap_irq, |
552 | (i == 0) ? | 535 | (i == 0) ? |
553 | AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1, | 536 | AMDGPU_SDMA_IRQ_TRAP0 : |
554 | AMDGPU_RING_TYPE_SDMA); | 537 | AMDGPU_SDMA_IRQ_TRAP1); |
555 | if (r) | 538 | if (r) |
556 | return r; | 539 | return r; |
557 | } | 540 | } |
@@ -762,7 +745,7 @@ static int si_dma_set_powergating_state(void *handle, | |||
762 | return 0; | 745 | return 0; |
763 | } | 746 | } |
764 | 747 | ||
765 | const struct amd_ip_funcs si_dma_ip_funcs = { | 748 | static const struct amd_ip_funcs si_dma_ip_funcs = { |
766 | .name = "si_dma", | 749 | .name = "si_dma", |
767 | .early_init = si_dma_early_init, | 750 | .early_init = si_dma_early_init, |
768 | .late_init = NULL, | 751 | .late_init = NULL, |
@@ -780,10 +763,19 @@ const struct amd_ip_funcs si_dma_ip_funcs = { | |||
780 | }; | 763 | }; |
781 | 764 | ||
782 | static const struct amdgpu_ring_funcs si_dma_ring_funcs = { | 765 | static const struct amdgpu_ring_funcs si_dma_ring_funcs = { |
766 | .type = AMDGPU_RING_TYPE_SDMA, | ||
767 | .align_mask = 0xf, | ||
768 | .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), | ||
783 | .get_rptr = si_dma_ring_get_rptr, | 769 | .get_rptr = si_dma_ring_get_rptr, |
784 | .get_wptr = si_dma_ring_get_wptr, | 770 | .get_wptr = si_dma_ring_get_wptr, |
785 | .set_wptr = si_dma_ring_set_wptr, | 771 | .set_wptr = si_dma_ring_set_wptr, |
786 | .parse_cs = NULL, | 772 | .emit_frame_size = |
773 | 3 + /* si_dma_ring_emit_hdp_flush */ | ||
774 | 3 + /* si_dma_ring_emit_hdp_invalidate */ | ||
775 | 6 + /* si_dma_ring_emit_pipeline_sync */ | ||
776 | 12 + /* si_dma_ring_emit_vm_flush */ | ||
777 | 9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */ | ||
778 | .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */ | ||
787 | .emit_ib = si_dma_ring_emit_ib, | 779 | .emit_ib = si_dma_ring_emit_ib, |
788 | .emit_fence = si_dma_ring_emit_fence, | 780 | .emit_fence = si_dma_ring_emit_fence, |
789 | .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync, | 781 | .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync, |
@@ -794,8 +786,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = { | |||
794 | .test_ib = si_dma_ring_test_ib, | 786 | .test_ib = si_dma_ring_test_ib, |
795 | .insert_nop = amdgpu_ring_insert_nop, | 787 | .insert_nop = amdgpu_ring_insert_nop, |
796 | .pad_ib = si_dma_ring_pad_ib, | 788 | .pad_ib = si_dma_ring_pad_ib, |
797 | .get_emit_ib_size = si_dma_ring_get_emit_ib_size, | ||
798 | .get_dma_frame_size = si_dma_ring_get_dma_frame_size, | ||
799 | }; | 789 | }; |
800 | 790 | ||
801 | static void si_dma_set_ring_funcs(struct amdgpu_device *adev) | 791 | static void si_dma_set_ring_funcs(struct amdgpu_device *adev) |
@@ -913,3 +903,12 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev) | |||
913 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; | 903 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; |
914 | } | 904 | } |
915 | } | 905 | } |
906 | |||
907 | const struct amdgpu_ip_block_version si_dma_ip_block = | ||
908 | { | ||
909 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
910 | .major = 1, | ||
911 | .minor = 0, | ||
912 | .rev = 0, | ||
913 | .funcs = &si_dma_ip_funcs, | ||
914 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h index 3a3e0c78a54b..5ac1b8452fb4 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.h +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __SI_DMA_H__ | 24 | #ifndef __SI_DMA_H__ |
25 | #define __SI_DMA_H__ | 25 | #define __SI_DMA_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs si_dma_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version si_dma_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 3de7bca5854b..917213396787 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
@@ -3171,6 +3171,7 @@ static void ni_update_current_ps(struct amdgpu_device *adev, | |||
3171 | eg_pi->current_rps = *rps; | 3171 | eg_pi->current_rps = *rps; |
3172 | ni_pi->current_ps = *new_ps; | 3172 | ni_pi->current_ps = *new_ps; |
3173 | eg_pi->current_rps.ps_priv = &ni_pi->current_ps; | 3173 | eg_pi->current_rps.ps_priv = &ni_pi->current_ps; |
3174 | adev->pm.dpm.current_ps = &eg_pi->current_rps; | ||
3174 | } | 3175 | } |
3175 | 3176 | ||
3176 | static void ni_update_requested_ps(struct amdgpu_device *adev, | 3177 | static void ni_update_requested_ps(struct amdgpu_device *adev, |
@@ -3183,6 +3184,7 @@ static void ni_update_requested_ps(struct amdgpu_device *adev, | |||
3183 | eg_pi->requested_rps = *rps; | 3184 | eg_pi->requested_rps = *rps; |
3184 | ni_pi->requested_ps = *new_ps; | 3185 | ni_pi->requested_ps = *new_ps; |
3185 | eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps; | 3186 | eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps; |
3187 | adev->pm.dpm.requested_ps = &eg_pi->requested_rps; | ||
3186 | } | 3188 | } |
3187 | 3189 | ||
3188 | static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev, | 3190 | static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev, |
@@ -7320,7 +7322,7 @@ static int si_parse_power_table(struct amdgpu_device *adev) | |||
7320 | adev->pm.dpm.num_ps = state_array->ucNumEntries; | 7322 | adev->pm.dpm.num_ps = state_array->ucNumEntries; |
7321 | 7323 | ||
7322 | /* fill in the vce power states */ | 7324 | /* fill in the vce power states */ |
7323 | for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { | 7325 | for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { |
7324 | u32 sclk, mclk; | 7326 | u32 sclk, mclk; |
7325 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; | 7327 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; |
7326 | clock_info = (union pplib_clock_info *) | 7328 | clock_info = (union pplib_clock_info *) |
@@ -7957,6 +7959,57 @@ static int si_dpm_early_init(void *handle) | |||
7957 | return 0; | 7959 | return 0; |
7958 | } | 7960 | } |
7959 | 7961 | ||
7962 | static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1, | ||
7963 | const struct rv7xx_pl *si_cpl2) | ||
7964 | { | ||
7965 | return ((si_cpl1->mclk == si_cpl2->mclk) && | ||
7966 | (si_cpl1->sclk == si_cpl2->sclk) && | ||
7967 | (si_cpl1->pcie_gen == si_cpl2->pcie_gen) && | ||
7968 | (si_cpl1->vddc == si_cpl2->vddc) && | ||
7969 | (si_cpl1->vddci == si_cpl2->vddci)); | ||
7970 | } | ||
7971 | |||
7972 | static int si_check_state_equal(struct amdgpu_device *adev, | ||
7973 | struct amdgpu_ps *cps, | ||
7974 | struct amdgpu_ps *rps, | ||
7975 | bool *equal) | ||
7976 | { | ||
7977 | struct si_ps *si_cps; | ||
7978 | struct si_ps *si_rps; | ||
7979 | int i; | ||
7980 | |||
7981 | if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) | ||
7982 | return -EINVAL; | ||
7983 | |||
7984 | si_cps = si_get_ps(cps); | ||
7985 | si_rps = si_get_ps(rps); | ||
7986 | |||
7987 | if (si_cps == NULL) { | ||
7988 | printk("si_cps is NULL\n"); | ||
7989 | *equal = false; | ||
7990 | return 0; | ||
7991 | } | ||
7992 | |||
7993 | if (si_cps->performance_level_count != si_rps->performance_level_count) { | ||
7994 | *equal = false; | ||
7995 | return 0; | ||
7996 | } | ||
7997 | |||
7998 | for (i = 0; i < si_cps->performance_level_count; i++) { | ||
7999 | if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]), | ||
8000 | &(si_rps->performance_levels[i]))) { | ||
8001 | *equal = false; | ||
8002 | return 0; | ||
8003 | } | ||
8004 | } | ||
8005 | |||
8006 | /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ | ||
8007 | *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); | ||
8008 | *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); | ||
8009 | |||
8010 | return 0; | ||
8011 | } | ||
8012 | |||
7960 | 8013 | ||
7961 | const struct amd_ip_funcs si_dpm_ip_funcs = { | 8014 | const struct amd_ip_funcs si_dpm_ip_funcs = { |
7962 | .name = "si_dpm", | 8015 | .name = "si_dpm", |
@@ -7991,6 +8044,8 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = { | |||
7991 | .get_fan_control_mode = &si_dpm_get_fan_control_mode, | 8044 | .get_fan_control_mode = &si_dpm_get_fan_control_mode, |
7992 | .set_fan_speed_percent = &si_dpm_set_fan_speed_percent, | 8045 | .set_fan_speed_percent = &si_dpm_set_fan_speed_percent, |
7993 | .get_fan_speed_percent = &si_dpm_get_fan_speed_percent, | 8046 | .get_fan_speed_percent = &si_dpm_get_fan_speed_percent, |
8047 | .check_state_equal = &si_check_state_equal, | ||
8048 | .get_vce_clock_state = amdgpu_get_vce_clock_state, | ||
7994 | }; | 8049 | }; |
7995 | 8050 | ||
7996 | static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev) | 8051 | static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev) |
@@ -8010,3 +8065,11 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev) | |||
8010 | adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs; | 8065 | adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs; |
8011 | } | 8066 | } |
8012 | 8067 | ||
8068 | const struct amdgpu_ip_block_version si_dpm_ip_block = | ||
8069 | { | ||
8070 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
8071 | .major = 6, | ||
8072 | .minor = 0, | ||
8073 | .rev = 0, | ||
8074 | .funcs = &si_dpm_ip_funcs, | ||
8075 | }; | ||
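The si_dpm changes above keep adev->pm.dpm.current_ps and requested_ps pointing at the driver's private state copies and add a check_state_equal callback (plus get_vce_clock_state) to the dpm function table. A sketch of how a caller could use the new callback to skip a redundant power-state switch; the wrapper below and the way the function table is reached are assumptions — only the callback signature and the current_ps/requested_ps fields come from the hunks above.

/* Sketch: consult check_state_equal before reprogramming the hardware.
 * 'funcs' is passed in explicitly because the exact location of the dpm
 * function table on amdgpu_device is not shown in this diff.
 */
static bool example_can_skip_state_switch(struct amdgpu_device *adev,
					  const struct amdgpu_dpm_funcs *funcs)
{
	bool equal = false;

	if (funcs->check_state_equal &&
	    !funcs->check_state_equal(adev,
				      adev->pm.dpm.current_ps,
				      adev->pm.dpm.requested_ps,
				      &equal))
		return equal;	/* true: requested state matches the current one */

	return false;		/* no callback or error: always reprogram */
}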
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 8fae3d4a2360..db0f36846661 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c | |||
@@ -268,7 +268,7 @@ static int si_ih_set_powergating_state(void *handle, | |||
268 | return 0; | 268 | return 0; |
269 | } | 269 | } |
270 | 270 | ||
271 | const struct amd_ip_funcs si_ih_ip_funcs = { | 271 | static const struct amd_ip_funcs si_ih_ip_funcs = { |
272 | .name = "si_ih", | 272 | .name = "si_ih", |
273 | .early_init = si_ih_early_init, | 273 | .early_init = si_ih_early_init, |
274 | .late_init = NULL, | 274 | .late_init = NULL, |
@@ -297,3 +297,11 @@ static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev) | |||
297 | adev->irq.ih_funcs = &si_ih_funcs; | 297 | adev->irq.ih_funcs = &si_ih_funcs; |
298 | } | 298 | } |
299 | 299 | ||
300 | const struct amdgpu_ip_block_version si_ih_ip_block = | ||
301 | { | ||
302 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
303 | .major = 1, | ||
304 | .minor = 0, | ||
305 | .rev = 0, | ||
306 | .funcs = &si_ih_ip_funcs, | ||
307 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h index f3e3a954369c..42e64a53e24f 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __SI_IH_H__ | 24 | #ifndef __SI_IH_H__ |
25 | #define __SI_IH_H__ | 25 | #define __SI_IH_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs si_ih_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version si_ih_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index b4ea229bb449..52b71ee58793 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c | |||
@@ -455,7 +455,7 @@ static int tonga_ih_set_powergating_state(void *handle, | |||
455 | return 0; | 455 | return 0; |
456 | } | 456 | } |
457 | 457 | ||
458 | const struct amd_ip_funcs tonga_ih_ip_funcs = { | 458 | static const struct amd_ip_funcs tonga_ih_ip_funcs = { |
459 | .name = "tonga_ih", | 459 | .name = "tonga_ih", |
460 | .early_init = tonga_ih_early_init, | 460 | .early_init = tonga_ih_early_init, |
461 | .late_init = NULL, | 461 | .late_init = NULL, |
@@ -487,3 +487,11 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev) | |||
487 | adev->irq.ih_funcs = &tonga_ih_funcs; | 487 | adev->irq.ih_funcs = &tonga_ih_funcs; |
488 | } | 488 | } |
489 | 489 | ||
490 | const struct amdgpu_ip_block_version tonga_ih_ip_block = | ||
491 | { | ||
492 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
493 | .major = 3, | ||
494 | .minor = 0, | ||
495 | .rev = 0, | ||
496 | .funcs = &tonga_ih_ip_funcs, | ||
497 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h index 7392d70fa4a7..499027eee5c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __TONGA_IH_H__ | 24 | #ifndef __TONGA_IH_H__ |
25 | #define __TONGA_IH_H__ | 25 | #define __TONGA_IH_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs tonga_ih_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version tonga_ih_ip_block; |
28 | 28 | ||
29 | #endif /* __CZ_IH_H__ */ | 29 | #endif /* __TONGA_IH_H__ */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index f6c941550b8f..8f9c7d55ddda 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | |||
@@ -36,6 +36,9 @@ | |||
36 | 36 | ||
37 | #include "bif/bif_4_1_d.h" | 37 | #include "bif/bif_4_1_d.h" |
38 | 38 | ||
39 | #include "smu/smu_7_0_1_d.h" | ||
40 | #include "smu/smu_7_0_1_sh_mask.h" | ||
41 | |||
39 | static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); | 42 | static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); |
40 | static void uvd_v4_2_init_cg(struct amdgpu_device *adev); | 43 | static void uvd_v4_2_init_cg(struct amdgpu_device *adev); |
41 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); | 44 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); |
@@ -116,8 +119,7 @@ static int uvd_v4_2_sw_init(void *handle) | |||
116 | 119 | ||
117 | ring = &adev->uvd.ring; | 120 | ring = &adev->uvd.ring; |
118 | sprintf(ring->name, "uvd"); | 121 | sprintf(ring->name, "uvd"); |
119 | r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, | 122 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); |
120 | &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); | ||
121 | 123 | ||
122 | return r; | 124 | return r; |
123 | } | 125 | } |
@@ -526,20 +528,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, | |||
526 | amdgpu_ring_write(ring, ib->length_dw); | 528 | amdgpu_ring_write(ring, ib->length_dw); |
527 | } | 529 | } |
528 | 530 | ||
529 | static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
530 | { | ||
531 | return | ||
532 | 4; /* uvd_v4_2_ring_emit_ib */ | ||
533 | } | ||
534 | |||
535 | static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
536 | { | ||
537 | return | ||
538 | 2 + /* uvd_v4_2_ring_emit_hdp_flush */ | ||
539 | 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */ | ||
540 | 14; /* uvd_v4_2_ring_emit_fence x1 no user fence */ | ||
541 | } | ||
542 | |||
543 | /** | 531 | /** |
544 | * uvd_v4_2_mc_resume - memory controller programming | 532 | * uvd_v4_2_mc_resume - memory controller programming |
545 | * | 533 | * |
@@ -698,18 +686,34 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, | |||
698 | return 0; | 686 | return 0; |
699 | } | 687 | } |
700 | 688 | ||
689 | static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) | ||
690 | { | ||
691 | u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); | ||
692 | |||
693 | if (enable) | ||
694 | tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | | ||
695 | GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); | ||
696 | else | ||
697 | tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | | ||
698 | GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); | ||
699 | |||
700 | WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); | ||
701 | } | ||
702 | |||
701 | static int uvd_v4_2_set_clockgating_state(void *handle, | 703 | static int uvd_v4_2_set_clockgating_state(void *handle, |
702 | enum amd_clockgating_state state) | 704 | enum amd_clockgating_state state) |
703 | { | 705 | { |
704 | bool gate = false; | 706 | bool gate = false; |
705 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 707 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
706 | 708 | ||
707 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | ||
708 | return 0; | ||
709 | |||
710 | if (state == AMD_CG_STATE_GATE) | 709 | if (state == AMD_CG_STATE_GATE) |
711 | gate = true; | 710 | gate = true; |
712 | 711 | ||
712 | uvd_v5_0_set_bypass_mode(adev, gate); | ||
713 | |||
714 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | ||
715 | return 0; | ||
716 | |||
713 | uvd_v4_2_enable_mgcg(adev, gate); | 717 | uvd_v4_2_enable_mgcg(adev, gate); |
714 | 718 | ||
715 | return 0; | 719 | return 0; |
@@ -738,7 +742,7 @@ static int uvd_v4_2_set_powergating_state(void *handle, | |||
738 | } | 742 | } |
739 | } | 743 | } |
740 | 744 | ||
741 | const struct amd_ip_funcs uvd_v4_2_ip_funcs = { | 745 | static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { |
742 | .name = "uvd_v4_2", | 746 | .name = "uvd_v4_2", |
743 | .early_init = uvd_v4_2_early_init, | 747 | .early_init = uvd_v4_2_early_init, |
744 | .late_init = NULL, | 748 | .late_init = NULL, |
@@ -756,10 +760,18 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = { | |||
756 | }; | 760 | }; |
757 | 761 | ||
758 | static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { | 762 | static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { |
763 | .type = AMDGPU_RING_TYPE_UVD, | ||
764 | .align_mask = 0xf, | ||
765 | .nop = PACKET0(mmUVD_NO_OP, 0), | ||
759 | .get_rptr = uvd_v4_2_ring_get_rptr, | 766 | .get_rptr = uvd_v4_2_ring_get_rptr, |
760 | .get_wptr = uvd_v4_2_ring_get_wptr, | 767 | .get_wptr = uvd_v4_2_ring_get_wptr, |
761 | .set_wptr = uvd_v4_2_ring_set_wptr, | 768 | .set_wptr = uvd_v4_2_ring_set_wptr, |
762 | .parse_cs = amdgpu_uvd_ring_parse_cs, | 769 | .parse_cs = amdgpu_uvd_ring_parse_cs, |
770 | .emit_frame_size = | ||
771 | 2 + /* uvd_v4_2_ring_emit_hdp_flush */ | ||
772 | 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */ | ||
773 | 14, /* uvd_v4_2_ring_emit_fence x1 no user fence */ | ||
774 | .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */ | ||
763 | .emit_ib = uvd_v4_2_ring_emit_ib, | 775 | .emit_ib = uvd_v4_2_ring_emit_ib, |
764 | .emit_fence = uvd_v4_2_ring_emit_fence, | 776 | .emit_fence = uvd_v4_2_ring_emit_fence, |
765 | .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush, | 777 | .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush, |
@@ -770,8 +782,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { | |||
770 | .pad_ib = amdgpu_ring_generic_pad_ib, | 782 | .pad_ib = amdgpu_ring_generic_pad_ib, |
771 | .begin_use = amdgpu_uvd_ring_begin_use, | 783 | .begin_use = amdgpu_uvd_ring_begin_use, |
772 | .end_use = amdgpu_uvd_ring_end_use, | 784 | .end_use = amdgpu_uvd_ring_end_use, |
773 | .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size, | ||
774 | .get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size, | ||
775 | }; | 785 | }; |
776 | 786 | ||
777 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) | 787 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) |
@@ -789,3 +799,12 @@ static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev) | |||
789 | adev->uvd.irq.num_types = 1; | 799 | adev->uvd.irq.num_types = 1; |
790 | adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs; | 800 | adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs; |
791 | } | 801 | } |
802 | |||
803 | const struct amdgpu_ip_block_version uvd_v4_2_ip_block = | ||
804 | { | ||
805 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
806 | .major = 4, | ||
807 | .minor = 2, | ||
808 | .rev = 0, | ||
809 | .funcs = &uvd_v4_2_ip_funcs, | ||
810 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h index 0a615dd50840..8a0444bb8b95 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __UVD_V4_2_H__ | 24 | #ifndef __UVD_V4_2_H__ |
25 | #define __UVD_V4_2_H__ | 25 | #define __UVD_V4_2_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs uvd_v4_2_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version uvd_v4_2_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 400c16fe579e..95303e2d5f92 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include "oss/oss_2_0_sh_mask.h" | 33 | #include "oss/oss_2_0_sh_mask.h" |
34 | #include "bif/bif_5_0_d.h" | 34 | #include "bif/bif_5_0_d.h" |
35 | #include "vi.h" | 35 | #include "vi.h" |
36 | #include "smu/smu_7_1_2_d.h" | ||
37 | #include "smu/smu_7_1_2_sh_mask.h" | ||
36 | 38 | ||
37 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); | 39 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); |
38 | static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev); | 40 | static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev); |
@@ -112,8 +114,7 @@ static int uvd_v5_0_sw_init(void *handle) | |||
112 | 114 | ||
113 | ring = &adev->uvd.ring; | 115 | ring = &adev->uvd.ring; |
114 | sprintf(ring->name, "uvd"); | 116 | sprintf(ring->name, "uvd"); |
115 | r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, | 117 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); |
116 | &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); | ||
117 | 118 | ||
118 | return r; | 119 | return r; |
119 | } | 120 | } |
@@ -577,20 +578,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, | |||
577 | amdgpu_ring_write(ring, ib->length_dw); | 578 | amdgpu_ring_write(ring, ib->length_dw); |
578 | } | 579 | } |
579 | 580 | ||
580 | static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
581 | { | ||
582 | return | ||
583 | 6; /* uvd_v5_0_ring_emit_ib */ | ||
584 | } | ||
585 | |||
586 | static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
587 | { | ||
588 | return | ||
589 | 2 + /* uvd_v5_0_ring_emit_hdp_flush */ | ||
590 | 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */ | ||
591 | 14; /* uvd_v5_0_ring_emit_fence x1 no user fence */ | ||
592 | } | ||
593 | |||
594 | static bool uvd_v5_0_is_idle(void *handle) | 581 | static bool uvd_v5_0_is_idle(void *handle) |
595 | { | 582 | { |
596 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 583 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -737,6 +724,20 @@ static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev) | |||
737 | } | 724 | } |
738 | #endif | 725 | #endif |
739 | 726 | ||
727 | static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) | ||
728 | { | ||
729 | u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); | ||
730 | |||
731 | if (enable) | ||
732 | tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | | ||
733 | GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); | ||
734 | else | ||
735 | tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | | ||
736 | GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); | ||
737 | |||
738 | WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); | ||
739 | } | ||
740 | |||
740 | static int uvd_v5_0_set_clockgating_state(void *handle, | 741 | static int uvd_v5_0_set_clockgating_state(void *handle, |
741 | enum amd_clockgating_state state) | 742 | enum amd_clockgating_state state) |
742 | { | 743 | { |
@@ -744,6 +745,8 @@ static int uvd_v5_0_set_clockgating_state(void *handle, | |||
744 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; | 745 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; |
745 | static int curstate = -1; | 746 | static int curstate = -1; |
746 | 747 | ||
748 | uvd_v5_0_set_bypass_mode(adev, enable); | ||
749 | |||
747 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | 750 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) |
748 | return 0; | 751 | return 0; |
749 | 752 | ||
@@ -789,7 +792,7 @@ static int uvd_v5_0_set_powergating_state(void *handle, | |||
789 | } | 792 | } |
790 | } | 793 | } |
791 | 794 | ||
792 | const struct amd_ip_funcs uvd_v5_0_ip_funcs = { | 795 | static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { |
793 | .name = "uvd_v5_0", | 796 | .name = "uvd_v5_0", |
794 | .early_init = uvd_v5_0_early_init, | 797 | .early_init = uvd_v5_0_early_init, |
795 | .late_init = NULL, | 798 | .late_init = NULL, |
@@ -807,10 +810,18 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = { | |||
807 | }; | 810 | }; |
808 | 811 | ||
809 | static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { | 812 | static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { |
813 | .type = AMDGPU_RING_TYPE_UVD, | ||
814 | .align_mask = 0xf, | ||
815 | .nop = PACKET0(mmUVD_NO_OP, 0), | ||
810 | .get_rptr = uvd_v5_0_ring_get_rptr, | 816 | .get_rptr = uvd_v5_0_ring_get_rptr, |
811 | .get_wptr = uvd_v5_0_ring_get_wptr, | 817 | .get_wptr = uvd_v5_0_ring_get_wptr, |
812 | .set_wptr = uvd_v5_0_ring_set_wptr, | 818 | .set_wptr = uvd_v5_0_ring_set_wptr, |
813 | .parse_cs = amdgpu_uvd_ring_parse_cs, | 819 | .parse_cs = amdgpu_uvd_ring_parse_cs, |
820 | .emit_frame_size = | ||
821 | 2 + /* uvd_v5_0_ring_emit_hdp_flush */ | ||
822 | 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */ | ||
823 | 14, /* uvd_v5_0_ring_emit_fence x1 no user fence */ | ||
824 | .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */ | ||
814 | .emit_ib = uvd_v5_0_ring_emit_ib, | 825 | .emit_ib = uvd_v5_0_ring_emit_ib, |
815 | .emit_fence = uvd_v5_0_ring_emit_fence, | 826 | .emit_fence = uvd_v5_0_ring_emit_fence, |
816 | .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush, | 827 | .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush, |
@@ -821,8 +832,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { | |||
821 | .pad_ib = amdgpu_ring_generic_pad_ib, | 832 | .pad_ib = amdgpu_ring_generic_pad_ib, |
822 | .begin_use = amdgpu_uvd_ring_begin_use, | 833 | .begin_use = amdgpu_uvd_ring_begin_use, |
823 | .end_use = amdgpu_uvd_ring_end_use, | 834 | .end_use = amdgpu_uvd_ring_end_use, |
824 | .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size, | ||
825 | .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size, | ||
826 | }; | 835 | }; |
827 | 836 | ||
828 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) | 837 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -840,3 +849,12 @@ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) | |||
840 | adev->uvd.irq.num_types = 1; | 849 | adev->uvd.irq.num_types = 1; |
841 | adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; | 850 | adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; |
842 | } | 851 | } |
852 | |||
853 | const struct amdgpu_ip_block_version uvd_v5_0_ip_block = | ||
854 | { | ||
855 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
856 | .major = 5, | ||
857 | .minor = 0, | ||
858 | .rev = 0, | ||
859 | .funcs = &uvd_v5_0_ip_funcs, | ||
860 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h index e3b3c49fa5de..2eaaea793ac5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __UVD_V5_0_H__ | 24 | #ifndef __UVD_V5_0_H__ |
25 | #define __UVD_V5_0_H__ | 25 | #define __UVD_V5_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs uvd_v5_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version uvd_v5_0_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
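uvd_v4_2.c and uvd_v5_0.c now carry the same GCK_DFS_BYPASS_CNTL toggle (the v4_2 copy even keeps the uvd_v5_0_set_bypass_mode name), applied on every clockgating transition before the MGCG capability check. The read-modify-write pattern common to both reduces to one helper; a sketch, with only the helper name being an assumption — the register, masks and SMC accessors are taken from the hunks above.

/* Sketch of the shared bypass toggle: set or clear the DCLK/VCLK bypass
 * bits in GCK_DFS_BYPASS_CNTL via the SMC register accessors.
 */
static void example_set_dfs_bypass(struct amdgpu_device *adev, bool enable)
{
	u32 mask = GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
		   GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK;
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= mask;
	else
		tmp &= ~mask;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}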
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index ab3df6d75656..a339b5ccb296 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
@@ -116,8 +116,7 @@ static int uvd_v6_0_sw_init(void *handle) | |||
116 | 116 | ||
117 | ring = &adev->uvd.ring; | 117 | ring = &adev->uvd.ring; |
118 | sprintf(ring->name, "uvd"); | 118 | sprintf(ring->name, "uvd"); |
119 | r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, | 119 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); |
120 | &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); | ||
121 | 120 | ||
122 | return r; | 121 | return r; |
123 | } | 122 | } |
@@ -725,31 +724,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | |||
725 | amdgpu_ring_write(ring, 0xE); | 724 | amdgpu_ring_write(ring, 0xE); |
726 | } | 725 | } |
727 | 726 | ||
728 | static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
729 | { | ||
730 | return | ||
731 | 8; /* uvd_v6_0_ring_emit_ib */ | ||
732 | } | ||
733 | |||
734 | static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
735 | { | ||
736 | return | ||
737 | 2 + /* uvd_v6_0_ring_emit_hdp_flush */ | ||
738 | 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */ | ||
739 | 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ | ||
740 | 14; /* uvd_v6_0_ring_emit_fence x1 no user fence */ | ||
741 | } | ||
742 | |||
743 | static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring) | ||
744 | { | ||
745 | return | ||
746 | 2 + /* uvd_v6_0_ring_emit_hdp_flush */ | ||
747 | 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */ | ||
748 | 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ | ||
749 | 20 + /* uvd_v6_0_ring_emit_vm_flush */ | ||
750 | 14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */ | ||
751 | } | ||
752 | |||
753 | static bool uvd_v6_0_is_idle(void *handle) | 727 | static bool uvd_v6_0_is_idle(void *handle) |
754 | { | 728 | { |
755 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 729 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -961,7 +935,7 @@ static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev) | |||
961 | } | 935 | } |
962 | #endif | 936 | #endif |
963 | 937 | ||
964 | static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable) | 938 | static void uvd_v6_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) |
965 | { | 939 | { |
966 | u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); | 940 | u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); |
967 | 941 | ||
@@ -979,15 +953,14 @@ static int uvd_v6_0_set_clockgating_state(void *handle, | |||
979 | enum amd_clockgating_state state) | 953 | enum amd_clockgating_state state) |
980 | { | 954 | { |
981 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 955 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
956 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; | ||
982 | 957 | ||
983 | if (adev->asic_type == CHIP_FIJI || | 958 | uvd_v6_0_set_bypass_mode(adev, enable); |
984 | adev->asic_type == CHIP_POLARIS10) | ||
985 | uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false); | ||
986 | 959 | ||
987 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | 960 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) |
988 | return 0; | 961 | return 0; |
989 | 962 | ||
990 | if (state == AMD_CG_STATE_GATE) { | 963 | if (enable) { |
991 | /* disable HW gating and enable Sw gating */ | 964 | /* disable HW gating and enable Sw gating */ |
992 | uvd_v6_0_set_sw_clock_gating(adev); | 965 | uvd_v6_0_set_sw_clock_gating(adev); |
993 | } else { | 966 | } else { |
@@ -1027,7 +1000,7 @@ static int uvd_v6_0_set_powergating_state(void *handle, | |||
1027 | } | 1000 | } |
1028 | } | 1001 | } |
1029 | 1002 | ||
1030 | const struct amd_ip_funcs uvd_v6_0_ip_funcs = { | 1003 | static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { |
1031 | .name = "uvd_v6_0", | 1004 | .name = "uvd_v6_0", |
1032 | .early_init = uvd_v6_0_early_init, | 1005 | .early_init = uvd_v6_0_early_init, |
1033 | .late_init = NULL, | 1006 | .late_init = NULL, |
@@ -1048,10 +1021,19 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = { | |||
1048 | }; | 1021 | }; |
1049 | 1022 | ||
1050 | static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { | 1023 | static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { |
1024 | .type = AMDGPU_RING_TYPE_UVD, | ||
1025 | .align_mask = 0xf, | ||
1026 | .nop = PACKET0(mmUVD_NO_OP, 0), | ||
1051 | .get_rptr = uvd_v6_0_ring_get_rptr, | 1027 | .get_rptr = uvd_v6_0_ring_get_rptr, |
1052 | .get_wptr = uvd_v6_0_ring_get_wptr, | 1028 | .get_wptr = uvd_v6_0_ring_get_wptr, |
1053 | .set_wptr = uvd_v6_0_ring_set_wptr, | 1029 | .set_wptr = uvd_v6_0_ring_set_wptr, |
1054 | .parse_cs = amdgpu_uvd_ring_parse_cs, | 1030 | .parse_cs = amdgpu_uvd_ring_parse_cs, |
1031 | .emit_frame_size = | ||
1032 | 2 + /* uvd_v6_0_ring_emit_hdp_flush */ | ||
1033 | 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */ | ||
1034 | 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ | ||
1035 | 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */ | ||
1036 | .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */ | ||
1055 | .emit_ib = uvd_v6_0_ring_emit_ib, | 1037 | .emit_ib = uvd_v6_0_ring_emit_ib, |
1056 | .emit_fence = uvd_v6_0_ring_emit_fence, | 1038 | .emit_fence = uvd_v6_0_ring_emit_fence, |
1057 | .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, | 1039 | .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, |
@@ -1062,15 +1044,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { | |||
1062 | .pad_ib = amdgpu_ring_generic_pad_ib, | 1044 | .pad_ib = amdgpu_ring_generic_pad_ib, |
1063 | .begin_use = amdgpu_uvd_ring_begin_use, | 1045 | .begin_use = amdgpu_uvd_ring_begin_use, |
1064 | .end_use = amdgpu_uvd_ring_end_use, | 1046 | .end_use = amdgpu_uvd_ring_end_use, |
1065 | .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size, | ||
1066 | .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size, | ||
1067 | }; | 1047 | }; |
1068 | 1048 | ||
1069 | static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { | 1049 | static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { |
1050 | .type = AMDGPU_RING_TYPE_UVD, | ||
1051 | .align_mask = 0xf, | ||
1052 | .nop = PACKET0(mmUVD_NO_OP, 0), | ||
1070 | .get_rptr = uvd_v6_0_ring_get_rptr, | 1053 | .get_rptr = uvd_v6_0_ring_get_rptr, |
1071 | .get_wptr = uvd_v6_0_ring_get_wptr, | 1054 | .get_wptr = uvd_v6_0_ring_get_wptr, |
1072 | .set_wptr = uvd_v6_0_ring_set_wptr, | 1055 | .set_wptr = uvd_v6_0_ring_set_wptr, |
1073 | .parse_cs = NULL, | 1056 | .emit_frame_size = |
1057 | 2 + /* uvd_v6_0_ring_emit_hdp_flush */ | ||
1058 | 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */ | ||
1059 | 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ | ||
1060 | 20 + /* uvd_v6_0_ring_emit_vm_flush */ | ||
1061 | 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */ | ||
1062 | .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */ | ||
1074 | .emit_ib = uvd_v6_0_ring_emit_ib, | 1063 | .emit_ib = uvd_v6_0_ring_emit_ib, |
1075 | .emit_fence = uvd_v6_0_ring_emit_fence, | 1064 | .emit_fence = uvd_v6_0_ring_emit_fence, |
1076 | .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, | 1065 | .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, |
@@ -1083,8 +1072,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { | |||
1083 | .pad_ib = amdgpu_ring_generic_pad_ib, | 1072 | .pad_ib = amdgpu_ring_generic_pad_ib, |
1084 | .begin_use = amdgpu_uvd_ring_begin_use, | 1073 | .begin_use = amdgpu_uvd_ring_begin_use, |
1085 | .end_use = amdgpu_uvd_ring_end_use, | 1074 | .end_use = amdgpu_uvd_ring_end_use, |
1086 | .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size, | ||
1087 | .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm, | ||
1088 | }; | 1075 | }; |
1089 | 1076 | ||
1090 | static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) | 1077 | static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -1108,3 +1095,30 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) | |||
1108 | adev->uvd.irq.num_types = 1; | 1095 | adev->uvd.irq.num_types = 1; |
1109 | adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; | 1096 | adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; |
1110 | } | 1097 | } |
1098 | |||
1099 | const struct amdgpu_ip_block_version uvd_v6_0_ip_block = | ||
1100 | { | ||
1101 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1102 | .major = 6, | ||
1103 | .minor = 0, | ||
1104 | .rev = 0, | ||
1105 | .funcs = &uvd_v6_0_ip_funcs, | ||
1106 | }; | ||
1107 | |||
1108 | const struct amdgpu_ip_block_version uvd_v6_2_ip_block = | ||
1109 | { | ||
1110 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1111 | .major = 6, | ||
1112 | .minor = 2, | ||
1113 | .rev = 0, | ||
1114 | .funcs = &uvd_v6_0_ip_funcs, | ||
1115 | }; | ||
1116 | |||
1117 | const struct amdgpu_ip_block_version uvd_v6_3_ip_block = | ||
1118 | { | ||
1119 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1120 | .major = 6, | ||
1121 | .minor = 3, | ||
1122 | .rev = 0, | ||
1123 | .funcs = &uvd_v6_0_ip_funcs, | ||
1124 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h index 6b92a2352986..d3d48c6428cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #ifndef __UVD_V6_0_H__ | 24 | #ifndef __UVD_V6_0_H__ |
25 | #define __UVD_V6_0_H__ | 25 | #define __UVD_V6_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs uvd_v6_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version uvd_v6_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version uvd_v6_2_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version uvd_v6_3_ip_block; | ||
28 | 30 | ||
29 | #endif | 31 | #endif |
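The uvd_v6_0 hunks above replace the per-ring get_emit_ib_size()/get_dma_frame_size() callbacks with constant .emit_frame_size and .emit_ib_size fields (in dwords) in struct amdgpu_ring_funcs. A minimal sketch of how such a table might be consumed follows; the helper name and its caller are assumptions for illustration, not code from this patch.

	/* Worst-case ring space, in dwords, for one submission with num_ibs
	 * indirect buffers, derived from the constant sizes in the ring's
	 * amdgpu_ring_funcs table (see the uvd_v6_0_ring_vm_funcs hunk above). */
	static unsigned example_frame_dwords(struct amdgpu_ring *ring,
					     unsigned num_ibs)
	{
		const struct amdgpu_ring_funcs *funcs = ring->funcs;

		/* e.g. for UVD 6.0 VM rings: 2 + 2 + 10 + 20 + 14 + 14 = 62,
		 * plus 8 dwords per indirect buffer */
		return funcs->emit_frame_size + num_ibs * funcs->emit_ib_size;
	}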
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 76e64ad04a53..38ed903dd6f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | |||
@@ -224,8 +224,8 @@ static int vce_v2_0_sw_init(void *handle) | |||
224 | for (i = 0; i < adev->vce.num_rings; i++) { | 224 | for (i = 0; i < adev->vce.num_rings; i++) { |
225 | ring = &adev->vce.ring[i]; | 225 | ring = &adev->vce.ring[i]; |
226 | sprintf(ring->name, "vce%d", i); | 226 | sprintf(ring->name, "vce%d", i); |
227 | r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, | 227 | r = amdgpu_ring_init(adev, ring, 512, |
228 | &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); | 228 | &adev->vce.irq, 0); |
229 | if (r) | 229 | if (r) |
230 | return r; | 230 | return r; |
231 | } | 231 | } |
@@ -592,7 +592,7 @@ static int vce_v2_0_set_powergating_state(void *handle, | |||
592 | return vce_v2_0_start(adev); | 592 | return vce_v2_0_start(adev); |
593 | } | 593 | } |
594 | 594 | ||
595 | const struct amd_ip_funcs vce_v2_0_ip_funcs = { | 595 | static const struct amd_ip_funcs vce_v2_0_ip_funcs = { |
596 | .name = "vce_v2_0", | 596 | .name = "vce_v2_0", |
597 | .early_init = vce_v2_0_early_init, | 597 | .early_init = vce_v2_0_early_init, |
598 | .late_init = NULL, | 598 | .late_init = NULL, |
@@ -610,10 +610,15 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = { | |||
610 | }; | 610 | }; |
611 | 611 | ||
612 | static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { | 612 | static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { |
613 | .type = AMDGPU_RING_TYPE_VCE, | ||
614 | .align_mask = 0xf, | ||
615 | .nop = VCE_CMD_NO_OP, | ||
613 | .get_rptr = vce_v2_0_ring_get_rptr, | 616 | .get_rptr = vce_v2_0_ring_get_rptr, |
614 | .get_wptr = vce_v2_0_ring_get_wptr, | 617 | .get_wptr = vce_v2_0_ring_get_wptr, |
615 | .set_wptr = vce_v2_0_ring_set_wptr, | 618 | .set_wptr = vce_v2_0_ring_set_wptr, |
616 | .parse_cs = amdgpu_vce_ring_parse_cs, | 619 | .parse_cs = amdgpu_vce_ring_parse_cs, |
620 | .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ | ||
621 | .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ | ||
617 | .emit_ib = amdgpu_vce_ring_emit_ib, | 622 | .emit_ib = amdgpu_vce_ring_emit_ib, |
618 | .emit_fence = amdgpu_vce_ring_emit_fence, | 623 | .emit_fence = amdgpu_vce_ring_emit_fence, |
619 | .test_ring = amdgpu_vce_ring_test_ring, | 624 | .test_ring = amdgpu_vce_ring_test_ring, |
@@ -622,8 +627,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { | |||
622 | .pad_ib = amdgpu_ring_generic_pad_ib, | 627 | .pad_ib = amdgpu_ring_generic_pad_ib, |
623 | .begin_use = amdgpu_vce_ring_begin_use, | 628 | .begin_use = amdgpu_vce_ring_begin_use, |
624 | .end_use = amdgpu_vce_ring_end_use, | 629 | .end_use = amdgpu_vce_ring_end_use, |
625 | .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size, | ||
626 | .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size, | ||
627 | }; | 630 | }; |
628 | 631 | ||
629 | static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) | 632 | static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -644,3 +647,12 @@ static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev) | |||
644 | adev->vce.irq.num_types = 1; | 647 | adev->vce.irq.num_types = 1; |
645 | adev->vce.irq.funcs = &vce_v2_0_irq_funcs; | 648 | adev->vce.irq.funcs = &vce_v2_0_irq_funcs; |
646 | }; | 649 | }; |
650 | |||
651 | const struct amdgpu_ip_block_version vce_v2_0_ip_block = | ||
652 | { | ||
653 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
654 | .major = 2, | ||
655 | .minor = 0, | ||
656 | .rev = 0, | ||
657 | .funcs = &vce_v2_0_ip_funcs, | ||
658 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h index 0d2ae8a01acd..4d15167654a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __VCE_V2_0_H__ | 24 | #ifndef __VCE_V2_0_H__ |
25 | #define __VCE_V2_0_H__ | 25 | #define __VCE_V2_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs vce_v2_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version vce_v2_0_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 8533269ec160..5ed2930a8568 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
@@ -389,8 +389,7 @@ static int vce_v3_0_sw_init(void *handle) | |||
389 | for (i = 0; i < adev->vce.num_rings; i++) { | 389 | for (i = 0; i < adev->vce.num_rings; i++) { |
390 | ring = &adev->vce.ring[i]; | 390 | ring = &adev->vce.ring[i]; |
391 | sprintf(ring->name, "vce%d", i); | 391 | sprintf(ring->name, "vce%d", i); |
392 | r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, | 392 | r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); |
393 | &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); | ||
394 | if (r) | 393 | if (r) |
395 | return r; | 394 | return r; |
396 | } | 395 | } |
@@ -808,28 +807,7 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring) | |||
808 | amdgpu_ring_write(ring, seq); | 807 | amdgpu_ring_write(ring, seq); |
809 | } | 808 | } |
810 | 809 | ||
811 | static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) | 810 | static const struct amd_ip_funcs vce_v3_0_ip_funcs = { |
812 | { | ||
813 | return | ||
814 | 5; /* vce_v3_0_ring_emit_ib */ | ||
815 | } | ||
816 | |||
817 | static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
818 | { | ||
819 | return | ||
820 | 4 + /* vce_v3_0_emit_pipeline_sync */ | ||
821 | 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */ | ||
822 | } | ||
823 | |||
824 | static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring) | ||
825 | { | ||
826 | return | ||
827 | 6 + /* vce_v3_0_emit_vm_flush */ | ||
828 | 4 + /* vce_v3_0_emit_pipeline_sync */ | ||
829 | 6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */ | ||
830 | } | ||
831 | |||
832 | const struct amd_ip_funcs vce_v3_0_ip_funcs = { | ||
833 | .name = "vce_v3_0", | 811 | .name = "vce_v3_0", |
834 | .early_init = vce_v3_0_early_init, | 812 | .early_init = vce_v3_0_early_init, |
835 | .late_init = NULL, | 813 | .late_init = NULL, |
@@ -850,10 +828,17 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = { | |||
850 | }; | 828 | }; |
851 | 829 | ||
852 | static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { | 830 | static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { |
831 | .type = AMDGPU_RING_TYPE_VCE, | ||
832 | .align_mask = 0xf, | ||
833 | .nop = VCE_CMD_NO_OP, | ||
853 | .get_rptr = vce_v3_0_ring_get_rptr, | 834 | .get_rptr = vce_v3_0_ring_get_rptr, |
854 | .get_wptr = vce_v3_0_ring_get_wptr, | 835 | .get_wptr = vce_v3_0_ring_get_wptr, |
855 | .set_wptr = vce_v3_0_ring_set_wptr, | 836 | .set_wptr = vce_v3_0_ring_set_wptr, |
856 | .parse_cs = amdgpu_vce_ring_parse_cs, | 837 | .parse_cs = amdgpu_vce_ring_parse_cs, |
838 | .emit_frame_size = | ||
839 | 4 + /* vce_v3_0_emit_pipeline_sync */ | ||
840 | 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ | ||
841 | .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ | ||
857 | .emit_ib = amdgpu_vce_ring_emit_ib, | 842 | .emit_ib = amdgpu_vce_ring_emit_ib, |
858 | .emit_fence = amdgpu_vce_ring_emit_fence, | 843 | .emit_fence = amdgpu_vce_ring_emit_fence, |
859 | .test_ring = amdgpu_vce_ring_test_ring, | 844 | .test_ring = amdgpu_vce_ring_test_ring, |
@@ -862,15 +847,21 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { | |||
862 | .pad_ib = amdgpu_ring_generic_pad_ib, | 847 | .pad_ib = amdgpu_ring_generic_pad_ib, |
863 | .begin_use = amdgpu_vce_ring_begin_use, | 848 | .begin_use = amdgpu_vce_ring_begin_use, |
864 | .end_use = amdgpu_vce_ring_end_use, | 849 | .end_use = amdgpu_vce_ring_end_use, |
865 | .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size, | ||
866 | .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size, | ||
867 | }; | 850 | }; |
868 | 851 | ||
869 | static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { | 852 | static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { |
853 | .type = AMDGPU_RING_TYPE_VCE, | ||
854 | .align_mask = 0xf, | ||
855 | .nop = VCE_CMD_NO_OP, | ||
870 | .get_rptr = vce_v3_0_ring_get_rptr, | 856 | .get_rptr = vce_v3_0_ring_get_rptr, |
871 | .get_wptr = vce_v3_0_ring_get_wptr, | 857 | .get_wptr = vce_v3_0_ring_get_wptr, |
872 | .set_wptr = vce_v3_0_ring_set_wptr, | 858 | .set_wptr = vce_v3_0_ring_set_wptr, |
873 | .parse_cs = NULL, | 859 | .parse_cs = amdgpu_vce_ring_parse_cs_vm, |
860 | .emit_frame_size = | ||
861 | 6 + /* vce_v3_0_emit_vm_flush */ | ||
862 | 4 + /* vce_v3_0_emit_pipeline_sync */ | ||
863 | 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ | ||
864 | .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ | ||
874 | .emit_ib = vce_v3_0_ring_emit_ib, | 865 | .emit_ib = vce_v3_0_ring_emit_ib, |
875 | .emit_vm_flush = vce_v3_0_emit_vm_flush, | 866 | .emit_vm_flush = vce_v3_0_emit_vm_flush, |
876 | .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, | 867 | .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, |
@@ -881,8 +872,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { | |||
881 | .pad_ib = amdgpu_ring_generic_pad_ib, | 872 | .pad_ib = amdgpu_ring_generic_pad_ib, |
882 | .begin_use = amdgpu_vce_ring_begin_use, | 873 | .begin_use = amdgpu_vce_ring_begin_use, |
883 | .end_use = amdgpu_vce_ring_end_use, | 874 | .end_use = amdgpu_vce_ring_end_use, |
884 | .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size, | ||
885 | .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm, | ||
886 | }; | 875 | }; |
887 | 876 | ||
888 | static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) | 877 | static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -910,3 +899,30 @@ static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev) | |||
910 | adev->vce.irq.num_types = 1; | 899 | adev->vce.irq.num_types = 1; |
911 | adev->vce.irq.funcs = &vce_v3_0_irq_funcs; | 900 | adev->vce.irq.funcs = &vce_v3_0_irq_funcs; |
912 | }; | 901 | }; |
902 | |||
903 | const struct amdgpu_ip_block_version vce_v3_0_ip_block = | ||
904 | { | ||
905 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
906 | .major = 3, | ||
907 | .minor = 0, | ||
908 | .rev = 0, | ||
909 | .funcs = &vce_v3_0_ip_funcs, | ||
910 | }; | ||
911 | |||
912 | const struct amdgpu_ip_block_version vce_v3_1_ip_block = | ||
913 | { | ||
914 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
915 | .major = 3, | ||
916 | .minor = 1, | ||
917 | .rev = 0, | ||
918 | .funcs = &vce_v3_0_ip_funcs, | ||
919 | }; | ||
920 | |||
921 | const struct amdgpu_ip_block_version vce_v3_4_ip_block = | ||
922 | { | ||
923 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
924 | .major = 3, | ||
925 | .minor = 4, | ||
926 | .rev = 0, | ||
927 | .funcs = &vce_v3_0_ip_funcs, | ||
928 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h index b45af65da81f..08b908c7de0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #ifndef __VCE_V3_0_H__ | 24 | #ifndef __VCE_V3_0_H__ |
25 | #define __VCE_V3_0_H__ | 25 | #define __VCE_V3_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs vce_v3_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version vce_v3_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version vce_v3_1_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version vce_v3_4_ip_block; | ||
28 | 30 | ||
29 | #endif | 31 | #endif |
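The VCE hunks above also show the companion change to ring creation: the ring type, nop packet and alignment mask now live in amdgpu_ring_funcs (.type, .align_mask, .nop), so amdgpu_ring_init() is called without them. A rough sketch of ring bring-up after this change; names and fields outside the hunks are assumptions.

	static const struct amdgpu_ring_funcs example_enc_ring_funcs = {
		.type = AMDGPU_RING_TYPE_VCE,
		.align_mask = 0xf,
		.nop = VCE_CMD_NO_OP,
		.emit_frame_size = 6,	/* fence only, as in vce_v2_0 above */
		.emit_ib_size = 4,
		/* get_rptr/get_wptr/set_wptr and emit_* hooks omitted */
	};

	static int example_enc_sw_init(struct amdgpu_device *adev)
	{
		struct amdgpu_ring *ring = &adev->vce.ring[0];

		ring->funcs = &example_enc_ring_funcs;
		sprintf(ring->name, "vce0");
		/* 512-dword ring, VCE irq source, irq type 0 -- as in the hunk */
		return amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
	}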
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index c0d9aad7126f..25c0a71b257d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
@@ -121,8 +121,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg) | |||
121 | u32 r; | 121 | u32 r; |
122 | 122 | ||
123 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | 123 | spin_lock_irqsave(&adev->smc_idx_lock, flags); |
124 | WREG32(mmSMC_IND_INDEX_0, (reg)); | 124 | WREG32(mmSMC_IND_INDEX_11, (reg)); |
125 | r = RREG32(mmSMC_IND_DATA_0); | 125 | r = RREG32(mmSMC_IND_DATA_11); |
126 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | 126 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); |
127 | return r; | 127 | return r; |
128 | } | 128 | } |
@@ -132,8 +132,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | |||
132 | unsigned long flags; | 132 | unsigned long flags; |
133 | 133 | ||
134 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | 134 | spin_lock_irqsave(&adev->smc_idx_lock, flags); |
135 | WREG32(mmSMC_IND_INDEX_0, (reg)); | 135 | WREG32(mmSMC_IND_INDEX_11, (reg)); |
136 | WREG32(mmSMC_IND_DATA_0, (v)); | 136 | WREG32(mmSMC_IND_DATA_11, (v)); |
137 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | 137 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); |
138 | } | 138 | } |
139 | 139 | ||
@@ -437,12 +437,12 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, | |||
437 | /* take the smc lock since we are using the smc index */ | 437 | /* take the smc lock since we are using the smc index */ |
438 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | 438 | spin_lock_irqsave(&adev->smc_idx_lock, flags); |
439 | /* set rom index to 0 */ | 439 | /* set rom index to 0 */ |
440 | WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX); | 440 | WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX); |
441 | WREG32(mmSMC_IND_DATA_0, 0); | 441 | WREG32(mmSMC_IND_DATA_11, 0); |
442 | /* set index to data for continuous read */ | 442 | /* set index to data for continuous read */ |
443 | WREG32(mmSMC_IND_INDEX_0, ixROM_DATA); | 443 | WREG32(mmSMC_IND_INDEX_11, ixROM_DATA); |
444 | for (i = 0; i < length_dw; i++) | 444 | for (i = 0; i < length_dw; i++) |
445 | dw_ptr[i] = RREG32(mmSMC_IND_DATA_0); | 445 | dw_ptr[i] = RREG32(mmSMC_IND_DATA_11); |
446 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | 446 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); |
447 | 447 | ||
448 | return true; | 448 | return true; |
@@ -556,21 +556,100 @@ static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = | |||
556 | {mmPA_SC_RASTER_CONFIG_1, false, true}, | 556 | {mmPA_SC_RASTER_CONFIG_1, false, true}, |
557 | }; | 557 | }; |
558 | 558 | ||
559 | static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num, | 559 | static uint32_t vi_get_register_value(struct amdgpu_device *adev, |
560 | u32 sh_num, u32 reg_offset) | 560 | bool indexed, u32 se_num, |
561 | { | 561 | u32 sh_num, u32 reg_offset) |
562 | uint32_t val; | 562 | { |
563 | if (indexed) { | ||
564 | uint32_t val; | ||
565 | unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num; | ||
566 | unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num; | ||
567 | |||
568 | switch (reg_offset) { | ||
569 | case mmCC_RB_BACKEND_DISABLE: | ||
570 | return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable; | ||
571 | case mmGC_USER_RB_BACKEND_DISABLE: | ||
572 | return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable; | ||
573 | case mmPA_SC_RASTER_CONFIG: | ||
574 | return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config; | ||
575 | case mmPA_SC_RASTER_CONFIG_1: | ||
576 | return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1; | ||
577 | } | ||
563 | 578 | ||
564 | mutex_lock(&adev->grbm_idx_mutex); | 579 | mutex_lock(&adev->grbm_idx_mutex); |
565 | if (se_num != 0xffffffff || sh_num != 0xffffffff) | 580 | if (se_num != 0xffffffff || sh_num != 0xffffffff) |
566 | amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); | 581 | amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); |
567 | 582 | ||
568 | val = RREG32(reg_offset); | 583 | val = RREG32(reg_offset); |
569 | 584 | ||
570 | if (se_num != 0xffffffff || sh_num != 0xffffffff) | 585 | if (se_num != 0xffffffff || sh_num != 0xffffffff) |
571 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | 586 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); |
572 | mutex_unlock(&adev->grbm_idx_mutex); | 587 | mutex_unlock(&adev->grbm_idx_mutex); |
573 | return val; | 588 | return val; |
589 | } else { | ||
590 | unsigned idx; | ||
591 | |||
592 | switch (reg_offset) { | ||
593 | case mmGB_ADDR_CONFIG: | ||
594 | return adev->gfx.config.gb_addr_config; | ||
595 | case mmMC_ARB_RAMCFG: | ||
596 | return adev->gfx.config.mc_arb_ramcfg; | ||
597 | case mmGB_TILE_MODE0: | ||
598 | case mmGB_TILE_MODE1: | ||
599 | case mmGB_TILE_MODE2: | ||
600 | case mmGB_TILE_MODE3: | ||
601 | case mmGB_TILE_MODE4: | ||
602 | case mmGB_TILE_MODE5: | ||
603 | case mmGB_TILE_MODE6: | ||
604 | case mmGB_TILE_MODE7: | ||
605 | case mmGB_TILE_MODE8: | ||
606 | case mmGB_TILE_MODE9: | ||
607 | case mmGB_TILE_MODE10: | ||
608 | case mmGB_TILE_MODE11: | ||
609 | case mmGB_TILE_MODE12: | ||
610 | case mmGB_TILE_MODE13: | ||
611 | case mmGB_TILE_MODE14: | ||
612 | case mmGB_TILE_MODE15: | ||
613 | case mmGB_TILE_MODE16: | ||
614 | case mmGB_TILE_MODE17: | ||
615 | case mmGB_TILE_MODE18: | ||
616 | case mmGB_TILE_MODE19: | ||
617 | case mmGB_TILE_MODE20: | ||
618 | case mmGB_TILE_MODE21: | ||
619 | case mmGB_TILE_MODE22: | ||
620 | case mmGB_TILE_MODE23: | ||
621 | case mmGB_TILE_MODE24: | ||
622 | case mmGB_TILE_MODE25: | ||
623 | case mmGB_TILE_MODE26: | ||
624 | case mmGB_TILE_MODE27: | ||
625 | case mmGB_TILE_MODE28: | ||
626 | case mmGB_TILE_MODE29: | ||
627 | case mmGB_TILE_MODE30: | ||
628 | case mmGB_TILE_MODE31: | ||
629 | idx = (reg_offset - mmGB_TILE_MODE0); | ||
630 | return adev->gfx.config.tile_mode_array[idx]; | ||
631 | case mmGB_MACROTILE_MODE0: | ||
632 | case mmGB_MACROTILE_MODE1: | ||
633 | case mmGB_MACROTILE_MODE2: | ||
634 | case mmGB_MACROTILE_MODE3: | ||
635 | case mmGB_MACROTILE_MODE4: | ||
636 | case mmGB_MACROTILE_MODE5: | ||
637 | case mmGB_MACROTILE_MODE6: | ||
638 | case mmGB_MACROTILE_MODE7: | ||
639 | case mmGB_MACROTILE_MODE8: | ||
640 | case mmGB_MACROTILE_MODE9: | ||
641 | case mmGB_MACROTILE_MODE10: | ||
642 | case mmGB_MACROTILE_MODE11: | ||
643 | case mmGB_MACROTILE_MODE12: | ||
644 | case mmGB_MACROTILE_MODE13: | ||
645 | case mmGB_MACROTILE_MODE14: | ||
646 | case mmGB_MACROTILE_MODE15: | ||
647 | idx = (reg_offset - mmGB_MACROTILE_MODE0); | ||
648 | return adev->gfx.config.macrotile_mode_array[idx]; | ||
649 | default: | ||
650 | return RREG32(reg_offset); | ||
651 | } | ||
652 | } | ||
574 | } | 653 | } |
575 | 654 | ||
576 | static int vi_read_register(struct amdgpu_device *adev, u32 se_num, | 655 | static int vi_read_register(struct amdgpu_device *adev, u32 se_num, |
@@ -605,10 +684,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, | |||
605 | if (reg_offset != asic_register_entry->reg_offset) | 684 | if (reg_offset != asic_register_entry->reg_offset) |
606 | continue; | 685 | continue; |
607 | if (!asic_register_entry->untouched) | 686 | if (!asic_register_entry->untouched) |
608 | *value = asic_register_entry->grbm_indexed ? | 687 | *value = vi_get_register_value(adev, |
609 | vi_read_indexed_register(adev, se_num, | 688 | asic_register_entry->grbm_indexed, |
610 | sh_num, reg_offset) : | 689 | se_num, sh_num, reg_offset); |
611 | RREG32(reg_offset); | ||
612 | return 0; | 690 | return 0; |
613 | } | 691 | } |
614 | } | 692 | } |
@@ -618,10 +696,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, | |||
618 | continue; | 696 | continue; |
619 | 697 | ||
620 | if (!vi_allowed_read_registers[i].untouched) | 698 | if (!vi_allowed_read_registers[i].untouched) |
621 | *value = vi_allowed_read_registers[i].grbm_indexed ? | 699 | *value = vi_get_register_value(adev, |
622 | vi_read_indexed_register(adev, se_num, | 700 | vi_allowed_read_registers[i].grbm_indexed, |
623 | sh_num, reg_offset) : | 701 | se_num, sh_num, reg_offset); |
624 | RREG32(reg_offset); | ||
625 | return 0; | 702 | return 0; |
626 | } | 703 | } |
627 | return -EINVAL; | 704 | return -EINVAL; |
@@ -652,18 +729,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) | |||
652 | return -EINVAL; | 729 | return -EINVAL; |
653 | } | 730 | } |
654 | 731 | ||
655 | static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) | ||
656 | { | ||
657 | u32 tmp = RREG32(mmBIOS_SCRATCH_3); | ||
658 | |||
659 | if (hung) | ||
660 | tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
661 | else | ||
662 | tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
663 | |||
664 | WREG32(mmBIOS_SCRATCH_3, tmp); | ||
665 | } | ||
666 | |||
667 | /** | 732 | /** |
668 | * vi_asic_reset - soft reset GPU | 733 | * vi_asic_reset - soft reset GPU |
669 | * | 734 | * |
@@ -677,11 +742,11 @@ static int vi_asic_reset(struct amdgpu_device *adev) | |||
677 | { | 742 | { |
678 | int r; | 743 | int r; |
679 | 744 | ||
680 | vi_set_bios_scratch_engine_hung(adev, true); | 745 | amdgpu_atombios_scratch_regs_engine_hung(adev, true); |
681 | 746 | ||
682 | r = vi_gpu_pci_config_reset(adev); | 747 | r = vi_gpu_pci_config_reset(adev); |
683 | 748 | ||
684 | vi_set_bios_scratch_engine_hung(adev, false); | 749 | amdgpu_atombios_scratch_regs_engine_hung(adev, false); |
685 | 750 | ||
686 | return r; | 751 | return r; |
687 | } | 752 | } |
@@ -781,734 +846,6 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev, | |||
781 | WREG32(mmBIF_DOORBELL_APER_EN, tmp); | 846 | WREG32(mmBIF_DOORBELL_APER_EN, tmp); |
782 | } | 847 | } |
783 | 848 | ||
784 | /* topaz has no DCE, UVD, VCE */ | ||
785 | static const struct amdgpu_ip_block_version topaz_ip_blocks[] = | ||
786 | { | ||
787 | /* ORDER MATTERS! */ | ||
788 | { | ||
789 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
790 | .major = 2, | ||
791 | .minor = 0, | ||
792 | .rev = 0, | ||
793 | .funcs = &vi_common_ip_funcs, | ||
794 | }, | ||
795 | { | ||
796 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
797 | .major = 7, | ||
798 | .minor = 4, | ||
799 | .rev = 0, | ||
800 | .funcs = &gmc_v7_0_ip_funcs, | ||
801 | }, | ||
802 | { | ||
803 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
804 | .major = 2, | ||
805 | .minor = 4, | ||
806 | .rev = 0, | ||
807 | .funcs = &iceland_ih_ip_funcs, | ||
808 | }, | ||
809 | { | ||
810 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
811 | .major = 7, | ||
812 | .minor = 1, | ||
813 | .rev = 0, | ||
814 | .funcs = &amdgpu_pp_ip_funcs, | ||
815 | }, | ||
816 | { | ||
817 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
818 | .major = 8, | ||
819 | .minor = 0, | ||
820 | .rev = 0, | ||
821 | .funcs = &gfx_v8_0_ip_funcs, | ||
822 | }, | ||
823 | { | ||
824 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
825 | .major = 2, | ||
826 | .minor = 4, | ||
827 | .rev = 0, | ||
828 | .funcs = &sdma_v2_4_ip_funcs, | ||
829 | }, | ||
830 | }; | ||
831 | |||
832 | static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] = | ||
833 | { | ||
834 | /* ORDER MATTERS! */ | ||
835 | { | ||
836 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
837 | .major = 2, | ||
838 | .minor = 0, | ||
839 | .rev = 0, | ||
840 | .funcs = &vi_common_ip_funcs, | ||
841 | }, | ||
842 | { | ||
843 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
844 | .major = 7, | ||
845 | .minor = 4, | ||
846 | .rev = 0, | ||
847 | .funcs = &gmc_v7_0_ip_funcs, | ||
848 | }, | ||
849 | { | ||
850 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
851 | .major = 2, | ||
852 | .minor = 4, | ||
853 | .rev = 0, | ||
854 | .funcs = &iceland_ih_ip_funcs, | ||
855 | }, | ||
856 | { | ||
857 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
858 | .major = 7, | ||
859 | .minor = 1, | ||
860 | .rev = 0, | ||
861 | .funcs = &amdgpu_pp_ip_funcs, | ||
862 | }, | ||
863 | { | ||
864 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
865 | .major = 1, | ||
866 | .minor = 0, | ||
867 | .rev = 0, | ||
868 | .funcs = &dce_virtual_ip_funcs, | ||
869 | }, | ||
870 | { | ||
871 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
872 | .major = 8, | ||
873 | .minor = 0, | ||
874 | .rev = 0, | ||
875 | .funcs = &gfx_v8_0_ip_funcs, | ||
876 | }, | ||
877 | { | ||
878 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
879 | .major = 2, | ||
880 | .minor = 4, | ||
881 | .rev = 0, | ||
882 | .funcs = &sdma_v2_4_ip_funcs, | ||
883 | }, | ||
884 | }; | ||
885 | |||
886 | static const struct amdgpu_ip_block_version tonga_ip_blocks[] = | ||
887 | { | ||
888 | /* ORDER MATTERS! */ | ||
889 | { | ||
890 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
891 | .major = 2, | ||
892 | .minor = 0, | ||
893 | .rev = 0, | ||
894 | .funcs = &vi_common_ip_funcs, | ||
895 | }, | ||
896 | { | ||
897 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
898 | .major = 8, | ||
899 | .minor = 0, | ||
900 | .rev = 0, | ||
901 | .funcs = &gmc_v8_0_ip_funcs, | ||
902 | }, | ||
903 | { | ||
904 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
905 | .major = 3, | ||
906 | .minor = 0, | ||
907 | .rev = 0, | ||
908 | .funcs = &tonga_ih_ip_funcs, | ||
909 | }, | ||
910 | { | ||
911 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
912 | .major = 7, | ||
913 | .minor = 1, | ||
914 | .rev = 0, | ||
915 | .funcs = &amdgpu_pp_ip_funcs, | ||
916 | }, | ||
917 | { | ||
918 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
919 | .major = 10, | ||
920 | .minor = 0, | ||
921 | .rev = 0, | ||
922 | .funcs = &dce_v10_0_ip_funcs, | ||
923 | }, | ||
924 | { | ||
925 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
926 | .major = 8, | ||
927 | .minor = 0, | ||
928 | .rev = 0, | ||
929 | .funcs = &gfx_v8_0_ip_funcs, | ||
930 | }, | ||
931 | { | ||
932 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
933 | .major = 3, | ||
934 | .minor = 0, | ||
935 | .rev = 0, | ||
936 | .funcs = &sdma_v3_0_ip_funcs, | ||
937 | }, | ||
938 | { | ||
939 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
940 | .major = 5, | ||
941 | .minor = 0, | ||
942 | .rev = 0, | ||
943 | .funcs = &uvd_v5_0_ip_funcs, | ||
944 | }, | ||
945 | { | ||
946 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
947 | .major = 3, | ||
948 | .minor = 0, | ||
949 | .rev = 0, | ||
950 | .funcs = &vce_v3_0_ip_funcs, | ||
951 | }, | ||
952 | }; | ||
953 | |||
954 | static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] = | ||
955 | { | ||
956 | /* ORDER MATTERS! */ | ||
957 | { | ||
958 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
959 | .major = 2, | ||
960 | .minor = 0, | ||
961 | .rev = 0, | ||
962 | .funcs = &vi_common_ip_funcs, | ||
963 | }, | ||
964 | { | ||
965 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
966 | .major = 8, | ||
967 | .minor = 0, | ||
968 | .rev = 0, | ||
969 | .funcs = &gmc_v8_0_ip_funcs, | ||
970 | }, | ||
971 | { | ||
972 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
973 | .major = 3, | ||
974 | .minor = 0, | ||
975 | .rev = 0, | ||
976 | .funcs = &tonga_ih_ip_funcs, | ||
977 | }, | ||
978 | { | ||
979 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
980 | .major = 7, | ||
981 | .minor = 1, | ||
982 | .rev = 0, | ||
983 | .funcs = &amdgpu_pp_ip_funcs, | ||
984 | }, | ||
985 | { | ||
986 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
987 | .major = 10, | ||
988 | .minor = 0, | ||
989 | .rev = 0, | ||
990 | .funcs = &dce_virtual_ip_funcs, | ||
991 | }, | ||
992 | { | ||
993 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
994 | .major = 8, | ||
995 | .minor = 0, | ||
996 | .rev = 0, | ||
997 | .funcs = &gfx_v8_0_ip_funcs, | ||
998 | }, | ||
999 | { | ||
1000 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1001 | .major = 3, | ||
1002 | .minor = 0, | ||
1003 | .rev = 0, | ||
1004 | .funcs = &sdma_v3_0_ip_funcs, | ||
1005 | }, | ||
1006 | { | ||
1007 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1008 | .major = 5, | ||
1009 | .minor = 0, | ||
1010 | .rev = 0, | ||
1011 | .funcs = &uvd_v5_0_ip_funcs, | ||
1012 | }, | ||
1013 | { | ||
1014 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1015 | .major = 3, | ||
1016 | .minor = 0, | ||
1017 | .rev = 0, | ||
1018 | .funcs = &vce_v3_0_ip_funcs, | ||
1019 | }, | ||
1020 | }; | ||
1021 | |||
1022 | static const struct amdgpu_ip_block_version fiji_ip_blocks[] = | ||
1023 | { | ||
1024 | /* ORDER MATTERS! */ | ||
1025 | { | ||
1026 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1027 | .major = 2, | ||
1028 | .minor = 0, | ||
1029 | .rev = 0, | ||
1030 | .funcs = &vi_common_ip_funcs, | ||
1031 | }, | ||
1032 | { | ||
1033 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1034 | .major = 8, | ||
1035 | .minor = 5, | ||
1036 | .rev = 0, | ||
1037 | .funcs = &gmc_v8_0_ip_funcs, | ||
1038 | }, | ||
1039 | { | ||
1040 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1041 | .major = 3, | ||
1042 | .minor = 0, | ||
1043 | .rev = 0, | ||
1044 | .funcs = &tonga_ih_ip_funcs, | ||
1045 | }, | ||
1046 | { | ||
1047 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1048 | .major = 7, | ||
1049 | .minor = 1, | ||
1050 | .rev = 0, | ||
1051 | .funcs = &amdgpu_pp_ip_funcs, | ||
1052 | }, | ||
1053 | { | ||
1054 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1055 | .major = 10, | ||
1056 | .minor = 1, | ||
1057 | .rev = 0, | ||
1058 | .funcs = &dce_v10_0_ip_funcs, | ||
1059 | }, | ||
1060 | { | ||
1061 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1062 | .major = 8, | ||
1063 | .minor = 0, | ||
1064 | .rev = 0, | ||
1065 | .funcs = &gfx_v8_0_ip_funcs, | ||
1066 | }, | ||
1067 | { | ||
1068 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1069 | .major = 3, | ||
1070 | .minor = 0, | ||
1071 | .rev = 0, | ||
1072 | .funcs = &sdma_v3_0_ip_funcs, | ||
1073 | }, | ||
1074 | { | ||
1075 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1076 | .major = 6, | ||
1077 | .minor = 0, | ||
1078 | .rev = 0, | ||
1079 | .funcs = &uvd_v6_0_ip_funcs, | ||
1080 | }, | ||
1081 | { | ||
1082 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1083 | .major = 3, | ||
1084 | .minor = 0, | ||
1085 | .rev = 0, | ||
1086 | .funcs = &vce_v3_0_ip_funcs, | ||
1087 | }, | ||
1088 | }; | ||
1089 | |||
1090 | static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] = | ||
1091 | { | ||
1092 | /* ORDER MATTERS! */ | ||
1093 | { | ||
1094 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1095 | .major = 2, | ||
1096 | .minor = 0, | ||
1097 | .rev = 0, | ||
1098 | .funcs = &vi_common_ip_funcs, | ||
1099 | }, | ||
1100 | { | ||
1101 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1102 | .major = 8, | ||
1103 | .minor = 5, | ||
1104 | .rev = 0, | ||
1105 | .funcs = &gmc_v8_0_ip_funcs, | ||
1106 | }, | ||
1107 | { | ||
1108 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1109 | .major = 3, | ||
1110 | .minor = 0, | ||
1111 | .rev = 0, | ||
1112 | .funcs = &tonga_ih_ip_funcs, | ||
1113 | }, | ||
1114 | { | ||
1115 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1116 | .major = 7, | ||
1117 | .minor = 1, | ||
1118 | .rev = 0, | ||
1119 | .funcs = &amdgpu_pp_ip_funcs, | ||
1120 | }, | ||
1121 | { | ||
1122 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1123 | .major = 10, | ||
1124 | .minor = 1, | ||
1125 | .rev = 0, | ||
1126 | .funcs = &dce_virtual_ip_funcs, | ||
1127 | }, | ||
1128 | { | ||
1129 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1130 | .major = 8, | ||
1131 | .minor = 0, | ||
1132 | .rev = 0, | ||
1133 | .funcs = &gfx_v8_0_ip_funcs, | ||
1134 | }, | ||
1135 | { | ||
1136 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1137 | .major = 3, | ||
1138 | .minor = 0, | ||
1139 | .rev = 0, | ||
1140 | .funcs = &sdma_v3_0_ip_funcs, | ||
1141 | }, | ||
1142 | { | ||
1143 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1144 | .major = 6, | ||
1145 | .minor = 0, | ||
1146 | .rev = 0, | ||
1147 | .funcs = &uvd_v6_0_ip_funcs, | ||
1148 | }, | ||
1149 | { | ||
1150 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1151 | .major = 3, | ||
1152 | .minor = 0, | ||
1153 | .rev = 0, | ||
1154 | .funcs = &vce_v3_0_ip_funcs, | ||
1155 | }, | ||
1156 | }; | ||
1157 | |||
1158 | static const struct amdgpu_ip_block_version polaris11_ip_blocks[] = | ||
1159 | { | ||
1160 | /* ORDER MATTERS! */ | ||
1161 | { | ||
1162 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1163 | .major = 2, | ||
1164 | .minor = 0, | ||
1165 | .rev = 0, | ||
1166 | .funcs = &vi_common_ip_funcs, | ||
1167 | }, | ||
1168 | { | ||
1169 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1170 | .major = 8, | ||
1171 | .minor = 1, | ||
1172 | .rev = 0, | ||
1173 | .funcs = &gmc_v8_0_ip_funcs, | ||
1174 | }, | ||
1175 | { | ||
1176 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1177 | .major = 3, | ||
1178 | .minor = 1, | ||
1179 | .rev = 0, | ||
1180 | .funcs = &tonga_ih_ip_funcs, | ||
1181 | }, | ||
1182 | { | ||
1183 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1184 | .major = 7, | ||
1185 | .minor = 2, | ||
1186 | .rev = 0, | ||
1187 | .funcs = &amdgpu_pp_ip_funcs, | ||
1188 | }, | ||
1189 | { | ||
1190 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1191 | .major = 11, | ||
1192 | .minor = 2, | ||
1193 | .rev = 0, | ||
1194 | .funcs = &dce_v11_0_ip_funcs, | ||
1195 | }, | ||
1196 | { | ||
1197 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1198 | .major = 8, | ||
1199 | .minor = 0, | ||
1200 | .rev = 0, | ||
1201 | .funcs = &gfx_v8_0_ip_funcs, | ||
1202 | }, | ||
1203 | { | ||
1204 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1205 | .major = 3, | ||
1206 | .minor = 1, | ||
1207 | .rev = 0, | ||
1208 | .funcs = &sdma_v3_0_ip_funcs, | ||
1209 | }, | ||
1210 | { | ||
1211 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1212 | .major = 6, | ||
1213 | .minor = 3, | ||
1214 | .rev = 0, | ||
1215 | .funcs = &uvd_v6_0_ip_funcs, | ||
1216 | }, | ||
1217 | { | ||
1218 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1219 | .major = 3, | ||
1220 | .minor = 4, | ||
1221 | .rev = 0, | ||
1222 | .funcs = &vce_v3_0_ip_funcs, | ||
1223 | }, | ||
1224 | }; | ||
1225 | |||
1226 | static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] = | ||
1227 | { | ||
1228 | /* ORDER MATTERS! */ | ||
1229 | { | ||
1230 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1231 | .major = 2, | ||
1232 | .minor = 0, | ||
1233 | .rev = 0, | ||
1234 | .funcs = &vi_common_ip_funcs, | ||
1235 | }, | ||
1236 | { | ||
1237 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1238 | .major = 8, | ||
1239 | .minor = 1, | ||
1240 | .rev = 0, | ||
1241 | .funcs = &gmc_v8_0_ip_funcs, | ||
1242 | }, | ||
1243 | { | ||
1244 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1245 | .major = 3, | ||
1246 | .minor = 1, | ||
1247 | .rev = 0, | ||
1248 | .funcs = &tonga_ih_ip_funcs, | ||
1249 | }, | ||
1250 | { | ||
1251 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1252 | .major = 7, | ||
1253 | .minor = 2, | ||
1254 | .rev = 0, | ||
1255 | .funcs = &amdgpu_pp_ip_funcs, | ||
1256 | }, | ||
1257 | { | ||
1258 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1259 | .major = 11, | ||
1260 | .minor = 2, | ||
1261 | .rev = 0, | ||
1262 | .funcs = &dce_virtual_ip_funcs, | ||
1263 | }, | ||
1264 | { | ||
1265 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1266 | .major = 8, | ||
1267 | .minor = 0, | ||
1268 | .rev = 0, | ||
1269 | .funcs = &gfx_v8_0_ip_funcs, | ||
1270 | }, | ||
1271 | { | ||
1272 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1273 | .major = 3, | ||
1274 | .minor = 1, | ||
1275 | .rev = 0, | ||
1276 | .funcs = &sdma_v3_0_ip_funcs, | ||
1277 | }, | ||
1278 | { | ||
1279 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1280 | .major = 6, | ||
1281 | .minor = 3, | ||
1282 | .rev = 0, | ||
1283 | .funcs = &uvd_v6_0_ip_funcs, | ||
1284 | }, | ||
1285 | { | ||
1286 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1287 | .major = 3, | ||
1288 | .minor = 4, | ||
1289 | .rev = 0, | ||
1290 | .funcs = &vce_v3_0_ip_funcs, | ||
1291 | }, | ||
1292 | }; | ||
1293 | |||
1294 | static const struct amdgpu_ip_block_version cz_ip_blocks[] = | ||
1295 | { | ||
1296 | /* ORDER MATTERS! */ | ||
1297 | { | ||
1298 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1299 | .major = 2, | ||
1300 | .minor = 0, | ||
1301 | .rev = 0, | ||
1302 | .funcs = &vi_common_ip_funcs, | ||
1303 | }, | ||
1304 | { | ||
1305 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1306 | .major = 8, | ||
1307 | .minor = 0, | ||
1308 | .rev = 0, | ||
1309 | .funcs = &gmc_v8_0_ip_funcs, | ||
1310 | }, | ||
1311 | { | ||
1312 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1313 | .major = 3, | ||
1314 | .minor = 0, | ||
1315 | .rev = 0, | ||
1316 | .funcs = &cz_ih_ip_funcs, | ||
1317 | }, | ||
1318 | { | ||
1319 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1320 | .major = 8, | ||
1321 | .minor = 0, | ||
1322 | .rev = 0, | ||
1323 | .funcs = &amdgpu_pp_ip_funcs | ||
1324 | }, | ||
1325 | { | ||
1326 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1327 | .major = 11, | ||
1328 | .minor = 0, | ||
1329 | .rev = 0, | ||
1330 | .funcs = &dce_v11_0_ip_funcs, | ||
1331 | }, | ||
1332 | { | ||
1333 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1334 | .major = 8, | ||
1335 | .minor = 0, | ||
1336 | .rev = 0, | ||
1337 | .funcs = &gfx_v8_0_ip_funcs, | ||
1338 | }, | ||
1339 | { | ||
1340 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1341 | .major = 3, | ||
1342 | .minor = 0, | ||
1343 | .rev = 0, | ||
1344 | .funcs = &sdma_v3_0_ip_funcs, | ||
1345 | }, | ||
1346 | { | ||
1347 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1348 | .major = 6, | ||
1349 | .minor = 0, | ||
1350 | .rev = 0, | ||
1351 | .funcs = &uvd_v6_0_ip_funcs, | ||
1352 | }, | ||
1353 | { | ||
1354 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1355 | .major = 3, | ||
1356 | .minor = 0, | ||
1357 | .rev = 0, | ||
1358 | .funcs = &vce_v3_0_ip_funcs, | ||
1359 | }, | ||
1360 | #if defined(CONFIG_DRM_AMD_ACP) | ||
1361 | { | ||
1362 | .type = AMD_IP_BLOCK_TYPE_ACP, | ||
1363 | .major = 2, | ||
1364 | .minor = 2, | ||
1365 | .rev = 0, | ||
1366 | .funcs = &acp_ip_funcs, | ||
1367 | }, | ||
1368 | #endif | ||
1369 | }; | ||
1370 | |||
1371 | static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] = | ||
1372 | { | ||
1373 | /* ORDER MATTERS! */ | ||
1374 | { | ||
1375 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1376 | .major = 2, | ||
1377 | .minor = 0, | ||
1378 | .rev = 0, | ||
1379 | .funcs = &vi_common_ip_funcs, | ||
1380 | }, | ||
1381 | { | ||
1382 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1383 | .major = 8, | ||
1384 | .minor = 0, | ||
1385 | .rev = 0, | ||
1386 | .funcs = &gmc_v8_0_ip_funcs, | ||
1387 | }, | ||
1388 | { | ||
1389 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1390 | .major = 3, | ||
1391 | .minor = 0, | ||
1392 | .rev = 0, | ||
1393 | .funcs = &cz_ih_ip_funcs, | ||
1394 | }, | ||
1395 | { | ||
1396 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1397 | .major = 8, | ||
1398 | .minor = 0, | ||
1399 | .rev = 0, | ||
1400 | .funcs = &amdgpu_pp_ip_funcs | ||
1401 | }, | ||
1402 | { | ||
1403 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1404 | .major = 11, | ||
1405 | .minor = 0, | ||
1406 | .rev = 0, | ||
1407 | .funcs = &dce_virtual_ip_funcs, | ||
1408 | }, | ||
1409 | { | ||
1410 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1411 | .major = 8, | ||
1412 | .minor = 0, | ||
1413 | .rev = 0, | ||
1414 | .funcs = &gfx_v8_0_ip_funcs, | ||
1415 | }, | ||
1416 | { | ||
1417 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1418 | .major = 3, | ||
1419 | .minor = 0, | ||
1420 | .rev = 0, | ||
1421 | .funcs = &sdma_v3_0_ip_funcs, | ||
1422 | }, | ||
1423 | { | ||
1424 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1425 | .major = 6, | ||
1426 | .minor = 0, | ||
1427 | .rev = 0, | ||
1428 | .funcs = &uvd_v6_0_ip_funcs, | ||
1429 | }, | ||
1430 | { | ||
1431 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1432 | .major = 3, | ||
1433 | .minor = 0, | ||
1434 | .rev = 0, | ||
1435 | .funcs = &vce_v3_0_ip_funcs, | ||
1436 | }, | ||
1437 | #if defined(CONFIG_DRM_AMD_ACP) | ||
1438 | { | ||
1439 | .type = AMD_IP_BLOCK_TYPE_ACP, | ||
1440 | .major = 2, | ||
1441 | .minor = 2, | ||
1442 | .rev = 0, | ||
1443 | .funcs = &acp_ip_funcs, | ||
1444 | }, | ||
1445 | #endif | ||
1446 | }; | ||
1447 | |||
1448 | int vi_set_ip_blocks(struct amdgpu_device *adev) | ||
1449 | { | ||
1450 | if (adev->enable_virtual_display) { | ||
1451 | switch (adev->asic_type) { | ||
1452 | case CHIP_TOPAZ: | ||
1453 | adev->ip_blocks = topaz_ip_blocks_vd; | ||
1454 | adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd); | ||
1455 | break; | ||
1456 | case CHIP_FIJI: | ||
1457 | adev->ip_blocks = fiji_ip_blocks_vd; | ||
1458 | adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd); | ||
1459 | break; | ||
1460 | case CHIP_TONGA: | ||
1461 | adev->ip_blocks = tonga_ip_blocks_vd; | ||
1462 | adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd); | ||
1463 | break; | ||
1464 | case CHIP_POLARIS11: | ||
1465 | case CHIP_POLARIS10: | ||
1466 | adev->ip_blocks = polaris11_ip_blocks_vd; | ||
1467 | adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd); | ||
1468 | break; | ||
1469 | |||
1470 | case CHIP_CARRIZO: | ||
1471 | case CHIP_STONEY: | ||
1472 | adev->ip_blocks = cz_ip_blocks_vd; | ||
1473 | adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd); | ||
1474 | break; | ||
1475 | default: | ||
1476 | /* FIXME: not supported yet */ | ||
1477 | return -EINVAL; | ||
1478 | } | ||
1479 | } else { | ||
1480 | switch (adev->asic_type) { | ||
1481 | case CHIP_TOPAZ: | ||
1482 | adev->ip_blocks = topaz_ip_blocks; | ||
1483 | adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); | ||
1484 | break; | ||
1485 | case CHIP_FIJI: | ||
1486 | adev->ip_blocks = fiji_ip_blocks; | ||
1487 | adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); | ||
1488 | break; | ||
1489 | case CHIP_TONGA: | ||
1490 | adev->ip_blocks = tonga_ip_blocks; | ||
1491 | adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); | ||
1492 | break; | ||
1493 | case CHIP_POLARIS11: | ||
1494 | case CHIP_POLARIS10: | ||
1495 | adev->ip_blocks = polaris11_ip_blocks; | ||
1496 | adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); | ||
1497 | break; | ||
1498 | case CHIP_CARRIZO: | ||
1499 | case CHIP_STONEY: | ||
1500 | adev->ip_blocks = cz_ip_blocks; | ||
1501 | adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); | ||
1502 | break; | ||
1503 | default: | ||
1504 | /* FIXME: not supported yet */ | ||
1505 | return -EINVAL; | ||
1506 | } | ||
1507 | } | ||
1508 | |||
1509 | return 0; | ||
1510 | } | ||
1511 | |||
1512 | #define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044 | 849 | #define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044 |
1513 | #define ATI_REV_ID_FUSE_MACRO__SHIFT 9 | 850 | #define ATI_REV_ID_FUSE_MACRO__SHIFT 9 |
1514 | #define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00 | 851 | #define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00 |
@@ -1593,7 +930,7 @@ static int vi_common_early_init(void *handle) | |||
1593 | break; | 930 | break; |
1594 | case CHIP_TONGA: | 931 | case CHIP_TONGA: |
1595 | adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG; | 932 | adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG; |
1596 | adev->pg_flags = 0; | 933 | adev->pg_flags = AMD_PG_SUPPORT_UVD; |
1597 | adev->external_rev_id = adev->rev_id + 0x14; | 934 | adev->external_rev_id = adev->rev_id + 0x14; |
1598 | break; | 935 | break; |
1599 | case CHIP_POLARIS11: | 936 | case CHIP_POLARIS11: |
@@ -1908,7 +1245,7 @@ static int vi_common_set_powergating_state(void *handle, | |||
1908 | return 0; | 1245 | return 0; |
1909 | } | 1246 | } |
1910 | 1247 | ||
1911 | const struct amd_ip_funcs vi_common_ip_funcs = { | 1248 | static const struct amd_ip_funcs vi_common_ip_funcs = { |
1912 | .name = "vi_common", | 1249 | .name = "vi_common", |
1913 | .early_init = vi_common_early_init, | 1250 | .early_init = vi_common_early_init, |
1914 | .late_init = NULL, | 1251 | .late_init = NULL, |
@@ -1925,3 +1262,110 @@ const struct amd_ip_funcs vi_common_ip_funcs = { | |||
1925 | .set_powergating_state = vi_common_set_powergating_state, | 1262 | .set_powergating_state = vi_common_set_powergating_state, |
1926 | }; | 1263 | }; |
1927 | 1264 | ||
1265 | static const struct amdgpu_ip_block_version vi_common_ip_block = | ||
1266 | { | ||
1267 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1268 | .major = 1, | ||
1269 | .minor = 0, | ||
1270 | .rev = 0, | ||
1271 | .funcs = &vi_common_ip_funcs, | ||
1272 | }; | ||
1273 | |||
1274 | int vi_set_ip_blocks(struct amdgpu_device *adev) | ||
1275 | { | ||
1276 | switch (adev->asic_type) { | ||
1277 | case CHIP_TOPAZ: | ||
1278 | /* topaz has no DCE, UVD, VCE */ | ||
1279 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1280 | amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block); | ||
1281 | amdgpu_ip_block_add(adev, &iceland_ih_ip_block); | ||
1282 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1283 | if (adev->enable_virtual_display) | ||
1284 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1285 | amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); | ||
1286 | amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block); | ||
1287 | break; | ||
1288 | case CHIP_FIJI: | ||
1289 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1290 | amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block); | ||
1291 | amdgpu_ip_block_add(adev, &tonga_ih_ip_block); | ||
1292 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1293 | if (adev->enable_virtual_display) | ||
1294 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1295 | else | ||
1296 | amdgpu_ip_block_add(adev, &dce_v10_1_ip_block); | ||
1297 | amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); | ||
1298 | amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); | ||
1299 | amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block); | ||
1300 | amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); | ||
1301 | break; | ||
1302 | case CHIP_TONGA: | ||
1303 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1304 | amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); | ||
1305 | amdgpu_ip_block_add(adev, &tonga_ih_ip_block); | ||
1306 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1307 | if (adev->enable_virtual_display) | ||
1308 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1309 | else | ||
1310 | amdgpu_ip_block_add(adev, &dce_v10_0_ip_block); | ||
1311 | amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); | ||
1312 | amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); | ||
1313 | amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block); | ||
1314 | amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); | ||
1315 | break; | ||
1316 | case CHIP_POLARIS11: | ||
1317 | case CHIP_POLARIS10: | ||
1318 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1319 | amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block); | ||
1320 | amdgpu_ip_block_add(adev, &tonga_ih_ip_block); | ||
1321 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1322 | if (adev->enable_virtual_display) | ||
1323 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1324 | else | ||
1325 | amdgpu_ip_block_add(adev, &dce_v11_2_ip_block); | ||
1326 | amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); | ||
1327 | amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block); | ||
1328 | amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block); | ||
1329 | amdgpu_ip_block_add(adev, &vce_v3_4_ip_block); | ||
1330 | break; | ||
1331 | case CHIP_CARRIZO: | ||
1332 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1333 | amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); | ||
1334 | amdgpu_ip_block_add(adev, &cz_ih_ip_block); | ||
1335 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1336 | if (adev->enable_virtual_display) | ||
1337 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1338 | else | ||
1339 | amdgpu_ip_block_add(adev, &dce_v11_0_ip_block); | ||
1340 | amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); | ||
1341 | amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); | ||
1342 | amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block); | ||
1343 | amdgpu_ip_block_add(adev, &vce_v3_1_ip_block); | ||
1344 | #if defined(CONFIG_DRM_AMD_ACP) | ||
1345 | amdgpu_ip_block_add(adev, &acp_ip_block); | ||
1346 | #endif | ||
1347 | break; | ||
1348 | case CHIP_STONEY: | ||
1349 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1350 | amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); | ||
1351 | amdgpu_ip_block_add(adev, &cz_ih_ip_block); | ||
1352 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1353 | if (adev->enable_virtual_display) | ||
1354 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1355 | else | ||
1356 | amdgpu_ip_block_add(adev, &dce_v11_0_ip_block); | ||
1357 | amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block); | ||
1358 | amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); | ||
1359 | amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block); | ||
1360 | amdgpu_ip_block_add(adev, &vce_v3_4_ip_block); | ||
1361 | #if defined(CONFIG_DRM_AMD_ACP) | ||
1362 | amdgpu_ip_block_add(adev, &acp_ip_block); | ||
1363 | #endif | ||
1364 | break; | ||
1365 | default: | ||
1366 | /* FIXME: not supported yet */ | ||
1367 | return -EINVAL; | ||
1368 | } | ||
1369 | |||
1370 | return 0; | ||
1371 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index 502094042462..575d7aed5d32 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h | |||
@@ -24,8 +24,6 @@ | |||
24 | #ifndef __VI_H__ | 24 | #ifndef __VI_H__ |
25 | #define __VI_H__ | 25 | #define __VI_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs vi_common_ip_funcs; | ||
28 | |||
29 | void vi_srbm_select(struct amdgpu_device *adev, | 27 | void vi_srbm_select(struct amdgpu_device *adev, |
30 | u32 me, u32 pipe, u32 queue, u32 vmid); | 28 | u32 me, u32 pipe, u32 queue, u32 vmid); |
31 | int vi_set_ip_blocks(struct amdgpu_device *adev); | 29 | int vi_set_ip_blocks(struct amdgpu_device *adev); |
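Taken together, the vi.c and vi.h hunks replace the per-ASIC static amdgpu_ip_block_version arrays with a builder that registers blocks one at a time via amdgpu_ip_block_add(), and each IP now exports an amdgpu_ip_block_version rather than its raw amd_ip_funcs. A condensed sketch of the new shape, with placeholder "example" names for anything not shown in the hunks:

	/* What an IP module exports now (cf. the uvd_v6_0/vce_v3_0 hunks): */
	const struct amdgpu_ip_block_version example_v6_0_ip_block = {
		.type  = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev   = 0,
		.funcs = &example_v6_0_ip_funcs,   /* the IP's static amd_ip_funcs */
	};

	/* ...and in the ASIC's set_ip_blocks(), where order still matters:
	 *	amdgpu_ip_block_add(adev, &example_v6_0_ip_block);
	 */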
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index bec8125bceb0..d1986276dbbd 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h | |||
@@ -84,6 +84,29 @@ enum amd_powergating_state { | |||
84 | AMD_PG_STATE_UNGATE, | 84 | AMD_PG_STATE_UNGATE, |
85 | }; | 85 | }; |
86 | 86 | ||
87 | struct amd_vce_state { | ||
88 | /* vce clocks */ | ||
89 | u32 evclk; | ||
90 | u32 ecclk; | ||
91 | /* gpu clocks */ | ||
92 | u32 sclk; | ||
93 | u32 mclk; | ||
94 | u8 clk_idx; | ||
95 | u8 pstate; | ||
96 | }; | ||
97 | |||
98 | |||
99 | #define AMD_MAX_VCE_LEVELS 6 | ||
100 | |||
101 | enum amd_vce_level { | ||
102 | AMD_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ | ||
103 | AMD_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ | ||
104 | AMD_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ | ||
105 | AMD_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ | ||
106 | AMD_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ | ||
107 | AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ | ||
108 | }; | ||
109 | |||
87 | /* CG flags */ | 110 | /* CG flags */ |
88 | #define AMD_CG_SUPPORT_GFX_MGCG (1 << 0) | 111 | #define AMD_CG_SUPPORT_GFX_MGCG (1 << 0) |
89 | #define AMD_CG_SUPPORT_GFX_MGLS (1 << 1) | 112 | #define AMD_CG_SUPPORT_GFX_MGLS (1 << 1) |
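struct amd_vce_state and enum amd_vce_level above give powerplay and the driver a shared description of VCE clock states. A hypothetical table indexed by level, with made-up clock values, just to show the intended shape:

	static struct amd_vce_state example_vce_states[AMD_MAX_VCE_LEVELS] = {
		[AMD_VCE_LEVEL_AC_ALL] = {
			.evclk = 60000, .ecclk = 60000,		/* VCE clocks */
			.sclk = 80000, .mclk = 150000,		/* GPU clocks */
			.clk_idx = 0, .pstate = 0,
		},
		[AMD_VCE_LEVEL_DC_LL_LOW] = {
			.evclk = 30000, .ecclk = 30000,
			.sclk = 60000, .mclk = 100000,
			.clk_idx = 1, .pstate = 1,
		},
		/* remaining DC levels omitted */
	};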
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h index 3014d4a58c43..a9ef1562f43b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h | |||
@@ -176,6 +176,8 @@ | |||
176 | #define mmSMU1_SMU_SMC_IND_DATA 0x83 | 176 | #define mmSMU1_SMU_SMC_IND_DATA 0x83 |
177 | #define mmSMU2_SMU_SMC_IND_DATA 0x85 | 177 | #define mmSMU2_SMU_SMC_IND_DATA 0x85 |
178 | #define mmSMU3_SMU_SMC_IND_DATA 0x87 | 178 | #define mmSMU3_SMU_SMC_IND_DATA 0x87 |
179 | #define mmSMC_IND_INDEX_11 0x1AC | ||
180 | #define mmSMC_IND_DATA_11 0x1AD | ||
179 | #define ixRCU_UC_EVENTS 0xc0000004 | 181 | #define ixRCU_UC_EVENTS 0xc0000004 |
180 | #define ixRCU_MISC_CTRL 0xc0000010 | 182 | #define ixRCU_MISC_CTRL 0xc0000010 |
181 | #define ixCC_RCU_FUSES 0xc00c0000 | 183 | #define ixCC_RCU_FUSES 0xc00c0000 |
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h index 933917479985..22dd4c2b7290 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h | |||
@@ -87,6 +87,8 @@ | |||
87 | #define mmSMC_IND_DATA_6 0x8d | 87 | #define mmSMC_IND_DATA_6 0x8d |
88 | #define mmSMC_IND_INDEX_7 0x8e | 88 | #define mmSMC_IND_INDEX_7 0x8e |
89 | #define mmSMC_IND_DATA_7 0x8f | 89 | #define mmSMC_IND_DATA_7 0x8f |
90 | #define mmSMC_IND_INDEX_11 0x1AC | ||
91 | #define mmSMC_IND_DATA_11 0x1AD | ||
90 | #define mmSMC_IND_ACCESS_CNTL 0x92 | 92 | #define mmSMC_IND_ACCESS_CNTL 0x92 |
91 | #define mmSMC_MESSAGE_0 0x94 | 93 | #define mmSMC_MESSAGE_0 0x94 |
92 | #define mmSMC_RESP_0 0x95 | 94 | #define mmSMC_RESP_0 0x95 |
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h index 44b1855cb8df..eca2b851f25f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h | |||
@@ -90,6 +90,8 @@ | |||
90 | #define mmSMC_IND_DATA_6 0x8d | 90 | #define mmSMC_IND_DATA_6 0x8d |
91 | #define mmSMC_IND_INDEX_7 0x8e | 91 | #define mmSMC_IND_INDEX_7 0x8e |
92 | #define mmSMC_IND_DATA_7 0x8f | 92 | #define mmSMC_IND_DATA_7 0x8f |
93 | #define mmSMC_IND_INDEX_11 0x1AC | ||
94 | #define mmSMC_IND_DATA_11 0x1AD | ||
93 | #define mmSMC_IND_ACCESS_CNTL 0x92 | 95 | #define mmSMC_IND_ACCESS_CNTL 0x92 |
94 | #define mmSMC_MESSAGE_0 0x94 | 96 | #define mmSMC_MESSAGE_0 0x94 |
95 | #define mmSMC_RESP_0 0x95 | 97 | #define mmSMC_RESP_0 0x95 |
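The three SMU header hunks add the SMC_IND_INDEX_11/SMC_IND_DATA_11 pair (0x1AC/0x1AD) that the vi.c hunks switch to. The access pattern itself is unchanged: write the register offset to the index register and move data through the data register, all under smc_idx_lock. A read-modify-write sketch built from the same calls shown above; the helper name is illustrative only.

	static void example_smc_rmw(struct amdgpu_device *adev, u32 reg,
				    u32 clr, u32 set)
	{
		unsigned long flags;
		u32 v;

		spin_lock_irqsave(&adev->smc_idx_lock, flags);
		WREG32(mmSMC_IND_INDEX_11, reg);
		v = RREG32(mmSMC_IND_DATA_11);
		v = (v & ~clr) | set;
		WREG32(mmSMC_IND_INDEX_11, reg);
		WREG32(mmSMC_IND_DATA_11, v);
		spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	}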
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index df7c18b6a02a..e4a1697ec1d3 100755 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h | |||
@@ -106,6 +106,7 @@ enum cgs_ucode_id { | |||
106 | CGS_UCODE_ID_CP_MEC_JT2, | 106 | CGS_UCODE_ID_CP_MEC_JT2, |
107 | CGS_UCODE_ID_GMCON_RENG, | 107 | CGS_UCODE_ID_GMCON_RENG, |
108 | CGS_UCODE_ID_RLC_G, | 108 | CGS_UCODE_ID_RLC_G, |
109 | CGS_UCODE_ID_STORAGE, | ||
109 | CGS_UCODE_ID_MAXIMUM, | 110 | CGS_UCODE_ID_MAXIMUM, |
110 | }; | 111 | }; |
111 | 112 | ||
@@ -619,6 +620,8 @@ typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device, | |||
619 | typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device, | 620 | typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device, |
620 | struct cgs_system_info *sys_info); | 621 | struct cgs_system_info *sys_info); |
621 | 622 | ||
623 | typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device); | ||
624 | |||
622 | struct cgs_ops { | 625 | struct cgs_ops { |
623 | /* memory management calls (similar to KFD interface) */ | 626 | /* memory management calls (similar to KFD interface) */ |
624 | cgs_gpu_mem_info_t gpu_mem_info; | 627 | cgs_gpu_mem_info_t gpu_mem_info; |
@@ -670,6 +673,7 @@ struct cgs_ops { | |||
670 | cgs_call_acpi_method call_acpi_method; | 673 | cgs_call_acpi_method call_acpi_method; |
671 | /* get system info */ | 674 | /* get system info */ |
672 | cgs_query_system_info query_system_info; | 675 | cgs_query_system_info query_system_info; |
676 | cgs_is_virtualization_enabled_t is_virtualization_enabled; | ||
673 | }; | 677 | }; |
674 | 678 | ||
675 | struct cgs_os_ops; /* To be define in OS-specific CGS header */ | 679 | struct cgs_os_ops; /* To be define in OS-specific CGS header */ |
@@ -773,4 +777,6 @@ struct cgs_device | |||
773 | CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \ | 777 | CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \ |
774 | resource_base) | 778 | resource_base) |
775 | 779 | ||
780 | #define cgs_is_virtualization_enabled(cgs_device) \ | ||
781 | CGS_CALL(is_virtualization_enabled, cgs_device) | ||
776 | #endif /* _CGS_COMMON_H */ | 782 | #endif /* _CGS_COMMON_H */ |
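cgs_is_virtualization_enabled_t above follows the usual CGS pattern: a typedef, a slot in struct cgs_ops, and a CGS_CALL wrapper macro. A sketch of how a backend could fill the slot; the implementation body and its name are assumptions, not part of this patch:

	static int example_cgs_is_virtualization_enabled(void *cgs_device)
	{
		/* a real backend would report the device's virtualization mode */
		return 0;
	}

	static const struct cgs_ops example_cgs_ops = {
		/* ...memory, register and firmware callbacks elided... */
		.is_virtualization_enabled = example_cgs_is_virtualization_enabled,
	};

	/* Consumers then go through the macro added above:
	 *	if (cgs_is_virtualization_enabled(cgs_device))
	 *		...
	 */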
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 7174f7a68266..0b1f2205c2f1 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c | |||
@@ -436,7 +436,8 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state) | |||
436 | } | 436 | } |
437 | } | 437 | } |
438 | 438 | ||
439 | int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output) | 439 | static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, |
440 | void *input, void *output) | ||
440 | { | 441 | { |
441 | int ret = 0; | 442 | int ret = 0; |
442 | struct pp_instance *pp_handle; | 443 | struct pp_instance *pp_handle; |
@@ -475,7 +476,7 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, | |||
475 | return ret; | 476 | return ret; |
476 | } | 477 | } |
477 | 478 | ||
478 | enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) | 479 | static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) |
479 | { | 480 | { |
480 | struct pp_hwmgr *hwmgr; | 481 | struct pp_hwmgr *hwmgr; |
481 | struct pp_power_state *state; | 482 | struct pp_power_state *state; |
@@ -820,6 +821,21 @@ static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value) | |||
820 | return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value); | 821 | return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value); |
821 | } | 822 | } |
822 | 823 | ||
824 | static struct amd_vce_state* | ||
825 | pp_dpm_get_vce_clock_state(void *handle, unsigned idx) | ||
826 | { | ||
827 | struct pp_hwmgr *hwmgr; | ||
828 | |||
829 | if (handle) { | ||
830 | hwmgr = ((struct pp_instance *)handle)->hwmgr; | ||
831 | |||
832 | if (hwmgr && idx < hwmgr->num_vce_state_tables) | ||
833 | return &hwmgr->vce_states[idx]; | ||
834 | } | ||
835 | |||
836 | return NULL; | ||
837 | } | ||
838 | |||
823 | const struct amd_powerplay_funcs pp_dpm_funcs = { | 839 | const struct amd_powerplay_funcs pp_dpm_funcs = { |
824 | .get_temperature = pp_dpm_get_temperature, | 840 | .get_temperature = pp_dpm_get_temperature, |
825 | .load_firmware = pp_dpm_load_fw, | 841 | .load_firmware = pp_dpm_load_fw, |
@@ -846,6 +862,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { | |||
846 | .get_mclk_od = pp_dpm_get_mclk_od, | 862 | .get_mclk_od = pp_dpm_get_mclk_od, |
847 | .set_mclk_od = pp_dpm_set_mclk_od, | 863 | .set_mclk_od = pp_dpm_set_mclk_od, |
848 | .read_sensor = pp_dpm_read_sensor, | 864 | .read_sensor = pp_dpm_read_sensor, |
865 | .get_vce_clock_state = pp_dpm_get_vce_clock_state, | ||
849 | }; | 866 | }; |
850 | 867 | ||
851 | static int amd_pp_instance_init(struct amd_pp_init *pp_init, | 868 | static int amd_pp_instance_init(struct amd_pp_init *pp_init, |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 960424913496..4b14f259a147 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | |||
@@ -66,7 +66,7 @@ static const struct cz_power_state *cast_const_PhwCzPowerState( | |||
66 | return (struct cz_power_state *)hw_ps; | 66 | return (struct cz_power_state *)hw_ps; |
67 | } | 67 | } |
68 | 68 | ||
69 | uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr, | 69 | static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr, |
70 | uint32_t clock, uint32_t msg) | 70 | uint32_t clock, uint32_t msg) |
71 | { | 71 | { |
72 | int i = 0; | 72 | int i = 0; |
@@ -1017,7 +1017,7 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input, | |||
1017 | return 0; | 1017 | return 0; |
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input, | 1020 | static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input, |
1021 | void *output, void *storage, int result) | 1021 | void *output, void *storage, int result) |
1022 | { | 1022 | { |
1023 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 1023 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
@@ -1225,7 +1225,7 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) | |||
1225 | return 0; | 1225 | return 0; |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) | 1228 | static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) |
1229 | { | 1229 | { |
1230 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 1230 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
1231 | 1231 | ||
@@ -1239,7 +1239,7 @@ int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) | |||
1239 | return 0; | 1239 | return 0; |
1240 | } | 1240 | } |
1241 | 1241 | ||
1242 | int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) | 1242 | static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) |
1243 | { | 1243 | { |
1244 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 1244 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
1245 | struct phm_clock_voltage_dependency_table *table = | 1245 | struct phm_clock_voltage_dependency_table *table = |
@@ -1277,7 +1277,7 @@ int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) | |||
1277 | return 0; | 1277 | return 0; |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) | 1280 | static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) |
1281 | { | 1281 | { |
1282 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 1282 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
1283 | 1283 | ||
@@ -1533,7 +1533,7 @@ static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr, | |||
1533 | return result; | 1533 | return result; |
1534 | } | 1534 | } |
1535 | 1535 | ||
1536 | int cz_get_power_state_size(struct pp_hwmgr *hwmgr) | 1536 | static int cz_get_power_state_size(struct pp_hwmgr *hwmgr) |
1537 | { | 1537 | { |
1538 | return sizeof(struct cz_power_state); | 1538 | return sizeof(struct cz_power_state); |
1539 | } | 1539 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c index 1944d289f846..f5e8fda964f7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "linux/delay.h" | 25 | #include "linux/delay.h" |
26 | #include "hwmgr.h" | 26 | #include "hwmgr.h" |
27 | #include "amd_acpi.h" | 27 | #include "amd_acpi.h" |
28 | #include "pp_acpi.h" | ||
28 | 29 | ||
29 | bool acpi_atcs_functions_supported(void *device, uint32_t index) | 30 | bool acpi_atcs_functions_supported(void *device, uint32_t index) |
30 | { | 31 | { |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 7de701d8a450..baf0f3d4c2f0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | |||
@@ -131,7 +131,7 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps) | |||
131 | /** | 131 | /** |
132 | * Private Function to get the PowerPlay Table Address. | 132 | * Private Function to get the PowerPlay Table Address. |
133 | */ | 133 | */ |
134 | const void *get_powerplay_table(struct pp_hwmgr *hwmgr) | 134 | static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) |
135 | { | 135 | { |
136 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | 136 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
137 | 137 | ||
@@ -1049,7 +1049,7 @@ static int check_powerplay_tables( | |||
1049 | return 0; | 1049 | return 0; |
1050 | } | 1050 | } |
1051 | 1051 | ||
1052 | int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr) | 1052 | static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr) |
1053 | { | 1053 | { |
1054 | int result = 0; | 1054 | int result = 0; |
1055 | const ATOM_Tonga_POWERPLAYTABLE *powerplay_table; | 1055 | const ATOM_Tonga_POWERPLAYTABLE *powerplay_table; |
@@ -1100,7 +1100,7 @@ int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr) | |||
1100 | return result; | 1100 | return result; |
1101 | } | 1101 | } |
1102 | 1102 | ||
1103 | int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr) | 1103 | static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr) |
1104 | { | 1104 | { |
1105 | struct phm_ppt_v1_information *pp_table_information = | 1105 | struct phm_ppt_v1_information *pp_table_information = |
1106 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1106 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
@@ -1211,7 +1211,7 @@ static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr) | |||
1211 | } | 1211 | } |
1212 | 1212 | ||
1213 | static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i, | 1213 | static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i, |
1214 | struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag) | 1214 | struct amd_vce_state *vce_state, void **clock_info, uint32_t *flag) |
1215 | { | 1215 | { |
1216 | const ATOM_Tonga_VCE_State_Record *vce_state_record; | 1216 | const ATOM_Tonga_VCE_State_Record *vce_state_record; |
1217 | ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record; | 1217 | ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record; |
@@ -1315,7 +1315,7 @@ int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr, | |||
1315 | 1315 | ||
1316 | hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr); | 1316 | hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr); |
1317 | 1317 | ||
1318 | if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) { | 1318 | if ((i != 0) && (i <= AMD_MAX_VCE_LEVELS)) { |
1319 | for (j = 0; j < i; j++) | 1319 | for (j = 0; j < i; j++) |
1320 | ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags); | 1320 | ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags); |
1321 | } | 1321 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index ccf7ebeaf892..a4e9cf429e62 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | |||
@@ -1507,7 +1507,7 @@ static int init_phase_shedding_table(struct pp_hwmgr *hwmgr, | |||
1507 | return 0; | 1507 | return 0; |
1508 | } | 1508 | } |
1509 | 1509 | ||
1510 | int get_number_of_vce_state_table_entries( | 1510 | static int get_number_of_vce_state_table_entries( |
1511 | struct pp_hwmgr *hwmgr) | 1511 | struct pp_hwmgr *hwmgr) |
1512 | { | 1512 | { |
1513 | const ATOM_PPLIB_POWERPLAYTABLE *table = | 1513 | const ATOM_PPLIB_POWERPLAYTABLE *table = |
@@ -1521,9 +1521,9 @@ int get_number_of_vce_state_table_entries( | |||
1521 | return 0; | 1521 | return 0; |
1522 | } | 1522 | } |
1523 | 1523 | ||
1524 | int get_vce_state_table_entry(struct pp_hwmgr *hwmgr, | 1524 | static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr, |
1525 | unsigned long i, | 1525 | unsigned long i, |
1526 | struct pp_vce_state *vce_state, | 1526 | struct amd_vce_state *vce_state, |
1527 | void **clock_info, | 1527 | void **clock_info, |
1528 | unsigned long *flag) | 1528 | unsigned long *flag) |
1529 | { | 1529 | { |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 6eb6db199250..cf2ee93d8475 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | |||
@@ -75,7 +75,7 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) | |||
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | 77 | ||
78 | int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) | 78 | static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) |
79 | { | 79 | { |
80 | if (phm_cf_want_uvd_power_gating(hwmgr)) { | 80 | if (phm_cf_want_uvd_power_gating(hwmgr)) { |
81 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 81 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
@@ -91,7 +91,7 @@ int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) | |||
91 | return 0; | 91 | return 0; |
92 | } | 92 | } |
93 | 93 | ||
94 | int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) | 94 | static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) |
95 | { | 95 | { |
96 | if (phm_cf_want_vce_power_gating(hwmgr)) | 96 | if (phm_cf_want_vce_power_gating(hwmgr)) |
97 | return smum_send_msg_to_smc(hwmgr->smumgr, | 97 | return smum_send_msg_to_smc(hwmgr->smumgr, |
@@ -99,7 +99,7 @@ int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) | |||
99 | return 0; | 99 | return 0; |
100 | } | 100 | } |
101 | 101 | ||
102 | int smu7_powerup_vce(struct pp_hwmgr *hwmgr) | 102 | static int smu7_powerup_vce(struct pp_hwmgr *hwmgr) |
103 | { | 103 | { |
104 | if (phm_cf_want_vce_power_gating(hwmgr)) | 104 | if (phm_cf_want_vce_power_gating(hwmgr)) |
105 | return smum_send_msg_to_smc(hwmgr->smumgr, | 105 | return smum_send_msg_to_smc(hwmgr->smumgr, |
@@ -107,7 +107,7 @@ int smu7_powerup_vce(struct pp_hwmgr *hwmgr) | |||
107 | return 0; | 107 | return 0; |
108 | } | 108 | } |
109 | 109 | ||
110 | int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) | 110 | static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) |
111 | { | 111 | { |
112 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 112 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
113 | PHM_PlatformCaps_SamuPowerGating)) | 113 | PHM_PlatformCaps_SamuPowerGating)) |
@@ -116,7 +116,7 @@ int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) | |||
116 | return 0; | 116 | return 0; |
117 | } | 117 | } |
118 | 118 | ||
119 | int smu7_powerup_samu(struct pp_hwmgr *hwmgr) | 119 | static int smu7_powerup_samu(struct pp_hwmgr *hwmgr) |
120 | { | 120 | { |
121 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 121 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
122 | PHM_PlatformCaps_SamuPowerGating)) | 122 | PHM_PlatformCaps_SamuPowerGating)) |
@@ -149,15 +149,21 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) | |||
149 | if (bgate) { | 149 | if (bgate) { |
150 | cgs_set_clockgating_state(hwmgr->device, | 150 | cgs_set_clockgating_state(hwmgr->device, |
151 | AMD_IP_BLOCK_TYPE_UVD, | 151 | AMD_IP_BLOCK_TYPE_UVD, |
152 | AMD_CG_STATE_GATE); | 152 | AMD_CG_STATE_UNGATE); |
153 | cgs_set_powergating_state(hwmgr->device, | ||
154 | AMD_IP_BLOCK_TYPE_UVD, | ||
155 | AMD_PG_STATE_GATE); | ||
153 | smu7_update_uvd_dpm(hwmgr, true); | 156 | smu7_update_uvd_dpm(hwmgr, true); |
154 | smu7_powerdown_uvd(hwmgr); | 157 | smu7_powerdown_uvd(hwmgr); |
155 | } else { | 158 | } else { |
156 | smu7_powerup_uvd(hwmgr); | 159 | smu7_powerup_uvd(hwmgr); |
157 | smu7_update_uvd_dpm(hwmgr, false); | 160 | cgs_set_powergating_state(hwmgr->device, |
161 | AMD_IP_BLOCK_TYPE_UVD, | ||
162 | AMD_CG_STATE_UNGATE); | ||
158 | cgs_set_clockgating_state(hwmgr->device, | 163 | cgs_set_clockgating_state(hwmgr->device, |
159 | AMD_IP_BLOCK_TYPE_UVD, | 164 | AMD_IP_BLOCK_TYPE_UVD, |
160 | AMD_CG_STATE_UNGATE); | 165 | AMD_CG_STATE_GATE); |
166 | smu7_update_uvd_dpm(hwmgr, false); | ||
161 | } | 167 | } |
162 | 168 | ||
163 | return 0; | 169 | return 0; |
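The reordered smu7_powergate_uvd() hunk above is sensitive to call order: when gating, clock gating is released (AMD_CG_STATE_UNGATE) and power gating asserted before DPM is updated and the block is powered down; when ungating, the block is powered up first and clock gating re-enabled before DPM is updated. A standalone stub sketch that only reproduces that ordering (hypothetical names, printf stand-ins for the cgs_* and smu7_* calls; not driver code):

#include <stdbool.h>
#include <stdio.h>

/* printf stand-ins for cgs_set_clockgating_state(), cgs_set_powergating_state(),
 * smu7_update_uvd_dpm() and smu7_powerdown/powerup_uvd(); they only show order. */
static void set_clockgating(const char *state) { printf("clockgating  -> %s\n", state); }
static void set_powergating(const char *state) { printf("powergating  -> %s\n", state); }
static void update_dpm(bool gate)              { printf("update dpm   -> %s\n", gate ? "gate" : "ungate"); }
static void power_block(bool up)               { printf("power block  -> %s\n", up ? "up" : "down"); }

/* Same call order as the rewritten hunk above. */
static void powergate_uvd(bool bgate)
{
	if (bgate) {
		set_clockgating("UNGATE");
		set_powergating("GATE");
		update_dpm(true);
		power_block(false);
	} else {
		power_block(true);
		set_powergating("UNGATE");
		set_clockgating("GATE");
		update_dpm(false);
	}
}

int main(void)
{
	puts("-- gate --");
	powergate_uvd(true);
	puts("-- ungate --");
	powergate_uvd(false);
	return 0;
}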
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 609996c84ad5..073e0bfa22a0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -89,7 +89,7 @@ enum DPM_EVENT_SRC { | |||
89 | 89 | ||
90 | static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic); | 90 | static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic); |
91 | 91 | ||
92 | struct smu7_power_state *cast_phw_smu7_power_state( | 92 | static struct smu7_power_state *cast_phw_smu7_power_state( |
93 | struct pp_hw_power_state *hw_ps) | 93 | struct pp_hw_power_state *hw_ps) |
94 | { | 94 | { |
95 | PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), | 95 | PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), |
@@ -99,7 +99,7 @@ struct smu7_power_state *cast_phw_smu7_power_state( | |||
99 | return (struct smu7_power_state *)hw_ps; | 99 | return (struct smu7_power_state *)hw_ps; |
100 | } | 100 | } |
101 | 101 | ||
102 | const struct smu7_power_state *cast_const_phw_smu7_power_state( | 102 | static const struct smu7_power_state *cast_const_phw_smu7_power_state( |
103 | const struct pp_hw_power_state *hw_ps) | 103 | const struct pp_hw_power_state *hw_ps) |
104 | { | 104 | { |
105 | PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), | 105 | PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), |
@@ -115,7 +115,7 @@ const struct smu7_power_state *cast_const_phw_smu7_power_state( | |||
115 | * @param hwmgr the address of the powerplay hardware manager. | 115 | * @param hwmgr the address of the powerplay hardware manager. |
116 | * @return always 0 | 116 | * @return always 0 |
117 | */ | 117 | */ |
118 | int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr) | 118 | static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr) |
119 | { | 119 | { |
120 | cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); | 120 | cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); |
121 | 121 | ||
@@ -124,7 +124,7 @@ int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr) | |||
124 | return 0; | 124 | return 0; |
125 | } | 125 | } |
126 | 126 | ||
127 | uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr) | 127 | static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr) |
128 | { | 128 | { |
129 | uint32_t speedCntl = 0; | 129 | uint32_t speedCntl = 0; |
130 | 130 | ||
@@ -135,7 +135,7 @@ uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr) | |||
135 | PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); | 135 | PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); |
136 | } | 136 | } |
137 | 137 | ||
138 | int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) | 138 | static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) |
139 | { | 139 | { |
140 | uint32_t link_width; | 140 | uint32_t link_width; |
141 | 141 | ||
@@ -155,7 +155,7 @@ int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) | |||
155 | * @param pHwMgr the address of the powerplay hardware manager. | 155 | * @param pHwMgr the address of the powerplay hardware manager. |
156 | * @return always PP_Result_OK | 156 | * @return always PP_Result_OK |
157 | */ | 157 | */ |
158 | int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) | 158 | static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) |
159 | { | 159 | { |
160 | if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) | 160 | if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) |
161 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable); | 161 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable); |
@@ -802,7 +802,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) | |||
802 | return 0; | 802 | return 0; |
803 | } | 803 | } |
804 | 804 | ||
805 | int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) | 805 | static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) |
806 | { | 806 | { |
807 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 807 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
808 | 808 | ||
@@ -1153,7 +1153,7 @@ static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) | |||
1153 | return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); | 1153 | return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr) | 1156 | static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr) |
1157 | { | 1157 | { |
1158 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 1158 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1159 | data->pcie_performance_request = true; | 1159 | data->pcie_performance_request = true; |
@@ -1161,7 +1161,7 @@ int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr) | |||
1161 | return 0; | 1161 | return 0; |
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) | 1164 | static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) |
1165 | { | 1165 | { |
1166 | int tmp_result = 0; | 1166 | int tmp_result = 0; |
1167 | int result = 0; | 1167 | int result = 0; |
@@ -1352,6 +1352,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
1352 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 1352 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1353 | struct phm_ppt_v1_information *table_info = | 1353 | struct phm_ppt_v1_information *table_info = |
1354 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1354 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1355 | struct cgs_system_info sys_info = {0}; | ||
1356 | int result; | ||
1355 | 1357 | ||
1356 | data->dll_default_on = false; | 1358 | data->dll_default_on = false; |
1357 | data->mclk_dpm0_activity_target = 0xa; | 1359 | data->mclk_dpm0_activity_target = 0xa; |
@@ -1439,6 +1441,18 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
1439 | data->pcie_lane_performance.min = 16; | 1441 | data->pcie_lane_performance.min = 16; |
1440 | data->pcie_lane_power_saving.max = 0; | 1442 | data->pcie_lane_power_saving.max = 0; |
1441 | data->pcie_lane_power_saving.min = 16; | 1443 | data->pcie_lane_power_saving.min = 16; |
1444 | |||
1445 | sys_info.size = sizeof(struct cgs_system_info); | ||
1446 | sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; | ||
1447 | result = cgs_query_system_info(hwmgr->device, &sys_info); | ||
1448 | if (!result) { | ||
1449 | if (sys_info.value & AMD_PG_SUPPORT_UVD) | ||
1450 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | ||
1451 | PHM_PlatformCaps_UVDPowerGating); | ||
1452 | if (sys_info.value & AMD_PG_SUPPORT_VCE) | ||
1453 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | ||
1454 | PHM_PlatformCaps_VCEPowerGating); | ||
1455 | } | ||
1442 | } | 1456 | } |
1443 | 1457 | ||
1444 | /** | 1458 | /** |
@@ -1864,7 +1878,7 @@ static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr) | |||
1864 | return 0; | 1878 | return 0; |
1865 | } | 1879 | } |
1866 | 1880 | ||
1867 | int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr) | 1881 | static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr) |
1868 | { | 1882 | { |
1869 | struct phm_ppt_v1_information *table_info = | 1883 | struct phm_ppt_v1_information *table_info = |
1870 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1884 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
@@ -2253,7 +2267,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) | |||
2253 | return 0; | 2267 | return 0; |
2254 | } | 2268 | } |
2255 | 2269 | ||
2256 | int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | 2270 | static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) |
2257 | { | 2271 | { |
2258 | struct smu7_hwmgr *data; | 2272 | struct smu7_hwmgr *data; |
2259 | int result; | 2273 | int result; |
@@ -3672,14 +3686,16 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f | |||
3672 | PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); | 3686 | PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); |
3673 | } | 3687 | } |
3674 | 3688 | ||
3675 | int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) | 3689 | static int |
3690 | smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) | ||
3676 | { | 3691 | { |
3677 | PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; | 3692 | PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; |
3678 | 3693 | ||
3679 | return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; | 3694 | return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; |
3680 | } | 3695 | } |
3681 | 3696 | ||
3682 | int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) | 3697 | static int |
3698 | smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) | ||
3683 | { | 3699 | { |
3684 | uint32_t num_active_displays = 0; | 3700 | uint32_t num_active_displays = 0; |
3685 | struct cgs_display_info info = {0}; | 3701 | struct cgs_display_info info = {0}; |
@@ -3701,7 +3717,7 @@ int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) | |||
3701 | * @param hwmgr the address of the powerplay hardware manager. | 3717 | * @param hwmgr the address of the powerplay hardware manager. |
3702 | * @return always OK | 3718 | * @return always OK |
3703 | */ | 3719 | */ |
3704 | int smu7_program_display_gap(struct pp_hwmgr *hwmgr) | 3720 | static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) |
3705 | { | 3721 | { |
3706 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 3722 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
3707 | uint32_t num_active_displays = 0; | 3723 | uint32_t num_active_displays = 0; |
@@ -3751,7 +3767,7 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr) | |||
3751 | return 0; | 3767 | return 0; |
3752 | } | 3768 | } |
3753 | 3769 | ||
3754 | int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) | 3770 | static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) |
3755 | { | 3771 | { |
3756 | return smu7_program_display_gap(hwmgr); | 3772 | return smu7_program_display_gap(hwmgr); |
3757 | } | 3773 | } |
@@ -3775,13 +3791,14 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f | |||
3775 | PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); | 3791 | PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); |
3776 | } | 3792 | } |
3777 | 3793 | ||
3778 | int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, | 3794 | static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, |
3779 | const void *thermal_interrupt_info) | 3795 | const void *thermal_interrupt_info) |
3780 | { | 3796 | { |
3781 | return 0; | 3797 | return 0; |
3782 | } | 3798 | } |
3783 | 3799 | ||
3784 | bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) | 3800 | static bool |
3801 | smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) | ||
3785 | { | 3802 | { |
3786 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 3803 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
3787 | bool is_update_required = false; | 3804 | bool is_update_required = false; |
@@ -3810,7 +3827,9 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev | |||
3810 | (pl1->pcie_lane == pl2->pcie_lane)); | 3827 | (pl1->pcie_lane == pl2->pcie_lane)); |
3811 | } | 3828 | } |
3812 | 3829 | ||
3813 | int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) | 3830 | static int smu7_check_states_equal(struct pp_hwmgr *hwmgr, |
3831 | const struct pp_hw_power_state *pstate1, | ||
3832 | const struct pp_hw_power_state *pstate2, bool *equal) | ||
3814 | { | 3833 | { |
3815 | const struct smu7_power_state *psa; | 3834 | const struct smu7_power_state *psa; |
3816 | const struct smu7_power_state *psb; | 3835 | const struct smu7_power_state *psb; |
@@ -3843,7 +3862,7 @@ int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_sta | |||
3843 | return 0; | 3862 | return 0; |
3844 | } | 3863 | } |
3845 | 3864 | ||
3846 | int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr) | 3865 | static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr) |
3847 | { | 3866 | { |
3848 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 3867 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
3849 | 3868 | ||
@@ -3972,7 +3991,7 @@ static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr) | |||
3972 | return 0; | 3991 | return 0; |
3973 | } | 3992 | } |
3974 | 3993 | ||
3975 | int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) | 3994 | static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) |
3976 | { | 3995 | { |
3977 | int tmp_result, result = 0; | 3996 | int tmp_result, result = 0; |
3978 | 3997 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index 3fb5e57a378b..eb3e83d7af31 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h | |||
@@ -359,6 +359,7 @@ struct amd_powerplay_funcs { | |||
359 | int (*get_mclk_od)(void *handle); | 359 | int (*get_mclk_od)(void *handle); |
360 | int (*set_mclk_od)(void *handle, uint32_t value); | 360 | int (*set_mclk_od)(void *handle, uint32_t value); |
361 | int (*read_sensor)(void *handle, int idx, int32_t *value); | 361 | int (*read_sensor)(void *handle, int idx, int32_t *value); |
362 | struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx); | ||
362 | }; | 363 | }; |
363 | 364 | ||
364 | struct amd_powerplay { | 365 | struct amd_powerplay { |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 4f0fedd1e9d3..e38b999e3235 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | |||
@@ -367,7 +367,7 @@ struct pp_table_func { | |||
367 | int (*pptable_get_vce_state_table_entry)( | 367 | int (*pptable_get_vce_state_table_entry)( |
368 | struct pp_hwmgr *hwmgr, | 368 | struct pp_hwmgr *hwmgr, |
369 | unsigned long i, | 369 | unsigned long i, |
370 | struct pp_vce_state *vce_state, | 370 | struct amd_vce_state *vce_state, |
371 | void **clock_info, | 371 | void **clock_info, |
372 | unsigned long *flag); | 372 | unsigned long *flag); |
373 | }; | 373 | }; |
@@ -586,18 +586,6 @@ struct phm_microcode_version_info { | |||
586 | uint32_t NB; | 586 | uint32_t NB; |
587 | }; | 587 | }; |
588 | 588 | ||
589 | #define PP_MAX_VCE_LEVELS 6 | ||
590 | |||
591 | enum PP_VCE_LEVEL { | ||
592 | PP_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ | ||
593 | PP_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ | ||
594 | PP_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ | ||
595 | PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ | ||
596 | PP_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ | ||
597 | PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ | ||
598 | }; | ||
599 | |||
600 | |||
601 | enum PP_TABLE_VERSION { | 589 | enum PP_TABLE_VERSION { |
602 | PP_TABLE_V0 = 0, | 590 | PP_TABLE_V0 = 0, |
603 | PP_TABLE_V1, | 591 | PP_TABLE_V1, |
@@ -620,7 +608,7 @@ struct pp_hwmgr { | |||
620 | void *hardcode_pp_table; | 608 | void *hardcode_pp_table; |
621 | bool need_pp_table_upload; | 609 | bool need_pp_table_upload; |
622 | 610 | ||
623 | struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS]; | 611 | struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS]; |
624 | uint32_t num_vce_state_tables; | 612 | uint32_t num_vce_state_tables; |
625 | 613 | ||
626 | enum amd_dpm_forced_level dpm_level; | 614 | enum amd_dpm_forced_level dpm_level; |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h index 9ceaed9ac52a..827860fffe78 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h +++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h | |||
@@ -156,15 +156,6 @@ struct pp_power_state { | |||
156 | struct pp_hw_power_state hardware; | 156 | struct pp_hw_power_state hardware; |
157 | }; | 157 | }; |
158 | 158 | ||
159 | |||
160 | /*Structure to hold a VCE state entry*/ | ||
161 | struct pp_vce_state { | ||
162 | uint32_t evclk; | ||
163 | uint32_t ecclk; | ||
164 | uint32_t sclk; | ||
165 | uint32_t mclk; | ||
166 | }; | ||
167 | |||
168 | enum PP_MMProfilingState { | 159 | enum PP_MMProfilingState { |
169 | PP_MMProfilingState_NA = 0, | 160 | PP_MMProfilingState_NA = 0, |
170 | PP_MMProfilingState_Started, | 161 | PP_MMProfilingState_Started, |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h index 3df5de2cdab0..8fe8ba9434ff 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h | |||
@@ -21,9 +21,6 @@ | |||
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
24 | extern bool acpi_atcs_functions_supported(void *device, | 24 | bool acpi_atcs_functions_supported(void *device, uint32_t index); |
25 | uint32_t index); | 25 | int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise); |
26 | extern int acpi_pcie_perf_request(void *device, | 26 | bool acpi_atcs_notify_pcie_device_ready(void *device); |
27 | uint8_t perf_req, | ||
28 | bool advertise); | ||
29 | extern bool acpi_atcs_notify_pcie_device_ready(void *device); | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index 76310ac7ef0d..34523fe6ed6f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c | |||
@@ -2049,7 +2049,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) | |||
2049 | return 0; | 2049 | return 0; |
2050 | } | 2050 | } |
2051 | 2051 | ||
2052 | int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) | 2052 | static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) |
2053 | { | 2053 | { |
2054 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 2054 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
2055 | 2055 | ||
@@ -2125,7 +2125,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) | |||
2125 | return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); | 2125 | return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); |
2126 | } | 2126 | } |
2127 | } | 2127 | } |
2128 | printk("cant't get the offset of type %x member %x \n", type, member); | 2128 | printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); |
2129 | return 0; | 2129 | return 0; |
2130 | } | 2130 | } |
2131 | 2131 | ||
@@ -2150,7 +2150,7 @@ uint32_t fiji_get_mac_definition(uint32_t value) | |||
2150 | return SMU73_MAX_LEVELS_MVDD; | 2150 | return SMU73_MAX_LEVELS_MVDD; |
2151 | } | 2151 | } |
2152 | 2152 | ||
2153 | printk("cant't get the mac of %x \n", value); | 2153 | printk(KERN_WARNING "can't get the mac of %x\n", value); |
2154 | return 0; | 2154 | return 0; |
2155 | } | 2155 | } |
2156 | 2156 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 02fe1df855a9..b86e48fb40d1 100755 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | |||
@@ -159,7 +159,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) | |||
159 | return result; | 159 | return result; |
160 | } | 160 | } |
161 | 161 | ||
162 | int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) | 162 | static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) |
163 | { | 163 | { |
164 | int i, result = -1; | 164 | int i, result = -1; |
165 | uint32_t reg, data; | 165 | uint32_t reg, data; |
@@ -224,7 +224,7 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) | |||
224 | return result; | 224 | return result; |
225 | } | 225 | } |
226 | 226 | ||
227 | int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) | 227 | static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) |
228 | { | 228 | { |
229 | int result = 0; | 229 | int result = 0; |
230 | uint32_t table_start; | 230 | uint32_t table_start; |
@@ -260,7 +260,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) | |||
260 | return result; | 260 | return result; |
261 | } | 261 | } |
262 | 262 | ||
263 | int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) | 263 | static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) |
264 | { | 264 | { |
265 | int32_t vr_config; | 265 | int32_t vr_config; |
266 | uint32_t table_start; | 266 | uint32_t table_start; |
@@ -299,7 +299,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) | |||
299 | } | 299 | } |
300 | 300 | ||
301 | /* Work in Progress */ | 301 | /* Work in Progress */ |
302 | int fiji_restore_vft_table(struct pp_smumgr *smumgr) | 302 | static int fiji_restore_vft_table(struct pp_smumgr *smumgr) |
303 | { | 303 | { |
304 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); | 304 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); |
305 | 305 | ||
@@ -311,7 +311,7 @@ int fiji_restore_vft_table(struct pp_smumgr *smumgr) | |||
311 | } | 311 | } |
312 | 312 | ||
313 | /* Work in Progress */ | 313 | /* Work in Progress */ |
314 | int fiji_save_vft_table(struct pp_smumgr *smumgr) | 314 | static int fiji_save_vft_table(struct pp_smumgr *smumgr) |
315 | { | 315 | { |
316 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); | 316 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); |
317 | 317 | ||
@@ -322,7 +322,7 @@ int fiji_save_vft_table(struct pp_smumgr *smumgr) | |||
322 | return -EINVAL; | 322 | return -EINVAL; |
323 | } | 323 | } |
324 | 324 | ||
325 | int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) | 325 | static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) |
326 | { | 326 | { |
327 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); | 327 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); |
328 | 328 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c index 8c889caba420..b579f0c175e6 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c | |||
@@ -2140,7 +2140,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member) | |||
2140 | return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); | 2140 | return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); |
2141 | } | 2141 | } |
2142 | } | 2142 | } |
2143 | printk("cant't get the offset of type %x member %x \n", type, member); | 2143 | printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); |
2144 | return 0; | 2144 | return 0; |
2145 | } | 2145 | } |
2146 | 2146 | ||
@@ -2163,7 +2163,7 @@ uint32_t iceland_get_mac_definition(uint32_t value) | |||
2163 | return SMU71_MAX_LEVELS_MVDD; | 2163 | return SMU71_MAX_LEVELS_MVDD; |
2164 | } | 2164 | } |
2165 | 2165 | ||
2166 | printk("cant't get the mac of %x \n", value); | 2166 | printk(KERN_WARNING "can't get the mac of %x\n", value); |
2167 | return 0; | 2167 | return 0; |
2168 | } | 2168 | } |
2169 | 2169 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 4ccc0b72324d..006b22071685 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | |||
@@ -2174,7 +2174,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) | |||
2174 | return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); | 2174 | return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); |
2175 | } | 2175 | } |
2176 | } | 2176 | } |
2177 | printk("cant't get the offset of type %x member %x \n", type, member); | 2177 | printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); |
2178 | return 0; | 2178 | return 0; |
2179 | } | 2179 | } |
2180 | 2180 | ||
@@ -2201,7 +2201,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value) | |||
2201 | return SMU7_UVD_MCLK_HANDSHAKE_DISABLE; | 2201 | return SMU7_UVD_MCLK_HANDSHAKE_DISABLE; |
2202 | } | 2202 | } |
2203 | 2203 | ||
2204 | printk("cant't get the mac of %x \n", value); | 2204 | printk(KERN_WARNING "can't get the mac of %x\n", value); |
2205 | return 0; | 2205 | return 0; |
2206 | } | 2206 | } |
2207 | 2207 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 5c3598ab7dae..f38a68747df0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | |||
@@ -118,7 +118,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr) | |||
118 | } | 118 | } |
119 | 119 | ||
120 | 120 | ||
121 | int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) | 121 | static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) |
122 | { | 122 | { |
123 | uint32_t vr_config; | 123 | uint32_t vr_config; |
124 | uint32_t dpm_table_start; | 124 | uint32_t dpm_table_start; |
@@ -172,7 +172,8 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) | |||
172 | return 0; | 172 | return 0; |
173 | } | 173 | } |
174 | 174 | ||
175 | int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) | 175 | static int |
176 | polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) | ||
176 | { | 177 | { |
177 | struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); | 178 | struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); |
178 | 179 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 6af744f42ec9..6df0d6edfdd1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | |||
@@ -278,6 +278,9 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type) | |||
278 | case UCODE_ID_RLC_G: | 278 | case UCODE_ID_RLC_G: |
279 | result = CGS_UCODE_ID_RLC_G; | 279 | result = CGS_UCODE_ID_RLC_G; |
280 | break; | 280 | break; |
281 | case UCODE_ID_MEC_STORAGE: | ||
282 | result = CGS_UCODE_ID_STORAGE; | ||
283 | break; | ||
281 | default: | 284 | default: |
282 | break; | 285 | break; |
283 | } | 286 | } |
@@ -452,6 +455,10 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) | |||
452 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, | 455 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, |
453 | UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), | 456 | UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), |
454 | "Failed to Get Firmware Entry.", return -EINVAL); | 457 | "Failed to Get Firmware Entry.", return -EINVAL); |
458 | if (cgs_is_virtualization_enabled(smumgr->device)) | ||
459 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, | ||
460 | UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), | ||
461 | "Failed to Get Firmware Entry.", return -EINVAL); | ||
455 | 462 | ||
456 | smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); | 463 | smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); |
457 | smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); | 464 | smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index 76352f2423ae..919be435b49c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | |||
@@ -28,8 +28,6 @@ | |||
28 | #include <pp_endian.h> | 28 | #include <pp_endian.h> |
29 | 29 | ||
30 | #define SMC_RAM_END 0x40000 | 30 | #define SMC_RAM_END 0x40000 |
31 | #define mmSMC_IND_INDEX_11 0x01AC | ||
32 | #define mmSMC_IND_DATA_11 0x01AD | ||
33 | 31 | ||
34 | struct smu7_buffer_entry { | 32 | struct smu7_buffer_entry { |
35 | uint32_t data_size; | 33 | uint32_t data_size; |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c index de2a24d85f48..d08f6f19b454 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c | |||
@@ -2651,7 +2651,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) | |||
2651 | return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); | 2651 | return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); |
2652 | } | 2652 | } |
2653 | } | 2653 | } |
2654 | printk("cant't get the offset of type %x member %x\n", type, member); | 2654 | printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); |
2655 | return 0; | 2655 | return 0; |
2656 | } | 2656 | } |
2657 | 2657 | ||
@@ -2675,7 +2675,7 @@ uint32_t tonga_get_mac_definition(uint32_t value) | |||
2675 | case SMU_MAX_LEVELS_MVDD: | 2675 | case SMU_MAX_LEVELS_MVDD: |
2676 | return SMU72_MAX_LEVELS_MVDD; | 2676 | return SMU72_MAX_LEVELS_MVDD; |
2677 | } | 2677 | } |
2678 | printk("cant't get the mac value %x\n", value); | 2678 | printk(KERN_WARNING "can't get the mac value %x\n", value); |
2679 | 2679 | ||
2680 | return 0; | 2680 | return 0; |
2681 | } | 2681 | } |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index b961a1c6caf3..dbd4fd3a810b 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | |||
@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job, | |||
17 | TP_STRUCT__entry( | 17 | TP_STRUCT__entry( |
18 | __field(struct amd_sched_entity *, entity) | 18 | __field(struct amd_sched_entity *, entity) |
19 | __field(struct amd_sched_job *, sched_job) | 19 | __field(struct amd_sched_job *, sched_job) |
20 | __field(struct fence *, fence) | 20 | __field(struct dma_fence *, fence) |
21 | __field(const char *, name) | 21 | __field(const char *, name) |
22 | __field(u32, job_count) | 22 | __field(u32, job_count) |
23 | __field(int, hw_job_count) | 23 | __field(int, hw_job_count) |
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job, | |||
42 | TP_PROTO(struct amd_sched_fence *fence), | 42 | TP_PROTO(struct amd_sched_fence *fence), |
43 | TP_ARGS(fence), | 43 | TP_ARGS(fence), |
44 | TP_STRUCT__entry( | 44 | TP_STRUCT__entry( |
45 | __field(struct fence *, fence) | 45 | __field(struct dma_fence *, fence) |
46 | ), | 46 | ), |
47 | 47 | ||
48 | TP_fast_assign( | 48 | TP_fast_assign( |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 963a24d46a93..5364e6a7ec8f 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -32,7 +32,7 @@ | |||
32 | 32 | ||
33 | static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); | 33 | static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); |
34 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); | 34 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); |
35 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); | 35 | static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); |
36 | 36 | ||
37 | struct kmem_cache *sched_fence_slab; | 37 | struct kmem_cache *sched_fence_slab; |
38 | atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); | 38 | atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); |
@@ -141,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, | |||
141 | return r; | 141 | return r; |
142 | 142 | ||
143 | atomic_set(&entity->fence_seq, 0); | 143 | atomic_set(&entity->fence_seq, 0); |
144 | entity->fence_context = fence_context_alloc(2); | 144 | entity->fence_context = dma_fence_context_alloc(2); |
145 | 145 | ||
146 | return 0; | 146 | return 0; |
147 | } | 147 | } |
@@ -221,32 +221,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, | |||
221 | kfifo_free(&entity->job_queue); | 221 | kfifo_free(&entity->job_queue); |
222 | } | 222 | } |
223 | 223 | ||
224 | static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) | 224 | static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) |
225 | { | 225 | { |
226 | struct amd_sched_entity *entity = | 226 | struct amd_sched_entity *entity = |
227 | container_of(cb, struct amd_sched_entity, cb); | 227 | container_of(cb, struct amd_sched_entity, cb); |
228 | entity->dependency = NULL; | 228 | entity->dependency = NULL; |
229 | fence_put(f); | 229 | dma_fence_put(f); |
230 | amd_sched_wakeup(entity->sched); | 230 | amd_sched_wakeup(entity->sched); |
231 | } | 231 | } |
232 | 232 | ||
233 | static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb) | 233 | static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) |
234 | { | 234 | { |
235 | struct amd_sched_entity *entity = | 235 | struct amd_sched_entity *entity = |
236 | container_of(cb, struct amd_sched_entity, cb); | 236 | container_of(cb, struct amd_sched_entity, cb); |
237 | entity->dependency = NULL; | 237 | entity->dependency = NULL; |
238 | fence_put(f); | 238 | dma_fence_put(f); |
239 | } | 239 | } |
240 | 240 | ||
241 | static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) | 241 | static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) |
242 | { | 242 | { |
243 | struct amd_gpu_scheduler *sched = entity->sched; | 243 | struct amd_gpu_scheduler *sched = entity->sched; |
244 | struct fence * fence = entity->dependency; | 244 | struct dma_fence * fence = entity->dependency; |
245 | struct amd_sched_fence *s_fence; | 245 | struct amd_sched_fence *s_fence; |
246 | 246 | ||
247 | if (fence->context == entity->fence_context) { | 247 | if (fence->context == entity->fence_context) { |
248 | /* We can ignore fences from ourself */ | 248 | /* We can ignore fences from ourself */ |
249 | fence_put(entity->dependency); | 249 | dma_fence_put(entity->dependency); |
250 | return false; | 250 | return false; |
251 | } | 251 | } |
252 | 252 | ||
@@ -257,23 +257,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) | |||
257 | * Fence is from the same scheduler, only need to wait for | 257 | * Fence is from the same scheduler, only need to wait for |
258 | * it to be scheduled | 258 | * it to be scheduled |
259 | */ | 259 | */ |
260 | fence = fence_get(&s_fence->scheduled); | 260 | fence = dma_fence_get(&s_fence->scheduled); |
261 | fence_put(entity->dependency); | 261 | dma_fence_put(entity->dependency); |
262 | entity->dependency = fence; | 262 | entity->dependency = fence; |
263 | if (!fence_add_callback(fence, &entity->cb, | 263 | if (!dma_fence_add_callback(fence, &entity->cb, |
264 | amd_sched_entity_clear_dep)) | 264 | amd_sched_entity_clear_dep)) |
265 | return true; | 265 | return true; |
266 | 266 | ||
267 | /* Ignore it when it is already scheduled */ | 267 | /* Ignore it when it is already scheduled */ |
268 | fence_put(fence); | 268 | dma_fence_put(fence); |
269 | return false; | 269 | return false; |
270 | } | 270 | } |
271 | 271 | ||
272 | if (!fence_add_callback(entity->dependency, &entity->cb, | 272 | if (!dma_fence_add_callback(entity->dependency, &entity->cb, |
273 | amd_sched_entity_wakeup)) | 273 | amd_sched_entity_wakeup)) |
274 | return true; | 274 | return true; |
275 | 275 | ||
276 | fence_put(entity->dependency); | 276 | dma_fence_put(entity->dependency); |
277 | return false; | 277 | return false; |
278 | } | 278 | } |
279 | 279 | ||
@@ -354,7 +354,8 @@ static void amd_sched_job_finish(struct work_struct *work) | |||
354 | sched->ops->free_job(s_job); | 354 | sched->ops->free_job(s_job); |
355 | } | 355 | } |
356 | 356 | ||
357 | static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb) | 357 | static void amd_sched_job_finish_cb(struct dma_fence *f, |
358 | struct dma_fence_cb *cb) | ||
358 | { | 359 | { |
359 | struct amd_sched_job *job = container_of(cb, struct amd_sched_job, | 360 | struct amd_sched_job *job = container_of(cb, struct amd_sched_job, |
360 | finish_cb); | 361 | finish_cb); |
@@ -388,8 +389,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched) | |||
388 | 389 | ||
389 | spin_lock(&sched->job_list_lock); | 390 | spin_lock(&sched->job_list_lock); |
390 | list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { | 391 | list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { |
391 | if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) { | 392 | if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) { |
392 | fence_put(s_job->s_fence->parent); | 393 | dma_fence_put(s_job->s_fence->parent); |
393 | s_job->s_fence->parent = NULL; | 394 | s_job->s_fence->parent = NULL; |
394 | } | 395 | } |
395 | } | 396 | } |
@@ -410,21 +411,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) | |||
410 | 411 | ||
411 | list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { | 412 | list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { |
412 | struct amd_sched_fence *s_fence = s_job->s_fence; | 413 | struct amd_sched_fence *s_fence = s_job->s_fence; |
413 | struct fence *fence; | 414 | struct dma_fence *fence; |
414 | 415 | ||
415 | spin_unlock(&sched->job_list_lock); | 416 | spin_unlock(&sched->job_list_lock); |
416 | fence = sched->ops->run_job(s_job); | 417 | fence = sched->ops->run_job(s_job); |
417 | atomic_inc(&sched->hw_rq_count); | 418 | atomic_inc(&sched->hw_rq_count); |
418 | if (fence) { | 419 | if (fence) { |
419 | s_fence->parent = fence_get(fence); | 420 | s_fence->parent = dma_fence_get(fence); |
420 | r = fence_add_callback(fence, &s_fence->cb, | 421 | r = dma_fence_add_callback(fence, &s_fence->cb, |
421 | amd_sched_process_job); | 422 | amd_sched_process_job); |
422 | if (r == -ENOENT) | 423 | if (r == -ENOENT) |
423 | amd_sched_process_job(fence, &s_fence->cb); | 424 | amd_sched_process_job(fence, &s_fence->cb); |
424 | else if (r) | 425 | else if (r) |
425 | DRM_ERROR("fence add callback failed (%d)\n", | 426 | DRM_ERROR("fence add callback failed (%d)\n", |
426 | r); | 427 | r); |
427 | fence_put(fence); | 428 | dma_fence_put(fence); |
428 | } else { | 429 | } else { |
429 | DRM_ERROR("Failed to run job!\n"); | 430 | DRM_ERROR("Failed to run job!\n"); |
430 | amd_sched_process_job(NULL, &s_fence->cb); | 431 | amd_sched_process_job(NULL, &s_fence->cb); |
@@ -446,8 +447,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job) | |||
446 | struct amd_sched_entity *entity = sched_job->s_entity; | 447 | struct amd_sched_entity *entity = sched_job->s_entity; |
447 | 448 | ||
448 | trace_amd_sched_job(sched_job); | 449 | trace_amd_sched_job(sched_job); |
449 | fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb, | 450 | dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb, |
450 | amd_sched_job_finish_cb); | 451 | amd_sched_job_finish_cb); |
451 | wait_event(entity->sched->job_scheduled, | 452 | wait_event(entity->sched->job_scheduled, |
452 | amd_sched_entity_in(sched_job)); | 453 | amd_sched_entity_in(sched_job)); |
453 | } | 454 | } |
@@ -511,7 +512,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched) | |||
511 | return entity; | 512 | return entity; |
512 | } | 513 | } |
513 | 514 | ||
514 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) | 515 | static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) |
515 | { | 516 | { |
516 | struct amd_sched_fence *s_fence = | 517 | struct amd_sched_fence *s_fence = |
517 | container_of(cb, struct amd_sched_fence, cb); | 518 | container_of(cb, struct amd_sched_fence, cb); |
@@ -521,7 +522,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) | |||
521 | amd_sched_fence_finished(s_fence); | 522 | amd_sched_fence_finished(s_fence); |
522 | 523 | ||
523 | trace_amd_sched_process_job(s_fence); | 524 | trace_amd_sched_process_job(s_fence); |
524 | fence_put(&s_fence->finished); | 525 | dma_fence_put(&s_fence->finished); |
525 | wake_up_interruptible(&sched->wake_up_worker); | 526 | wake_up_interruptible(&sched->wake_up_worker); |
526 | } | 527 | } |
527 | 528 | ||
@@ -547,7 +548,7 @@ static int amd_sched_main(void *param) | |||
547 | struct amd_sched_entity *entity = NULL; | 548 | struct amd_sched_entity *entity = NULL; |
548 | struct amd_sched_fence *s_fence; | 549 | struct amd_sched_fence *s_fence; |
549 | struct amd_sched_job *sched_job; | 550 | struct amd_sched_job *sched_job; |
550 | struct fence *fence; | 551 | struct dma_fence *fence; |
551 | 552 | ||
552 | wait_event_interruptible(sched->wake_up_worker, | 553 | wait_event_interruptible(sched->wake_up_worker, |
553 | (!amd_sched_blocked(sched) && | 554 | (!amd_sched_blocked(sched) && |
@@ -569,15 +570,15 @@ static int amd_sched_main(void *param) | |||
569 | fence = sched->ops->run_job(sched_job); | 570 | fence = sched->ops->run_job(sched_job); |
570 | amd_sched_fence_scheduled(s_fence); | 571 | amd_sched_fence_scheduled(s_fence); |
571 | if (fence) { | 572 | if (fence) { |
572 | s_fence->parent = fence_get(fence); | 573 | s_fence->parent = dma_fence_get(fence); |
573 | r = fence_add_callback(fence, &s_fence->cb, | 574 | r = dma_fence_add_callback(fence, &s_fence->cb, |
574 | amd_sched_process_job); | 575 | amd_sched_process_job); |
575 | if (r == -ENOENT) | 576 | if (r == -ENOENT) |
576 | amd_sched_process_job(fence, &s_fence->cb); | 577 | amd_sched_process_job(fence, &s_fence->cb); |
577 | else if (r) | 578 | else if (r) |
578 | DRM_ERROR("fence add callback failed (%d)\n", | 579 | DRM_ERROR("fence add callback failed (%d)\n", |
579 | r); | 580 | r); |
580 | fence_put(fence); | 581 | dma_fence_put(fence); |
581 | } else { | 582 | } else { |
582 | DRM_ERROR("Failed to run job!\n"); | 583 | DRM_ERROR("Failed to run job!\n"); |
583 | amd_sched_process_job(NULL, &s_fence->cb); | 584 | amd_sched_process_job(NULL, &s_fence->cb); |
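Aside (not part of the patch): the scheduler hunks above all repeat the same pattern once the s/fence/dma_fence/ rename is applied — take an extra reference on the fence returned by ->run_job(), attach a completion callback, treat -ENOENT as "already signaled" and invoke the handler directly, then drop the reference that ->run_job() handed over. A minimal sketch of that pattern, with invented example_* names and no ties to the scheduler structs:

        #include <linux/dma-fence.h>
        #include <linux/printk.h>

        static void example_job_done(struct dma_fence *f, struct dma_fence_cb *cb)
        {
                /* completion handling for the tracked job would go here */
        }

        static struct dma_fence *example_track_fence(struct dma_fence *fence,
                                                     struct dma_fence_cb *cb)
        {
                struct dma_fence *parent;
                int r;

                /* keep a reference for as long as the fence is tracked */
                parent = dma_fence_get(fence);

                r = dma_fence_add_callback(fence, cb, example_job_done);
                if (r == -ENOENT)
                        /* fence already signaled, run the handler directly */
                        example_job_done(fence, cb);
                else if (r)
                        pr_err("dma_fence_add_callback failed (%d)\n", r);

                /* drop the reference the backend's ->run_job() handed over */
                dma_fence_put(fence);

                return parent;
        }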
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 7cbbbfb502ef..876aa43b57df 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | |||
@@ -25,7 +25,7 @@ | |||
25 | #define _GPU_SCHEDULER_H_ | 25 | #define _GPU_SCHEDULER_H_ |
26 | 26 | ||
27 | #include <linux/kfifo.h> | 27 | #include <linux/kfifo.h> |
28 | #include <linux/fence.h> | 28 | #include <linux/dma-fence.h> |
29 | 29 | ||
30 | struct amd_gpu_scheduler; | 30 | struct amd_gpu_scheduler; |
31 | struct amd_sched_rq; | 31 | struct amd_sched_rq; |
@@ -50,8 +50,8 @@ struct amd_sched_entity { | |||
50 | atomic_t fence_seq; | 50 | atomic_t fence_seq; |
51 | uint64_t fence_context; | 51 | uint64_t fence_context; |
52 | 52 | ||
53 | struct fence *dependency; | 53 | struct dma_fence *dependency; |
54 | struct fence_cb cb; | 54 | struct dma_fence_cb cb; |
55 | }; | 55 | }; |
56 | 56 | ||
57 | /** | 57 | /** |
@@ -66,10 +66,10 @@ struct amd_sched_rq { | |||
66 | }; | 66 | }; |
67 | 67 | ||
68 | struct amd_sched_fence { | 68 | struct amd_sched_fence { |
69 | struct fence scheduled; | 69 | struct dma_fence scheduled; |
70 | struct fence finished; | 70 | struct dma_fence finished; |
71 | struct fence_cb cb; | 71 | struct dma_fence_cb cb; |
72 | struct fence *parent; | 72 | struct dma_fence *parent; |
73 | struct amd_gpu_scheduler *sched; | 73 | struct amd_gpu_scheduler *sched; |
74 | spinlock_t lock; | 74 | spinlock_t lock; |
75 | void *owner; | 75 | void *owner; |
@@ -79,15 +79,15 @@ struct amd_sched_job { | |||
79 | struct amd_gpu_scheduler *sched; | 79 | struct amd_gpu_scheduler *sched; |
80 | struct amd_sched_entity *s_entity; | 80 | struct amd_sched_entity *s_entity; |
81 | struct amd_sched_fence *s_fence; | 81 | struct amd_sched_fence *s_fence; |
82 | struct fence_cb finish_cb; | 82 | struct dma_fence_cb finish_cb; |
83 | struct work_struct finish_work; | 83 | struct work_struct finish_work; |
84 | struct list_head node; | 84 | struct list_head node; |
85 | struct delayed_work work_tdr; | 85 | struct delayed_work work_tdr; |
86 | }; | 86 | }; |
87 | 87 | ||
88 | extern const struct fence_ops amd_sched_fence_ops_scheduled; | 88 | extern const struct dma_fence_ops amd_sched_fence_ops_scheduled; |
89 | extern const struct fence_ops amd_sched_fence_ops_finished; | 89 | extern const struct dma_fence_ops amd_sched_fence_ops_finished; |
90 | static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) | 90 | static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f) |
91 | { | 91 | { |
92 | if (f->ops == &amd_sched_fence_ops_scheduled) | 92 | if (f->ops == &amd_sched_fence_ops_scheduled) |
93 | return container_of(f, struct amd_sched_fence, scheduled); | 93 | return container_of(f, struct amd_sched_fence, scheduled); |
@@ -103,8 +103,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) | |||
103 | * these functions should be implemented in driver side | 103 | * these functions should be implemented in driver side |
104 | */ | 104 | */ |
105 | struct amd_sched_backend_ops { | 105 | struct amd_sched_backend_ops { |
106 | struct fence *(*dependency)(struct amd_sched_job *sched_job); | 106 | struct dma_fence *(*dependency)(struct amd_sched_job *sched_job); |
107 | struct fence *(*run_job)(struct amd_sched_job *sched_job); | 107 | struct dma_fence *(*run_job)(struct amd_sched_job *sched_job); |
108 | void (*timedout_job)(struct amd_sched_job *sched_job); | 108 | void (*timedout_job)(struct amd_sched_job *sched_job); |
109 | void (*free_job)(struct amd_sched_job *sched_job); | 109 | void (*free_job)(struct amd_sched_job *sched_job); |
110 | }; | 110 | }; |
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 6b63beaf7574..c26fa298fe9e 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c | |||
@@ -42,46 +42,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, | |||
42 | spin_lock_init(&fence->lock); | 42 | spin_lock_init(&fence->lock); |
43 | 43 | ||
44 | seq = atomic_inc_return(&entity->fence_seq); | 44 | seq = atomic_inc_return(&entity->fence_seq); |
45 | fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, | 45 | dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, |
46 | &fence->lock, entity->fence_context, seq); | 46 | &fence->lock, entity->fence_context, seq); |
47 | fence_init(&fence->finished, &amd_sched_fence_ops_finished, | 47 | dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished, |
48 | &fence->lock, entity->fence_context + 1, seq); | 48 | &fence->lock, entity->fence_context + 1, seq); |
49 | 49 | ||
50 | return fence; | 50 | return fence; |
51 | } | 51 | } |
52 | 52 | ||
53 | void amd_sched_fence_scheduled(struct amd_sched_fence *fence) | 53 | void amd_sched_fence_scheduled(struct amd_sched_fence *fence) |
54 | { | 54 | { |
55 | int ret = fence_signal(&fence->scheduled); | 55 | int ret = dma_fence_signal(&fence->scheduled); |
56 | 56 | ||
57 | if (!ret) | 57 | if (!ret) |
58 | FENCE_TRACE(&fence->scheduled, "signaled from irq context\n"); | 58 | DMA_FENCE_TRACE(&fence->scheduled, |
59 | "signaled from irq context\n"); | ||
59 | else | 60 | else |
60 | FENCE_TRACE(&fence->scheduled, "was already signaled\n"); | 61 | DMA_FENCE_TRACE(&fence->scheduled, |
62 | "was already signaled\n"); | ||
61 | } | 63 | } |
62 | 64 | ||
63 | void amd_sched_fence_finished(struct amd_sched_fence *fence) | 65 | void amd_sched_fence_finished(struct amd_sched_fence *fence) |
64 | { | 66 | { |
65 | int ret = fence_signal(&fence->finished); | 67 | int ret = dma_fence_signal(&fence->finished); |
66 | 68 | ||
67 | if (!ret) | 69 | if (!ret) |
68 | FENCE_TRACE(&fence->finished, "signaled from irq context\n"); | 70 | DMA_FENCE_TRACE(&fence->finished, |
71 | "signaled from irq context\n"); | ||
69 | else | 72 | else |
70 | FENCE_TRACE(&fence->finished, "was already signaled\n"); | 73 | DMA_FENCE_TRACE(&fence->finished, |
74 | "was already signaled\n"); | ||
71 | } | 75 | } |
72 | 76 | ||
73 | static const char *amd_sched_fence_get_driver_name(struct fence *fence) | 77 | static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence) |
74 | { | 78 | { |
75 | return "amd_sched"; | 79 | return "amd_sched"; |
76 | } | 80 | } |
77 | 81 | ||
78 | static const char *amd_sched_fence_get_timeline_name(struct fence *f) | 82 | static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f) |
79 | { | 83 | { |
80 | struct amd_sched_fence *fence = to_amd_sched_fence(f); | 84 | struct amd_sched_fence *fence = to_amd_sched_fence(f); |
81 | return (const char *)fence->sched->name; | 85 | return (const char *)fence->sched->name; |
82 | } | 86 | } |
83 | 87 | ||
84 | static bool amd_sched_fence_enable_signaling(struct fence *f) | 88 | static bool amd_sched_fence_enable_signaling(struct dma_fence *f) |
85 | { | 89 | { |
86 | return true; | 90 | return true; |
87 | } | 91 | } |
@@ -95,10 +99,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f) | |||
95 | */ | 99 | */ |
96 | static void amd_sched_fence_free(struct rcu_head *rcu) | 100 | static void amd_sched_fence_free(struct rcu_head *rcu) |
97 | { | 101 | { |
98 | struct fence *f = container_of(rcu, struct fence, rcu); | 102 | struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); |
99 | struct amd_sched_fence *fence = to_amd_sched_fence(f); | 103 | struct amd_sched_fence *fence = to_amd_sched_fence(f); |
100 | 104 | ||
101 | fence_put(fence->parent); | 105 | dma_fence_put(fence->parent); |
102 | kmem_cache_free(sched_fence_slab, fence); | 106 | kmem_cache_free(sched_fence_slab, fence); |
103 | } | 107 | } |
104 | 108 | ||
@@ -110,7 +114,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu) | |||
110 | * This function is called when the reference count becomes zero. | 114 | * This function is called when the reference count becomes zero. |
111 | * It just RCU schedules freeing up the fence. | 115 | * It just RCU schedules freeing up the fence. |
112 | */ | 116 | */ |
113 | static void amd_sched_fence_release_scheduled(struct fence *f) | 117 | static void amd_sched_fence_release_scheduled(struct dma_fence *f) |
114 | { | 118 | { |
115 | struct amd_sched_fence *fence = to_amd_sched_fence(f); | 119 | struct amd_sched_fence *fence = to_amd_sched_fence(f); |
116 | 120 | ||
@@ -124,27 +128,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f) | |||
124 | * | 128 | * |
125 | * Drop the extra reference from the scheduled fence to the base fence. | 129 | * Drop the extra reference from the scheduled fence to the base fence. |
126 | */ | 130 | */ |
127 | static void amd_sched_fence_release_finished(struct fence *f) | 131 | static void amd_sched_fence_release_finished(struct dma_fence *f) |
128 | { | 132 | { |
129 | struct amd_sched_fence *fence = to_amd_sched_fence(f); | 133 | struct amd_sched_fence *fence = to_amd_sched_fence(f); |
130 | 134 | ||
131 | fence_put(&fence->scheduled); | 135 | dma_fence_put(&fence->scheduled); |
132 | } | 136 | } |
133 | 137 | ||
134 | const struct fence_ops amd_sched_fence_ops_scheduled = { | 138 | const struct dma_fence_ops amd_sched_fence_ops_scheduled = { |
135 | .get_driver_name = amd_sched_fence_get_driver_name, | 139 | .get_driver_name = amd_sched_fence_get_driver_name, |
136 | .get_timeline_name = amd_sched_fence_get_timeline_name, | 140 | .get_timeline_name = amd_sched_fence_get_timeline_name, |
137 | .enable_signaling = amd_sched_fence_enable_signaling, | 141 | .enable_signaling = amd_sched_fence_enable_signaling, |
138 | .signaled = NULL, | 142 | .signaled = NULL, |
139 | .wait = fence_default_wait, | 143 | .wait = dma_fence_default_wait, |
140 | .release = amd_sched_fence_release_scheduled, | 144 | .release = amd_sched_fence_release_scheduled, |
141 | }; | 145 | }; |
142 | 146 | ||
143 | const struct fence_ops amd_sched_fence_ops_finished = { | 147 | const struct dma_fence_ops amd_sched_fence_ops_finished = { |
144 | .get_driver_name = amd_sched_fence_get_driver_name, | 148 | .get_driver_name = amd_sched_fence_get_driver_name, |
145 | .get_timeline_name = amd_sched_fence_get_timeline_name, | 149 | .get_timeline_name = amd_sched_fence_get_timeline_name, |
146 | .enable_signaling = amd_sched_fence_enable_signaling, | 150 | .enable_signaling = amd_sched_fence_enable_signaling, |
147 | .signaled = NULL, | 151 | .signaled = NULL, |
148 | .wait = fence_default_wait, | 152 | .wait = dma_fence_default_wait, |
149 | .release = amd_sched_fence_release_finished, | 153 | .release = amd_sched_fence_release_finished, |
150 | }; | 154 | }; |
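For reference only (again, not part of this patch), a self-contained fence provider after the rename mirrors what sched_fence.c does above: embed a struct dma_fence, initialise it with dma_fence_init() against a driver spinlock and a fence context, and supply a struct dma_fence_ops whose .wait is dma_fence_default_wait. The example_* identifiers are invented for illustration:

        #include <linux/dma-fence.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        struct example_fence {
                struct dma_fence base;
                spinlock_t lock;
        };

        static const char *example_fence_get_driver_name(struct dma_fence *f)
        {
                return "example";
        }

        static const char *example_fence_get_timeline_name(struct dma_fence *f)
        {
                return "example-timeline";
        }

        static bool example_fence_enable_signaling(struct dma_fence *f)
        {
                return true;    /* nothing to arm, the fence can always signal */
        }

        /*
         * No .release hook: dma_fence_release() falls back to dma_fence_free(),
         * which kfree_rcu()s the fence; that frees the whole example_fence
         * because 'base' is its first member.
         */
        static const struct dma_fence_ops example_fence_ops = {
                .get_driver_name = example_fence_get_driver_name,
                .get_timeline_name = example_fence_get_timeline_name,
                .enable_signaling = example_fence_enable_signaling,
                .wait = dma_fence_default_wait,
        };

        static struct example_fence *example_fence_create(u64 context,
                                                          unsigned int seqno)
        {
                struct example_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

                if (!f)
                        return NULL;

                spin_lock_init(&f->lock);
                dma_fence_init(&f->base, &example_fence_ops, &f->lock,
                               context, seqno);
                return f;
        }

Signalling then works exactly as in the scheduler code above: dma_fence_signal(&f->base), with DMA_FENCE_TRACE available for debug output.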
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index fb6a418ce6be..6477d1a65266 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c | |||
@@ -453,7 +453,8 @@ static int hdlcd_probe(struct platform_device *pdev) | |||
453 | return -EAGAIN; | 453 | return -EAGAIN; |
454 | } | 454 | } |
455 | 455 | ||
456 | component_match_add(&pdev->dev, &match, compare_dev, port); | 456 | drm_of_component_match_add(&pdev->dev, &match, compare_dev, port); |
457 | of_node_put(port); | ||
457 | 458 | ||
458 | return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops, | 459 | return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops, |
459 | match); | 460 | match); |
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 9280358b8f15..9f4739452a25 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c | |||
@@ -493,7 +493,9 @@ static int malidp_platform_probe(struct platform_device *pdev) | |||
493 | return -EAGAIN; | 493 | return -EAGAIN; |
494 | } | 494 | } |
495 | 495 | ||
496 | component_match_add(&pdev->dev, &match, malidp_compare_dev, port); | 496 | drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev, |
497 | port); | ||
498 | of_node_put(port); | ||
497 | return component_master_add_with_match(&pdev->dev, &malidp_master_ops, | 499 | return component_master_add_with_match(&pdev->dev, &malidp_master_ops, |
498 | match); | 500 | match); |
499 | } | 501 | } |
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 1e0e68f608e4..94e46da9a758 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c | |||
@@ -254,7 +254,7 @@ static void armada_add_endpoints(struct device *dev, | |||
254 | continue; | 254 | continue; |
255 | } | 255 | } |
256 | 256 | ||
257 | component_match_add(dev, match, compare_of, remote); | 257 | drm_of_component_match_add(dev, match, compare_of, remote); |
258 | of_node_put(remote); | 258 | of_node_put(remote); |
259 | } | 259 | } |
260 | } | 260 | } |
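The hdlcd, malidp and armada hunks above all switch from component_match_add() to drm_of_component_match_add(); the new helper takes its own reference on the device node, which is why each caller can safely drop its local reference with of_node_put() right after the call. A hedged sketch of the calling pattern, with an invented example_add_endpoints() walking the OF graph:

        #include <drm/drm_of.h>
        #include <linux/component.h>
        #include <linux/device.h>
        #include <linux/of.h>
        #include <linux/of_graph.h>

        static int example_compare_of(struct device *dev, void *data)
        {
                return dev->of_node == data;
        }

        static void example_add_endpoints(struct device *dev,
                                          struct component_match **match)
        {
                struct device_node *ep, *remote;

                for_each_endpoint_of_node(dev->of_node, ep) {
                        remote = of_graph_get_remote_port_parent(ep);
                        if (!remote)
                                continue;

                        /* the helper takes its own reference on @remote ... */
                        drm_of_component_match_add(dev, match,
                                                   example_compare_of, remote);
                        /* ... so the local reference can be dropped at once */
                        of_node_put(remote);
                }
        }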
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 608df4c90520..7134fdf49210 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c | |||
@@ -230,6 +230,7 @@ struct ttm_bo_driver ast_bo_driver = { | |||
230 | .ttm_tt_populate = ast_ttm_tt_populate, | 230 | .ttm_tt_populate = ast_ttm_tt_populate, |
231 | .ttm_tt_unpopulate = ast_ttm_tt_unpopulate, | 231 | .ttm_tt_unpopulate = ast_ttm_tt_unpopulate, |
232 | .init_mem_type = ast_bo_init_mem_type, | 232 | .init_mem_type = ast_bo_init_mem_type, |
233 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
233 | .evict_flags = ast_bo_evict_flags, | 234 | .evict_flags = ast_bo_evict_flags, |
234 | .move = NULL, | 235 | .move = NULL, |
235 | .verify_access = ast_bo_verify_access, | 236 | .verify_access = ast_bo_verify_access, |
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 269cfca9ca06..099a3c688c26 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c | |||
@@ -199,6 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = { | |||
199 | .ttm_tt_populate = ttm_pool_populate, | 199 | .ttm_tt_populate = ttm_pool_populate, |
200 | .ttm_tt_unpopulate = ttm_pool_unpopulate, | 200 | .ttm_tt_unpopulate = ttm_pool_unpopulate, |
201 | .init_mem_type = bochs_bo_init_mem_type, | 201 | .init_mem_type = bochs_bo_init_mem_type, |
202 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
202 | .evict_flags = bochs_bo_evict_flags, | 203 | .evict_flags = bochs_bo_evict_flags, |
203 | .move = NULL, | 204 | .move = NULL, |
204 | .verify_access = bochs_bo_verify_access, | 205 | .verify_access = bochs_bo_verify_access, |
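The ast and bochs hunks above wire the new .eviction_valuable hook in struct ttm_bo_driver to the stock ttm_bo_eviction_valuable() helper, which is all a driver with no special policy needs. A driver that wanted its own policy could wrap the default instead; both variants are sketched below with invented example_* names, and the NO_EVICT check is purely illustrative:

        #include <drm/ttm/ttm_bo_api.h>
        #include <drm/ttm/ttm_bo_driver.h>
        #include <drm/ttm/ttm_placement.h>

        /* no special policy: plug in the TTM default directly */
        static struct ttm_bo_driver example_bo_driver = {
                /* ... other callbacks elided ... */
                .eviction_valuable = ttm_bo_eviction_valuable,
        };

        /* custom policy: refuse some buffers, defer to the default otherwise */
        static bool example_eviction_valuable(struct ttm_buffer_object *bo,
                                              const struct ttm_place *place)
        {
                if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
                        return false;

                return ttm_bo_eviction_valuable(bo, place);
        }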
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 10e12e74fc9f..bd6acc829f97 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig | |||
@@ -57,6 +57,13 @@ config DRM_PARADE_PS8622 | |||
57 | ---help--- | 57 | ---help--- |
58 | Parade eDP-LVDS bridge chip driver. | 58 | Parade eDP-LVDS bridge chip driver. |
59 | 59 | ||
60 | config DRM_SIL_SII8620 | ||
61 | tristate "Silicon Image SII8620 HDMI/MHL bridge" | ||
62 | depends on OF | ||
63 | select DRM_KMS_HELPER | ||
64 | help | ||
65 | Silicon Image SII8620 HDMI/MHL bridge chip driver. | ||
66 | |||
60 | config DRM_SII902X | 67 | config DRM_SII902X |
61 | tristate "Silicon Image sii902x RGB/HDMI bridge" | 68 | tristate "Silicon Image sii902x RGB/HDMI bridge" |
62 | depends on OF | 69 | depends on OF |
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index cdf3a3cf765d..97ed1a5fea9a 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile | |||
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o | |||
6 | obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o | 6 | obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o |
7 | obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o | 7 | obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o |
8 | obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o | 8 | obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o |
9 | obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o | ||
9 | obj-$(CONFIG_DRM_SII902X) += sii902x.o | 10 | obj-$(CONFIG_DRM_SII902X) += sii902x.o |
10 | obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o | 11 | obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o |
11 | obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ | 12 | obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ |
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c new file mode 100644 index 000000000000..b2c267df7ee7 --- /dev/null +++ b/drivers/gpu/drm/bridge/sil-sii8620.c | |||
@@ -0,0 +1,1564 @@ | |||
1 | /* | ||
2 | * Silicon Image SiI8620 HDMI/MHL bridge driver | ||
3 | * | ||
4 | * Copyright (C) 2015, Samsung Electronics Co., Ltd. | ||
5 | * Andrzej Hajda <a.hajda@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <drm/bridge/mhl.h> | ||
13 | #include <drm/drm_crtc.h> | ||
14 | #include <drm/drm_edid.h> | ||
15 | |||
16 | #include <linux/clk.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/gpio/consumer.h> | ||
19 | #include <linux/i2c.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/irq.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/mutex.h> | ||
26 | #include <linux/regulator/consumer.h> | ||
27 | #include <linux/slab.h> | ||
28 | |||
29 | #include "sil-sii8620.h" | ||
30 | |||
31 | #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3) | ||
32 | |||
33 | enum sii8620_mode { | ||
34 | CM_DISCONNECTED, | ||
35 | CM_DISCOVERY, | ||
36 | CM_MHL1, | ||
37 | CM_MHL3, | ||
38 | CM_ECBUS_S | ||
39 | }; | ||
40 | |||
41 | enum sii8620_sink_type { | ||
42 | SINK_NONE, | ||
43 | SINK_HDMI, | ||
44 | SINK_DVI | ||
45 | }; | ||
46 | |||
47 | enum sii8620_mt_state { | ||
48 | MT_STATE_READY, | ||
49 | MT_STATE_BUSY, | ||
50 | MT_STATE_DONE | ||
51 | }; | ||
52 | |||
53 | struct sii8620 { | ||
54 | struct drm_bridge bridge; | ||
55 | struct device *dev; | ||
56 | struct clk *clk_xtal; | ||
57 | struct gpio_desc *gpio_reset; | ||
58 | struct gpio_desc *gpio_int; | ||
59 | struct regulator_bulk_data supplies[2]; | ||
60 | struct mutex lock; /* context lock, protects fields below */ | ||
61 | int error; | ||
62 | enum sii8620_mode mode; | ||
63 | enum sii8620_sink_type sink_type; | ||
64 | u8 cbus_status; | ||
65 | u8 stat[MHL_DST_SIZE]; | ||
66 | u8 xstat[MHL_XDS_SIZE]; | ||
67 | u8 devcap[MHL_DCAP_SIZE]; | ||
68 | u8 xdevcap[MHL_XDC_SIZE]; | ||
69 | u8 avif[19]; | ||
70 | struct edid *edid; | ||
71 | unsigned int gen2_write_burst:1; | ||
72 | enum sii8620_mt_state mt_state; | ||
73 | struct list_head mt_queue; | ||
74 | }; | ||
75 | |||
76 | struct sii8620_mt_msg; | ||
77 | |||
78 | typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx, | ||
79 | struct sii8620_mt_msg *msg); | ||
80 | |||
81 | struct sii8620_mt_msg { | ||
82 | struct list_head node; | ||
83 | u8 reg[4]; | ||
84 | u8 ret; | ||
85 | sii8620_mt_msg_cb send; | ||
86 | sii8620_mt_msg_cb recv; | ||
87 | }; | ||
88 | |||
89 | static const u8 sii8620_i2c_page[] = { | ||
90 | 0x39, /* Main System */ | ||
91 | 0x3d, /* TDM and HSIC */ | ||
92 | 0x49, /* TMDS Receiver, MHL EDID */ | ||
93 | 0x4d, /* eMSC, HDCP, HSIC */ | ||
94 | 0x5d, /* MHL Spec */ | ||
95 | 0x64, /* MHL CBUS */ | ||
96 | 0x59, /* Hardware TPI (Transmitter Programming Interface) */ | ||
97 | 0x61, /* eCBUS-S, eCBUS-D */ | ||
98 | }; | ||
99 | |||
100 | static void sii8620_fetch_edid(struct sii8620 *ctx); | ||
101 | static void sii8620_set_upstream_edid(struct sii8620 *ctx); | ||
102 | static void sii8620_enable_hpd(struct sii8620 *ctx); | ||
103 | static void sii8620_mhl_disconnected(struct sii8620 *ctx); | ||
104 | |||
105 | static int sii8620_clear_error(struct sii8620 *ctx) | ||
106 | { | ||
107 | int ret = ctx->error; | ||
108 | |||
109 | ctx->error = 0; | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len) | ||
114 | { | ||
115 | struct device *dev = ctx->dev; | ||
116 | struct i2c_client *client = to_i2c_client(dev); | ||
117 | u8 data = addr; | ||
118 | struct i2c_msg msg[] = { | ||
119 | { | ||
120 | .addr = sii8620_i2c_page[addr >> 8], | ||
121 | .flags = client->flags, | ||
122 | .len = 1, | ||
123 | .buf = &data | ||
124 | }, | ||
125 | { | ||
126 | .addr = sii8620_i2c_page[addr >> 8], | ||
127 | .flags = client->flags | I2C_M_RD, | ||
128 | .len = len, | ||
129 | .buf = buf | ||
130 | }, | ||
131 | }; | ||
132 | int ret; | ||
133 | |||
134 | if (ctx->error) | ||
135 | return; | ||
136 | |||
137 | ret = i2c_transfer(client->adapter, msg, 2); | ||
138 | dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret); | ||
139 | |||
140 | if (ret != 2) { | ||
141 | dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n", | ||
142 | addr, len, ret); | ||
143 | ctx->error = ret < 0 ? ret : -EIO; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | static u8 sii8620_readb(struct sii8620 *ctx, u16 addr) | ||
148 | { | ||
149 | u8 ret; | ||
150 | |||
151 | sii8620_read_buf(ctx, addr, &ret, 1); | ||
152 | return ret; | ||
153 | } | ||
154 | |||
155 | static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf, | ||
156 | int len) | ||
157 | { | ||
158 | struct device *dev = ctx->dev; | ||
159 | struct i2c_client *client = to_i2c_client(dev); | ||
160 | u8 data[2]; | ||
161 | struct i2c_msg msg = { | ||
162 | .addr = sii8620_i2c_page[addr >> 8], | ||
163 | .flags = client->flags, | ||
164 | .len = len + 1, | ||
165 | }; | ||
166 | int ret; | ||
167 | |||
168 | if (ctx->error) | ||
169 | return; | ||
170 | |||
171 | if (len > 1) { | ||
172 | msg.buf = kmalloc(len + 1, GFP_KERNEL); | ||
173 | if (!msg.buf) { | ||
174 | ctx->error = -ENOMEM; | ||
175 | return; | ||
176 | } | ||
177 | memcpy(msg.buf + 1, buf, len); | ||
178 | } else { | ||
179 | msg.buf = data; | ||
180 | msg.buf[1] = *buf; | ||
181 | } | ||
182 | |||
183 | msg.buf[0] = addr; | ||
184 | |||
185 | ret = i2c_transfer(client->adapter, &msg, 1); | ||
186 | dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret); | ||
187 | |||
188 | if (ret != 1) { | ||
189 | dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n", | ||
190 | addr, len, buf, ret); | ||
191 | ctx->error = ret ?: -EIO; | ||
192 | } | ||
193 | |||
194 | if (len > 1) | ||
195 | kfree(msg.buf); | ||
196 | } | ||
197 | |||
198 | #define sii8620_write(ctx, addr, arr...) \ | ||
199 | ({\ | ||
200 | u8 d[] = { arr }; \ | ||
201 | sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \ | ||
202 | }) | ||
203 | |||
204 | static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len) | ||
205 | { | ||
206 | int i; | ||
207 | |||
208 | for (i = 0; i < len; i += 2) | ||
209 | sii8620_write(ctx, seq[i], seq[i + 1]); | ||
210 | } | ||
211 | |||
212 | #define sii8620_write_seq(ctx, seq...) \ | ||
213 | ({\ | ||
214 | const u16 d[] = { seq }; \ | ||
215 | __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \ | ||
216 | }) | ||
217 | |||
218 | #define sii8620_write_seq_static(ctx, seq...) \ | ||
219 | ({\ | ||
220 | static const u16 d[] = { seq }; \ | ||
221 | __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \ | ||
222 | }) | ||
223 | |||
224 | static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val) | ||
225 | { | ||
226 | val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask); | ||
227 | sii8620_write(ctx, addr, val); | ||
228 | } | ||
229 | |||
230 | static void sii8620_mt_cleanup(struct sii8620 *ctx) | ||
231 | { | ||
232 | struct sii8620_mt_msg *msg, *n; | ||
233 | |||
234 | list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) { | ||
235 | list_del(&msg->node); | ||
236 | kfree(msg); | ||
237 | } | ||
238 | ctx->mt_state = MT_STATE_READY; | ||
239 | } | ||
240 | |||
241 | static void sii8620_mt_work(struct sii8620 *ctx) | ||
242 | { | ||
243 | struct sii8620_mt_msg *msg; | ||
244 | |||
245 | if (ctx->error) | ||
246 | return; | ||
247 | if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue)) | ||
248 | return; | ||
249 | |||
250 | if (ctx->mt_state == MT_STATE_DONE) { | ||
251 | ctx->mt_state = MT_STATE_READY; | ||
252 | msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, | ||
253 | node); | ||
254 | if (msg->recv) | ||
255 | msg->recv(ctx, msg); | ||
256 | list_del(&msg->node); | ||
257 | kfree(msg); | ||
258 | } | ||
259 | |||
260 | if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue)) | ||
261 | return; | ||
262 | |||
263 | ctx->mt_state = MT_STATE_BUSY; | ||
264 | msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node); | ||
265 | if (msg->send) | ||
266 | msg->send(ctx, msg); | ||
267 | } | ||
268 | |||
269 | static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx, | ||
270 | struct sii8620_mt_msg *msg) | ||
271 | { | ||
272 | switch (msg->reg[0]) { | ||
273 | case MHL_WRITE_STAT: | ||
274 | case MHL_SET_INT: | ||
275 | sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2); | ||
276 | sii8620_write(ctx, REG_MSC_COMMAND_START, | ||
277 | BIT_MSC_COMMAND_START_WRITE_STAT); | ||
278 | break; | ||
279 | case MHL_MSC_MSG: | ||
280 | sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3); | ||
281 | sii8620_write(ctx, REG_MSC_COMMAND_START, | ||
282 | BIT_MSC_COMMAND_START_MSC_MSG); | ||
283 | break; | ||
284 | default: | ||
285 | dev_err(ctx->dev, "%s: command %#x not supported\n", __func__, | ||
286 | msg->reg[0]); | ||
287 | } | ||
288 | } | ||
289 | |||
290 | static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx) | ||
291 | { | ||
292 | struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL); | ||
293 | |||
294 | if (!msg) | ||
295 | ctx->error = -ENOMEM; | ||
296 | else | ||
297 | list_add_tail(&msg->node, &ctx->mt_queue); | ||
298 | |||
299 | return msg; | ||
300 | } | ||
301 | |||
302 | static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2) | ||
303 | { | ||
304 | struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx); | ||
305 | |||
306 | if (!msg) | ||
307 | return; | ||
308 | |||
309 | msg->reg[0] = cmd; | ||
310 | msg->reg[1] = arg1; | ||
311 | msg->reg[2] = arg2; | ||
312 | msg->send = sii8620_mt_msc_cmd_send; | ||
313 | } | ||
314 | |||
315 | static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val) | ||
316 | { | ||
317 | sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val); | ||
318 | } | ||
319 | |||
320 | static inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask) | ||
321 | { | ||
322 | sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask); | ||
323 | } | ||
324 | |||
325 | static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data) | ||
326 | { | ||
327 | sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data); | ||
328 | } | ||
329 | |||
330 | static void sii8620_mt_rap(struct sii8620 *ctx, u8 code) | ||
331 | { | ||
332 | sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code); | ||
333 | } | ||
334 | |||
335 | static void sii8620_mt_read_devcap_send(struct sii8620 *ctx, | ||
336 | struct sii8620_mt_msg *msg) | ||
337 | { | ||
338 | u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP | ||
339 | | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO | ||
340 | | BIT_EDID_CTRL_EDID_MODE_EN; | ||
341 | |||
342 | if (msg->reg[0] == MHL_READ_XDEVCAP) | ||
343 | ctrl |= BIT_EDID_CTRL_XDEVCAP_EN; | ||
344 | |||
345 | sii8620_write_seq(ctx, | ||
346 | REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE, | ||
347 | REG_EDID_CTRL, ctrl, | ||
348 | REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START | ||
349 | ); | ||
350 | } | ||
351 | |||
352 | /* copy src to dst and set changed bits in src */ | ||
353 | static void sii8620_update_array(u8 *dst, u8 *src, int count) | ||
354 | { | ||
355 | while (--count >= 0) { | ||
356 | *src ^= *dst; | ||
357 | *dst++ ^= *src++; | ||
358 | } | ||
359 | } | ||
360 | |||
361 | static void sii8620_mr_devcap(struct sii8620 *ctx) | ||
362 | { | ||
363 | static const char * const sink_str[] = { | ||
364 | [SINK_NONE] = "NONE", | ||
365 | [SINK_HDMI] = "HDMI", | ||
366 | [SINK_DVI] = "DVI" | ||
367 | }; | ||
368 | |||
369 | u8 dcap[MHL_DCAP_SIZE]; | ||
370 | char sink_name[20]; | ||
371 | struct device *dev = ctx->dev; | ||
372 | |||
373 | sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE); | ||
374 | if (ctx->error < 0) | ||
375 | return; | ||
376 | |||
377 | dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap); | ||
378 | dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n", | ||
379 | dcap[MHL_DCAP_MHL_VERSION] / 16, | ||
380 | dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H], | ||
381 | dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H], | ||
382 | dcap[MHL_DCAP_DEVICE_ID_L]); | ||
383 | sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE); | ||
384 | |||
385 | if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK)) | ||
386 | return; | ||
387 | |||
388 | sii8620_fetch_edid(ctx); | ||
389 | if (!ctx->edid) { | ||
390 | dev_err(ctx->dev, "Cannot fetch EDID\n"); | ||
391 | sii8620_mhl_disconnected(ctx); | ||
392 | return; | ||
393 | } | ||
394 | |||
395 | if (drm_detect_hdmi_monitor(ctx->edid)) | ||
396 | ctx->sink_type = SINK_HDMI; | ||
397 | else | ||
398 | ctx->sink_type = SINK_DVI; | ||
399 | |||
400 | drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name)); | ||
401 | |||
402 | dev_info(dev, "detected sink(type: %s): %s\n", | ||
403 | sink_str[ctx->sink_type], sink_name); | ||
404 | sii8620_set_upstream_edid(ctx); | ||
405 | sii8620_enable_hpd(ctx); | ||
406 | } | ||
407 | |||
408 | static void sii8620_mr_xdevcap(struct sii8620 *ctx) | ||
409 | { | ||
410 | sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap, | ||
411 | MHL_XDC_SIZE); | ||
412 | |||
413 | sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE), | ||
414 | MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT); | ||
415 | sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP); | ||
416 | } | ||
417 | |||
418 | static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx, | ||
419 | struct sii8620_mt_msg *msg) | ||
420 | { | ||
421 | u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP | ||
422 | | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO | ||
423 | | BIT_EDID_CTRL_EDID_MODE_EN; | ||
424 | |||
425 | if (msg->reg[0] == MHL_READ_XDEVCAP) | ||
426 | ctrl |= BIT_EDID_CTRL_XDEVCAP_EN; | ||
427 | |||
428 | sii8620_write_seq(ctx, | ||
429 | REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE | ||
430 | | BIT_INTR9_EDID_ERROR, | ||
431 | REG_EDID_CTRL, ctrl, | ||
432 | REG_EDID_FIFO_ADDR, 0 | ||
433 | ); | ||
434 | |||
435 | if (msg->reg[0] == MHL_READ_XDEVCAP) | ||
436 | sii8620_mr_xdevcap(ctx); | ||
437 | else | ||
438 | sii8620_mr_devcap(ctx); | ||
439 | } | ||
440 | |||
441 | static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap) | ||
442 | { | ||
443 | struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx); | ||
444 | |||
445 | if (!msg) | ||
446 | return; | ||
447 | |||
448 | msg->reg[0] = xdevcap ? MHL_READ_XDEVCAP : MHL_READ_DEVCAP; | ||
449 | msg->send = sii8620_mt_read_devcap_send; | ||
450 | msg->recv = sii8620_mt_read_devcap_recv; | ||
451 | } | ||
452 | |||
453 | static void sii8620_fetch_edid(struct sii8620 *ctx) | ||
454 | { | ||
455 | u8 lm_ddc, ddc_cmd, int3, cbus; | ||
456 | int fetched, i; | ||
457 | int edid_len = EDID_LENGTH; | ||
458 | u8 *edid; | ||
459 | |||
460 | sii8620_readb(ctx, REG_CBUS_STATUS); | ||
461 | lm_ddc = sii8620_readb(ctx, REG_LM_DDC); | ||
462 | ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD); | ||
463 | |||
464 | sii8620_write_seq(ctx, | ||
465 | REG_INTR9_MASK, 0, | ||
466 | REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO, | ||
467 | REG_HDCP2X_POLL_CS, 0x71, | ||
468 | REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX, | ||
469 | REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED, | ||
470 | ); | ||
471 | |||
472 | for (i = 0; i < 256; ++i) { | ||
473 | u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS); | ||
474 | |||
475 | if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG)) | ||
476 | break; | ||
477 | sii8620_write(ctx, REG_DDC_STATUS, | ||
478 | BIT_DDC_STATUS_DDC_FIFO_EMPTY); | ||
479 | } | ||
480 | |||
481 | sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1); | ||
482 | |||
483 | edid = kmalloc(EDID_LENGTH, GFP_KERNEL); | ||
484 | if (!edid) { | ||
485 | ctx->error = -ENOMEM; | ||
486 | return; | ||
487 | } | ||
488 | |||
489 | #define FETCH_SIZE 16 | ||
490 | for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) { | ||
491 | sii8620_readb(ctx, REG_DDC_STATUS); | ||
492 | sii8620_write_seq(ctx, | ||
493 | REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT, | ||
494 | REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO, | ||
495 | REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY | ||
496 | ); | ||
497 | sii8620_write_seq(ctx, | ||
498 | REG_DDC_SEGM, fetched >> 8, | ||
499 | REG_DDC_OFFSET, fetched & 0xff, | ||
500 | REG_DDC_DIN_CNT1, FETCH_SIZE, | ||
501 | REG_DDC_DIN_CNT2, 0, | ||
502 | REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK | ||
503 | ); | ||
504 | |||
505 | do { | ||
506 | int3 = sii8620_readb(ctx, REG_INTR3); | ||
507 | cbus = sii8620_readb(ctx, REG_CBUS_STATUS); | ||
508 | |||
509 | if (int3 & BIT_DDC_CMD_DONE) | ||
510 | break; | ||
511 | |||
512 | if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) { | ||
513 | kfree(edid); | ||
514 | edid = NULL; | ||
515 | goto end; | ||
516 | } | ||
517 | } while (1); | ||
518 | |||
519 | sii8620_readb(ctx, REG_DDC_STATUS); | ||
520 | while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE) | ||
521 | usleep_range(10, 20); | ||
522 | |||
523 | sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE); | ||
524 | if (fetched + FETCH_SIZE == EDID_LENGTH) { | ||
525 | u8 ext = ((struct edid *)edid)->extensions; | ||
526 | |||
527 | if (ext) { | ||
528 | u8 *new_edid; | ||
529 | |||
530 | edid_len += ext * EDID_LENGTH; | ||
531 | new_edid = krealloc(edid, edid_len, GFP_KERNEL); | ||
532 | if (!new_edid) { | ||
533 | kfree(edid); | ||
534 | ctx->error = -ENOMEM; | ||
535 | return; | ||
536 | } | ||
537 | edid = new_edid; | ||
538 | } | ||
539 | } | ||
540 | |||
541 | if (fetched + FETCH_SIZE == edid_len) | ||
542 | sii8620_write(ctx, REG_INTR3, int3); | ||
543 | } | ||
544 | |||
545 | sii8620_write(ctx, REG_LM_DDC, lm_ddc); | ||
546 | |||
547 | end: | ||
548 | kfree(ctx->edid); | ||
549 | ctx->edid = (struct edid *)edid; | ||
550 | } | ||
551 | |||
552 | static void sii8620_set_upstream_edid(struct sii8620 *ctx) | ||
553 | { | ||
554 | sii8620_setbits(ctx, REG_DPD, BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N | ||
555 | | BIT_DPD_PD_MHL_CLK_N, 0xff); | ||
556 | |||
557 | sii8620_write_seq_static(ctx, | ||
558 | REG_RX_HDMI_CTRL3, 0x00, | ||
559 | REG_PKT_FILTER_0, 0xFF, | ||
560 | REG_PKT_FILTER_1, 0xFF, | ||
561 | REG_ALICE0_BW_I2C, 0x06 | ||
562 | ); | ||
563 | |||
564 | sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER, | ||
565 | BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff); | ||
566 | |||
567 | sii8620_write_seq_static(ctx, | ||
568 | REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO | ||
569 | | BIT_EDID_CTRL_EDID_MODE_EN, | ||
570 | REG_EDID_FIFO_ADDR, 0, | ||
571 | ); | ||
572 | |||
573 | sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid, | ||
574 | (ctx->edid->extensions + 1) * EDID_LENGTH); | ||
575 | |||
576 | sii8620_write_seq_static(ctx, | ||
577 | REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID | ||
578 | | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO | ||
579 | | BIT_EDID_CTRL_EDID_MODE_EN, | ||
580 | REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE, | ||
581 | REG_INTR9_MASK, 0 | ||
582 | ); | ||
583 | } | ||
584 | |||
585 | static void sii8620_xtal_set_rate(struct sii8620 *ctx) | ||
586 | { | ||
587 | static const struct { | ||
588 | unsigned int rate; | ||
589 | u8 div; | ||
590 | u8 tp1; | ||
591 | } rates[] = { | ||
592 | { 19200, 0x04, 0x53 }, | ||
593 | { 20000, 0x04, 0x62 }, | ||
594 | { 24000, 0x05, 0x75 }, | ||
595 | { 30000, 0x06, 0x92 }, | ||
596 | { 38400, 0x0c, 0xbc }, | ||
597 | }; | ||
598 | unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000; | ||
599 | int i; | ||
600 | |||
601 | for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i) | ||
602 | if (rate <= rates[i].rate) | ||
603 | break; | ||
604 | |||
605 | if (rate != rates[i].rate) | ||
606 | dev_err(ctx->dev, "xtal clock rate(%lukHz) not supported, setting MHL for %ukHz.\n", | ||
607 | rate, rates[i].rate); | ||
608 | |||
609 | sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div); | ||
610 | sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1); | ||
611 | } | ||
612 | |||
613 | static int sii8620_hw_on(struct sii8620 *ctx) | ||
614 | { | ||
615 | int ret; | ||
616 | |||
617 | ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); | ||
618 | if (ret) | ||
619 | return ret; | ||
620 | usleep_range(10000, 20000); | ||
621 | return clk_prepare_enable(ctx->clk_xtal); | ||
622 | } | ||
623 | |||
624 | static int sii8620_hw_off(struct sii8620 *ctx) | ||
625 | { | ||
626 | clk_disable_unprepare(ctx->clk_xtal); | ||
627 | gpiod_set_value(ctx->gpio_reset, 1); | ||
628 | return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); | ||
629 | } | ||
630 | |||
631 | static void sii8620_hw_reset(struct sii8620 *ctx) | ||
632 | { | ||
633 | usleep_range(10000, 20000); | ||
634 | gpiod_set_value(ctx->gpio_reset, 0); | ||
635 | usleep_range(5000, 20000); | ||
636 | gpiod_set_value(ctx->gpio_reset, 1); | ||
637 | usleep_range(10000, 20000); | ||
638 | gpiod_set_value(ctx->gpio_reset, 0); | ||
639 | msleep(300); | ||
640 | } | ||
641 | |||
642 | static void sii8620_cbus_reset(struct sii8620 *ctx) | ||
643 | { | ||
644 | sii8620_write_seq_static(ctx, | ||
645 | REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST | ||
646 | | BIT_PWD_SRST_CBUS_RST_SW_EN, | ||
647 | REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN | ||
648 | ); | ||
649 | } | ||
650 | |||
651 | static void sii8620_set_auto_zone(struct sii8620 *ctx) | ||
652 | { | ||
653 | if (ctx->mode != CM_MHL1) { | ||
654 | sii8620_write_seq_static(ctx, | ||
655 | REG_TX_ZONE_CTL1, 0x0, | ||
656 | REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X | ||
657 | | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL | ||
658 | | BIT_MHL_PLL_CTL0_ZONE_MASK_OE | ||
659 | ); | ||
660 | } else { | ||
661 | sii8620_write_seq_static(ctx, | ||
662 | REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE, | ||
663 | REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X | ||
664 | | BIT_MHL_PLL_CTL0_ZONE_MASK_OE | ||
665 | ); | ||
666 | } | ||
667 | } | ||
668 | |||
669 | static void sii8620_stop_video(struct sii8620 *ctx) | ||
670 | { | ||
671 | u8 uninitialized_var(val); | ||
672 | |||
673 | sii8620_write_seq_static(ctx, | ||
674 | REG_TPI_INTR_EN, 0, | ||
675 | REG_HDCP2X_INTR0_MASK, 0, | ||
676 | REG_TPI_COPP_DATA2, 0, | ||
677 | REG_TPI_INTR_ST0, ~0, | ||
678 | ); | ||
679 | |||
680 | switch (ctx->sink_type) { | ||
681 | case SINK_DVI: | ||
682 | val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN | ||
683 | | BIT_TPI_SC_TPI_AV_MUTE; | ||
684 | break; | ||
685 | case SINK_HDMI: | ||
686 | val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN | ||
687 | | BIT_TPI_SC_TPI_AV_MUTE | ||
688 | | BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI; | ||
689 | break; | ||
690 | default: | ||
691 | return; | ||
692 | } | ||
693 | |||
694 | sii8620_write(ctx, REG_TPI_SC, val); | ||
695 | } | ||
696 | |||
697 | static void sii8620_start_hdmi(struct sii8620 *ctx) | ||
698 | { | ||
699 | sii8620_write_seq_static(ctx, | ||
700 | REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL | ||
701 | | BIT_RX_HDMI_CTRL2_USE_AV_MUTE, | ||
702 | REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE | ||
703 | | BIT_VID_OVRRD_M1080P_OVRRD, | ||
704 | REG_VID_MODE, 0, | ||
705 | REG_MHL_TOP_CTL, 0x1, | ||
706 | REG_MHLTX_CTL6, 0xa0, | ||
707 | REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), | ||
708 | REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL), | ||
709 | ); | ||
710 | |||
711 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | ||
712 | MHL_DST_LM_CLK_MODE_NORMAL | | ||
713 | MHL_DST_LM_PATH_ENABLED); | ||
714 | |||
715 | sii8620_set_auto_zone(ctx); | ||
716 | |||
717 | sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI); | ||
718 | |||
719 | sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif, | ||
720 | ARRAY_SIZE(ctx->avif)); | ||
721 | |||
722 | sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2); | ||
723 | } | ||
724 | |||
725 | static void sii8620_start_video(struct sii8620 *ctx) | ||
726 | { | ||
727 | if (ctx->mode < CM_MHL3) | ||
728 | sii8620_stop_video(ctx); | ||
729 | |||
730 | switch (ctx->sink_type) { | ||
731 | case SINK_HDMI: | ||
732 | sii8620_start_hdmi(ctx); | ||
733 | break; | ||
734 | case SINK_DVI: | ||
735 | default: | ||
736 | break; | ||
737 | } | ||
738 | } | ||
739 | |||
740 | static void sii8620_disable_hpd(struct sii8620 *ctx) | ||
741 | { | ||
742 | sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0); | ||
743 | sii8620_write_seq_static(ctx, | ||
744 | REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN, | ||
745 | REG_INTR8_MASK, 0 | ||
746 | ); | ||
747 | } | ||
748 | |||
749 | static void sii8620_enable_hpd(struct sii8620 *ctx) | ||
750 | { | ||
751 | sii8620_setbits(ctx, REG_TMDS_CSTAT_P3, | ||
752 | BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS | ||
753 | | BIT_TMDS_CSTAT_P3_CLR_AVI, ~0); | ||
754 | sii8620_write_seq_static(ctx, | ||
755 | REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN | ||
756 | | BIT_HPD_CTRL_HPD_HIGH, | ||
757 | ); | ||
758 | } | ||
759 | |||
760 | static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx) | ||
761 | { | ||
762 | if (ctx->gen2_write_burst) | ||
763 | return; | ||
764 | |||
765 | sii8620_write_seq_static(ctx, | ||
766 | REG_MDT_RCV_TIMEOUT, 100, | ||
767 | REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN | ||
768 | ); | ||
769 | ctx->gen2_write_burst = 1; | ||
770 | } | ||
771 | |||
772 | static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx) | ||
773 | { | ||
774 | if (!ctx->gen2_write_burst) | ||
775 | return; | ||
776 | |||
777 | sii8620_write_seq_static(ctx, | ||
778 | REG_MDT_XMIT_CTRL, 0, | ||
779 | REG_MDT_RCV_CTRL, 0 | ||
780 | ); | ||
781 | ctx->gen2_write_burst = 0; | ||
782 | } | ||
783 | |||
784 | static void sii8620_start_gen2_write_burst(struct sii8620 *ctx) | ||
785 | { | ||
786 | sii8620_write_seq_static(ctx, | ||
787 | REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT | ||
788 | | BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR | ||
789 | | BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD | ||
790 | | BIT_MDT_XMIT_SM_ERROR, | ||
791 | REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY | ||
792 | | BIT_MDT_IDLE_AFTER_HAWB_DISABLE | ||
793 | | BIT_MDT_RFIFO_DATA_RDY | ||
794 | ); | ||
795 | sii8620_enable_gen2_write_burst(ctx); | ||
796 | } | ||
797 | |||
798 | static void sii8620_mhl_discover(struct sii8620 *ctx) | ||
799 | { | ||
800 | sii8620_write_seq_static(ctx, | ||
801 | REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | ||
802 | | BIT_DISC_CTRL9_DISC_PULSE_PROCEED, | ||
803 | REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K), | ||
804 | REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT | ||
805 | | BIT_MHL_EST_INT | ||
806 | | BIT_NOT_MHL_EST_INT | ||
807 | | BIT_CBUS_MHL3_DISCON_INT | ||
808 | | BIT_CBUS_MHL12_DISCON_INT | ||
809 | | BIT_RGND_READY_INT, | ||
810 | REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X | ||
811 | | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL | ||
812 | | BIT_MHL_PLL_CTL0_ZONE_MASK_OE, | ||
813 | REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE | ||
814 | | BIT_MHL_DP_CTL0_TX_OE_OVR, | ||
815 | REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, | ||
816 | REG_MHL_DP_CTL1, 0xA2, | ||
817 | REG_MHL_DP_CTL2, 0x03, | ||
818 | REG_MHL_DP_CTL3, 0x35, | ||
819 | REG_MHL_DP_CTL5, 0x02, | ||
820 | REG_MHL_DP_CTL6, 0x02, | ||
821 | REG_MHL_DP_CTL7, 0x03, | ||
822 | REG_COC_CTLC, 0xFF, | ||
823 | REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | ||
824 | | BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC, | ||
825 | REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE | ||
826 | | BIT_COC_CALIBRATION_DONE, | ||
827 | REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD | ||
828 | | BIT_CBUS_CMD_ABORT, | ||
829 | REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE | ||
830 | | BIT_CBUS_HPD_CHG | ||
831 | | BIT_CBUS_MSC_MR_WRITE_STAT | ||
832 | | BIT_CBUS_MSC_MR_MSC_MSG | ||
833 | | BIT_CBUS_MSC_MR_WRITE_BURST | ||
834 | | BIT_CBUS_MSC_MR_SET_INT | ||
835 | | BIT_CBUS_MSC_MT_DONE_NACK | ||
836 | ); | ||
837 | } | ||
838 | |||
839 | static void sii8620_peer_specific_init(struct sii8620 *ctx) | ||
840 | { | ||
841 | if (ctx->mode == CM_MHL3) | ||
842 | sii8620_write_seq_static(ctx, | ||
843 | REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD, | ||
844 | REG_EMSCINTRMASK1, | ||
845 | BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR | ||
846 | ); | ||
847 | else | ||
848 | sii8620_write_seq_static(ctx, | ||
849 | REG_HDCP2X_INTR0_MASK, 0x00, | ||
850 | REG_EMSCINTRMASK1, 0x00, | ||
851 | REG_HDCP2X_INTR0, 0xFF, | ||
852 | REG_INTR1, 0xFF, | ||
853 | REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD | ||
854 | | BIT_SYS_CTRL1_TX_CTRL_HDMI | ||
855 | ); | ||
856 | } | ||
857 | |||
858 | #define SII8620_MHL_VERSION 0x32 | ||
859 | #define SII8620_SCRATCHPAD_SIZE 16 | ||
860 | #define SII8620_INT_STAT_SIZE 0x33 | ||
861 | |||
862 | static void sii8620_set_dev_cap(struct sii8620 *ctx) | ||
863 | { | ||
864 | static const u8 devcap[MHL_DCAP_SIZE] = { | ||
865 | [MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION, | ||
866 | [MHL_DCAP_CAT] = MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER, | ||
867 | [MHL_DCAP_ADOPTER_ID_H] = 0x01, | ||
868 | [MHL_DCAP_ADOPTER_ID_L] = 0x41, | ||
869 | [MHL_DCAP_VID_LINK_MODE] = MHL_DCAP_VID_LINK_RGB444 | ||
870 | | MHL_DCAP_VID_LINK_PPIXEL | ||
871 | | MHL_DCAP_VID_LINK_16BPP, | ||
872 | [MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH, | ||
873 | [MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS, | ||
874 | [MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI, | ||
875 | [MHL_DCAP_BANDWIDTH] = 0x0f, | ||
876 | [MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT | ||
877 | | MHL_DCAP_FEATURE_RAP_SUPPORT | ||
878 | | MHL_DCAP_FEATURE_SP_SUPPORT, | ||
879 | [MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE, | ||
880 | [MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE, | ||
881 | }; | ||
882 | static const u8 xdcap[MHL_XDC_SIZE] = { | ||
883 | [MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075 | ||
884 | | MHL_XDC_ECBUS_S_8BIT, | ||
885 | [MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150 | ||
886 | | MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600, | ||
887 | [MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST, | ||
888 | [MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE, | ||
889 | }; | ||
890 | |||
891 | sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap)); | ||
892 | sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap)); | ||
893 | } | ||
894 | |||
895 | static void sii8620_mhl_init(struct sii8620 *ctx) | ||
896 | { | ||
897 | sii8620_write_seq_static(ctx, | ||
898 | REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), | ||
899 | REG_CBUS_MSC_COMPAT_CTRL, | ||
900 | BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN, | ||
901 | ); | ||
902 | |||
903 | sii8620_peer_specific_init(ctx); | ||
904 | |||
905 | sii8620_disable_hpd(ctx); | ||
906 | |||
907 | sii8620_write_seq_static(ctx, | ||
908 | REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO, | ||
909 | REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | ||
910 | | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, | ||
911 | REG_TMDS0_CCTRL1, 0x90, | ||
912 | REG_TMDS_CLK_EN, 0x01, | ||
913 | REG_TMDS_CH_EN, 0x11, | ||
914 | REG_BGR_BIAS, 0x87, | ||
915 | REG_ALICE0_ZONE_CTRL, 0xE8, | ||
916 | REG_ALICE0_MODE_CTRL, 0x04, | ||
917 | ); | ||
918 | sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0); | ||
919 | sii8620_write_seq_static(ctx, | ||
920 | REG_TPI_HW_OPT3, 0x76, | ||
921 | REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE, | ||
922 | REG_TPI_DTD_B2, 79, | ||
923 | ); | ||
924 | sii8620_set_dev_cap(ctx); | ||
925 | sii8620_write_seq_static(ctx, | ||
926 | REG_MDT_XMIT_TIMEOUT, 100, | ||
927 | REG_MDT_XMIT_CTRL, 0x03, | ||
928 | REG_MDT_XFIFO_STAT, 0x00, | ||
929 | REG_MDT_RCV_TIMEOUT, 100, | ||
930 | REG_CBUS_LINK_CTRL_8, 0x1D, | ||
931 | ); | ||
932 | |||
933 | sii8620_start_gen2_write_burst(ctx); | ||
934 | sii8620_write_seq_static(ctx, | ||
935 | REG_BIST_CTRL, 0x00, | ||
936 | REG_COC_CTL1, 0x10, | ||
937 | REG_COC_CTL2, 0x18, | ||
938 | REG_COC_CTLF, 0x07, | ||
939 | REG_COC_CTL11, 0xF8, | ||
940 | REG_COC_CTL17, 0x61, | ||
941 | REG_COC_CTL18, 0x46, | ||
942 | REG_COC_CTL19, 0x15, | ||
943 | REG_COC_CTL1A, 0x01, | ||
944 | REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN, | ||
945 | REG_MHL_COC_CTL4, 0x2D, | ||
946 | REG_MHL_COC_CTL5, 0xF9, | ||
947 | REG_MSC_HEARTBEAT_CTRL, 0x27, | ||
948 | ); | ||
949 | sii8620_disable_gen2_write_burst(ctx); | ||
950 | |||
951 | /* currently MHL3 is not supported, so we force version to 0 */ | ||
952 | sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0); | ||
953 | sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY), | ||
954 | MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP | ||
955 | | MHL_DST_CONN_POW_STAT); | ||
956 | sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG); | ||
957 | } | ||
958 | |||
959 | static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode) | ||
960 | { | ||
961 | if (ctx->mode == mode) | ||
962 | return; | ||
963 | |||
964 | ctx->mode = mode; | ||
965 | |||
966 | switch (mode) { | ||
967 | case CM_MHL1: | ||
968 | sii8620_write_seq_static(ctx, | ||
969 | REG_CBUS_MSC_COMPAT_CTRL, 0x02, | ||
970 | REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE, | ||
971 | REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | ||
972 | | BIT_DPD_OSC_EN, | ||
973 | REG_COC_INTR_MASK, 0 | ||
974 | ); | ||
975 | break; | ||
976 | case CM_MHL3: | ||
977 | sii8620_write_seq_static(ctx, | ||
978 | REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, | ||
979 | REG_COC_CTL0, 0x40, | ||
980 | REG_MHL_COC_CTL1, 0x07 | ||
981 | ); | ||
982 | break; | ||
983 | case CM_DISCONNECTED: | ||
984 | break; | ||
985 | default: | ||
986 | dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode); | ||
987 | break; | ||
988 | } | ||
989 | |||
990 | sii8620_set_auto_zone(ctx); | ||
991 | |||
992 | if (mode != CM_MHL1) | ||
993 | return; | ||
994 | |||
995 | sii8620_write_seq_static(ctx, | ||
996 | REG_MHL_DP_CTL0, 0xBC, | ||
997 | REG_MHL_DP_CTL1, 0xBB, | ||
998 | REG_MHL_DP_CTL3, 0x48, | ||
999 | REG_MHL_DP_CTL5, 0x39, | ||
1000 | REG_MHL_DP_CTL2, 0x2A, | ||
1001 | REG_MHL_DP_CTL6, 0x2A, | ||
1002 | REG_MHL_DP_CTL7, 0x08 | ||
1003 | ); | ||
1004 | } | ||
1005 | |||
1006 | static void sii8620_disconnect(struct sii8620 *ctx) | ||
1007 | { | ||
1008 | sii8620_disable_gen2_write_burst(ctx); | ||
1009 | sii8620_stop_video(ctx); | ||
1010 | msleep(50); | ||
1011 | sii8620_cbus_reset(ctx); | ||
1012 | sii8620_set_mode(ctx, CM_DISCONNECTED); | ||
1013 | sii8620_write_seq_static(ctx, | ||
1014 | REG_COC_CTL0, 0x40, | ||
1015 | REG_CBUS3_CNVT, 0x84, | ||
1016 | REG_COC_CTL14, 0x00, | ||
1017 | REG_COC_CTL0, 0x40, | ||
1018 | REG_HRXCTRL3, 0x07, | ||
1019 | REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X | ||
1020 | | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL | ||
1021 | | BIT_MHL_PLL_CTL0_ZONE_MASK_OE, | ||
1022 | REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE | ||
1023 | | BIT_MHL_DP_CTL0_TX_OE_OVR, | ||
1024 | REG_MHL_DP_CTL1, 0xBB, | ||
1025 | REG_MHL_DP_CTL3, 0x48, | ||
1026 | REG_MHL_DP_CTL5, 0x3F, | ||
1027 | REG_MHL_DP_CTL2, 0x2F, | ||
1028 | REG_MHL_DP_CTL6, 0x2A, | ||
1029 | REG_MHL_DP_CTL7, 0x03 | ||
1030 | ); | ||
1031 | sii8620_disable_hpd(ctx); | ||
1032 | sii8620_write_seq_static(ctx, | ||
1033 | REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, | ||
1034 | REG_MHL_COC_CTL1, 0x07, | ||
1035 | REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), | ||
1036 | REG_DISC_CTRL8, 0x00, | ||
1037 | REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | ||
1038 | | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, | ||
1039 | REG_INT_CTRL, 0x00, | ||
1040 | REG_MSC_HEARTBEAT_CTRL, 0x27, | ||
1041 | REG_DISC_CTRL1, 0x25, | ||
1042 | REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT, | ||
1043 | REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT, | ||
1044 | REG_MDT_INT_1, 0xff, | ||
1045 | REG_MDT_INT_1_MASK, 0x00, | ||
1046 | REG_MDT_INT_0, 0xff, | ||
1047 | REG_MDT_INT_0_MASK, 0x00, | ||
1048 | REG_COC_INTR, 0xff, | ||
1049 | REG_COC_INTR_MASK, 0x00, | ||
1050 | REG_TRXINTH, 0xff, | ||
1051 | REG_TRXINTMH, 0x00, | ||
1052 | REG_CBUS_INT_0, 0xff, | ||
1053 | REG_CBUS_INT_0_MASK, 0x00, | ||
1054 | REG_CBUS_INT_1, 0xff, | ||
1055 | REG_CBUS_INT_1_MASK, 0x00, | ||
1056 | REG_EMSCINTR, 0xff, | ||
1057 | REG_EMSCINTRMASK, 0x00, | ||
1058 | REG_EMSCINTR1, 0xff, | ||
1059 | REG_EMSCINTRMASK1, 0x00, | ||
1060 | REG_INTR8, 0xff, | ||
1061 | REG_INTR8_MASK, 0x00, | ||
1062 | REG_TPI_INTR_ST0, 0xff, | ||
1063 | REG_TPI_INTR_EN, 0x00, | ||
1064 | REG_HDCP2X_INTR0, 0xff, | ||
1065 | REG_HDCP2X_INTR0_MASK, 0x00, | ||
1066 | REG_INTR9, 0xff, | ||
1067 | REG_INTR9_MASK, 0x00, | ||
1068 | REG_INTR3, 0xff, | ||
1069 | REG_INTR3_MASK, 0x00, | ||
1070 | REG_INTR5, 0xff, | ||
1071 | REG_INTR5_MASK, 0x00, | ||
1072 | REG_INTR2, 0xff, | ||
1073 | REG_INTR2_MASK, 0x00, | ||
1074 | ); | ||
1075 | memset(ctx->stat, 0, sizeof(ctx->stat)); | ||
1076 | memset(ctx->xstat, 0, sizeof(ctx->xstat)); | ||
1077 | memset(ctx->devcap, 0, sizeof(ctx->devcap)); | ||
1078 | memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap)); | ||
1079 | ctx->cbus_status = 0; | ||
1080 | ctx->sink_type = SINK_NONE; | ||
1081 | kfree(ctx->edid); | ||
1082 | ctx->edid = NULL; | ||
1083 | sii8620_mt_cleanup(ctx); | ||
1084 | } | ||
1085 | |||
1086 | static void sii8620_mhl_disconnected(struct sii8620 *ctx) | ||
1087 | { | ||
1088 | sii8620_write_seq_static(ctx, | ||
1089 | REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), | ||
1090 | REG_CBUS_MSC_COMPAT_CTRL, | ||
1091 | BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN | ||
1092 | ); | ||
1093 | sii8620_disconnect(ctx); | ||
1094 | } | ||
1095 | |||
1096 | static void sii8620_irq_disc(struct sii8620 *ctx) | ||
1097 | { | ||
1098 | u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0); | ||
1099 | |||
1100 | if (stat & VAL_CBUS_MHL_DISCON) | ||
1101 | sii8620_mhl_disconnected(ctx); | ||
1102 | |||
1103 | if (stat & BIT_RGND_READY_INT) { | ||
1104 | u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2); | ||
1105 | |||
1106 | if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) { | ||
1107 | sii8620_mhl_discover(ctx); | ||
1108 | } else { | ||
1109 | sii8620_write_seq_static(ctx, | ||
1110 | REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | ||
1111 | | BIT_DISC_CTRL9_NOMHL_EST | ||
1112 | | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, | ||
1113 | REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT | ||
1114 | | BIT_CBUS_MHL3_DISCON_INT | ||
1115 | | BIT_CBUS_MHL12_DISCON_INT | ||
1116 | | BIT_NOT_MHL_EST_INT | ||
1117 | ); | ||
1118 | } | ||
1119 | } | ||
1120 | if (stat & BIT_MHL_EST_INT) | ||
1121 | sii8620_mhl_init(ctx); | ||
1122 | |||
1123 | sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat); | ||
1124 | } | ||
1125 | |||
1126 | static void sii8620_irq_g2wb(struct sii8620 *ctx) | ||
1127 | { | ||
1128 | u8 stat = sii8620_readb(ctx, REG_MDT_INT_0); | ||
1129 | |||
1130 | if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE) | ||
1131 | dev_dbg(ctx->dev, "HAWB idle\n"); | ||
1132 | |||
1133 | sii8620_write(ctx, REG_MDT_INT_0, stat); | ||
1134 | } | ||
1135 | |||
1136 | static void sii8620_status_changed_dcap(struct sii8620 *ctx) | ||
1137 | { | ||
1138 | if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) { | ||
1139 | sii8620_set_mode(ctx, CM_MHL1); | ||
1140 | sii8620_peer_specific_init(ctx); | ||
1141 | sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | ||
1142 | | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR); | ||
1143 | } | ||
1144 | } | ||
1145 | |||
1146 | static void sii8620_status_changed_path(struct sii8620 *ctx) | ||
1147 | { | ||
1148 | if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) { | ||
1149 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | ||
1150 | MHL_DST_LM_CLK_MODE_NORMAL | ||
1151 | | MHL_DST_LM_PATH_ENABLED); | ||
1152 | sii8620_mt_read_devcap(ctx, false); | ||
1153 | } else { | ||
1154 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | ||
1155 | MHL_DST_LM_CLK_MODE_NORMAL); | ||
1156 | } | ||
1157 | } | ||
1158 | |||
1159 | static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) | ||
1160 | { | ||
1161 | u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE]; | ||
1162 | |||
1163 | sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE); | ||
1164 | sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE); | ||
1165 | |||
1166 | sii8620_update_array(ctx->stat, st, MHL_DST_SIZE); | ||
1167 | sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE); | ||
1168 | |||
1169 | if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) | ||
1170 | sii8620_status_changed_dcap(ctx); | ||
1171 | |||
1172 | if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) | ||
1173 | sii8620_status_changed_path(ctx); | ||
1174 | } | ||
1175 | |||
1176 | static void sii8620_msc_mr_set_int(struct sii8620 *ctx) | ||
1177 | { | ||
1178 | u8 ints[MHL_INT_SIZE]; | ||
1179 | |||
1180 | sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE); | ||
1181 | sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE); | ||
1182 | } | ||
1183 | |||
1184 | static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx) | ||
1185 | { | ||
1186 | struct device *dev = ctx->dev; | ||
1187 | |||
1188 | if (list_empty(&ctx->mt_queue)) { | ||
1189 | dev_err(dev, "unexpected MSC MT response\n"); | ||
1190 | return NULL; | ||
1191 | } | ||
1192 | |||
1193 | return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node); | ||
1194 | } | ||
1195 | |||
1196 | static void sii8620_msc_mt_done(struct sii8620 *ctx) | ||
1197 | { | ||
1198 | struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx); | ||
1199 | |||
1200 | if (!msg) | ||
1201 | return; | ||
1202 | |||
1203 | msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0); | ||
1204 | ctx->mt_state = MT_STATE_DONE; | ||
1205 | } | ||
1206 | |||
1207 | static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx) | ||
1208 | { | ||
1209 | struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx); | ||
1210 | u8 buf[2]; | ||
1211 | |||
1212 | if (!msg) | ||
1213 | return; | ||
1214 | |||
1215 | sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2); | ||
1216 | |||
1217 | switch (buf[0]) { | ||
1218 | case MHL_MSC_MSG_RAPK: | ||
1219 | msg->ret = buf[1]; | ||
1220 | ctx->mt_state = MT_STATE_DONE; | ||
1221 | break; | ||
1222 | default: | ||
1223 | dev_err(ctx->dev, "%s message type %d,%d not supported\n", | ||
1224 | __func__, buf[0], buf[1]); | ||
1225 | } | ||
1226 | } | ||
1227 | |||
1228 | static void sii8620_irq_msc(struct sii8620 *ctx) | ||
1229 | { | ||
1230 | u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0); | ||
1231 | |||
1232 | if (stat & ~BIT_CBUS_HPD_CHG) | ||
1233 | sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG); | ||
1234 | |||
1235 | if (stat & BIT_CBUS_HPD_CHG) { | ||
1236 | u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS); | ||
1237 | |||
1238 | if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) { | ||
1239 | sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG); | ||
1240 | } else { | ||
1241 | stat ^= BIT_CBUS_STATUS_CBUS_HPD; | ||
1242 | cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD; | ||
1243 | } | ||
1244 | ctx->cbus_status = cbus_stat; | ||
1245 | } | ||
1246 | |||
1247 | if (stat & BIT_CBUS_MSC_MR_WRITE_STAT) | ||
1248 | sii8620_msc_mr_write_stat(ctx); | ||
1249 | |||
1250 | if (stat & BIT_CBUS_MSC_MR_SET_INT) | ||
1251 | sii8620_msc_mr_set_int(ctx); | ||
1252 | |||
1253 | if (stat & BIT_CBUS_MSC_MT_DONE) | ||
1254 | sii8620_msc_mt_done(ctx); | ||
1255 | |||
1256 | if (stat & BIT_CBUS_MSC_MR_MSC_MSG) | ||
1257 | sii8620_msc_mr_msc_msg(ctx); | ||
1258 | } | ||
1259 | |||
1260 | static void sii8620_irq_coc(struct sii8620 *ctx) | ||
1261 | { | ||
1262 | u8 stat = sii8620_readb(ctx, REG_COC_INTR); | ||
1263 | |||
1264 | sii8620_write(ctx, REG_COC_INTR, stat); | ||
1265 | } | ||
1266 | |||
1267 | static void sii8620_irq_merr(struct sii8620 *ctx) | ||
1268 | { | ||
1269 | u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1); | ||
1270 | |||
1271 | sii8620_write(ctx, REG_CBUS_INT_1, stat); | ||
1272 | } | ||
1273 | |||
1274 | static void sii8620_irq_edid(struct sii8620 *ctx) | ||
1275 | { | ||
1276 | u8 stat = sii8620_readb(ctx, REG_INTR9); | ||
1277 | |||
1278 | sii8620_write(ctx, REG_INTR9, stat); | ||
1279 | |||
1280 | if (stat & BIT_INTR9_DEVCAP_DONE) | ||
1281 | ctx->mt_state = MT_STATE_DONE; | ||
1282 | } | ||
1283 | |||
1284 | static void sii8620_scdt_high(struct sii8620 *ctx) | ||
1285 | { | ||
1286 | sii8620_write_seq_static(ctx, | ||
1287 | REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI, | ||
1288 | REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI, | ||
1289 | ); | ||
1290 | } | ||
1291 | |||
1292 | static void sii8620_scdt_low(struct sii8620 *ctx) | ||
1293 | { | ||
1294 | sii8620_write(ctx, REG_TMDS_CSTAT_P3, | ||
1295 | BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS | | ||
1296 | BIT_TMDS_CSTAT_P3_CLR_AVI); | ||
1297 | |||
1298 | sii8620_stop_video(ctx); | ||
1299 | |||
1300 | sii8620_write(ctx, REG_INTR8_MASK, 0); | ||
1301 | } | ||
1302 | |||
1303 | static void sii8620_irq_scdt(struct sii8620 *ctx) | ||
1304 | { | ||
1305 | u8 stat = sii8620_readb(ctx, REG_INTR5); | ||
1306 | |||
1307 | if (stat & BIT_INTR_SCDT_CHANGE) { | ||
1308 | u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3); | ||
1309 | |||
1310 | if (cstat & BIT_TMDS_CSTAT_P3_SCDT) | ||
1311 | sii8620_scdt_high(ctx); | ||
1312 | else | ||
1313 | sii8620_scdt_low(ctx); | ||
1314 | } | ||
1315 | |||
1316 | sii8620_write(ctx, REG_INTR5, stat); | ||
1317 | } | ||
1318 | |||
1319 | static void sii8620_new_vsi(struct sii8620 *ctx) | ||
1320 | { | ||
1321 | u8 vsif[11]; | ||
1322 | |||
1323 | sii8620_write(ctx, REG_RX_HDMI_CTRL2, | ||
1324 | VAL_RX_HDMI_CTRL2_DEFVAL | | ||
1325 | BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI); | ||
1326 | sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif, | ||
1327 | ARRAY_SIZE(vsif)); | ||
1328 | } | ||
1329 | |||
1330 | static void sii8620_new_avi(struct sii8620 *ctx) | ||
1331 | { | ||
1332 | sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL); | ||
1333 | sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif, | ||
1334 | ARRAY_SIZE(ctx->avif)); | ||
1335 | } | ||
1336 | |||
1337 | static void sii8620_irq_infr(struct sii8620 *ctx) | ||
1338 | { | ||
1339 | u8 stat = sii8620_readb(ctx, REG_INTR8) | ||
1340 | & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI); | ||
1341 | |||
1342 | sii8620_write(ctx, REG_INTR8, stat); | ||
1343 | |||
1344 | if (stat & BIT_CEA_NEW_VSI) | ||
1345 | sii8620_new_vsi(ctx); | ||
1346 | |||
1347 | if (stat & BIT_CEA_NEW_AVI) | ||
1348 | sii8620_new_avi(ctx); | ||
1349 | |||
1350 | if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI)) | ||
1351 | sii8620_start_video(ctx); | ||
1352 | } | ||
1353 | |||
1354 | /* endian agnostic, non-volatile version of test_bit */ | ||
1355 | static bool sii8620_test_bit(unsigned int nr, const u8 *addr) | ||
1356 | { | ||
1357 | return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE)); | ||
1358 | } | ||
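
The BIT_FAST_INTR_STAT_* constants defined in sil-sii8620.h are bit indexes into the LEN_FAST_INTR_STAT-byte block that sii8620_irq_thread() reads below, not masks for a single register; the helper above resolves such an index into a byte offset plus a bit position. A minimal standalone sketch of the same arithmetic, assuming BITS_PER_BYTE is 8 as in the kernel (names here are illustrative and not part of the driver):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

/* same arithmetic as sii8620_test_bit() */
static bool buf_test_bit(unsigned int nr, const unsigned char *addr)
{
	return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE));
}

int main(void)
{
	unsigned char stats[7] = { 0 };

	stats[6] = 1 << 3;	/* BIT_FAST_INTR_STAT_COC = 51 -> byte 6, bit 3 */
	printf("COC pending: %d\n", buf_test_bit(51, stats));	/* prints 1 */
	return 0;
}

So BIT_FAST_INTR_STAT_COC (51) lands in stats[6], bit 3, which is why a single multi-byte read starting at REG_FAST_INTR_STAT can cover interrupt sources spread across several hardware registers.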
1359 | |||
1360 | static irqreturn_t sii8620_irq_thread(int irq, void *data) | ||
1361 | { | ||
1362 | static const struct { | ||
1363 | int bit; | ||
1364 | void (*handler)(struct sii8620 *ctx); | ||
1365 | } irq_vec[] = { | ||
1366 | { BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc }, | ||
1367 | { BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb }, | ||
1368 | { BIT_FAST_INTR_STAT_COC, sii8620_irq_coc }, | ||
1369 | { BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc }, | ||
1370 | { BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr }, | ||
1371 | { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid }, | ||
1372 | { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt }, | ||
1373 | { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr }, | ||
1374 | }; | ||
1375 | struct sii8620 *ctx = data; | ||
1376 | u8 stats[LEN_FAST_INTR_STAT]; | ||
1377 | int i, ret; | ||
1378 | |||
1379 | mutex_lock(&ctx->lock); | ||
1380 | |||
1381 | sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats)); | ||
1382 | for (i = 0; i < ARRAY_SIZE(irq_vec); ++i) | ||
1383 | if (sii8620_test_bit(irq_vec[i].bit, stats)) | ||
1384 | irq_vec[i].handler(ctx); | ||
1385 | |||
1386 | sii8620_mt_work(ctx); | ||
1387 | |||
1388 | ret = sii8620_clear_error(ctx); | ||
1389 | if (ret) { | ||
1390 | dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret); | ||
1391 | sii8620_mhl_disconnected(ctx); | ||
1392 | } | ||
1393 | mutex_unlock(&ctx->lock); | ||
1394 | |||
1395 | return IRQ_HANDLED; | ||
1396 | } | ||
1397 | |||
1398 | static void sii8620_cable_in(struct sii8620 *ctx) | ||
1399 | { | ||
1400 | struct device *dev = ctx->dev; | ||
1401 | u8 ver[5]; | ||
1402 | int ret; | ||
1403 | |||
1404 | ret = sii8620_hw_on(ctx); | ||
1405 | if (ret) { | ||
1406 | dev_err(dev, "Error powering on, %d.\n", ret); | ||
1407 | return; | ||
1408 | } | ||
1409 | sii8620_hw_reset(ctx); | ||
1410 | |||
1411 | sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver)); | ||
1412 | ret = sii8620_clear_error(ctx); | ||
1413 | if (ret) { | ||
1414 | dev_err(dev, "Error accessing I2C bus, %d.\n", ret); | ||
1415 | return; | ||
1416 | } | ||
1417 | |||
1418 | dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0], | ||
1419 | ver[3], ver[2], ver[4]); | ||
1420 | |||
1421 | sii8620_write(ctx, REG_DPD, | ||
1422 | BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN); | ||
1423 | |||
1424 | sii8620_xtal_set_rate(ctx); | ||
1425 | sii8620_disconnect(ctx); | ||
1426 | |||
1427 | sii8620_write_seq_static(ctx, | ||
1428 | REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG | ||
1429 | | VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734, | ||
1430 | REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM, | ||
1431 | REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN, | ||
1432 | ); | ||
1433 | |||
1434 | ret = sii8620_clear_error(ctx); | ||
1435 | if (ret) { | ||
1436 | dev_err(dev, "Error accessing I2C bus, %d.\n", ret); | ||
1437 | return; | ||
1438 | } | ||
1439 | |||
1440 | enable_irq(to_i2c_client(ctx->dev)->irq); | ||
1441 | } | ||
1442 | |||
1443 | static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge) | ||
1444 | { | ||
1445 | return container_of(bridge, struct sii8620, bridge); | ||
1446 | } | ||
1447 | |||
1448 | static bool sii8620_mode_fixup(struct drm_bridge *bridge, | ||
1449 | const struct drm_display_mode *mode, | ||
1450 | struct drm_display_mode *adjusted_mode) | ||
1451 | { | ||
1452 | struct sii8620 *ctx = bridge_to_sii8620(bridge); | ||
1453 | bool ret = false; | ||
1454 | int max_clock = 74250; | ||
1455 | |||
1456 | mutex_lock(&ctx->lock); | ||
1457 | |||
1458 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1459 | goto out; | ||
1460 | |||
1461 | if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) | ||
1462 | max_clock = 300000; | ||
1463 | |||
1464 | ret = mode->clock <= max_clock; | ||
1465 | |||
1466 | out: | ||
1467 | mutex_unlock(&ctx->lock); | ||
1468 | |||
1469 | return ret; | ||
1470 | } | ||
1471 | |||
1472 | static const struct drm_bridge_funcs sii8620_bridge_funcs = { | ||
1473 | .mode_fixup = sii8620_mode_fixup, | ||
1474 | }; | ||
1475 | |||
1476 | static int sii8620_probe(struct i2c_client *client, | ||
1477 | const struct i2c_device_id *id) | ||
1478 | { | ||
1479 | struct device *dev = &client->dev; | ||
1480 | struct sii8620 *ctx; | ||
1481 | int ret; | ||
1482 | |||
1483 | ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); | ||
1484 | if (!ctx) | ||
1485 | return -ENOMEM; | ||
1486 | |||
1487 | ctx->dev = dev; | ||
1488 | mutex_init(&ctx->lock); | ||
1489 | INIT_LIST_HEAD(&ctx->mt_queue); | ||
1490 | |||
1491 | ctx->clk_xtal = devm_clk_get(dev, "xtal"); | ||
1492 | if (IS_ERR(ctx->clk_xtal)) { | ||
1493 | dev_err(dev, "failed to get xtal clock from DT\n"); | ||
1494 | return PTR_ERR(ctx->clk_xtal); | ||
1495 | } | ||
1496 | |||
1497 | if (!client->irq) { | ||
1498 | dev_err(dev, "no irq provided\n"); | ||
1499 | return -EINVAL; | ||
1500 | } | ||
1501 | irq_set_status_flags(client->irq, IRQ_NOAUTOEN); | ||
1502 | ret = devm_request_threaded_irq(dev, client->irq, NULL, | ||
1503 | sii8620_irq_thread, | ||
1504 | IRQF_TRIGGER_HIGH | IRQF_ONESHOT, | ||
1505 | "sii8620", ctx); | ||
1506 | |||
1507 | ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); | ||
1508 | if (IS_ERR(ctx->gpio_reset)) { | ||
1509 | dev_err(dev, "failed to get reset gpio from DT\n"); | ||
1510 | return PTR_ERR(ctx->gpio_reset); | ||
1511 | } | ||
1512 | |||
1513 | ctx->supplies[0].supply = "cvcc10"; | ||
1514 | ctx->supplies[1].supply = "iovcc18"; | ||
1515 | ret = devm_regulator_bulk_get(dev, 2, ctx->supplies); | ||
1516 | if (ret) | ||
1517 | return ret; | ||
1518 | |||
1519 | i2c_set_clientdata(client, ctx); | ||
1520 | |||
1521 | ctx->bridge.funcs = &sii8620_bridge_funcs; | ||
1522 | ctx->bridge.of_node = dev->of_node; | ||
1523 | drm_bridge_add(&ctx->bridge); | ||
1524 | |||
1525 | sii8620_cable_in(ctx); | ||
1526 | |||
1527 | return 0; | ||
1528 | } | ||
1529 | |||
1530 | static int sii8620_remove(struct i2c_client *client) | ||
1531 | { | ||
1532 | struct sii8620 *ctx = i2c_get_clientdata(client); | ||
1533 | |||
1534 | disable_irq(to_i2c_client(ctx->dev)->irq); | ||
1535 | drm_bridge_remove(&ctx->bridge); | ||
1536 | sii8620_hw_off(ctx); | ||
1537 | |||
1538 | return 0; | ||
1539 | } | ||
1540 | |||
1541 | static const struct of_device_id sii8620_dt_match[] = { | ||
1542 | { .compatible = "sil,sii8620" }, | ||
1543 | { }, | ||
1544 | }; | ||
1545 | MODULE_DEVICE_TABLE(of, sii8620_dt_match); | ||
1546 | |||
1547 | static const struct i2c_device_id sii8620_id[] = { | ||
1548 | { "sii8620", 0 }, | ||
1549 | { }, | ||
1550 | }; | ||
1551 | |||
1552 | MODULE_DEVICE_TABLE(i2c, sii8620_id); | ||
1553 | static struct i2c_driver sii8620_driver = { | ||
1554 | .driver = { | ||
1555 | .name = "sii8620", | ||
1556 | .of_match_table = of_match_ptr(sii8620_dt_match), | ||
1557 | }, | ||
1558 | .probe = sii8620_probe, | ||
1559 | .remove = sii8620_remove, | ||
1560 | .id_table = sii8620_id, | ||
1561 | }; | ||
1562 | |||
1563 | module_i2c_driver(sii8620_driver); | ||
1564 | MODULE_LICENSE("GPL v2"); | ||
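
sii8620_mode_fixup() is the only drm_bridge callback the driver wires up at this point; it rejects interlaced modes and limits the pixel clock to 74250 kHz, or 300000 kHz when the sink's DEVCAP advertises packed-pixel support. A standalone sketch of that acceptance rule, with clock values in kHz as in struct drm_display_mode (not part of the kernel tree):

#include <stdbool.h>
#include <stdio.h>

/* mirrors the check in sii8620_mode_fixup() */
static bool sii8620_clock_ok(int clock_khz, bool interlaced, bool sink_packed_pixel)
{
	int max_clock = sink_packed_pixel ? 300000 : 74250;

	if (interlaced)
		return false;
	return clock_khz <= max_clock;
}

int main(void)
{
	/* 1080p60 (148.5 MHz) passes only when the sink supports packed pixel */
	printf("%d %d\n",
	       sii8620_clock_ok(148500, false, false),	/* 0 */
	       sii8620_clock_ok(148500, false, true));	/* 1 */
	return 0;
}

With these limits, 720p60 (74250 kHz) is always accepted, while 1080p60 (148500 kHz) passes only for packed-pixel capable sinks.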
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h new file mode 100644 index 000000000000..6ff616a4f6ce --- /dev/null +++ b/drivers/gpu/drm/bridge/sil-sii8620.h | |||
@@ -0,0 +1,1517 @@ | |||
1 | /* | ||
2 | * Registers of Silicon Image SiI8620 Mobile HD Transmitter | ||
3 | * | ||
4 | * Copyright (C) 2015, Samsung Electronics Co., Ltd. | ||
5 | * Andrzej Hajda <a.hajda@samsung.com> | ||
6 | * | ||
7 | * Based on MHL driver for Android devices. | ||
8 | * Copyright (C) 2013-2014 Silicon Image, Inc. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #ifndef __SIL_SII8620_H__ | ||
16 | #define __SIL_SII8620_H__ | ||
17 | |||
18 | /* Vendor ID Low byte, default value: 0x01 */ | ||
19 | #define REG_VND_IDL 0x0000 | ||
20 | |||
21 | /* Vendor ID High byte, default value: 0x00 */ | ||
22 | #define REG_VND_IDH 0x0001 | ||
23 | |||
24 | /* Device ID Low byte, default value: 0x60 */ | ||
25 | #define REG_DEV_IDL 0x0002 | ||
26 | |||
27 | /* Device ID High byte, default value: 0x86 */ | ||
28 | #define REG_DEV_IDH 0x0003 | ||
29 | |||
30 | /* Device Revision, default value: 0x10 */ | ||
31 | #define REG_DEV_REV 0x0004 | ||
32 | |||
33 | /* OTP DBYTE510, default value: 0x00 */ | ||
34 | #define REG_OTP_DBYTE510 0x0006 | ||
35 | |||
36 | /* System Control #1, default value: 0x00 */ | ||
37 | #define REG_SYS_CTRL1 0x0008 | ||
38 | #define BIT_SYS_CTRL1_OTPVMUTEOVR_SET BIT(7) | ||
39 | #define BIT_SYS_CTRL1_VSYNCPIN BIT(6) | ||
40 | #define BIT_SYS_CTRL1_OTPADROPOVR_SET BIT(5) | ||
41 | #define BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD BIT(4) | ||
42 | #define BIT_SYS_CTRL1_OTP2XVOVR_EN BIT(3) | ||
43 | #define BIT_SYS_CTRL1_OTP2XAOVR_EN BIT(2) | ||
44 | #define BIT_SYS_CTRL1_TX_CTRL_HDMI BIT(1) | ||
45 | #define BIT_SYS_CTRL1_OTPAMUTEOVR_SET BIT(0) | ||
46 | |||
47 | /* System Control DPD, default value: 0x90 */ | ||
48 | #define REG_DPD 0x000b | ||
49 | #define BIT_DPD_PWRON_PLL BIT(7) | ||
50 | #define BIT_DPD_PDNTX12 BIT(6) | ||
51 | #define BIT_DPD_PDNRX12 BIT(5) | ||
52 | #define BIT_DPD_OSC_EN BIT(4) | ||
53 | #define BIT_DPD_PWRON_HSIC BIT(3) | ||
54 | #define BIT_DPD_PDIDCK_N BIT(2) | ||
55 | #define BIT_DPD_PD_MHL_CLK_N BIT(1) | ||
56 | |||
57 | /* Dual link Control, default value: 0x00 */ | ||
58 | #define REG_DCTL 0x000d | ||
59 | #define BIT_DCTL_TDM_LCLK_PHASE BIT(7) | ||
60 | #define BIT_DCTL_HSIC_CLK_PHASE BIT(6) | ||
61 | #define BIT_DCTL_CTS_TCK_PHASE BIT(5) | ||
62 | #define BIT_DCTL_EXT_DDC_SEL BIT(4) | ||
63 | #define BIT_DCTL_TRANSCODE BIT(3) | ||
64 | #define BIT_DCTL_HSIC_RX_STROBE_PHASE BIT(2) | ||
65 | #define BIT_DCTL_HSIC_TX_BIST_START_SEL BIT(1) | ||
66 | #define BIT_DCTL_TCLKNX_PHASE BIT(0) | ||
67 | |||
68 | /* PWD Software Reset, default value: 0x20 */ | ||
69 | #define REG_PWD_SRST 0x000e | ||
70 | #define BIT_PWD_SRST_COC_DOC_RST BIT(7) | ||
71 | #define BIT_PWD_SRST_CBUS_RST_SW BIT(6) | ||
72 | #define BIT_PWD_SRST_CBUS_RST_SW_EN BIT(5) | ||
73 | #define BIT_PWD_SRST_MHLFIFO_RST BIT(4) | ||
74 | #define BIT_PWD_SRST_CBUS_RST BIT(3) | ||
75 | #define BIT_PWD_SRST_SW_RST_AUTO BIT(2) | ||
76 | #define BIT_PWD_SRST_HDCP2X_SW_RST BIT(1) | ||
77 | #define BIT_PWD_SRST_SW_RST BIT(0) | ||
78 | |||
79 | /* AKSV_1, default value: 0x00 */ | ||
80 | #define REG_AKSV_1 0x001d | ||
81 | |||
82 | /* Video H Resolution #1, default value: 0x00 */ | ||
83 | #define REG_H_RESL 0x003a | ||
84 | |||
85 | /* Video Mode, default value: 0x00 */ | ||
86 | #define REG_VID_MODE 0x004a | ||
87 | #define BIT_VID_MODE_M1080P BIT(6) | ||
88 | |||
89 | /* Video Input Mode, default value: 0xc0 */ | ||
90 | #define REG_VID_OVRRD 0x0051 | ||
91 | #define BIT_VID_OVRRD_PP_AUTO_DISABLE BIT(7) | ||
92 | #define BIT_VID_OVRRD_M1080P_OVRRD BIT(6) | ||
93 | #define BIT_VID_OVRRD_MINIVSYNC_ON BIT(5) | ||
94 | #define BIT_VID_OVRRD_3DCONV_EN_FRAME_PACK BIT(4) | ||
95 | #define BIT_VID_OVRRD_ENABLE_AUTO_PATH_EN BIT(3) | ||
96 | #define BIT_VID_OVRRD_ENRGB2YCBCR_OVRRD BIT(2) | ||
97 | #define BIT_VID_OVRRD_ENDOWNSAMPLE_OVRRD BIT(0) | ||
98 | |||
99 | /* I2C Address reassignment, default value: 0x00 */ | ||
100 | #define REG_PAGE_MHLSPEC_ADDR 0x0057 | ||
101 | #define REG_PAGE7_ADDR 0x0058 | ||
102 | #define REG_PAGE8_ADDR 0x005c | ||
103 | |||
104 | /* Fast Interrupt Status, default value: 0x00 */ | ||
105 | #define REG_FAST_INTR_STAT 0x005f | ||
106 | #define LEN_FAST_INTR_STAT 7 | ||
107 | #define BIT_FAST_INTR_STAT_TIMR 8 | ||
108 | #define BIT_FAST_INTR_STAT_INT2 9 | ||
109 | #define BIT_FAST_INTR_STAT_DDC 10 | ||
110 | #define BIT_FAST_INTR_STAT_SCDT 11 | ||
111 | #define BIT_FAST_INTR_STAT_INFR 13 | ||
112 | #define BIT_FAST_INTR_STAT_EDID 14 | ||
113 | #define BIT_FAST_INTR_STAT_HDCP 15 | ||
114 | #define BIT_FAST_INTR_STAT_MSC 16 | ||
115 | #define BIT_FAST_INTR_STAT_MERR 17 | ||
116 | #define BIT_FAST_INTR_STAT_G2WB 18 | ||
117 | #define BIT_FAST_INTR_STAT_G2WB_ERR 19 | ||
118 | #define BIT_FAST_INTR_STAT_DISC 28 | ||
119 | #define BIT_FAST_INTR_STAT_BLOCK 30 | ||
120 | #define BIT_FAST_INTR_STAT_LTRN 31 | ||
121 | #define BIT_FAST_INTR_STAT_HDCP2 32 | ||
122 | #define BIT_FAST_INTR_STAT_TDM 42 | ||
123 | #define BIT_FAST_INTR_STAT_COC 51 | ||
124 | |||
125 | /* GPIO Control, default value: 0x15 */ | ||
126 | #define REG_GPIO_CTRL1 0x006e | ||
127 | #define BIT_CTRL1_GPIO_I_8 BIT(5) | ||
128 | #define BIT_CTRL1_GPIO_OEN_8 BIT(4) | ||
129 | #define BIT_CTRL1_GPIO_I_7 BIT(3) | ||
130 | #define BIT_CTRL1_GPIO_OEN_7 BIT(2) | ||
131 | #define BIT_CTRL1_GPIO_I_6 BIT(1) | ||
132 | #define BIT_CTRL1_GPIO_OEN_6 BIT(0) | ||
133 | |||
134 | /* Interrupt Control, default value: 0x06 */ | ||
135 | #define REG_INT_CTRL 0x006f | ||
136 | #define BIT_INT_CTRL_SOFTWARE_WP BIT(7) | ||
137 | #define BIT_INT_CTRL_INTR_OD BIT(2) | ||
138 | #define BIT_INT_CTRL_INTR_POLARITY BIT(1) | ||
139 | |||
140 | /* Interrupt State, default value: 0x00 */ | ||
141 | #define REG_INTR_STATE 0x0070 | ||
142 | #define BIT_INTR_STATE_INTR_STATE BIT(0) | ||
143 | |||
144 | /* Interrupt Source #1, default value: 0x00 */ | ||
145 | #define REG_INTR1 0x0071 | ||
146 | |||
147 | /* Interrupt Source #2, default value: 0x00 */ | ||
148 | #define REG_INTR2 0x0072 | ||
149 | |||
150 | /* Interrupt Source #3, default value: 0x01 */ | ||
151 | #define REG_INTR3 0x0073 | ||
152 | #define BIT_DDC_CMD_DONE BIT(3) | ||
153 | |||
154 | /* Interrupt Source #5, default value: 0x00 */ | ||
155 | #define REG_INTR5 0x0074 | ||
156 | |||
157 | /* Interrupt #1 Mask, default value: 0x00 */ | ||
158 | #define REG_INTR1_MASK 0x0075 | ||
159 | |||
160 | /* Interrupt #2 Mask, default value: 0x00 */ | ||
161 | #define REG_INTR2_MASK 0x0076 | ||
162 | |||
163 | /* Interrupt #3 Mask, default value: 0x00 */ | ||
164 | #define REG_INTR3_MASK 0x0077 | ||
165 | |||
166 | /* Interrupt #5 Mask, default value: 0x00 */ | ||
167 | #define REG_INTR5_MASK 0x0078 | ||
168 | #define BIT_INTR_SCDT_CHANGE BIT(0) | ||
169 | |||
170 | /* Hot Plug Connection Control, default value: 0x45 */ | ||
171 | #define REG_HPD_CTRL 0x0079 | ||
172 | #define BIT_HPD_CTRL_HPD_DS_SIGNAL BIT(7) | ||
173 | #define BIT_HPD_CTRL_HPD_OUT_OD_EN BIT(6) | ||
174 | #define BIT_HPD_CTRL_HPD_HIGH BIT(5) | ||
175 | #define BIT_HPD_CTRL_HPD_OUT_OVR_EN BIT(4) | ||
176 | #define BIT_HPD_CTRL_GPIO_I_1 BIT(3) | ||
177 | #define BIT_HPD_CTRL_GPIO_OEN_1 BIT(2) | ||
178 | #define BIT_HPD_CTRL_GPIO_I_0 BIT(1) | ||
179 | #define BIT_HPD_CTRL_GPIO_OEN_0 BIT(0) | ||
180 | |||
181 | /* GPIO Control, default value: 0x55 */ | ||
182 | #define REG_GPIO_CTRL 0x007a | ||
183 | #define BIT_CTRL_GPIO_I_5 BIT(7) | ||
184 | #define BIT_CTRL_GPIO_OEN_5 BIT(6) | ||
185 | #define BIT_CTRL_GPIO_I_4 BIT(5) | ||
186 | #define BIT_CTRL_GPIO_OEN_4 BIT(4) | ||
187 | #define BIT_CTRL_GPIO_I_3 BIT(3) | ||
188 | #define BIT_CTRL_GPIO_OEN_3 BIT(2) | ||
189 | #define BIT_CTRL_GPIO_I_2 BIT(1) | ||
190 | #define BIT_CTRL_GPIO_OEN_2 BIT(0) | ||
191 | |||
192 | /* Interrupt Source 7, default value: 0x00 */ | ||
193 | #define REG_INTR7 0x007b | ||
194 | |||
195 | /* Interrupt Source 8, default value: 0x00 */ | ||
196 | #define REG_INTR8 0x007c | ||
197 | |||
198 | /* Interrupt #7 Mask, default value: 0x00 */ | ||
199 | #define REG_INTR7_MASK 0x007d | ||
200 | |||
201 | /* Interrupt #8 Mask, default value: 0x00 */ | ||
202 | #define REG_INTR8_MASK 0x007e | ||
203 | #define BIT_CEA_NEW_VSI BIT(2) | ||
204 | #define BIT_CEA_NEW_AVI BIT(1) | ||
205 | |||
206 | /* IEEE, default value: 0x10 */ | ||
207 | #define REG_TMDS_CCTRL 0x0080 | ||
208 | #define BIT_TMDS_CCTRL_TMDS_OE BIT(4) | ||
209 | |||
210 | /* TMDS Control #4, default value: 0x02 */ | ||
211 | #define REG_TMDS_CTRL4 0x0085 | ||
212 | #define BIT_TMDS_CTRL4_SCDT_CKDT_SEL BIT(1) | ||
213 | #define BIT_TMDS_CTRL4_TX_EN_BY_SCDT BIT(0) | ||
214 | |||
215 | /* BIST CNTL, default value: 0x00 */ | ||
216 | #define REG_BIST_CTRL 0x00bb | ||
217 | #define BIT_RXBIST_VGB_EN BIT(7) | ||
218 | #define BIT_TXBIST_VGB_EN BIT(6) | ||
219 | #define BIT_BIST_START_SEL BIT(5) | ||
220 | #define BIT_BIST_START_BIT BIT(4) | ||
221 | #define BIT_BIST_ALWAYS_ON BIT(3) | ||
222 | #define BIT_BIST_TRANS BIT(2) | ||
223 | #define BIT_BIST_RESET BIT(1) | ||
224 | #define BIT_BIST_EN BIT(0) | ||
225 | |||
226 | /* BIST TEST SEL, default value: 0x00 */ | ||
227 | #define REG_BIST_TEST_SEL 0x00bd | ||
228 | #define MSK_BIST_TEST_SEL_BIST_PATT_SEL 0x0f | ||
229 | |||
230 | /* BIST VIDEO_MODE, default value: 0x00 */ | ||
231 | #define REG_BIST_VIDEO_MODE 0x00be | ||
232 | #define MSK_BIST_VIDEO_MODE_BIST_VIDEO_MODE_3_0 0x0f | ||
233 | |||
234 | /* BIST DURATION0, default value: 0x00 */ | ||
235 | #define REG_BIST_DURATION_0 0x00bf | ||
236 | |||
237 | /* BIST DURATION1, default value: 0x00 */ | ||
238 | #define REG_BIST_DURATION_1 0x00c0 | ||
239 | |||
240 | /* BIST DURATION2, default value: 0x00 */ | ||
241 | #define REG_BIST_DURATION_2 0x00c1 | ||
242 | |||
243 | /* BIST 8BIT_PATTERN, default value: 0x00 */ | ||
244 | #define REG_BIST_8BIT_PATTERN 0x00c2 | ||
245 | |||
246 | /* LM DDC, default value: 0x80 */ | ||
247 | #define REG_LM_DDC 0x00c7 | ||
248 | #define BIT_LM_DDC_SW_TPI_EN_DISABLED BIT(7) | ||
249 | |||
250 | #define BIT_LM_DDC_VIDEO_MUTE_EN BIT(5) | ||
251 | #define BIT_LM_DDC_DDC_TPI_SW BIT(2) | ||
252 | #define BIT_LM_DDC_DDC_GRANT BIT(1) | ||
253 | #define BIT_LM_DDC_DDC_GPU_REQUEST BIT(0) | ||
254 | |||
255 | /* DDC I2C Manual, default value: 0x03 */ | ||
256 | #define REG_DDC_MANUAL 0x00ec | ||
257 | #define BIT_DDC_MANUAL_MAN_DDC BIT(7) | ||
258 | #define BIT_DDC_MANUAL_VP_SEL BIT(6) | ||
259 | #define BIT_DDC_MANUAL_DSDA BIT(5) | ||
260 | #define BIT_DDC_MANUAL_DSCL BIT(4) | ||
261 | #define BIT_DDC_MANUAL_GCP_HW_CTL_EN BIT(3) | ||
262 | #define BIT_DDC_MANUAL_DDCM_ABORT_WP BIT(2) | ||
263 | #define BIT_DDC_MANUAL_IO_DSDA BIT(1) | ||
264 | #define BIT_DDC_MANUAL_IO_DSCL BIT(0) | ||
265 | |||
266 | /* DDC I2C Target Slave Address, default value: 0x00 */ | ||
267 | #define REG_DDC_ADDR 0x00ed | ||
268 | #define MSK_DDC_ADDR_DDC_ADDR 0xfe | ||
269 | |||
270 | /* DDC I2C Target Segment Address, default value: 0x00 */ | ||
271 | #define REG_DDC_SEGM 0x00ee | ||
272 | |||
273 | /* DDC I2C Target Offset Address, default value: 0x00 */ | ||
274 | #define REG_DDC_OFFSET 0x00ef | ||
275 | |||
276 | /* DDC I2C Data In count #1, default value: 0x00 */ | ||
277 | #define REG_DDC_DIN_CNT1 0x00f0 | ||
278 | |||
279 | /* DDC I2C Data In count #2, default value: 0x00 */ | ||
280 | #define REG_DDC_DIN_CNT2 0x00f1 | ||
281 | #define MSK_DDC_DIN_CNT2_DDC_DIN_CNT_9_8 0x03 | ||
282 | |||
283 | /* DDC I2C Status, default value: 0x04 */ | ||
284 | #define REG_DDC_STATUS 0x00f2 | ||
285 | #define BIT_DDC_STATUS_DDC_BUS_LOW BIT(6) | ||
286 | #define BIT_DDC_STATUS_DDC_NO_ACK BIT(5) | ||
287 | #define BIT_DDC_STATUS_DDC_I2C_IN_PROG BIT(4) | ||
288 | #define BIT_DDC_STATUS_DDC_FIFO_FULL BIT(3) | ||
289 | #define BIT_DDC_STATUS_DDC_FIFO_EMPTY BIT(2) | ||
290 | #define BIT_DDC_STATUS_DDC_FIFO_READ_IN_SUE BIT(1) | ||
291 | #define BIT_DDC_STATUS_DDC_FIFO_WRITE_IN_USE BIT(0) | ||
292 | |||
293 | /* DDC I2C Command, default value: 0x70 */ | ||
294 | #define REG_DDC_CMD 0x00f3 | ||
295 | #define BIT_DDC_CMD_HDCP_DDC_EN BIT(6) | ||
296 | #define BIT_DDC_CMD_SDA_DEL_EN BIT(5) | ||
297 | #define BIT_DDC_CMD_DDC_FLT_EN BIT(4) | ||
298 | |||
299 | #define MSK_DDC_CMD_DDC_CMD 0x0f | ||
300 | #define VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 0x04 | ||
301 | #define VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO 0x09 | ||
302 | #define VAL_DDC_CMD_DDC_CMD_ABORT 0x0f | ||
303 | |||
304 | /* DDC I2C FIFO Data In/Out, default value: 0x00 */ | ||
305 | #define REG_DDC_DATA 0x00f4 | ||
306 | |||
307 | /* DDC I2C Data Out Counter, default value: 0x00 */ | ||
308 | #define REG_DDC_DOUT_CNT 0x00f5 | ||
309 | #define BIT_DDC_DOUT_CNT_DDC_DELAY_CNT_8 BIT(7) | ||
310 | #define MSK_DDC_DOUT_CNT_DDC_DATA_OUT_CNT 0x1f | ||
311 | |||
312 | /* DDC I2C Delay Count, default value: 0x14 */ | ||
313 | #define REG_DDC_DELAY_CNT 0x00f6 | ||
314 | |||
315 | /* Test Control, default value: 0x80 */ | ||
316 | #define REG_TEST_TXCTRL 0x00f7 | ||
317 | #define BIT_TEST_TXCTRL_RCLK_REF_SEL BIT(7) | ||
318 | #define BIT_TEST_TXCTRL_PCLK_REF_SEL BIT(6) | ||
319 | #define MSK_TEST_TXCTRL_BYPASS_PLL_CLK 0x3c | ||
320 | #define BIT_TEST_TXCTRL_HDMI_MODE BIT(1) | ||
321 | #define BIT_TEST_TXCTRL_TST_PLLCK BIT(0) | ||
322 | |||
323 | /* CBUS Address, default value: 0x00 */ | ||
324 | #define REG_PAGE_CBUS_ADDR 0x00f8 | ||
325 | |||
326 | /* I2C Device Address re-assignment */ | ||
327 | #define REG_PAGE1_ADDR 0x00fc | ||
328 | #define REG_PAGE2_ADDR 0x00fd | ||
329 | #define REG_PAGE3_ADDR 0x00fe | ||
330 | #define REG_HW_TPI_ADDR 0x00ff | ||
331 | |||
332 | /* USBT CTRL0, default value: 0x00 */ | ||
333 | #define REG_UTSRST 0x0100 | ||
334 | #define BIT_UTSRST_FC_SRST BIT(5) | ||
335 | #define BIT_UTSRST_KEEPER_SRST BIT(4) | ||
336 | #define BIT_UTSRST_HTX_SRST BIT(3) | ||
337 | #define BIT_UTSRST_TRX_SRST BIT(2) | ||
338 | #define BIT_UTSRST_TTX_SRST BIT(1) | ||
339 | #define BIT_UTSRST_HRX_SRST BIT(0) | ||
340 | |||
341 | /* HSIC RX Control3, default value: 0x07 */ | ||
342 | #define REG_HRXCTRL3 0x0104 | ||
343 | #define MSK_HRXCTRL3_HRX_AFFCTRL 0xf0 | ||
344 | #define BIT_HRXCTRL3_HRX_OUT_EN BIT(2) | ||
345 | #define BIT_HRXCTRL3_STATUS_EN BIT(1) | ||
346 | #define BIT_HRXCTRL3_HRX_STAY_RESET BIT(0) | ||
347 | |||
348 | /* HSIC RX INT Registers */ | ||
349 | #define REG_HRXINTL 0x0111 | ||
350 | #define REG_HRXINTH 0x0112 | ||
351 | |||
352 | /* TDM TX NUMBITS, default value: 0x0c */ | ||
353 | #define REG_TTXNUMB 0x0116 | ||
354 | #define MSK_TTXNUMB_TTX_AFFCTRL_3_0 0xf0 | ||
355 | #define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT BIT(3) | ||
356 | #define MSK_TTXNUMB_TTX_NUMBPS_2_0 0x07 | ||
357 | |||
358 | /* TDM TX NUMSPISYM, default value: 0x04 */ | ||
359 | #define REG_TTXSPINUMS 0x0117 | ||
360 | |||
361 | /* TDM TX NUMHSICSYM, default value: 0x14 */ | ||
362 | #define REG_TTXHSICNUMS 0x0118 | ||
363 | |||
364 | /* TDM TX NUMTOTSYM, default value: 0x18 */ | ||
365 | #define REG_TTXTOTNUMS 0x0119 | ||
366 | |||
367 | /* TDM TX INT Low, default value: 0x00 */ | ||
368 | #define REG_TTXINTL 0x0136 | ||
369 | #define BIT_TTXINTL_TTX_INTR7 BIT(7) | ||
370 | #define BIT_TTXINTL_TTX_INTR6 BIT(6) | ||
371 | #define BIT_TTXINTL_TTX_INTR5 BIT(5) | ||
372 | #define BIT_TTXINTL_TTX_INTR4 BIT(4) | ||
373 | #define BIT_TTXINTL_TTX_INTR3 BIT(3) | ||
374 | #define BIT_TTXINTL_TTX_INTR2 BIT(2) | ||
375 | #define BIT_TTXINTL_TTX_INTR1 BIT(1) | ||
376 | #define BIT_TTXINTL_TTX_INTR0 BIT(0) | ||
377 | |||
378 | /* TDM TX INT High, default value: 0x00 */ | ||
379 | #define REG_TTXINTH 0x0137 | ||
380 | #define BIT_TTXINTH_TTX_INTR15 BIT(7) | ||
381 | #define BIT_TTXINTH_TTX_INTR14 BIT(6) | ||
382 | #define BIT_TTXINTH_TTX_INTR13 BIT(5) | ||
383 | #define BIT_TTXINTH_TTX_INTR12 BIT(4) | ||
384 | #define BIT_TTXINTH_TTX_INTR11 BIT(3) | ||
385 | #define BIT_TTXINTH_TTX_INTR10 BIT(2) | ||
386 | #define BIT_TTXINTH_TTX_INTR9 BIT(1) | ||
387 | #define BIT_TTXINTH_TTX_INTR8 BIT(0) | ||
388 | |||
389 | /* TDM RX Control, default value: 0x1c */ | ||
390 | #define REG_TRXCTRL 0x013b | ||
391 | #define BIT_TRXCTRL_TRX_CLR_WVALLOW BIT(4) | ||
392 | #define BIT_TRXCTRL_TRX_FROM_SE_COC BIT(3) | ||
393 | #define MSK_TRXCTRL_TRX_NUMBPS_2_0 0x07 | ||
394 | |||
395 | /* TDM RX NUMSPISYM, default value: 0x04 */ | ||
396 | #define REG_TRXSPINUMS 0x013c | ||
397 | |||
398 | /* TDM RX NUMHSICSYM, default value: 0x14 */ | ||
399 | #define REG_TRXHSICNUMS 0x013d | ||
400 | |||
401 | /* TDM RX NUMTOTSYM, default value: 0x18 */ | ||
402 | #define REG_TRXTOTNUMS 0x013e | ||
403 | |||
404 | /* TDM RX Status 2nd, default value: 0x00 */ | ||
405 | #define REG_TRXSTA2 0x015c | ||
406 | |||
407 | /* TDM RX INT Low, default value: 0x00 */ | ||
408 | #define REG_TRXINTL 0x0163 | ||
409 | |||
410 | /* TDM RX INT High, default value: 0x00 */ | ||
411 | #define REG_TRXINTH 0x0164 | ||
412 | |||
413 | /* TDM RX INTMASK High, default value: 0x00 */ | ||
414 | #define REG_TRXINTMH 0x0166 | ||
415 | |||
416 | /* HSIC TX CTRL, default value: 0x00 */ | ||
417 | #define REG_HTXCTRL 0x0169 | ||
418 | #define BIT_HTXCTRL_HTX_ALLSBE_SOP BIT(4) | ||
419 | #define BIT_HTXCTRL_HTX_RGDINV_USB BIT(3) | ||
420 | #define BIT_HTXCTRL_HTX_RSPTDM_BUSY BIT(2) | ||
421 | #define BIT_HTXCTRL_HTX_DRVCONN1 BIT(1) | ||
422 | #define BIT_HTXCTRL_HTX_DRVRST1 BIT(0) | ||
423 | |||
424 | /* HSIC TX INT Low, default value: 0x00 */ | ||
425 | #define REG_HTXINTL 0x017d | ||
426 | |||
427 | /* HSIC TX INT High, default value: 0x00 */ | ||
428 | #define REG_HTXINTH 0x017e | ||
429 | |||
430 | /* HSIC Keeper, default value: 0x00 */ | ||
431 | #define REG_KEEPER 0x0181 | ||
432 | #define MSK_KEEPER_KEEPER_MODE_1_0 0x03 | ||
433 | |||
434 | /* HSIC Flow Control General, default value: 0x02 */ | ||
435 | #define REG_FCGC 0x0183 | ||
436 | #define BIT_FCGC_HSIC_FC_HOSTMODE BIT(1) | ||
437 | #define BIT_FCGC_HSIC_FC_ENABLE BIT(0) | ||
438 | |||
439 | /* HSIC Flow Control CTR13, default value: 0xfc */ | ||
440 | #define REG_FCCTR13 0x0191 | ||
441 | |||
442 | /* HSIC Flow Control CTR14, default value: 0xff */ | ||
443 | #define REG_FCCTR14 0x0192 | ||
444 | |||
445 | /* HSIC Flow Control CTR15, default value: 0xff */ | ||
446 | #define REG_FCCTR15 0x0193 | ||
447 | |||
448 | /* HSIC Flow Control CTR50, default value: 0x03 */ | ||
449 | #define REG_FCCTR50 0x01b6 | ||
450 | |||
451 | /* HSIC Flow Control INTR0, default value: 0x00 */ | ||
452 | #define REG_FCINTR0 0x01ec | ||
453 | #define REG_FCINTR1 0x01ed | ||
454 | #define REG_FCINTR2 0x01ee | ||
455 | #define REG_FCINTR3 0x01ef | ||
456 | #define REG_FCINTR4 0x01f0 | ||
457 | #define REG_FCINTR5 0x01f1 | ||
458 | #define REG_FCINTR6 0x01f2 | ||
459 | #define REG_FCINTR7 0x01f3 | ||
460 | |||
461 | /* TDM Low Latency, default value: 0x20 */ | ||
462 | #define REG_TDMLLCTL 0x01fc | ||
463 | #define MSK_TDMLLCTL_TRX_LL_SEL_MANUAL 0xc0 | ||
464 | #define MSK_TDMLLCTL_TRX_LL_SEL_MODE 0x30 | ||
465 | #define MSK_TDMLLCTL_TTX_LL_SEL_MANUAL 0x0c | ||
466 | #define BIT_TDMLLCTL_TTX_LL_TIE_LOW BIT(1) | ||
467 | #define BIT_TDMLLCTL_TTX_LL_SEL_MODE BIT(0) | ||
468 | |||
469 | /* TMDS 0 Clock Control, default value: 0x10 */ | ||
470 | #define REG_TMDS0_CCTRL1 0x0210 | ||
471 | #define MSK_TMDS0_CCTRL1_TEST_SEL 0xc0 | ||
472 | #define MSK_TMDS0_CCTRL1_CLK1X_CTL 0x30 | ||
473 | |||
474 | /* TMDS Clock Enable, default value: 0x00 */ | ||
475 | #define REG_TMDS_CLK_EN 0x0211 | ||
476 | #define BIT_TMDS_CLK_EN_CLK_EN BIT(0) | ||
477 | |||
478 | /* TMDS Channel Enable, default value: 0x00 */ | ||
479 | #define REG_TMDS_CH_EN 0x0212 | ||
480 | #define BIT_TMDS_CH_EN_CH0_EN BIT(4) | ||
481 | #define BIT_TMDS_CH_EN_CH12_EN BIT(0) | ||
482 | |||
483 | /* BGR_BIAS, default value: 0x07 */ | ||
484 | #define REG_BGR_BIAS 0x0215 | ||
485 | #define BIT_BGR_BIAS_BGR_EN BIT(7) | ||
486 | #define MSK_BGR_BIAS_BIAS_BGR_D 0x0f | ||
487 | |||
488 | /* TMDS 0 Digital I2C BW, default value: 0x0a */ | ||
489 | #define REG_ALICE0_BW_I2C 0x0231 | ||
490 | |||
491 | /* TMDS 0 Digital Zone Control, default value: 0xe0 */ | ||
492 | #define REG_ALICE0_ZONE_CTRL 0x024c | ||
493 | #define BIT_ALICE0_ZONE_CTRL_ICRST_N BIT(7) | ||
494 | #define BIT_ALICE0_ZONE_CTRL_USE_INT_DIV20 BIT(6) | ||
495 | #define MSK_ALICE0_ZONE_CTRL_SZONE_I2C 0x30 | ||
496 | #define MSK_ALICE0_ZONE_CTRL_ZONE_CTRL 0x0f | ||
497 | |||
498 | /* TMDS 0 Digital PLL Mode Control, default value: 0x00 */ | ||
499 | #define REG_ALICE0_MODE_CTRL 0x024d | ||
500 | #define MSK_ALICE0_MODE_CTRL_PLL_MODE_I2C 0x0c | ||
501 | #define MSK_ALICE0_MODE_CTRL_DIV20_CTRL 0x03 | ||
502 | |||
503 | /* MHL Tx Control 6th, default value: 0xa0 */ | ||
504 | #define REG_MHLTX_CTL6 0x0285 | ||
505 | #define MSK_MHLTX_CTL6_EMI_SEL 0xe0 | ||
506 | #define MSK_MHLTX_CTL6_TX_CLK_SHAPE_9_8 0x03 | ||
507 | |||
508 | /* Packet Filter0, default value: 0x00 */ | ||
509 | #define REG_PKT_FILTER_0 0x0290 | ||
510 | #define BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT BIT(7) | ||
511 | #define BIT_PKT_FILTER_0_DROP_CEA_CP_PKT BIT(6) | ||
512 | #define BIT_PKT_FILTER_0_DROP_MPEG_PKT BIT(5) | ||
513 | #define BIT_PKT_FILTER_0_DROP_SPIF_PKT BIT(4) | ||
514 | #define BIT_PKT_FILTER_0_DROP_AIF_PKT BIT(3) | ||
515 | #define BIT_PKT_FILTER_0_DROP_AVI_PKT BIT(2) | ||
516 | #define BIT_PKT_FILTER_0_DROP_CTS_PKT BIT(1) | ||
517 | #define BIT_PKT_FILTER_0_DROP_GCP_PKT BIT(0) | ||
518 | |||
519 | /* Packet Filter1, default value: 0x00 */ | ||
520 | #define REG_PKT_FILTER_1 0x0291 | ||
521 | #define BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS BIT(7) | ||
522 | #define BIT_PKT_FILTER_1_AVI_OVERRIDE_DIS BIT(6) | ||
523 | #define BIT_PKT_FILTER_1_DROP_AUDIO_PKT BIT(3) | ||
524 | #define BIT_PKT_FILTER_1_DROP_GEN2_PKT BIT(2) | ||
525 | #define BIT_PKT_FILTER_1_DROP_GEN_PKT BIT(1) | ||
526 | #define BIT_PKT_FILTER_1_DROP_VSIF_PKT BIT(0) | ||
527 | |||
528 | /* TMDS Clock Status, default value: 0x10 */ | ||
529 | #define REG_TMDS_CSTAT_P3 0x02a0 | ||
530 | #define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_CLR_MUTE BIT(7) | ||
531 | #define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_SET_MUTE BIT(6) | ||
532 | #define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_NEW_CP BIT(5) | ||
533 | #define BIT_TMDS_CSTAT_P3_CLR_AVI BIT(3) | ||
534 | #define BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS BIT(2) | ||
535 | #define BIT_TMDS_CSTAT_P3_SCDT BIT(1) | ||
536 | #define BIT_TMDS_CSTAT_P3_CKDT BIT(0) | ||
537 | |||
538 | /* RX_HDMI Control, default value: 0x10 */ | ||
539 | #define REG_RX_HDMI_CTRL0 0x02a1 | ||
540 | #define BIT_RX_HDMI_CTRL0_BYP_DVIFILT_SYNC BIT(5) | ||
541 | #define BIT_RX_HDMI_CTRL0_HDMI_MODE_EN_ITSELF_CLR BIT(4) | ||
542 | #define BIT_RX_HDMI_CTRL0_HDMI_MODE_SW_VALUE BIT(3) | ||
543 | #define BIT_RX_HDMI_CTRL0_HDMI_MODE_OVERWRITE BIT(2) | ||
544 | #define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE_EN BIT(1) | ||
545 | #define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE BIT(0) | ||
546 | |||
547 | /* RX_HDMI Control, default value: 0x38 */ | ||
548 | #define REG_RX_HDMI_CTRL2 0x02a3 | ||
549 | #define MSK_RX_HDMI_CTRL2_IDLE_CNT 0xf0 | ||
550 | #define VAL_RX_HDMI_CTRL2_IDLE_CNT(n) ((n) << 4) | ||
551 | #define BIT_RX_HDMI_CTRL2_USE_AV_MUTE BIT(3) | ||
552 | #define BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI BIT(0) | ||
553 | |||
554 | /* RX_HDMI Control, default value: 0x0f */ | ||
555 | #define REG_RX_HDMI_CTRL3 0x02a4 | ||
556 | #define MSK_RX_HDMI_CTRL3_PP_MODE_CLK_EN 0x0f | ||
557 | |||
558 | /* rx_hdmi Clear Buffer, default value: 0x00 */ | ||
559 | #define REG_RX_HDMI_CLR_BUFFER 0x02ac | ||
560 | #define MSK_RX_HDMI_CLR_BUFFER_AIF4VSI_CMP 0xc0 | ||
561 | #define BIT_RX_HDMI_CLR_BUFFER_USE_AIF4VSI BIT(5) | ||
562 | #define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_W_AVI BIT(4) | ||
563 | #define BIT_RX_HDMI_CLR_BUFFER_VSI_IEEE_ID_CHK_EN BIT(3) | ||
564 | #define BIT_RX_HDMI_CLR_BUFFER_SWAP_VSI_IEEE_ID BIT(2) | ||
565 | #define BIT_RX_HDMI_CLR_BUFFER_AIF_CLR_EN BIT(1) | ||
566 | #define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN BIT(0) | ||
567 | |||
568 | /* RX_HDMI VSI Header1, default value: 0x00 */ | ||
569 | #define REG_RX_HDMI_MON_PKT_HEADER1 0x02b8 | ||
570 | |||
571 | /* RX_HDMI VSI MHL Monitor, default value: 0x3c */ | ||
572 | #define REG_RX_HDMI_VSIF_MHL_MON 0x02d7 | ||
573 | |||
574 | #define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_3D_FORMAT 0x3c | ||
575 | #define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_VID_FORMAT 0x03 | ||
576 | |||
577 | /* Interrupt Source 9, default value: 0x00 */ | ||
578 | #define REG_INTR9 0x02e0 | ||
579 | #define BIT_INTR9_EDID_ERROR BIT(6) | ||
580 | #define BIT_INTR9_EDID_DONE BIT(5) | ||
581 | #define BIT_INTR9_DEVCAP_DONE BIT(4) | ||
582 | |||
583 | /* Interrupt 9 Mask, default value: 0x00 */ | ||
584 | #define REG_INTR9_MASK 0x02e1 | ||
585 | |||
586 | /* TPI CBUS Start, default value: 0x00 */ | ||
587 | #define REG_TPI_CBUS_START 0x02e2 | ||
588 | #define BIT_TPI_CBUS_START_RCP_REQ_START BIT(7) | ||
589 | #define BIT_TPI_CBUS_START_RCPK_REPLY_START BIT(6) | ||
590 | #define BIT_TPI_CBUS_START_RCPE_REPLY_START BIT(5) | ||
591 | #define BIT_TPI_CBUS_START_PUT_LINK_MODE_START BIT(4) | ||
592 | #define BIT_TPI_CBUS_START_PUT_DCAPCHG_START BIT(3) | ||
593 | #define BIT_TPI_CBUS_START_PUT_DCAPRDY_START BIT(2) | ||
594 | #define BIT_TPI_CBUS_START_GET_EDID_START_0 BIT(1) | ||
595 | #define BIT_TPI_CBUS_START_GET_DEVCAP_START BIT(0) | ||
596 | |||
597 | /* EDID Control, default value: 0x10 */ | ||
598 | #define REG_EDID_CTRL 0x02e3 | ||
599 | #define BIT_EDID_CTRL_EDID_PRIME_VALID BIT(7) | ||
600 | #define BIT_EDID_CTRL_XDEVCAP_EN BIT(6) | ||
601 | #define BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP BIT(5) | ||
602 | #define BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO BIT(4) | ||
603 | #define BIT_EDID_CTRL_EDID_FIFO_ACCESS_ALWAYS_EN BIT(3) | ||
604 | #define BIT_EDID_CTRL_EDID_FIFO_BLOCK_SEL BIT(2) | ||
605 | #define BIT_EDID_CTRL_INVALID_BKSV BIT(1) | ||
606 | #define BIT_EDID_CTRL_EDID_MODE_EN BIT(0) | ||
607 | |||
608 | /* EDID FIFO Addr, default value: 0x00 */ | ||
609 | #define REG_EDID_FIFO_ADDR 0x02e9 | ||
610 | |||
611 | /* EDID FIFO Write Data, default value: 0x00 */ | ||
612 | #define REG_EDID_FIFO_WR_DATA 0x02ea | ||
613 | |||
614 | /* EDID/DEVCAP FIFO Internal Addr, default value: 0x00 */ | ||
615 | #define REG_EDID_FIFO_ADDR_MON 0x02eb | ||
616 | |||
617 | /* EDID FIFO Read Data, default value: 0x00 */ | ||
618 | #define REG_EDID_FIFO_RD_DATA 0x02ec | ||
619 | |||
620 | /* EDID DDC Segment Pointer, default value: 0x00 */ | ||
621 | #define REG_EDID_START_EXT 0x02ed | ||
622 | |||
623 | /* TX IP BIST CNTL and Status, default value: 0x00 */ | ||
624 | #define REG_TX_IP_BIST_CNTLSTA 0x02f2 | ||
625 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_QUARTER_CLK_SEL BIT(6) | ||
626 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_DONE BIT(5) | ||
627 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_ON BIT(4) | ||
628 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_RUN BIT(3) | ||
629 | #define BIT_TX_IP_BIST_CNTLSTA_TXCLK_HALF_SEL BIT(2) | ||
630 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_EN BIT(1) | ||
631 | #define BIT_TX_IP_BIST_CNTLSTA_TXBIST_SEL BIT(0) | ||
632 | |||
633 | /* TX IP BIST INST LOW, default value: 0x00 */ | ||
634 | #define REG_TX_IP_BIST_INST_LOW 0x02f3 | ||
635 | #define REG_TX_IP_BIST_INST_HIGH 0x02f4 | ||
636 | |||
637 | /* TX IP BIST PATTERN LOW, default value: 0x00 */ | ||
638 | #define REG_TX_IP_BIST_PAT_LOW 0x02f5 | ||
639 | #define REG_TX_IP_BIST_PAT_HIGH 0x02f6 | ||
640 | |||
641 | /* TX IP BIST CONFIGURE LOW, default value: 0x00 */ | ||
642 | #define REG_TX_IP_BIST_CONF_LOW 0x02f7 | ||
643 | #define REG_TX_IP_BIST_CONF_HIGH 0x02f8 | ||
644 | |||
645 | /* E-MSC General Control, default value: 0x80 */ | ||
646 | #define REG_GENCTL 0x0300 | ||
647 | #define BIT_GENCTL_SPEC_TRANS_DIS BIT(7) | ||
648 | #define BIT_GENCTL_DIS_XMIT_ERR_STATE BIT(6) | ||
649 | #define BIT_GENCTL_SPI_MISO_EDGE BIT(5) | ||
650 | #define BIT_GENCTL_SPI_MOSI_EDGE BIT(4) | ||
651 | #define BIT_GENCTL_CLR_EMSC_RFIFO BIT(3) | ||
652 | #define BIT_GENCTL_CLR_EMSC_XFIFO BIT(2) | ||
653 | #define BIT_GENCTL_START_TRAIN_SEQ BIT(1) | ||
654 | #define BIT_GENCTL_EMSC_EN BIT(0) | ||
655 | |||
656 | /* E-MSC Comma ErrorCNT, default value: 0x03 */ | ||
657 | #define REG_COMMECNT 0x0305 | ||
658 | #define BIT_COMMECNT_I2C_TO_EMSC_EN BIT(7) | ||
659 | #define MSK_COMMECNT_COMMA_CHAR_ERR_CNT 0x0f | ||
660 | |||
661 | /* E-MSC RFIFO ByteCnt, default value: 0x00 */ | ||
662 | #define REG_EMSCRFIFOBCNTL 0x031a | ||
663 | #define REG_EMSCRFIFOBCNTH 0x031b | ||
664 | |||
665 | /* SPI Burst Cnt Status, default value: 0x00 */ | ||
666 | #define REG_SPIBURSTCNT 0x031e | ||
667 | |||
668 | /* SPI Burst Status and SWRST, default value: 0x00 */ | ||
669 | #define REG_SPIBURSTSTAT 0x0322 | ||
670 | #define BIT_SPIBURSTSTAT_SPI_HDCPRST BIT(7) | ||
671 | #define BIT_SPIBURSTSTAT_SPI_CBUSRST BIT(6) | ||
672 | #define BIT_SPIBURSTSTAT_SPI_SRST BIT(5) | ||
673 | #define BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE BIT(0) | ||
674 | |||
675 | /* E-MSC 1st Interrupt, default value: 0x00 */ | ||
676 | #define REG_EMSCINTR 0x0323 | ||
677 | #define BIT_EMSCINTR_EMSC_XFIFO_EMPTY BIT(7) | ||
678 | #define BIT_EMSCINTR_EMSC_XMIT_ACK_TOUT BIT(6) | ||
679 | #define BIT_EMSCINTR_EMSC_RFIFO_READ_ERR BIT(5) | ||
680 | #define BIT_EMSCINTR_EMSC_XFIFO_WRITE_ERR BIT(4) | ||
681 | #define BIT_EMSCINTR_EMSC_COMMA_CHAR_ERR BIT(3) | ||
682 | #define BIT_EMSCINTR_EMSC_XMIT_DONE BIT(2) | ||
683 | #define BIT_EMSCINTR_EMSC_XMIT_GNT_TOUT BIT(1) | ||
684 | #define BIT_EMSCINTR_SPI_DVLD BIT(0) | ||
685 | |||
686 | /* E-MSC Interrupt Mask, default value: 0x00 */ | ||
687 | #define REG_EMSCINTRMASK 0x0324 | ||
688 | |||
689 | /* I2C E-MSC XMIT FIFO Write Port, default value: 0x00 */ | ||
690 | #define REG_EMSC_XMIT_WRITE_PORT 0x032a | ||
691 | |||
692 | /* I2C E-MSC RCV FIFO Read Port, default value: 0x00 */ | ||
693 | #define REG_EMSC_RCV_READ_PORT 0x032b | ||
694 | |||
695 | /* E-MSC 2nd Interrupt, default value: 0x00 */ | ||
696 | #define REG_EMSCINTR1 0x032c | ||
697 | #define BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR BIT(0) | ||
698 | |||
699 | /* E-MSC Interrupt Mask, default value: 0x00 */ | ||
700 | #define REG_EMSCINTRMASK1 0x032d | ||
701 | #define BIT_EMSCINTRMASK1_EMSC_INTRMASK1_0 BIT(0) | ||
702 | |||
703 | /* MHL Top Ctl, default value: 0x00 */ | ||
704 | #define REG_MHL_TOP_CTL 0x0330 | ||
705 | #define BIT_MHL_TOP_CTL_MHL3_DOC_SEL BIT(7) | ||
706 | #define BIT_MHL_TOP_CTL_MHL_PP_SEL BIT(6) | ||
707 | #define MSK_MHL_TOP_CTL_IF_TIMING_CTL 0x03 | ||
708 | |||
709 | /* MHL DataPath 1st Ctl, default value: 0xbc */ | ||
710 | #define REG_MHL_DP_CTL0 0x0331 | ||
711 | #define BIT_MHL_DP_CTL0_DP_OE BIT(7) | ||
712 | #define BIT_MHL_DP_CTL0_TX_OE_OVR BIT(6) | ||
713 | #define MSK_MHL_DP_CTL0_TX_OE 0x3f | ||
714 | |||
715 | /* MHL DataPath 2nd Ctl, default value: 0xbb */ | ||
716 | #define REG_MHL_DP_CTL1 0x0332 | ||
717 | #define MSK_MHL_DP_CTL1_CK_SWING_CTL 0xf0 | ||
718 | #define MSK_MHL_DP_CTL1_DT_SWING_CTL 0x0f | ||
719 | |||
720 | /* MHL DataPath 3rd Ctl, default value: 0x2f */ | ||
721 | #define REG_MHL_DP_CTL2 0x0333 | ||
722 | #define BIT_MHL_DP_CTL2_CLK_BYPASS_EN BIT(7) | ||
723 | #define MSK_MHL_DP_CTL2_DAMP_TERM_SEL 0x30 | ||
724 | #define MSK_MHL_DP_CTL2_CK_TERM_SEL 0x0c | ||
725 | #define MSK_MHL_DP_CTL2_DT_TERM_SEL 0x03 | ||
726 | |||
727 | /* MHL DataPath 4th Ctl, default value: 0x48 */ | ||
728 | #define REG_MHL_DP_CTL3 0x0334 | ||
729 | #define MSK_MHL_DP_CTL3_DT_DRV_VNBC_CTL 0xf0 | ||
730 | #define MSK_MHL_DP_CTL3_DT_DRV_VNB_CTL 0x0f | ||
731 | |||
732 | /* MHL DataPath 5th Ctl, default value: 0x48 */ | ||
733 | #define REG_MHL_DP_CTL4 0x0335 | ||
734 | #define MSK_MHL_DP_CTL4_CK_DRV_VNBC_CTL 0xf0 | ||
735 | #define MSK_MHL_DP_CTL4_CK_DRV_VNB_CTL 0x0f | ||
736 | |||
737 | /* MHL DataPath 6th Ctl, default value: 0x3f */ | ||
738 | #define REG_MHL_DP_CTL5 0x0336 | ||
739 | #define BIT_MHL_DP_CTL5_RSEN_EN_OVR BIT(7) | ||
740 | #define BIT_MHL_DP_CTL5_RSEN_EN BIT(6) | ||
741 | #define MSK_MHL_DP_CTL5_DAMP_TERM_VGS_CTL 0x30 | ||
742 | #define MSK_MHL_DP_CTL5_CK_TERM_VGS_CTL 0x0c | ||
743 | #define MSK_MHL_DP_CTL5_DT_TERM_VGS_CTL 0x03 | ||
744 | |||
745 | /* MHL PLL 1st Ctl, default value: 0x05 */ | ||
746 | #define REG_MHL_PLL_CTL0 0x0337 | ||
747 | #define BIT_MHL_PLL_CTL0_AUD_CLK_EN BIT(7) | ||
748 | |||
749 | #define MSK_MHL_PLL_CTL0_AUD_CLK_RATIO 0x70 | ||
750 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_10 0x70 | ||
751 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_6 0x60 | ||
752 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_4 0x50 | ||
753 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2 0x40 | ||
754 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_5 0x30 | ||
755 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_3 0x20 | ||
756 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2_PRIME 0x10 | ||
757 | #define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_1 0x00 | ||
758 | |||
759 | #define MSK_MHL_PLL_CTL0_HDMI_CLK_RATIO 0x0c | ||
760 | #define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_4X 0x0c | ||
761 | #define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_2X 0x08 | ||
762 | #define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X 0x04 | ||
763 | #define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_HALF_X 0x00 | ||
764 | |||
765 | #define BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL BIT(1) | ||
766 | #define BIT_MHL_PLL_CTL0_ZONE_MASK_OE BIT(0) | ||
767 | |||
768 | /* MHL PLL 3rd Ctl, default value: 0x80 */ | ||
769 | #define REG_MHL_PLL_CTL2 0x0339 | ||
770 | #define BIT_MHL_PLL_CTL2_CLKDETECT_EN BIT(7) | ||
771 | #define BIT_MHL_PLL_CTL2_MEAS_FVCO BIT(3) | ||
772 | #define BIT_MHL_PLL_CTL2_PLL_FAST_LOCK BIT(2) | ||
773 | #define MSK_MHL_PLL_CTL2_PLL_LF_SEL 0x03 | ||
774 | |||
775 | /* MHL CBUS 1st Ctl, default value: 0x12 */ | ||
776 | #define REG_MHL_CBUS_CTL0 0x0340 | ||
777 | #define BIT_MHL_CBUS_CTL0_CBUS_RGND_TEST_MODE BIT(7) | ||
778 | |||
779 | #define MSK_MHL_CBUS_CTL0_CBUS_RGND_VTH_CTL 0x30 | ||
780 | #define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734 0x00 | ||
781 | #define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_747 0x10 | ||
782 | #define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_740 0x20 | ||
783 | #define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_754 0x30 | ||
784 | |||
785 | #define MSK_MHL_CBUS_CTL0_CBUS_RES_TEST_SEL 0x0c | ||
786 | |||
787 | #define MSK_MHL_CBUS_CTL0_CBUS_DRV_SEL 0x03 | ||
788 | #define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAKEST 0x00 | ||
789 | #define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAK 0x01 | ||
790 | #define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG 0x02 | ||
791 | #define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONGEST 0x03 | ||
792 | |||
793 | /* MHL CBUS 2nd Ctl, default value: 0x03 */ | ||
794 | #define REG_MHL_CBUS_CTL1 0x0341 | ||
795 | #define MSK_MHL_CBUS_CTL1_CBUS_RGND_RES_CTL 0x07 | ||
796 | #define VAL_MHL_CBUS_CTL1_0888_OHM 0x00 | ||
797 | #define VAL_MHL_CBUS_CTL1_1115_OHM 0x04 | ||
798 | #define VAL_MHL_CBUS_CTL1_1378_OHM 0x07 | ||
799 | |||
800 | /* MHL CoC 1st Ctl, default value: 0xc3 */ | ||
801 | #define REG_MHL_COC_CTL0 0x0342 | ||
802 | #define BIT_MHL_COC_CTL0_COC_BIAS_EN BIT(7) | ||
803 | #define MSK_MHL_COC_CTL0_COC_BIAS_CTL 0x70 | ||
804 | #define MSK_MHL_COC_CTL0_COC_TERM_CTL 0x07 | ||
805 | |||
806 | /* MHL CoC 2nd Ctl, default value: 0x87 */ | ||
807 | #define REG_MHL_COC_CTL1 0x0343 | ||
808 | #define BIT_MHL_COC_CTL1_COC_EN BIT(7) | ||
809 | #define MSK_MHL_COC_CTL1_COC_DRV_CTL 0x3f | ||
810 | |||
811 | /* MHL CoC 4th Ctl, default value: 0x00 */ | ||
812 | #define REG_MHL_COC_CTL3 0x0345 | ||
813 | #define BIT_MHL_COC_CTL3_COC_AECHO_EN BIT(0) | ||
814 | |||
815 | /* MHL CoC 5th Ctl, default value: 0x28 */ | ||
816 | #define REG_MHL_COC_CTL4 0x0346 | ||
817 | #define MSK_MHL_COC_CTL4_COC_IF_CTL 0xf0 | ||
818 | #define MSK_MHL_COC_CTL4_COC_SLEW_CTL 0x0f | ||
819 | |||
820 | /* MHL CoC 6th Ctl, default value: 0x0d */ | ||
821 | #define REG_MHL_COC_CTL5 0x0347 | ||
822 | |||
823 | /* MHL DoC 1st Ctl, default value: 0x18 */ | ||
824 | #define REG_MHL_DOC_CTL0 0x0349 | ||
825 | #define BIT_MHL_DOC_CTL0_DOC_RXDATA_EN BIT(7) | ||
826 | #define MSK_MHL_DOC_CTL0_DOC_DM_TERM 0x38 | ||
827 | #define MSK_MHL_DOC_CTL0_DOC_OPMODE 0x06 | ||
828 | #define BIT_MHL_DOC_CTL0_DOC_RXBIAS_EN BIT(0) | ||
829 | |||
830 | /* MHL DataPath 7th Ctl, default value: 0x2a */ | ||
831 | #define REG_MHL_DP_CTL6 0x0350 | ||
832 | #define BIT_MHL_DP_CTL6_DP_TAP2_SGN BIT(5) | ||
833 | #define BIT_MHL_DP_CTL6_DP_TAP2_EN BIT(4) | ||
834 | #define BIT_MHL_DP_CTL6_DP_TAP1_SGN BIT(3) | ||
835 | #define BIT_MHL_DP_CTL6_DP_TAP1_EN BIT(2) | ||
836 | #define BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN BIT(1) | ||
837 | #define BIT_MHL_DP_CTL6_DP_PRE_POST_SEL BIT(0) | ||
838 | |||
839 | /* MHL DataPath 8th Ctl, default value: 0x06 */ | ||
840 | #define REG_MHL_DP_CTL7 0x0351 | ||
841 | #define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL 0xf0 | ||
842 | #define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL 0x0f | ||
843 | |||
844 | /* Tx Zone Ctl1, default value: 0x00 */ | ||
845 | #define REG_TX_ZONE_CTL1 0x0361 | ||
846 | #define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE 0x08 | ||
847 | |||
848 | /* MHL3 Tx Zone Ctl, default value: 0x00 */ | ||
849 | #define REG_MHL3_TX_ZONE_CTL 0x0364 | ||
850 | #define BIT_MHL3_TX_ZONE_CTL_MHL2_INTPLT_ZONE_MANU_EN BIT(7) | ||
851 | #define MSK_MHL3_TX_ZONE_CTL_MHL3_TX_ZONE 0x03 | ||
852 | |||
853 | #define MSK_TX_ZONE_CTL3_TX_ZONE 0x03 | ||
854 | #define VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS 0x00 | ||
855 | #define VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS 0x01 | ||
856 | #define VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS 0x02 | ||
857 | |||
858 | /* HDCP Polling Control and Status, default value: 0x70 */ | ||
859 | #define REG_HDCP2X_POLL_CS 0x0391 | ||
860 | |||
861 | #define BIT_HDCP2X_POLL_CS_HDCP2X_MSG_SZ_CLR_OPTION BIT(6) | ||
862 | #define BIT_HDCP2X_POLL_CS_HDCP2X_RPT_READY_CLR_OPTION BIT(5) | ||
863 | #define BIT_HDCP2X_POLL_CS_HDCP2X_REAUTH_REQ_CLR_OPTION BIT(4) | ||
864 | #define MSK_HDCP2X_POLL_CS_ 0x0c | ||
865 | #define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_GNT BIT(1) | ||
866 | #define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_EN BIT(0) | ||
867 | |||
868 | /* HDCP Interrupt 0, default value: 0x00 */ | ||
869 | #define REG_HDCP2X_INTR0 0x0398 | ||
870 | |||
871 | /* HDCP Interrupt 0 Mask, default value: 0x00 */ | ||
872 | #define REG_HDCP2X_INTR0_MASK 0x0399 | ||
873 | |||
874 | /* HDCP General Control 0, default value: 0x02 */ | ||
875 | #define REG_HDCP2X_CTRL_0 0x03a0 | ||
876 | #define BIT_HDCP2X_CTRL_0_HDCP2X_ENCRYPT_EN BIT(7) | ||
877 | #define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_SEL BIT(6) | ||
878 | #define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_OVR BIT(5) | ||
879 | #define BIT_HDCP2X_CTRL_0_HDCP2X_PRECOMPUTE BIT(4) | ||
880 | #define BIT_HDCP2X_CTRL_0_HDCP2X_HDMIMODE BIT(3) | ||
881 | #define BIT_HDCP2X_CTRL_0_HDCP2X_REPEATER BIT(2) | ||
882 | #define BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX BIT(1) | ||
883 | #define BIT_HDCP2X_CTRL_0_HDCP2X_EN BIT(0) | ||
884 | |||
885 | /* HDCP General Control 1, default value: 0x08 */ | ||
886 | #define REG_HDCP2X_CTRL_1 0x03a1 | ||
887 | #define MSK_HDCP2X_CTRL_1_HDCP2X_REAUTH_MSK_3_0 0xf0 | ||
888 | #define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_SW BIT(3) | ||
889 | #define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_OVR BIT(2) | ||
890 | #define BIT_HDCP2X_CTRL_1_HDCP2X_CTL3MSK BIT(1) | ||
891 | #define BIT_HDCP2X_CTRL_1_HDCP2X_REAUTH_SW BIT(0) | ||
892 | |||
893 | /* HDCP Misc Control, default value: 0x00 */ | ||
894 | #define REG_HDCP2X_MISC_CTRL 0x03a5 | ||
895 | #define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_XFER_START BIT(4) | ||
896 | #define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR_START BIT(3) | ||
897 | #define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR BIT(2) | ||
898 | #define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD_START BIT(1) | ||
899 | #define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD BIT(0) | ||
900 | |||
901 | /* HDCP RPT SMNG K, default value: 0x00 */ | ||
902 | #define REG_HDCP2X_RPT_SMNG_K 0x03a6 | ||
903 | |||
904 | /* HDCP RPT SMNG In, default value: 0x00 */ | ||
905 | #define REG_HDCP2X_RPT_SMNG_IN 0x03a7 | ||
906 | |||
907 | /* HDCP Auth Status, default value: 0x00 */ | ||
908 | #define REG_HDCP2X_AUTH_STAT 0x03aa | ||
909 | |||
910 | /* HDCP RPT RCVID Out, default value: 0x00 */ | ||
911 | #define REG_HDCP2X_RPT_RCVID_OUT 0x03ac | ||
912 | |||
913 | /* HDCP TP1, default value: 0x62 */ | ||
914 | #define REG_HDCP2X_TP1 0x03b4 | ||
915 | |||
916 | /* HDCP GP Out 0, default value: 0x00 */ | ||
917 | #define REG_HDCP2X_GP_OUT0 0x03c7 | ||
918 | |||
919 | /* HDCP Repeater RCVR ID 0, default value: 0x00 */ | ||
920 | #define REG_HDCP2X_RPT_RCVR_ID0 0x03d1 | ||
921 | |||
922 | /* HDCP DDCM Status, default value: 0x00 */ | ||
923 | #define REG_HDCP2X_DDCM_STS 0x03d8 | ||
924 | #define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_ERR_STS_3_0 0xf0 | ||
925 | #define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_CTL_CS_3_0 0x0f | ||
926 | |||
927 | /* HDMI2MHL3 Control, default value: 0x0a */ | ||
928 | #define REG_M3_CTRL 0x03e0 | ||
929 | #define BIT_M3_CTRL_H2M_SWRST BIT(4) | ||
930 | #define BIT_M3_CTRL_SW_MHL3_SEL BIT(3) | ||
931 | #define BIT_M3_CTRL_M3AV_EN BIT(2) | ||
932 | #define BIT_M3_CTRL_ENC_TMDS BIT(1) | ||
933 | #define BIT_M3_CTRL_MHL3_MASTER_EN BIT(0) | ||
934 | |||
935 | #define VAL_M3_CTRL_MHL1_2_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \ | ||
936 | | BIT_M3_CTRL_ENC_TMDS) | ||
937 | #define VAL_M3_CTRL_MHL3_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \ | ||
938 | | BIT_M3_CTRL_M3AV_EN \ | ||
939 | | BIT_M3_CTRL_ENC_TMDS \ | ||
940 | | BIT_M3_CTRL_MHL3_MASTER_EN) | ||
941 | |||
942 | /* HDMI2MHL3 Port0 Control, default value: 0x04 */ | ||
943 | #define REG_M3_P0CTRL 0x03e1 | ||
944 | #define BIT_M3_P0CTRL_MHL3_P0_HDCP_ENC_EN BIT(4) | ||
945 | #define BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN BIT(3) | ||
946 | #define BIT_M3_P0CTRL_MHL3_P0_HDCP_EN BIT(2) | ||
947 | #define BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED BIT(1) | ||
948 | #define BIT_M3_P0CTRL_MHL3_P0_PORT_EN BIT(0) | ||
949 | |||
950 | #define REG_M3_POSTM 0x03e2 | ||
951 | #define MSK_M3_POSTM_RRP_DECODE 0xf8 | ||
952 | #define MSK_M3_POSTM_MHL3_P0_STM_ID 0x07 | ||
953 | |||
954 | /* HDMI2MHL3 Scramble Control, default value: 0x41 */ | ||
955 | #define REG_M3_SCTRL 0x03e6 | ||
956 | #define MSK_M3_SCTRL_MHL3_SR_LENGTH 0xf0 | ||
957 | #define BIT_M3_SCTRL_MHL3_SCRAMBLER_EN BIT(0) | ||
958 | |||
959 | /* HSIC Div Ctl, default value: 0x05 */ | ||
960 | #define REG_DIV_CTL_MAIN 0x03f2 | ||
961 | #define MSK_DIV_CTL_MAIN_PRE_DIV_CTL_MAIN 0x1c | ||
962 | #define MSK_DIV_CTL_MAIN_FB_DIV_CTL_MAIN 0x03 | ||
963 | |||
964 | /* MHL Capability 1st Byte, default value: 0x00 */ | ||
965 | #define REG_MHL_DEVCAP_0 0x0400 | ||
966 | |||
967 | /* MHL Interrupt 1st Byte, default value: 0x00 */ | ||
968 | #define REG_MHL_INT_0 0x0420 | ||
969 | |||
970 | /* Device Status 1st byte, default value: 0x00 */ | ||
971 | #define REG_MHL_STAT_0 0x0430 | ||
972 | |||
973 | /* CBUS Scratch Pad 1st Byte, default value: 0x00 */ | ||
974 | #define REG_MHL_SCRPAD_0 0x0440 | ||
975 | |||
976 | /* MHL Extended Capability 1st Byte, default value: 0x00 */ | ||
977 | #define REG_MHL_EXTDEVCAP_0 0x0480 | ||
978 | |||
979 | /* Device Extended Status 1st byte, default value: 0x00 */ | ||
980 | #define REG_MHL_EXTSTAT_0 0x0490 | ||
981 | |||
982 | /* TPI DTD Byte2, default value: 0x00 */ | ||
983 | #define REG_TPI_DTD_B2 0x0602 | ||
984 | |||
985 | #define VAL_TPI_QUAN_RANGE_LIMITED 0x01 | ||
986 | #define VAL_TPI_QUAN_RANGE_FULL 0x02 | ||
987 | #define VAL_TPI_FORMAT_RGB 0x00 | ||
988 | #define VAL_TPI_FORMAT_YCBCR444 0x01 | ||
989 | #define VAL_TPI_FORMAT_YCBCR422 0x02 | ||
990 | #define VAL_TPI_FORMAT_INTERNAL_RGB 0x03 | ||
991 | #define VAL_TPI_FORMAT(_fmt, _qr) \ | ||
992 | (VAL_TPI_FORMAT_##_fmt | (VAL_TPI_QUAN_RANGE_##_qr << 2)) | ||
993 | |||
994 | /* Input Format, default value: 0x00 */ | ||
995 | #define REG_TPI_INPUT 0x0609 | ||
996 | #define BIT_TPI_INPUT_EXTENDEDBITMODE BIT(7) | ||
997 | #define BIT_TPI_INPUT_ENDITHER BIT(6) | ||
998 | #define MSK_TPI_INPUT_INPUT_QUAN_RANGE 0x0c | ||
999 | #define MSK_TPI_INPUT_INPUT_FORMAT 0x03 | ||
1000 | |||
1001 | /* Output Format, default value: 0x00 */ | ||
1002 | #define REG_TPI_OUTPUT 0x060a | ||
1003 | #define BIT_TPI_OUTPUT_CSCMODE709 BIT(4) | ||
1004 | #define MSK_TPI_OUTPUT_OUTPUT_QUAN_RANGE 0x0c | ||
1005 | #define MSK_TPI_OUTPUT_OUTPUT_FORMAT 0x03 | ||
1006 | |||
1007 | /* TPI AVI Check Sum, default value: 0x00 */ | ||
1008 | #define REG_TPI_AVI_CHSUM 0x060c | ||
1009 | |||
1010 | /* TPI System Control, default value: 0x00 */ | ||
1011 | #define REG_TPI_SC 0x061a | ||
1012 | #define BIT_TPI_SC_TPI_UPDATE_FLG BIT(7) | ||
1013 | #define BIT_TPI_SC_TPI_REAUTH_CTL BIT(6) | ||
1014 | #define BIT_TPI_SC_TPI_OUTPUT_MODE_1 BIT(5) | ||
1015 | #define BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN BIT(4) | ||
1016 | #define BIT_TPI_SC_TPI_AV_MUTE BIT(3) | ||
1017 | #define BIT_TPI_SC_DDC_GPU_REQUEST BIT(2) | ||
1018 | #define BIT_TPI_SC_DDC_TPI_SW BIT(1) | ||
1019 | #define BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI BIT(0) | ||
1020 | |||
1021 | /* TPI COPP Query Data, default value: 0x00 */ | ||
1022 | #define REG_TPI_COPP_DATA1 0x0629 | ||
1023 | #define BIT_TPI_COPP_DATA1_COPP_GPROT BIT(7) | ||
1024 | #define BIT_TPI_COPP_DATA1_COPP_LPROT BIT(6) | ||
1025 | #define MSK_TPI_COPP_DATA1_COPP_LINK_STATUS 0x30 | ||
1026 | #define VAL_TPI_COPP_LINK_STATUS_NORMAL 0x00 | ||
1027 | #define VAL_TPI_COPP_LINK_STATUS_LINK_LOST 0x10 | ||
1028 | #define VAL_TPI_COPP_LINK_STATUS_RENEGOTIATION_REQ 0x20 | ||
1029 | #define VAL_TPI_COPP_LINK_STATUS_LINK_SUSPENDED 0x30 | ||
1030 | #define BIT_TPI_COPP_DATA1_COPP_HDCP_REP BIT(3) | ||
1031 | #define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_0 BIT(2) | ||
1032 | #define BIT_TPI_COPP_DATA1_COPP_PROTYPE BIT(1) | ||
1033 | #define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_1 BIT(0) | ||
1034 | |||
1035 | /* TPI COPP Control Data, default value: 0x00 */ | ||
1036 | #define REG_TPI_COPP_DATA2 0x062a | ||
1037 | #define BIT_TPI_COPP_DATA2_INTR_ENCRYPTION BIT(5) | ||
1038 | #define BIT_TPI_COPP_DATA2_KSV_FORWARD BIT(4) | ||
1039 | #define BIT_TPI_COPP_DATA2_INTERM_RI_CHECK_EN BIT(3) | ||
1040 | #define BIT_TPI_COPP_DATA2_DOUBLE_RI_CHECK BIT(2) | ||
1041 | #define BIT_TPI_COPP_DATA2_DDC_SHORT_RI_RD BIT(1) | ||
1042 | #define BIT_TPI_COPP_DATA2_COPP_PROTLEVEL BIT(0) | ||
1043 | |||
1044 | /* TPI Interrupt Enable, default value: 0x00 */ | ||
1045 | #define REG_TPI_INTR_EN 0x063c | ||
1046 | |||
1047 | /* TPI Interrupt Status Low Byte, default value: 0x00 */ | ||
1048 | #define REG_TPI_INTR_ST0 0x063d | ||
1049 | #define BIT_TPI_INTR_ST0_TPI_AUTH_CHNGE_STAT BIT(7) | ||
1050 | #define BIT_TPI_INTR_ST0_TPI_V_RDY_STAT BIT(6) | ||
1051 | #define BIT_TPI_INTR_ST0_TPI_COPP_CHNGE_STAT BIT(5) | ||
1052 | #define BIT_TPI_INTR_ST0_KSV_FIFO_FIRST_STAT BIT(3) | ||
1053 | #define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_DONE_STAT BIT(2) | ||
1054 | #define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_ERR_STAT BIT(1) | ||
1055 | #define BIT_TPI_INTR_ST0_READ_BKSV_ERR_STAT BIT(0) | ||
1056 | |||
1057 | /* TPI DS BCAPS Status, default value: 0x00 */ | ||
1058 | #define REG_TPI_DS_BCAPS 0x0644 | ||
1059 | |||
1060 | /* TPI BStatus1, default value: 0x00 */ | ||
1061 | #define REG_TPI_BSTATUS1 0x0645 | ||
1062 | #define BIT_TPI_BSTATUS1_DS_DEV_EXCEED BIT(7) | ||
1063 | #define MSK_TPI_BSTATUS1_DS_DEV_CNT 0x7f | ||
1064 | |||
1065 | /* TPI BStatus2, default value: 0x10 */ | ||
1066 | #define REG_TPI_BSTATUS2 0x0646 | ||
1067 | #define MSK_TPI_BSTATUS2_DS_BSTATUS 0xe0 | ||
1068 | #define BIT_TPI_BSTATUS2_DS_HDMI_MODE BIT(4) | ||
1069 | #define BIT_TPI_BSTATUS2_DS_CASC_EXCEED BIT(3) | ||
1070 | #define MSK_TPI_BSTATUS2_DS_DEPTH 0x07 | ||
1071 | |||
1072 | /* TPI HW Optimization Control #3, default value: 0x00 */ | ||
1073 | #define REG_TPI_HW_OPT3 0x06bb | ||
1074 | #define BIT_TPI_HW_OPT3_DDC_DEBUG BIT(7) | ||
1075 | #define BIT_TPI_HW_OPT3_RI_CHECK_SKIP BIT(3) | ||
1076 | #define BIT_TPI_HW_OPT3_TPI_DDC_BURST_MODE BIT(2) | ||
1077 | #define MSK_TPI_HW_OPT3_TPI_DDC_REQ_LEVEL 0x03 | ||
1078 | |||
1079 | /* TPI Info Frame Select, default value: 0x00 */ | ||
1080 | #define REG_TPI_INFO_FSEL 0x06bf | ||
1081 | #define BIT_TPI_INFO_FSEL_TPI_INFO_EN BIT(7) | ||
1082 | #define BIT_TPI_INFO_FSEL_TPI_INFO_RPT BIT(6) | ||
1083 | #define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG BIT(5) | ||
1084 | #define MSK_TPI_INFO_FSEL_TPI_INFO_SEL 0x07 | ||
1085 | |||
1086 | /* TPI Info Byte #0, default value: 0x00 */ | ||
1087 | #define REG_TPI_INFO_B0 0x06c0 | ||
1088 | |||
1089 | /* CoC Status, default value: 0x00 */ | ||
1090 | #define REG_COC_STAT_0 0x0700 | ||
1091 | #define REG_COC_STAT_1 0x0701 | ||
1092 | #define REG_COC_STAT_2 0x0702 | ||
1093 | #define REG_COC_STAT_3 0x0703 | ||
1094 | #define REG_COC_STAT_4 0x0704 | ||
1095 | #define REG_COC_STAT_5 0x0705 | ||
1096 | |||
1097 | /* CoC 1st Ctl, default value: 0x40 */ | ||
1098 | #define REG_COC_CTL0 0x0710 | ||
1099 | |||
1100 | /* CoC 2nd Ctl, default value: 0x0a */ | ||
1101 | #define REG_COC_CTL1 0x0711 | ||
1102 | #define MSK_COC_CTL1_COC_CTRL1_7_6 0xc0 | ||
1103 | #define MSK_COC_CTL1_COC_CTRL1_5_0 0x3f | ||
1104 | |||
1105 | /* CoC 3rd Ctl, default value: 0x14 */ | ||
1106 | #define REG_COC_CTL2 0x0712 | ||
1107 | #define MSK_COC_CTL2_COC_CTRL2_7_6 0xc0 | ||
1108 | #define MSK_COC_CTL2_COC_CTRL2_5_0 0x3f | ||
1109 | |||
1110 | /* CoC 4th Ctl, default value: 0x40 */ | ||
1111 | #define REG_COC_CTL3 0x0713 | ||
1112 | #define BIT_COC_CTL3_COC_CTRL3_7 BIT(7) | ||
1113 | #define MSK_COC_CTL3_COC_CTRL3_6_0 0x7f | ||
1114 | |||
1115 | /* CoC 7th Ctl, default value: 0x00 */ | ||
1116 | #define REG_COC_CTL6 0x0716 | ||
1117 | #define BIT_COC_CTL6_COC_CTRL6_7 BIT(7) | ||
1118 | #define BIT_COC_CTL6_COC_CTRL6_6 BIT(6) | ||
1119 | #define MSK_COC_CTL6_COC_CTRL6_5_0 0x3f | ||
1120 | |||
1121 | /* CoC 8th Ctl, default value: 0x06 */ | ||
1122 | #define REG_COC_CTL7 0x0717 | ||
1123 | #define BIT_COC_CTL7_COC_CTRL7_7 BIT(7) | ||
1124 | #define BIT_COC_CTL7_COC_CTRL7_6 BIT(6) | ||
1125 | #define BIT_COC_CTL7_COC_CTRL7_5 BIT(5) | ||
1126 | #define MSK_COC_CTL7_COC_CTRL7_4_3 0x18 | ||
1127 | #define MSK_COC_CTL7_COC_CTRL7_2_0 0x07 | ||
1128 | |||
1129 | /* CoC 10th Ctl, default value: 0x00 */ | ||
1130 | #define REG_COC_CTL9 0x0719 | ||
1131 | |||
1132 | /* CoC 11th Ctl, default value: 0x00 */ | ||
1133 | #define REG_COC_CTLA 0x071a | ||
1134 | |||
1135 | /* CoC 12th Ctl, default value: 0x00 */ | ||
1136 | #define REG_COC_CTLB 0x071b | ||
1137 | |||
1138 | /* CoC 13th Ctl, default value: 0x0f */ | ||
1139 | #define REG_COC_CTLC 0x071c | ||
1140 | |||
1141 | /* CoC 14th Ctl, default value: 0x0a */ | ||
1142 | #define REG_COC_CTLD 0x071d | ||
1143 | #define BIT_COC_CTLD_COC_CTRLD_7 BIT(7) | ||
1144 | #define MSK_COC_CTLD_COC_CTRLD_6_0 0x7f | ||
1145 | |||
1146 | /* CoC 15th Ctl, default value: 0x0a */ | ||
1147 | #define REG_COC_CTLE 0x071e | ||
1148 | #define BIT_COC_CTLE_COC_CTRLE_7 BIT(7) | ||
1149 | #define MSK_COC_CTLE_COC_CTRLE_6_0 0x7f | ||
1150 | |||
1151 | /* CoC 16th Ctl, default value: 0x00 */ | ||
1152 | #define REG_COC_CTLF 0x071f | ||
1153 | #define MSK_COC_CTLF_COC_CTRLF_7_3 0xf8 | ||
1154 | #define MSK_COC_CTLF_COC_CTRLF_2_0 0x07 | ||
1155 | |||
1156 | /* CoC 18th Ctl, default value: 0x32 */ | ||
1157 | #define REG_COC_CTL11 0x0721 | ||
1158 | #define MSK_COC_CTL11_COC_CTRL11_7_4 0xf0 | ||
1159 | #define MSK_COC_CTL11_COC_CTRL11_3_0 0x0f | ||
1160 | |||
1161 | /* CoC 21st Ctl, default value: 0x00 */ | ||
1162 | #define REG_COC_CTL14 0x0724 | ||
1163 | #define MSK_COC_CTL14_COC_CTRL14_7_4 0xf0 | ||
1164 | #define MSK_COC_CTL14_COC_CTRL14_3_0 0x0f | ||
1165 | |||
1166 | /* CoC 22nd Ctl, default value: 0x00 */ | ||
1167 | #define REG_COC_CTL15 0x0725 | ||
1168 | #define BIT_COC_CTL15_COC_CTRL15_7 BIT(7) | ||
1169 | #define MSK_COC_CTL15_COC_CTRL15_6_4 0x70 | ||
1170 | #define MSK_COC_CTL15_COC_CTRL15_3_0 0x0f | ||
1171 | |||
1172 | /* CoC Interrupt, default value: 0x00 */ | ||
1173 | #define REG_COC_INTR 0x0726 | ||
1174 | |||
1175 | /* CoC Interrupt Mask, default value: 0x00 */ | ||
1176 | #define REG_COC_INTR_MASK 0x0727 | ||
1177 | #define BIT_COC_PLL_LOCK_STATUS_CHANGE BIT(0) | ||
1178 | #define BIT_COC_CALIBRATION_DONE BIT(1) | ||
1179 | |||
1180 | /* CoC Misc Ctl, default value: 0x00 */ | ||
1181 | #define REG_COC_MISC_CTL0 0x0728 | ||
1182 | #define BIT_COC_MISC_CTL0_FSM_MON BIT(7) | ||
1183 | |||
1184 | /* CoC 24th Ctl, default value: 0x00 */ | ||
1185 | #define REG_COC_CTL17 0x072a | ||
1186 | #define MSK_COC_CTL17_COC_CTRL17_7_4 0xf0 | ||
1187 | #define MSK_COC_CTL17_COC_CTRL17_3_0 0x0f | ||
1188 | |||
1189 | /* CoC 25th Ctl, default value: 0x00 */ | ||
1190 | #define REG_COC_CTL18 0x072b | ||
1191 | #define MSK_COC_CTL18_COC_CTRL18_7_4 0xf0 | ||
1192 | #define MSK_COC_CTL18_COC_CTRL18_3_0 0x0f | ||
1193 | |||
1194 | /* CoC 26th Ctl, default value: 0x00 */ | ||
1195 | #define REG_COC_CTL19 0x072c | ||
1196 | #define MSK_COC_CTL19_COC_CTRL19_7_4 0xf0 | ||
1197 | #define MSK_COC_CTL19_COC_CTRL19_3_0 0x0f | ||
1198 | |||
1199 | /* CoC 27th Ctl, default value: 0x00 */ | ||
1200 | #define REG_COC_CTL1A 0x072d | ||
1201 | #define MSK_COC_CTL1A_COC_CTRL1A_7_2 0xfc | ||
1202 | #define MSK_COC_CTL1A_COC_CTRL1A_1_0 0x03 | ||
1203 | |||
1204 | /* DoC 9th Status, default value: 0x00 */ | ||
1205 | #define REG_DOC_STAT_8 0x0740 | ||
1206 | |||
1207 | /* DoC 10th Status, default value: 0x00 */ | ||
1208 | #define REG_DOC_STAT_9 0x0741 | ||
1209 | |||
1210 | /* DoC 5th CFG, default value: 0x00 */ | ||
1211 | #define REG_DOC_CFG4 0x074e | ||
1212 | #define MSK_DOC_CFG4_DBG_STATE_DOC_FSM 0x0f | ||
1213 | |||
1214 | /* DoC 1st Ctl, default value: 0x40 */ | ||
1215 | #define REG_DOC_CTL0 0x0751 | ||
1216 | |||
1217 | /* DoC 7th Ctl, default value: 0x00 */ | ||
1218 | #define REG_DOC_CTL6 0x0757 | ||
1219 | #define BIT_DOC_CTL6_DOC_CTRL6_7 BIT(7) | ||
1220 | #define BIT_DOC_CTL6_DOC_CTRL6_6 BIT(6) | ||
1221 | #define MSK_DOC_CTL6_DOC_CTRL6_5_4 0x30 | ||
1222 | #define MSK_DOC_CTL6_DOC_CTRL6_3_0 0x0f | ||
1223 | |||
1224 | /* DoC 8th Ctl, default value: 0x00 */ | ||
1225 | #define REG_DOC_CTL7 0x0758 | ||
1226 | #define BIT_DOC_CTL7_DOC_CTRL7_7 BIT(7) | ||
1227 | #define BIT_DOC_CTL7_DOC_CTRL7_6 BIT(6) | ||
1228 | #define BIT_DOC_CTL7_DOC_CTRL7_5 BIT(5) | ||
1229 | #define MSK_DOC_CTL7_DOC_CTRL7_4_3 0x18 | ||
1230 | #define MSK_DOC_CTL7_DOC_CTRL7_2_0 0x07 | ||
1231 | |||
1232 | /* DoC 9th Ctl, default value: 0x00 */ | ||
1233 | #define REG_DOC_CTL8 0x076c | ||
1234 | #define BIT_DOC_CTL8_DOC_CTRL8_7 BIT(7) | ||
1235 | #define MSK_DOC_CTL8_DOC_CTRL8_6_4 0x70 | ||
1236 | #define MSK_DOC_CTL8_DOC_CTRL8_3_2 0x0c | ||
1237 | #define MSK_DOC_CTL8_DOC_CTRL8_1_0 0x03 | ||
1238 | |||
1239 | /* DoC 10th Ctl, default value: 0x00 */ | ||
1240 | #define REG_DOC_CTL9 0x076d | ||
1241 | |||
1242 | /* DoC 11th Ctl, default value: 0x00 */ | ||
1243 | #define REG_DOC_CTLA 0x076e | ||
1244 | |||
1245 | /* DoC 15th Ctl, default value: 0x00 */ | ||
1246 | #define REG_DOC_CTLE 0x0772 | ||
1247 | #define BIT_DOC_CTLE_DOC_CTRLE_7 BIT(7) | ||
1248 | #define BIT_DOC_CTLE_DOC_CTRLE_6 BIT(6) | ||
1249 | #define MSK_DOC_CTLE_DOC_CTRLE_5_4 0x30 | ||
1250 | #define MSK_DOC_CTLE_DOC_CTRLE_3_0 0x0f | ||
1251 | |||
1252 | /* Interrupt Mask 1st, default value: 0x00 */ | ||
1253 | #define REG_MHL_INT_0_MASK 0x0580 | ||
1254 | |||
1255 | /* Interrupt Mask 2nd, default value: 0x00 */ | ||
1256 | #define REG_MHL_INT_1_MASK 0x0581 | ||
1257 | |||
1258 | /* Interrupt Mask 3rd, default value: 0x00 */ | ||
1259 | #define REG_MHL_INT_2_MASK 0x0582 | ||
1260 | |||
1261 | /* Interrupt Mask 4th, default value: 0x00 */ | ||
1262 | #define REG_MHL_INT_3_MASK 0x0583 | ||
1263 | |||
1264 | /* MDT Receive Time Out, default value: 0x00 */ | ||
1265 | #define REG_MDT_RCV_TIMEOUT 0x0584 | ||
1266 | |||
1267 | /* MDT Transmit Time Out, default value: 0x00 */ | ||
1268 | #define REG_MDT_XMIT_TIMEOUT 0x0585 | ||
1269 | |||
1270 | /* MDT Receive Control, default value: 0x00 */ | ||
1271 | #define REG_MDT_RCV_CTRL 0x0586 | ||
1272 | #define BIT_MDT_RCV_CTRL_MDT_RCV_EN BIT(7) | ||
1273 | #define BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN BIT(6) | ||
1274 | #define BIT_MDT_RCV_CTRL_MDT_RFIFO_OVER_WR_EN BIT(4) | ||
1275 | #define BIT_MDT_RCV_CTRL_MDT_XFIFO_OVER_WR_EN BIT(3) | ||
1276 | #define BIT_MDT_RCV_CTRL_MDT_DISABLE BIT(2) | ||
1277 | #define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_ALL BIT(1) | ||
1278 | #define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR BIT(0) | ||
1279 | |||
1280 | /* MDT Receive Read Port, default value: 0x00 */ | ||
1281 | #define REG_MDT_RCV_READ_PORT 0x0587 | ||
1282 | |||
1283 | /* MDT Transmit Control, default value: 0x70 */ | ||
1284 | #define REG_MDT_XMIT_CTRL 0x0588 | ||
1285 | #define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN BIT(7) | ||
1286 | #define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN BIT(6) | ||
1287 | #define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5) | ||
1288 | #define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID BIT(4) | ||
1289 | #define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3) | ||
1290 | #define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT BIT(2) | ||
1291 | #define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL BIT(1) | ||
1292 | #define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR BIT(0) | ||
1293 | |||
1294 | /* MDT Receive WRITE Port, default value: 0x00 */ | ||
1295 | #define REG_MDT_XMIT_WRITE_PORT 0x0589 | ||
1296 | |||
1297 | /* MDT RFIFO Status, default value: 0x00 */ | ||
1298 | #define REG_MDT_RFIFO_STAT 0x058a | ||
1299 | #define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CNT 0xe0 | ||
1300 | #define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CUR_BYTE_CNT 0x1f | ||
1301 | |||
1302 | /* MDT XFIFO Status, default value: 0x80 */ | ||
1303 | #define REG_MDT_XFIFO_STAT 0x058b | ||
1304 | #define MSK_MDT_XFIFO_STAT_MDT_XFIFO_LEVEL_AVAIL 0xe0 | ||
1305 | #define BIT_MDT_XFIFO_STAT_MDT_XMIT_PRE_HS_EN BIT(4) | ||
1306 | #define MSK_MDT_XFIFO_STAT_MDT_WRITE_BURST_LEN 0x0f | ||
1307 | |||
1308 | /* MDT Interrupt 0, default value: 0x0c */ | ||
1309 | #define REG_MDT_INT_0 0x058c | ||
1310 | #define BIT_MDT_RFIFO_DATA_RDY BIT(0) | ||
1311 | #define BIT_MDT_IDLE_AFTER_HAWB_DISABLE BIT(2) | ||
1312 | #define BIT_MDT_XFIFO_EMPTY BIT(3) | ||
1313 | |||
1314 | /* MDT Interrupt 0 Mask, default value: 0x00 */ | ||
1315 | #define REG_MDT_INT_0_MASK 0x058d | ||
1316 | |||
1317 | /* MDT Interrupt 1, default value: 0x00 */ | ||
1318 | #define REG_MDT_INT_1 0x058e | ||
1319 | #define BIT_MDT_RCV_TIMEOUT BIT(0) | ||
1320 | #define BIT_MDT_RCV_SM_ABORT_PKT_RCVD BIT(1) | ||
1321 | #define BIT_MDT_RCV_SM_ERROR BIT(2) | ||
1322 | #define BIT_MDT_XMIT_TIMEOUT BIT(5) | ||
1323 | #define BIT_MDT_XMIT_SM_ABORT_PKT_RCVD BIT(6) | ||
1324 | #define BIT_MDT_XMIT_SM_ERROR BIT(7) | ||
1325 | |||
1326 | /* MDT Interrupt 1 Mask, default value: 0x00 */ | ||
1327 | #define REG_MDT_INT_1_MASK 0x058f | ||
1328 | |||
1329 | /* CBUS Vendor ID, default value: 0x01 */ | ||
1330 | #define REG_CBUS_VENDOR_ID 0x0590 | ||
1331 | |||
1332 | /* CBUS Connection Status, default value: 0x00 */ | ||
1333 | #define REG_CBUS_STATUS 0x0591 | ||
1334 | #define BIT_CBUS_STATUS_MHL_CABLE_PRESENT BIT(4) | ||
1335 | #define BIT_CBUS_STATUS_MSC_HB_SUCCESS BIT(3) | ||
1336 | #define BIT_CBUS_STATUS_CBUS_HPD BIT(2) | ||
1337 | #define BIT_CBUS_STATUS_MHL_MODE BIT(1) | ||
1338 | #define BIT_CBUS_STATUS_CBUS_CONNECTED BIT(0) | ||
1339 | |||
1340 | /* CBUS Interrupt 1st, default value: 0x00 */ | ||
1341 | #define REG_CBUS_INT_0 0x0592 | ||
1342 | #define BIT_CBUS_MSC_MT_DONE_NACK BIT(7) | ||
1343 | #define BIT_CBUS_MSC_MR_SET_INT BIT(6) | ||
1344 | #define BIT_CBUS_MSC_MR_WRITE_BURST BIT(5) | ||
1345 | #define BIT_CBUS_MSC_MR_MSC_MSG BIT(4) | ||
1346 | #define BIT_CBUS_MSC_MR_WRITE_STAT BIT(3) | ||
1347 | #define BIT_CBUS_HPD_CHG BIT(2) | ||
1348 | #define BIT_CBUS_MSC_MT_DONE BIT(1) | ||
1349 | #define BIT_CBUS_CNX_CHG BIT(0) | ||
1350 | |||
1351 | /* CBUS Interrupt Mask 1st, default value: 0x00 */ | ||
1352 | #define REG_CBUS_INT_0_MASK 0x0593 | ||
1353 | |||
1354 | /* CBUS Interrupt 2nd, default value: 0x00 */ | ||
1355 | #define REG_CBUS_INT_1 0x0594 | ||
1356 | #define BIT_CBUS_CMD_ABORT BIT(6) | ||
1357 | #define BIT_CBUS_MSC_ABORT_RCVD BIT(3) | ||
1358 | #define BIT_CBUS_DDC_ABORT BIT(2) | ||
1359 | #define BIT_CBUS_CEC_ABORT BIT(1) | ||
1360 | |||
1361 | /* CBUS Interrupt Mask 2nd, default value: 0x00 */ | ||
1362 | #define REG_CBUS_INT_1_MASK 0x0595 | ||
1363 | |||
1364 | /* CBUS DDC Abort Interrupt, default value: 0x00 */ | ||
1365 | #define REG_DDC_ABORT_INT 0x0598 | ||
1366 | |||
1367 | /* CBUS DDC Abort Interrupt Mask, default value: 0x00 */ | ||
1368 | #define REG_DDC_ABORT_INT_MASK 0x0599 | ||
1369 | |||
1370 | /* CBUS MSC Requester Abort Interrupt, default value: 0x00 */ | ||
1371 | #define REG_MSC_MT_ABORT_INT 0x059a | ||
1372 | |||
1373 | /* CBUS MSC Requester Abort Interrupt Mask, default value: 0x00 */ | ||
1374 | #define REG_MSC_MT_ABORT_INT_MASK 0x059b | ||
1375 | |||
1376 | /* CBUS MSC Responder Abort Interrupt, default value: 0x00 */ | ||
1377 | #define REG_MSC_MR_ABORT_INT 0x059c | ||
1378 | |||
1379 | /* CBUS MSC Responder Abort Interrupt Mask, default value: 0x00 */ | ||
1380 | #define REG_MSC_MR_ABORT_INT_MASK 0x059d | ||
1381 | |||
1382 | /* CBUS RX DISCOVERY interrupt, default value: 0x00 */ | ||
1383 | #define REG_CBUS_RX_DISC_INT0 0x059e | ||
1384 | |||
1385 | /* CBUS RX DISCOVERY Interrupt Mask, default value: 0x00 */ | ||
1386 | #define REG_CBUS_RX_DISC_INT0_MASK 0x059f | ||
1387 | |||
1388 | /* CBUS_Link_Layer Control #8, default value: 0x00 */ | ||
1389 | #define REG_CBUS_LINK_CTRL_8 0x05a7 | ||
1390 | |||
1391 | /* MDT State Machine Status, default value: 0x00 */ | ||
1392 | #define REG_MDT_SM_STAT 0x05b5 | ||
1393 | #define MSK_MDT_SM_STAT_MDT_RCV_STATE 0xf0 | ||
1394 | #define MSK_MDT_SM_STAT_MDT_XMIT_STATE 0x0f | ||
1395 | |||
1396 | /* CBUS MSC command trigger, default value: 0x00 */ | ||
1397 | #define REG_MSC_COMMAND_START 0x05b8 | ||
1398 | #define BIT_MSC_COMMAND_START_DEBUG BIT(5) | ||
1399 | #define BIT_MSC_COMMAND_START_WRITE_BURST BIT(4) | ||
1400 | #define BIT_MSC_COMMAND_START_WRITE_STAT BIT(3) | ||
1401 | #define BIT_MSC_COMMAND_START_READ_DEVCAP BIT(2) | ||
1402 | #define BIT_MSC_COMMAND_START_MSC_MSG BIT(1) | ||
1403 | #define BIT_MSC_COMMAND_START_PEER BIT(0) | ||
1404 | |||
1405 | /* CBUS MSC Command/Offset, default value: 0x00 */ | ||
1406 | #define REG_MSC_CMD_OR_OFFSET 0x05b9 | ||
1407 | |||
1408 | /* CBUS MSC Transmit Data */ | ||
1409 | #define REG_MSC_1ST_TRANSMIT_DATA 0x05ba | ||
1410 | #define REG_MSC_2ND_TRANSMIT_DATA 0x05bb | ||
1411 | |||
1412 | /* CBUS MSC Requester Received Data */ | ||
1413 | #define REG_MSC_MT_RCVD_DATA0 0x05bc | ||
1414 | #define REG_MSC_MT_RCVD_DATA1 0x05bd | ||
1415 | |||
1416 | /* CBUS MSC Responder MSC_MSG Received Data */ | ||
1417 | #define REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA 0x05bf | ||
1418 | #define REG_MSC_MR_MSC_MSG_RCVD_2ND_DATA 0x05c0 | ||
1419 | |||
1420 | /* CBUS MSC Heartbeat Control, default value: 0x27 */ | ||
1421 | #define REG_MSC_HEARTBEAT_CTRL 0x05c4 | ||
1422 | #define BIT_MSC_HEARTBEAT_CTRL_MSC_HB_EN BIT(7) | ||
1423 | #define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_FAIL_LIMIT 0x70 | ||
1424 | #define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_PERIOD_MSB 0x0f | ||
1425 | |||
1426 | /* CBUS MSC Compatibility Control, default value: 0x02 */ | ||
1427 | #define REG_CBUS_MSC_COMPAT_CTRL 0x05c7 | ||
1428 | #define BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN BIT(7) | ||
1429 | #define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_MSC_ON_CBUS BIT(6) | ||
1430 | #define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_DDC_ON_CBUS BIT(5) | ||
1431 | #define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_DDC_ERRORCODE BIT(3) | ||
1432 | #define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_VS1_ERRORCODE BIT(2) | ||
1433 | |||
1434 | /* CBUS3 Converter Control, default value: 0x24 */ | ||
1435 | #define REG_CBUS3_CNVT 0x05dc | ||
1436 | #define MSK_CBUS3_CNVT_CBUS3_RETRYLMT 0xf0 | ||
1437 | #define MSK_CBUS3_CNVT_CBUS3_PEERTOUT_SEL 0x0c | ||
1438 | #define BIT_CBUS3_CNVT_TEARCBUS_EN BIT(1) | ||
1439 | #define BIT_CBUS3_CNVT_CBUS3CNVT_EN BIT(0) | ||
1440 | |||
1441 | /* Discovery Control1, default value: 0x24 */ | ||
1442 | #define REG_DISC_CTRL1 0x05e0 | ||
1443 | #define BIT_DISC_CTRL1_CBUS_INTR_EN BIT(7) | ||
1444 | #define BIT_DISC_CTRL1_HB_ONLY BIT(6) | ||
1445 | #define MSK_DISC_CTRL1_DISC_ATT 0x30 | ||
1446 | #define MSK_DISC_CTRL1_DISC_CYC 0x0c | ||
1447 | #define BIT_DISC_CTRL1_DISC_EN BIT(0) | ||
1448 | |||
1449 | #define VAL_PUP_OFF 0 | ||
1450 | #define VAL_PUP_20K 1 | ||
1451 | #define VAL_PUP_5K 2 | ||
1452 | |||
1453 | /* Discovery Control4, default value: 0x80 */ | ||
1454 | #define REG_DISC_CTRL4 0x05e3 | ||
1455 | #define MSK_DISC_CTRL4_CBUSDISC_PUP_SEL 0xc0 | ||
1456 | #define MSK_DISC_CTRL4_CBUSIDLE_PUP_SEL 0x30 | ||
1457 | #define VAL_DISC_CTRL4(pup_disc, pup_idle) (((pup_disc) << 6) | (pup_idle << 4)) | ||
1458 | |||
1459 | /* Discovery Control5, default value: 0x03 */ | ||
1460 | #define REG_DISC_CTRL5 0x05e4 | ||
1461 | #define BIT_DISC_CTRL5_DSM_OVRIDE BIT(3) | ||
1462 | #define MSK_DISC_CTRL5_CBUSMHL_PUP_SEL 0x03 | ||
1463 | |||
1464 | /* Discovery Control8, default value: 0x81 */ | ||
1465 | #define REG_DISC_CTRL8 0x05e7 | ||
1466 | #define BIT_DISC_CTRL8_NOMHLINT_CLR_BYPASS BIT(7) | ||
1467 | #define BIT_DISC_CTRL8_DELAY_CBUS_INTR_EN BIT(0) | ||
1468 | |||
1469 | /* Discovery Control9, default value: 0x54 */ | ||
1470 | #define REG_DISC_CTRL9 0x05e8 | ||
1471 | #define BIT_DISC_CTRL9_MHL3_RSEN_BYP BIT(7) | ||
1472 | #define BIT_DISC_CTRL9_MHL3DISC_EN BIT(6) | ||
1473 | #define BIT_DISC_CTRL9_WAKE_DRVFLT BIT(4) | ||
1474 | #define BIT_DISC_CTRL9_NOMHL_EST BIT(3) | ||
1475 | #define BIT_DISC_CTRL9_DISC_PULSE_PROCEED BIT(2) | ||
1476 | #define BIT_DISC_CTRL9_WAKE_PULSE_BYPASS BIT(1) | ||
1477 | #define BIT_DISC_CTRL9_VBUS_OUTPUT_CAPABILITY_SRC BIT(0) | ||
1478 | |||
1479 | /* Discovery Status1, default value: 0x00 */ | ||
1480 | #define REG_DISC_STAT1 0x05eb | ||
1481 | #define BIT_DISC_STAT1_PSM_OVRIDE BIT(5) | ||
1482 | #define MSK_DISC_STAT1_DISC_SM 0x0f | ||
1483 | |||
1484 | /* Discovery Status2, default value: 0x00 */ | ||
1485 | #define REG_DISC_STAT2 0x05ec | ||
1486 | #define BIT_DISC_STAT2_CBUS_OE_POL BIT(6) | ||
1487 | #define BIT_DISC_STAT2_CBUS_SATUS BIT(5) | ||
1488 | #define BIT_DISC_STAT2_RSEN BIT(4) | ||
1489 | |||
1490 | #define MSK_DISC_STAT2_MHL_VRSN 0x0c | ||
1491 | #define VAL_DISC_STAT2_DEFAULT 0x00 | ||
1492 | #define VAL_DISC_STAT2_MHL1_2 0x04 | ||
1493 | #define VAL_DISC_STAT2_MHL3 0x08 | ||
1494 | #define VAL_DISC_STAT2_RESERVED 0x0c | ||
1495 | |||
1496 | #define MSK_DISC_STAT2_RGND 0x03 | ||
1497 | #define VAL_RGND_OPEN 0x00 | ||
1498 | #define VAL_RGND_2K 0x01 | ||
1499 | #define VAL_RGND_1K 0x02 | ||
1500 | #define VAL_RGND_SHORT 0x03 | ||
1501 | |||
1502 | /* Interrupt CBUS_reg1 INTR0, default value: 0x00 */ | ||
1503 | #define REG_CBUS_DISC_INTR0 0x05ed | ||
1504 | #define BIT_RGND_READY_INT BIT(6) | ||
1505 | #define BIT_CBUS_MHL12_DISCON_INT BIT(5) | ||
1506 | #define BIT_CBUS_MHL3_DISCON_INT BIT(4) | ||
1507 | #define BIT_NOT_MHL_EST_INT BIT(3) | ||
1508 | #define BIT_MHL_EST_INT BIT(2) | ||
1509 | #define BIT_MHL3_EST_INT BIT(1) | ||
1510 | #define VAL_CBUS_MHL_DISCON (BIT_CBUS_MHL12_DISCON_INT \ | ||
1511 | | BIT_CBUS_MHL3_DISCON_INT \ | ||
1512 | | BIT_NOT_MHL_EST_INT) | ||
1513 | |||
1514 | /* Interrupt CBUS_reg1 INTR0 Mask, default value: 0x00 */ | ||
1515 | #define REG_CBUS_DISC_INTR0_MASK 0x05ee | ||
1516 | |||
1517 | #endif /* __SIL_SII8620_H__ */ | ||
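A note on the value helpers above: macros such as VAL_TPI_FORMAT() and VAL_M3_CTRL_MHL3_VALUE simply OR the individual field definitions into a single register value. A minimal sketch of how such composed values might be written, assuming a hypothetical struct sii8620 context and sii8620_write() accessor (the in-tree driver's real I/O helpers may differ):

	/* Hypothetical register accessor; the driver's own helper may differ. */
	static void sii8620_write(struct sii8620 *ctx, u16 addr, u8 val);

	static void example_set_tpi_formats(struct sii8620 *ctx)
	{
		/* RGB input, full quantization range: 0x00 | (0x02 << 2) = 0x08 */
		sii8620_write(ctx, REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL));

		/* YCbCr 4:2:2 output, limited range: 0x02 | (0x01 << 2) = 0x06 */
		sii8620_write(ctx, REG_TPI_OUTPUT, VAL_TPI_FORMAT(YCBCR422, LIMITED));

		/* Enable the MHL3 A/V path in the HDMI2MHL3 control register */
		sii8620_write(ctx, REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE);
	}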
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index bb2438dd8733..de52b20800e1 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c | |||
@@ -230,6 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = { | |||
230 | .ttm_tt_populate = cirrus_ttm_tt_populate, | 230 | .ttm_tt_populate = cirrus_ttm_tt_populate, |
231 | .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate, | 231 | .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate, |
232 | .init_mem_type = cirrus_bo_init_mem_type, | 232 | .init_mem_type = cirrus_bo_init_mem_type, |
233 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
233 | .evict_flags = cirrus_bo_evict_flags, | 234 | .evict_flags = cirrus_bo_evict_flags, |
234 | .move = NULL, | 235 | .move = NULL, |
235 | .verify_access = cirrus_bo_verify_access, | 236 | .verify_access = cirrus_bo_verify_access, |
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f81706387889..c32fb3c1d6f0 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
@@ -705,8 +705,7 @@ int drm_atomic_plane_set_property(struct drm_plane *plane, | |||
705 | state->src_w = val; | 705 | state->src_w = val; |
706 | } else if (property == config->prop_src_h) { | 706 | } else if (property == config->prop_src_h) { |
707 | state->src_h = val; | 707 | state->src_h = val; |
708 | } else if (property == config->rotation_property || | 708 | } else if (property == plane->rotation_property) { |
709 | property == plane->rotation_property) { | ||
710 | if (!is_power_of_2(val & DRM_ROTATE_MASK)) | 709 | if (!is_power_of_2(val & DRM_ROTATE_MASK)) |
711 | return -EINVAL; | 710 | return -EINVAL; |
712 | state->rotation = val; | 711 | state->rotation = val; |
@@ -766,8 +765,7 @@ drm_atomic_plane_get_property(struct drm_plane *plane, | |||
766 | *val = state->src_w; | 765 | *val = state->src_w; |
767 | } else if (property == config->prop_src_h) { | 766 | } else if (property == config->prop_src_h) { |
768 | *val = state->src_h; | 767 | *val = state->src_h; |
769 | } else if (property == config->rotation_property || | 768 | } else if (property == plane->rotation_property) { |
770 | property == plane->rotation_property) { | ||
771 | *val = state->rotation; | 769 | *val = state->rotation; |
772 | } else if (property == plane->zpos_property) { | 770 | } else if (property == plane->zpos_property) { |
773 | *val = state->zpos; | 771 | *val = state->zpos; |
@@ -1465,7 +1463,7 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit); | |||
1465 | 1463 | ||
1466 | static struct drm_pending_vblank_event *create_vblank_event( | 1464 | static struct drm_pending_vblank_event *create_vblank_event( |
1467 | struct drm_device *dev, struct drm_file *file_priv, | 1465 | struct drm_device *dev, struct drm_file *file_priv, |
1468 | struct fence *fence, uint64_t user_data) | 1466 | struct dma_fence *fence, uint64_t user_data) |
1469 | { | 1467 | { |
1470 | struct drm_pending_vblank_event *e = NULL; | 1468 | struct drm_pending_vblank_event *e = NULL; |
1471 | int ret; | 1469 | int ret; |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index f9362760bfb2..75ad01d595fd 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <drm/drm_plane_helper.h> | 30 | #include <drm/drm_plane_helper.h> |
31 | #include <drm/drm_crtc_helper.h> | 31 | #include <drm/drm_crtc_helper.h> |
32 | #include <drm/drm_atomic_helper.h> | 32 | #include <drm/drm_atomic_helper.h> |
33 | #include <linux/fence.h> | 33 | #include <linux/dma-fence.h> |
34 | 34 | ||
35 | #include "drm_crtc_internal.h" | 35 | #include "drm_crtc_internal.h" |
36 | 36 | ||
@@ -1017,7 +1017,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables); | |||
1017 | * drm_atomic_helper_swap_state() so it uses the current plane state (and | 1017 | * drm_atomic_helper_swap_state() so it uses the current plane state (and |
1018 | * just uses the atomic state to find the changed planes) | 1018 | * just uses the atomic state to find the changed planes) |
1019 | * | 1019 | * |
1020 | * Returns zero if success or < 0 if fence_wait() fails. | 1020 | * Returns zero if success or < 0 if dma_fence_wait() fails. |
1021 | */ | 1021 | */ |
1022 | int drm_atomic_helper_wait_for_fences(struct drm_device *dev, | 1022 | int drm_atomic_helper_wait_for_fences(struct drm_device *dev, |
1023 | struct drm_atomic_state *state, | 1023 | struct drm_atomic_state *state, |
@@ -1041,11 +1041,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev, | |||
1041 | * still interrupt the operation. Instead of blocking until the | 1041 | * still interrupt the operation. Instead of blocking until the |
1042 | * timer expires, make the wait interruptible. | 1042 | * timer expires, make the wait interruptible. |
1043 | */ | 1043 | */ |
1044 | ret = fence_wait(plane_state->fence, pre_swap); | 1044 | ret = dma_fence_wait(plane_state->fence, pre_swap); |
1045 | if (ret) | 1045 | if (ret) |
1046 | return ret; | 1046 | return ret; |
1047 | 1047 | ||
1048 | fence_put(plane_state->fence); | 1048 | dma_fence_put(plane_state->fence); |
1049 | plane_state->fence = NULL; | 1049 | plane_state->fence = NULL; |
1050 | } | 1050 | } |
1051 | 1051 | ||
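With the rename, drivers and helpers wait on and release plane fences through the dma_fence_* API from <linux/dma-fence.h>. A small sketch of the per-plane pattern the helper applies internally, hedged as illustrative only and not a drop-in replacement for the helper:

	/* Illustrative: wait on and release a dma_fence attached to plane state,
	 * mirroring what drm_atomic_helper_wait_for_fences() now does per plane. */
	static int example_wait_and_drop(struct drm_plane_state *plane_state)
	{
		int ret;

		if (!plane_state->fence)
			return 0;

		ret = dma_fence_wait(plane_state->fence, false);
		if (ret)
			return ret;

		dma_fence_put(plane_state->fence);
		plane_state->fence = NULL;
		return 0;
	}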
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index e52aece30900..1f2412c7ccfd 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c | |||
@@ -89,7 +89,7 @@ | |||
89 | * On top of this basic transformation additional properties can be exposed by | 89 | * On top of this basic transformation additional properties can be exposed by |
90 | * the driver: | 90 | * the driver: |
91 | * | 91 | * |
92 | * - Rotation is set up with drm_mode_create_rotation_property(). It adds a | 92 | * - Rotation is set up with drm_plane_create_rotation_property(). It adds a |
93 | * rotation and reflection step between the source and destination rectangles. | 93 | * rotation and reflection step between the source and destination rectangles. |
94 | * Without this property the rectangle is only scaled, but not rotated or | 94 | * Without this property the rectangle is only scaled, but not rotated or |
95 | * reflected. | 95 | * reflected. |
@@ -105,18 +105,12 @@ | |||
105 | */ | 105 | */ |
106 | 106 | ||
107 | /** | 107 | /** |
108 | * drm_mode_create_rotation_property - create a new rotation property | 108 | * drm_plane_create_rotation_property - create a new rotation property |
109 | * @dev: DRM device | 109 | * @plane: drm plane |
110 | * @rotation: initial value of the rotation property | ||
110 | * @supported_rotations: bitmask of supported rotations and reflections | 111 | * @supported_rotations: bitmask of supported rotations and reflections |
111 | * | 112 | * |
112 | * This creates a new property with the selected support for transformations. | 113 | * This creates a new property with the selected support for transformations. |
113 | * The resulting property should be stored in @rotation_property in | ||
114 | * &drm_mode_config. It then must be attached to each plane which supports | ||
115 | * rotations using drm_object_attach_property(). | ||
116 | * | ||
117 | * FIXME: Probably better if the rotation property is created on each plane, | ||
118 | * like the zpos property. Otherwise it's not possible to allow different | ||
119 | * rotation modes on different planes. | ||
120 | * | 114 | * |
121 | * Since a rotation by 180° degress is the same as reflecting both along the x | 115 | * Since a rotation by 180° degress is the same as reflecting both along the x |
122 | * and the y axis the rotation property is somewhat redundant. Drivers can use | 116 | * and the y axis the rotation property is somewhat redundant. Drivers can use |
@@ -144,24 +138,6 @@ | |||
144 | * rotation. After reflection, the rotation is applied to the image sampled from | 138 | * rotation. After reflection, the rotation is applied to the image sampled from |
145 | * the source rectangle, before scaling it to fit the destination rectangle. | 139 | * the source rectangle, before scaling it to fit the destination rectangle. |
146 | */ | 140 | */ |
147 | struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, | ||
148 | unsigned int supported_rotations) | ||
149 | { | ||
150 | static const struct drm_prop_enum_list props[] = { | ||
151 | { __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" }, | ||
152 | { __builtin_ffs(DRM_ROTATE_90) - 1, "rotate-90" }, | ||
153 | { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" }, | ||
154 | { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" }, | ||
155 | { __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" }, | ||
156 | { __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" }, | ||
157 | }; | ||
158 | |||
159 | return drm_property_create_bitmask(dev, 0, "rotation", | ||
160 | props, ARRAY_SIZE(props), | ||
161 | supported_rotations); | ||
162 | } | ||
163 | EXPORT_SYMBOL(drm_mode_create_rotation_property); | ||
164 | |||
165 | int drm_plane_create_rotation_property(struct drm_plane *plane, | 141 | int drm_plane_create_rotation_property(struct drm_plane *plane, |
166 | unsigned int rotation, | 142 | unsigned int rotation, |
167 | unsigned int supported_rotations) | 143 | unsigned int supported_rotations) |
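With the device-global drm_mode_create_rotation_property() removed, rotation support is attached per plane. A hedged sketch of a driver doing this during plane initialization (the include location and the surrounding plane setup are assumed):

	#include <drm/drm_blend.h>

	/* Illustrative: advertise 0/90/180/270 rotation plus reflection on one
	 * plane; the initial value is no rotation. */
	static int example_attach_rotation(struct drm_plane *plane)
	{
		return drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
							  DRM_ROTATE_0 |
							  DRM_ROTATE_90 |
							  DRM_ROTATE_180 |
							  DRM_ROTATE_270 |
							  DRM_REFLECT_X |
							  DRM_REFLECT_Y);
	}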
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 488355bdafb9..e02563966271 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c | |||
@@ -142,6 +142,11 @@ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) | |||
142 | sizeof(dp_dual_mode_hdmi_id)) == 0; | 142 | sizeof(dp_dual_mode_hdmi_id)) == 0; |
143 | } | 143 | } |
144 | 144 | ||
145 | static bool is_type1_adaptor(uint8_t adaptor_id) | ||
146 | { | ||
147 | return adaptor_id == 0 || adaptor_id == 0xff; | ||
148 | } | ||
149 | |||
145 | static bool is_type2_adaptor(uint8_t adaptor_id) | 150 | static bool is_type2_adaptor(uint8_t adaptor_id) |
146 | { | 151 | { |
147 | return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 | | 152 | return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 | |
@@ -193,6 +198,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) | |||
193 | */ | 198 | */ |
194 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID, | 199 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID, |
195 | hdmi_id, sizeof(hdmi_id)); | 200 | hdmi_id, sizeof(hdmi_id)); |
201 | DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n", | ||
202 | ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret); | ||
196 | if (ret) | 203 | if (ret) |
197 | return DRM_DP_DUAL_MODE_UNKNOWN; | 204 | return DRM_DP_DUAL_MODE_UNKNOWN; |
198 | 205 | ||
@@ -210,6 +217,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) | |||
210 | */ | 217 | */ |
211 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, | 218 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, |
212 | &adaptor_id, sizeof(adaptor_id)); | 219 | &adaptor_id, sizeof(adaptor_id)); |
220 | DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n", | ||
221 | adaptor_id, ret); | ||
213 | if (ret == 0) { | 222 | if (ret == 0) { |
214 | if (is_lspcon_adaptor(hdmi_id, adaptor_id)) | 223 | if (is_lspcon_adaptor(hdmi_id, adaptor_id)) |
215 | return DRM_DP_DUAL_MODE_LSPCON; | 224 | return DRM_DP_DUAL_MODE_LSPCON; |
@@ -219,6 +228,15 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) | |||
219 | else | 228 | else |
220 | return DRM_DP_DUAL_MODE_TYPE2_DVI; | 229 | return DRM_DP_DUAL_MODE_TYPE2_DVI; |
221 | } | 230 | } |
231 | /* | ||
232 | * If neither a proper type 1 ID nor a broken type 1 adaptor | ||
233 | * as described above, assume type 1, but let the user know | ||
234 | * that we may have misdetected the type. | ||
235 | */ | ||
236 | if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0]) | ||
237 | DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n", | ||
238 | adaptor_id); | ||
239 | |||
222 | } | 240 | } |
223 | 241 | ||
224 | if (is_hdmi_adaptor(hdmi_id)) | 242 | if (is_hdmi_adaptor(hdmi_id)) |
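The extra debug output and the type 1 fallback above only change detection internals; callers still receive a single enum drm_dp_dual_mode_type. A rough usage sketch, assuming the caller already has the port's DDC i2c_adapter:

	#include <drm/drm_dp_dual_mode_helper.h>

	/* Illustrative: probe the dual-mode adaptor behind a DP++ port. */
	static bool example_port_has_lspcon(struct i2c_adapter *ddc)
	{
		enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(ddc);

		/* UNKNOWN means the HDMI ID could not be read at all. */
		if (type == DRM_DP_DUAL_MODE_UNKNOWN)
			return false;

		return type == DRM_DP_DUAL_MODE_LSPCON;
	}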
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 95de47ba1e77..9506933b41cd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -1260,6 +1260,34 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len) | |||
1260 | return ret == xfers ? 0 : -1; | 1260 | return ret == xfers ? 0 : -1; |
1261 | } | 1261 | } |
1262 | 1262 | ||
1263 | static void connector_bad_edid(struct drm_connector *connector, | ||
1264 | u8 *edid, int num_blocks) | ||
1265 | { | ||
1266 | int i; | ||
1267 | |||
1268 | if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS)) | ||
1269 | return; | ||
1270 | |||
1271 | dev_warn(connector->dev->dev, | ||
1272 | "%s: EDID is invalid:\n", | ||
1273 | connector->name); | ||
1274 | for (i = 0; i < num_blocks; i++) { | ||
1275 | u8 *block = edid + i * EDID_LENGTH; | ||
1276 | char prefix[20]; | ||
1277 | |||
1278 | if (drm_edid_is_zero(block, EDID_LENGTH)) | ||
1279 | sprintf(prefix, "\t[%02x] ZERO ", i); | ||
1280 | else if (!drm_edid_block_valid(block, i, false, NULL)) | ||
1281 | sprintf(prefix, "\t[%02x] BAD ", i); | ||
1282 | else | ||
1283 | sprintf(prefix, "\t[%02x] GOOD ", i); | ||
1284 | |||
1285 | print_hex_dump(KERN_WARNING, | ||
1286 | prefix, DUMP_PREFIX_NONE, 16, 1, | ||
1287 | block, EDID_LENGTH, false); | ||
1288 | } | ||
1289 | } | ||
1290 | |||
1263 | /** | 1291 | /** |
1264 | * drm_do_get_edid - get EDID data using a custom EDID block read function | 1292 | * drm_do_get_edid - get EDID data using a custom EDID block read function |
1265 | * @connector: connector we're probing | 1293 | * @connector: connector we're probing |
@@ -1283,7 +1311,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, | |||
1283 | { | 1311 | { |
1284 | int i, j = 0, valid_extensions = 0; | 1312 | int i, j = 0, valid_extensions = 0; |
1285 | u8 *edid, *new; | 1313 | u8 *edid, *new; |
1286 | bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); | ||
1287 | 1314 | ||
1288 | if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) | 1315 | if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) |
1289 | return NULL; | 1316 | return NULL; |
@@ -1292,7 +1319,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, | |||
1292 | for (i = 0; i < 4; i++) { | 1319 | for (i = 0; i < 4; i++) { |
1293 | if (get_edid_block(data, edid, 0, EDID_LENGTH)) | 1320 | if (get_edid_block(data, edid, 0, EDID_LENGTH)) |
1294 | goto out; | 1321 | goto out; |
1295 | if (drm_edid_block_valid(edid, 0, print_bad_edid, | 1322 | if (drm_edid_block_valid(edid, 0, false, |
1296 | &connector->edid_corrupt)) | 1323 | &connector->edid_corrupt)) |
1297 | break; | 1324 | break; |
1298 | if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) { | 1325 | if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) { |
@@ -1304,54 +1331,60 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, | |||
1304 | goto carp; | 1331 | goto carp; |
1305 | 1332 | ||
1306 | /* if there's no extensions, we're done */ | 1333 | /* if there's no extensions, we're done */ |
1307 | if (edid[0x7e] == 0) | 1334 | valid_extensions = edid[0x7e]; |
1335 | if (valid_extensions == 0) | ||
1308 | return (struct edid *)edid; | 1336 | return (struct edid *)edid; |
1309 | 1337 | ||
1310 | new = krealloc(edid, (edid[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); | 1338 | new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); |
1311 | if (!new) | 1339 | if (!new) |
1312 | goto out; | 1340 | goto out; |
1313 | edid = new; | 1341 | edid = new; |
1314 | 1342 | ||
1315 | for (j = 1; j <= edid[0x7e]; j++) { | 1343 | for (j = 1; j <= edid[0x7e]; j++) { |
1316 | u8 *block = edid + (valid_extensions + 1) * EDID_LENGTH; | 1344 | u8 *block = edid + j * EDID_LENGTH; |
1317 | 1345 | ||
1318 | for (i = 0; i < 4; i++) { | 1346 | for (i = 0; i < 4; i++) { |
1319 | if (get_edid_block(data, block, j, EDID_LENGTH)) | 1347 | if (get_edid_block(data, block, j, EDID_LENGTH)) |
1320 | goto out; | 1348 | goto out; |
1321 | if (drm_edid_block_valid(block, j, | 1349 | if (drm_edid_block_valid(block, j, false, NULL)) |
1322 | print_bad_edid, NULL)) { | ||
1323 | valid_extensions++; | ||
1324 | break; | 1350 | break; |
1325 | } | ||
1326 | } | 1351 | } |
1327 | 1352 | ||
1328 | if (i == 4 && print_bad_edid) { | 1353 | if (i == 4) |
1329 | dev_warn(connector->dev->dev, | 1354 | valid_extensions--; |
1330 | "%s: Ignoring invalid EDID block %d.\n", | ||
1331 | connector->name, j); | ||
1332 | |||
1333 | connector->bad_edid_counter++; | ||
1334 | } | ||
1335 | } | 1355 | } |
1336 | 1356 | ||
1337 | if (valid_extensions != edid[0x7e]) { | 1357 | if (valid_extensions != edid[0x7e]) { |
1358 | u8 *base; | ||
1359 | |||
1360 | connector_bad_edid(connector, edid, edid[0x7e] + 1); | ||
1361 | |||
1338 | edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions; | 1362 | edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions; |
1339 | edid[0x7e] = valid_extensions; | 1363 | edid[0x7e] = valid_extensions; |
1340 | new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); | 1364 | |
1365 | new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); | ||
1341 | if (!new) | 1366 | if (!new) |
1342 | goto out; | 1367 | goto out; |
1368 | |||
1369 | base = new; | ||
1370 | for (i = 0; i <= edid[0x7e]; i++) { | ||
1371 | u8 *block = edid + i * EDID_LENGTH; | ||
1372 | |||
1373 | if (!drm_edid_block_valid(block, i, false, NULL)) | ||
1374 | continue; | ||
1375 | |||
1376 | memcpy(base, block, EDID_LENGTH); | ||
1377 | base += EDID_LENGTH; | ||
1378 | } | ||
1379 | |||
1380 | kfree(edid); | ||
1343 | edid = new; | 1381 | edid = new; |
1344 | } | 1382 | } |
1345 | 1383 | ||
1346 | return (struct edid *)edid; | 1384 | return (struct edid *)edid; |
1347 | 1385 | ||
1348 | carp: | 1386 | carp: |
1349 | if (print_bad_edid) { | 1387 | connector_bad_edid(connector, edid, 1); |
1350 | dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", | ||
1351 | connector->name, j); | ||
1352 | } | ||
1353 | connector->bad_edid_counter++; | ||
1354 | |||
1355 | out: | 1388 | out: |
1356 | kfree(edid); | 1389 | kfree(edid); |
1357 | return NULL; | 1390 | return NULL; |
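The rework above changes how invalid extension blocks are reported and compacted inside drm_do_get_edid(); the entry point still takes a custom block-read callback with the same shape as drm_do_probe_ddc_edid() (data, buf, block, len) returning 0 on success. A hedged sketch of a driver supplying its own reader, with the hardware access left as a hypothetical helper:

	/* Hypothetical driver context and low-level EDID block read. */
	struct example_encoder;
	int example_hw_read_edid(struct example_encoder *enc, unsigned int block,
				 u8 *buf, size_t len);

	static int example_read_edid_block(void *data, u8 *buf, unsigned int block,
					   size_t len)
	{
		return example_hw_read_edid(data, block, buf, len);
	}

	static struct edid *example_get_edid(struct drm_connector *connector,
					     struct example_encoder *enc)
	{
		/* Invalid extension blocks are now dropped and logged internally. */
		return drm_do_get_edid(connector, example_read_edid_block, enc);
	}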
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index e0d428f9d1cb..83dbae0fabcf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -392,15 +392,10 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) | |||
392 | if (plane->type != DRM_PLANE_TYPE_PRIMARY) | 392 | if (plane->type != DRM_PLANE_TYPE_PRIMARY) |
393 | drm_plane_force_disable(plane); | 393 | drm_plane_force_disable(plane); |
394 | 394 | ||
395 | if (plane->rotation_property) { | 395 | if (plane->rotation_property) |
396 | drm_mode_plane_set_obj_prop(plane, | 396 | drm_mode_plane_set_obj_prop(plane, |
397 | plane->rotation_property, | 397 | plane->rotation_property, |
398 | DRM_ROTATE_0); | 398 | DRM_ROTATE_0); |
399 | } else if (dev->mode_config.rotation_property) { | ||
400 | drm_mode_plane_set_obj_prop(plane, | ||
401 | dev->mode_config.rotation_property, | ||
402 | DRM_ROTATE_0); | ||
403 | } | ||
404 | } | 399 | } |
405 | 400 | ||
406 | for (i = 0; i < fb_helper->crtc_count; i++) { | 401 | for (i = 0; i < fb_helper->crtc_count; i++) { |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 8bed5f459182..cf993dbf602e 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -665,7 +665,7 @@ void drm_event_cancel_free(struct drm_device *dev, | |||
665 | spin_unlock_irqrestore(&dev->event_lock, flags); | 665 | spin_unlock_irqrestore(&dev->event_lock, flags); |
666 | 666 | ||
667 | if (p->fence) | 667 | if (p->fence) |
668 | fence_put(p->fence); | 668 | dma_fence_put(p->fence); |
669 | 669 | ||
670 | kfree(p); | 670 | kfree(p); |
671 | } | 671 | } |
@@ -696,8 +696,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) | |||
696 | } | 696 | } |
697 | 697 | ||
698 | if (e->fence) { | 698 | if (e->fence) { |
699 | fence_signal(e->fence); | 699 | dma_fence_signal(e->fence); |
700 | fence_put(e->fence); | 700 | dma_fence_put(e->fence); |
701 | } | 701 | } |
702 | 702 | ||
703 | if (!e->file_priv) { | 703 | if (!e->file_priv) { |
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index bc98bb94264d..47848ed8ca48 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c | |||
@@ -6,6 +6,11 @@ | |||
6 | #include <drm/drm_crtc.h> | 6 | #include <drm/drm_crtc.h> |
7 | #include <drm/drm_of.h> | 7 | #include <drm/drm_of.h> |
8 | 8 | ||
9 | static void drm_release_of(struct device *dev, void *data) | ||
10 | { | ||
11 | of_node_put(data); | ||
12 | } | ||
13 | |||
9 | /** | 14 | /** |
10 | * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node | 15 | * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node |
11 | * @dev: DRM device | 16 | * @dev: DRM device |
@@ -64,6 +69,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, | |||
64 | EXPORT_SYMBOL(drm_of_find_possible_crtcs); | 69 | EXPORT_SYMBOL(drm_of_find_possible_crtcs); |
65 | 70 | ||
66 | /** | 71 | /** |
72 | * drm_of_component_match_add - Add a component helper OF node match rule | ||
73 | * @master: master device | ||
74 | * @matchptr: component match pointer | ||
75 | * @compare: compare function used for matching component | ||
76 | * @node: of_node | ||
77 | */ | ||
78 | void drm_of_component_match_add(struct device *master, | ||
79 | struct component_match **matchptr, | ||
80 | int (*compare)(struct device *, void *), | ||
81 | struct device_node *node) | ||
82 | { | ||
83 | of_node_get(node); | ||
84 | component_match_add_release(master, matchptr, drm_release_of, | ||
85 | compare, node); | ||
86 | } | ||
87 | EXPORT_SYMBOL_GPL(drm_of_component_match_add); | ||
88 | |||
89 | /** | ||
67 | * drm_of_component_probe - Generic probe function for a component based master | 90 | * drm_of_component_probe - Generic probe function for a component based master |
68 | * @dev: master device containing the OF node | 91 | * @dev: master device containing the OF node |
69 | * @compare_of: compare function used for matching components | 92 | * @compare_of: compare function used for matching components |
@@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev, | |||
101 | continue; | 124 | continue; |
102 | } | 125 | } |
103 | 126 | ||
104 | component_match_add(dev, &match, compare_of, port); | 127 | drm_of_component_match_add(dev, &match, compare_of, port); |
105 | of_node_put(port); | 128 | of_node_put(port); |
106 | } | 129 | } |
107 | 130 | ||
@@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev, | |||
140 | continue; | 163 | continue; |
141 | } | 164 | } |
142 | 165 | ||
143 | component_match_add(dev, &match, compare_of, remote); | 166 | drm_of_component_match_add(dev, &match, compare_of, |
167 | remote); | ||
144 | of_node_put(remote); | 168 | of_node_put(remote); |
145 | } | 169 | } |
146 | of_node_put(port); | 170 | of_node_put(port); |
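drm_of_component_match_add() takes a reference on the OF node and registers drm_release_of() so the reference is dropped when the match is released; the etnaviv hunk below is an in-tree conversion. A hedged sketch of the usual pattern in a component-master probe, with the master ops left as a hypothetical placeholder:

	#include <linux/component.h>
	#include <linux/of.h>
	#include <drm/drm_of.h>

	/* Hypothetical master ops; bind/unbind callbacks omitted. */
	static const struct component_master_ops example_master_ops;

	/* Match a component by its of_node. */
	static int example_compare_of(struct device *dev, void *data)
	{
		return dev->of_node == data;
	}

	static int example_master_probe(struct device *dev)
	{
		struct component_match *match = NULL;
		struct device_node *child;

		for_each_available_child_of_node(dev->of_node, child)
			drm_of_component_match_add(dev, &match, example_compare_of,
						   child);

		return component_master_add_with_match(dev, &example_master_ops,
						       match);
	}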
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index aa687669e22b..0dee6acbd880 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/component.h> | 17 | #include <linux/component.h> |
18 | #include <linux/of_platform.h> | 18 | #include <linux/of_platform.h> |
19 | #include <drm/drm_of.h> | ||
19 | 20 | ||
20 | #include "etnaviv_drv.h" | 21 | #include "etnaviv_drv.h" |
21 | #include "etnaviv_gpu.h" | 22 | #include "etnaviv_gpu.h" |
@@ -629,8 +630,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) | |||
629 | if (!core_node) | 630 | if (!core_node) |
630 | break; | 631 | break; |
631 | 632 | ||
632 | component_match_add(&pdev->dev, &match, compare_of, | 633 | drm_of_component_match_add(&pdev->dev, &match, |
633 | core_node); | 634 | compare_of, core_node); |
634 | of_node_put(core_node); | 635 | of_node_put(core_node); |
635 | } | 636 | } |
636 | } else if (dev->platform_data) { | 637 | } else if (dev->platform_data) { |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 3755ef935af4..7d066a91d778 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c | |||
@@ -466,10 +466,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, | |||
466 | } | 466 | } |
467 | 467 | ||
468 | #ifdef CONFIG_DEBUG_FS | 468 | #ifdef CONFIG_DEBUG_FS |
469 | static void etnaviv_gem_describe_fence(struct fence *fence, | 469 | static void etnaviv_gem_describe_fence(struct dma_fence *fence, |
470 | const char *type, struct seq_file *m) | 470 | const char *type, struct seq_file *m) |
471 | { | 471 | { |
472 | if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | 472 | if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
473 | seq_printf(m, "\t%9s: %s %s seq %u\n", | 473 | seq_printf(m, "\t%9s: %s %s seq %u\n", |
474 | type, | 474 | type, |
475 | fence->ops->get_driver_name(fence), | 475 | fence->ops->get_driver_name(fence), |
@@ -482,7 +482,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) | |||
482 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); | 482 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); |
483 | struct reservation_object *robj = etnaviv_obj->resv; | 483 | struct reservation_object *robj = etnaviv_obj->resv; |
484 | struct reservation_object_list *fobj; | 484 | struct reservation_object_list *fobj; |
485 | struct fence *fence; | 485 | struct dma_fence *fence; |
486 | unsigned long off = drm_vma_node_start(&obj->vma_node); | 486 | unsigned long off = drm_vma_node_start(&obj->vma_node); |
487 | 487 | ||
488 | seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", | 488 | seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index b1254f885fed..d2211825e5c8 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c | |||
@@ -15,7 +15,7 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/component.h> | 17 | #include <linux/component.h> |
18 | #include <linux/fence.h> | 18 | #include <linux/dma-fence.h> |
19 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
20 | #include <linux/of_device.h> | 20 | #include <linux/of_device.h> |
21 | #include "etnaviv_dump.h" | 21 | #include "etnaviv_dump.h" |
@@ -882,7 +882,7 @@ static void recover_worker(struct work_struct *work) | |||
882 | for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { | 882 | for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { |
883 | if (!gpu->event[i].used) | 883 | if (!gpu->event[i].used) |
884 | continue; | 884 | continue; |
885 | fence_signal(gpu->event[i].fence); | 885 | dma_fence_signal(gpu->event[i].fence); |
886 | gpu->event[i].fence = NULL; | 886 | gpu->event[i].fence = NULL; |
887 | gpu->event[i].used = false; | 887 | gpu->event[i].used = false; |
888 | complete(&gpu->event_free); | 888 | complete(&gpu->event_free); |
@@ -952,55 +952,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu) | |||
952 | /* fence object management */ | 952 | /* fence object management */ |
953 | struct etnaviv_fence { | 953 | struct etnaviv_fence { |
954 | struct etnaviv_gpu *gpu; | 954 | struct etnaviv_gpu *gpu; |
955 | struct fence base; | 955 | struct dma_fence base; |
956 | }; | 956 | }; |
957 | 957 | ||
958 | static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence) | 958 | static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence) |
959 | { | 959 | { |
960 | return container_of(fence, struct etnaviv_fence, base); | 960 | return container_of(fence, struct etnaviv_fence, base); |
961 | } | 961 | } |
962 | 962 | ||
963 | static const char *etnaviv_fence_get_driver_name(struct fence *fence) | 963 | static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence) |
964 | { | 964 | { |
965 | return "etnaviv"; | 965 | return "etnaviv"; |
966 | } | 966 | } |
967 | 967 | ||
968 | static const char *etnaviv_fence_get_timeline_name(struct fence *fence) | 968 | static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence) |
969 | { | 969 | { |
970 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | 970 | struct etnaviv_fence *f = to_etnaviv_fence(fence); |
971 | 971 | ||
972 | return dev_name(f->gpu->dev); | 972 | return dev_name(f->gpu->dev); |
973 | } | 973 | } |
974 | 974 | ||
975 | static bool etnaviv_fence_enable_signaling(struct fence *fence) | 975 | static bool etnaviv_fence_enable_signaling(struct dma_fence *fence) |
976 | { | 976 | { |
977 | return true; | 977 | return true; |
978 | } | 978 | } |
979 | 979 | ||
980 | static bool etnaviv_fence_signaled(struct fence *fence) | 980 | static bool etnaviv_fence_signaled(struct dma_fence *fence) |
981 | { | 981 | { |
982 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | 982 | struct etnaviv_fence *f = to_etnaviv_fence(fence); |
983 | 983 | ||
984 | return fence_completed(f->gpu, f->base.seqno); | 984 | return fence_completed(f->gpu, f->base.seqno); |
985 | } | 985 | } |
986 | 986 | ||
987 | static void etnaviv_fence_release(struct fence *fence) | 987 | static void etnaviv_fence_release(struct dma_fence *fence) |
988 | { | 988 | { |
989 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | 989 | struct etnaviv_fence *f = to_etnaviv_fence(fence); |
990 | 990 | ||
991 | kfree_rcu(f, base.rcu); | 991 | kfree_rcu(f, base.rcu); |
992 | } | 992 | } |
993 | 993 | ||
994 | static const struct fence_ops etnaviv_fence_ops = { | 994 | static const struct dma_fence_ops etnaviv_fence_ops = { |
995 | .get_driver_name = etnaviv_fence_get_driver_name, | 995 | .get_driver_name = etnaviv_fence_get_driver_name, |
996 | .get_timeline_name = etnaviv_fence_get_timeline_name, | 996 | .get_timeline_name = etnaviv_fence_get_timeline_name, |
997 | .enable_signaling = etnaviv_fence_enable_signaling, | 997 | .enable_signaling = etnaviv_fence_enable_signaling, |
998 | .signaled = etnaviv_fence_signaled, | 998 | .signaled = etnaviv_fence_signaled, |
999 | .wait = fence_default_wait, | 999 | .wait = dma_fence_default_wait, |
1000 | .release = etnaviv_fence_release, | 1000 | .release = etnaviv_fence_release, |
1001 | }; | 1001 | }; |
1002 | 1002 | ||
1003 | static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) | 1003 | static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) |
1004 | { | 1004 | { |
1005 | struct etnaviv_fence *f; | 1005 | struct etnaviv_fence *f; |
1006 | 1006 | ||
@@ -1010,8 +1010,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) | |||
1010 | 1010 | ||
1011 | f->gpu = gpu; | 1011 | f->gpu = gpu; |
1012 | 1012 | ||
1013 | fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, | 1013 | dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, |
1014 | gpu->fence_context, ++gpu->next_fence); | 1014 | gpu->fence_context, ++gpu->next_fence); |
1015 | 1015 | ||
1016 | return &f->base; | 1016 | return &f->base; |
1017 | } | 1017 | } |
@@ -1021,7 +1021,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, | |||
1021 | { | 1021 | { |
1022 | struct reservation_object *robj = etnaviv_obj->resv; | 1022 | struct reservation_object *robj = etnaviv_obj->resv; |
1023 | struct reservation_object_list *fobj; | 1023 | struct reservation_object_list *fobj; |
1024 | struct fence *fence; | 1024 | struct dma_fence *fence; |
1025 | int i, ret; | 1025 | int i, ret; |
1026 | 1026 | ||
1027 | if (!exclusive) { | 1027 | if (!exclusive) { |
@@ -1039,7 +1039,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, | |||
1039 | /* Wait on any existing exclusive fence which isn't our own */ | 1039 | /* Wait on any existing exclusive fence which isn't our own */ |
1040 | fence = reservation_object_get_excl(robj); | 1040 | fence = reservation_object_get_excl(robj); |
1041 | if (fence && fence->context != context) { | 1041 | if (fence && fence->context != context) { |
1042 | ret = fence_wait(fence, true); | 1042 | ret = dma_fence_wait(fence, true); |
1043 | if (ret) | 1043 | if (ret) |
1044 | return ret; | 1044 | return ret; |
1045 | } | 1045 | } |
@@ -1052,7 +1052,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, | |||
1052 | fence = rcu_dereference_protected(fobj->shared[i], | 1052 | fence = rcu_dereference_protected(fobj->shared[i], |
1053 | reservation_object_held(robj)); | 1053 | reservation_object_held(robj)); |
1054 | if (fence->context != context) { | 1054 | if (fence->context != context) { |
1055 | ret = fence_wait(fence, true); | 1055 | ret = dma_fence_wait(fence, true); |
1056 | if (ret) | 1056 | if (ret) |
1057 | return ret; | 1057 | return ret; |
1058 | } | 1058 | } |
@@ -1158,11 +1158,11 @@ static void retire_worker(struct work_struct *work) | |||
1158 | 1158 | ||
1159 | mutex_lock(&gpu->lock); | 1159 | mutex_lock(&gpu->lock); |
1160 | list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) { | 1160 | list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) { |
1161 | if (!fence_is_signaled(cmdbuf->fence)) | 1161 | if (!dma_fence_is_signaled(cmdbuf->fence)) |
1162 | break; | 1162 | break; |
1163 | 1163 | ||
1164 | list_del(&cmdbuf->node); | 1164 | list_del(&cmdbuf->node); |
1165 | fence_put(cmdbuf->fence); | 1165 | dma_fence_put(cmdbuf->fence); |
1166 | 1166 | ||
1167 | for (i = 0; i < cmdbuf->nr_bos; i++) { | 1167 | for (i = 0; i < cmdbuf->nr_bos; i++) { |
1168 | struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i]; | 1168 | struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i]; |
@@ -1275,7 +1275,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu) | |||
1275 | int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, | 1275 | int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, |
1276 | struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) | 1276 | struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) |
1277 | { | 1277 | { |
1278 | struct fence *fence; | 1278 | struct dma_fence *fence; |
1279 | unsigned int event, i; | 1279 | unsigned int event, i; |
1280 | int ret; | 1280 | int ret; |
1281 | 1281 | ||
@@ -1391,7 +1391,7 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
1391 | } | 1391 | } |
1392 | 1392 | ||
1393 | while ((event = ffs(intr)) != 0) { | 1393 | while ((event = ffs(intr)) != 0) { |
1394 | struct fence *fence; | 1394 | struct dma_fence *fence; |
1395 | 1395 | ||
1396 | event -= 1; | 1396 | event -= 1; |
1397 | 1397 | ||
@@ -1401,7 +1401,7 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
1401 | 1401 | ||
1402 | fence = gpu->event[event].fence; | 1402 | fence = gpu->event[event].fence; |
1403 | gpu->event[event].fence = NULL; | 1403 | gpu->event[event].fence = NULL; |
1404 | fence_signal(fence); | 1404 | dma_fence_signal(fence); |
1405 | 1405 | ||
1406 | /* | 1406 | /* |
1407 | * Events can be processed out of order. Eg, | 1407 | * Events can be processed out of order. Eg, |
@@ -1553,7 +1553,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master, | |||
1553 | return ret; | 1553 | return ret; |
1554 | 1554 | ||
1555 | gpu->drm = drm; | 1555 | gpu->drm = drm; |
1556 | gpu->fence_context = fence_context_alloc(1); | 1556 | gpu->fence_context = dma_fence_context_alloc(1); |
1557 | spin_lock_init(&gpu->fence_spinlock); | 1557 | spin_lock_init(&gpu->fence_spinlock); |
1558 | 1558 | ||
1559 | INIT_LIST_HEAD(&gpu->active_cmd_list); | 1559 | INIT_LIST_HEAD(&gpu->active_cmd_list); |
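The etnaviv hunks above are a mechanical rename of the driver's fence provider from the old struct fence API to struct dma_fence. For readers following along, this is roughly what a minimal dma_fence provider looks like after the rename; it is a sketch, not part of this patch, the my_* names are hypothetical, and only the dma_fence_* calls are real kernel API:

#include <linux/dma-fence.h>
#include <linux/slab.h>

struct my_fence {
	struct dma_fence base;
	/* driver-private completion state would live here */
};

static const char *my_fence_get_driver_name(struct dma_fence *f)
{
	return "mydrv";
}

static const char *my_fence_get_timeline_name(struct dma_fence *f)
{
	return "mydrv-timeline";
}

static bool my_fence_enable_signaling(struct dma_fence *f)
{
	return true;	/* signalled later from the driver's IRQ path */
}

static void my_fence_release(struct dma_fence *f)
{
	struct my_fence *mf = container_of(f, struct my_fence, base);

	kfree_rcu(mf, base.rcu);
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name = my_fence_get_driver_name,
	.get_timeline_name = my_fence_get_timeline_name,
	.enable_signaling = my_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = my_fence_release,
};

/* lock, context and seqno are owned by the caller, as in the etnaviv code */
static struct dma_fence *my_fence_create(spinlock_t *lock, u64 context, u32 seqno)
{
	struct my_fence *mf = kzalloc(sizeof(*mf), GFP_KERNEL);

	if (!mf)
		return NULL;

	dma_fence_init(&mf->base, &my_fence_ops, lock, context, seqno);
	return &mf->base;
}

Completion is then reported with dma_fence_signal() from the interrupt handler, as in the irq_handler() hunk above.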
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 73c278dc3706..8c6b824e9d0a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h | |||
@@ -89,7 +89,7 @@ struct etnaviv_chip_identity { | |||
89 | 89 | ||
90 | struct etnaviv_event { | 90 | struct etnaviv_event { |
91 | bool used; | 91 | bool used; |
92 | struct fence *fence; | 92 | struct dma_fence *fence; |
93 | }; | 93 | }; |
94 | 94 | ||
95 | struct etnaviv_cmdbuf; | 95 | struct etnaviv_cmdbuf; |
@@ -163,7 +163,7 @@ struct etnaviv_cmdbuf { | |||
163 | /* vram node used if the cmdbuf is mapped through the MMUv2 */ | 163 | /* vram node used if the cmdbuf is mapped through the MMUv2 */ |
164 | struct drm_mm_node vram_node; | 164 | struct drm_mm_node vram_node; |
165 | /* fence after which this buffer is to be disposed */ | 165 | /* fence after which this buffer is to be disposed */ |
166 | struct fence *fence; | 166 | struct dma_fence *fence; |
167 | /* target exec state */ | 167 | /* target exec state */ |
168 | u32 exec_state; | 168 | u32 exec_state; |
169 | /* per GPU in-flight list */ | 169 | /* per GPU in-flight list */ |
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 90377a609c98..e88fde18c946 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <drm/drm_fb_cma_helper.h> | 24 | #include <drm/drm_fb_cma_helper.h> |
25 | #include <drm/drm_atomic_helper.h> | 25 | #include <drm/drm_atomic_helper.h> |
26 | #include <drm/drm_crtc_helper.h> | 26 | #include <drm/drm_crtc_helper.h> |
27 | #include <drm/drm_of.h> | ||
27 | 28 | ||
28 | #include "kirin_drm_drv.h" | 29 | #include "kirin_drm_drv.h" |
29 | 30 | ||
@@ -260,14 +261,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np) | |||
260 | DRM_ERROR("no valid endpoint node\n"); | 261 | DRM_ERROR("no valid endpoint node\n"); |
261 | return ERR_PTR(-ENODEV); | 262 | return ERR_PTR(-ENODEV); |
262 | } | 263 | } |
263 | of_node_put(endpoint); | ||
264 | 264 | ||
265 | remote = of_graph_get_remote_port_parent(endpoint); | 265 | remote = of_graph_get_remote_port_parent(endpoint); |
266 | of_node_put(endpoint); | ||
266 | if (!remote) { | 267 | if (!remote) { |
267 | DRM_ERROR("no valid remote node\n"); | 268 | DRM_ERROR("no valid remote node\n"); |
268 | return ERR_PTR(-ENODEV); | 269 | return ERR_PTR(-ENODEV); |
269 | } | 270 | } |
270 | of_node_put(remote); | ||
271 | 271 | ||
272 | if (!of_device_is_available(remote)) { | 272 | if (!of_device_is_available(remote)) { |
273 | DRM_ERROR("not available for remote node\n"); | 273 | DRM_ERROR("not available for remote node\n"); |
@@ -294,7 +294,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev) | |||
294 | if (IS_ERR(remote)) | 294 | if (IS_ERR(remote)) |
295 | return PTR_ERR(remote); | 295 | return PTR_ERR(remote); |
296 | 296 | ||
297 | component_match_add(dev, &match, compare_of, remote); | 297 | drm_of_component_match_add(dev, &match, compare_of, remote); |
298 | of_node_put(remote); | ||
298 | 299 | ||
299 | return component_master_add_with_match(dev, &kirin_drm_ops, match); | 300 | return component_master_add_with_match(dev, &kirin_drm_ops, match); |
300 | 301 | ||
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 9798d400d817..af8683e0dd54 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c | |||
@@ -1289,7 +1289,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data) | |||
1289 | mutex_unlock(&priv->audio_mutex); | 1289 | mutex_unlock(&priv->audio_mutex); |
1290 | } | 1290 | } |
1291 | 1291 | ||
1292 | int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable) | 1292 | static int |
1293 | tda998x_audio_digital_mute(struct device *dev, void *data, bool enable) | ||
1293 | { | 1294 | { |
1294 | struct tda998x_priv *priv = dev_get_drvdata(dev); | 1295 | struct tda998x_priv *priv = dev_get_drvdata(dev); |
1295 | 1296 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 74ede1f53372..f9af2a00625e 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c | |||
@@ -26,12 +26,12 @@ | |||
26 | 26 | ||
27 | #include "i915_drv.h" | 27 | #include "i915_drv.h" |
28 | 28 | ||
29 | static const char *i915_fence_get_driver_name(struct fence *fence) | 29 | static const char *i915_fence_get_driver_name(struct dma_fence *fence) |
30 | { | 30 | { |
31 | return "i915"; | 31 | return "i915"; |
32 | } | 32 | } |
33 | 33 | ||
34 | static const char *i915_fence_get_timeline_name(struct fence *fence) | 34 | static const char *i915_fence_get_timeline_name(struct dma_fence *fence) |
35 | { | 35 | { |
36 | /* Timelines are bound by eviction to a VM. However, since | 36 | /* Timelines are bound by eviction to a VM. However, since |
37 | * we only have a global seqno at the moment, we only have | 37 | * we only have a global seqno at the moment, we only have |
@@ -42,12 +42,12 @@ static const char *i915_fence_get_timeline_name(struct fence *fence) | |||
42 | return "global"; | 42 | return "global"; |
43 | } | 43 | } |
44 | 44 | ||
45 | static bool i915_fence_signaled(struct fence *fence) | 45 | static bool i915_fence_signaled(struct dma_fence *fence) |
46 | { | 46 | { |
47 | return i915_gem_request_completed(to_request(fence)); | 47 | return i915_gem_request_completed(to_request(fence)); |
48 | } | 48 | } |
49 | 49 | ||
50 | static bool i915_fence_enable_signaling(struct fence *fence) | 50 | static bool i915_fence_enable_signaling(struct dma_fence *fence) |
51 | { | 51 | { |
52 | if (i915_fence_signaled(fence)) | 52 | if (i915_fence_signaled(fence)) |
53 | return false; | 53 | return false; |
@@ -56,7 +56,7 @@ static bool i915_fence_enable_signaling(struct fence *fence) | |||
56 | return true; | 56 | return true; |
57 | } | 57 | } |
58 | 58 | ||
59 | static signed long i915_fence_wait(struct fence *fence, | 59 | static signed long i915_fence_wait(struct dma_fence *fence, |
60 | bool interruptible, | 60 | bool interruptible, |
61 | signed long timeout_jiffies) | 61 | signed long timeout_jiffies) |
62 | { | 62 | { |
@@ -85,26 +85,26 @@ static signed long i915_fence_wait(struct fence *fence, | |||
85 | return timeout_jiffies; | 85 | return timeout_jiffies; |
86 | } | 86 | } |
87 | 87 | ||
88 | static void i915_fence_value_str(struct fence *fence, char *str, int size) | 88 | static void i915_fence_value_str(struct dma_fence *fence, char *str, int size) |
89 | { | 89 | { |
90 | snprintf(str, size, "%u", fence->seqno); | 90 | snprintf(str, size, "%u", fence->seqno); |
91 | } | 91 | } |
92 | 92 | ||
93 | static void i915_fence_timeline_value_str(struct fence *fence, char *str, | 93 | static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str, |
94 | int size) | 94 | int size) |
95 | { | 95 | { |
96 | snprintf(str, size, "%u", | 96 | snprintf(str, size, "%u", |
97 | intel_engine_get_seqno(to_request(fence)->engine)); | 97 | intel_engine_get_seqno(to_request(fence)->engine)); |
98 | } | 98 | } |
99 | 99 | ||
100 | static void i915_fence_release(struct fence *fence) | 100 | static void i915_fence_release(struct dma_fence *fence) |
101 | { | 101 | { |
102 | struct drm_i915_gem_request *req = to_request(fence); | 102 | struct drm_i915_gem_request *req = to_request(fence); |
103 | 103 | ||
104 | kmem_cache_free(req->i915->requests, req); | 104 | kmem_cache_free(req->i915->requests, req); |
105 | } | 105 | } |
106 | 106 | ||
107 | const struct fence_ops i915_fence_ops = { | 107 | const struct dma_fence_ops i915_fence_ops = { |
108 | .get_driver_name = i915_fence_get_driver_name, | 108 | .get_driver_name = i915_fence_get_driver_name, |
109 | .get_timeline_name = i915_fence_get_timeline_name, | 109 | .get_timeline_name = i915_fence_get_timeline_name, |
110 | .enable_signaling = i915_fence_enable_signaling, | 110 | .enable_signaling = i915_fence_enable_signaling, |
@@ -388,8 +388,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
388 | * The reference count is incremented atomically. If it is zero, | 388 | * The reference count is incremented atomically. If it is zero, |
389 | * the lookup knows the request is unallocated and complete. Otherwise, | 389 | * the lookup knows the request is unallocated and complete. Otherwise, |
390 | * it is either still in use, or has been reallocated and reset | 390 | * it is either still in use, or has been reallocated and reset |
391 | * with fence_init(). This increment is safe for release as we check | 391 | * with dma_fence_init(). This increment is safe for release as we |
392 | * that the request we have a reference to and matches the active | 392 | * check that the request we have a reference to and matches the active |
393 | * request. | 393 | * request. |
394 | * | 394 | * |
395 | * Before we increment the refcount, we chase the request->engine | 395 | * Before we increment the refcount, we chase the request->engine |
@@ -412,11 +412,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
412 | goto err; | 412 | goto err; |
413 | 413 | ||
414 | spin_lock_init(&req->lock); | 414 | spin_lock_init(&req->lock); |
415 | fence_init(&req->fence, | 415 | dma_fence_init(&req->fence, |
416 | &i915_fence_ops, | 416 | &i915_fence_ops, |
417 | &req->lock, | 417 | &req->lock, |
418 | engine->fence_context, | 418 | engine->fence_context, |
419 | seqno); | 419 | seqno); |
420 | 420 | ||
421 | i915_sw_fence_init(&req->submit, submit_notify); | 421 | i915_sw_fence_init(&req->submit, submit_notify); |
422 | 422 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 974bd7bcc801..bceeaa3a5193 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h | |||
@@ -25,7 +25,7 @@ | |||
25 | #ifndef I915_GEM_REQUEST_H | 25 | #ifndef I915_GEM_REQUEST_H |
26 | #define I915_GEM_REQUEST_H | 26 | #define I915_GEM_REQUEST_H |
27 | 27 | ||
28 | #include <linux/fence.h> | 28 | #include <linux/dma-fence.h> |
29 | 29 | ||
30 | #include "i915_gem.h" | 30 | #include "i915_gem.h" |
31 | #include "i915_sw_fence.h" | 31 | #include "i915_sw_fence.h" |
@@ -62,7 +62,7 @@ struct intel_signal_node { | |||
62 | * The requests are reference counted. | 62 | * The requests are reference counted. |
63 | */ | 63 | */ |
64 | struct drm_i915_gem_request { | 64 | struct drm_i915_gem_request { |
65 | struct fence fence; | 65 | struct dma_fence fence; |
66 | spinlock_t lock; | 66 | spinlock_t lock; |
67 | 67 | ||
68 | /** On Which ring this request was generated */ | 68 | /** On Which ring this request was generated */ |
@@ -145,9 +145,9 @@ struct drm_i915_gem_request { | |||
145 | struct list_head execlist_link; | 145 | struct list_head execlist_link; |
146 | }; | 146 | }; |
147 | 147 | ||
148 | extern const struct fence_ops i915_fence_ops; | 148 | extern const struct dma_fence_ops i915_fence_ops; |
149 | 149 | ||
150 | static inline bool fence_is_i915(struct fence *fence) | 150 | static inline bool fence_is_i915(struct dma_fence *fence) |
151 | { | 151 | { |
152 | return fence->ops == &i915_fence_ops; | 152 | return fence->ops == &i915_fence_ops; |
153 | } | 153 | } |
@@ -172,7 +172,7 @@ i915_gem_request_get_engine(struct drm_i915_gem_request *req) | |||
172 | } | 172 | } |
173 | 173 | ||
174 | static inline struct drm_i915_gem_request * | 174 | static inline struct drm_i915_gem_request * |
175 | to_request(struct fence *fence) | 175 | to_request(struct dma_fence *fence) |
176 | { | 176 | { |
177 | /* We assume that NULL fence/request are interoperable */ | 177 | /* We assume that NULL fence/request are interoperable */ |
178 | BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0); | 178 | BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0); |
@@ -183,19 +183,19 @@ to_request(struct fence *fence) | |||
183 | static inline struct drm_i915_gem_request * | 183 | static inline struct drm_i915_gem_request * |
184 | i915_gem_request_get(struct drm_i915_gem_request *req) | 184 | i915_gem_request_get(struct drm_i915_gem_request *req) |
185 | { | 185 | { |
186 | return to_request(fence_get(&req->fence)); | 186 | return to_request(dma_fence_get(&req->fence)); |
187 | } | 187 | } |
188 | 188 | ||
189 | static inline struct drm_i915_gem_request * | 189 | static inline struct drm_i915_gem_request * |
190 | i915_gem_request_get_rcu(struct drm_i915_gem_request *req) | 190 | i915_gem_request_get_rcu(struct drm_i915_gem_request *req) |
191 | { | 191 | { |
192 | return to_request(fence_get_rcu(&req->fence)); | 192 | return to_request(dma_fence_get_rcu(&req->fence)); |
193 | } | 193 | } |
194 | 194 | ||
195 | static inline void | 195 | static inline void |
196 | i915_gem_request_put(struct drm_i915_gem_request *req) | 196 | i915_gem_request_put(struct drm_i915_gem_request *req) |
197 | { | 197 | { |
198 | fence_put(&req->fence); | 198 | dma_fence_put(&req->fence); |
199 | } | 199 | } |
200 | 200 | ||
201 | static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, | 201 | static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, |
@@ -497,7 +497,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active) | |||
497 | * compiler. | 497 | * compiler. |
498 | * | 498 | * |
499 | * The atomic operation at the heart of | 499 | * The atomic operation at the heart of |
500 | * i915_gem_request_get_rcu(), see fence_get_rcu(), is | 500 | * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is |
501 | * atomic_inc_not_zero() which is only a full memory barrier | 501 | * atomic_inc_not_zero() which is only a full memory barrier |
502 | * when successful. That is, if i915_gem_request_get_rcu() | 502 | * when successful. That is, if i915_gem_request_get_rcu() |
503 | * returns the request (and so with the reference counted | 503 | * returns the request (and so with the reference counted |
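The comment updated above leans on the semantics of dma_fence_get_rcu(): the reference is taken with atomic_inc_not_zero(), which is a full memory barrier only when it succeeds, and that is what makes the lockless request lookup legal. The generic pattern, outside of i915 and with hypothetical my_* names, is:

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

struct my_obj {
	struct dma_fence __rcu *fence;
};

static struct dma_fence *my_obj_get_fence(struct my_obj *obj)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = rcu_dereference(obj->fence);
	if (fence)
		/* fails (returns NULL) if the refcount already hit zero,
		 * i.e. the fence is being freed under us */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();

	return fence;	/* caller must dma_fence_put() when done */
}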
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 1e5cbc585ca2..8185002d7ec8 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c | |||
@@ -8,7 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/fence.h> | 11 | #include <linux/dma-fence.h> |
12 | #include <linux/reservation.h> | 12 | #include <linux/reservation.h> |
13 | 13 | ||
14 | #include "i915_sw_fence.h" | 14 | #include "i915_sw_fence.h" |
@@ -226,49 +226,50 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, | |||
226 | return pending; | 226 | return pending; |
227 | } | 227 | } |
228 | 228 | ||
229 | struct dma_fence_cb { | 229 | struct i915_sw_dma_fence_cb { |
230 | struct fence_cb base; | 230 | struct dma_fence_cb base; |
231 | struct i915_sw_fence *fence; | 231 | struct i915_sw_fence *fence; |
232 | struct fence *dma; | 232 | struct dma_fence *dma; |
233 | struct timer_list timer; | 233 | struct timer_list timer; |
234 | }; | 234 | }; |
235 | 235 | ||
236 | static void timer_i915_sw_fence_wake(unsigned long data) | 236 | static void timer_i915_sw_fence_wake(unsigned long data) |
237 | { | 237 | { |
238 | struct dma_fence_cb *cb = (struct dma_fence_cb *)data; | 238 | struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data; |
239 | 239 | ||
240 | printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n", | 240 | printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n", |
241 | cb->dma->ops->get_driver_name(cb->dma), | 241 | cb->dma->ops->get_driver_name(cb->dma), |
242 | cb->dma->ops->get_timeline_name(cb->dma), | 242 | cb->dma->ops->get_timeline_name(cb->dma), |
243 | cb->dma->seqno); | 243 | cb->dma->seqno); |
244 | fence_put(cb->dma); | 244 | dma_fence_put(cb->dma); |
245 | cb->dma = NULL; | 245 | cb->dma = NULL; |
246 | 246 | ||
247 | i915_sw_fence_commit(cb->fence); | 247 | i915_sw_fence_commit(cb->fence); |
248 | cb->timer.function = NULL; | 248 | cb->timer.function = NULL; |
249 | } | 249 | } |
250 | 250 | ||
251 | static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data) | 251 | static void dma_i915_sw_fence_wake(struct dma_fence *dma, |
252 | struct dma_fence_cb *data) | ||
252 | { | 253 | { |
253 | struct dma_fence_cb *cb = container_of(data, typeof(*cb), base); | 254 | struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base); |
254 | 255 | ||
255 | del_timer_sync(&cb->timer); | 256 | del_timer_sync(&cb->timer); |
256 | if (cb->timer.function) | 257 | if (cb->timer.function) |
257 | i915_sw_fence_commit(cb->fence); | 258 | i915_sw_fence_commit(cb->fence); |
258 | fence_put(cb->dma); | 259 | dma_fence_put(cb->dma); |
259 | 260 | ||
260 | kfree(cb); | 261 | kfree(cb); |
261 | } | 262 | } |
262 | 263 | ||
263 | int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | 264 | int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, |
264 | struct fence *dma, | 265 | struct dma_fence *dma, |
265 | unsigned long timeout, | 266 | unsigned long timeout, |
266 | gfp_t gfp) | 267 | gfp_t gfp) |
267 | { | 268 | { |
268 | struct dma_fence_cb *cb; | 269 | struct i915_sw_dma_fence_cb *cb; |
269 | int ret; | 270 | int ret; |
270 | 271 | ||
271 | if (fence_is_signaled(dma)) | 272 | if (dma_fence_is_signaled(dma)) |
272 | return 0; | 273 | return 0; |
273 | 274 | ||
274 | cb = kmalloc(sizeof(*cb), gfp); | 275 | cb = kmalloc(sizeof(*cb), gfp); |
@@ -276,7 +277,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | |||
276 | if (!gfpflags_allow_blocking(gfp)) | 277 | if (!gfpflags_allow_blocking(gfp)) |
277 | return -ENOMEM; | 278 | return -ENOMEM; |
278 | 279 | ||
279 | return fence_wait(dma, false); | 280 | return dma_fence_wait(dma, false); |
280 | } | 281 | } |
281 | 282 | ||
282 | cb->fence = i915_sw_fence_get(fence); | 283 | cb->fence = i915_sw_fence_get(fence); |
@@ -287,11 +288,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | |||
287 | timer_i915_sw_fence_wake, (unsigned long)cb, | 288 | timer_i915_sw_fence_wake, (unsigned long)cb, |
288 | TIMER_IRQSAFE); | 289 | TIMER_IRQSAFE); |
289 | if (timeout) { | 290 | if (timeout) { |
290 | cb->dma = fence_get(dma); | 291 | cb->dma = dma_fence_get(dma); |
291 | mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout)); | 292 | mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout)); |
292 | } | 293 | } |
293 | 294 | ||
294 | ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake); | 295 | ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake); |
295 | if (ret == 0) { | 296 | if (ret == 0) { |
296 | ret = 1; | 297 | ret = 1; |
297 | } else { | 298 | } else { |
@@ -305,16 +306,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | |||
305 | 306 | ||
306 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | 307 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, |
307 | struct reservation_object *resv, | 308 | struct reservation_object *resv, |
308 | const struct fence_ops *exclude, | 309 | const struct dma_fence_ops *exclude, |
309 | bool write, | 310 | bool write, |
310 | unsigned long timeout, | 311 | unsigned long timeout, |
311 | gfp_t gfp) | 312 | gfp_t gfp) |
312 | { | 313 | { |
313 | struct fence *excl; | 314 | struct dma_fence *excl; |
314 | int ret = 0, pending; | 315 | int ret = 0, pending; |
315 | 316 | ||
316 | if (write) { | 317 | if (write) { |
317 | struct fence **shared; | 318 | struct dma_fence **shared; |
318 | unsigned int count, i; | 319 | unsigned int count, i; |
319 | 320 | ||
320 | ret = reservation_object_get_fences_rcu(resv, | 321 | ret = reservation_object_get_fences_rcu(resv, |
@@ -339,7 +340,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | |||
339 | } | 340 | } |
340 | 341 | ||
341 | for (i = 0; i < count; i++) | 342 | for (i = 0; i < count; i++) |
342 | fence_put(shared[i]); | 343 | dma_fence_put(shared[i]); |
343 | kfree(shared); | 344 | kfree(shared); |
344 | } else { | 345 | } else { |
345 | excl = reservation_object_get_excl_rcu(resv); | 346 | excl = reservation_object_get_excl_rcu(resv); |
@@ -356,7 +357,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | |||
356 | ret |= pending; | 357 | ret |= pending; |
357 | } | 358 | } |
358 | 359 | ||
359 | fence_put(excl); | 360 | dma_fence_put(excl); |
360 | 361 | ||
361 | return ret; | 362 | return ret; |
362 | } | 363 | } |
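The i915_sw_fence hunks rename the driver's private wrapper because the core now owns the struct dma_fence_cb name. The underlying pattern is unchanged: embed a struct dma_fence_cb in a private struct, hand it to dma_fence_add_callback(), and recover the wrapper with container_of() when the fence signals. A self-contained sketch with hypothetical my_* names (-ENOENT from dma_fence_add_callback() means the fence signalled before the callback was installed):

#include <linux/dma-fence.h>
#include <linux/slab.h>

struct my_cb {
	struct dma_fence_cb base;	/* handed to the dma_fence core */
	void *priv;			/* driver payload */
};

static void my_fence_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct my_cb *mycb = container_of(cb, struct my_cb, base);

	/* runs (possibly in irq context) once 'fence' signals */
	kfree(mycb);
}

static int my_wait_async(struct dma_fence *fence, void *priv)
{
	struct my_cb *mycb;
	int ret;

	if (dma_fence_is_signaled(fence))
		return 0;

	mycb = kmalloc(sizeof(*mycb), GFP_KERNEL);
	if (!mycb)
		return -ENOMEM;
	mycb->priv = priv;

	ret = dma_fence_add_callback(fence, &mycb->base, my_fence_wake);
	if (ret == -ENOENT) {	/* already signalled, callback not installed */
		kfree(mycb);
		ret = 0;
	}
	return ret;
}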
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 373141602ca4..cd239e92f67f 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h | |||
@@ -16,8 +16,8 @@ | |||
16 | #include <linux/wait.h> | 16 | #include <linux/wait.h> |
17 | 17 | ||
18 | struct completion; | 18 | struct completion; |
19 | struct fence; | 19 | struct dma_fence; |
20 | struct fence_ops; | 20 | struct dma_fence_ops; |
21 | struct reservation_object; | 21 | struct reservation_object; |
22 | 22 | ||
23 | struct i915_sw_fence { | 23 | struct i915_sw_fence { |
@@ -47,12 +47,12 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, | |||
47 | struct i915_sw_fence *after, | 47 | struct i915_sw_fence *after, |
48 | wait_queue_t *wq); | 48 | wait_queue_t *wq); |
49 | int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | 49 | int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, |
50 | struct fence *dma, | 50 | struct dma_fence *dma, |
51 | unsigned long timeout, | 51 | unsigned long timeout, |
52 | gfp_t gfp); | 52 | gfp_t gfp); |
53 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | 53 | int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, |
54 | struct reservation_object *resv, | 54 | struct reservation_object *resv, |
55 | const struct fence_ops *exclude, | 55 | const struct dma_fence_ops *exclude, |
56 | bool write, | 56 | bool write, |
57 | unsigned long timeout, | 57 | unsigned long timeout, |
58 | gfp_t gfp); | 58 | gfp_t gfp); |
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 178798002a73..5c912c25f7d3 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -491,7 +491,7 @@ TRACE_EVENT(i915_gem_ring_dispatch, | |||
491 | __entry->ring = req->engine->id; | 491 | __entry->ring = req->engine->id; |
492 | __entry->seqno = req->fence.seqno; | 492 | __entry->seqno = req->fence.seqno; |
493 | __entry->flags = flags; | 493 | __entry->flags = flags; |
494 | fence_enable_sw_signaling(&req->fence); | 494 | dma_fence_enable_sw_signaling(&req->fence); |
495 | ), | 495 | ), |
496 | 496 | ||
497 | TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", | 497 | TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", |
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 23fc1042fed4..56efcc507ea2 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c | |||
@@ -464,7 +464,7 @@ static int intel_breadcrumbs_signaler(void *arg) | |||
464 | &request->signaling.wait); | 464 | &request->signaling.wait); |
465 | 465 | ||
466 | local_bh_disable(); | 466 | local_bh_disable(); |
467 | fence_signal(&request->fence); | 467 | dma_fence_signal(&request->fence); |
468 | local_bh_enable(); /* kick start the tasklets */ | 468 | local_bh_enable(); /* kick start the tasklets */ |
469 | 469 | ||
470 | /* Find the next oldest signal. Note that as we have | 470 | /* Find the next oldest signal. Note that as we have |
@@ -502,7 +502,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request) | |||
502 | struct rb_node *parent, **p; | 502 | struct rb_node *parent, **p; |
503 | bool first, wakeup; | 503 | bool first, wakeup; |
504 | 504 | ||
505 | /* locked by fence_enable_sw_signaling() */ | 505 | /* locked by dma_fence_enable_sw_signaling() */ |
506 | assert_spin_locked(&request->lock); | 506 | assert_spin_locked(&request->lock); |
507 | 507 | ||
508 | request->signaling.wait.tsk = b->signaler; | 508 | request->signaling.wait.tsk = b->signaler; |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 2dc94812bea5..8cceb345aa0f 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
@@ -245,7 +245,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine) | |||
245 | INIT_LIST_HEAD(&engine->execlist_queue); | 245 | INIT_LIST_HEAD(&engine->execlist_queue); |
246 | spin_lock_init(&engine->execlist_lock); | 246 | spin_lock_init(&engine->execlist_lock); |
247 | 247 | ||
248 | engine->fence_context = fence_context_alloc(1); | 248 | engine->fence_context = dma_fence_context_alloc(1); |
249 | 249 | ||
250 | intel_engine_init_requests(engine); | 250 | intel_engine_init_requests(engine); |
251 | intel_engine_init_hangcheck(engine); | 251 | intel_engine_init_hangcheck(engine); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index db61aa5f32ef..296f541fbe2f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <drm/drm_crtc_helper.h> | 18 | #include <drm/drm_crtc_helper.h> |
19 | #include <drm/drm_gem.h> | 19 | #include <drm/drm_gem.h> |
20 | #include <drm/drm_gem_cma_helper.h> | 20 | #include <drm/drm_gem_cma_helper.h> |
21 | #include <drm/drm_of.h> | ||
21 | #include <linux/component.h> | 22 | #include <linux/component.h> |
22 | #include <linux/iommu.h> | 23 | #include <linux/iommu.h> |
23 | #include <linux/of_address.h> | 24 | #include <linux/of_address.h> |
@@ -416,7 +417,8 @@ static int mtk_drm_probe(struct platform_device *pdev) | |||
416 | comp_type == MTK_DPI) { | 417 | comp_type == MTK_DPI) { |
417 | dev_info(dev, "Adding component match for %s\n", | 418 | dev_info(dev, "Adding component match for %s\n", |
418 | node->full_name); | 419 | node->full_name); |
419 | component_match_add(dev, &match, compare_of, node); | 420 | drm_of_component_match_add(dev, &match, compare_of, |
421 | node); | ||
420 | } else { | 422 | } else { |
421 | struct mtk_ddp_comp *comp; | 423 | struct mtk_ddp_comp *comp; |
422 | 424 | ||
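kirin above, mediatek here and msm below all switch from component_match_add() to drm_of_component_match_add(), a helper introduced in this pull so that the match entry pins the device_node itself; the callers are then expected to drop their own reference right after adding the match, as the kirin and msm hunks do. A sketch of the probe-side usage, assuming that refcounting behaviour (the my_* names are hypothetical):

#include <linux/component.h>
#include <linux/of.h>
#include <drm/drm_of.h>

static int my_compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static void my_add_match(struct device *master, struct component_match **match,
			 struct device_node *node)
{
	drm_of_component_match_add(master, match, my_compare_of, node);
	of_node_put(node);	/* the match now holds its own reference */
}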
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 919b35f2ad24..83272b456329 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c | |||
@@ -230,6 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = { | |||
230 | .ttm_tt_populate = mgag200_ttm_tt_populate, | 230 | .ttm_tt_populate = mgag200_ttm_tt_populate, |
231 | .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate, | 231 | .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate, |
232 | .init_mem_type = mgag200_bo_init_mem_type, | 232 | .init_mem_type = mgag200_bo_init_mem_type, |
233 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
233 | .evict_flags = mgag200_bo_evict_flags, | 234 | .evict_flags = mgag200_bo_evict_flags, |
234 | .move = NULL, | 235 | .move = NULL, |
235 | .verify_access = mgag200_bo_verify_access, | 236 | .verify_access = mgag200_bo_verify_access, |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 5127b75dbf40..7250ffc6322f 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c | |||
@@ -25,9 +25,6 @@ bool hang_debug = false; | |||
25 | MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); | 25 | MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); |
26 | module_param_named(hang_debug, hang_debug, bool, 0600); | 26 | module_param_named(hang_debug, hang_debug, bool, 0600); |
27 | 27 | ||
28 | struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); | ||
29 | struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); | ||
30 | |||
31 | static const struct adreno_info gpulist[] = { | 28 | static const struct adreno_info gpulist[] = { |
32 | { | 29 | { |
33 | .rev = ADRENO_REV(3, 0, 5, ANY_ID), | 30 | .rev = ADRENO_REV(3, 0, 5, ANY_ID), |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index a54f6e036b4a..07d99bdf7c99 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h | |||
@@ -311,4 +311,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu, | |||
311 | gpu_write(&gpu->base, reg - 1, data); | 311 | gpu_write(&gpu->base, reg - 1, data); |
312 | } | 312 | } |
313 | 313 | ||
314 | struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); | ||
315 | struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); | ||
316 | |||
314 | #endif /* __ADRENO_GPU_H__ */ | 317 | #endif /* __ADRENO_GPU_H__ */ |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 951c002b05df..cf50d3ec8d1b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | |||
@@ -75,15 +75,12 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev, | |||
75 | !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) | 75 | !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) |
76 | return; | 76 | return; |
77 | 77 | ||
78 | if (!dev->mode_config.rotation_property) | 78 | drm_plane_create_rotation_property(plane, |
79 | dev->mode_config.rotation_property = | 79 | DRM_ROTATE_0, |
80 | drm_mode_create_rotation_property(dev, | 80 | DRM_ROTATE_0 | |
81 | DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y); | 81 | DRM_ROTATE_180 | |
82 | 82 | DRM_REFLECT_X | | |
83 | if (dev->mode_config.rotation_property) | 83 | DRM_REFLECT_Y); |
84 | drm_object_attach_property(&plane->base, | ||
85 | dev->mode_config.rotation_property, | ||
86 | DRM_ROTATE_0); | ||
87 | } | 84 | } |
88 | 85 | ||
89 | /* helper to install properties which are common to planes and crtcs */ | 86 | /* helper to install properties which are common to planes and crtcs */ |
@@ -289,6 +286,8 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
289 | plane_enabled(old_state), plane_enabled(state)); | 286 | plane_enabled(old_state), plane_enabled(state)); |
290 | 287 | ||
291 | if (plane_enabled(state)) { | 288 | if (plane_enabled(state)) { |
289 | unsigned int rotation; | ||
290 | |||
292 | format = to_mdp_format(msm_framebuffer_format(state->fb)); | 291 | format = to_mdp_format(msm_framebuffer_format(state->fb)); |
293 | if (MDP_FORMAT_IS_YUV(format) && | 292 | if (MDP_FORMAT_IS_YUV(format) && |
294 | !pipe_supports_yuv(mdp5_plane->caps)) { | 293 | !pipe_supports_yuv(mdp5_plane->caps)) { |
@@ -309,8 +308,13 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
309 | return -EINVAL; | 308 | return -EINVAL; |
310 | } | 309 | } |
311 | 310 | ||
312 | hflip = !!(state->rotation & DRM_REFLECT_X); | 311 | rotation = drm_rotation_simplify(state->rotation, |
313 | vflip = !!(state->rotation & DRM_REFLECT_Y); | 312 | DRM_ROTATE_0 | |
313 | DRM_REFLECT_X | | ||
314 | DRM_REFLECT_Y); | ||
315 | hflip = !!(rotation & DRM_REFLECT_X); | ||
316 | vflip = !!(rotation & DRM_REFLECT_Y); | ||
317 | |||
314 | if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || | 318 | if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || |
315 | (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { | 319 | (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { |
316 | dev_err(plane->dev->dev, | 320 | dev_err(plane->dev->dev, |
@@ -681,6 +685,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, | |||
681 | int pe_top[COMP_MAX], pe_bottom[COMP_MAX]; | 685 | int pe_top[COMP_MAX], pe_bottom[COMP_MAX]; |
682 | uint32_t hdecm = 0, vdecm = 0; | 686 | uint32_t hdecm = 0, vdecm = 0; |
683 | uint32_t pix_format; | 687 | uint32_t pix_format; |
688 | unsigned int rotation; | ||
684 | bool vflip, hflip; | 689 | bool vflip, hflip; |
685 | unsigned long flags; | 690 | unsigned long flags; |
686 | int ret; | 691 | int ret; |
@@ -743,8 +748,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, | |||
743 | config |= get_scale_config(format, src_h, crtc_h, false); | 748 | config |= get_scale_config(format, src_h, crtc_h, false); |
744 | DBG("scale config = %x", config); | 749 | DBG("scale config = %x", config); |
745 | 750 | ||
746 | hflip = !!(pstate->rotation & DRM_REFLECT_X); | 751 | rotation = drm_rotation_simplify(pstate->rotation, |
747 | vflip = !!(pstate->rotation & DRM_REFLECT_Y); | 752 | DRM_ROTATE_0 | |
753 | DRM_REFLECT_X | | ||
754 | DRM_REFLECT_Y); | ||
755 | hflip = !!(rotation & DRM_REFLECT_X); | ||
756 | vflip = !!(rotation & DRM_REFLECT_Y); | ||
748 | 757 | ||
749 | spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); | 758 | spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); |
750 | 759 | ||
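The mdp5 hunks drop the hand-rolled, device-global rotation property in favour of the per-plane drm_plane_create_rotation_property(), and then run the requested rotation through drm_rotation_simplify() so that whatever userspace asked for is folded into the subset the pipe can actually perform before the flip bits are derived. A sketch of the two halves of that pattern; header placement and all names outside the DRM helpers are assumptions, not part of the patch:

#include <drm/drm_crtc.h>
#include <drm/drm_blend.h>	/* assumed home of DRM_ROTATE_* and the helpers */

/* at plane init time: expose only what the hardware supports */
static void my_plane_init_rotation(struct drm_plane *plane)
{
	drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
					   DRM_ROTATE_0 | DRM_ROTATE_180 |
					   DRM_REFLECT_X | DRM_REFLECT_Y);
}

/* at atomic_check/mode_set time: reduce the request to supported flips */
static void my_plane_get_flips(const struct drm_plane_state *state,
			       bool *hflip, bool *vflip)
{
	unsigned int rotation;

	rotation = drm_rotation_simplify(state->rotation,
					 DRM_ROTATE_0 |
					 DRM_REFLECT_X | DRM_REFLECT_Y);
	*hflip = !!(rotation & DRM_REFLECT_X);
	*vflip = !!(rotation & DRM_REFLECT_Y);
}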
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 663f2b6ef091..3c853733c99a 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #ifdef CONFIG_DEBUG_FS | 18 | #ifdef CONFIG_DEBUG_FS |
19 | #include "msm_drv.h" | 19 | #include "msm_drv.h" |
20 | #include "msm_gpu.h" | 20 | #include "msm_gpu.h" |
21 | #include "msm_debugfs.h" | ||
21 | 22 | ||
22 | static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) | 23 | static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) |
23 | { | 24 | { |
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index fb5c0b0a7594..84d38eaea585 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
@@ -15,6 +15,8 @@ | |||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * this program. If not, see <http://www.gnu.org/licenses/>. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <drm/drm_of.h> | ||
19 | |||
18 | #include "msm_drv.h" | 20 | #include "msm_drv.h" |
19 | #include "msm_debugfs.h" | 21 | #include "msm_debugfs.h" |
20 | #include "msm_fence.h" | 22 | #include "msm_fence.h" |
@@ -919,8 +921,8 @@ static int add_components_mdp(struct device *mdp_dev, | |||
919 | continue; | 921 | continue; |
920 | } | 922 | } |
921 | 923 | ||
922 | component_match_add(master_dev, matchptr, compare_of, intf); | 924 | drm_of_component_match_add(master_dev, matchptr, compare_of, |
923 | 925 | intf); | |
924 | of_node_put(intf); | 926 | of_node_put(intf); |
925 | of_node_put(ep_node); | 927 | of_node_put(ep_node); |
926 | } | 928 | } |
@@ -962,8 +964,8 @@ static int add_display_components(struct device *dev, | |||
962 | put_device(mdp_dev); | 964 | put_device(mdp_dev); |
963 | 965 | ||
964 | /* add the MDP component itself */ | 966 | /* add the MDP component itself */ |
965 | component_match_add(dev, matchptr, compare_of, | 967 | drm_of_component_match_add(dev, matchptr, compare_of, |
966 | mdp_dev->of_node); | 968 | mdp_dev->of_node); |
967 | } else { | 969 | } else { |
968 | /* MDP4 */ | 970 | /* MDP4 */ |
969 | mdp_dev = dev; | 971 | mdp_dev = dev; |
@@ -996,7 +998,7 @@ static int add_gpu_components(struct device *dev, | |||
996 | if (!np) | 998 | if (!np) |
997 | return 0; | 999 | return 0; |
998 | 1000 | ||
999 | component_match_add(dev, matchptr, compare_of, np); | 1001 | drm_of_component_match_add(dev, matchptr, compare_of, np); |
1000 | 1002 | ||
1001 | of_node_put(np); | 1003 | of_node_put(np); |
1002 | 1004 | ||
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index d0da52f2a806..940bf4992fe2 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
@@ -217,7 +217,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj); | |||
217 | int msm_gem_sync_object(struct drm_gem_object *obj, | 217 | int msm_gem_sync_object(struct drm_gem_object *obj, |
218 | struct msm_fence_context *fctx, bool exclusive); | 218 | struct msm_fence_context *fctx, bool exclusive); |
219 | void msm_gem_move_to_active(struct drm_gem_object *obj, | 219 | void msm_gem_move_to_active(struct drm_gem_object *obj, |
220 | struct msm_gpu *gpu, bool exclusive, struct fence *fence); | 220 | struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence); |
221 | void msm_gem_move_to_inactive(struct drm_gem_object *obj); | 221 | void msm_gem_move_to_inactive(struct drm_gem_object *obj); |
222 | int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); | 222 | int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); |
223 | int msm_gem_cpu_fini(struct drm_gem_object *obj); | 223 | int msm_gem_cpu_fini(struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index a9b9b1c95a2e..3f299c537b77 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c | |||
@@ -15,7 +15,7 @@ | |||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * this program. If not, see <http://www.gnu.org/licenses/>. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/fence.h> | 18 | #include <linux/dma-fence.h> |
19 | 19 | ||
20 | #include "msm_drv.h" | 20 | #include "msm_drv.h" |
21 | #include "msm_fence.h" | 21 | #include "msm_fence.h" |
@@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name) | |||
32 | 32 | ||
33 | fctx->dev = dev; | 33 | fctx->dev = dev; |
34 | fctx->name = name; | 34 | fctx->name = name; |
35 | fctx->context = fence_context_alloc(1); | 35 | fctx->context = dma_fence_context_alloc(1); |
36 | init_waitqueue_head(&fctx->event); | 36 | init_waitqueue_head(&fctx->event); |
37 | spin_lock_init(&fctx->spinlock); | 37 | spin_lock_init(&fctx->spinlock); |
38 | 38 | ||
@@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence) | |||
100 | 100 | ||
101 | struct msm_fence { | 101 | struct msm_fence { |
102 | struct msm_fence_context *fctx; | 102 | struct msm_fence_context *fctx; |
103 | struct fence base; | 103 | struct dma_fence base; |
104 | }; | 104 | }; |
105 | 105 | ||
106 | static inline struct msm_fence *to_msm_fence(struct fence *fence) | 106 | static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) |
107 | { | 107 | { |
108 | return container_of(fence, struct msm_fence, base); | 108 | return container_of(fence, struct msm_fence, base); |
109 | } | 109 | } |
110 | 110 | ||
111 | static const char *msm_fence_get_driver_name(struct fence *fence) | 111 | static const char *msm_fence_get_driver_name(struct dma_fence *fence) |
112 | { | 112 | { |
113 | return "msm"; | 113 | return "msm"; |
114 | } | 114 | } |
115 | 115 | ||
116 | static const char *msm_fence_get_timeline_name(struct fence *fence) | 116 | static const char *msm_fence_get_timeline_name(struct dma_fence *fence) |
117 | { | 117 | { |
118 | struct msm_fence *f = to_msm_fence(fence); | 118 | struct msm_fence *f = to_msm_fence(fence); |
119 | return f->fctx->name; | 119 | return f->fctx->name; |
120 | } | 120 | } |
121 | 121 | ||
122 | static bool msm_fence_enable_signaling(struct fence *fence) | 122 | static bool msm_fence_enable_signaling(struct dma_fence *fence) |
123 | { | 123 | { |
124 | return true; | 124 | return true; |
125 | } | 125 | } |
126 | 126 | ||
127 | static bool msm_fence_signaled(struct fence *fence) | 127 | static bool msm_fence_signaled(struct dma_fence *fence) |
128 | { | 128 | { |
129 | struct msm_fence *f = to_msm_fence(fence); | 129 | struct msm_fence *f = to_msm_fence(fence); |
130 | return fence_completed(f->fctx, f->base.seqno); | 130 | return fence_completed(f->fctx, f->base.seqno); |
131 | } | 131 | } |
132 | 132 | ||
133 | static void msm_fence_release(struct fence *fence) | 133 | static void msm_fence_release(struct dma_fence *fence) |
134 | { | 134 | { |
135 | struct msm_fence *f = to_msm_fence(fence); | 135 | struct msm_fence *f = to_msm_fence(fence); |
136 | kfree_rcu(f, base.rcu); | 136 | kfree_rcu(f, base.rcu); |
137 | } | 137 | } |
138 | 138 | ||
139 | static const struct fence_ops msm_fence_ops = { | 139 | static const struct dma_fence_ops msm_fence_ops = { |
140 | .get_driver_name = msm_fence_get_driver_name, | 140 | .get_driver_name = msm_fence_get_driver_name, |
141 | .get_timeline_name = msm_fence_get_timeline_name, | 141 | .get_timeline_name = msm_fence_get_timeline_name, |
142 | .enable_signaling = msm_fence_enable_signaling, | 142 | .enable_signaling = msm_fence_enable_signaling, |
143 | .signaled = msm_fence_signaled, | 143 | .signaled = msm_fence_signaled, |
144 | .wait = fence_default_wait, | 144 | .wait = dma_fence_default_wait, |
145 | .release = msm_fence_release, | 145 | .release = msm_fence_release, |
146 | }; | 146 | }; |
147 | 147 | ||
148 | struct fence * | 148 | struct dma_fence * |
149 | msm_fence_alloc(struct msm_fence_context *fctx) | 149 | msm_fence_alloc(struct msm_fence_context *fctx) |
150 | { | 150 | { |
151 | struct msm_fence *f; | 151 | struct msm_fence *f; |
@@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx) | |||
156 | 156 | ||
157 | f->fctx = fctx; | 157 | f->fctx = fctx; |
158 | 158 | ||
159 | fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, | 159 | dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, |
160 | fctx->context, ++fctx->last_fence); | 160 | fctx->context, ++fctx->last_fence); |
161 | 161 | ||
162 | return &f->base; | 162 | return &f->base; |
163 | } | 163 | } |
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h index ceb5b3d314b4..56061aa1959d 100644 --- a/drivers/gpu/drm/msm/msm_fence.h +++ b/drivers/gpu/drm/msm/msm_fence.h | |||
@@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx, | |||
41 | struct msm_fence_cb *cb, uint32_t fence); | 41 | struct msm_fence_cb *cb, uint32_t fence); |
42 | void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence); | 42 | void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence); |
43 | 43 | ||
44 | struct fence * msm_fence_alloc(struct msm_fence_context *fctx); | 44 | struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx); |
45 | 45 | ||
46 | #endif | 46 | #endif |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b6ac27e31929..57db7dbbb618 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -521,7 +521,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
521 | { | 521 | { |
522 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 522 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
523 | struct reservation_object_list *fobj; | 523 | struct reservation_object_list *fobj; |
524 | struct fence *fence; | 524 | struct dma_fence *fence; |
525 | int i, ret; | 525 | int i, ret; |
526 | 526 | ||
527 | if (!exclusive) { | 527 | if (!exclusive) { |
@@ -540,7 +540,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
540 | fence = reservation_object_get_excl(msm_obj->resv); | 540 | fence = reservation_object_get_excl(msm_obj->resv); |
541 | /* don't need to wait on our own fences, since ring is fifo */ | 541 | /* don't need to wait on our own fences, since ring is fifo */ |
542 | if (fence && (fence->context != fctx->context)) { | 542 | if (fence && (fence->context != fctx->context)) { |
543 | ret = fence_wait(fence, true); | 543 | ret = dma_fence_wait(fence, true); |
544 | if (ret) | 544 | if (ret) |
545 | return ret; | 545 | return ret; |
546 | } | 546 | } |
@@ -553,7 +553,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
553 | fence = rcu_dereference_protected(fobj->shared[i], | 553 | fence = rcu_dereference_protected(fobj->shared[i], |
554 | reservation_object_held(msm_obj->resv)); | 554 | reservation_object_held(msm_obj->resv)); |
555 | if (fence->context != fctx->context) { | 555 | if (fence->context != fctx->context) { |
556 | ret = fence_wait(fence, true); | 556 | ret = dma_fence_wait(fence, true); |
557 | if (ret) | 557 | if (ret) |
558 | return ret; | 558 | return ret; |
559 | } | 559 | } |
@@ -563,7 +563,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
563 | } | 563 | } |
564 | 564 | ||
565 | void msm_gem_move_to_active(struct drm_gem_object *obj, | 565 | void msm_gem_move_to_active(struct drm_gem_object *obj, |
566 | struct msm_gpu *gpu, bool exclusive, struct fence *fence) | 566 | struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence) |
567 | { | 567 | { |
568 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 568 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
569 | WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); | 569 | WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); |
@@ -616,10 +616,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj) | |||
616 | } | 616 | } |
617 | 617 | ||
618 | #ifdef CONFIG_DEBUG_FS | 618 | #ifdef CONFIG_DEBUG_FS |
619 | static void describe_fence(struct fence *fence, const char *type, | 619 | static void describe_fence(struct dma_fence *fence, const char *type, |
620 | struct seq_file *m) | 620 | struct seq_file *m) |
621 | { | 621 | { |
622 | if (!fence_is_signaled(fence)) | 622 | if (!dma_fence_is_signaled(fence)) |
623 | seq_printf(m, "\t%9s: %s %s seq %u\n", type, | 623 | seq_printf(m, "\t%9s: %s %s seq %u\n", type, |
624 | fence->ops->get_driver_name(fence), | 624 | fence->ops->get_driver_name(fence), |
625 | fence->ops->get_timeline_name(fence), | 625 | fence->ops->get_timeline_name(fence), |
@@ -631,7 +631,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) | |||
631 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 631 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
632 | struct reservation_object *robj = msm_obj->resv; | 632 | struct reservation_object *robj = msm_obj->resv; |
633 | struct reservation_object_list *fobj; | 633 | struct reservation_object_list *fobj; |
634 | struct fence *fence; | 634 | struct dma_fence *fence; |
635 | uint64_t off = drm_vma_node_start(&obj->vma_node); | 635 | uint64_t off = drm_vma_node_start(&obj->vma_node); |
636 | const char *madv; | 636 | const char *madv; |
637 | 637 | ||
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index b2f13cfe945e..2cb8551fda70 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h | |||
@@ -104,7 +104,7 @@ struct msm_gem_submit { | |||
104 | struct list_head node; /* node in gpu submit_list */ | 104 | struct list_head node; /* node in gpu submit_list */ |
105 | struct list_head bo_list; | 105 | struct list_head bo_list; |
106 | struct ww_acquire_ctx ticket; | 106 | struct ww_acquire_ctx ticket; |
107 | struct fence *fence; | 107 | struct dma_fence *fence; |
108 | struct pid *pid; /* submitting process */ | 108 | struct pid *pid; /* submitting process */ |
109 | bool valid; /* true if no cmdstream patching needed */ | 109 | bool valid; /* true if no cmdstream patching needed */ |
110 | unsigned int nr_cmds; | 110 | unsigned int nr_cmds; |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b6a0f37a65f3..25e8786fa4ca 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
@@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, | |||
60 | 60 | ||
61 | void msm_gem_submit_free(struct msm_gem_submit *submit) | 61 | void msm_gem_submit_free(struct msm_gem_submit *submit) |
62 | { | 62 | { |
63 | fence_put(submit->fence); | 63 | dma_fence_put(submit->fence); |
64 | list_del(&submit->node); | 64 | list_del(&submit->node); |
65 | put_pid(submit->pid); | 65 | put_pid(submit->pid); |
66 | kfree(submit); | 66 | kfree(submit); |
@@ -380,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
380 | struct msm_file_private *ctx = file->driver_priv; | 380 | struct msm_file_private *ctx = file->driver_priv; |
381 | struct msm_gem_submit *submit; | 381 | struct msm_gem_submit *submit; |
382 | struct msm_gpu *gpu = priv->gpu; | 382 | struct msm_gpu *gpu = priv->gpu; |
383 | struct fence *in_fence = NULL; | 383 | struct dma_fence *in_fence = NULL; |
384 | struct sync_file *sync_file = NULL; | 384 | struct sync_file *sync_file = NULL; |
385 | int out_fence_fd = -1; | 385 | int out_fence_fd = -1; |
386 | unsigned i; | 386 | unsigned i; |
@@ -439,7 +439,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
439 | */ | 439 | */ |
440 | 440 | ||
441 | if (in_fence->context != gpu->fctx->context) { | 441 | if (in_fence->context != gpu->fctx->context) { |
442 | ret = fence_wait(in_fence, true); | 442 | ret = dma_fence_wait(in_fence, true); |
443 | if (ret) | 443 | if (ret) |
444 | goto out; | 444 | goto out; |
445 | } | 445 | } |
@@ -542,7 +542,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
542 | 542 | ||
543 | out: | 543 | out: |
544 | if (in_fence) | 544 | if (in_fence) |
545 | fence_put(in_fence); | 545 | dma_fence_put(in_fence); |
546 | submit_cleanup(submit); | 546 | submit_cleanup(submit); |
547 | if (ret) | 547 | if (ret) |
548 | msm_gem_submit_free(submit); | 548 | msm_gem_submit_free(submit); |
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5bb09838b5ae..3249707e6834 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
@@ -476,7 +476,7 @@ static void retire_submits(struct msm_gpu *gpu) | |||
476 | submit = list_first_entry(&gpu->submit_list, | 476 | submit = list_first_entry(&gpu->submit_list, |
477 | struct msm_gem_submit, node); | 477 | struct msm_gem_submit, node); |
478 | 478 | ||
479 | if (fence_is_signaled(submit->fence)) { | 479 | if (dma_fence_is_signaled(submit->fence)) { |
480 | retire_submit(gpu, submit); | 480 | retire_submit(gpu, submit); |
481 | } else { | 481 | } else { |
482 | break; | 482 | break; |
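Both the msm submit path (msm_ioctl_gem_submit above) and the per-object sync code only block on a fence when it comes from a different fence context: fences from the driver's own context are already ordered by the FIFO ring, so the wait is skipped. The pattern, reduced to its core with a hypothetical my_ name:

#include <linux/dma-fence.h>

static int my_sync_to_fence(struct dma_fence *fence, u64 my_context)
{
	/* our own fences are ordered by the ring (FIFO); only foreign
	 * fences need a blocking, interruptible wait */
	if (!fence || fence->context == my_context)
		return 0;

	return dma_fence_wait(fence, true);
}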
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild index 2527bf4ca5d9..fde6e3656636 100644 --- a/drivers/gpu/drm/nouveau/Kbuild +++ b/drivers/gpu/drm/nouveau/Kbuild | |||
@@ -22,6 +22,7 @@ nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o | |||
22 | nouveau-y += nouveau_drm.o | 22 | nouveau-y += nouveau_drm.o |
23 | nouveau-y += nouveau_hwmon.o | 23 | nouveau-y += nouveau_hwmon.o |
24 | nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o | 24 | nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o |
25 | nouveau-$(CONFIG_LEDS_CLASS) += nouveau_led.o | ||
25 | nouveau-y += nouveau_nvif.o | 26 | nouveau-y += nouveau_nvif.o |
26 | nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o | 27 | nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o |
27 | nouveau-y += nouveau_usif.o # userspace <-> nvif | 28 | nouveau-y += nouveau_usif.o # userspace <-> nvif |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h index a47d46dda704..b7a54e605469 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h | |||
@@ -6,6 +6,7 @@ enum dcb_gpio_func_name { | |||
6 | DCB_GPIO_TVDAC1 = 0x2d, | 6 | DCB_GPIO_TVDAC1 = 0x2d, |
7 | DCB_GPIO_FAN = 0x09, | 7 | DCB_GPIO_FAN = 0x09, |
8 | DCB_GPIO_FAN_SENSE = 0x3d, | 8 | DCB_GPIO_FAN_SENSE = 0x3d, |
9 | DCB_GPIO_LOGO_LED_PWM = 0x84, | ||
9 | DCB_GPIO_UNUSED = 0xff, | 10 | DCB_GPIO_UNUSED = 0xff, |
10 | DCB_GPIO_VID0 = 0x04, | 11 | DCB_GPIO_VID0 = 0x04, |
11 | DCB_GPIO_VID1 = 0x05, | 12 | DCB_GPIO_VID1 = 0x05, |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h index 9cb97477248b..e933d3eede70 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h | |||
@@ -1,10 +1,16 @@ | |||
1 | #ifndef __NVBIOS_ICCSENSE_H__ | 1 | #ifndef __NVBIOS_ICCSENSE_H__ |
2 | #define __NVBIOS_ICCSENSE_H__ | 2 | #define __NVBIOS_ICCSENSE_H__ |
3 | struct pwr_rail_resistor_t { | ||
4 | u8 mohm; | ||
5 | bool enabled; | ||
6 | }; | ||
7 | |||
3 | struct pwr_rail_t { | 8 | struct pwr_rail_t { |
4 | u8 mode; | 9 | u8 mode; |
5 | u8 extdev_id; | 10 | u8 extdev_id; |
6 | u8 resistor_mohm; | 11 | u8 resistor_count; |
7 | u8 rail; | 12 | struct pwr_rail_resistor_t resistors[3]; |
13 | u16 config; | ||
8 | }; | 14 | }; |
9 | 15 | ||
10 | struct nvbios_iccsense { | 16 | struct nvbios_iccsense { |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h index 6633c6db9281..8fa1294c27b7 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h | |||
@@ -1,6 +1,9 @@ | |||
1 | #ifndef __NVBIOS_VMAP_H__ | 1 | #ifndef __NVBIOS_VMAP_H__ |
2 | #define __NVBIOS_VMAP_H__ | 2 | #define __NVBIOS_VMAP_H__ |
3 | struct nvbios_vmap { | 3 | struct nvbios_vmap { |
4 | u8 max0; | ||
5 | u8 max1; | ||
6 | u8 max2; | ||
4 | }; | 7 | }; |
5 | 8 | ||
6 | u16 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); | 9 | u16 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); |
@@ -8,7 +11,7 @@ u16 nvbios_vmap_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, | |||
8 | struct nvbios_vmap *); | 11 | struct nvbios_vmap *); |
9 | 12 | ||
10 | struct nvbios_vmap_entry { | 13 | struct nvbios_vmap_entry { |
11 | u8 unk0; | 14 | u8 mode; |
12 | u8 link; | 15 | u8 link; |
13 | u32 min; | 16 | u32 min; |
14 | u32 max; | 17 | u32 max; |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h index b0df610cec2b..23f3d1b93ebb 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h | |||
@@ -13,8 +13,9 @@ struct nvbios_volt { | |||
13 | u32 base; | 13 | u32 base; |
14 | 14 | ||
15 | /* GPIO mode */ | 15 | /* GPIO mode */ |
16 | u8 vidmask; | 16 | bool ranged; |
17 | s16 step; | 17 | u8 vidmask; |
18 | s16 step; | ||
18 | 19 | ||
19 | /* PWM mode */ | 20 | /* PWM mode */ |
20 | u32 pwm_freq; | 21 | u32 pwm_freq; |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h new file mode 100644 index 000000000000..87f804fc3a88 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h | |||
@@ -0,0 +1,24 @@ | |||
1 | #ifndef __NVBIOS_VPSTATE_H__ | ||
2 | #define __NVBIOS_VPSTATE_H__ | ||
3 | struct nvbios_vpstate_header { | ||
4 | u32 offset; | ||
5 | |||
6 | u8 version; | ||
7 | u8 hlen; | ||
8 | u8 ecount; | ||
9 | u8 elen; | ||
10 | u8 scount; | ||
11 | u8 slen; | ||
12 | |||
13 | u8 base_id; | ||
14 | u8 boost_id; | ||
15 | u8 tdp_id; | ||
16 | }; | ||
17 | struct nvbios_vpstate_entry { | ||
18 | u8 pstate; | ||
19 | u16 clock_mhz; | ||
20 | }; | ||
21 | int nvbios_vpstate_parse(struct nvkm_bios *, struct nvbios_vpstate_header *); | ||
22 | int nvbios_vpstate_entry(struct nvkm_bios *, struct nvbios_vpstate_header *, | ||
23 | u8 idx, struct nvbios_vpstate_entry *); | ||
24 | #endif | ||
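The new vpstate table exposes base, boost, and TDP entry ids in its header; reading them back is a matter of parsing the header once and indexing the entries. A minimal usage sketch, assuming the usual 0-on-success return convention and that the requested ids are present:

	/* Illustrative only: derive base/boost clock limits from the vpstate table. */
	static void read_vpstate_limits(struct nvkm_bios *bios,
					u32 *base_khz, u32 *boost_khz)
	{
		struct nvbios_vpstate_header h;
		struct nvbios_vpstate_entry base, boost;

		if (nvbios_vpstate_parse(bios, &h))
			return;
		if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
			*base_khz = base.clock_mhz * 1000;
		if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
			*boost_khz = boost.clock_mhz * 1000;
	}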
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h index fb54417bc458..e5275f742977 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h | |||
@@ -6,6 +6,10 @@ | |||
6 | struct nvbios_pll; | 6 | struct nvbios_pll; |
7 | struct nvkm_pll_vals; | 7 | struct nvkm_pll_vals; |
8 | 8 | ||
9 | #define NVKM_CLK_CSTATE_DEFAULT -1 /* POSTed default */ | ||
10 | #define NVKM_CLK_CSTATE_BASE -2 /* pstate base */ | ||
11 | #define NVKM_CLK_CSTATE_HIGHEST -3 /* highest possible */ | ||
12 | |||
9 | enum nv_clk_src { | 13 | enum nv_clk_src { |
10 | nv_clk_src_crystal, | 14 | nv_clk_src_crystal, |
11 | nv_clk_src_href, | 15 | nv_clk_src_href, |
@@ -52,6 +56,7 @@ struct nvkm_cstate { | |||
52 | struct list_head head; | 56 | struct list_head head; |
53 | u8 voltage; | 57 | u8 voltage; |
54 | u32 domain[nv_clk_src_max]; | 58 | u32 domain[nv_clk_src_max]; |
59 | u8 id; | ||
55 | }; | 60 | }; |
56 | 61 | ||
57 | struct nvkm_pstate { | 62 | struct nvkm_pstate { |
@@ -67,7 +72,8 @@ struct nvkm_pstate { | |||
67 | struct nvkm_domain { | 72 | struct nvkm_domain { |
68 | enum nv_clk_src name; | 73 | enum nv_clk_src name; |
69 | u8 bios; /* 0xff for none */ | 74 | u8 bios; /* 0xff for none */ |
70 | #define NVKM_CLK_DOM_FLAG_CORE 0x01 | 75 | #define NVKM_CLK_DOM_FLAG_CORE 0x01 |
76 | #define NVKM_CLK_DOM_FLAG_VPSTATE 0x02 | ||
71 | u8 flags; | 77 | u8 flags; |
72 | const char *mname; | 78 | const char *mname; |
73 | int mdiv; | 79 | int mdiv; |
@@ -93,10 +99,16 @@ struct nvkm_clk { | |||
93 | int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */ | 99 | int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */ |
94 | int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */ | 100 | int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */ |
95 | int astate; /* perfmon adjustment (base) */ | 101 | int astate; /* perfmon adjustment (base) */ |
96 | int tstate; /* thermal adjustment (max-) */ | ||
97 | int dstate; /* display adjustment (min+) */ | 102 | int dstate; /* display adjustment (min+) */ |
103 | u8 temp; | ||
98 | 104 | ||
99 | bool allow_reclock; | 105 | bool allow_reclock; |
106 | #define NVKM_CLK_BOOST_NONE 0x0 | ||
107 | #define NVKM_CLK_BOOST_BIOS 0x1 | ||
108 | #define NVKM_CLK_BOOST_FULL 0x2 | ||
109 | u8 boost_mode; | ||
110 | u32 base_khz; | ||
111 | u32 boost_khz; | ||
100 | 112 | ||
101 | /*XXX: die, these are here *only* to support the completely | 113 | /*XXX: die, these are here *only* to support the completely |
102 | * bat-shit insane what-was-nouveau_hw.c code | 114 | * bat-shit insane what-was-nouveau_hw.c code |
@@ -110,7 +122,7 @@ int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src); | |||
110 | int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr); | 122 | int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr); |
111 | int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait); | 123 | int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait); |
112 | int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel); | 124 | int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel); |
113 | int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel); | 125 | int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature); |
114 | 126 | ||
115 | int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **); | 127 | int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **); |
116 | int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **); | 128 | int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **); |
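nvkm_clk_tstate() now takes the current temperature directly instead of a relative adjustment, and the new boost_mode/base_khz/boost_khz fields give the clock code a software ceiling to honour. The clamp below is only a sketch of that idea, not the driver's actual reclocking path:

	/* Illustrative only: clamp a requested core clock (kHz) to the boost policy. */
	static u32 clk_boost_limit(const struct nvkm_clk *clk, u32 khz)
	{
		if (clk->boost_mode == NVKM_CLK_BOOST_NONE && clk->base_khz)
			return min(khz, clk->base_khz);
		if (clk->boost_mode == NVKM_CLK_BOOST_BIOS && clk->boost_khz)
			return min(khz, clk->boost_khz);
		return khz;	/* NVKM_CLK_BOOST_FULL: no software ceiling */
	}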
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h index b765f4ffcde6..08ef9983c643 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h | |||
@@ -15,12 +15,28 @@ struct nvkm_volt { | |||
15 | 15 | ||
16 | u32 max_uv; | 16 | u32 max_uv; |
17 | u32 min_uv; | 17 | u32 min_uv; |
18 | |||
19 | /* | ||
20 | * These are fully functional map entries creating a sw ceiling for | ||
21 | * the voltage. These all can describe different kinds of curves, so | ||
22 | * that for any given temperature a different one can return the lowest | ||
23 | * value of all three. | ||
24 | */ | ||
25 | u8 max0_id; | ||
26 | u8 max1_id; | ||
27 | u8 max2_id; | ||
28 | |||
29 | int speedo; | ||
18 | }; | 30 | }; |
19 | 31 | ||
32 | int nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temperature); | ||
33 | int nvkm_volt_map_min(struct nvkm_volt *volt, u8 id); | ||
20 | int nvkm_volt_get(struct nvkm_volt *); | 34 | int nvkm_volt_get(struct nvkm_volt *); |
21 | int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition); | 35 | int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp, |
36 | int condition); | ||
22 | 37 | ||
23 | int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **); | 38 | int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **); |
39 | int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **); | ||
24 | int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **); | 40 | int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **); |
25 | int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **); | 41 | int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **); |
26 | int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **); | 42 | int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **); |
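The three max*_id entries are voltage-map ids whose curves act as temperature-dependent ceilings; the effective limit is whichever of them evaluates lowest at the current temperature. A sketch of that selection, assuming nvkm_volt_map() returns microvolts and that all three ids are valid:

	/* Illustrative only: software voltage ceiling at a given temperature. */
	static int volt_sw_ceiling_uv(struct nvkm_volt *volt, u8 temp)
	{
		return min3(nvkm_volt_map(volt, volt->max0_id, temp),
			    nvkm_volt_map(volt, volt->max1_id, temp),
			    nvkm_volt_map(volt, volt->max2_id, temp));
	}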
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index f5101be806cb..5e2c5685b4dd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | |||
@@ -232,6 +232,7 @@ nouveau_backlight_init(struct drm_device *dev) | |||
232 | case NV_DEVICE_INFO_V0_TESLA: | 232 | case NV_DEVICE_INFO_V0_TESLA: |
233 | case NV_DEVICE_INFO_V0_FERMI: | 233 | case NV_DEVICE_INFO_V0_FERMI: |
234 | case NV_DEVICE_INFO_V0_KEPLER: | 234 | case NV_DEVICE_INFO_V0_KEPLER: |
235 | case NV_DEVICE_INFO_V0_MAXWELL: | ||
235 | return nv50_backlight_init(connector); | 236 | return nv50_backlight_init(connector); |
236 | default: | 237 | default: |
237 | break; | 238 | break; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 0067586eb015..18eb061ccafb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h | |||
@@ -31,10 +31,8 @@ | |||
31 | 31 | ||
32 | #define DCB_LOC_ON_CHIP 0 | 32 | #define DCB_LOC_ON_CHIP 0 |
33 | 33 | ||
34 | #define ROM16(x) le16_to_cpu(*(u16 *)&(x)) | 34 | #define ROM16(x) get_unaligned_le16(&(x)) |
35 | #define ROM32(x) le32_to_cpu(*(u32 *)&(x)) | 35 | #define ROM32(x) get_unaligned_le32(&(x)) |
36 | #define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); }) | ||
37 | #define ROM64(x) le64_to_cpu(*(u64 *)&(x)) | ||
38 | #define ROMPTR(d,x) ({ \ | 36 | #define ROMPTR(d,x) ({ \ |
39 | struct nouveau_drm *drm = nouveau_drm((d)); \ | 37 | struct nouveau_drm *drm = nouveau_drm((d)); \ |
40 | ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \ | 38 | ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \ |
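get_unaligned_le16()/get_unaligned_le32() read little-endian values a byte at a time where necessary, so the ROM macros no longer cast an arbitrary VBIOS offset to a u16/u32 pointer, a dereference that can fault on architectures without unaligned load support. In isolation the idiom looks like this (standalone illustration, not driver code):

	#include <asm/unaligned.h>

	/* Illustrative only: read a 16-bit little-endian value from a possibly
	 * unaligned offset inside a VBIOS image. */
	static u16 vbios_rd16(const u8 *data, unsigned int offset)
	{
		return get_unaligned_le16(data + offset);
	}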
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 343b8659472c..e0c0007689e5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i) | |||
83 | 83 | ||
84 | static void | 84 | static void |
85 | nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, | 85 | nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, |
86 | struct fence *fence) | 86 | struct dma_fence *fence) |
87 | { | 87 | { |
88 | struct nouveau_drm *drm = nouveau_drm(dev); | 88 | struct nouveau_drm *drm = nouveau_drm(dev); |
89 | 89 | ||
90 | if (tile) { | 90 | if (tile) { |
91 | spin_lock(&drm->tile.lock); | 91 | spin_lock(&drm->tile.lock); |
92 | tile->fence = (struct nouveau_fence *)fence_get(fence); | 92 | tile->fence = (struct nouveau_fence *)dma_fence_get(fence); |
93 | tile->used = false; | 93 | tile->used = false; |
94 | spin_unlock(&drm->tile.lock); | 94 | spin_unlock(&drm->tile.lock); |
95 | } | 95 | } |
@@ -1243,7 +1243,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, | |||
1243 | { | 1243 | { |
1244 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); | 1244 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
1245 | struct drm_device *dev = drm->dev; | 1245 | struct drm_device *dev = drm->dev; |
1246 | struct fence *fence = reservation_object_get_excl(bo->resv); | 1246 | struct dma_fence *fence = reservation_object_get_excl(bo->resv); |
1247 | 1247 | ||
1248 | nv10_bo_put_tile_region(dev, *old_tile, fence); | 1248 | nv10_bo_put_tile_region(dev, *old_tile, fence); |
1249 | *old_tile = new_tile; | 1249 | *old_tile = new_tile; |
@@ -1561,6 +1561,7 @@ struct ttm_bo_driver nouveau_bo_driver = { | |||
1561 | .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, | 1561 | .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, |
1562 | .invalidate_caches = nouveau_bo_invalidate_caches, | 1562 | .invalidate_caches = nouveau_bo_invalidate_caches, |
1563 | .init_mem_type = nouveau_bo_init_mem_type, | 1563 | .init_mem_type = nouveau_bo_init_mem_type, |
1564 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
1564 | .evict_flags = nouveau_bo_evict_flags, | 1565 | .evict_flags = nouveau_bo_evict_flags, |
1565 | .move_notify = nouveau_bo_move_ntfy, | 1566 | .move_notify = nouveau_bo_move_ntfy, |
1566 | .move = nouveau_bo_move, | 1567 | .move = nouveau_bo_move, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 3100fd88a015..6adf94789417 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include "nouveau_ttm.h" | 47 | #include "nouveau_ttm.h" |
48 | #include "nouveau_gem.h" | 48 | #include "nouveau_gem.h" |
49 | #include "nouveau_vga.h" | 49 | #include "nouveau_vga.h" |
50 | #include "nouveau_led.h" | ||
50 | #include "nouveau_hwmon.h" | 51 | #include "nouveau_hwmon.h" |
51 | #include "nouveau_acpi.h" | 52 | #include "nouveau_acpi.h" |
52 | #include "nouveau_bios.h" | 53 | #include "nouveau_bios.h" |
@@ -475,6 +476,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
475 | nouveau_hwmon_init(dev); | 476 | nouveau_hwmon_init(dev); |
476 | nouveau_accel_init(drm); | 477 | nouveau_accel_init(drm); |
477 | nouveau_fbcon_init(dev); | 478 | nouveau_fbcon_init(dev); |
479 | nouveau_led_init(dev); | ||
478 | 480 | ||
479 | if (nouveau_runtime_pm != 0) { | 481 | if (nouveau_runtime_pm != 0) { |
480 | pm_runtime_use_autosuspend(dev->dev); | 482 | pm_runtime_use_autosuspend(dev->dev); |
@@ -510,6 +512,7 @@ nouveau_drm_unload(struct drm_device *dev) | |||
510 | pm_runtime_forbid(dev->dev); | 512 | pm_runtime_forbid(dev->dev); |
511 | } | 513 | } |
512 | 514 | ||
515 | nouveau_led_fini(dev); | ||
513 | nouveau_fbcon_fini(dev); | 516 | nouveau_fbcon_fini(dev); |
514 | nouveau_accel_fini(drm); | 517 | nouveau_accel_fini(drm); |
515 | nouveau_hwmon_fini(dev); | 518 | nouveau_hwmon_fini(dev); |
@@ -561,6 +564,8 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) | |||
561 | struct nouveau_cli *cli; | 564 | struct nouveau_cli *cli; |
562 | int ret; | 565 | int ret; |
563 | 566 | ||
567 | nouveau_led_suspend(dev); | ||
568 | |||
564 | if (dev->mode_config.num_crtc) { | 569 | if (dev->mode_config.num_crtc) { |
565 | NV_INFO(drm, "suspending console...\n"); | 570 | NV_INFO(drm, "suspending console...\n"); |
566 | nouveau_fbcon_set_suspend(dev, 1); | 571 | nouveau_fbcon_set_suspend(dev, 1); |
@@ -649,6 +654,8 @@ nouveau_do_resume(struct drm_device *dev, bool runtime) | |||
649 | nouveau_fbcon_set_suspend(dev, 0); | 654 | nouveau_fbcon_set_suspend(dev, 0); |
650 | } | 655 | } |
651 | 656 | ||
657 | nouveau_led_resume(dev); | ||
658 | |||
652 | return 0; | 659 | return 0; |
653 | } | 660 | } |
654 | 661 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 822a0212cd48..c0e2b3207503 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -166,6 +166,9 @@ struct nouveau_drm { | |||
166 | struct nouveau_hwmon *hwmon; | 166 | struct nouveau_hwmon *hwmon; |
167 | struct nouveau_debugfs *debugfs; | 167 | struct nouveau_debugfs *debugfs; |
168 | 168 | ||
169 | /* led management */ | ||
170 | struct nouveau_led *led; | ||
171 | |||
169 | /* display power reference */ | 172 | /* display power reference */ |
170 | bool have_disp_power_ref; | 173 | bool have_disp_power_ref; |
171 | 174 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 4bb9ab892ae1..e9529ee6bc23 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | #include <linux/ktime.h> | 29 | #include <linux/ktime.h> |
30 | #include <linux/hrtimer.h> | 30 | #include <linux/hrtimer.h> |
31 | #include <trace/events/fence.h> | 31 | #include <trace/events/dma_fence.h> |
32 | 32 | ||
33 | #include <nvif/cl826e.h> | 33 | #include <nvif/cl826e.h> |
34 | #include <nvif/notify.h> | 34 | #include <nvif/notify.h> |
@@ -38,11 +38,11 @@ | |||
38 | #include "nouveau_dma.h" | 38 | #include "nouveau_dma.h" |
39 | #include "nouveau_fence.h" | 39 | #include "nouveau_fence.h" |
40 | 40 | ||
41 | static const struct fence_ops nouveau_fence_ops_uevent; | 41 | static const struct dma_fence_ops nouveau_fence_ops_uevent; |
42 | static const struct fence_ops nouveau_fence_ops_legacy; | 42 | static const struct dma_fence_ops nouveau_fence_ops_legacy; |
43 | 43 | ||
44 | static inline struct nouveau_fence * | 44 | static inline struct nouveau_fence * |
45 | from_fence(struct fence *fence) | 45 | from_fence(struct dma_fence *fence) |
46 | { | 46 | { |
47 | return container_of(fence, struct nouveau_fence, base); | 47 | return container_of(fence, struct nouveau_fence, base); |
48 | } | 48 | } |
@@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence) | |||
58 | { | 58 | { |
59 | int drop = 0; | 59 | int drop = 0; |
60 | 60 | ||
61 | fence_signal_locked(&fence->base); | 61 | dma_fence_signal_locked(&fence->base); |
62 | list_del(&fence->head); | 62 | list_del(&fence->head); |
63 | rcu_assign_pointer(fence->channel, NULL); | 63 | rcu_assign_pointer(fence->channel, NULL); |
64 | 64 | ||
65 | if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) { | 65 | if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) { |
66 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 66 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
67 | 67 | ||
68 | if (!--fctx->notify_ref) | 68 | if (!--fctx->notify_ref) |
69 | drop = 1; | 69 | drop = 1; |
70 | } | 70 | } |
71 | 71 | ||
72 | fence_put(&fence->base); | 72 | dma_fence_put(&fence->base); |
73 | return drop; | 73 | return drop; |
74 | } | 74 | } |
75 | 75 | ||
76 | static struct nouveau_fence * | 76 | static struct nouveau_fence * |
77 | nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) { | 77 | nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) { |
78 | struct nouveau_fence_priv *priv = (void*)drm->fence; | 78 | struct nouveau_fence_priv *priv = (void*)drm->fence; |
79 | 79 | ||
80 | if (fence->ops != &nouveau_fence_ops_legacy && | 80 | if (fence->ops != &nouveau_fence_ops_legacy && |
@@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha | |||
201 | 201 | ||
202 | struct nouveau_fence_work { | 202 | struct nouveau_fence_work { |
203 | struct work_struct work; | 203 | struct work_struct work; |
204 | struct fence_cb cb; | 204 | struct dma_fence_cb cb; |
205 | void (*func)(void *); | 205 | void (*func)(void *); |
206 | void *data; | 206 | void *data; |
207 | }; | 207 | }; |
@@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork) | |||
214 | kfree(work); | 214 | kfree(work); |
215 | } | 215 | } |
216 | 216 | ||
217 | static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb) | 217 | static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
218 | { | 218 | { |
219 | struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb); | 219 | struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb); |
220 | 220 | ||
@@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb) | |||
222 | } | 222 | } |
223 | 223 | ||
224 | void | 224 | void |
225 | nouveau_fence_work(struct fence *fence, | 225 | nouveau_fence_work(struct dma_fence *fence, |
226 | void (*func)(void *), void *data) | 226 | void (*func)(void *), void *data) |
227 | { | 227 | { |
228 | struct nouveau_fence_work *work; | 228 | struct nouveau_fence_work *work; |
229 | 229 | ||
230 | if (fence_is_signaled(fence)) | 230 | if (dma_fence_is_signaled(fence)) |
231 | goto err; | 231 | goto err; |
232 | 232 | ||
233 | work = kmalloc(sizeof(*work), GFP_KERNEL); | 233 | work = kmalloc(sizeof(*work), GFP_KERNEL); |
@@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence, | |||
245 | work->func = func; | 245 | work->func = func; |
246 | work->data = data; | 246 | work->data = data; |
247 | 247 | ||
248 | if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0) | 248 | if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0) |
249 | goto err_free; | 249 | goto err_free; |
250 | return; | 250 | return; |
251 | 251 | ||
@@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) | |||
266 | fence->timeout = jiffies + (15 * HZ); | 266 | fence->timeout = jiffies + (15 * HZ); |
267 | 267 | ||
268 | if (priv->uevent) | 268 | if (priv->uevent) |
269 | fence_init(&fence->base, &nouveau_fence_ops_uevent, | 269 | dma_fence_init(&fence->base, &nouveau_fence_ops_uevent, |
270 | &fctx->lock, fctx->context, ++fctx->sequence); | 270 | &fctx->lock, fctx->context, ++fctx->sequence); |
271 | else | 271 | else |
272 | fence_init(&fence->base, &nouveau_fence_ops_legacy, | 272 | dma_fence_init(&fence->base, &nouveau_fence_ops_legacy, |
273 | &fctx->lock, fctx->context, ++fctx->sequence); | 273 | &fctx->lock, fctx->context, ++fctx->sequence); |
274 | kref_get(&fctx->fence_ref); | 274 | kref_get(&fctx->fence_ref); |
275 | 275 | ||
276 | trace_fence_emit(&fence->base); | 276 | trace_dma_fence_emit(&fence->base); |
277 | ret = fctx->emit(fence); | 277 | ret = fctx->emit(fence); |
278 | if (!ret) { | 278 | if (!ret) { |
279 | fence_get(&fence->base); | 279 | dma_fence_get(&fence->base); |
280 | spin_lock_irq(&fctx->lock); | 280 | spin_lock_irq(&fctx->lock); |
281 | 281 | ||
282 | if (nouveau_fence_update(chan, fctx)) | 282 | if (nouveau_fence_update(chan, fctx)) |
@@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence) | |||
298 | struct nouveau_channel *chan; | 298 | struct nouveau_channel *chan; |
299 | unsigned long flags; | 299 | unsigned long flags; |
300 | 300 | ||
301 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) | 301 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) |
302 | return true; | 302 | return true; |
303 | 303 | ||
304 | spin_lock_irqsave(&fctx->lock, flags); | 304 | spin_lock_irqsave(&fctx->lock, flags); |
@@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence) | |||
307 | nvif_notify_put(&fctx->notify); | 307 | nvif_notify_put(&fctx->notify); |
308 | spin_unlock_irqrestore(&fctx->lock, flags); | 308 | spin_unlock_irqrestore(&fctx->lock, flags); |
309 | } | 309 | } |
310 | return fence_is_signaled(&fence->base); | 310 | return dma_fence_is_signaled(&fence->base); |
311 | } | 311 | } |
312 | 312 | ||
313 | static long | 313 | static long |
314 | nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait) | 314 | nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait) |
315 | { | 315 | { |
316 | struct nouveau_fence *fence = from_fence(f); | 316 | struct nouveau_fence *fence = from_fence(f); |
317 | unsigned long sleep_time = NSEC_PER_MSEC / 1000; | 317 | unsigned long sleep_time = NSEC_PER_MSEC / 1000; |
@@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) | |||
378 | if (!lazy) | 378 | if (!lazy) |
379 | return nouveau_fence_wait_busy(fence, intr); | 379 | return nouveau_fence_wait_busy(fence, intr); |
380 | 380 | ||
381 | ret = fence_wait_timeout(&fence->base, intr, 15 * HZ); | 381 | ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ); |
382 | if (ret < 0) | 382 | if (ret < 0) |
383 | return ret; | 383 | return ret; |
384 | else if (!ret) | 384 | else if (!ret) |
@@ -391,7 +391,7 @@ int | |||
391 | nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr) | 391 | nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr) |
392 | { | 392 | { |
393 | struct nouveau_fence_chan *fctx = chan->fence; | 393 | struct nouveau_fence_chan *fctx = chan->fence; |
394 | struct fence *fence; | 394 | struct dma_fence *fence; |
395 | struct reservation_object *resv = nvbo->bo.resv; | 395 | struct reservation_object *resv = nvbo->bo.resv; |
396 | struct reservation_object_list *fobj; | 396 | struct reservation_object_list *fobj; |
397 | struct nouveau_fence *f; | 397 | struct nouveau_fence *f; |
@@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e | |||
421 | } | 421 | } |
422 | 422 | ||
423 | if (must_wait) | 423 | if (must_wait) |
424 | ret = fence_wait(fence, intr); | 424 | ret = dma_fence_wait(fence, intr); |
425 | 425 | ||
426 | return ret; | 426 | return ret; |
427 | } | 427 | } |
@@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e | |||
446 | } | 446 | } |
447 | 447 | ||
448 | if (must_wait) | 448 | if (must_wait) |
449 | ret = fence_wait(fence, intr); | 449 | ret = dma_fence_wait(fence, intr); |
450 | } | 450 | } |
451 | 451 | ||
452 | return ret; | 452 | return ret; |
@@ -456,7 +456,7 @@ void | |||
456 | nouveau_fence_unref(struct nouveau_fence **pfence) | 456 | nouveau_fence_unref(struct nouveau_fence **pfence) |
457 | { | 457 | { |
458 | if (*pfence) | 458 | if (*pfence) |
459 | fence_put(&(*pfence)->base); | 459 | dma_fence_put(&(*pfence)->base); |
460 | *pfence = NULL; | 460 | *pfence = NULL; |
461 | } | 461 | } |
462 | 462 | ||
@@ -484,12 +484,12 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem, | |||
484 | return ret; | 484 | return ret; |
485 | } | 485 | } |
486 | 486 | ||
487 | static const char *nouveau_fence_get_get_driver_name(struct fence *fence) | 487 | static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence) |
488 | { | 488 | { |
489 | return "nouveau"; | 489 | return "nouveau"; |
490 | } | 490 | } |
491 | 491 | ||
492 | static const char *nouveau_fence_get_timeline_name(struct fence *f) | 492 | static const char *nouveau_fence_get_timeline_name(struct dma_fence *f) |
493 | { | 493 | { |
494 | struct nouveau_fence *fence = from_fence(f); | 494 | struct nouveau_fence *fence = from_fence(f); |
495 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 495 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
@@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f) | |||
503 | * result. The drm node should still be there, so we can derive the index from | 503 | * result. The drm node should still be there, so we can derive the index from |
504 | * the fence context. | 504 | * the fence context. |
505 | */ | 505 | */ |
506 | static bool nouveau_fence_is_signaled(struct fence *f) | 506 | static bool nouveau_fence_is_signaled(struct dma_fence *f) |
507 | { | 507 | { |
508 | struct nouveau_fence *fence = from_fence(f); | 508 | struct nouveau_fence *fence = from_fence(f); |
509 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 509 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
@@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f) | |||
519 | return ret; | 519 | return ret; |
520 | } | 520 | } |
521 | 521 | ||
522 | static bool nouveau_fence_no_signaling(struct fence *f) | 522 | static bool nouveau_fence_no_signaling(struct dma_fence *f) |
523 | { | 523 | { |
524 | struct nouveau_fence *fence = from_fence(f); | 524 | struct nouveau_fence *fence = from_fence(f); |
525 | 525 | ||
@@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f) | |||
530 | WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1); | 530 | WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1); |
531 | 531 | ||
532 | /* | 532 | /* |
533 | * This needs uevents to work correctly, but fence_add_callback relies on | 533 | * This needs uevents to work correctly, but dma_fence_add_callback relies on |
534 | * being able to enable signaling. It will still get signaled eventually, | 534 | * being able to enable signaling. It will still get signaled eventually, |
535 | * just not right away. | 535 | * just not right away. |
536 | */ | 536 | */ |
537 | if (nouveau_fence_is_signaled(f)) { | 537 | if (nouveau_fence_is_signaled(f)) { |
538 | list_del(&fence->head); | 538 | list_del(&fence->head); |
539 | 539 | ||
540 | fence_put(&fence->base); | 540 | dma_fence_put(&fence->base); |
541 | return false; | 541 | return false; |
542 | } | 542 | } |
543 | 543 | ||
544 | return true; | 544 | return true; |
545 | } | 545 | } |
546 | 546 | ||
547 | static void nouveau_fence_release(struct fence *f) | 547 | static void nouveau_fence_release(struct dma_fence *f) |
548 | { | 548 | { |
549 | struct nouveau_fence *fence = from_fence(f); | 549 | struct nouveau_fence *fence = from_fence(f); |
550 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 550 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
551 | 551 | ||
552 | kref_put(&fctx->fence_ref, nouveau_fence_context_put); | 552 | kref_put(&fctx->fence_ref, nouveau_fence_context_put); |
553 | fence_free(&fence->base); | 553 | dma_fence_free(&fence->base); |
554 | } | 554 | } |
555 | 555 | ||
556 | static const struct fence_ops nouveau_fence_ops_legacy = { | 556 | static const struct dma_fence_ops nouveau_fence_ops_legacy = { |
557 | .get_driver_name = nouveau_fence_get_get_driver_name, | 557 | .get_driver_name = nouveau_fence_get_get_driver_name, |
558 | .get_timeline_name = nouveau_fence_get_timeline_name, | 558 | .get_timeline_name = nouveau_fence_get_timeline_name, |
559 | .enable_signaling = nouveau_fence_no_signaling, | 559 | .enable_signaling = nouveau_fence_no_signaling, |
@@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = { | |||
562 | .release = nouveau_fence_release | 562 | .release = nouveau_fence_release |
563 | }; | 563 | }; |
564 | 564 | ||
565 | static bool nouveau_fence_enable_signaling(struct fence *f) | 565 | static bool nouveau_fence_enable_signaling(struct dma_fence *f) |
566 | { | 566 | { |
567 | struct nouveau_fence *fence = from_fence(f); | 567 | struct nouveau_fence *fence = from_fence(f); |
568 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 568 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
@@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f) | |||
573 | 573 | ||
574 | ret = nouveau_fence_no_signaling(f); | 574 | ret = nouveau_fence_no_signaling(f); |
575 | if (ret) | 575 | if (ret) |
576 | set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags); | 576 | set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags); |
577 | else if (!--fctx->notify_ref) | 577 | else if (!--fctx->notify_ref) |
578 | nvif_notify_put(&fctx->notify); | 578 | nvif_notify_put(&fctx->notify); |
579 | 579 | ||
580 | return ret; | 580 | return ret; |
581 | } | 581 | } |
582 | 582 | ||
583 | static const struct fence_ops nouveau_fence_ops_uevent = { | 583 | static const struct dma_fence_ops nouveau_fence_ops_uevent = { |
584 | .get_driver_name = nouveau_fence_get_get_driver_name, | 584 | .get_driver_name = nouveau_fence_get_get_driver_name, |
585 | .get_timeline_name = nouveau_fence_get_timeline_name, | 585 | .get_timeline_name = nouveau_fence_get_timeline_name, |
586 | .enable_signaling = nouveau_fence_enable_signaling, | 586 | .enable_signaling = nouveau_fence_enable_signaling, |
587 | .signaled = nouveau_fence_is_signaled, | 587 | .signaled = nouveau_fence_is_signaled, |
588 | .wait = fence_default_wait, | 588 | .wait = dma_fence_default_wait, |
589 | .release = NULL | 589 | .release = NULL |
590 | }; | 590 | }; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 64c4ce7115ad..41f3c019e534 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h | |||
@@ -1,14 +1,14 @@ | |||
1 | #ifndef __NOUVEAU_FENCE_H__ | 1 | #ifndef __NOUVEAU_FENCE_H__ |
2 | #define __NOUVEAU_FENCE_H__ | 2 | #define __NOUVEAU_FENCE_H__ |
3 | 3 | ||
4 | #include <linux/fence.h> | 4 | #include <linux/dma-fence.h> |
5 | #include <nvif/notify.h> | 5 | #include <nvif/notify.h> |
6 | 6 | ||
7 | struct nouveau_drm; | 7 | struct nouveau_drm; |
8 | struct nouveau_bo; | 8 | struct nouveau_bo; |
9 | 9 | ||
10 | struct nouveau_fence { | 10 | struct nouveau_fence { |
11 | struct fence base; | 11 | struct dma_fence base; |
12 | 12 | ||
13 | struct list_head head; | 13 | struct list_head head; |
14 | 14 | ||
@@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **); | |||
24 | 24 | ||
25 | int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *); | 25 | int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *); |
26 | bool nouveau_fence_done(struct nouveau_fence *); | 26 | bool nouveau_fence_done(struct nouveau_fence *); |
27 | void nouveau_fence_work(struct fence *, void (*)(void *), void *); | 27 | void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *); |
28 | int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); | 28 | int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); |
29 | int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); | 29 | int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); |
30 | 30 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0bd7164bc817..7f083c95f422 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma) | |||
119 | const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM; | 119 | const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM; |
120 | struct reservation_object *resv = nvbo->bo.resv; | 120 | struct reservation_object *resv = nvbo->bo.resv; |
121 | struct reservation_object_list *fobj; | 121 | struct reservation_object_list *fobj; |
122 | struct fence *fence = NULL; | 122 | struct dma_fence *fence = NULL; |
123 | 123 | ||
124 | fobj = reservation_object_get_list(resv); | 124 | fobj = reservation_object_get_list(resv); |
125 | 125 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c new file mode 100644 index 000000000000..3e2f1b6cd4df --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_led.c | |||
@@ -0,0 +1,139 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2016 Martin Peres | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining | ||
5 | * a copy of this software and associated documentation files (the | ||
6 | * "Software"), to deal in the Software without restriction, including | ||
7 | * without limitation the rights to use, copy, modify, merge, publish, | ||
8 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
9 | * permit persons to whom the Software is furnished to do so, subject to | ||
10 | * the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the | ||
13 | * next paragraph) shall be included in all copies or substantial | ||
14 | * portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
17 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
18 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
19 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
20 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
21 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
22 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | /* | ||
27 | * Authors: | ||
28 | * Martin Peres <martin.peres@free.fr> | ||
29 | */ | ||
30 | |||
31 | #include <linux/leds.h> | ||
32 | |||
33 | #include "nouveau_led.h" | ||
34 | #include <nvkm/subdev/gpio.h> | ||
35 | |||
36 | static enum led_brightness | ||
37 | nouveau_led_get_brightness(struct led_classdev *led) | ||
38 | { | ||
39 | struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev; | ||
40 | struct nouveau_drm *drm = nouveau_drm(drm_dev); | ||
41 | struct nvif_object *device = &drm->device.object; | ||
42 | u32 div, duty; | ||
43 | |||
44 | div = nvif_rd32(device, 0x61c880) & 0x00ffffff; | ||
45 | duty = nvif_rd32(device, 0x61c884) & 0x00ffffff; | ||
46 | |||
47 | if (div > 0) | ||
48 | return duty * LED_FULL / div; | ||
49 | else | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | static void | ||
54 | nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value) | ||
55 | { | ||
56 | struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev; | ||
57 | struct nouveau_drm *drm = nouveau_drm(drm_dev); | ||
58 | struct nvif_object *device = &drm->device.object; | ||
59 | |||
60 | u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */ | ||
61 | u32 freq = 100; /* this is what nvidia uses and it should be good-enough */ | ||
62 | u32 div, duty; | ||
63 | |||
64 | div = input_clk / freq; | ||
65 | duty = value * div / LED_FULL; | ||
66 | |||
67 | /* for now, this is safe to directly poke those registers because: | ||
68 | * - A: nvidia never puts the logo led to any other PWM controller | ||
69 | * than PDISPLAY.SOR[1].PWM. | ||
70 | * - B: nouveau does not touch these registers anywhere else | ||
71 | */ | ||
72 | nvif_wr32(device, 0x61c880, div); | ||
73 | nvif_wr32(device, 0x61c884, 0xc0000000 | duty); | ||
74 | } | ||
75 | |||
76 | |||
77 | int | ||
78 | nouveau_led_init(struct drm_device *dev) | ||
79 | { | ||
80 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
81 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); | ||
82 | struct dcb_gpio_func logo_led; | ||
83 | int ret; | ||
84 | |||
85 | if (!gpio) | ||
86 | return 0; | ||
87 | |||
88 | /* check that there is a GPIO controlling the logo LED */ | ||
89 | if (nvkm_gpio_find(gpio, 0, DCB_GPIO_LOGO_LED_PWM, 0xff, &logo_led)) | ||
90 | return 0; | ||
91 | |||
92 | drm->led = kzalloc(sizeof(*drm->led), GFP_KERNEL); | ||
93 | if (!drm->led) | ||
94 | return -ENOMEM; | ||
95 | drm->led->dev = dev; | ||
96 | |||
97 | drm->led->led.name = "nvidia-logo"; | ||
98 | drm->led->led.max_brightness = 255; | ||
99 | drm->led->led.brightness_get = nouveau_led_get_brightness; | ||
100 | drm->led->led.brightness_set = nouveau_led_set_brightness; | ||
101 | |||
102 | ret = led_classdev_register(dev->dev, &drm->led->led); | ||
103 | if (ret) { | ||
104 | kfree(drm->led); | ||
105 | return ret; | ||
106 | } | ||
107 | |||
108 | return 0; | ||
109 | } | ||
110 | |||
111 | void | ||
112 | nouveau_led_suspend(struct drm_device *dev) | ||
113 | { | ||
114 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
115 | |||
116 | if (drm->led) | ||
117 | led_classdev_suspend(&drm->led->led); | ||
118 | } | ||
119 | |||
120 | void | ||
121 | nouveau_led_resume(struct drm_device *dev) | ||
122 | { | ||
123 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
124 | |||
125 | if (drm->led) | ||
126 | led_classdev_resume(&drm->led->led); | ||
127 | } | ||
128 | |||
129 | void | ||
130 | nouveau_led_fini(struct drm_device *dev) | ||
131 | { | ||
132 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
133 | |||
134 | if (drm->led) { | ||
135 | led_classdev_unregister(&drm->led->led); | ||
136 | kfree(drm->led); | ||
137 | drm->led = NULL; | ||
138 | } | ||
139 | } | ||
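Brightness control is nothing more than a divider/duty pair on the SOR[1] PWM: the divider fixes the period (27 MHz crystal, 100 Hz target) and the duty register scales linearly with the requested brightness. The arithmetic in isolation (illustrative only):

	#include <linux/leds.h>

	/* Illustrative only: brightness (0..LED_FULL) -> divider/duty pair,
	 * mirroring the conversion done in nouveau_led_set_brightness(). */
	static void logo_led_pwm(enum led_brightness value, u32 *div, u32 *duty)
	{
		*div  = 27000000 / 100;			/* 270000 input clocks per period */
		*duty = value * *div / LED_FULL;	/* e.g. 128 -> roughly 50% duty */
	}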
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h new file mode 100644 index 000000000000..187ecdb82002 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_led.h | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * Copyright 2015 Martin Peres | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Martin Peres <martin.peres@free.fr> | ||
23 | */ | ||
24 | |||
25 | #ifndef __NOUVEAU_LED_H__ | ||
26 | #define __NOUVEAU_LED_H__ | ||
27 | |||
28 | #include "nouveau_drv.h" | ||
29 | |||
30 | struct led_classdev; | ||
31 | |||
32 | struct nouveau_led { | ||
33 | struct drm_device *dev; | ||
34 | |||
35 | struct led_classdev led; | ||
36 | }; | ||
37 | |||
38 | static inline struct nouveau_led * | ||
39 | nouveau_led(struct drm_device *dev) | ||
40 | { | ||
41 | return nouveau_drm(dev)->led; | ||
42 | } | ||
43 | |||
44 | /* nouveau_led.c */ | ||
45 | #if IS_ENABLED(CONFIG_LEDS_CLASS) | ||
46 | int nouveau_led_init(struct drm_device *dev); | ||
47 | void nouveau_led_suspend(struct drm_device *dev); | ||
48 | void nouveau_led_resume(struct drm_device *dev); | ||
49 | void nouveau_led_fini(struct drm_device *dev); | ||
50 | #else | ||
51 | static inline int nouveau_led_init(struct drm_device *dev) { return 0; }; | ||
52 | static inline void nouveau_led_suspend(struct drm_device *dev) { }; | ||
53 | static inline void nouveau_led_resume(struct drm_device *dev) { }; | ||
54 | static inline void nouveau_led_fini(struct drm_device *dev) { }; | ||
55 | #endif | ||
56 | |||
57 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c index 1915b7b82a59..fa8f2375c398 100644 --- a/drivers/gpu/drm/nouveau/nv04_fence.c +++ b/drivers/gpu/drm/nouveau/nv04_fence.c | |||
@@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm) | |||
110 | priv->base.context_new = nv04_fence_context_new; | 110 | priv->base.context_new = nv04_fence_context_new; |
111 | priv->base.context_del = nv04_fence_context_del; | 111 | priv->base.context_del = nv04_fence_context_del; |
112 | priv->base.contexts = 15; | 112 | priv->base.contexts = 15; |
113 | priv->base.context_base = fence_context_alloc(priv->base.contexts); | 113 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c index 4e3de34ff6f4..f99fcf56928a 100644 --- a/drivers/gpu/drm/nouveau/nv10_fence.c +++ b/drivers/gpu/drm/nouveau/nv10_fence.c | |||
@@ -107,7 +107,7 @@ nv10_fence_create(struct nouveau_drm *drm) | |||
107 | priv->base.context_new = nv10_fence_context_new; | 107 | priv->base.context_new = nv10_fence_context_new; |
108 | priv->base.context_del = nv10_fence_context_del; | 108 | priv->base.context_del = nv10_fence_context_del; |
109 | priv->base.contexts = 31; | 109 | priv->base.contexts = 31; |
110 | priv->base.context_base = fence_context_alloc(priv->base.contexts); | 110 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
111 | spin_lock_init(&priv->lock); | 111 | spin_lock_init(&priv->lock); |
112 | return 0; | 112 | return 0; |
113 | } | 113 | } |
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 7d5e562a55c5..79bc01111351 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c | |||
@@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm) | |||
126 | priv->base.context_new = nv17_fence_context_new; | 126 | priv->base.context_new = nv17_fence_context_new; |
127 | priv->base.context_del = nv10_fence_context_del; | 127 | priv->base.context_del = nv10_fence_context_del; |
128 | priv->base.contexts = 31; | 128 | priv->base.contexts = 31; |
129 | priv->base.context_base = fence_context_alloc(priv->base.contexts); | 129 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
130 | spin_lock_init(&priv->lock); | 130 | spin_lock_init(&priv->lock); |
131 | 131 | ||
132 | ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 132 | ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index 4d6f202b7770..8c5295414578 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c | |||
@@ -97,7 +97,7 @@ nv50_fence_create(struct nouveau_drm *drm) | |||
97 | priv->base.context_new = nv50_fence_context_new; | 97 | priv->base.context_new = nv50_fence_context_new; |
98 | priv->base.context_del = nv10_fence_context_del; | 98 | priv->base.context_del = nv10_fence_context_del; |
99 | priv->base.contexts = 127; | 99 | priv->base.contexts = 127; |
100 | priv->base.context_base = fence_context_alloc(priv->base.contexts); | 100 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
101 | spin_lock_init(&priv->lock); | 101 | spin_lock_init(&priv->lock); |
102 | 102 | ||
103 | ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 103 | ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 18bde9d8e6d6..23ef04b4e0b2 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c | |||
@@ -229,7 +229,7 @@ nv84_fence_create(struct nouveau_drm *drm) | |||
229 | priv->base.context_del = nv84_fence_context_del; | 229 | priv->base.context_del = nv84_fence_context_del; |
230 | 230 | ||
231 | priv->base.contexts = fifo->nr; | 231 | priv->base.contexts = fifo->nr; |
232 | priv->base.context_base = fence_context_alloc(priv->base.contexts); | 232 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
233 | priv->base.uevent = true; | 233 | priv->base.uevent = true; |
234 | 234 | ||
235 | /* Use VRAM if there is any ; otherwise fallback to system memory */ | 235 | /* Use VRAM if there is any ; otherwise fallback to system memory */ |
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c index 34ecd4a7e0c1..058ff46b5f16 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c | |||
@@ -20,6 +20,7 @@ | |||
20 | * DEALINGS IN THE SOFTWARE. | 20 | * DEALINGS IN THE SOFTWARE. |
21 | */ | 21 | */ |
22 | #include <core/device.h> | 22 | #include <core/device.h> |
23 | #include <core/firmware.h> | ||
23 | 24 | ||
24 | /** | 25 | /** |
25 | * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory | 26 | * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 7218a067a6c5..53d171729353 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | |||
@@ -1357,7 +1357,7 @@ nvc0_chipset = { | |||
1357 | .pmu = gf100_pmu_new, | 1357 | .pmu = gf100_pmu_new, |
1358 | .therm = gt215_therm_new, | 1358 | .therm = gt215_therm_new, |
1359 | .timer = nv41_timer_new, | 1359 | .timer = nv41_timer_new, |
1360 | .volt = nv40_volt_new, | 1360 | .volt = gf100_volt_new, |
1361 | .ce[0] = gf100_ce_new, | 1361 | .ce[0] = gf100_ce_new, |
1362 | .ce[1] = gf100_ce_new, | 1362 | .ce[1] = gf100_ce_new, |
1363 | .disp = gt215_disp_new, | 1363 | .disp = gt215_disp_new, |
@@ -1394,7 +1394,7 @@ nvc1_chipset = { | |||
1394 | .pmu = gf100_pmu_new, | 1394 | .pmu = gf100_pmu_new, |
1395 | .therm = gt215_therm_new, | 1395 | .therm = gt215_therm_new, |
1396 | .timer = nv41_timer_new, | 1396 | .timer = nv41_timer_new, |
1397 | .volt = nv40_volt_new, | 1397 | .volt = gf100_volt_new, |
1398 | .ce[0] = gf100_ce_new, | 1398 | .ce[0] = gf100_ce_new, |
1399 | .disp = gt215_disp_new, | 1399 | .disp = gt215_disp_new, |
1400 | .dma = gf100_dma_new, | 1400 | .dma = gf100_dma_new, |
@@ -1430,7 +1430,7 @@ nvc3_chipset = { | |||
1430 | .pmu = gf100_pmu_new, | 1430 | .pmu = gf100_pmu_new, |
1431 | .therm = gt215_therm_new, | 1431 | .therm = gt215_therm_new, |
1432 | .timer = nv41_timer_new, | 1432 | .timer = nv41_timer_new, |
1433 | .volt = nv40_volt_new, | 1433 | .volt = gf100_volt_new, |
1434 | .ce[0] = gf100_ce_new, | 1434 | .ce[0] = gf100_ce_new, |
1435 | .disp = gt215_disp_new, | 1435 | .disp = gt215_disp_new, |
1436 | .dma = gf100_dma_new, | 1436 | .dma = gf100_dma_new, |
@@ -1466,7 +1466,7 @@ nvc4_chipset = { | |||
1466 | .pmu = gf100_pmu_new, | 1466 | .pmu = gf100_pmu_new, |
1467 | .therm = gt215_therm_new, | 1467 | .therm = gt215_therm_new, |
1468 | .timer = nv41_timer_new, | 1468 | .timer = nv41_timer_new, |
1469 | .volt = nv40_volt_new, | 1469 | .volt = gf100_volt_new, |
1470 | .ce[0] = gf100_ce_new, | 1470 | .ce[0] = gf100_ce_new, |
1471 | .ce[1] = gf100_ce_new, | 1471 | .ce[1] = gf100_ce_new, |
1472 | .disp = gt215_disp_new, | 1472 | .disp = gt215_disp_new, |
@@ -1503,7 +1503,7 @@ nvc8_chipset = { | |||
1503 | .pmu = gf100_pmu_new, | 1503 | .pmu = gf100_pmu_new, |
1504 | .therm = gt215_therm_new, | 1504 | .therm = gt215_therm_new, |
1505 | .timer = nv41_timer_new, | 1505 | .timer = nv41_timer_new, |
1506 | .volt = nv40_volt_new, | 1506 | .volt = gf100_volt_new, |
1507 | .ce[0] = gf100_ce_new, | 1507 | .ce[0] = gf100_ce_new, |
1508 | .ce[1] = gf100_ce_new, | 1508 | .ce[1] = gf100_ce_new, |
1509 | .disp = gt215_disp_new, | 1509 | .disp = gt215_disp_new, |
@@ -1540,7 +1540,7 @@ nvce_chipset = { | |||
1540 | .pmu = gf100_pmu_new, | 1540 | .pmu = gf100_pmu_new, |
1541 | .therm = gt215_therm_new, | 1541 | .therm = gt215_therm_new, |
1542 | .timer = nv41_timer_new, | 1542 | .timer = nv41_timer_new, |
1543 | .volt = nv40_volt_new, | 1543 | .volt = gf100_volt_new, |
1544 | .ce[0] = gf100_ce_new, | 1544 | .ce[0] = gf100_ce_new, |
1545 | .ce[1] = gf100_ce_new, | 1545 | .ce[1] = gf100_ce_new, |
1546 | .disp = gt215_disp_new, | 1546 | .disp = gt215_disp_new, |
@@ -1577,7 +1577,7 @@ nvcf_chipset = { | |||
1577 | .pmu = gf100_pmu_new, | 1577 | .pmu = gf100_pmu_new, |
1578 | .therm = gt215_therm_new, | 1578 | .therm = gt215_therm_new, |
1579 | .timer = nv41_timer_new, | 1579 | .timer = nv41_timer_new, |
1580 | .volt = nv40_volt_new, | 1580 | .volt = gf100_volt_new, |
1581 | .ce[0] = gf100_ce_new, | 1581 | .ce[0] = gf100_ce_new, |
1582 | .disp = gt215_disp_new, | 1582 | .disp = gt215_disp_new, |
1583 | .dma = gf100_dma_new, | 1583 | .dma = gf100_dma_new, |
@@ -1612,6 +1612,7 @@ nvd7_chipset = { | |||
1612 | .pci = gf106_pci_new, | 1612 | .pci = gf106_pci_new, |
1613 | .therm = gf119_therm_new, | 1613 | .therm = gf119_therm_new, |
1614 | .timer = nv41_timer_new, | 1614 | .timer = nv41_timer_new, |
1615 | .volt = gf100_volt_new, | ||
1615 | .ce[0] = gf100_ce_new, | 1616 | .ce[0] = gf100_ce_new, |
1616 | .disp = gf119_disp_new, | 1617 | .disp = gf119_disp_new, |
1617 | .dma = gf119_dma_new, | 1618 | .dma = gf119_dma_new, |
@@ -1647,7 +1648,7 @@ nvd9_chipset = { | |||
1647 | .pmu = gf119_pmu_new, | 1648 | .pmu = gf119_pmu_new, |
1648 | .therm = gf119_therm_new, | 1649 | .therm = gf119_therm_new, |
1649 | .timer = nv41_timer_new, | 1650 | .timer = nv41_timer_new, |
1650 | .volt = nv40_volt_new, | 1651 | .volt = gf100_volt_new, |
1651 | .ce[0] = gf100_ce_new, | 1652 | .ce[0] = gf100_ce_new, |
1652 | .disp = gf119_disp_new, | 1653 | .disp = gf119_disp_new, |
1653 | .dma = gf119_dma_new, | 1654 | .dma = gf119_dma_new, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index 62ad0300cfa5..0030cd9543b2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | |||
@@ -1665,14 +1665,31 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg, | |||
1665 | *pdevice = &pdev->device; | 1665 | *pdevice = &pdev->device; |
1666 | pdev->pdev = pci_dev; | 1666 | pdev->pdev = pci_dev; |
1667 | 1667 | ||
1668 | return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev, | 1668 | ret = nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev, |
1669 | pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE : | 1669 | pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE : |
1670 | pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ? | 1670 | pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ? |
1671 | NVKM_DEVICE_AGP : NVKM_DEVICE_PCI, | 1671 | NVKM_DEVICE_AGP : NVKM_DEVICE_PCI, |
1672 | (u64)pci_domain_nr(pci_dev->bus) << 32 | | 1672 | (u64)pci_domain_nr(pci_dev->bus) << 32 | |
1673 | pci_dev->bus->number << 16 | | 1673 | pci_dev->bus->number << 16 | |
1674 | PCI_SLOT(pci_dev->devfn) << 8 | | 1674 | PCI_SLOT(pci_dev->devfn) << 8 | |
1675 | PCI_FUNC(pci_dev->devfn), name, | 1675 | PCI_FUNC(pci_dev->devfn), name, |
1676 | cfg, dbg, detect, mmio, subdev_mask, | 1676 | cfg, dbg, detect, mmio, subdev_mask, |
1677 | &pdev->device); | 1677 | &pdev->device); |
1678 | |||
1679 | if (ret) | ||
1680 | return ret; | ||
1681 | |||
1682 | /* | ||
1683 | * Set a preliminary DMA mask based on the .dma_bits member of the | ||
1684 | * MMU subdevice. This allows other subdevices to create DMA mappings | ||
1685 | * in their init() or oneinit() methods, which may be called before the | ||
1686 | * TTM layer sets the DMA mask definitively. | ||
1687 | * This is necessary for platforms where the default DMA mask of 32 | ||
1688 | * does not cover any system memory, i.e., when all RAM is > 4 GB. | ||
1689 | */ | ||
1690 | if (subdev_mask & BIT(NVKM_SUBDEV_MMU)) | ||
1691 | dma_set_mask_and_coherent(&pci_dev->dev, | ||
1692 | DMA_BIT_MASK(pdev->device.mmu->dma_bits)); | ||
1693 | |||
1694 | return 0; | ||
1678 | } | 1695 | } |
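dma_set_mask_and_coherent() widens both the streaming and the coherent DMA mask in one call, which is why a single call here is enough to let early subdevice mappings land above 4 GiB. The call in isolation (the 40-bit value in the comment is an assumption for illustration):

	#include <linux/dma-mapping.h>

	/* Illustrative only: raise both DMA masks to the MMU's addressing width. */
	static int set_preliminary_dma_mask(struct device *dev, u8 dma_bits)
	{
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_bits));	/* e.g. 40 */
	}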
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c index 1bb9d661e9b3..4510cb6e10a8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c | |||
@@ -45,14 +45,6 @@ static const struct nvkm_output_func | |||
45 | g94_sor_output_func = { | 45 | g94_sor_output_func = { |
46 | }; | 46 | }; |
47 | 47 | ||
48 | int | ||
49 | g94_sor_output_new(struct nvkm_disp *disp, int index, | ||
50 | struct dcb_output *dcbE, struct nvkm_output **poutp) | ||
51 | { | ||
52 | return nvkm_output_new_(&g94_sor_output_func, disp, | ||
53 | index, dcbE, poutp); | ||
54 | } | ||
55 | |||
56 | /******************************************************************************* | 48 | /******************************************************************************* |
57 | * DisplayPort | 49 | * DisplayPort |
58 | ******************************************************************************/ | 50 | ******************************************************************************/ |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c index f1e15a4d4f64..b4e3c50badc7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c | |||
@@ -187,6 +187,7 @@ nv30_gr = { | |||
187 | { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */ | 187 | { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */ |
188 | { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */ | 188 | { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */ |
189 | { -1, -1, 0x0397, &nv04_gr_object }, /* rankine */ | 189 | { -1, -1, 0x0397, &nv04_gr_object }, /* rankine */ |
190 | { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */ | ||
190 | {} | 191 | {} |
191 | } | 192 | } |
192 | }; | 193 | }; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c index 300f5ed5de0b..e7ed04b935cd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c | |||
@@ -123,6 +123,7 @@ nv34_gr = { | |||
123 | { -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */ | 123 | { -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */ |
124 | { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */ | 124 | { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */ |
125 | { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */ | 125 | { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */ |
126 | { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */ | ||
126 | { -1, -1, 0x0697, &nv04_gr_object }, /* rankine */ | 127 | { -1, -1, 0x0697, &nv04_gr_object }, /* rankine */ |
127 | {} | 128 | {} |
128 | } | 129 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c index 740df0f52c38..5e8abacbacc6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c | |||
@@ -124,6 +124,7 @@ nv35_gr = { | |||
124 | { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */ | 124 | { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */ |
125 | { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */ | 125 | { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */ |
126 | { -1, -1, 0x0497, &nv04_gr_object }, /* rankine */ | 126 | { -1, -1, 0x0497, &nv04_gr_object }, /* rankine */ |
127 | { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */ | ||
127 | {} | 128 | {} |
128 | } | 129 | } |
129 | }; | 130 | }; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c index 370dcd8ff7b5..6eff637ac301 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c | |||
@@ -84,7 +84,7 @@ nv50_bar_oneinit(struct nvkm_bar *base) | |||
84 | start = 0x0100000000ULL; | 84 | start = 0x0100000000ULL; |
85 | limit = start + device->func->resource_size(device, 3); | 85 | limit = start + device->func->resource_size(device, 3); |
86 | 86 | ||
87 | ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm); | 87 | ret = nvkm_vm_new(device, start, limit - start, start, &bar3_lock, &vm); |
88 | if (ret) | 88 | if (ret) |
89 | return ret; | 89 | return ret; |
90 | 90 | ||
@@ -117,7 +117,7 @@ nv50_bar_oneinit(struct nvkm_bar *base) | |||
117 | start = 0x0000000000ULL; | 117 | start = 0x0000000000ULL; |
118 | limit = start + device->func->resource_size(device, 1); | 118 | limit = start + device->func->resource_size(device, 1); |
119 | 119 | ||
120 | ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm); | 120 | ret = nvkm_vm_new(device, start, limit-- - start, start, &bar1_lock, &vm); |
121 | if (ret) | 121 | if (ret) |
122 | return ret; | 122 | return ret; |
123 | 123 | ||
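The two nv50_bar_oneinit() hunks above stop passing the BAR end address as the third argument of nvkm_vm_new() and pass the window size (limit - start) instead; judging from the fix, that parameter is a length, so the old code set up a VM spanning start + size bytes from the window base. A minimal standalone sketch of the arithmetic; the 0x0100000000 base comes from the hunk, while the 256 MiB resource size is only an illustrative assumption:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t start = 0x0100000000ULL;   /* BAR3 window base, from the hunk */
        uint64_t size  = 256ULL << 20;      /* assumed resource_size(device, 3) */
        uint64_t limit = start + size;

        /* the old call handed 'limit' to nvkm_vm_new() as the length */
        printf("old length: 0x%llx\n", (unsigned long long)limit);
        /* the fixed call hands over 'limit - start', i.e. exactly the BAR size */
        printf("new length: 0x%llx\n", (unsigned long long)(limit - start));
        return 0;
    }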
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild index dbcb0ef21587..be57220a2e01 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild | |||
@@ -31,6 +31,7 @@ nvkm-y += nvkm/subdev/bios/timing.o | |||
31 | nvkm-y += nvkm/subdev/bios/therm.o | 31 | nvkm-y += nvkm/subdev/bios/therm.o |
32 | nvkm-y += nvkm/subdev/bios/vmap.o | 32 | nvkm-y += nvkm/subdev/bios/vmap.o |
33 | nvkm-y += nvkm/subdev/bios/volt.o | 33 | nvkm-y += nvkm/subdev/bios/volt.o |
34 | nvkm-y += nvkm/subdev/bios/vpstate.o | ||
34 | nvkm-y += nvkm/subdev/bios/xpio.o | 35 | nvkm-y += nvkm/subdev/bios/xpio.o |
35 | nvkm-y += nvkm/subdev/bios/M0203.o | 36 | nvkm-y += nvkm/subdev/bios/M0203.o |
36 | nvkm-y += nvkm/subdev/bios/M0205.o | 37 | nvkm-y += nvkm/subdev/bios/M0205.o |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c index 084328028af1..aafd5e17b1c8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c | |||
@@ -23,6 +23,7 @@ | |||
23 | */ | 23 | */ |
24 | #include <subdev/bios.h> | 24 | #include <subdev/bios.h> |
25 | #include <subdev/bios/bit.h> | 25 | #include <subdev/bios/bit.h> |
26 | #include <subdev/bios/extdev.h> | ||
26 | #include <subdev/bios/iccsense.h> | 27 | #include <subdev/bios/iccsense.h> |
27 | 28 | ||
28 | static u16 | 29 | static u16 |
@@ -77,23 +78,47 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense) | |||
77 | return -ENOMEM; | 78 | return -ENOMEM; |
78 | 79 | ||
79 | for (i = 0; i < cnt; ++i) { | 80 | for (i = 0; i < cnt; ++i) { |
81 | struct nvbios_extdev_func extdev; | ||
80 | struct pwr_rail_t *rail = &iccsense->rail[i]; | 82 | struct pwr_rail_t *rail = &iccsense->rail[i]; |
83 | u8 res_start = 0; | ||
84 | int r; | ||
85 | |||
81 | entry = table + hdr + i * len; | 86 | entry = table + hdr + i * len; |
82 | 87 | ||
83 | switch(ver) { | 88 | switch(ver) { |
84 | case 0x10: | 89 | case 0x10: |
85 | rail->mode = nvbios_rd08(bios, entry + 0x1); | 90 | rail->mode = nvbios_rd08(bios, entry + 0x1); |
86 | rail->extdev_id = nvbios_rd08(bios, entry + 0x2); | 91 | rail->extdev_id = nvbios_rd08(bios, entry + 0x2); |
87 | rail->resistor_mohm = nvbios_rd08(bios, entry + 0x3); | 92 | res_start = 0x3; |
88 | rail->rail = nvbios_rd08(bios, entry + 0x4); | ||
89 | break; | 93 | break; |
90 | case 0x20: | 94 | case 0x20: |
91 | rail->mode = nvbios_rd08(bios, entry); | 95 | rail->mode = nvbios_rd08(bios, entry); |
92 | rail->extdev_id = nvbios_rd08(bios, entry + 0x1); | 96 | rail->extdev_id = nvbios_rd08(bios, entry + 0x1); |
93 | rail->resistor_mohm = nvbios_rd08(bios, entry + 0x5); | 97 | res_start = 0x5; |
94 | rail->rail = nvbios_rd08(bios, entry + 0x6); | 98 | break; |
99 | }; | ||
100 | |||
101 | if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev)) | ||
102 | continue; | ||
103 | |||
104 | switch (extdev.type) { | ||
105 | case NVBIOS_EXTDEV_INA209: | ||
106 | case NVBIOS_EXTDEV_INA219: | ||
107 | rail->resistor_count = 1; | ||
108 | break; | ||
109 | case NVBIOS_EXTDEV_INA3221: | ||
110 | rail->resistor_count = 3; | ||
111 | break; | ||
112 | default: | ||
113 | rail->resistor_count = 0; | ||
95 | break; | 114 | break; |
96 | }; | 115 | }; |
116 | |||
117 | for (r = 0; r < rail->resistor_count; ++r) { | ||
118 | rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2); | ||
119 | rail->resistors[r].enabled = !(nvbios_rd08(bios, entry + res_start + r * 2 + 1) & 0x40); | ||
120 | } | ||
121 | rail->config = nvbios_rd16(bios, entry + res_start + rail->resistor_count * 2); | ||
97 | } | 122 | } |
98 | 123 | ||
99 | return 0; | 124 | return 0; |
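The rewritten loop in nvbios_iccsense_parse() above no longer reads a single shunt value per rail: the resistor records start at byte 0x3 (table version 0x10) or 0x5 (version 0x20), their count is derived from the external-device type (one for INA209/INA219, three for INA3221), each record is two bytes (milliohms, then a flag byte where bit 0x40 marks the shunt disabled), and a 16-bit config word follows the records. A small userspace sketch of that layout; the byte offsets mirror the hunk, while the sample entry bytes are made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* fabricated version 0x20 rail entry: mode, extdev_id, three padding bytes,
         * then (mohm, flags) pairs for an INA3221 and a little-endian config word */
        uint8_t entry[] = { 0x01, 0x40, 0, 0, 0,
                            5, 0x00,    /* resistor 0: 5 mOhm, enabled  */
                            5, 0x40,    /* resistor 1: 5 mOhm, disabled */
                            5, 0x00,    /* resistor 2: 5 mOhm, enabled  */
                            0x07, 0x0e  /* config word                  */ };
        const unsigned res_start = 0x5; /* version 0x20, as in the hunk */
        const unsigned count = 3;       /* INA3221 -> three shunts      */

        for (unsigned r = 0; r < count; r++) {
            unsigned mohm   = entry[res_start + r * 2];
            int     enabled = !(entry[res_start + r * 2 + 1] & 0x40);
            printf("resistor %u: %u mOhm, %s\n", r, mohm,
                   enabled ? "enabled" : "disabled");
        }
        unsigned config = entry[res_start + count * 2] |
                          (entry[res_start + count * 2 + 1] << 8);
        printf("config: 0x%04x\n", config);
        return 0;
    }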
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c index 2f13db745948..32bd8b1d154f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c | |||
@@ -61,7 +61,17 @@ nvbios_vmap_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, | |||
61 | memset(info, 0x00, sizeof(*info)); | 61 | memset(info, 0x00, sizeof(*info)); |
62 | switch (!!vmap * *ver) { | 62 | switch (!!vmap * *ver) { |
63 | case 0x10: | 63 | case 0x10: |
64 | info->max0 = 0xff; | ||
65 | info->max1 = 0xff; | ||
66 | info->max2 = 0xff; | ||
67 | break; | ||
64 | case 0x20: | 68 | case 0x20: |
69 | info->max0 = nvbios_rd08(bios, vmap + 0x7); | ||
70 | info->max1 = nvbios_rd08(bios, vmap + 0x8); | ||
71 | if (*len >= 0xc) | ||
72 | info->max2 = nvbios_rd08(bios, vmap + 0xc); | ||
73 | else | ||
74 | info->max2 = 0xff; | ||
65 | break; | 75 | break; |
66 | } | 76 | } |
67 | return vmap; | 77 | return vmap; |
@@ -95,7 +105,7 @@ nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len, | |||
95 | info->arg[2] = nvbios_rd32(bios, vmap + 0x10); | 105 | info->arg[2] = nvbios_rd32(bios, vmap + 0x10); |
96 | break; | 106 | break; |
97 | case 0x20: | 107 | case 0x20: |
98 | info->unk0 = nvbios_rd08(bios, vmap + 0x00); | 108 | info->mode = nvbios_rd08(bios, vmap + 0x00); |
99 | info->link = nvbios_rd08(bios, vmap + 0x01); | 109 | info->link = nvbios_rd08(bios, vmap + 0x01); |
100 | info->min = nvbios_rd32(bios, vmap + 0x02); | 110 | info->min = nvbios_rd32(bios, vmap + 0x02); |
101 | info->max = nvbios_rd32(bios, vmap + 0x06); | 111 | info->max = nvbios_rd32(bios, vmap + 0x06); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c index 6e0a33648be9..4504822ace51 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c | |||
@@ -75,20 +75,24 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, | |||
75 | case 0x12: | 75 | case 0x12: |
76 | info->type = NVBIOS_VOLT_GPIO; | 76 | info->type = NVBIOS_VOLT_GPIO; |
77 | info->vidmask = nvbios_rd08(bios, volt + 0x04); | 77 | info->vidmask = nvbios_rd08(bios, volt + 0x04); |
78 | info->ranged = false; | ||
78 | break; | 79 | break; |
79 | case 0x20: | 80 | case 0x20: |
80 | info->type = NVBIOS_VOLT_GPIO; | 81 | info->type = NVBIOS_VOLT_GPIO; |
81 | info->vidmask = nvbios_rd08(bios, volt + 0x05); | 82 | info->vidmask = nvbios_rd08(bios, volt + 0x05); |
83 | info->ranged = false; | ||
82 | break; | 84 | break; |
83 | case 0x30: | 85 | case 0x30: |
84 | info->type = NVBIOS_VOLT_GPIO; | 86 | info->type = NVBIOS_VOLT_GPIO; |
85 | info->vidmask = nvbios_rd08(bios, volt + 0x04); | 87 | info->vidmask = nvbios_rd08(bios, volt + 0x04); |
88 | info->ranged = false; | ||
86 | break; | 89 | break; |
87 | case 0x40: | 90 | case 0x40: |
88 | info->type = NVBIOS_VOLT_GPIO; | 91 | info->type = NVBIOS_VOLT_GPIO; |
89 | info->base = nvbios_rd32(bios, volt + 0x04); | 92 | info->base = nvbios_rd32(bios, volt + 0x04); |
90 | info->step = nvbios_rd16(bios, volt + 0x08); | 93 | info->step = nvbios_rd16(bios, volt + 0x08); |
91 | info->vidmask = nvbios_rd08(bios, volt + 0x0b); | 94 | info->vidmask = nvbios_rd08(bios, volt + 0x0b); |
95 | info->ranged = true; /* XXX: find the flag byte */ | ||
92 | /*XXX*/ | 96 | /*XXX*/ |
93 | info->min = 0; | 97 | info->min = 0; |
94 | info->max = info->base; | 98 | info->max = info->base; |
@@ -104,9 +108,11 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, | |||
104 | info->pwm_freq = nvbios_rd32(bios, volt + 0x5) / 1000; | 108 | info->pwm_freq = nvbios_rd32(bios, volt + 0x5) / 1000; |
105 | info->pwm_range = nvbios_rd32(bios, volt + 0x16); | 109 | info->pwm_range = nvbios_rd32(bios, volt + 0x16); |
106 | } else { | 110 | } else { |
107 | info->type = NVBIOS_VOLT_GPIO; | 111 | info->type = NVBIOS_VOLT_GPIO; |
108 | info->vidmask = nvbios_rd08(bios, volt + 0x06); | 112 | info->vidmask = nvbios_rd08(bios, volt + 0x06); |
109 | info->step = nvbios_rd16(bios, volt + 0x16); | 113 | info->step = nvbios_rd16(bios, volt + 0x16); |
114 | info->ranged = | ||
115 | !!(nvbios_rd08(bios, volt + 0x4) & 0x2); | ||
110 | } | 116 | } |
111 | break; | 117 | break; |
112 | } | 118 | } |
@@ -142,7 +148,10 @@ nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len, | |||
142 | info->vid = nvbios_rd08(bios, volt + 0x01) >> 2; | 148 | info->vid = nvbios_rd08(bios, volt + 0x01) >> 2; |
143 | break; | 149 | break; |
144 | case 0x40: | 150 | case 0x40: |
151 | break; | ||
145 | case 0x50: | 152 | case 0x50: |
153 | info->voltage = nvbios_rd32(bios, volt) & 0x001fffff; | ||
154 | info->vid = (nvbios_rd32(bios, volt) >> 23) & 0xff; | ||
146 | break; | 155 | break; |
147 | } | 156 | } |
148 | return volt; | 157 | return volt; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c new file mode 100644 index 000000000000..f199270163d2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * Copyright 2016 Karol Herbst | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Karol Herbst | ||
23 | */ | ||
24 | #include <subdev/bios.h> | ||
25 | #include <subdev/bios/bit.h> | ||
26 | #include <subdev/bios/vpstate.h> | ||
27 | |||
28 | static u32 | ||
29 | nvbios_vpstate_offset(struct nvkm_bios *b) | ||
30 | { | ||
31 | struct bit_entry bit_P; | ||
32 | |||
33 | if (!bit_entry(b, 'P', &bit_P)) { | ||
34 | if (bit_P.version == 2) | ||
35 | return nvbios_rd32(b, bit_P.offset + 0x38); | ||
36 | } | ||
37 | |||
38 | return 0x0000; | ||
39 | } | ||
40 | |||
41 | int | ||
42 | nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h) | ||
43 | { | ||
44 | if (!h) | ||
45 | return -EINVAL; | ||
46 | |||
47 | h->offset = nvbios_vpstate_offset(b); | ||
48 | if (!h->offset) | ||
49 | return -ENODEV; | ||
50 | |||
51 | h->version = nvbios_rd08(b, h->offset); | ||
52 | switch (h->version) { | ||
53 | case 0x10: | ||
54 | h->hlen = nvbios_rd08(b, h->offset + 0x1); | ||
55 | h->elen = nvbios_rd08(b, h->offset + 0x2); | ||
56 | h->slen = nvbios_rd08(b, h->offset + 0x3); | ||
57 | h->scount = nvbios_rd08(b, h->offset + 0x4); | ||
58 | h->ecount = nvbios_rd08(b, h->offset + 0x5); | ||
59 | |||
60 | h->base_id = nvbios_rd08(b, h->offset + 0x0f); | ||
61 | h->boost_id = nvbios_rd08(b, h->offset + 0x10); | ||
62 | h->tdp_id = nvbios_rd08(b, h->offset + 0x11); | ||
63 | return 0; | ||
64 | default: | ||
65 | return -EINVAL; | ||
66 | } | ||
67 | } | ||
68 | |||
69 | int | ||
70 | nvbios_vpstate_entry(struct nvkm_bios *b, struct nvbios_vpstate_header *h, | ||
71 | u8 idx, struct nvbios_vpstate_entry *e) | ||
72 | { | ||
73 | u32 offset; | ||
74 | |||
75 | if (!e || !h || idx > h->ecount) | ||
76 | return -EINVAL; | ||
77 | |||
78 | offset = h->offset + h->hlen + idx * (h->elen + (h->slen * h->scount)); | ||
79 | e->pstate = nvbios_rd08(b, offset); | ||
80 | e->clock_mhz = nvbios_rd16(b, offset + 0x5); | ||
81 | return 0; | ||
82 | } | ||
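In nvbios_vpstate_entry() above, entry idx lives at h->offset + h->hlen + idx * (h->elen + h->slen * h->scount), i.e. every entry is followed by scount sub-entries of slen bytes; the pstate id sits at byte 0 and the 16-bit clock (in MHz) at byte 0x5, while the header names the base, boost and TDP entries at bytes 0x0f-0x11. A short sketch of the offset arithmetic; the field widths come from the parser above, the concrete header values are only assumptions:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* assumed version 0x10 header values */
        uint32_t offset = 0x1b00;   /* table start inside the VBIOS image */
        uint32_t hlen = 0x12, elen = 0x08, slen = 0x04, scount = 0x03;

        for (uint32_t idx = 0; idx < 3; idx++) {
            uint32_t e = offset + hlen + idx * (elen + slen * scount);
            printf("entry %u at 0x%x (pstate at +0x0, clock_mhz at +0x5)\n",
                   (unsigned)idx, (unsigned)e);
        }
        return 0;
    }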
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c index 7102c25320fc..fa1c12185e19 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <subdev/bios/boost.h> | 27 | #include <subdev/bios/boost.h> |
28 | #include <subdev/bios/cstep.h> | 28 | #include <subdev/bios/cstep.h> |
29 | #include <subdev/bios/perf.h> | 29 | #include <subdev/bios/perf.h> |
30 | #include <subdev/bios/vpstate.h> | ||
30 | #include <subdev/fb.h> | 31 | #include <subdev/fb.h> |
31 | #include <subdev/therm.h> | 32 | #include <subdev/therm.h> |
32 | #include <subdev/volt.h> | 33 | #include <subdev/volt.h> |
@@ -74,6 +75,88 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust, | |||
74 | /****************************************************************************** | 75 | /****************************************************************************** |
75 | * C-States | 76 | * C-States |
76 | *****************************************************************************/ | 77 | *****************************************************************************/ |
78 | static bool | ||
79 | nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate, | ||
80 | u32 max_volt, int temp) | ||
81 | { | ||
82 | const struct nvkm_domain *domain = clk->domains; | ||
83 | struct nvkm_volt *volt = clk->subdev.device->volt; | ||
84 | int voltage; | ||
85 | |||
86 | while (domain && domain->name != nv_clk_src_max) { | ||
87 | if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) { | ||
88 | u32 freq = cstate->domain[domain->name]; | ||
89 | switch (clk->boost_mode) { | ||
90 | case NVKM_CLK_BOOST_NONE: | ||
91 | if (clk->base_khz && freq > clk->base_khz) | ||
92 | return false; | ||
93 | case NVKM_CLK_BOOST_BIOS: | ||
94 | if (clk->boost_khz && freq > clk->boost_khz) | ||
95 | return false; | ||
96 | } | ||
97 | } | ||
98 | domain++; | ||
99 | } | ||
100 | |||
101 | if (!volt) | ||
102 | return true; | ||
103 | |||
104 | voltage = nvkm_volt_map(volt, cstate->voltage, temp); | ||
105 | if (voltage < 0) | ||
106 | return false; | ||
107 | return voltage <= min(max_volt, volt->max_uv); | ||
108 | } | ||
109 | |||
110 | static struct nvkm_cstate * | ||
111 | nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate, | ||
112 | struct nvkm_cstate *start) | ||
113 | { | ||
114 | struct nvkm_device *device = clk->subdev.device; | ||
115 | struct nvkm_volt *volt = device->volt; | ||
116 | struct nvkm_cstate *cstate; | ||
117 | int max_volt; | ||
118 | |||
119 | if (!pstate || !start) | ||
120 | return NULL; | ||
121 | |||
122 | if (!volt) | ||
123 | return start; | ||
124 | |||
125 | max_volt = volt->max_uv; | ||
126 | if (volt->max0_id != 0xff) | ||
127 | max_volt = min(max_volt, | ||
128 | nvkm_volt_map(volt, volt->max0_id, clk->temp)); | ||
129 | if (volt->max1_id != 0xff) | ||
130 | max_volt = min(max_volt, | ||
131 | nvkm_volt_map(volt, volt->max1_id, clk->temp)); | ||
132 | if (volt->max2_id != 0xff) | ||
133 | max_volt = min(max_volt, | ||
134 | nvkm_volt_map(volt, volt->max2_id, clk->temp)); | ||
135 | |||
136 | for (cstate = start; &cstate->head != &pstate->list; | ||
137 | cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) { | ||
138 | if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp)) | ||
139 | break; | ||
140 | } | ||
141 | |||
142 | return cstate; | ||
143 | } | ||
144 | |||
145 | static struct nvkm_cstate * | ||
146 | nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei) | ||
147 | { | ||
148 | struct nvkm_cstate *cstate; | ||
149 | if (cstatei == NVKM_CLK_CSTATE_HIGHEST) | ||
150 | return list_last_entry(&pstate->list, typeof(*cstate), head); | ||
151 | else { | ||
152 | list_for_each_entry(cstate, &pstate->list, head) { | ||
153 | if (cstate->id == cstatei) | ||
154 | return cstate; | ||
155 | } | ||
156 | } | ||
157 | return NULL; | ||
158 | } | ||
159 | |||
77 | static int | 160 | static int |
78 | nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei) | 161 | nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei) |
79 | { | 162 | { |
@@ -85,7 +168,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei) | |||
85 | int ret; | 168 | int ret; |
86 | 169 | ||
87 | if (!list_empty(&pstate->list)) { | 170 | if (!list_empty(&pstate->list)) { |
88 | cstate = list_entry(pstate->list.prev, typeof(*cstate), head); | 171 | cstate = nvkm_cstate_get(clk, pstate, cstatei); |
172 | cstate = nvkm_cstate_find_best(clk, pstate, cstate); | ||
89 | } else { | 173 | } else { |
90 | cstate = &pstate->base; | 174 | cstate = &pstate->base; |
91 | } | 175 | } |
@@ -99,7 +183,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei) | |||
99 | } | 183 | } |
100 | 184 | ||
101 | if (volt) { | 185 | if (volt) { |
102 | ret = nvkm_volt_set_id(volt, cstate->voltage, +1); | 186 | ret = nvkm_volt_set_id(volt, cstate->voltage, |
187 | pstate->base.voltage, clk->temp, +1); | ||
103 | if (ret && ret != -ENODEV) { | 188 | if (ret && ret != -ENODEV) { |
104 | nvkm_error(subdev, "failed to raise voltage: %d\n", ret); | 189 | nvkm_error(subdev, "failed to raise voltage: %d\n", ret); |
105 | return ret; | 190 | return ret; |
@@ -113,7 +198,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei) | |||
113 | } | 198 | } |
114 | 199 | ||
115 | if (volt) { | 200 | if (volt) { |
116 | ret = nvkm_volt_set_id(volt, cstate->voltage, -1); | 201 | ret = nvkm_volt_set_id(volt, cstate->voltage, |
202 | pstate->base.voltage, clk->temp, -1); | ||
117 | if (ret && ret != -ENODEV) | 203 | if (ret && ret != -ENODEV) |
118 | nvkm_error(subdev, "failed to lower voltage: %d\n", ret); | 204 | nvkm_error(subdev, "failed to lower voltage: %d\n", ret); |
119 | } | 205 | } |
@@ -138,6 +224,7 @@ static int | |||
138 | nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate) | 224 | nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate) |
139 | { | 225 | { |
140 | struct nvkm_bios *bios = clk->subdev.device->bios; | 226 | struct nvkm_bios *bios = clk->subdev.device->bios; |
227 | struct nvkm_volt *volt = clk->subdev.device->volt; | ||
141 | const struct nvkm_domain *domain = clk->domains; | 228 | const struct nvkm_domain *domain = clk->domains; |
142 | struct nvkm_cstate *cstate = NULL; | 229 | struct nvkm_cstate *cstate = NULL; |
143 | struct nvbios_cstepX cstepX; | 230 | struct nvbios_cstepX cstepX; |
@@ -148,12 +235,16 @@ nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate) | |||
148 | if (!data) | 235 | if (!data) |
149 | return -ENOENT; | 236 | return -ENOENT; |
150 | 237 | ||
238 | if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv) | ||
239 | return -EINVAL; | ||
240 | |||
151 | cstate = kzalloc(sizeof(*cstate), GFP_KERNEL); | 241 | cstate = kzalloc(sizeof(*cstate), GFP_KERNEL); |
152 | if (!cstate) | 242 | if (!cstate) |
153 | return -ENOMEM; | 243 | return -ENOMEM; |
154 | 244 | ||
155 | *cstate = pstate->base; | 245 | *cstate = pstate->base; |
156 | cstate->voltage = cstepX.voltage; | 246 | cstate->voltage = cstepX.voltage; |
247 | cstate->id = idx; | ||
157 | 248 | ||
158 | while (domain && domain->name != nv_clk_src_max) { | 249 | while (domain && domain->name != nv_clk_src_max) { |
159 | if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) { | 250 | if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) { |
@@ -175,7 +266,7 @@ static int | |||
175 | nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei) | 266 | nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei) |
176 | { | 267 | { |
177 | struct nvkm_subdev *subdev = &clk->subdev; | 268 | struct nvkm_subdev *subdev = &clk->subdev; |
178 | struct nvkm_ram *ram = subdev->device->fb->ram; | 269 | struct nvkm_fb *fb = subdev->device->fb; |
179 | struct nvkm_pci *pci = subdev->device->pci; | 270 | struct nvkm_pci *pci = subdev->device->pci; |
180 | struct nvkm_pstate *pstate; | 271 | struct nvkm_pstate *pstate; |
181 | int ret, idx = 0; | 272 | int ret, idx = 0; |
@@ -190,7 +281,8 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei) | |||
190 | 281 | ||
191 | nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width); | 282 | nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width); |
192 | 283 | ||
193 | if (ram && ram->func->calc) { | 284 | if (fb && fb->ram && fb->ram->func->calc) { |
285 | struct nvkm_ram *ram = fb->ram; | ||
194 | int khz = pstate->base.domain[nv_clk_src_mem]; | 286 | int khz = pstate->base.domain[nv_clk_src_mem]; |
195 | do { | 287 | do { |
196 | ret = ram->func->calc(ram, khz); | 288 | ret = ram->func->calc(ram, khz); |
@@ -200,7 +292,7 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei) | |||
200 | ram->func->tidy(ram); | 292 | ram->func->tidy(ram); |
201 | } | 293 | } |
202 | 294 | ||
203 | return nvkm_cstate_prog(clk, pstate, 0); | 295 | return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST); |
204 | } | 296 | } |
205 | 297 | ||
206 | static void | 298 | static void |
@@ -214,14 +306,14 @@ nvkm_pstate_work(struct work_struct *work) | |||
214 | return; | 306 | return; |
215 | clk->pwrsrc = power_supply_is_system_supplied(); | 307 | clk->pwrsrc = power_supply_is_system_supplied(); |
216 | 308 | ||
217 | nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n", | 309 | nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n", |
218 | clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc, | 310 | clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc, |
219 | clk->astate, clk->tstate, clk->dstate); | 311 | clk->astate, clk->temp, clk->dstate); |
220 | 312 | ||
221 | pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc; | 313 | pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc; |
222 | if (clk->state_nr && pstate != -1) { | 314 | if (clk->state_nr && pstate != -1) { |
223 | pstate = (pstate < 0) ? clk->astate : pstate; | 315 | pstate = (pstate < 0) ? clk->astate : pstate; |
224 | pstate = min(pstate, clk->state_nr - 1 + clk->tstate); | 316 | pstate = min(pstate, clk->state_nr - 1); |
225 | pstate = max(pstate, clk->dstate); | 317 | pstate = max(pstate, clk->dstate); |
226 | } else { | 318 | } else { |
227 | pstate = clk->pstate = -1; | 319 | pstate = clk->pstate = -1; |
@@ -448,13 +540,12 @@ nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait) | |||
448 | } | 540 | } |
449 | 541 | ||
450 | int | 542 | int |
451 | nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel) | 543 | nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp) |
452 | { | 544 | { |
453 | if (!rel) clk->tstate = req; | 545 | if (clk->temp == temp) |
454 | if ( rel) clk->tstate += rel; | 546 | return 0; |
455 | clk->tstate = min(clk->tstate, 0); | 547 | clk->temp = temp; |
456 | clk->tstate = max(clk->tstate, -(clk->state_nr - 1)); | 548 | return nvkm_pstate_calc(clk, false); |
457 | return nvkm_pstate_calc(clk, true); | ||
458 | } | 549 | } |
459 | 550 | ||
460 | int | 551 | int |
@@ -524,9 +615,9 @@ nvkm_clk_init(struct nvkm_subdev *subdev) | |||
524 | return clk->func->init(clk); | 615 | return clk->func->init(clk); |
525 | 616 | ||
526 | clk->astate = clk->state_nr - 1; | 617 | clk->astate = clk->state_nr - 1; |
527 | clk->tstate = 0; | ||
528 | clk->dstate = 0; | 618 | clk->dstate = 0; |
529 | clk->pstate = -1; | 619 | clk->pstate = -1; |
620 | clk->temp = 90; /* reasonable default value */ | ||
530 | nvkm_pstate_calc(clk, true); | 621 | nvkm_pstate_calc(clk, true); |
531 | return 0; | 622 | return 0; |
532 | } | 623 | } |
@@ -561,10 +652,22 @@ int | |||
561 | nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device, | 652 | nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device, |
562 | int index, bool allow_reclock, struct nvkm_clk *clk) | 653 | int index, bool allow_reclock, struct nvkm_clk *clk) |
563 | { | 654 | { |
655 | struct nvkm_subdev *subdev = &clk->subdev; | ||
656 | struct nvkm_bios *bios = device->bios; | ||
564 | int ret, idx, arglen; | 657 | int ret, idx, arglen; |
565 | const char *mode; | 658 | const char *mode; |
659 | struct nvbios_vpstate_header h; | ||
660 | |||
661 | nvkm_subdev_ctor(&nvkm_clk, device, index, subdev); | ||
662 | |||
663 | if (bios && !nvbios_vpstate_parse(bios, &h)) { | ||
664 | struct nvbios_vpstate_entry base, boost; | ||
665 | if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost)) | ||
666 | clk->boost_khz = boost.clock_mhz * 1000; | ||
667 | if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base)) | ||
668 | clk->base_khz = base.clock_mhz * 1000; | ||
669 | } | ||
566 | 670 | ||
567 | nvkm_subdev_ctor(&nvkm_clk, device, index, &clk->subdev); | ||
568 | clk->func = func; | 671 | clk->func = func; |
569 | INIT_LIST_HEAD(&clk->states); | 672 | INIT_LIST_HEAD(&clk->states); |
570 | clk->domains = func->domains; | 673 | clk->domains = func->domains; |
@@ -607,6 +710,8 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device, | |||
607 | if (mode) | 710 | if (mode) |
608 | clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen); | 711 | clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen); |
609 | 712 | ||
713 | clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost", | ||
714 | NVKM_CLK_BOOST_NONE); | ||
610 | return 0; | 715 | return 0; |
611 | } | 716 | } |
612 | 717 | ||
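nvkm_clk_ctor() above pulls the base and boost clocks (in kHz) out of the new vpstate table, and nvkm_cstate_valid() then drops any cstate whose NVKM_CLK_DOM_FLAG_VPSTATE domain clock exceeds the allowed limit: with the boost mode at NVKM_CLK_BOOST_NONE the base clock is the ceiling and, via the fall-through in the switch, the boost clock is checked as well; with NVKM_CLK_BOOST_BIOS only the boost clock limits the choice. The mode comes from an "NvBoost" config option read with nvkm_longopt(); assuming the usual nouveau config-option plumbing, that would be set as something like nouveau.config=NvBoost=1 on the kernel command line, though the numeric mode values are not shown in this diff. A standalone sketch of just the clamp decision; the enum names are assumptions standing in for the NVKM_CLK_BOOST_* values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum boost_mode { BOOST_NONE, BOOST_BIOS, BOOST_FULL }; /* names assumed */

    /* mirrors the checks in nvkm_cstate_valid(), including the fall-through */
    static bool clock_allowed(enum boost_mode mode, uint32_t freq_khz,
                              uint32_t base_khz, uint32_t boost_khz)
    {
        switch (mode) {
        case BOOST_NONE:
            if (base_khz && freq_khz > base_khz)
                return false;
            /* fall through: NONE also honours the boost limit */
        case BOOST_BIOS:
            if (boost_khz && freq_khz > boost_khz)
                return false;
        default:
            return true;
        }
    }

    int main(void)
    {
        /* a 1.1 GHz core clock against a 1.0 GHz base / 1.2 GHz boost clock */
        printf("%d\n", clock_allowed(BOOST_NONE, 1100000, 1000000, 1200000)); /* 0 */
        printf("%d\n", clock_allowed(BOOST_BIOS, 1100000, 1000000, 1200000)); /* 1 */
        return 0;
    }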
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c index 89d5543118cf..7f67f9f5a550 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c | |||
@@ -457,7 +457,7 @@ gf100_clk = { | |||
457 | { nv_clk_src_hubk06 , 0x00 }, | 457 | { nv_clk_src_hubk06 , 0x00 }, |
458 | { nv_clk_src_hubk01 , 0x01 }, | 458 | { nv_clk_src_hubk01 , 0x01 }, |
459 | { nv_clk_src_copy , 0x02 }, | 459 | { nv_clk_src_copy , 0x02 }, |
460 | { nv_clk_src_gpc , 0x03, 0, "core", 2000 }, | 460 | { nv_clk_src_gpc , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 }, |
461 | { nv_clk_src_rop , 0x04 }, | 461 | { nv_clk_src_rop , 0x04 }, |
462 | { nv_clk_src_mem , 0x05, 0, "memory", 1000 }, | 462 | { nv_clk_src_mem , 0x05, 0, "memory", 1000 }, |
463 | { nv_clk_src_vdec , 0x06 }, | 463 | { nv_clk_src_vdec , 0x06 }, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c index 06bc0d2d6ae1..0b37e3da7feb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c | |||
@@ -491,7 +491,7 @@ gk104_clk = { | |||
491 | .domains = { | 491 | .domains = { |
492 | { nv_clk_src_crystal, 0xff }, | 492 | { nv_clk_src_crystal, 0xff }, |
493 | { nv_clk_src_href , 0xff }, | 493 | { nv_clk_src_href , 0xff }, |
494 | { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 }, | 494 | { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE | NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 }, |
495 | { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE }, | 495 | { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE }, |
496 | { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE }, | 496 | { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE }, |
497 | { nv_clk_src_mem , 0x03, 0, "memory", 500 }, | 497 | { nv_clk_src_mem , 0x03, 0, "memory", 500 }, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c index 76433cc66fff..3841ad6be99e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | |||
@@ -50,24 +50,33 @@ gf100_fb_intr(struct nvkm_fb *base) | |||
50 | } | 50 | } |
51 | 51 | ||
52 | int | 52 | int |
53 | gf100_fb_oneinit(struct nvkm_fb *fb) | 53 | gf100_fb_oneinit(struct nvkm_fb *base) |
54 | { | 54 | { |
55 | struct nvkm_device *device = fb->subdev.device; | 55 | struct gf100_fb *fb = gf100_fb(base); |
56 | struct nvkm_device *device = fb->base.subdev.device; | ||
56 | int ret, size = 0x1000; | 57 | int ret, size = 0x1000; |
57 | 58 | ||
58 | size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size); | 59 | size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size); |
59 | size = min(size, 0x1000); | 60 | size = min(size, 0x1000); |
60 | 61 | ||
61 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, | 62 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, |
62 | false, &fb->mmu_rd); | 63 | false, &fb->base.mmu_rd); |
63 | if (ret) | 64 | if (ret) |
64 | return ret; | 65 | return ret; |
65 | 66 | ||
66 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, | 67 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, |
67 | false, &fb->mmu_wr); | 68 | false, &fb->base.mmu_wr); |
68 | if (ret) | 69 | if (ret) |
69 | return ret; | 70 | return ret; |
70 | 71 | ||
72 | fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
73 | if (fb->r100c10_page) { | ||
74 | fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0, | ||
75 | PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
76 | if (dma_mapping_error(device->dev, fb->r100c10)) | ||
77 | return -EFAULT; | ||
78 | } | ||
79 | |||
71 | return 0; | 80 | return 0; |
72 | } | 81 | } |
73 | 82 | ||
@@ -123,14 +132,6 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, | |||
123 | nvkm_fb_ctor(func, device, index, &fb->base); | 132 | nvkm_fb_ctor(func, device, index, &fb->base); |
124 | *pfb = &fb->base; | 133 | *pfb = &fb->base; |
125 | 134 | ||
126 | fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
127 | if (fb->r100c10_page) { | ||
128 | fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0, | ||
129 | PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
130 | if (dma_mapping_error(device->dev, fb->r100c10)) | ||
131 | return -EFAULT; | ||
132 | } | ||
133 | |||
134 | return 0; | 135 | return 0; |
135 | } | 136 | } |
136 | 137 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c index 1b5fb02eab2a..0595e0722bfc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c | |||
@@ -210,6 +210,23 @@ nv50_fb_intr(struct nvkm_fb *base) | |||
210 | nvkm_fifo_chan_put(fifo, flags, &chan); | 210 | nvkm_fifo_chan_put(fifo, flags, &chan); |
211 | } | 211 | } |
212 | 212 | ||
213 | static int | ||
214 | nv50_fb_oneinit(struct nvkm_fb *base) | ||
215 | { | ||
216 | struct nv50_fb *fb = nv50_fb(base); | ||
217 | struct nvkm_device *device = fb->base.subdev.device; | ||
218 | |||
219 | fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
220 | if (fb->r100c08_page) { | ||
221 | fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0, | ||
222 | PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
223 | if (dma_mapping_error(device->dev, fb->r100c08)) | ||
224 | return -EFAULT; | ||
225 | } | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
213 | static void | 230 | static void |
214 | nv50_fb_init(struct nvkm_fb *base) | 231 | nv50_fb_init(struct nvkm_fb *base) |
215 | { | 232 | { |
@@ -245,6 +262,7 @@ nv50_fb_dtor(struct nvkm_fb *base) | |||
245 | static const struct nvkm_fb_func | 262 | static const struct nvkm_fb_func |
246 | nv50_fb_ = { | 263 | nv50_fb_ = { |
247 | .dtor = nv50_fb_dtor, | 264 | .dtor = nv50_fb_dtor, |
265 | .oneinit = nv50_fb_oneinit, | ||
248 | .init = nv50_fb_init, | 266 | .init = nv50_fb_init, |
249 | .intr = nv50_fb_intr, | 267 | .intr = nv50_fb_intr, |
250 | .ram_new = nv50_fb_ram_new, | 268 | .ram_new = nv50_fb_ram_new, |
@@ -263,16 +281,6 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device, | |||
263 | fb->func = func; | 281 | fb->func = func; |
264 | *pfb = &fb->base; | 282 | *pfb = &fb->base; |
265 | 283 | ||
266 | fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
267 | if (fb->r100c08_page) { | ||
268 | fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0, | ||
269 | PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
270 | if (dma_mapping_error(device->dev, fb->r100c08)) | ||
271 | return -EFAULT; | ||
272 | } else { | ||
273 | nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n"); | ||
274 | } | ||
275 | |||
276 | return 0; | 284 | return 0; |
277 | } | 285 | } |
278 | 286 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h index b9ec0ae6723a..b60068b7d8f9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h | |||
@@ -24,6 +24,7 @@ int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *, | |||
24 | int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **); | 24 | int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **); |
25 | void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **); | 25 | void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **); |
26 | 26 | ||
27 | int gk104_ram_ctor(struct nvkm_fb *, struct nvkm_ram **, u32); | ||
27 | int gk104_ram_init(struct nvkm_ram *ram); | 28 | int gk104_ram_init(struct nvkm_ram *ram); |
28 | 29 | ||
29 | /* RAM type-specific MR calculation routines */ | 30 | /* RAM type-specific MR calculation routines */ |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c index 1fa3ade468ae..7904fa41acef 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c | |||
@@ -259,7 +259,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq) | |||
259 | 259 | ||
260 | ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); | 260 | ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); |
261 | ram_block(fuc); | 261 | ram_block(fuc); |
262 | ram_wr32(fuc, 0x62c000, 0x0f0f0000); | 262 | |
263 | if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP)) | ||
264 | ram_wr32(fuc, 0x62c000, 0x0f0f0000); | ||
263 | 265 | ||
264 | /* MR1: turn termination on early, for some reason.. */ | 266 | /* MR1: turn termination on early, for some reason.. */ |
265 | if ((ram->base.mr[1] & 0x03c) != 0x030) { | 267 | if ((ram->base.mr[1] & 0x03c) != 0x030) { |
@@ -658,7 +660,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq) | |||
658 | gk104_ram_train(fuc, 0x80020000, 0x01000000); | 660 | gk104_ram_train(fuc, 0x80020000, 0x01000000); |
659 | 661 | ||
660 | ram_unblock(fuc); | 662 | ram_unblock(fuc); |
661 | ram_wr32(fuc, 0x62c000, 0x0f0f0f00); | 663 | |
664 | if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP)) | ||
665 | ram_wr32(fuc, 0x62c000, 0x0f0f0f00); | ||
662 | 666 | ||
663 | if (next->bios.rammap_11_08_01) | 667 | if (next->bios.rammap_11_08_01) |
664 | data = 0x00000800; | 668 | data = 0x00000800; |
@@ -706,7 +710,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq) | |||
706 | 710 | ||
707 | ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); | 711 | ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); |
708 | ram_block(fuc); | 712 | ram_block(fuc); |
709 | ram_wr32(fuc, 0x62c000, 0x0f0f0000); | 713 | |
714 | if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP)) | ||
715 | ram_wr32(fuc, 0x62c000, 0x0f0f0000); | ||
710 | 716 | ||
711 | if (vc == 1 && ram_have(fuc, gpio2E)) { | 717 | if (vc == 1 && ram_have(fuc, gpio2E)) { |
712 | u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]); | 718 | u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]); |
@@ -936,7 +942,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq) | |||
936 | ram_nsec(fuc, 1000); | 942 | ram_nsec(fuc, 1000); |
937 | 943 | ||
938 | ram_unblock(fuc); | 944 | ram_unblock(fuc); |
939 | ram_wr32(fuc, 0x62c000, 0x0f0f0f00); | 945 | |
946 | if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP)) | ||
947 | ram_wr32(fuc, 0x62c000, 0x0f0f0f00); | ||
940 | 948 | ||
941 | if (next->bios.rammap_11_08_01) | 949 | if (next->bios.rammap_11_08_01) |
942 | data = 0x00000800; | 950 | data = 0x00000800; |
@@ -1530,6 +1538,12 @@ gk104_ram_func = { | |||
1530 | int | 1538 | int |
1531 | gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) | 1539 | gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) |
1532 | { | 1540 | { |
1541 | return gk104_ram_ctor(fb, pram, 0x022554); | ||
1542 | } | ||
1543 | |||
1544 | int | ||
1545 | gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr) | ||
1546 | { | ||
1533 | struct nvkm_subdev *subdev = &fb->subdev; | 1547 | struct nvkm_subdev *subdev = &fb->subdev; |
1534 | struct nvkm_device *device = subdev->device; | 1548 | struct nvkm_device *device = subdev->device; |
1535 | struct nvkm_bios *bios = device->bios; | 1549 | struct nvkm_bios *bios = device->bios; |
@@ -1544,7 +1558,7 @@ gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) | |||
1544 | return -ENOMEM; | 1558 | return -ENOMEM; |
1545 | *pram = &ram->base; | 1559 | *pram = &ram->base; |
1546 | 1560 | ||
1547 | ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base); | 1561 | ret = gf100_ram_ctor(&gk104_ram_func, fb, maskaddr, &ram->base); |
1548 | if (ret) | 1562 | if (ret) |
1549 | return ret; | 1563 | return ret; |
1550 | 1564 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c index 43d807f6ca71..ac862d1d77bd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c | |||
@@ -23,18 +23,8 @@ | |||
23 | */ | 23 | */ |
24 | #include "ram.h" | 24 | #include "ram.h" |
25 | 25 | ||
26 | static const struct nvkm_ram_func | ||
27 | gm107_ram_func = { | ||
28 | .init = gk104_ram_init, | ||
29 | .get = gf100_ram_get, | ||
30 | .put = gf100_ram_put, | ||
31 | }; | ||
32 | |||
33 | int | 26 | int |
34 | gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) | 27 | gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) |
35 | { | 28 | { |
36 | if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL))) | 29 | return gk104_ram_ctor(fb, pram, 0x021c14); |
37 | return -ENOMEM; | ||
38 | |||
39 | return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram); | ||
40 | } | 30 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c index b7159b338fac..1a4ab825852c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c | |||
@@ -29,7 +29,7 @@ gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus) | |||
29 | nvkm_mask(device, 0x137250, 0x3f, 0); | 29 | nvkm_mask(device, 0x137250, 0x3f, 0); |
30 | 30 | ||
31 | nvkm_mask(device, 0x000200, 0x20, 0); | 31 | nvkm_mask(device, 0x000200, 0x20, 0); |
32 | usleep_range(20, 30); | 32 | udelay(20); |
33 | nvkm_mask(device, 0x000200, 0x20, 0x20); | 33 | nvkm_mask(device, 0x000200, 0x20, 0x20); |
34 | 34 | ||
35 | nvkm_wr32(device, 0x12004c, 0x4); | 35 | nvkm_wr32(device, 0x12004c, 0x4); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c index 41bd5d0f7692..658355fc9354 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c | |||
@@ -96,60 +96,12 @@ nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense, | |||
96 | } | 96 | } |
97 | 97 | ||
98 | static void | 98 | static void |
99 | nvkm_iccsense_ina209_config(struct nvkm_iccsense *iccsense, | ||
100 | struct nvkm_iccsense_sensor *sensor) | ||
101 | { | ||
102 | struct nvkm_subdev *subdev = &iccsense->subdev; | ||
103 | /* configuration: | ||
104 | * 0x0007: 0x0007 shunt and bus continous | ||
105 | * 0x0078: 0x0078 128 samples shunt | ||
106 | * 0x0780: 0x0780 128 samples bus | ||
107 | * 0x1800: 0x0000 +-40 mV shunt range | ||
108 | * 0x2000: 0x0000 16V FSR | ||
109 | */ | ||
110 | u16 value = 0x07ff; | ||
111 | nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value); | ||
112 | nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value); | ||
113 | } | ||
114 | |||
115 | static void | ||
116 | nvkm_iccsense_ina3221_config(struct nvkm_iccsense *iccsense, | ||
117 | struct nvkm_iccsense_sensor *sensor) | ||
118 | { | ||
119 | struct nvkm_subdev *subdev = &iccsense->subdev; | ||
120 | /* configuration: | ||
121 | * 0x0007: 0x0007 shunt and bus continous | ||
122 | * 0x0031: 0x0000 140 us conversion time shunt | ||
123 | * 0x01c0: 0x0000 140 us conversion time bus | ||
124 | * 0x0f00: 0x0f00 1024 samples | ||
125 | * 0x7000: 0x?000 channels | ||
126 | */ | ||
127 | u16 value = 0x0e07; | ||
128 | if (sensor->rail_mask & 0x1) | ||
129 | value |= 0x1 << 14; | ||
130 | if (sensor->rail_mask & 0x2) | ||
131 | value |= 0x1 << 13; | ||
132 | if (sensor->rail_mask & 0x4) | ||
133 | value |= 0x1 << 12; | ||
134 | nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value); | ||
135 | nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value); | ||
136 | } | ||
137 | |||
138 | static void | ||
139 | nvkm_iccsense_sensor_config(struct nvkm_iccsense *iccsense, | 99 | nvkm_iccsense_sensor_config(struct nvkm_iccsense *iccsense, |
140 | struct nvkm_iccsense_sensor *sensor) | 100 | struct nvkm_iccsense_sensor *sensor) |
141 | { | 101 | { |
142 | switch (sensor->type) { | 102 | struct nvkm_subdev *subdev = &iccsense->subdev; |
143 | case NVBIOS_EXTDEV_INA209: | 103 | nvkm_trace(subdev, "write config of extdev %i: 0x%04x\n", sensor->id, sensor->config); |
144 | case NVBIOS_EXTDEV_INA219: | 104 | nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, sensor->config); |
145 | nvkm_iccsense_ina209_config(iccsense, sensor); | ||
146 | break; | ||
147 | case NVBIOS_EXTDEV_INA3221: | ||
148 | nvkm_iccsense_ina3221_config(iccsense, sensor); | ||
149 | break; | ||
150 | default: | ||
151 | break; | ||
152 | } | ||
153 | } | 105 | } |
154 | 106 | ||
155 | int | 107 | int |
@@ -196,7 +148,6 @@ nvkm_iccsense_dtor(struct nvkm_subdev *subdev) | |||
196 | static struct nvkm_iccsense_sensor* | 148 | static struct nvkm_iccsense_sensor* |
197 | nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id) | 149 | nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id) |
198 | { | 150 | { |
199 | |||
200 | struct nvkm_subdev *subdev = &iccsense->subdev; | 151 | struct nvkm_subdev *subdev = &iccsense->subdev; |
201 | struct nvkm_bios *bios = subdev->device->bios; | 152 | struct nvkm_bios *bios = subdev->device->bios; |
202 | struct nvkm_i2c *i2c = subdev->device->i2c; | 153 | struct nvkm_i2c *i2c = subdev->device->i2c; |
@@ -245,7 +196,7 @@ nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id) | |||
245 | sensor->type = extdev.type; | 196 | sensor->type = extdev.type; |
246 | sensor->i2c = &i2c_bus->i2c; | 197 | sensor->i2c = &i2c_bus->i2c; |
247 | sensor->addr = addr; | 198 | sensor->addr = addr; |
248 | sensor->rail_mask = 0x0; | 199 | sensor->config = 0x0; |
249 | return sensor; | 200 | return sensor; |
250 | } | 201 | } |
251 | 202 | ||
@@ -273,48 +224,56 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev) | |||
273 | 224 | ||
274 | iccsense->data_valid = true; | 225 | iccsense->data_valid = true; |
275 | for (i = 0; i < stbl.nr_entry; ++i) { | 226 | for (i = 0; i < stbl.nr_entry; ++i) { |
276 | struct pwr_rail_t *r = &stbl.rail[i]; | 227 | struct pwr_rail_t *pwr_rail = &stbl.rail[i]; |
277 | struct nvkm_iccsense_rail *rail; | ||
278 | struct nvkm_iccsense_sensor *sensor; | 228 | struct nvkm_iccsense_sensor *sensor; |
279 | int (*read)(struct nvkm_iccsense *, | 229 | int r; |
280 | struct nvkm_iccsense_rail *); | ||
281 | 230 | ||
282 | if (!r->mode || r->resistor_mohm == 0) | 231 | if (pwr_rail->mode != 1 || !pwr_rail->resistor_count) |
283 | continue; | 232 | continue; |
284 | 233 | ||
285 | sensor = nvkm_iccsense_get_sensor(iccsense, r->extdev_id); | 234 | sensor = nvkm_iccsense_get_sensor(iccsense, pwr_rail->extdev_id); |
286 | if (!sensor) | 235 | if (!sensor) |
287 | continue; | 236 | continue; |
288 | 237 | ||
289 | switch (sensor->type) { | 238 | if (!sensor->config) |
290 | case NVBIOS_EXTDEV_INA209: | 239 | sensor->config = pwr_rail->config; |
291 | if (r->rail != 0) | 240 | else if (sensor->config != pwr_rail->config) |
292 | continue; | 241 | nvkm_error(subdev, "config mismatch found for extdev %i\n", pwr_rail->extdev_id); |
293 | read = nvkm_iccsense_ina209_read; | 242 | |
294 | break; | 243 | for (r = 0; r < pwr_rail->resistor_count; ++r) { |
295 | case NVBIOS_EXTDEV_INA219: | 244 | struct nvkm_iccsense_rail *rail; |
296 | if (r->rail != 0) | 245 | struct pwr_rail_resistor_t *res = &pwr_rail->resistors[r]; |
246 | int (*read)(struct nvkm_iccsense *, | ||
247 | struct nvkm_iccsense_rail *); | ||
248 | |||
249 | if (!res->mohm || !res->enabled) | ||
297 | continue; | 250 | continue; |
298 | read = nvkm_iccsense_ina219_read; | 251 | |
299 | break; | 252 | switch (sensor->type) { |
300 | case NVBIOS_EXTDEV_INA3221: | 253 | case NVBIOS_EXTDEV_INA209: |
301 | if (r->rail >= 3) | 254 | read = nvkm_iccsense_ina209_read; |
255 | break; | ||
256 | case NVBIOS_EXTDEV_INA219: | ||
257 | read = nvkm_iccsense_ina219_read; | ||
258 | break; | ||
259 | case NVBIOS_EXTDEV_INA3221: | ||
260 | read = nvkm_iccsense_ina3221_read; | ||
261 | break; | ||
262 | default: | ||
302 | continue; | 263 | continue; |
303 | read = nvkm_iccsense_ina3221_read; | 264 | } |
304 | break; | 265 | |
305 | default: | 266 | rail = kmalloc(sizeof(*rail), GFP_KERNEL); |
306 | continue; | 267 | if (!rail) |
268 | return -ENOMEM; | ||
269 | |||
270 | rail->read = read; | ||
271 | rail->sensor = sensor; | ||
272 | rail->idx = r; | ||
273 | rail->mohm = res->mohm; | ||
274 | nvkm_debug(subdev, "create rail for extdev %i: { idx: %i, mohm: %i }\n", pwr_rail->extdev_id, r, rail->mohm); | ||
275 | list_add_tail(&rail->head, &iccsense->rails); | ||
307 | } | 276 | } |
308 | |||
309 | rail = kmalloc(sizeof(*rail), GFP_KERNEL); | ||
310 | if (!rail) | ||
311 | return -ENOMEM; | ||
312 | sensor->rail_mask |= 1 << r->rail; | ||
313 | rail->read = read; | ||
314 | rail->sensor = sensor; | ||
315 | rail->idx = r->rail; | ||
316 | rail->mohm = r->resistor_mohm; | ||
317 | list_add_tail(&rail->head, &iccsense->rails); | ||
318 | } | 277 | } |
319 | return 0; | 278 | return 0; |
320 | } | 279 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h index b72c31d2f908..e90e0f6ed008 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h | |||
@@ -10,7 +10,7 @@ struct nvkm_iccsense_sensor { | |||
10 | enum nvbios_extdev_type type; | 10 | enum nvbios_extdev_type type; |
11 | struct i2c_adapter *i2c; | 11 | struct i2c_adapter *i2c; |
12 | u8 addr; | 12 | u8 addr; |
13 | u8 rail_mask; | 13 | u16 config; |
14 | }; | 14 | }; |
15 | 15 | ||
16 | struct nvkm_iccsense_rail { | 16 | struct nvkm_iccsense_rail { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c index 45a2f8e784f9..9abfa5e2fe9f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c | |||
@@ -23,8 +23,8 @@ | |||
23 | */ | 23 | */ |
24 | #include "mxms.h" | 24 | #include "mxms.h" |
25 | 25 | ||
26 | #define ROM16(x) le16_to_cpu(*(u16 *)&(x)) | 26 | #define ROM16(x) get_unaligned_le16(&(x)) |
27 | #define ROM32(x) le32_to_cpu(*(u32 *)&(x)) | 27 | #define ROM32(x) get_unaligned_le32(&(x)) |
28 | 28 | ||
29 | static u8 * | 29 | static u8 * |
30 | mxms_data(struct nvkm_mxm *mxm) | 30 | mxms_data(struct nvkm_mxm *mxm) |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild index c34076223b7b..bcd179ba11d0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild | |||
@@ -1,6 +1,7 @@ | |||
1 | nvkm-y += nvkm/subdev/volt/base.o | 1 | nvkm-y += nvkm/subdev/volt/base.o |
2 | nvkm-y += nvkm/subdev/volt/gpio.o | 2 | nvkm-y += nvkm/subdev/volt/gpio.o |
3 | nvkm-y += nvkm/subdev/volt/nv40.o | 3 | nvkm-y += nvkm/subdev/volt/nv40.o |
4 | nvkm-y += nvkm/subdev/volt/gf100.o | ||
4 | nvkm-y += nvkm/subdev/volt/gk104.o | 5 | nvkm-y += nvkm/subdev/volt/gk104.o |
5 | nvkm-y += nvkm/subdev/volt/gk20a.o | 6 | nvkm-y += nvkm/subdev/volt/gk20a.o |
6 | nvkm-y += nvkm/subdev/volt/gm20b.o | 7 | nvkm-y += nvkm/subdev/volt/gm20b.o |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c index 1c3d23b0e84a..e8569b04b55d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <subdev/bios.h> | 26 | #include <subdev/bios.h> |
27 | #include <subdev/bios/vmap.h> | 27 | #include <subdev/bios/vmap.h> |
28 | #include <subdev/bios/volt.h> | 28 | #include <subdev/bios/volt.h> |
29 | #include <subdev/therm.h> | ||
29 | 30 | ||
30 | int | 31 | int |
31 | nvkm_volt_get(struct nvkm_volt *volt) | 32 | nvkm_volt_get(struct nvkm_volt *volt) |
@@ -50,23 +51,35 @@ static int | |||
50 | nvkm_volt_set(struct nvkm_volt *volt, u32 uv) | 51 | nvkm_volt_set(struct nvkm_volt *volt, u32 uv) |
51 | { | 52 | { |
52 | struct nvkm_subdev *subdev = &volt->subdev; | 53 | struct nvkm_subdev *subdev = &volt->subdev; |
53 | int i, ret = -EINVAL; | 54 | int i, ret = -EINVAL, best_err = volt->max_uv, best = -1; |
54 | 55 | ||
55 | if (volt->func->volt_set) | 56 | if (volt->func->volt_set) |
56 | return volt->func->volt_set(volt, uv); | 57 | return volt->func->volt_set(volt, uv); |
57 | 58 | ||
58 | for (i = 0; i < volt->vid_nr; i++) { | 59 | for (i = 0; i < volt->vid_nr; i++) { |
59 | if (volt->vid[i].uv == uv) { | 60 | int err = volt->vid[i].uv - uv; |
60 | ret = volt->func->vid_set(volt, volt->vid[i].vid); | 61 | if (err < 0 || err > best_err) |
61 | nvkm_debug(subdev, "set %duv: %d\n", uv, ret); | 62 | continue; |
63 | |||
64 | best_err = err; | ||
65 | best = i; | ||
66 | if (best_err == 0) | ||
62 | break; | 67 | break; |
63 | } | ||
64 | } | 68 | } |
69 | |||
70 | if (best == -1) { | ||
71 | nvkm_error(subdev, "couldn't set %iuv\n", uv); | ||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | ret = volt->func->vid_set(volt, volt->vid[best].vid); | ||
76 | nvkm_debug(subdev, "set req %duv to %duv: %d\n", uv, | ||
77 | volt->vid[best].uv, ret); | ||
65 | return ret; | 78 | return ret; |
66 | } | 79 | } |
67 | 80 | ||
68 | static int | 81 | int |
69 | nvkm_volt_map(struct nvkm_volt *volt, u8 id) | 82 | nvkm_volt_map_min(struct nvkm_volt *volt, u8 id) |
70 | { | 83 | { |
71 | struct nvkm_bios *bios = volt->subdev.device->bios; | 84 | struct nvkm_bios *bios = volt->subdev.device->bios; |
72 | struct nvbios_vmap_entry info; | 85 | struct nvbios_vmap_entry info; |
@@ -76,7 +89,7 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id) | |||
76 | vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info); | 89 | vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info); |
77 | if (vmap) { | 90 | if (vmap) { |
78 | if (info.link != 0xff) { | 91 | if (info.link != 0xff) { |
79 | int ret = nvkm_volt_map(volt, info.link); | 92 | int ret = nvkm_volt_map_min(volt, info.link); |
80 | if (ret < 0) | 93 | if (ret < 0) |
81 | return ret; | 94 | return ret; |
82 | info.min += ret; | 95 | info.min += ret; |
@@ -88,19 +101,79 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id) | |||
88 | } | 101 | } |
89 | 102 | ||
90 | int | 103 | int |
91 | nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition) | 104 | nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp) |
105 | { | ||
106 | struct nvkm_bios *bios = volt->subdev.device->bios; | ||
107 | struct nvbios_vmap_entry info; | ||
108 | u8 ver, len; | ||
109 | u16 vmap; | ||
110 | |||
111 | vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info); | ||
112 | if (vmap) { | ||
113 | s64 result; | ||
114 | |||
115 | if (volt->speedo < 0) | ||
116 | return volt->speedo; | ||
117 | |||
118 | if (ver == 0x10 || (ver == 0x20 && info.mode == 0)) { | ||
119 | result = div64_s64((s64)info.arg[0], 10); | ||
120 | result += div64_s64((s64)info.arg[1] * volt->speedo, 10); | ||
121 | result += div64_s64((s64)info.arg[2] * volt->speedo * volt->speedo, 100000); | ||
122 | } else if (ver == 0x20) { | ||
123 | switch (info.mode) { | ||
124 | /* 0x0 handled above! */ | ||
125 | case 0x1: | ||
126 | result = ((s64)info.arg[0] * 15625) >> 18; | ||
127 | result += ((s64)info.arg[1] * volt->speedo * 15625) >> 18; | ||
128 | result += ((s64)info.arg[2] * temp * 15625) >> 10; | ||
129 | result += ((s64)info.arg[3] * volt->speedo * temp * 15625) >> 18; | ||
130 | result += ((s64)info.arg[4] * volt->speedo * volt->speedo * 15625) >> 30; | ||
131 | result += ((s64)info.arg[5] * temp * temp * 15625) >> 18; | ||
132 | break; | ||
133 | case 0x3: | ||
134 | result = (info.min + info.max) / 2; | ||
135 | break; | ||
136 | case 0x2: | ||
137 | default: | ||
138 | result = info.min; | ||
139 | break; | ||
140 | } | ||
141 | } else { | ||
142 | return -ENODEV; | ||
143 | } | ||
144 | |||
145 | result = min(max(result, (s64)info.min), (s64)info.max); | ||
146 | |||
147 | if (info.link != 0xff) { | ||
148 | int ret = nvkm_volt_map(volt, info.link, temp); | ||
149 | if (ret < 0) | ||
150 | return ret; | ||
151 | result += ret; | ||
152 | } | ||
153 | return result; | ||
154 | } | ||
155 | |||
156 | return id ? id * 10000 : -ENODEV; | ||
157 | } | ||
158 | |||
159 | int | ||
160 | nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, u8 min_id, u8 temp, | ||
161 | int condition) | ||
92 | { | 162 | { |
93 | int ret; | 163 | int ret; |
94 | 164 | ||
95 | if (volt->func->set_id) | 165 | if (volt->func->set_id) |
96 | return volt->func->set_id(volt, id, condition); | 166 | return volt->func->set_id(volt, id, condition); |
97 | 167 | ||
98 | ret = nvkm_volt_map(volt, id); | 168 | ret = nvkm_volt_map(volt, id, temp); |
99 | if (ret >= 0) { | 169 | if (ret >= 0) { |
100 | int prev = nvkm_volt_get(volt); | 170 | int prev = nvkm_volt_get(volt); |
101 | if (!condition || prev < 0 || | 171 | if (!condition || prev < 0 || |
102 | (condition < 0 && ret < prev) || | 172 | (condition < 0 && ret < prev) || |
103 | (condition > 0 && ret > prev)) { | 173 | (condition > 0 && ret > prev)) { |
174 | int min = nvkm_volt_map(volt, min_id, temp); | ||
175 | if (min >= 0) | ||
176 | ret = max(min, ret); | ||
104 | ret = nvkm_volt_set(volt, ret); | 177 | ret = nvkm_volt_set(volt, ret); |
105 | } else { | 178 | } else { |
106 | ret = 0; | 179 | ret = 0; |
@@ -112,6 +185,7 @@ nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition) | |||
112 | static void | 185 | static void |
113 | nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt) | 186 | nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt) |
114 | { | 187 | { |
188 | struct nvkm_subdev *subdev = &bios->subdev; | ||
115 | struct nvbios_volt_entry ivid; | 189 | struct nvbios_volt_entry ivid; |
116 | struct nvbios_volt info; | 190 | struct nvbios_volt info; |
117 | u8 ver, hdr, cnt, len; | 191 | u8 ver, hdr, cnt, len; |
@@ -119,7 +193,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt) | |||
119 | int i; | 193 | int i; |
120 | 194 | ||
121 | data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info); | 195 | data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info); |
122 | if (data && info.vidmask && info.base && info.step) { | 196 | if (data && info.vidmask && info.base && info.step && info.ranged) { |
197 | nvkm_debug(subdev, "found ranged based VIDs\n"); | ||
123 | volt->min_uv = info.min; | 198 | volt->min_uv = info.min; |
124 | volt->max_uv = info.max; | 199 | volt->max_uv = info.max; |
125 | for (i = 0; i < info.vidmask + 1; i++) { | 200 | for (i = 0; i < info.vidmask + 1; i++) { |
@@ -132,7 +207,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt) | |||
132 | info.base += info.step; | 207 | info.base += info.step; |
133 | } | 208 | } |
134 | volt->vid_mask = info.vidmask; | 209 | volt->vid_mask = info.vidmask; |
135 | } else if (data && info.vidmask) { | 210 | } else if (data && info.vidmask && !info.ranged) { |
211 | nvkm_debug(subdev, "found entry based VIDs\n"); | ||
136 | volt->min_uv = 0xffffffff; | 212 | volt->min_uv = 0xffffffff; |
137 | volt->max_uv = 0; | 213 | volt->max_uv = 0; |
138 | for (i = 0; i < cnt; i++) { | 214 | for (i = 0; i < cnt; i++) { |
@@ -154,6 +230,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt) | |||
154 | } | 230 | } |
155 | 231 | ||
156 | static int | 232 | static int |
233 | nvkm_volt_speedo_read(struct nvkm_volt *volt) | ||
234 | { | ||
235 | if (volt->func->speedo_read) | ||
236 | return volt->func->speedo_read(volt); | ||
237 | return -EINVAL; | ||
238 | } | ||
239 | |||
240 | static int | ||
157 | nvkm_volt_init(struct nvkm_subdev *subdev) | 241 | nvkm_volt_init(struct nvkm_subdev *subdev) |
158 | { | 242 | { |
159 | struct nvkm_volt *volt = nvkm_volt(subdev); | 243 | struct nvkm_volt *volt = nvkm_volt(subdev); |
@@ -167,6 +251,21 @@ nvkm_volt_init(struct nvkm_subdev *subdev) | |||
167 | return 0; | 251 | return 0; |
168 | } | 252 | } |
169 | 253 | ||
254 | static int | ||
255 | nvkm_volt_oneinit(struct nvkm_subdev *subdev) | ||
256 | { | ||
257 | struct nvkm_volt *volt = nvkm_volt(subdev); | ||
258 | |||
259 | volt->speedo = nvkm_volt_speedo_read(volt); | ||
260 | if (volt->speedo > 0) | ||
261 | nvkm_debug(&volt->subdev, "speedo %x\n", volt->speedo); | ||
262 | |||
263 | if (volt->func->oneinit) | ||
264 | return volt->func->oneinit(volt); | ||
265 | |||
266 | return 0; | ||
267 | } | ||
268 | |||
170 | static void * | 269 | static void * |
171 | nvkm_volt_dtor(struct nvkm_subdev *subdev) | 270 | nvkm_volt_dtor(struct nvkm_subdev *subdev) |
172 | { | 271 | { |
@@ -177,6 +276,7 @@ static const struct nvkm_subdev_func | |||
177 | nvkm_volt = { | 276 | nvkm_volt = { |
178 | .dtor = nvkm_volt_dtor, | 277 | .dtor = nvkm_volt_dtor, |
179 | .init = nvkm_volt_init, | 278 | .init = nvkm_volt_init, |
279 | .oneinit = nvkm_volt_oneinit, | ||
180 | }; | 280 | }; |
181 | 281 | ||
182 | void | 282 | void |
@@ -191,9 +291,22 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device, | |||
191 | 291 | ||
192 | /* Assuming the non-bios device should build the voltage table later */ | 292 | /* Assuming the non-bios device should build the voltage table later */ |
193 | if (bios) { | 293 | if (bios) { |
294 | u8 ver, hdr, cnt, len; | ||
295 | struct nvbios_vmap vmap; | ||
296 | |||
194 | nvkm_volt_parse_bios(bios, volt); | 297 | nvkm_volt_parse_bios(bios, volt); |
195 | nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n", | 298 | nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n", |
196 | volt->min_uv, volt->max_uv); | 299 | volt->min_uv, volt->max_uv); |
300 | |||
301 | if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) { | ||
302 | volt->max0_id = vmap.max0; | ||
303 | volt->max1_id = vmap.max1; | ||
304 | volt->max2_id = vmap.max2; | ||
305 | } else { | ||
306 | volt->max0_id = 0xff; | ||
307 | volt->max1_id = 0xff; | ||
308 | volt->max2_id = 0xff; | ||
309 | } | ||
197 | } | 310 | } |
198 | 311 | ||
199 | if (volt->vid_nr) { | 312 | if (volt->vid_nr) { |
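The nvkm_volt_map() hunks above evaluate a fixed-point polynomial in the chip's speedo (process-speed fuse) value and the current temperature for version 0x20, mode 0x1 voltage-map entries, then clamp the result to [info.min, info.max]. A rough standalone sketch of that evaluation, with made-up coefficients standing in for the VBIOS info.arg[] values, is:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the six info.arg[] coefficients read from the
 * VBIOS voltage-map table; real values are board specific. */
static int64_t volt_map_mode1(const int64_t arg[6], int64_t speedo, int64_t temp)
{
	int64_t uv;

	/* each term is scaled by 15625 and shifted back down, mirroring the
	 * fixed-point arithmetic of the ver == 0x20, mode 0x1 case above */
	uv  = (arg[0] * 15625) >> 18;
	uv += (arg[1] * speedo * 15625) >> 18;
	uv += (arg[2] * temp * 15625) >> 10;
	uv += (arg[3] * speedo * temp * 15625) >> 18;
	uv += (arg[4] * speedo * speedo * 15625) >> 30;
	uv += (arg[5] * temp * temp * 15625) >> 18;
	return uv; /* microvolts, before clamping to [info.min, info.max] */
}

int main(void)
{
	const int64_t arg[6] = { 1600000, 100000, 50000, 2000, 300, 40 };

	printf("%lld uV\n", (long long)volt_map_mode1(arg, 0x1a3, 35));
	return 0;
}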
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c new file mode 100644 index 000000000000..d9ed6925ca64 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | * Copyright 2016 Karol Herbst | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Karol Herbst | ||
23 | */ | ||
24 | #include "priv.h" | ||
25 | |||
26 | #include <subdev/fuse.h> | ||
27 | |||
28 | static int | ||
29 | gf100_volt_speedo_read(struct nvkm_volt *volt) | ||
30 | { | ||
31 | struct nvkm_device *device = volt->subdev.device; | ||
32 | struct nvkm_fuse *fuse = device->fuse; | ||
33 | |||
34 | if (!fuse) | ||
35 | return -EINVAL; | ||
36 | |||
37 | return nvkm_fuse_read(fuse, 0x1cc); | ||
38 | } | ||
39 | |||
40 | int | ||
41 | gf100_volt_oneinit(struct nvkm_volt *volt) | ||
42 | { | ||
43 | struct nvkm_subdev *subdev = &volt->subdev; | ||
44 | if (volt->speedo <= 0) | ||
45 | nvkm_error(subdev, "couldn't find speedo value, volting not " | ||
46 | "possible\n"); | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | static const struct nvkm_volt_func | ||
51 | gf100_volt = { | ||
52 | .oneinit = gf100_volt_oneinit, | ||
53 | .vid_get = nvkm_voltgpio_get, | ||
54 | .vid_set = nvkm_voltgpio_set, | ||
55 | .speedo_read = gf100_volt_speedo_read, | ||
56 | }; | ||
57 | |||
58 | int | ||
59 | gf100_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) | ||
60 | { | ||
61 | struct nvkm_volt *volt; | ||
62 | int ret; | ||
63 | |||
64 | ret = nvkm_volt_new_(&gf100_volt, device, index, &volt); | ||
65 | *pvolt = volt; | ||
66 | if (ret) | ||
67 | return ret; | ||
68 | |||
69 | return nvkm_voltgpio_init(volt); | ||
70 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c index 420bd84d8483..b2c5d1166a13 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <subdev/gpio.h> | 27 | #include <subdev/gpio.h> |
28 | #include <subdev/bios.h> | 28 | #include <subdev/bios.h> |
29 | #include <subdev/bios/volt.h> | 29 | #include <subdev/bios/volt.h> |
30 | #include <subdev/fuse.h> | ||
30 | 31 | ||
31 | #define gk104_volt(p) container_of((p), struct gk104_volt, base) | 32 | #define gk104_volt(p) container_of((p), struct gk104_volt, base) |
32 | struct gk104_volt { | 33 | struct gk104_volt { |
@@ -64,13 +65,33 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv) | |||
64 | return 0; | 65 | return 0; |
65 | } | 66 | } |
66 | 67 | ||
68 | static int | ||
69 | gk104_volt_speedo_read(struct nvkm_volt *volt) | ||
70 | { | ||
71 | struct nvkm_device *device = volt->subdev.device; | ||
72 | struct nvkm_fuse *fuse = device->fuse; | ||
73 | int ret; | ||
74 | |||
75 | if (!fuse) | ||
76 | return -EINVAL; | ||
77 | |||
78 | nvkm_wr32(device, 0x122634, 0x0); | ||
79 | ret = nvkm_fuse_read(fuse, 0x3a8); | ||
80 | nvkm_wr32(device, 0x122634, 0x41); | ||
81 | return ret; | ||
82 | } | ||
83 | |||
67 | static const struct nvkm_volt_func | 84 | static const struct nvkm_volt_func |
68 | gk104_volt_pwm = { | 85 | gk104_volt_pwm = { |
86 | .oneinit = gf100_volt_oneinit, | ||
69 | .volt_get = gk104_volt_get, | 87 | .volt_get = gk104_volt_get, |
70 | .volt_set = gk104_volt_set, | 88 | .volt_set = gk104_volt_set, |
89 | .speedo_read = gk104_volt_speedo_read, | ||
71 | }, gk104_volt_gpio = { | 90 | }, gk104_volt_gpio = { |
91 | .oneinit = gf100_volt_oneinit, | ||
72 | .vid_get = nvkm_voltgpio_get, | 92 | .vid_get = nvkm_voltgpio_get, |
73 | .vid_set = nvkm_voltgpio_set, | 93 | .vid_set = nvkm_voltgpio_set, |
94 | .speedo_read = gk104_volt_speedo_read, | ||
74 | }; | 95 | }; |
75 | 96 | ||
76 | int | 97 | int |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h index d5140d991161..354bafe4b4e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h | |||
@@ -9,11 +9,13 @@ int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *, | |||
9 | int index, struct nvkm_volt **); | 9 | int index, struct nvkm_volt **); |
10 | 10 | ||
11 | struct nvkm_volt_func { | 11 | struct nvkm_volt_func { |
12 | int (*oneinit)(struct nvkm_volt *); | ||
12 | int (*volt_get)(struct nvkm_volt *); | 13 | int (*volt_get)(struct nvkm_volt *); |
13 | int (*volt_set)(struct nvkm_volt *, u32 uv); | 14 | int (*volt_set)(struct nvkm_volt *, u32 uv); |
14 | int (*vid_get)(struct nvkm_volt *); | 15 | int (*vid_get)(struct nvkm_volt *); |
15 | int (*vid_set)(struct nvkm_volt *, u8 vid); | 16 | int (*vid_set)(struct nvkm_volt *, u8 vid); |
16 | int (*set_id)(struct nvkm_volt *, u8 id, int condition); | 17 | int (*set_id)(struct nvkm_volt *, u8 id, int condition); |
18 | int (*speedo_read)(struct nvkm_volt *); | ||
17 | }; | 19 | }; |
18 | 20 | ||
19 | int nvkm_voltgpio_init(struct nvkm_volt *); | 21 | int nvkm_voltgpio_init(struct nvkm_volt *); |
@@ -23,4 +25,6 @@ int nvkm_voltgpio_set(struct nvkm_volt *, u8); | |||
23 | int nvkm_voltpwm_init(struct nvkm_volt *volt); | 25 | int nvkm_voltpwm_init(struct nvkm_volt *volt); |
24 | int nvkm_voltpwm_get(struct nvkm_volt *volt); | 26 | int nvkm_voltpwm_get(struct nvkm_volt *volt); |
25 | int nvkm_voltpwm_set(struct nvkm_volt *volt, u32 uv); | 27 | int nvkm_voltpwm_set(struct nvkm_volt *volt, u32 uv); |
28 | |||
29 | int gf100_volt_oneinit(struct nvkm_volt *); | ||
26 | #endif | 30 | #endif |
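The priv.h hunk above grows the nvkm_volt_func table by two optional hooks, oneinit and speedo_read; the common code only calls them when a chip implementation fills them in, and nvkm_volt_speedo_read() falls back to -EINVAL otherwise. A stripped-down sketch of that optional-hook pattern, using invented names rather than the real nvkm structures:

#include <errno.h>
#include <stdio.h>

/* simplified stand-in for struct nvkm_volt_func */
struct volt_funcs {
	int (*speedo_read)(void);
};

/* core-side wrapper: use the hook if present, otherwise report "unsupported" */
static int volt_speedo_read(const struct volt_funcs *f)
{
	if (f->speedo_read)
		return f->speedo_read();
	return -EINVAL;
}

/* pretend gf100-style back-end returning a fused speedo value */
static int fake_gf100_speedo(void) { return 0x1a3; }

int main(void)
{
	const struct volt_funcs gf100 = { .speedo_read = fake_gf100_speedo };
	const struct volt_funcs none  = { 0 };

	printf("gf100: %d\n", volt_speedo_read(&gf100));
	printf("none:  %d\n", volt_speedo_read(&none));
	return 0;
}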
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 5f3e5ad99de7..84995ebc6ffc 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
@@ -31,7 +31,7 @@ | |||
31 | * Definitions taken from spice-protocol, plus kernel driver specific bits. | 31 | * Definitions taken from spice-protocol, plus kernel driver specific bits. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/fence.h> | 34 | #include <linux/dma-fence.h> |
35 | #include <linux/workqueue.h> | 35 | #include <linux/workqueue.h> |
36 | #include <linux/firmware.h> | 36 | #include <linux/firmware.h> |
37 | #include <linux/platform_device.h> | 37 | #include <linux/platform_device.h> |
@@ -190,7 +190,7 @@ enum { | |||
190 | * spice-protocol/qxl_dev.h */ | 190 | * spice-protocol/qxl_dev.h */ |
191 | #define QXL_MAX_RES 96 | 191 | #define QXL_MAX_RES 96 |
192 | struct qxl_release { | 192 | struct qxl_release { |
193 | struct fence base; | 193 | struct dma_fence base; |
194 | 194 | ||
195 | int id; | 195 | int id; |
196 | int type; | 196 | int type; |
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index cd83f050cf3e..50b4e522f05f 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c | |||
@@ -21,7 +21,7 @@ | |||
21 | */ | 21 | */ |
22 | #include "qxl_drv.h" | 22 | #include "qxl_drv.h" |
23 | #include "qxl_object.h" | 23 | #include "qxl_object.h" |
24 | #include <trace/events/fence.h> | 24 | #include <trace/events/dma_fence.h> |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * drawable cmd cache - allocate a bunch of VRAM pages, suballocate | 27 | * drawable cmd cache - allocate a bunch of VRAM pages, suballocate |
@@ -40,23 +40,24 @@ | |||
40 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; | 40 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; |
41 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; | 41 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; |
42 | 42 | ||
43 | static const char *qxl_get_driver_name(struct fence *fence) | 43 | static const char *qxl_get_driver_name(struct dma_fence *fence) |
44 | { | 44 | { |
45 | return "qxl"; | 45 | return "qxl"; |
46 | } | 46 | } |
47 | 47 | ||
48 | static const char *qxl_get_timeline_name(struct fence *fence) | 48 | static const char *qxl_get_timeline_name(struct dma_fence *fence) |
49 | { | 49 | { |
50 | return "release"; | 50 | return "release"; |
51 | } | 51 | } |
52 | 52 | ||
53 | static bool qxl_nop_signaling(struct fence *fence) | 53 | static bool qxl_nop_signaling(struct dma_fence *fence) |
54 | { | 54 | { |
55 | /* fences are always automatically signaled, so just pretend we did this.. */ | 55 | /* fences are always automatically signaled, so just pretend we did this.. */ |
56 | return true; | 56 | return true; |
57 | } | 57 | } |
58 | 58 | ||
59 | static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout) | 59 | static long qxl_fence_wait(struct dma_fence *fence, bool intr, |
60 | signed long timeout) | ||
60 | { | 61 | { |
61 | struct qxl_device *qdev; | 62 | struct qxl_device *qdev; |
62 | struct qxl_release *release; | 63 | struct qxl_release *release; |
@@ -71,7 +72,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout) | |||
71 | retry: | 72 | retry: |
72 | sc++; | 73 | sc++; |
73 | 74 | ||
74 | if (fence_is_signaled(fence)) | 75 | if (dma_fence_is_signaled(fence)) |
75 | goto signaled; | 76 | goto signaled; |
76 | 77 | ||
77 | qxl_io_notify_oom(qdev); | 78 | qxl_io_notify_oom(qdev); |
@@ -80,11 +81,11 @@ retry: | |||
80 | if (!qxl_queue_garbage_collect(qdev, true)) | 81 | if (!qxl_queue_garbage_collect(qdev, true)) |
81 | break; | 82 | break; |
82 | 83 | ||
83 | if (fence_is_signaled(fence)) | 84 | if (dma_fence_is_signaled(fence)) |
84 | goto signaled; | 85 | goto signaled; |
85 | } | 86 | } |
86 | 87 | ||
87 | if (fence_is_signaled(fence)) | 88 | if (dma_fence_is_signaled(fence)) |
88 | goto signaled; | 89 | goto signaled; |
89 | 90 | ||
90 | if (have_drawable_releases || sc < 4) { | 91 | if (have_drawable_releases || sc < 4) { |
@@ -96,9 +97,9 @@ retry: | |||
96 | return 0; | 97 | return 0; |
97 | 98 | ||
98 | if (have_drawable_releases && sc > 300) { | 99 | if (have_drawable_releases && sc > 300) { |
99 | FENCE_WARN(fence, "failed to wait on release %llu " | 100 | DMA_FENCE_WARN(fence, "failed to wait on release %llu " |
100 | "after spincount %d\n", | 101 | "after spincount %d\n", |
101 | fence->context & ~0xf0000000, sc); | 102 | fence->context & ~0xf0000000, sc); |
102 | goto signaled; | 103 | goto signaled; |
103 | } | 104 | } |
104 | goto retry; | 105 | goto retry; |
@@ -115,7 +116,7 @@ signaled: | |||
115 | return end - cur; | 116 | return end - cur; |
116 | } | 117 | } |
117 | 118 | ||
118 | static const struct fence_ops qxl_fence_ops = { | 119 | static const struct dma_fence_ops qxl_fence_ops = { |
119 | .get_driver_name = qxl_get_driver_name, | 120 | .get_driver_name = qxl_get_driver_name, |
120 | .get_timeline_name = qxl_get_timeline_name, | 121 | .get_timeline_name = qxl_get_timeline_name, |
121 | .enable_signaling = qxl_nop_signaling, | 122 | .enable_signaling = qxl_nop_signaling, |
@@ -192,8 +193,8 @@ qxl_release_free(struct qxl_device *qdev, | |||
192 | WARN_ON(list_empty(&release->bos)); | 193 | WARN_ON(list_empty(&release->bos)); |
193 | qxl_release_free_list(release); | 194 | qxl_release_free_list(release); |
194 | 195 | ||
195 | fence_signal(&release->base); | 196 | dma_fence_signal(&release->base); |
196 | fence_put(&release->base); | 197 | dma_fence_put(&release->base); |
197 | } else { | 198 | } else { |
198 | qxl_release_free_list(release); | 199 | qxl_release_free_list(release); |
199 | kfree(release); | 200 | kfree(release); |
@@ -453,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) | |||
453 | * Since we never really allocated a context and we don't want to conflict, | 454 | * Since we never really allocated a context and we don't want to conflict, |
454 | * set the highest bits. This will break if we really allow exporting of dma-bufs. | 455 | * set the highest bits. This will break if we really allow exporting of dma-bufs. |
455 | */ | 456 | */ |
456 | fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock, | 457 | dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock, |
457 | release->id | 0xf0000000, release->base.seqno); | 458 | release->id | 0xf0000000, release->base.seqno); |
458 | trace_fence_emit(&release->base); | 459 | trace_dma_fence_emit(&release->base); |
459 | 460 | ||
460 | driver = bdev->driver; | 461 | driver = bdev->driver; |
461 | glob = bo->glob; | 462 | glob = bo->glob; |
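The qxl changes above are part of the tree-wide s/fence/dma_fence/ rename pulled in by this backmerge: struct fence, the fence_ops table, the fence_* helpers and the fence trace points all gain a dma_ prefix with unchanged semantics. As a hedged, kernel-context sketch (not standalone code; the my_* names are placeholders for a driver's own fence implementation), a converted fence backend now reads roughly:

#include <linux/dma-fence.h>

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "mydrv";
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "mydrv.ring0";
}

static bool my_enable_signaling(struct dma_fence *f)
{
	/* arm whatever interrupt or polling path will signal this fence */
	return true;
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name   = my_get_driver_name,
	.get_timeline_name = my_get_timeline_name,
	.enable_signaling  = my_enable_signaling,
	.wait              = dma_fence_default_wait,
};

/*
 * creation:   dma_fence_init(&f->base, &my_fence_ops, &lock, context, seqno),
 *             with context obtained from dma_fence_context_alloc()
 * completion: dma_fence_signal(&f->base); dma_fence_put(&f->base);
 */

Drivers such as qxl keep their custom .wait callback through the rename; only the type and helper names change.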
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index e26c82db948b..11761330a6b8 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c | |||
@@ -387,6 +387,7 @@ static struct ttm_bo_driver qxl_bo_driver = { | |||
387 | .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate, | 387 | .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate, |
388 | .invalidate_caches = &qxl_invalidate_caches, | 388 | .invalidate_caches = &qxl_invalidate_caches, |
389 | .init_mem_type = &qxl_init_mem_type, | 389 | .init_mem_type = &qxl_init_mem_type, |
390 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
390 | .evict_flags = &qxl_evict_flags, | 391 | .evict_flags = &qxl_evict_flags, |
391 | .move = &qxl_bo_move, | 392 | .move = &qxl_bo_move, |
392 | .verify_access = &qxl_verify_access, | 393 | .verify_access = &qxl_verify_access, |
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 56bb758f4e33..fa4f8f008e4d 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <drm/radeon_drm.h> | 28 | #include <drm/radeon_drm.h> |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "radeon_audio.h" | 30 | #include "radeon_audio.h" |
31 | #include "radeon_asic.h" | ||
31 | #include "atom.h" | 32 | #include "atom.h" |
32 | #include <linux/backlight.h> | 33 | #include <linux/backlight.h> |
33 | 34 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index d960d3915408..f8b05090232a 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -27,6 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | #include <drm/drmP.h> | 28 | #include <drm/drmP.h> |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "radeon_asic.h" | ||
30 | #include "evergreend.h" | 31 | #include "evergreend.h" |
31 | #include "evergreen_reg_safe.h" | 32 | #include "evergreen_reg_safe.h" |
32 | #include "cayman_reg_safe.h" | 33 | #include "cayman_reg_safe.h" |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index b69c8de35bd3..595a19736458 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "radeon_asic.h" | ||
31 | #include "r600d.h" | 32 | #include "r600d.h" |
32 | #include "r600_reg_safe.h" | 33 | #include "r600_reg_safe.h" |
33 | 34 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 1b0dcad916b0..44e0c5ed6418 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -66,7 +66,7 @@ | |||
66 | #include <linux/kref.h> | 66 | #include <linux/kref.h> |
67 | #include <linux/interval_tree.h> | 67 | #include <linux/interval_tree.h> |
68 | #include <linux/hashtable.h> | 68 | #include <linux/hashtable.h> |
69 | #include <linux/fence.h> | 69 | #include <linux/dma-fence.h> |
70 | 70 | ||
71 | #include <ttm/ttm_bo_api.h> | 71 | #include <ttm/ttm_bo_api.h> |
72 | #include <ttm/ttm_bo_driver.h> | 72 | #include <ttm/ttm_bo_driver.h> |
@@ -367,7 +367,7 @@ struct radeon_fence_driver { | |||
367 | }; | 367 | }; |
368 | 368 | ||
369 | struct radeon_fence { | 369 | struct radeon_fence { |
370 | struct fence base; | 370 | struct dma_fence base; |
371 | 371 | ||
372 | struct radeon_device *rdev; | 372 | struct radeon_device *rdev; |
373 | uint64_t seq; | 373 | uint64_t seq; |
@@ -746,7 +746,7 @@ struct radeon_flip_work { | |||
746 | uint64_t base; | 746 | uint64_t base; |
747 | struct drm_pending_vblank_event *event; | 747 | struct drm_pending_vblank_event *event; |
748 | struct radeon_bo *old_rbo; | 748 | struct radeon_bo *old_rbo; |
749 | struct fence *fence; | 749 | struct dma_fence *fence; |
750 | bool async; | 750 | bool async; |
751 | }; | 751 | }; |
752 | 752 | ||
@@ -2514,9 +2514,9 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v); | |||
2514 | /* | 2514 | /* |
2515 | * Cast helper | 2515 | * Cast helper |
2516 | */ | 2516 | */ |
2517 | extern const struct fence_ops radeon_fence_ops; | 2517 | extern const struct dma_fence_ops radeon_fence_ops; |
2518 | 2518 | ||
2519 | static inline struct radeon_fence *to_radeon_fence(struct fence *f) | 2519 | static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f) |
2520 | { | 2520 | { |
2521 | struct radeon_fence *__f = container_of(f, struct radeon_fence, base); | 2521 | struct radeon_fence *__f = container_of(f, struct radeon_fence, base); |
2522 | 2522 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 5df3ec73021b..4134759a6823 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | #include "atom.h" | 30 | #include "atom.h" |
31 | #include "atom-bits.h" | 31 | #include "atom-bits.h" |
32 | #include "radeon_asic.h" | ||
32 | 33 | ||
33 | extern void | 34 | extern void |
34 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, | 35 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, |
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 38e396dae0a9..c1135feb93c1 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <drm/radeon_drm.h> | 29 | #include <drm/radeon_drm.h> |
30 | #include "radeon_reg.h" | 30 | #include "radeon_reg.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_asic.h" | ||
32 | #include "atom.h" | 33 | #include "atom.h" |
33 | 34 | ||
34 | /* 10 khz */ | 35 | /* 10 khz */ |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index eb92aef46e3c..0be8d5cd7826 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1320,7 +1320,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1320 | for (i = 0; i < RADEON_NUM_RINGS; i++) { | 1320 | for (i = 0; i < RADEON_NUM_RINGS; i++) { |
1321 | rdev->ring[i].idx = i; | 1321 | rdev->ring[i].idx = i; |
1322 | } | 1322 | } |
1323 | rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); | 1323 | rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS); |
1324 | 1324 | ||
1325 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", | 1325 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", |
1326 | radeon_family_name[rdev->family], pdev->vendor, pdev->device, | 1326 | radeon_family_name[rdev->family], pdev->vendor, pdev->device, |
@@ -1651,7 +1651,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, | |||
1651 | 1651 | ||
1652 | radeon_suspend(rdev); | 1652 | radeon_suspend(rdev); |
1653 | radeon_hpd_fini(rdev); | 1653 | radeon_hpd_fini(rdev); |
1654 | /* evict remaining vram memory */ | 1654 | /* evict remaining vram memory |
1655 | * This second call to evict vram is to evict the gart page table | ||
1656 | * using the CPU. | ||
1657 | */ | ||
1655 | radeon_bo_evict_vram(rdev); | 1658 | radeon_bo_evict_vram(rdev); |
1656 | 1659 | ||
1657 | radeon_agp_suspend(rdev); | 1660 | radeon_agp_suspend(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index cdb8cb568c15..e7409e8a9f87 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -437,7 +437,7 @@ static void radeon_flip_work_func(struct work_struct *__work) | |||
437 | down_read(&rdev->exclusive_lock); | 437 | down_read(&rdev->exclusive_lock); |
438 | } | 438 | } |
439 | } else | 439 | } else |
440 | r = fence_wait(work->fence, false); | 440 | r = dma_fence_wait(work->fence, false); |
441 | 441 | ||
442 | if (r) | 442 | if (r) |
443 | DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); | 443 | DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); |
@@ -447,7 +447,7 @@ static void radeon_flip_work_func(struct work_struct *__work) | |||
447 | * confused about which BO the CRTC is scanning out | 447 | * confused about which BO the CRTC is scanning out |
448 | */ | 448 | */ |
449 | 449 | ||
450 | fence_put(work->fence); | 450 | dma_fence_put(work->fence); |
451 | work->fence = NULL; | 451 | work->fence = NULL; |
452 | } | 452 | } |
453 | 453 | ||
@@ -542,7 +542,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, | |||
542 | DRM_ERROR("failed to pin new rbo buffer before flip\n"); | 542 | DRM_ERROR("failed to pin new rbo buffer before flip\n"); |
543 | goto cleanup; | 543 | goto cleanup; |
544 | } | 544 | } |
545 | work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); | 545 | work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); |
546 | radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); | 546 | radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); |
547 | radeon_bo_unreserve(new_rbo); | 547 | radeon_bo_unreserve(new_rbo); |
548 | 548 | ||
@@ -617,7 +617,7 @@ pflip_cleanup: | |||
617 | 617 | ||
618 | cleanup: | 618 | cleanup: |
619 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); | 619 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); |
620 | fence_put(work->fence); | 620 | dma_fence_put(work->fence); |
621 | kfree(work); | 621 | kfree(work); |
622 | return r; | 622 | return r; |
623 | } | 623 | } |
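In the radeon_display.c hunks above the page-flip path now holds a dma_fence reference to the new buffer's exclusive fence, waits for it in the worker before programming the flip, and drops it on both the success and error paths. A hedged kernel-context sketch of that get/wait/put pattern (hypothetical helper name, error handling trimmed, and the buffer assumed to be reserved while the fence is sampled):

#include <linux/dma-fence.h>
#include <linux/reservation.h>

static long wait_for_new_scanout_buffer(struct reservation_object *resv)
{
	struct dma_fence *fence;
	long r = 0;

	/* take our own reference so the fence stays valid after unreserving */
	fence = dma_fence_get(reservation_object_get_excl(resv));
	if (fence) {
		r = dma_fence_wait(fence, false); /* uninterruptible wait */
		dma_fence_put(fence);
	}
	return r;
}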
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index de504ea29c06..6d1237d6e1b8 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c | |||
@@ -223,7 +223,8 @@ radeon_dp_mst_mode_valid(struct drm_connector *connector, | |||
223 | return MODE_OK; | 223 | return MODE_OK; |
224 | } | 224 | } |
225 | 225 | ||
226 | struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector) | 226 | static struct |
227 | drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector) | ||
227 | { | 228 | { |
228 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 229 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
229 | 230 | ||
@@ -341,7 +342,8 @@ const struct drm_dp_mst_topology_cbs mst_cbs = { | |||
341 | .hotplug = radeon_dp_mst_hotplug, | 342 | .hotplug = radeon_dp_mst_hotplug, |
342 | }; | 343 | }; |
343 | 344 | ||
344 | struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder) | 345 | static struct |
346 | radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder) | ||
345 | { | 347 | { |
346 | struct drm_device *dev = encoder->dev; | 348 | struct drm_device *dev = encoder->dev; |
347 | struct drm_connector *connector; | 349 | struct drm_connector *connector; |
@@ -597,7 +599,7 @@ static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = { | |||
597 | .commit = radeon_mst_encoder_commit, | 599 | .commit = radeon_mst_encoder_commit, |
598 | }; | 600 | }; |
599 | 601 | ||
600 | void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder) | 602 | static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder) |
601 | { | 603 | { |
602 | drm_encoder_cleanup(encoder); | 604 | drm_encoder_cleanup(encoder); |
603 | kfree(encoder); | 605 | kfree(encoder); |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 7ef075acde9c..ef09f0a63754 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -141,8 +141,10 @@ int radeon_fence_emit(struct radeon_device *rdev, | |||
141 | (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring]; | 141 | (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring]; |
142 | (*fence)->ring = ring; | 142 | (*fence)->ring = ring; |
143 | (*fence)->is_vm_update = false; | 143 | (*fence)->is_vm_update = false; |
144 | fence_init(&(*fence)->base, &radeon_fence_ops, | 144 | dma_fence_init(&(*fence)->base, &radeon_fence_ops, |
145 | &rdev->fence_queue.lock, rdev->fence_context + ring, seq); | 145 | &rdev->fence_queue.lock, |
146 | rdev->fence_context + ring, | ||
147 | seq); | ||
146 | radeon_fence_ring_emit(rdev, ring, *fence); | 148 | radeon_fence_ring_emit(rdev, ring, *fence); |
147 | trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); | 149 | trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); |
148 | radeon_fence_schedule_check(rdev, ring); | 150 | radeon_fence_schedule_check(rdev, ring); |
@@ -169,18 +171,18 @@ static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl | |||
169 | */ | 171 | */ |
170 | seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); | 172 | seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); |
171 | if (seq >= fence->seq) { | 173 | if (seq >= fence->seq) { |
172 | int ret = fence_signal_locked(&fence->base); | 174 | int ret = dma_fence_signal_locked(&fence->base); |
173 | 175 | ||
174 | if (!ret) | 176 | if (!ret) |
175 | FENCE_TRACE(&fence->base, "signaled from irq context\n"); | 177 | DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n"); |
176 | else | 178 | else |
177 | FENCE_TRACE(&fence->base, "was already signaled\n"); | 179 | DMA_FENCE_TRACE(&fence->base, "was already signaled\n"); |
178 | 180 | ||
179 | radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); | 181 | radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); |
180 | __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake); | 182 | __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake); |
181 | fence_put(&fence->base); | 183 | dma_fence_put(&fence->base); |
182 | } else | 184 | } else |
183 | FENCE_TRACE(&fence->base, "pending\n"); | 185 | DMA_FENCE_TRACE(&fence->base, "pending\n"); |
184 | return 0; | 186 | return 0; |
185 | } | 187 | } |
186 | 188 | ||
@@ -351,7 +353,7 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev, | |||
351 | return false; | 353 | return false; |
352 | } | 354 | } |
353 | 355 | ||
354 | static bool radeon_fence_is_signaled(struct fence *f) | 356 | static bool radeon_fence_is_signaled(struct dma_fence *f) |
355 | { | 357 | { |
356 | struct radeon_fence *fence = to_radeon_fence(f); | 358 | struct radeon_fence *fence = to_radeon_fence(f); |
357 | struct radeon_device *rdev = fence->rdev; | 359 | struct radeon_device *rdev = fence->rdev; |
@@ -381,7 +383,7 @@ static bool radeon_fence_is_signaled(struct fence *f) | |||
381 | * to fence_queue that checks if this fence is signaled, and if so it | 383 | * to fence_queue that checks if this fence is signaled, and if so it |
382 | * signals the fence and removes itself. | 384 | * signals the fence and removes itself. |
383 | */ | 385 | */ |
384 | static bool radeon_fence_enable_signaling(struct fence *f) | 386 | static bool radeon_fence_enable_signaling(struct dma_fence *f) |
385 | { | 387 | { |
386 | struct radeon_fence *fence = to_radeon_fence(f); | 388 | struct radeon_fence *fence = to_radeon_fence(f); |
387 | struct radeon_device *rdev = fence->rdev; | 389 | struct radeon_device *rdev = fence->rdev; |
@@ -414,9 +416,9 @@ static bool radeon_fence_enable_signaling(struct fence *f) | |||
414 | fence->fence_wake.private = NULL; | 416 | fence->fence_wake.private = NULL; |
415 | fence->fence_wake.func = radeon_fence_check_signaled; | 417 | fence->fence_wake.func = radeon_fence_check_signaled; |
416 | __add_wait_queue(&rdev->fence_queue, &fence->fence_wake); | 418 | __add_wait_queue(&rdev->fence_queue, &fence->fence_wake); |
417 | fence_get(f); | 419 | dma_fence_get(f); |
418 | 420 | ||
419 | FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); | 421 | DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); |
420 | return true; | 422 | return true; |
421 | } | 423 | } |
422 | 424 | ||
@@ -436,9 +438,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence) | |||
436 | if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { | 438 | if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { |
437 | int ret; | 439 | int ret; |
438 | 440 | ||
439 | ret = fence_signal(&fence->base); | 441 | ret = dma_fence_signal(&fence->base); |
440 | if (!ret) | 442 | if (!ret) |
441 | FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); | 443 | DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); |
442 | return true; | 444 | return true; |
443 | } | 445 | } |
444 | return false; | 446 | return false; |
@@ -552,7 +554,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo | |||
552 | * exclusive_lock is not held in that case. | 554 | * exclusive_lock is not held in that case. |
553 | */ | 555 | */ |
554 | if (WARN_ON_ONCE(!to_radeon_fence(&fence->base))) | 556 | if (WARN_ON_ONCE(!to_radeon_fence(&fence->base))) |
555 | return fence_wait(&fence->base, intr); | 557 | return dma_fence_wait(&fence->base, intr); |
556 | 558 | ||
557 | seq[fence->ring] = fence->seq; | 559 | seq[fence->ring] = fence->seq; |
558 | r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout); | 560 | r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout); |
@@ -560,9 +562,9 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo | |||
560 | return r; | 562 | return r; |
561 | } | 563 | } |
562 | 564 | ||
563 | r_sig = fence_signal(&fence->base); | 565 | r_sig = dma_fence_signal(&fence->base); |
564 | if (!r_sig) | 566 | if (!r_sig) |
565 | FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); | 567 | DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); |
566 | return r; | 568 | return r; |
567 | } | 569 | } |
568 | 570 | ||
@@ -697,7 +699,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) | |||
697 | */ | 699 | */ |
698 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) | 700 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) |
699 | { | 701 | { |
700 | fence_get(&fence->base); | 702 | dma_fence_get(&fence->base); |
701 | return fence; | 703 | return fence; |
702 | } | 704 | } |
703 | 705 | ||
@@ -714,7 +716,7 @@ void radeon_fence_unref(struct radeon_fence **fence) | |||
714 | 716 | ||
715 | *fence = NULL; | 717 | *fence = NULL; |
716 | if (tmp) { | 718 | if (tmp) { |
717 | fence_put(&tmp->base); | 719 | dma_fence_put(&tmp->base); |
718 | } | 720 | } |
719 | } | 721 | } |
720 | 722 | ||
@@ -1028,12 +1030,12 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev) | |||
1028 | #endif | 1030 | #endif |
1029 | } | 1031 | } |
1030 | 1032 | ||
1031 | static const char *radeon_fence_get_driver_name(struct fence *fence) | 1033 | static const char *radeon_fence_get_driver_name(struct dma_fence *fence) |
1032 | { | 1034 | { |
1033 | return "radeon"; | 1035 | return "radeon"; |
1034 | } | 1036 | } |
1035 | 1037 | ||
1036 | static const char *radeon_fence_get_timeline_name(struct fence *f) | 1038 | static const char *radeon_fence_get_timeline_name(struct dma_fence *f) |
1037 | { | 1039 | { |
1038 | struct radeon_fence *fence = to_radeon_fence(f); | 1040 | struct radeon_fence *fence = to_radeon_fence(f); |
1039 | switch (fence->ring) { | 1041 | switch (fence->ring) { |
@@ -1051,16 +1053,16 @@ static const char *radeon_fence_get_timeline_name(struct fence *f) | |||
1051 | 1053 | ||
1052 | static inline bool radeon_test_signaled(struct radeon_fence *fence) | 1054 | static inline bool radeon_test_signaled(struct radeon_fence *fence) |
1053 | { | 1055 | { |
1054 | return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); | 1056 | return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); |
1055 | } | 1057 | } |
1056 | 1058 | ||
1057 | struct radeon_wait_cb { | 1059 | struct radeon_wait_cb { |
1058 | struct fence_cb base; | 1060 | struct dma_fence_cb base; |
1059 | struct task_struct *task; | 1061 | struct task_struct *task; |
1060 | }; | 1062 | }; |
1061 | 1063 | ||
1062 | static void | 1064 | static void |
1063 | radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) | 1065 | radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
1064 | { | 1066 | { |
1065 | struct radeon_wait_cb *wait = | 1067 | struct radeon_wait_cb *wait = |
1066 | container_of(cb, struct radeon_wait_cb, base); | 1068 | container_of(cb, struct radeon_wait_cb, base); |
@@ -1068,7 +1070,7 @@ radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) | |||
1068 | wake_up_process(wait->task); | 1070 | wake_up_process(wait->task); |
1069 | } | 1071 | } |
1070 | 1072 | ||
1071 | static signed long radeon_fence_default_wait(struct fence *f, bool intr, | 1073 | static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr, |
1072 | signed long t) | 1074 | signed long t) |
1073 | { | 1075 | { |
1074 | struct radeon_fence *fence = to_radeon_fence(f); | 1076 | struct radeon_fence *fence = to_radeon_fence(f); |
@@ -1077,7 +1079,7 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr, | |||
1077 | 1079 | ||
1078 | cb.task = current; | 1080 | cb.task = current; |
1079 | 1081 | ||
1080 | if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) | 1082 | if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) |
1081 | return t; | 1083 | return t; |
1082 | 1084 | ||
1083 | while (t > 0) { | 1085 | while (t > 0) { |
@@ -1105,12 +1107,12 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr, | |||
1105 | } | 1107 | } |
1106 | 1108 | ||
1107 | __set_current_state(TASK_RUNNING); | 1109 | __set_current_state(TASK_RUNNING); |
1108 | fence_remove_callback(f, &cb.base); | 1110 | dma_fence_remove_callback(f, &cb.base); |
1109 | 1111 | ||
1110 | return t; | 1112 | return t; |
1111 | } | 1113 | } |
1112 | 1114 | ||
1113 | const struct fence_ops radeon_fence_ops = { | 1115 | const struct dma_fence_ops radeon_fence_ops = { |
1114 | .get_driver_name = radeon_fence_get_driver_name, | 1116 | .get_driver_name = radeon_fence_get_driver_name, |
1115 | .get_timeline_name = radeon_fence_get_timeline_name, | 1117 | .get_timeline_name = radeon_fence_get_timeline_name, |
1116 | .enable_signaling = radeon_fence_enable_signaling, | 1118 | .enable_signaling = radeon_fence_enable_signaling, |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 868c3ba2efaa..222a1fa41d7c 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <drm/drm_crtc_helper.h> | 27 | #include <drm/drm_crtc_helper.h> |
28 | #include <drm/radeon_drm.h> | 28 | #include <drm/radeon_drm.h> |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "radeon_asic.h" | ||
30 | #include "atom.h" | 31 | #include "atom.h" |
31 | #include <linux/backlight.h> | 32 | #include <linux/backlight.h> |
32 | #ifdef CONFIG_PMAC_BACKLIGHT | 33 | #ifdef CONFIG_PMAC_BACKLIGHT |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 4b6542538ff9..326ad068c15a 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -47,6 +47,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev); | |||
47 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); | 47 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); |
48 | static void radeon_pm_update_profile(struct radeon_device *rdev); | 48 | static void radeon_pm_update_profile(struct radeon_device *rdev); |
49 | static void radeon_pm_set_clocks(struct radeon_device *rdev); | 49 | static void radeon_pm_set_clocks(struct radeon_device *rdev); |
50 | static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev); | ||
50 | 51 | ||
51 | int radeon_pm_get_type_index(struct radeon_device *rdev, | 52 | int radeon_pm_get_type_index(struct radeon_device *rdev, |
52 | enum radeon_pm_state_type ps_type, | 53 | enum radeon_pm_state_type ps_type, |
@@ -79,6 +80,8 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev) | |||
79 | radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); | 80 | radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); |
80 | } | 81 | } |
81 | mutex_unlock(&rdev->pm.mutex); | 82 | mutex_unlock(&rdev->pm.mutex); |
83 | /* allow new DPM state to be picked */ | ||
84 | radeon_pm_compute_clocks_dpm(rdev); | ||
82 | } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 85 | } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
83 | if (rdev->pm.profile == PM_PROFILE_AUTO) { | 86 | if (rdev->pm.profile == PM_PROFILE_AUTO) { |
84 | mutex_lock(&rdev->pm.mutex); | 87 | mutex_lock(&rdev->pm.mutex); |
@@ -882,7 +885,8 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, | |||
882 | dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; | 885 | dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; |
883 | /* balanced states don't exist at the moment */ | 886 | /* balanced states don't exist at the moment */ |
884 | if (dpm_state == POWER_STATE_TYPE_BALANCED) | 887 | if (dpm_state == POWER_STATE_TYPE_BALANCED) |
885 | dpm_state = POWER_STATE_TYPE_PERFORMANCE; | 888 | dpm_state = rdev->pm.dpm.ac_power ? |
889 | POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY; | ||
886 | 890 | ||
887 | restart_search: | 891 | restart_search: |
888 | /* Pick the best power state based on current conditions */ | 892 | /* Pick the best power state based on current conditions */ |
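The radeon_pm.c hunks above make two related tweaks: an AC/DC ACPI event now kicks radeon_pm_compute_clocks_dpm() so a new DPM state can be picked immediately, and a requested "balanced" state (for which no tables exist) falls back to performance on AC power and to battery otherwise. A trivial standalone sketch of that fallback, with invented enum names:

#include <stdbool.h>
#include <stdio.h>

enum dpm_state { DPM_BATTERY, DPM_BALANCED, DPM_PERFORMANCE };

static enum dpm_state pick_dpm_state(enum dpm_state requested, bool ac_power)
{
	/* balanced states don't exist at the moment */
	if (requested == DPM_BALANCED)
		return ac_power ? DPM_PERFORMANCE : DPM_BATTERY;
	return requested;
}

int main(void)
{
	printf("%d\n", pick_dpm_state(DPM_BALANCED, true));  /* 2 = performance */
	printf("%d\n", pick_dpm_state(DPM_BALANCED, false)); /* 0 = battery */
	return 0;
}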
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index 02ac8a1de4ff..be5d7a38d3aa 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c | |||
@@ -92,7 +92,7 @@ int radeon_sync_resv(struct radeon_device *rdev, | |||
92 | bool shared) | 92 | bool shared) |
93 | { | 93 | { |
94 | struct reservation_object_list *flist; | 94 | struct reservation_object_list *flist; |
95 | struct fence *f; | 95 | struct dma_fence *f; |
96 | struct radeon_fence *fence; | 96 | struct radeon_fence *fence; |
97 | unsigned i; | 97 | unsigned i; |
98 | int r = 0; | 98 | int r = 0; |
@@ -103,7 +103,7 @@ int radeon_sync_resv(struct radeon_device *rdev, | |||
103 | if (fence && fence->rdev == rdev) | 103 | if (fence && fence->rdev == rdev) |
104 | radeon_sync_fence(sync, fence); | 104 | radeon_sync_fence(sync, fence); |
105 | else if (f) | 105 | else if (f) |
106 | r = fence_wait(f, true); | 106 | r = dma_fence_wait(f, true); |
107 | 107 | ||
108 | flist = reservation_object_get_list(resv); | 108 | flist = reservation_object_get_list(resv); |
109 | if (shared || !flist || r) | 109 | if (shared || !flist || r) |
@@ -116,7 +116,7 @@ int radeon_sync_resv(struct radeon_device *rdev, | |||
116 | if (fence && fence->rdev == rdev) | 116 | if (fence && fence->rdev == rdev) |
117 | radeon_sync_fence(sync, fence); | 117 | radeon_sync_fence(sync, fence); |
118 | else | 118 | else |
119 | r = fence_wait(f, true); | 119 | r = dma_fence_wait(f, true); |
120 | 120 | ||
121 | if (r) | 121 | if (r) |
122 | break; | 122 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 3de5e6e21662..0cf03ccbf0a7 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -863,6 +863,7 @@ static struct ttm_bo_driver radeon_bo_driver = { | |||
863 | .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, | 863 | .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, |
864 | .invalidate_caches = &radeon_invalidate_caches, | 864 | .invalidate_caches = &radeon_invalidate_caches, |
865 | .init_mem_type = &radeon_init_mem_type, | 865 | .init_mem_type = &radeon_init_mem_type, |
866 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
866 | .evict_flags = &radeon_evict_flags, | 867 | .evict_flags = &radeon_evict_flags, |
867 | .move = &radeon_bo_move, | 868 | .move = &radeon_bo_move, |
868 | .verify_access = &radeon_verify_access, | 869 | .verify_access = &radeon_verify_access, |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 0cd0e7bdee55..d34d1cf33895 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -467,7 +467,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
467 | { | 467 | { |
468 | int32_t *msg, msg_type, handle; | 468 | int32_t *msg, msg_type, handle; |
469 | unsigned img_size = 0; | 469 | unsigned img_size = 0; |
470 | struct fence *f; | 470 | struct dma_fence *f; |
471 | void *ptr; | 471 | void *ptr; |
472 | 472 | ||
473 | int i, r; | 473 | int i, r; |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index e402be8821c4..143280dc0851 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -7858,7 +7858,7 @@ static void si_program_aspm(struct radeon_device *rdev) | |||
7858 | } | 7858 | } |
7859 | } | 7859 | } |
7860 | 7860 | ||
7861 | int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev) | 7861 | static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev) |
7862 | { | 7862 | { |
7863 | unsigned i; | 7863 | unsigned i; |
7864 | 7864 | ||
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 8c8cbe837e61..6fe161192bb4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <drm/drm_crtc_helper.h> | 20 | #include <drm/drm_crtc_helper.h> |
21 | #include <drm/drm_fb_helper.h> | 21 | #include <drm/drm_fb_helper.h> |
22 | #include <drm/drm_gem_cma_helper.h> | 22 | #include <drm/drm_gem_cma_helper.h> |
23 | #include <drm/drm_of.h> | ||
23 | #include <linux/dma-mapping.h> | 24 | #include <linux/dma-mapping.h> |
24 | #include <linux/pm_runtime.h> | 25 | #include <linux/pm_runtime.h> |
25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
@@ -388,7 +389,7 @@ static void rockchip_add_endpoints(struct device *dev, | |||
388 | continue; | 389 | continue; |
389 | } | 390 | } |
390 | 391 | ||
391 | component_match_add(dev, match, compare_of, remote); | 392 | drm_of_component_match_add(dev, match, compare_of, remote); |
392 | of_node_put(remote); | 393 | of_node_put(remote); |
393 | } | 394 | } |
394 | } | 395 | } |
@@ -437,7 +438,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev) | |||
437 | } | 438 | } |
438 | 439 | ||
439 | of_node_put(iommu); | 440 | of_node_put(iommu); |
440 | component_match_add(dev, &match, compare_of, port->parent); | 441 | drm_of_component_match_add(dev, &match, compare_of, |
442 | port->parent); | ||
441 | of_node_put(port); | 443 | of_node_put(port); |
442 | } | 444 | } |
443 | 445 | ||
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 7087499969bc..6aead2013b62 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <drm/drm_crtc_helper.h> | 17 | #include <drm/drm_crtc_helper.h> |
18 | #include <drm/drm_gem_cma_helper.h> | 18 | #include <drm/drm_gem_cma_helper.h> |
19 | #include <drm/drm_fb_cma_helper.h> | 19 | #include <drm/drm_fb_cma_helper.h> |
20 | #include <drm/drm_of.h> | ||
20 | 21 | ||
21 | #include "sti_crtc.h" | 22 | #include "sti_crtc.h" |
22 | #include "sti_drv.h" | 23 | #include "sti_drv.h" |
@@ -424,8 +425,8 @@ static int sti_platform_probe(struct platform_device *pdev) | |||
424 | child_np = of_get_next_available_child(node, NULL); | 425 | child_np = of_get_next_available_child(node, NULL); |
425 | 426 | ||
426 | while (child_np) { | 427 | while (child_np) { |
427 | component_match_add(dev, &match, compare_of, child_np); | 428 | drm_of_component_match_add(dev, &match, compare_of, |
428 | of_node_put(child_np); | 429 | child_np); |
429 | child_np = of_get_next_available_child(node, child_np); | 430 | child_np = of_get_next_available_child(node, child_np); |
430 | } | 431 | } |
431 | 432 | ||
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 0da9862ad8ed..b3c4ad605e81 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <drm/drm_fb_cma_helper.h> | 18 | #include <drm/drm_fb_cma_helper.h> |
19 | #include <drm/drm_gem_cma_helper.h> | 19 | #include <drm/drm_gem_cma_helper.h> |
20 | #include <drm/drm_fb_helper.h> | 20 | #include <drm/drm_fb_helper.h> |
21 | #include <drm/drm_of.h> | ||
21 | 22 | ||
22 | #include "sun4i_crtc.h" | 23 | #include "sun4i_crtc.h" |
23 | #include "sun4i_drv.h" | 24 | #include "sun4i_drv.h" |
@@ -239,7 +240,7 @@ static int sun4i_drv_add_endpoints(struct device *dev, | |||
239 | /* Add current component */ | 240 | /* Add current component */ |
240 | DRM_DEBUG_DRIVER("Adding component %s\n", | 241 | DRM_DEBUG_DRIVER("Adding component %s\n", |
241 | of_node_full_name(node)); | 242 | of_node_full_name(node)); |
242 | component_match_add(dev, match, compare_of, node); | 243 | drm_of_component_match_add(dev, match, compare_of, node); |
243 | count++; | 244 | count++; |
244 | } | 245 | } |
245 | 246 | ||
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index 68e895021005..06a4c584f3cb 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/component.h> | 11 | #include <linux/component.h> |
12 | #include <linux/of_graph.h> | 12 | #include <linux/of_graph.h> |
13 | #include <drm/drm_of.h> | ||
13 | 14 | ||
14 | #include "tilcdc_drv.h" | 15 | #include "tilcdc_drv.h" |
15 | #include "tilcdc_external.h" | 16 | #include "tilcdc_external.h" |
@@ -160,7 +161,8 @@ int tilcdc_get_external_components(struct device *dev, | |||
160 | 161 | ||
161 | dev_dbg(dev, "Subdevice node '%s' found\n", node->name); | 162 | dev_dbg(dev, "Subdevice node '%s' found\n", node->name); |
162 | if (match) | 163 | if (match) |
163 | component_match_add(dev, match, dev_match_of, node); | 164 | drm_of_component_match_add(dev, match, dev_match_of, |
165 | node); | ||
164 | of_node_put(node); | 166 | of_node_put(node); |
165 | count++; | 167 | count++; |
166 | } | 168 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index fc6217dfe401..f6ff579e8918 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref) | |||
148 | BUG_ON(!list_empty(&bo->ddestroy)); | 148 | BUG_ON(!list_empty(&bo->ddestroy)); |
149 | ttm_tt_destroy(bo->ttm); | 149 | ttm_tt_destroy(bo->ttm); |
150 | atomic_dec(&bo->glob->bo_count); | 150 | atomic_dec(&bo->glob->bo_count); |
151 | fence_put(bo->moving); | 151 | dma_fence_put(bo->moving); |
152 | if (bo->resv == &bo->ttm_resv) | 152 | if (bo->resv == &bo->ttm_resv) |
153 | reservation_object_fini(&bo->ttm_resv); | 153 | reservation_object_fini(&bo->ttm_resv); |
154 | mutex_destroy(&bo->wu_mutex); | 154 | mutex_destroy(&bo->wu_mutex); |
@@ -426,20 +426,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) | |||
426 | static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) | 426 | static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) |
427 | { | 427 | { |
428 | struct reservation_object_list *fobj; | 428 | struct reservation_object_list *fobj; |
429 | struct fence *fence; | 429 | struct dma_fence *fence; |
430 | int i; | 430 | int i; |
431 | 431 | ||
432 | fobj = reservation_object_get_list(bo->resv); | 432 | fobj = reservation_object_get_list(bo->resv); |
433 | fence = reservation_object_get_excl(bo->resv); | 433 | fence = reservation_object_get_excl(bo->resv); |
434 | if (fence && !fence->ops->signaled) | 434 | if (fence && !fence->ops->signaled) |
435 | fence_enable_sw_signaling(fence); | 435 | dma_fence_enable_sw_signaling(fence); |
436 | 436 | ||
437 | for (i = 0; fobj && i < fobj->shared_count; ++i) { | 437 | for (i = 0; fobj && i < fobj->shared_count; ++i) { |
438 | fence = rcu_dereference_protected(fobj->shared[i], | 438 | fence = rcu_dereference_protected(fobj->shared[i], |
439 | reservation_object_held(bo->resv)); | 439 | reservation_object_held(bo->resv)); |
440 | 440 | ||
441 | if (!fence->ops->signaled) | 441 | if (!fence->ops->signaled) |
442 | fence_enable_sw_signaling(fence); | 442 | dma_fence_enable_sw_signaling(fence); |
443 | } | 443 | } |
444 | } | 444 | } |
445 | 445 | ||
@@ -717,6 +717,20 @@ out: | |||
717 | return ret; | 717 | return ret; |
718 | } | 718 | } |
719 | 719 | ||
720 | bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, | ||
721 | const struct ttm_place *place) | ||
722 | { | ||
723 | /* Don't evict this BO if it's outside of the | ||
724 | * requested placement range | ||
725 | */ | ||
726 | if (place->fpfn >= (bo->mem.start + bo->mem.size) || | ||
727 | (place->lpfn && place->lpfn <= bo->mem.start)) | ||
728 | return false; | ||
729 | |||
730 | return true; | ||
731 | } | ||
732 | EXPORT_SYMBOL(ttm_bo_eviction_valuable); | ||
733 | |||
720 | static int ttm_mem_evict_first(struct ttm_bo_device *bdev, | 734 | static int ttm_mem_evict_first(struct ttm_bo_device *bdev, |
721 | uint32_t mem_type, | 735 | uint32_t mem_type, |
722 | const struct ttm_place *place, | 736 | const struct ttm_place *place, |
@@ -731,21 +745,16 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, | |||
731 | spin_lock(&glob->lru_lock); | 745 | spin_lock(&glob->lru_lock); |
732 | list_for_each_entry(bo, &man->lru, lru) { | 746 | list_for_each_entry(bo, &man->lru, lru) { |
733 | ret = __ttm_bo_reserve(bo, false, true, NULL); | 747 | ret = __ttm_bo_reserve(bo, false, true, NULL); |
734 | if (!ret) { | 748 | if (ret) |
735 | if (place && (place->fpfn || place->lpfn)) { | 749 | continue; |
736 | /* Don't evict this BO if it's outside of the | ||
737 | * requested placement range | ||
738 | */ | ||
739 | if (place->fpfn >= (bo->mem.start + bo->mem.size) || | ||
740 | (place->lpfn && place->lpfn <= bo->mem.start)) { | ||
741 | __ttm_bo_unreserve(bo); | ||
742 | ret = -EBUSY; | ||
743 | continue; | ||
744 | } | ||
745 | } | ||
746 | 750 | ||
747 | break; | 751 | if (place && !bdev->driver->eviction_valuable(bo, place)) { |
752 | __ttm_bo_unreserve(bo); | ||
753 | ret = -EBUSY; | ||
754 | continue; | ||
748 | } | 755 | } |
756 | |||
757 | break; | ||
749 | } | 758 | } |
750 | 759 | ||
751 | if (ret) { | 760 | if (ret) { |
@@ -792,11 +801,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, | |||
792 | struct ttm_mem_type_manager *man, | 801 | struct ttm_mem_type_manager *man, |
793 | struct ttm_mem_reg *mem) | 802 | struct ttm_mem_reg *mem) |
794 | { | 803 | { |
795 | struct fence *fence; | 804 | struct dma_fence *fence; |
796 | int ret; | 805 | int ret; |
797 | 806 | ||
798 | spin_lock(&man->move_lock); | 807 | spin_lock(&man->move_lock); |
799 | fence = fence_get(man->move); | 808 | fence = dma_fence_get(man->move); |
800 | spin_unlock(&man->move_lock); | 809 | spin_unlock(&man->move_lock); |
801 | 810 | ||
802 | if (fence) { | 811 | if (fence) { |
@@ -806,7 +815,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, | |||
806 | if (unlikely(ret)) | 815 | if (unlikely(ret)) |
807 | return ret; | 816 | return ret; |
808 | 817 | ||
809 | fence_put(bo->moving); | 818 | dma_fence_put(bo->moving); |
810 | bo->moving = fence; | 819 | bo->moving = fence; |
811 | } | 820 | } |
812 | 821 | ||
@@ -1286,7 +1295,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, | |||
1286 | { | 1295 | { |
1287 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | 1296 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
1288 | struct ttm_bo_global *glob = bdev->glob; | 1297 | struct ttm_bo_global *glob = bdev->glob; |
1289 | struct fence *fence; | 1298 | struct dma_fence *fence; |
1290 | int ret; | 1299 | int ret; |
1291 | 1300 | ||
1292 | /* | 1301 | /* |
@@ -1309,12 +1318,12 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, | |||
1309 | spin_unlock(&glob->lru_lock); | 1318 | spin_unlock(&glob->lru_lock); |
1310 | 1319 | ||
1311 | spin_lock(&man->move_lock); | 1320 | spin_lock(&man->move_lock); |
1312 | fence = fence_get(man->move); | 1321 | fence = dma_fence_get(man->move); |
1313 | spin_unlock(&man->move_lock); | 1322 | spin_unlock(&man->move_lock); |
1314 | 1323 | ||
1315 | if (fence) { | 1324 | if (fence) { |
1316 | ret = fence_wait(fence, false); | 1325 | ret = dma_fence_wait(fence, false); |
1317 | fence_put(fence); | 1326 | dma_fence_put(fence); |
1318 | if (ret) { | 1327 | if (ret) { |
1319 | if (allow_errors) { | 1328 | if (allow_errors) { |
1320 | return ret; | 1329 | return ret; |
@@ -1343,7 +1352,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) | |||
1343 | mem_type); | 1352 | mem_type); |
1344 | return ret; | 1353 | return ret; |
1345 | } | 1354 | } |
1346 | fence_put(man->move); | 1355 | dma_fence_put(man->move); |
1347 | 1356 | ||
1348 | man->use_type = false; | 1357 | man->use_type = false; |
1349 | man->has_type = false; | 1358 | man->has_type = false; |
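The ttm_bo.c hunks above pull the fpfn/lpfn placement-range test out of ttm_mem_evict_first() and export it as ttm_bo_eviction_valuable(); the eviction loop now reaches that test through the new ttm_bo_driver.eviction_valuable callback instead of open-coding it. A minimal sketch of a driver-side override follows, assuming a hypothetical mydrv_bo wrapper with a pin_count field — only the call to the exported helper comes from the patch itself:

	/* Sketch: override .eviction_valuable but keep the generic range check.
	 * "mydrv_bo" and "pin_count" are illustrative, not part of the patch.
	 */
	#include <drm/ttm/ttm_bo_api.h>
	#include <drm/ttm/ttm_bo_driver.h>
	#include <drm/ttm/ttm_placement.h>

	struct mydrv_bo {
		struct ttm_buffer_object tbo;
		unsigned int pin_count;
	};

	static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
	{
		struct mydrv_bo *mbo = container_of(bo, struct mydrv_bo, tbo);

		/* Never evict BOs the driver has pinned. */
		if (mbo->pin_count)
			return false;

		/* Fall back to the generic fpfn/lpfn range check added above. */
		return ttm_bo_eviction_valuable(bo, place);
	}

	static struct ttm_bo_driver mydrv_bo_driver = {
		/* ...other callbacks... */
		.eviction_valuable = mydrv_eviction_valuable,
	};

Drivers with no special policy simply point the callback at ttm_bo_eviction_valuable itself, as the virtio and vmwgfx hunks further down do.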
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index bf6e21655c57..d0459b392e5e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -644,7 +644,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) | |||
644 | EXPORT_SYMBOL(ttm_bo_kunmap); | 644 | EXPORT_SYMBOL(ttm_bo_kunmap); |
645 | 645 | ||
646 | int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | 646 | int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, |
647 | struct fence *fence, | 647 | struct dma_fence *fence, |
648 | bool evict, | 648 | bool evict, |
649 | struct ttm_mem_reg *new_mem) | 649 | struct ttm_mem_reg *new_mem) |
650 | { | 650 | { |
@@ -674,8 +674,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
674 | * operation has completed. | 674 | * operation has completed. |
675 | */ | 675 | */ |
676 | 676 | ||
677 | fence_put(bo->moving); | 677 | dma_fence_put(bo->moving); |
678 | bo->moving = fence_get(fence); | 678 | bo->moving = dma_fence_get(fence); |
679 | 679 | ||
680 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); | 680 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); |
681 | if (ret) | 681 | if (ret) |
@@ -706,7 +706,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
706 | EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); | 706 | EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); |
707 | 707 | ||
708 | int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, | 708 | int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, |
709 | struct fence *fence, bool evict, | 709 | struct dma_fence *fence, bool evict, |
710 | struct ttm_mem_reg *new_mem) | 710 | struct ttm_mem_reg *new_mem) |
711 | { | 711 | { |
712 | struct ttm_bo_device *bdev = bo->bdev; | 712 | struct ttm_bo_device *bdev = bo->bdev; |
@@ -730,8 +730,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, | |||
730 | * operation has completed. | 730 | * operation has completed. |
731 | */ | 731 | */ |
732 | 732 | ||
733 | fence_put(bo->moving); | 733 | dma_fence_put(bo->moving); |
734 | bo->moving = fence_get(fence); | 734 | bo->moving = dma_fence_get(fence); |
735 | 735 | ||
736 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); | 736 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); |
737 | if (ret) | 737 | if (ret) |
@@ -761,16 +761,16 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, | |||
761 | */ | 761 | */ |
762 | 762 | ||
763 | spin_lock(&from->move_lock); | 763 | spin_lock(&from->move_lock); |
764 | if (!from->move || fence_is_later(fence, from->move)) { | 764 | if (!from->move || dma_fence_is_later(fence, from->move)) { |
765 | fence_put(from->move); | 765 | dma_fence_put(from->move); |
766 | from->move = fence_get(fence); | 766 | from->move = dma_fence_get(fence); |
767 | } | 767 | } |
768 | spin_unlock(&from->move_lock); | 768 | spin_unlock(&from->move_lock); |
769 | 769 | ||
770 | ttm_bo_free_old_node(bo); | 770 | ttm_bo_free_old_node(bo); |
771 | 771 | ||
772 | fence_put(bo->moving); | 772 | dma_fence_put(bo->moving); |
773 | bo->moving = fence_get(fence); | 773 | bo->moving = dma_fence_get(fence); |
774 | 774 | ||
775 | } else { | 775 | } else { |
776 | /** | 776 | /** |
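ttm_bo_pipeline_move() above keeps only the most recent move fence on the source memory-type manager: under move_lock it compares the new fence against from->move with dma_fence_is_later() and swaps references with dma_fence_put()/dma_fence_get(). The same bookkeeping in isolation, a condensed sketch with the manager passed in explicitly:

	/* Sketch of the "remember only the latest move fence" pattern used by
	 * ttm_bo_pipeline_move(); man->move is protected by man->move_lock.
	 */
	#include <linux/dma-fence.h>
	#include <linux/spinlock.h>

	static void remember_latest_move(struct ttm_mem_type_manager *man,
					 struct dma_fence *fence)
	{
		spin_lock(&man->move_lock);
		if (!man->move || dma_fence_is_later(fence, man->move)) {
			dma_fence_put(man->move);	/* drop the older fence */
			man->move = dma_fence_get(fence);
		}
		spin_unlock(&man->move_lock);
	}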
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index a6ed9d5e5167..4748aedc933a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | |||
54 | /* | 54 | /* |
55 | * Quick non-stalling check for idle. | 55 | * Quick non-stalling check for idle. |
56 | */ | 56 | */ |
57 | if (fence_is_signaled(bo->moving)) | 57 | if (dma_fence_is_signaled(bo->moving)) |
58 | goto out_clear; | 58 | goto out_clear; |
59 | 59 | ||
60 | /* | 60 | /* |
@@ -67,14 +67,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | |||
67 | goto out_unlock; | 67 | goto out_unlock; |
68 | 68 | ||
69 | up_read(&vma->vm_mm->mmap_sem); | 69 | up_read(&vma->vm_mm->mmap_sem); |
70 | (void) fence_wait(bo->moving, true); | 70 | (void) dma_fence_wait(bo->moving, true); |
71 | goto out_unlock; | 71 | goto out_unlock; |
72 | } | 72 | } |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * Ordinary wait. | 75 | * Ordinary wait. |
76 | */ | 76 | */ |
77 | ret = fence_wait(bo->moving, true); | 77 | ret = dma_fence_wait(bo->moving, true); |
78 | if (unlikely(ret != 0)) { | 78 | if (unlikely(ret != 0)) { |
79 | ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS : | 79 | ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS : |
80 | VM_FAULT_NOPAGE; | 80 | VM_FAULT_NOPAGE; |
@@ -82,7 +82,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | |||
82 | } | 82 | } |
83 | 83 | ||
84 | out_clear: | 84 | out_clear: |
85 | fence_put(bo->moving); | 85 | dma_fence_put(bo->moving); |
86 | bo->moving = NULL; | 86 | bo->moving = NULL; |
87 | 87 | ||
88 | out_unlock: | 88 | out_unlock: |
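The fault path above first does a non-blocking dma_fence_is_signaled() check on bo->moving and only falls back to dma_fence_wait() while the move is still in flight, dropping the fence reference once it has completed. A simplified sketch of that idle check, assuming the caller holds the bo reservation; the mapping of errors to VM_FAULT_* codes is left out:

	#include <linux/dma-fence.h>

	static int wait_for_bo_idle(struct ttm_buffer_object *bo)
	{
		int ret;

		/* Quick non-stalling check first. */
		if (!bo->moving || dma_fence_is_signaled(bo->moving))
			goto clear;

		ret = dma_fence_wait(bo->moving, true /* interruptible */);
		if (ret)
			return ret;	/* typically -ERESTARTSYS */

	clear:
		dma_fence_put(bo->moving);
		bo->moving = NULL;
		return 0;
	}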
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index a80717b35dc6..d35bc491e8de 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c | |||
@@ -179,7 +179,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, | |||
179 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); | 179 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); |
180 | 180 | ||
181 | void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, | 181 | void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, |
182 | struct list_head *list, struct fence *fence) | 182 | struct list_head *list, |
183 | struct dma_fence *fence) | ||
183 | { | 184 | { |
184 | struct ttm_validate_buffer *entry; | 185 | struct ttm_validate_buffer *entry; |
185 | struct ttm_buffer_object *bo; | 186 | struct ttm_buffer_object *bo; |
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index 5c57c1ffa1f9..488909a21ed8 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c | |||
@@ -28,56 +28,57 @@ | |||
28 | #define VGEM_FENCE_TIMEOUT (10*HZ) | 28 | #define VGEM_FENCE_TIMEOUT (10*HZ) |
29 | 29 | ||
30 | struct vgem_fence { | 30 | struct vgem_fence { |
31 | struct fence base; | 31 | struct dma_fence base; |
32 | struct spinlock lock; | 32 | struct spinlock lock; |
33 | struct timer_list timer; | 33 | struct timer_list timer; |
34 | }; | 34 | }; |
35 | 35 | ||
36 | static const char *vgem_fence_get_driver_name(struct fence *fence) | 36 | static const char *vgem_fence_get_driver_name(struct dma_fence *fence) |
37 | { | 37 | { |
38 | return "vgem"; | 38 | return "vgem"; |
39 | } | 39 | } |
40 | 40 | ||
41 | static const char *vgem_fence_get_timeline_name(struct fence *fence) | 41 | static const char *vgem_fence_get_timeline_name(struct dma_fence *fence) |
42 | { | 42 | { |
43 | return "unbound"; | 43 | return "unbound"; |
44 | } | 44 | } |
45 | 45 | ||
46 | static bool vgem_fence_signaled(struct fence *fence) | 46 | static bool vgem_fence_signaled(struct dma_fence *fence) |
47 | { | 47 | { |
48 | return false; | 48 | return false; |
49 | } | 49 | } |
50 | 50 | ||
51 | static bool vgem_fence_enable_signaling(struct fence *fence) | 51 | static bool vgem_fence_enable_signaling(struct dma_fence *fence) |
52 | { | 52 | { |
53 | return true; | 53 | return true; |
54 | } | 54 | } |
55 | 55 | ||
56 | static void vgem_fence_release(struct fence *base) | 56 | static void vgem_fence_release(struct dma_fence *base) |
57 | { | 57 | { |
58 | struct vgem_fence *fence = container_of(base, typeof(*fence), base); | 58 | struct vgem_fence *fence = container_of(base, typeof(*fence), base); |
59 | 59 | ||
60 | del_timer_sync(&fence->timer); | 60 | del_timer_sync(&fence->timer); |
61 | fence_free(&fence->base); | 61 | dma_fence_free(&fence->base); |
62 | } | 62 | } |
63 | 63 | ||
64 | static void vgem_fence_value_str(struct fence *fence, char *str, int size) | 64 | static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size) |
65 | { | 65 | { |
66 | snprintf(str, size, "%u", fence->seqno); | 66 | snprintf(str, size, "%u", fence->seqno); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void vgem_fence_timeline_value_str(struct fence *fence, char *str, | 69 | static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str, |
70 | int size) | 70 | int size) |
71 | { | 71 | { |
72 | snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0); | 72 | snprintf(str, size, "%u", |
73 | dma_fence_is_signaled(fence) ? fence->seqno : 0); | ||
73 | } | 74 | } |
74 | 75 | ||
75 | static const struct fence_ops vgem_fence_ops = { | 76 | static const struct dma_fence_ops vgem_fence_ops = { |
76 | .get_driver_name = vgem_fence_get_driver_name, | 77 | .get_driver_name = vgem_fence_get_driver_name, |
77 | .get_timeline_name = vgem_fence_get_timeline_name, | 78 | .get_timeline_name = vgem_fence_get_timeline_name, |
78 | .enable_signaling = vgem_fence_enable_signaling, | 79 | .enable_signaling = vgem_fence_enable_signaling, |
79 | .signaled = vgem_fence_signaled, | 80 | .signaled = vgem_fence_signaled, |
80 | .wait = fence_default_wait, | 81 | .wait = dma_fence_default_wait, |
81 | .release = vgem_fence_release, | 82 | .release = vgem_fence_release, |
82 | 83 | ||
83 | .fence_value_str = vgem_fence_value_str, | 84 | .fence_value_str = vgem_fence_value_str, |
@@ -88,11 +89,11 @@ static void vgem_fence_timeout(unsigned long data) | |||
88 | { | 89 | { |
89 | struct vgem_fence *fence = (struct vgem_fence *)data; | 90 | struct vgem_fence *fence = (struct vgem_fence *)data; |
90 | 91 | ||
91 | fence_signal(&fence->base); | 92 | dma_fence_signal(&fence->base); |
92 | } | 93 | } |
93 | 94 | ||
94 | static struct fence *vgem_fence_create(struct vgem_file *vfile, | 95 | static struct dma_fence *vgem_fence_create(struct vgem_file *vfile, |
95 | unsigned int flags) | 96 | unsigned int flags) |
96 | { | 97 | { |
97 | struct vgem_fence *fence; | 98 | struct vgem_fence *fence; |
98 | 99 | ||
@@ -101,8 +102,8 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile, | |||
101 | return NULL; | 102 | return NULL; |
102 | 103 | ||
103 | spin_lock_init(&fence->lock); | 104 | spin_lock_init(&fence->lock); |
104 | fence_init(&fence->base, &vgem_fence_ops, &fence->lock, | 105 | dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock, |
105 | fence_context_alloc(1), 1); | 106 | dma_fence_context_alloc(1), 1); |
106 | 107 | ||
107 | setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence); | 108 | setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence); |
108 | 109 | ||
@@ -157,7 +158,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, | |||
157 | struct vgem_file *vfile = file->driver_priv; | 158 | struct vgem_file *vfile = file->driver_priv; |
158 | struct reservation_object *resv; | 159 | struct reservation_object *resv; |
159 | struct drm_gem_object *obj; | 160 | struct drm_gem_object *obj; |
160 | struct fence *fence; | 161 | struct dma_fence *fence; |
161 | int ret; | 162 | int ret; |
162 | 163 | ||
163 | if (arg->flags & ~VGEM_FENCE_WRITE) | 164 | if (arg->flags & ~VGEM_FENCE_WRITE) |
@@ -209,8 +210,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, | |||
209 | } | 210 | } |
210 | err_fence: | 211 | err_fence: |
211 | if (ret) { | 212 | if (ret) { |
212 | fence_signal(fence); | 213 | dma_fence_signal(fence); |
213 | fence_put(fence); | 214 | dma_fence_put(fence); |
214 | } | 215 | } |
215 | err: | 216 | err: |
216 | drm_gem_object_unreference_unlocked(obj); | 217 | drm_gem_object_unreference_unlocked(obj); |
@@ -239,7 +240,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev, | |||
239 | { | 240 | { |
240 | struct vgem_file *vfile = file->driver_priv; | 241 | struct vgem_file *vfile = file->driver_priv; |
241 | struct drm_vgem_fence_signal *arg = data; | 242 | struct drm_vgem_fence_signal *arg = data; |
242 | struct fence *fence; | 243 | struct dma_fence *fence; |
243 | int ret = 0; | 244 | int ret = 0; |
244 | 245 | ||
245 | if (arg->flags) | 246 | if (arg->flags) |
@@ -253,11 +254,11 @@ int vgem_fence_signal_ioctl(struct drm_device *dev, | |||
253 | if (IS_ERR(fence)) | 254 | if (IS_ERR(fence)) |
254 | return PTR_ERR(fence); | 255 | return PTR_ERR(fence); |
255 | 256 | ||
256 | if (fence_is_signaled(fence)) | 257 | if (dma_fence_is_signaled(fence)) |
257 | ret = -ETIMEDOUT; | 258 | ret = -ETIMEDOUT; |
258 | 259 | ||
259 | fence_signal(fence); | 260 | dma_fence_signal(fence); |
260 | fence_put(fence); | 261 | dma_fence_put(fence); |
261 | return ret; | 262 | return ret; |
262 | } | 263 | } |
263 | 264 | ||
@@ -271,8 +272,8 @@ int vgem_fence_open(struct vgem_file *vfile) | |||
271 | 272 | ||
272 | static int __vgem_fence_idr_fini(int id, void *p, void *data) | 273 | static int __vgem_fence_idr_fini(int id, void *p, void *data) |
273 | { | 274 | { |
274 | fence_signal(p); | 275 | dma_fence_signal(p); |
275 | fence_put(p); | 276 | dma_fence_put(p); |
276 | return 0; | 277 | return 0; |
277 | } | 278 | } |
278 | 279 | ||
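After the rename, a software-only fence like vgem's implements struct dma_fence_ops with the dma_-prefixed helpers throughout. A minimal sketch of such a fence with illustrative "myfence" names; it relies on dma_fence_default_wait() and an externally provided lock, and omits vgem's timeout timer:

	#include <linux/dma-fence.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	static const char *myfence_get_driver_name(struct dma_fence *f)
	{
		return "mydrv";
	}

	static const char *myfence_get_timeline_name(struct dma_fence *f)
	{
		return "unbound";
	}

	static bool myfence_enable_signaling(struct dma_fence *f)
	{
		return true;	/* nothing to arm; signalling is driven by software */
	}

	static const struct dma_fence_ops myfence_ops = {
		.get_driver_name = myfence_get_driver_name,
		.get_timeline_name = myfence_get_timeline_name,
		.enable_signaling = myfence_enable_signaling,
		.wait = dma_fence_default_wait,
	};

	static struct dma_fence *myfence_create(spinlock_t *lock)
	{
		struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;

		/* New fence context, first sequence number. */
		dma_fence_init(f, &myfence_ops, lock, dma_fence_context_alloc(1), 1);
		return f;
	}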
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index ae59080d63d1..ec1ebdcfe80b 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h | |||
@@ -82,7 +82,7 @@ struct virtio_gpu_fence_driver { | |||
82 | }; | 82 | }; |
83 | 83 | ||
84 | struct virtio_gpu_fence { | 84 | struct virtio_gpu_fence { |
85 | struct fence f; | 85 | struct dma_fence f; |
86 | struct virtio_gpu_fence_driver *drv; | 86 | struct virtio_gpu_fence_driver *drv; |
87 | struct list_head node; | 87 | struct list_head node; |
88 | uint64_t seq; | 88 | uint64_t seq; |
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index f3f70fa8a4c7..23353521f903 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c | |||
@@ -26,22 +26,22 @@ | |||
26 | #include <drm/drmP.h> | 26 | #include <drm/drmP.h> |
27 | #include "virtgpu_drv.h" | 27 | #include "virtgpu_drv.h" |
28 | 28 | ||
29 | static const char *virtio_get_driver_name(struct fence *f) | 29 | static const char *virtio_get_driver_name(struct dma_fence *f) |
30 | { | 30 | { |
31 | return "virtio_gpu"; | 31 | return "virtio_gpu"; |
32 | } | 32 | } |
33 | 33 | ||
34 | static const char *virtio_get_timeline_name(struct fence *f) | 34 | static const char *virtio_get_timeline_name(struct dma_fence *f) |
35 | { | 35 | { |
36 | return "controlq"; | 36 | return "controlq"; |
37 | } | 37 | } |
38 | 38 | ||
39 | static bool virtio_enable_signaling(struct fence *f) | 39 | static bool virtio_enable_signaling(struct dma_fence *f) |
40 | { | 40 | { |
41 | return true; | 41 | return true; |
42 | } | 42 | } |
43 | 43 | ||
44 | static bool virtio_signaled(struct fence *f) | 44 | static bool virtio_signaled(struct dma_fence *f) |
45 | { | 45 | { |
46 | struct virtio_gpu_fence *fence = to_virtio_fence(f); | 46 | struct virtio_gpu_fence *fence = to_virtio_fence(f); |
47 | 47 | ||
@@ -50,26 +50,26 @@ static bool virtio_signaled(struct fence *f) | |||
50 | return false; | 50 | return false; |
51 | } | 51 | } |
52 | 52 | ||
53 | static void virtio_fence_value_str(struct fence *f, char *str, int size) | 53 | static void virtio_fence_value_str(struct dma_fence *f, char *str, int size) |
54 | { | 54 | { |
55 | struct virtio_gpu_fence *fence = to_virtio_fence(f); | 55 | struct virtio_gpu_fence *fence = to_virtio_fence(f); |
56 | 56 | ||
57 | snprintf(str, size, "%llu", fence->seq); | 57 | snprintf(str, size, "%llu", fence->seq); |
58 | } | 58 | } |
59 | 59 | ||
60 | static void virtio_timeline_value_str(struct fence *f, char *str, int size) | 60 | static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size) |
61 | { | 61 | { |
62 | struct virtio_gpu_fence *fence = to_virtio_fence(f); | 62 | struct virtio_gpu_fence *fence = to_virtio_fence(f); |
63 | 63 | ||
64 | snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); | 64 | snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); |
65 | } | 65 | } |
66 | 66 | ||
67 | static const struct fence_ops virtio_fence_ops = { | 67 | static const struct dma_fence_ops virtio_fence_ops = { |
68 | .get_driver_name = virtio_get_driver_name, | 68 | .get_driver_name = virtio_get_driver_name, |
69 | .get_timeline_name = virtio_get_timeline_name, | 69 | .get_timeline_name = virtio_get_timeline_name, |
70 | .enable_signaling = virtio_enable_signaling, | 70 | .enable_signaling = virtio_enable_signaling, |
71 | .signaled = virtio_signaled, | 71 | .signaled = virtio_signaled, |
72 | .wait = fence_default_wait, | 72 | .wait = dma_fence_default_wait, |
73 | .fence_value_str = virtio_fence_value_str, | 73 | .fence_value_str = virtio_fence_value_str, |
74 | .timeline_value_str = virtio_timeline_value_str, | 74 | .timeline_value_str = virtio_timeline_value_str, |
75 | }; | 75 | }; |
@@ -88,9 +88,9 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, | |||
88 | spin_lock_irqsave(&drv->lock, irq_flags); | 88 | spin_lock_irqsave(&drv->lock, irq_flags); |
89 | (*fence)->drv = drv; | 89 | (*fence)->drv = drv; |
90 | (*fence)->seq = ++drv->sync_seq; | 90 | (*fence)->seq = ++drv->sync_seq; |
91 | fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock, | 91 | dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock, |
92 | drv->context, (*fence)->seq); | 92 | drv->context, (*fence)->seq); |
93 | fence_get(&(*fence)->f); | 93 | dma_fence_get(&(*fence)->f); |
94 | list_add_tail(&(*fence)->node, &drv->fences); | 94 | list_add_tail(&(*fence)->node, &drv->fences); |
95 | spin_unlock_irqrestore(&drv->lock, irq_flags); | 95 | spin_unlock_irqrestore(&drv->lock, irq_flags); |
96 | 96 | ||
@@ -111,9 +111,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev, | |||
111 | list_for_each_entry_safe(fence, tmp, &drv->fences, node) { | 111 | list_for_each_entry_safe(fence, tmp, &drv->fences, node) { |
112 | if (last_seq < fence->seq) | 112 | if (last_seq < fence->seq) |
113 | continue; | 113 | continue; |
114 | fence_signal_locked(&fence->f); | 114 | dma_fence_signal_locked(&fence->f); |
115 | list_del(&fence->node); | 115 | list_del(&fence->node); |
116 | fence_put(&fence->f); | 116 | dma_fence_put(&fence->f); |
117 | } | 117 | } |
118 | spin_unlock_irqrestore(&drv->lock, irq_flags); | 118 | spin_unlock_irqrestore(&drv->lock, irq_flags); |
119 | } | 119 | } |
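virtio_gpu_fence_event_process() above signals every pending fence whose sequence number is at or below the last value the host reported, then drops it from the list. The same pattern in isolation, with an illustrative myfence wrapper mirroring virtio_gpu_fence; the lock passed in is the one the fences were initialised with, as dma_fence_signal_locked() requires:

	#include <linux/dma-fence.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct myfence {			/* illustrative, mirrors virtio_gpu_fence */
		struct dma_fence f;
		struct list_head node;
		u64 seq;
	};

	static void process_completed(struct list_head *pending, u64 last_seq,
				      spinlock_t *lock)
	{
		struct myfence *fence, *tmp;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		list_for_each_entry_safe(fence, tmp, pending, node) {
			if (last_seq < fence->seq)
				continue;	/* not completed yet */
			dma_fence_signal_locked(&fence->f);
			list_del(&fence->node);
			dma_fence_put(&fence->f);	/* drop the list's reference */
		}
		spin_unlock_irqrestore(lock, flags);
	}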
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 818478b4c4f0..61f3a963af95 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c | |||
@@ -172,7 +172,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, | |||
172 | /* fence the command bo */ | 172 | /* fence the command bo */ |
173 | virtio_gpu_unref_list(&validate_list); | 173 | virtio_gpu_unref_list(&validate_list); |
174 | drm_free_large(buflist); | 174 | drm_free_large(buflist); |
175 | fence_put(&fence->f); | 175 | dma_fence_put(&fence->f); |
176 | return 0; | 176 | return 0; |
177 | 177 | ||
178 | out_unresv: | 178 | out_unresv: |
@@ -298,7 +298,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, | |||
298 | drm_gem_object_release(obj); | 298 | drm_gem_object_release(obj); |
299 | if (vgdev->has_virgl_3d) { | 299 | if (vgdev->has_virgl_3d) { |
300 | virtio_gpu_unref_list(&validate_list); | 300 | virtio_gpu_unref_list(&validate_list); |
301 | fence_put(&fence->f); | 301 | dma_fence_put(&fence->f); |
302 | } | 302 | } |
303 | return ret; | 303 | return ret; |
304 | } | 304 | } |
@@ -309,13 +309,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, | |||
309 | 309 | ||
310 | if (vgdev->has_virgl_3d) { | 310 | if (vgdev->has_virgl_3d) { |
311 | virtio_gpu_unref_list(&validate_list); | 311 | virtio_gpu_unref_list(&validate_list); |
312 | fence_put(&fence->f); | 312 | dma_fence_put(&fence->f); |
313 | } | 313 | } |
314 | return 0; | 314 | return 0; |
315 | fail_unref: | 315 | fail_unref: |
316 | if (vgdev->has_virgl_3d) { | 316 | if (vgdev->has_virgl_3d) { |
317 | virtio_gpu_unref_list(&validate_list); | 317 | virtio_gpu_unref_list(&validate_list); |
318 | fence_put(&fence->f); | 318 | dma_fence_put(&fence->f); |
319 | } | 319 | } |
320 | //fail_obj: | 320 | //fail_obj: |
321 | // drm_gem_object_handle_unreference_unlocked(obj); | 321 | // drm_gem_object_handle_unreference_unlocked(obj); |
@@ -383,7 +383,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, | |||
383 | reservation_object_add_excl_fence(qobj->tbo.resv, | 383 | reservation_object_add_excl_fence(qobj->tbo.resv, |
384 | &fence->f); | 384 | &fence->f); |
385 | 385 | ||
386 | fence_put(&fence->f); | 386 | dma_fence_put(&fence->f); |
387 | out_unres: | 387 | out_unres: |
388 | virtio_gpu_object_unreserve(qobj); | 388 | virtio_gpu_object_unreserve(qobj); |
389 | out: | 389 | out: |
@@ -431,7 +431,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, | |||
431 | args->level, &box, &fence); | 431 | args->level, &box, &fence); |
432 | reservation_object_add_excl_fence(qobj->tbo.resv, | 432 | reservation_object_add_excl_fence(qobj->tbo.resv, |
433 | &fence->f); | 433 | &fence->f); |
434 | fence_put(&fence->f); | 434 | dma_fence_put(&fence->f); |
435 | } | 435 | } |
436 | 436 | ||
437 | out_unres: | 437 | out_unres: |
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 036b0fbae0fb..1235519853f4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c | |||
@@ -159,7 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) | |||
159 | virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func); | 159 | virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func); |
160 | virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func); | 160 | virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func); |
161 | 161 | ||
162 | vgdev->fence_drv.context = fence_context_alloc(1); | 162 | vgdev->fence_drv.context = dma_fence_context_alloc(1); |
163 | spin_lock_init(&vgdev->fence_drv.lock); | 163 | spin_lock_init(&vgdev->fence_drv.lock); |
164 | INIT_LIST_HEAD(&vgdev->fence_drv.fences); | 164 | INIT_LIST_HEAD(&vgdev->fence_drv.fences); |
165 | INIT_LIST_HEAD(&vgdev->cap_cache); | 165 | INIT_LIST_HEAD(&vgdev->cap_cache); |
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index ba28c0f6f28a..cb75f0663ba0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c | |||
@@ -152,7 +152,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, | |||
152 | if (!ret) { | 152 | if (!ret) { |
153 | reservation_object_add_excl_fence(bo->tbo.resv, | 153 | reservation_object_add_excl_fence(bo->tbo.resv, |
154 | &fence->f); | 154 | &fence->f); |
155 | fence_put(&fence->f); | 155 | dma_fence_put(&fence->f); |
156 | fence = NULL; | 156 | fence = NULL; |
157 | virtio_gpu_object_unreserve(bo); | 157 | virtio_gpu_object_unreserve(bo); |
158 | virtio_gpu_object_wait(bo, false); | 158 | virtio_gpu_object_wait(bo, false); |
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index 80482ac5f95d..4a1de9f81193 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c | |||
@@ -425,6 +425,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = { | |||
425 | .ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate, | 425 | .ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate, |
426 | .invalidate_caches = &virtio_gpu_invalidate_caches, | 426 | .invalidate_caches = &virtio_gpu_invalidate_caches, |
427 | .init_mem_type = &virtio_gpu_init_mem_type, | 427 | .init_mem_type = &virtio_gpu_init_mem_type, |
428 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
428 | .evict_flags = &virtio_gpu_evict_flags, | 429 | .evict_flags = &virtio_gpu_evict_flags, |
429 | .move = &virtio_gpu_bo_move, | 430 | .move = &virtio_gpu_bo_move, |
430 | .verify_access = &virtio_gpu_verify_access, | 431 | .verify_access = &virtio_gpu_verify_access, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 78b75ee3c931..c894a48a74a6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |||
@@ -849,6 +849,7 @@ struct ttm_bo_driver vmw_bo_driver = { | |||
849 | .ttm_tt_unpopulate = &vmw_ttm_unpopulate, | 849 | .ttm_tt_unpopulate = &vmw_ttm_unpopulate, |
850 | .invalidate_caches = vmw_invalidate_caches, | 850 | .invalidate_caches = vmw_invalidate_caches, |
851 | .init_mem_type = vmw_init_mem_type, | 851 | .init_mem_type = vmw_init_mem_type, |
852 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
852 | .evict_flags = vmw_evict_flags, | 853 | .evict_flags = vmw_evict_flags, |
853 | .move = NULL, | 854 | .move = NULL, |
854 | .verify_access = vmw_verify_access, | 855 | .verify_access = vmw_verify_access, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 26ac8e80a478..6541dd8b82dc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
@@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence) | |||
108 | * objects with actions attached to them. | 108 | * objects with actions attached to them. |
109 | */ | 109 | */ |
110 | 110 | ||
111 | static void vmw_fence_obj_destroy(struct fence *f) | 111 | static void vmw_fence_obj_destroy(struct dma_fence *f) |
112 | { | 112 | { |
113 | struct vmw_fence_obj *fence = | 113 | struct vmw_fence_obj *fence = |
114 | container_of(f, struct vmw_fence_obj, base); | 114 | container_of(f, struct vmw_fence_obj, base); |
@@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f) | |||
123 | fence->destroy(fence); | 123 | fence->destroy(fence); |
124 | } | 124 | } |
125 | 125 | ||
126 | static const char *vmw_fence_get_driver_name(struct fence *f) | 126 | static const char *vmw_fence_get_driver_name(struct dma_fence *f) |
127 | { | 127 | { |
128 | return "vmwgfx"; | 128 | return "vmwgfx"; |
129 | } | 129 | } |
130 | 130 | ||
131 | static const char *vmw_fence_get_timeline_name(struct fence *f) | 131 | static const char *vmw_fence_get_timeline_name(struct dma_fence *f) |
132 | { | 132 | { |
133 | return "svga"; | 133 | return "svga"; |
134 | } | 134 | } |
135 | 135 | ||
136 | static bool vmw_fence_enable_signaling(struct fence *f) | 136 | static bool vmw_fence_enable_signaling(struct dma_fence *f) |
137 | { | 137 | { |
138 | struct vmw_fence_obj *fence = | 138 | struct vmw_fence_obj *fence = |
139 | container_of(f, struct vmw_fence_obj, base); | 139 | container_of(f, struct vmw_fence_obj, base); |
@@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | struct vmwgfx_wait_cb { | 154 | struct vmwgfx_wait_cb { |
155 | struct fence_cb base; | 155 | struct dma_fence_cb base; |
156 | struct task_struct *task; | 156 | struct task_struct *task; |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static void | 159 | static void |
160 | vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb) | 160 | vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
161 | { | 161 | { |
162 | struct vmwgfx_wait_cb *wait = | 162 | struct vmwgfx_wait_cb *wait = |
163 | container_of(cb, struct vmwgfx_wait_cb, base); | 163 | container_of(cb, struct vmwgfx_wait_cb, base); |
@@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb) | |||
167 | 167 | ||
168 | static void __vmw_fences_update(struct vmw_fence_manager *fman); | 168 | static void __vmw_fences_update(struct vmw_fence_manager *fman); |
169 | 169 | ||
170 | static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout) | 170 | static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) |
171 | { | 171 | { |
172 | struct vmw_fence_obj *fence = | 172 | struct vmw_fence_obj *fence = |
173 | container_of(f, struct vmw_fence_obj, base); | 173 | container_of(f, struct vmw_fence_obj, base); |
@@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout) | |||
197 | 197 | ||
198 | while (ret > 0) { | 198 | while (ret > 0) { |
199 | __vmw_fences_update(fman); | 199 | __vmw_fences_update(fman); |
200 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags)) | 200 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) |
201 | break; | 201 | break; |
202 | 202 | ||
203 | if (intr) | 203 | if (intr) |
@@ -225,7 +225,7 @@ out: | |||
225 | return ret; | 225 | return ret; |
226 | } | 226 | } |
227 | 227 | ||
228 | static struct fence_ops vmw_fence_ops = { | 228 | static struct dma_fence_ops vmw_fence_ops = { |
229 | .get_driver_name = vmw_fence_get_driver_name, | 229 | .get_driver_name = vmw_fence_get_driver_name, |
230 | .get_timeline_name = vmw_fence_get_timeline_name, | 230 | .get_timeline_name = vmw_fence_get_timeline_name, |
231 | .enable_signaling = vmw_fence_enable_signaling, | 231 | .enable_signaling = vmw_fence_enable_signaling, |
@@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) | |||
298 | fman->event_fence_action_size = | 298 | fman->event_fence_action_size = |
299 | ttm_round_pot(sizeof(struct vmw_event_fence_action)); | 299 | ttm_round_pot(sizeof(struct vmw_event_fence_action)); |
300 | mutex_init(&fman->goal_irq_mutex); | 300 | mutex_init(&fman->goal_irq_mutex); |
301 | fman->ctx = fence_context_alloc(1); | 301 | fman->ctx = dma_fence_context_alloc(1); |
302 | 302 | ||
303 | return fman; | 303 | return fman; |
304 | } | 304 | } |
@@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman, | |||
326 | unsigned long irq_flags; | 326 | unsigned long irq_flags; |
327 | int ret = 0; | 327 | int ret = 0; |
328 | 328 | ||
329 | fence_init(&fence->base, &vmw_fence_ops, &fman->lock, | 329 | dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock, |
330 | fman->ctx, seqno); | 330 | fman->ctx, seqno); |
331 | INIT_LIST_HEAD(&fence->seq_passed_actions); | 331 | INIT_LIST_HEAD(&fence->seq_passed_actions); |
332 | fence->destroy = destroy; | 332 | fence->destroy = destroy; |
333 | 333 | ||
@@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) | |||
431 | u32 goal_seqno; | 431 | u32 goal_seqno; |
432 | u32 *fifo_mem; | 432 | u32 *fifo_mem; |
433 | 433 | ||
434 | if (fence_is_signaled_locked(&fence->base)) | 434 | if (dma_fence_is_signaled_locked(&fence->base)) |
435 | return false; | 435 | return false; |
436 | 436 | ||
437 | fifo_mem = fman->dev_priv->mmio_virt; | 437 | fifo_mem = fman->dev_priv->mmio_virt; |
@@ -459,7 +459,7 @@ rerun: | |||
459 | list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { | 459 | list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { |
460 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { | 460 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { |
461 | list_del_init(&fence->head); | 461 | list_del_init(&fence->head); |
462 | fence_signal_locked(&fence->base); | 462 | dma_fence_signal_locked(&fence->base); |
463 | INIT_LIST_HEAD(&action_list); | 463 | INIT_LIST_HEAD(&action_list); |
464 | list_splice_init(&fence->seq_passed_actions, | 464 | list_splice_init(&fence->seq_passed_actions, |
465 | &action_list); | 465 | &action_list); |
@@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) | |||
500 | { | 500 | { |
501 | struct vmw_fence_manager *fman = fman_from_fence(fence); | 501 | struct vmw_fence_manager *fman = fman_from_fence(fence); |
502 | 502 | ||
503 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) | 503 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) |
504 | return 1; | 504 | return 1; |
505 | 505 | ||
506 | vmw_fences_update(fman); | 506 | vmw_fences_update(fman); |
507 | 507 | ||
508 | return fence_is_signaled(&fence->base); | 508 | return dma_fence_is_signaled(&fence->base); |
509 | } | 509 | } |
510 | 510 | ||
511 | int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy, | 511 | int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy, |
512 | bool interruptible, unsigned long timeout) | 512 | bool interruptible, unsigned long timeout) |
513 | { | 513 | { |
514 | long ret = fence_wait_timeout(&fence->base, interruptible, timeout); | 514 | long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout); |
515 | 515 | ||
516 | if (likely(ret > 0)) | 516 | if (likely(ret > 0)) |
517 | return 0; | 517 | return 0; |
@@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence) | |||
530 | 530 | ||
531 | static void vmw_fence_destroy(struct vmw_fence_obj *fence) | 531 | static void vmw_fence_destroy(struct vmw_fence_obj *fence) |
532 | { | 532 | { |
533 | fence_free(&fence->base); | 533 | dma_fence_free(&fence->base); |
534 | } | 534 | } |
535 | 535 | ||
536 | int vmw_fence_create(struct vmw_fence_manager *fman, | 536 | int vmw_fence_create(struct vmw_fence_manager *fman, |
@@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) | |||
669 | struct vmw_fence_obj *fence = | 669 | struct vmw_fence_obj *fence = |
670 | list_entry(fman->fence_list.prev, struct vmw_fence_obj, | 670 | list_entry(fman->fence_list.prev, struct vmw_fence_obj, |
671 | head); | 671 | head); |
672 | fence_get(&fence->base); | 672 | dma_fence_get(&fence->base); |
673 | spin_unlock_irq(&fman->lock); | 673 | spin_unlock_irq(&fman->lock); |
674 | 674 | ||
675 | ret = vmw_fence_obj_wait(fence, false, false, | 675 | ret = vmw_fence_obj_wait(fence, false, false, |
@@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) | |||
677 | 677 | ||
678 | if (unlikely(ret != 0)) { | 678 | if (unlikely(ret != 0)) { |
679 | list_del_init(&fence->head); | 679 | list_del_init(&fence->head); |
680 | fence_signal(&fence->base); | 680 | dma_fence_signal(&fence->base); |
681 | INIT_LIST_HEAD(&action_list); | 681 | INIT_LIST_HEAD(&action_list); |
682 | list_splice_init(&fence->seq_passed_actions, | 682 | list_splice_init(&fence->seq_passed_actions, |
683 | &action_list); | 683 | &action_list); |
@@ -685,7 +685,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) | |||
685 | } | 685 | } |
686 | 686 | ||
687 | BUG_ON(!list_empty(&fence->head)); | 687 | BUG_ON(!list_empty(&fence->head)); |
688 | fence_put(&fence->base); | 688 | dma_fence_put(&fence->base); |
689 | spin_lock_irq(&fman->lock); | 689 | spin_lock_irq(&fman->lock); |
690 | } | 690 | } |
691 | spin_unlock_irq(&fman->lock); | 691 | spin_unlock_irq(&fman->lock); |
@@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, | |||
884 | spin_lock_irqsave(&fman->lock, irq_flags); | 884 | spin_lock_irqsave(&fman->lock, irq_flags); |
885 | 885 | ||
886 | fman->pending_actions[action->type]++; | 886 | fman->pending_actions[action->type]++; |
887 | if (fence_is_signaled_locked(&fence->base)) { | 887 | if (dma_fence_is_signaled_locked(&fence->base)) { |
888 | struct list_head action_list; | 888 | struct list_head action_list; |
889 | 889 | ||
890 | INIT_LIST_HEAD(&action_list); | 890 | INIT_LIST_HEAD(&action_list); |
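vmw_fence_wait() above implements the dma_fence_ops.wait hook itself: it registers a vmwgfx_wait_cb that wakes the sleeping task and re-tests DMA_FENCE_FLAG_SIGNALED_BIT between sleeps. A stripped-down sketch of that pattern, uninterruptible only and without the fence-manager seqno bookkeeping:

	#include <linux/dma-fence.h>
	#include <linux/sched.h>

	struct wake_cb {
		struct dma_fence_cb base;
		struct task_struct *task;
	};

	static void wake_cb_func(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		struct wake_cb *wcb = container_of(cb, struct wake_cb, base);

		wake_up_process(wcb->task);
	}

	static long wait_with_callback(struct dma_fence *fence, long timeout)
	{
		struct wake_cb cb = { .task = current };

		/* Non-zero return means the fence was already signalled. */
		if (dma_fence_add_callback(fence, &cb.base, wake_cb_func))
			return timeout;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ||
			    timeout <= 0)
				break;
			timeout = schedule_timeout(timeout);
		}
		__set_current_state(TASK_RUNNING);

		dma_fence_remove_callback(fence, &cb.base);
		return timeout;
	}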
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index 83ae301ee141..d9d85aa6ed20 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | |||
@@ -27,7 +27,7 @@ | |||
27 | 27 | ||
28 | #ifndef _VMWGFX_FENCE_H_ | 28 | #ifndef _VMWGFX_FENCE_H_ |
29 | 29 | ||
30 | #include <linux/fence.h> | 30 | #include <linux/dma-fence.h> |
31 | 31 | ||
32 | #define VMW_FENCE_WAIT_TIMEOUT (5*HZ) | 32 | #define VMW_FENCE_WAIT_TIMEOUT (5*HZ) |
33 | 33 | ||
@@ -52,7 +52,7 @@ struct vmw_fence_action { | |||
52 | }; | 52 | }; |
53 | 53 | ||
54 | struct vmw_fence_obj { | 54 | struct vmw_fence_obj { |
55 | struct fence base; | 55 | struct dma_fence base; |
56 | 56 | ||
57 | struct list_head head; | 57 | struct list_head head; |
58 | struct list_head seq_passed_actions; | 58 | struct list_head seq_passed_actions; |
@@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p) | |||
71 | 71 | ||
72 | *fence_p = NULL; | 72 | *fence_p = NULL; |
73 | if (fence) | 73 | if (fence) |
74 | fence_put(&fence->base); | 74 | dma_fence_put(&fence->base); |
75 | } | 75 | } |
76 | 76 | ||
77 | static inline struct vmw_fence_obj * | 77 | static inline struct vmw_fence_obj * |
78 | vmw_fence_obj_reference(struct vmw_fence_obj *fence) | 78 | vmw_fence_obj_reference(struct vmw_fence_obj *fence) |
79 | { | 79 | { |
80 | if (fence) | 80 | if (fence) |
81 | fence_get(&fence->base); | 81 | dma_fence_get(&fence->base); |
82 | return fence; | 82 | return fence; |
83 | } | 83 | } |
84 | 84 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 1a85fb2d4dc6..8e86d6d4141b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo, | |||
1454 | if (fence == NULL) { | 1454 | if (fence == NULL) { |
1455 | vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | 1455 | vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); |
1456 | reservation_object_add_excl_fence(bo->resv, &fence->base); | 1456 | reservation_object_add_excl_fence(bo->resv, &fence->base); |
1457 | fence_put(&fence->base); | 1457 | dma_fence_put(&fence->base); |
1458 | } else | 1458 | } else |
1459 | reservation_object_add_excl_fence(bo->resv, &fence->base); | 1459 | reservation_object_add_excl_fence(bo->resv, &fence->base); |
1460 | } | 1460 | } |
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h new file mode 100644 index 000000000000..3629b2734db6 --- /dev/null +++ b/include/drm/bridge/mhl.h | |||
@@ -0,0 +1,291 @@ | |||
1 | /* | ||
2 | * Defines for Mobile High-Definition Link (MHL) interface | ||
3 | * | ||
4 | * Copyright (C) 2015, Samsung Electronics, Co., Ltd. | ||
5 | * Andrzej Hajda <a.hajda@samsung.com> | ||
6 | * | ||
7 | * Based on MHL driver for Android devices. | ||
8 | * Copyright (C) 2013-2014 Silicon Image, Inc. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #ifndef __MHL_H__ | ||
16 | #define __MHL_H__ | ||
17 | |||
18 | /* Device Capabilities Registers */ | ||
19 | enum { | ||
20 | MHL_DCAP_DEV_STATE, | ||
21 | MHL_DCAP_MHL_VERSION, | ||
22 | MHL_DCAP_CAT, | ||
23 | MHL_DCAP_ADOPTER_ID_H, | ||
24 | MHL_DCAP_ADOPTER_ID_L, | ||
25 | MHL_DCAP_VID_LINK_MODE, | ||
26 | MHL_DCAP_AUD_LINK_MODE, | ||
27 | MHL_DCAP_VIDEO_TYPE, | ||
28 | MHL_DCAP_LOG_DEV_MAP, | ||
29 | MHL_DCAP_BANDWIDTH, | ||
30 | MHL_DCAP_FEATURE_FLAG, | ||
31 | MHL_DCAP_DEVICE_ID_H, | ||
32 | MHL_DCAP_DEVICE_ID_L, | ||
33 | MHL_DCAP_SCRATCHPAD_SIZE, | ||
34 | MHL_DCAP_INT_STAT_SIZE, | ||
35 | MHL_DCAP_RESERVED, | ||
36 | MHL_DCAP_SIZE | ||
37 | }; | ||
38 | |||
39 | #define MHL_DCAP_CAT_SINK 0x01 | ||
40 | #define MHL_DCAP_CAT_SOURCE 0x02 | ||
41 | #define MHL_DCAP_CAT_POWER 0x10 | ||
42 | #define MHL_DCAP_CAT_PLIM(x) ((x) << 5) | ||
43 | |||
44 | #define MHL_DCAP_VID_LINK_RGB444 0x01 | ||
45 | #define MHL_DCAP_VID_LINK_YCBCR444 0x02 | ||
46 | #define MHL_DCAP_VID_LINK_YCBCR422 0x04 | ||
47 | #define MHL_DCAP_VID_LINK_PPIXEL 0x08 | ||
48 | #define MHL_DCAP_VID_LINK_ISLANDS 0x10 | ||
49 | #define MHL_DCAP_VID_LINK_VGA 0x20 | ||
50 | #define MHL_DCAP_VID_LINK_16BPP 0x40 | ||
51 | |||
52 | #define MHL_DCAP_AUD_LINK_2CH 0x01 | ||
53 | #define MHL_DCAP_AUD_LINK_8CH 0x02 | ||
54 | |||
55 | #define MHL_DCAP_VT_GRAPHICS 0x00 | ||
56 | #define MHL_DCAP_VT_PHOTO 0x02 | ||
57 | #define MHL_DCAP_VT_CINEMA 0x04 | ||
58 | #define MHL_DCAP_VT_GAMES 0x08 | ||
59 | #define MHL_DCAP_SUPP_VT 0x80 | ||
60 | |||
61 | #define MHL_DCAP_LD_DISPLAY 0x01 | ||
62 | #define MHL_DCAP_LD_VIDEO 0x02 | ||
63 | #define MHL_DCAP_LD_AUDIO 0x04 | ||
64 | #define MHL_DCAP_LD_MEDIA 0x08 | ||
65 | #define MHL_DCAP_LD_TUNER 0x10 | ||
66 | #define MHL_DCAP_LD_RECORD 0x20 | ||
67 | #define MHL_DCAP_LD_SPEAKER 0x40 | ||
68 | #define MHL_DCAP_LD_GUI 0x80 | ||
69 | #define MHL_DCAP_LD_ALL 0xFF | ||
70 | |||
71 | #define MHL_DCAP_FEATURE_RCP_SUPPORT 0x01 | ||
72 | #define MHL_DCAP_FEATURE_RAP_SUPPORT 0x02 | ||
73 | #define MHL_DCAP_FEATURE_SP_SUPPORT 0x04 | ||
74 | #define MHL_DCAP_FEATURE_UCP_SEND_SUPPOR 0x08 | ||
75 | #define MHL_DCAP_FEATURE_UCP_RECV_SUPPORT 0x10 | ||
76 | #define MHL_DCAP_FEATURE_RBP_SUPPORT 0x40 | ||
77 | |||
78 | /* Extended Device Capabilities Registers */ | ||
79 | enum { | ||
80 | MHL_XDC_ECBUS_SPEEDS, | ||
81 | MHL_XDC_TMDS_SPEEDS, | ||
82 | MHL_XDC_ECBUS_ROLES, | ||
83 | MHL_XDC_LOG_DEV_MAPX, | ||
84 | MHL_XDC_SIZE | ||
85 | }; | ||
86 | |||
87 | #define MHL_XDC_ECBUS_S_075 0x01 | ||
88 | #define MHL_XDC_ECBUS_S_8BIT 0x02 | ||
89 | #define MHL_XDC_ECBUS_S_12BIT 0x04 | ||
90 | #define MHL_XDC_ECBUS_D_150 0x10 | ||
91 | #define MHL_XDC_ECBUS_D_8BIT 0x20 | ||
92 | |||
93 | #define MHL_XDC_TMDS_000 0x00 | ||
94 | #define MHL_XDC_TMDS_150 0x01 | ||
95 | #define MHL_XDC_TMDS_300 0x02 | ||
96 | #define MHL_XDC_TMDS_600 0x04 | ||
97 | |||
98 | /* MHL_XDC_ECBUS_ROLES flags */ | ||
99 | #define MHL_XDC_DEV_HOST 0x01 | ||
100 | #define MHL_XDC_DEV_DEVICE 0x02 | ||
101 | #define MHL_XDC_DEV_CHARGER 0x04 | ||
102 | #define MHL_XDC_HID_HOST 0x08 | ||
103 | #define MHL_XDC_HID_DEVICE 0x10 | ||
104 | |||
105 | /* MHL_XDC_LOG_DEV_MAPX flags */ | ||
106 | #define MHL_XDC_LD_PHONE 0x01 | ||
107 | |||
108 | /* Device Status Registers */ | ||
109 | enum { | ||
110 | MHL_DST_CONNECTED_RDY, | ||
111 | MHL_DST_LINK_MODE, | ||
112 | MHL_DST_VERSION, | ||
113 | MHL_DST_SIZE | ||
114 | }; | ||
115 | |||
116 | /* Offset of DEVSTAT registers */ | ||
117 | #define MHL_DST_OFFSET 0x30 | ||
118 | #define MHL_DST_REG(name) (MHL_DST_OFFSET + MHL_DST_##name) | ||
119 | |||
120 | #define MHL_DST_CONN_DCAP_RDY 0x01 | ||
121 | #define MHL_DST_CONN_XDEVCAPP_SUPP 0x02 | ||
122 | #define MHL_DST_CONN_POW_STAT 0x04 | ||
123 | #define MHL_DST_CONN_PLIM_STAT_MASK 0x38 | ||
124 | |||
125 | #define MHL_DST_LM_CLK_MODE_MASK 0x07 | ||
126 | #define MHL_DST_LM_CLK_MODE_PACKED_PIXEL 0x02 | ||
127 | #define MHL_DST_LM_CLK_MODE_NORMAL 0x03 | ||
128 | #define MHL_DST_LM_PATH_EN_MASK 0x08 | ||
129 | #define MHL_DST_LM_PATH_ENABLED 0x08 | ||
130 | #define MHL_DST_LM_PATH_DISABLED 0x00 | ||
131 | #define MHL_DST_LM_MUTED_MASK 0x10 | ||
132 | |||
133 | /* Extended Device Status Registers */ | ||
134 | enum { | ||
135 | MHL_XDS_CURR_ECBUS_MODE, | ||
136 | MHL_XDS_AVLINK_MODE_STATUS, | ||
137 | MHL_XDS_AVLINK_MODE_CONTROL, | ||
138 | MHL_XDS_MULTI_SINK_STATUS, | ||
139 | MHL_XDS_SIZE | ||
140 | }; | ||
141 | |||
142 | /* Offset of XDEVSTAT registers */ | ||
143 | #define MHL_XDS_OFFSET 0x90 | ||
144 | #define MHL_XDS_REG(name) (MHL_XDS_OFFSET + MHL_XDS_##name) | ||
145 | |||
146 | /* MHL_XDS_REG_CURR_ECBUS_MODE flags */ | ||
147 | #define MHL_XDS_SLOT_MODE_8BIT 0x00 | ||
148 | #define MHL_XDS_SLOT_MODE_6BIT 0x01 | ||
149 | #define MHL_XDS_ECBUS_S 0x04 | ||
150 | #define MHL_XDS_ECBUS_D 0x08 | ||
151 | |||
152 | #define MHL_XDS_LINK_CLOCK_75MHZ 0x00 | ||
153 | #define MHL_XDS_LINK_CLOCK_150MHZ 0x10 | ||
154 | #define MHL_XDS_LINK_CLOCK_300MHZ 0x20 | ||
155 | #define MHL_XDS_LINK_CLOCK_600MHZ 0x30 | ||
156 | |||
157 | #define MHL_XDS_LINK_STATUS_NO_SIGNAL 0x00 | ||
158 | #define MHL_XDS_LINK_STATUS_CRU_LOCKED 0x01 | ||
159 | #define MHL_XDS_LINK_STATUS_TMDS_NORMAL 0x02 | ||
160 | #define MHL_XDS_LINK_STATUS_TMDS_RESERVED 0x03 | ||
161 | |||
162 | #define MHL_XDS_LINK_RATE_1_5_GBPS 0x00 | ||
163 | #define MHL_XDS_LINK_RATE_3_0_GBPS 0x01 | ||
164 | #define MHL_XDS_LINK_RATE_6_0_GBPS 0x02 | ||
165 | #define MHL_XDS_ATT_CAPABLE 0x08 | ||
166 | |||
167 | #define MHL_XDS_SINK_STATUS_1_HPD_LOW 0x00 | ||
168 | #define MHL_XDS_SINK_STATUS_1_HPD_HIGH 0x01 | ||
169 | #define MHL_XDS_SINK_STATUS_2_HPD_LOW 0x00 | ||
170 | #define MHL_XDS_SINK_STATUS_2_HPD_HIGH 0x04 | ||
171 | #define MHL_XDS_SINK_STATUS_3_HPD_LOW 0x00 | ||
172 | #define MHL_XDS_SINK_STATUS_3_HPD_HIGH 0x10 | ||
173 | #define MHL_XDS_SINK_STATUS_4_HPD_LOW 0x00 | ||
174 | #define MHL_XDS_SINK_STATUS_4_HPD_HIGH 0x40 | ||
175 | |||
176 | /* Interrupt Registers */ | ||
177 | enum { | ||
178 | MHL_INT_RCHANGE, | ||
179 | MHL_INT_DCHANGE, | ||
180 | MHL_INT_SIZE | ||
181 | }; | ||
182 | |||
183 | /* Offset of DEVSTAT registers */ | ||
184 | #define MHL_INT_OFFSET 0x20 | ||
185 | #define MHL_INT_REG(name) (MHL_INT_OFFSET + MHL_INT_##name) | ||
186 | |||
187 | #define MHL_INT_RC_DCAP_CHG 0x01 | ||
188 | #define MHL_INT_RC_DSCR_CHG 0x02 | ||
189 | #define MHL_INT_RC_REQ_WRT 0x04 | ||
190 | #define MHL_INT_RC_GRT_WRT 0x08 | ||
191 | #define MHL_INT_RC_3D_REQ 0x10 | ||
192 | #define MHL_INT_RC_FEAT_REQ 0x20 | ||
193 | #define MHL_INT_RC_FEAT_COMPLETE 0x40 | ||
194 | |||
195 | #define MHL_INT_DC_EDID_CHG 0x02 | ||
196 | |||
197 | enum { | ||
198 | MHL_ACK = 0x33, /* Command or Data byte acknowledge */ | ||
199 | MHL_NACK = 0x34, /* Command or Data byte not acknowledge */ | ||
200 | MHL_ABORT = 0x35, /* Transaction abort */ | ||
201 | MHL_WRITE_STAT = 0xe0, /* Write one status register */ | ||
202 | MHL_SET_INT = 0x60, /* Write one interrupt register */ | ||
203 | MHL_READ_DEVCAP_REG = 0x61, /* Read one register */ | ||
204 | MHL_GET_STATE = 0x62, /* Read CBUS revision level from follower */ | ||
205 | MHL_GET_VENDOR_ID = 0x63, /* Read vendor ID value from follower */ | ||
206 | MHL_SET_HPD = 0x64, /* Set Hot Plug Detect in follower */ | ||
207 | MHL_CLR_HPD = 0x65, /* Clear Hot Plug Detect in follower */ | ||
208 | MHL_SET_CAP_ID = 0x66, /* Set Capture ID for downstream device */ | ||
209 | MHL_GET_CAP_ID = 0x67, /* Get Capture ID from downstream device */ | ||
210 | MHL_MSC_MSG = 0x68, /* VS command to send RCP sub-commands */ | ||
211 | MHL_GET_SC1_ERRORCODE = 0x69, /* Get Vendor-Specific error code */ | ||
212 | MHL_GET_DDC_ERRORCODE = 0x6A, /* Get DDC channel command error code */ | ||
213 | MHL_GET_MSC_ERRORCODE = 0x6B, /* Get MSC command error code */ | ||
214 | MHL_WRITE_BURST = 0x6C, /* Write 1-16 bytes to responder's scratchpad */ | ||
215 | MHL_GET_SC3_ERRORCODE = 0x6D, /* Get channel 3 command error code */ | ||
216 | MHL_WRITE_XSTAT = 0x70, /* Write one extended status register */ | ||
217 | MHL_READ_XDEVCAP_REG = 0x71, /* Read one extended devcap register */ | ||
218 | /* let the rest of these float, they are software specific */ | ||
219 | MHL_READ_EDID_BLOCK, | ||
220 | MHL_SEND_3D_REQ_OR_FEAT_REQ, | ||
221 | MHL_READ_DEVCAP, | ||
222 | MHL_READ_XDEVCAP | ||
223 | }; | ||
224 | |||
225 | /* MSC message types */ | ||
226 | enum { | ||
227 | MHL_MSC_MSG_RCP = 0x10, /* RCP sub-command */ | ||
228 | MHL_MSC_MSG_RCPK = 0x11, /* RCP Acknowledge sub-command */ | ||
229 | MHL_MSC_MSG_RCPE = 0x12, /* RCP Error sub-command */ | ||
230 | MHL_MSC_MSG_RAP = 0x20, /* Mode Change Warning sub-command */ | ||
231 | MHL_MSC_MSG_RAPK = 0x21, /* MCW Acknowledge sub-command */ | ||
232 | MHL_MSC_MSG_RBP = 0x22, /* Remote Button Protocol sub-command */ | ||
233 | MHL_MSC_MSG_RBPK = 0x23, /* RBP Acknowledge sub-command */ | ||
234 | MHL_MSC_MSG_RBPE = 0x24, /* RBP Error sub-command */ | ||
235 | MHL_MSC_MSG_UCP = 0x30, /* UCP sub-command */ | ||
236 | MHL_MSC_MSG_UCPK = 0x31, /* UCP Acknowledge sub-command */ | ||
237 | MHL_MSC_MSG_UCPE = 0x32, /* UCP Error sub-command */ | ||
238 | MHL_MSC_MSG_RUSB = 0x40, /* Request USB host role */ | ||
239 | MHL_MSC_MSG_RUSBK = 0x41, /* Acknowledge request for USB host role */ | ||
240 | MHL_MSC_MSG_RHID = 0x42, /* Request HID host role */ | ||
241 | MHL_MSC_MSG_RHIDK = 0x43, /* Acknowledge request for HID host role */ | ||
242 | MHL_MSC_MSG_ATT = 0x50, /* Request attention sub-command */ | ||
243 | MHL_MSC_MSG_ATTK = 0x51, /* ATT Acknowledge sub-command */ | ||
244 | MHL_MSC_MSG_BIST_TRIGGER = 0x60, | ||
245 | MHL_MSC_MSG_BIST_REQUEST_STAT = 0x61, | ||
246 | MHL_MSC_MSG_BIST_READY = 0x62, | ||
247 | MHL_MSC_MSG_BIST_STOP = 0x63, | ||
248 | }; | ||
249 | |||
250 | /* RAP action codes */ | ||
251 | #define MHL_RAP_POLL 0x00 /* Just do an ack */ | ||
252 | #define MHL_RAP_CONTENT_ON 0x10 /* Turn content stream ON */ | ||
253 | #define MHL_RAP_CONTENT_OFF 0x11 /* Turn content stream OFF */ | ||
254 | #define MHL_RAP_CBUS_MODE_DOWN 0x20 | ||
255 | #define MHL_RAP_CBUS_MODE_UP 0x21 | ||
256 | |||
257 | /* RAPK status codes */ | ||
258 | #define MHL_RAPK_NO_ERR 0x00 /* RAP action recognized & supported */ | ||
259 | #define MHL_RAPK_UNRECOGNIZED 0x01 /* Unknown RAP action code received */ | ||
260 | #define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */ | ||
261 | #define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */ | ||
262 | |||
263 | /* | ||
264 | * Error status codes for RCPE messages | ||
265 | */ | ||
266 | /* No error. (Not allowed in RCPE messages) */ | ||
267 | #define MHL_RCPE_STATUS_NO_ERROR 0x00 | ||
268 | /* Unsupported/unrecognized key code */ | ||
269 | #define MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 | ||
270 | /* Responder busy. Initiator may retry message */ | ||
271 | #define MHL_RCPE_STATUS_BUSY 0x02 | ||
272 | |||
273 | /* | ||
274 | * Error status codes for RBPE messages | ||
275 | */ | ||
276 | /* No error. (Not allowed in RBPE messages) */ | ||
277 | #define MHL_RBPE_STATUS_NO_ERROR 0x00 | ||
278 | /* Unsupported/unrecognized button code */ | ||
279 | #define MHL_RBPE_STATUS_INEFFECTIVE_BUTTON_CODE 0x01 | ||
280 | /* Responder busy. Initiator may retry message */ | ||
281 | #define MHL_RBPE_STATUS_BUSY 0x02 | ||
282 | |||
283 | /* | ||
284 | * Error status codes for UCPE messages | ||
285 | */ | ||
286 | /* No error. (Not allowed in UCPE messages) */ | ||
287 | #define MHL_UCPE_STATUS_NO_ERROR 0x00 | ||
288 | /* Unsupported/unrecognized key code */ | ||
289 | #define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 | ||
290 | |||
291 | #endif /* __MHL_H__ */ | ||
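The new header above encodes the MHL register map as enum indices plus per-block offset macros (MHL_DST_REG, MHL_XDS_REG, MHL_INT_REG). A short sketch of how a bridge driver might use them; mhl_read_reg() is a hypothetical accessor, not part of the header:

	#include <drm/bridge/mhl.h>

	static bool sink_path_enabled(int (*mhl_read_reg)(unsigned int offset))
	{
		/* MHL_DST_REG(LINK_MODE) expands to 0x30 + MHL_DST_LINK_MODE. */
		int status = mhl_read_reg(MHL_DST_REG(LINK_MODE));

		if (status < 0)
			return false;

		return (status & MHL_DST_LM_PATH_EN_MASK) == MHL_DST_LM_PATH_ENABLED;
	}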
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 672644031bd5..e336e3901876 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -57,7 +57,7 @@ | |||
57 | #include <linux/types.h> | 57 | #include <linux/types.h> |
58 | #include <linux/vmalloc.h> | 58 | #include <linux/vmalloc.h> |
59 | #include <linux/workqueue.h> | 59 | #include <linux/workqueue.h> |
60 | #include <linux/fence.h> | 60 | #include <linux/dma-fence.h> |
61 | 61 | ||
62 | #include <asm/mman.h> | 62 | #include <asm/mman.h> |
63 | #include <asm/pgalloc.h> | 63 | #include <asm/pgalloc.h> |
@@ -362,7 +362,7 @@ struct drm_ioctl_desc { | |||
362 | struct drm_pending_event { | 362 | struct drm_pending_event { |
363 | struct completion *completion; | 363 | struct completion *completion; |
364 | struct drm_event *event; | 364 | struct drm_event *event; |
365 | struct fence *fence; | 365 | struct dma_fence *fence; |
366 | struct list_head link; | 366 | struct list_head link; |
367 | struct list_head pending_link; | 367 | struct list_head pending_link; |
368 | struct drm_file *file_priv; | 368 | struct drm_file *file_priv; |
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h index fd351924e1c5..13221cf9b3eb 100644 --- a/include/drm/drm_blend.h +++ b/include/drm/drm_blend.h | |||
@@ -52,8 +52,6 @@ static inline bool drm_rotation_90_or_270(unsigned int rotation) | |||
52 | return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270); | 52 | return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270); |
53 | } | 53 | } |
54 | 54 | ||
55 | struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, | ||
56 | unsigned int supported_rotations); | ||
57 | int drm_plane_create_rotation_property(struct drm_plane *plane, | 55 | int drm_plane_create_rotation_property(struct drm_plane *plane, |
58 | unsigned int rotation, | 56 | unsigned int rotation, |
59 | unsigned int supported_rotations); | 57 | unsigned int supported_rotations); |
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 284c1b3aec10..fa1aa214c8ea 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -54,7 +54,7 @@ struct drm_mode_set; | |||
54 | struct drm_file; | 54 | struct drm_file; |
55 | struct drm_clip_rect; | 55 | struct drm_clip_rect; |
56 | struct device_node; | 56 | struct device_node; |
57 | struct fence; | 57 | struct dma_fence; |
58 | struct edid; | 58 | struct edid; |
59 | 59 | ||
60 | static inline int64_t U642I64(uint64_t val) | 60 | static inline int64_t U642I64(uint64_t val) |
@@ -1156,11 +1156,6 @@ struct drm_mode_config { | |||
1156 | */ | 1156 | */ |
1157 | struct drm_property *plane_type_property; | 1157 | struct drm_property *plane_type_property; |
1158 | /** | 1158 | /** |
1159 | * @rotation_property: Optional property for planes or CRTCs to specifiy | ||
1160 | * rotation. | ||
1161 | */ | ||
1162 | struct drm_property *rotation_property; | ||
1163 | /** | ||
1164 | * @prop_src_x: Default atomic plane property for the plane source | 1159 | * @prop_src_x: Default atomic plane property for the plane source |
1165 | * position in the connected &drm_framebuffer. | 1160 | * position in the connected &drm_framebuffer. |
1166 | */ | 1161 | */ |
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index 3fd87b386ed7..26a64805cc15 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/of_graph.h> | 4 | #include <linux/of_graph.h> |
5 | 5 | ||
6 | struct component_master_ops; | 6 | struct component_master_ops; |
7 | struct component_match; | ||
7 | struct device; | 8 | struct device; |
8 | struct drm_device; | 9 | struct drm_device; |
9 | struct drm_encoder; | 10 | struct drm_encoder; |
@@ -12,6 +13,10 @@ struct device_node; | |||
12 | #ifdef CONFIG_OF | 13 | #ifdef CONFIG_OF |
13 | extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, | 14 | extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, |
14 | struct device_node *port); | 15 | struct device_node *port); |
16 | extern void drm_of_component_match_add(struct device *master, | ||
17 | struct component_match **matchptr, | ||
18 | int (*compare)(struct device *, void *), | ||
19 | struct device_node *node); | ||
15 | extern int drm_of_component_probe(struct device *dev, | 20 | extern int drm_of_component_probe(struct device *dev, |
16 | int (*compare_of)(struct device *, void *), | 21 | int (*compare_of)(struct device *, void *), |
17 | const struct component_master_ops *m_ops); | 22 | const struct component_master_ops *m_ops); |
@@ -25,6 +30,14 @@ static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, | |||
25 | return 0; | 30 | return 0; |
26 | } | 31 | } |
27 | 32 | ||
33 | static inline void | ||
34 | drm_of_component_match_add(struct device *master, | ||
35 | struct component_match **matchptr, | ||
36 | int (*compare)(struct device *, void *), | ||
37 | struct device_node *node) | ||
38 | { | ||
39 | } | ||
40 | |||
28 | static inline int | 41 | static inline int |
29 | drm_of_component_probe(struct device *dev, | 42 | drm_of_component_probe(struct device *dev, |
30 | int (*compare_of)(struct device *, void *), | 43 | int (*compare_of)(struct device *, void *), |
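For orientation (not part of this diff): the new drm_of_component_match_add() wrapper is meant to replace open-coded component_match_add() calls in OF-based DRM masters. A minimal sketch under that assumption; compare_of(), example_master_ops and the child-node walk are placeholders for a real driver:

#include <linux/component.h>
#include <linux/of.h>
#include <drm/drm_of.h>

static const struct component_master_ops example_master_ops = {
	/* .bind / .unbind callbacks omitted in this sketch */
};

static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;	/* match a child device by its OF node */
}

static int example_master_probe(struct device *dev)
{
	struct component_match *match = NULL;
	struct device_node *child;

	/* collect one match entry per child node of the master */
	for_each_child_of_node(dev->of_node, child)
		drm_of_component_match_add(dev, &match, compare_of, child);

	return component_master_add_with_match(dev, &example_master_ops, match);
}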
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 98b39d66eb32..c5e8a0df1623 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h | |||
@@ -59,7 +59,7 @@ struct drm_plane_state { | |||
59 | 59 | ||
60 | struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */ | 60 | struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */ |
61 | struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */ | 61 | struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */ |
62 | struct fence *fence; | 62 | struct dma_fence *fence; |
63 | 63 | ||
64 | /* Signed dest location allows it to be partially off screen */ | 64 | /* Signed dest location allows it to be partially off screen */ |
65 | int32_t crtc_x, crtc_y; | 65 | int32_t crtc_x, crtc_y; |
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 9eb940d6755f..652e45be97c8 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
@@ -47,6 +47,8 @@ struct drm_mm_node; | |||
47 | 47 | ||
48 | struct ttm_placement; | 48 | struct ttm_placement; |
49 | 49 | ||
50 | struct ttm_place; | ||
51 | |||
50 | /** | 52 | /** |
51 | * struct ttm_bus_placement | 53 | * struct ttm_bus_placement |
52 | * | 54 | * |
@@ -209,7 +211,7 @@ struct ttm_buffer_object { | |||
209 | * Members protected by a bo reservation. | 211 | * Members protected by a bo reservation. |
210 | */ | 212 | */ |
211 | 213 | ||
212 | struct fence *moving; | 214 | struct dma_fence *moving; |
213 | 215 | ||
214 | struct drm_vma_offset_node vma_node; | 216 | struct drm_vma_offset_node vma_node; |
215 | 217 | ||
@@ -396,6 +398,17 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, | |||
396 | int resched); | 398 | int resched); |
397 | 399 | ||
398 | /** | 400 | /** |
401 | * ttm_bo_eviction_valuable | ||
402 | * | ||
403 | * @bo: The buffer object to evict | ||
404 | * @place: the placement we need to make room for | ||
405 | * | ||
406 | * Check if it is valuable to evict the BO to make room for the given placement. | ||
407 | */ | ||
408 | bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, | ||
409 | const struct ttm_place *place); | ||
410 | |||
411 | /** | ||
399 | * ttm_bo_synccpu_write_grab | 412 | * ttm_bo_synccpu_write_grab |
400 | * | 413 | * |
401 | * @bo: The buffer object: | 414 | * @bo: The buffer object: |
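For orientation (not part of this diff): ttm_bo_eviction_valuable() is the common implementation behind the new per-driver hook added to struct ttm_bo_driver in ttm_bo_driver.h below. A minimal sketch of a driver-side wrapper; the extra policy check is purely hypothetical:

/* Sketch: driver eviction check that falls back to the generic TTM one. */
static bool example_bo_eviction_valuable(struct ttm_buffer_object *bo,
					 const struct ttm_place *place)
{
	/* hypothetical policy: leave buffers with an unfinished move alone */
	if (bo->moving && !dma_fence_is_signaled(bo->moving))
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}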
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 4f0a92185995..cdbdb40eb5bd 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -303,7 +303,7 @@ struct ttm_mem_type_manager { | |||
303 | /* | 303 | /* |
304 | * Protected by @move_lock. | 304 | * Protected by @move_lock. |
305 | */ | 305 | */ |
306 | struct fence *move; | 306 | struct dma_fence *move; |
307 | }; | 307 | }; |
308 | 308 | ||
309 | /** | 309 | /** |
@@ -371,9 +371,21 @@ struct ttm_bo_driver { | |||
371 | * submission as a consequence. | 371 | * submission as a consequence. |
372 | */ | 372 | */ |
373 | 373 | ||
374 | int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags); | 374 | int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags); |
375 | int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type, | 375 | int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type, |
376 | struct ttm_mem_type_manager *man); | 376 | struct ttm_mem_type_manager *man); |
377 | |||
378 | /** | ||
379 | * struct ttm_bo_driver member eviction_valuable | ||
380 | * | ||
381 | * @bo: the buffer object to be evicted | ||
382 | * @place: placement we need room for | ||
383 | * | ||
384 | * Check with the driver if it is valuable to evict a BO to make room | ||
385 | * for a certain placement. | ||
386 | */ | ||
387 | bool (*eviction_valuable)(struct ttm_buffer_object *bo, | ||
388 | const struct ttm_place *place); | ||
377 | /** | 389 | /** |
378 | * struct ttm_bo_driver member evict_flags: | 390 | * struct ttm_bo_driver member evict_flags: |
379 | * | 391 | * |
@@ -384,8 +396,9 @@ struct ttm_bo_driver { | |||
384 | * finished, they'll end up in bo->mem.flags | 396 | * finished, they'll end up in bo->mem.flags |
385 | */ | 397 | */ |
386 | 398 | ||
387 | void(*evict_flags) (struct ttm_buffer_object *bo, | 399 | void (*evict_flags)(struct ttm_buffer_object *bo, |
388 | struct ttm_placement *placement); | 400 | struct ttm_placement *placement); |
401 | |||
389 | /** | 402 | /** |
390 | * struct ttm_bo_driver member move: | 403 | * struct ttm_bo_driver member move: |
391 | * | 404 | * |
@@ -399,10 +412,9 @@ struct ttm_bo_driver { | |||
399 | * | 412 | * |
400 | * Move a buffer between two memory regions. | 413 | * Move a buffer between two memory regions. |
401 | */ | 414 | */ |
402 | int (*move) (struct ttm_buffer_object *bo, | 415 | int (*move)(struct ttm_buffer_object *bo, bool evict, |
403 | bool evict, bool interruptible, | 416 | bool interruptible, bool no_wait_gpu, |
404 | bool no_wait_gpu, | 417 | struct ttm_mem_reg *new_mem); |
405 | struct ttm_mem_reg *new_mem); | ||
406 | 418 | ||
407 | /** | 419 | /** |
408 | * struct ttm_bo_driver_member verify_access | 420 | * struct ttm_bo_driver_member verify_access |
@@ -416,8 +428,8 @@ struct ttm_bo_driver { | |||
416 | * access for all buffer objects. | 428 | * access for all buffer objects. |
417 | * This function should return 0 if access is granted, -EPERM otherwise. | 429 | * This function should return 0 if access is granted, -EPERM otherwise. |
418 | */ | 430 | */ |
419 | int (*verify_access) (struct ttm_buffer_object *bo, | 431 | int (*verify_access)(struct ttm_buffer_object *bo, |
420 | struct file *filp); | 432 | struct file *filp); |
421 | 433 | ||
422 | /* hook to notify driver about a driver move so it | 434 | /* hook to notify driver about a driver move so it |
423 | * can do tiling things */ | 435 | * can do tiling things */ |
@@ -430,7 +442,7 @@ struct ttm_bo_driver { | |||
430 | /** | 442 | /** |
431 | * notify the driver that we're about to swap out this bo | 443 | * notify the driver that we're about to swap out this bo |
432 | */ | 444 | */ |
433 | void (*swap_notify) (struct ttm_buffer_object *bo); | 445 | void (*swap_notify)(struct ttm_buffer_object *bo); |
434 | 446 | ||
435 | /** | 447 | /** |
436 | * Driver callback on when mapping io memory (for bo_move_memcpy | 448 | * Driver callback on when mapping io memory (for bo_move_memcpy |
@@ -438,8 +450,10 @@ struct ttm_bo_driver { | |||
438 | * the mapping is not use anymore. io_mem_reserve & io_mem_free | 450 | * the mapping is not use anymore. io_mem_reserve & io_mem_free |
439 | * are balanced. | 451 | * are balanced. |
440 | */ | 452 | */ |
441 | int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); | 453 | int (*io_mem_reserve)(struct ttm_bo_device *bdev, |
442 | void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); | 454 | struct ttm_mem_reg *mem); |
455 | void (*io_mem_free)(struct ttm_bo_device *bdev, | ||
456 | struct ttm_mem_reg *mem); | ||
443 | 457 | ||
444 | /** | 458 | /** |
445 | * Optional driver callback for when BO is removed from the LRU. | 459 | * Optional driver callback for when BO is removed from the LRU. |
@@ -1025,7 +1039,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); | |||
1025 | */ | 1039 | */ |
1026 | 1040 | ||
1027 | extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | 1041 | extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, |
1028 | struct fence *fence, bool evict, | 1042 | struct dma_fence *fence, bool evict, |
1029 | struct ttm_mem_reg *new_mem); | 1043 | struct ttm_mem_reg *new_mem); |
1030 | 1044 | ||
1031 | /** | 1045 | /** |
@@ -1040,7 +1054,7 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
1040 | * immediately or hang it on a temporary buffer object. | 1054 | * immediately or hang it on a temporary buffer object. |
1041 | */ | 1055 | */ |
1042 | int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, | 1056 | int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, |
1043 | struct fence *fence, bool evict, | 1057 | struct dma_fence *fence, bool evict, |
1044 | struct ttm_mem_reg *new_mem); | 1058 | struct ttm_mem_reg *new_mem); |
1045 | 1059 | ||
1046 | /** | 1060 | /** |
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index b620c317c772..47f35b8e6d09 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h | |||
@@ -114,6 +114,6 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, | |||
114 | 114 | ||
115 | extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, | 115 | extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, |
116 | struct list_head *list, | 116 | struct list_head *list, |
117 | struct fence *fence); | 117 | struct dma_fence *fence); |
118 | 118 | ||
119 | #endif | 119 | #endif |
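For orientation (not part of this diff): the execbuf helpers keep their shape, only the fence type changes. A rough sketch of the usual reserve/submit/fence pattern, assuming the 4.9-era ttm_eu_reserve_buffers() signature; the validation list and the command submission are placeholders:

static int example_submit(struct list_head *validate_list,
			  struct dma_fence *fence)
{
	struct ww_acquire_ctx ticket;
	int ret;

	ret = ttm_eu_reserve_buffers(&ticket, validate_list, true, NULL);
	if (ret)
		return ret;

	/* ... emit GPU commands referencing the reserved BOs ... */

	/* publish the submission fence on every BO and drop the reservations */
	ttm_eu_fence_buffer_objects(&ticket, validate_list, fence);
	return 0;
}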
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index e0b0741ae671..8daeb3ce0016 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <linux/list.h> | 30 | #include <linux/list.h> |
31 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
32 | #include <linux/fs.h> | 32 | #include <linux/fs.h> |
33 | #include <linux/fence.h> | 33 | #include <linux/dma-fence.h> |
34 | #include <linux/wait.h> | 34 | #include <linux/wait.h> |
35 | 35 | ||
36 | struct device; | 36 | struct device; |
@@ -143,7 +143,7 @@ struct dma_buf { | |||
143 | wait_queue_head_t poll; | 143 | wait_queue_head_t poll; |
144 | 144 | ||
145 | struct dma_buf_poll_cb_t { | 145 | struct dma_buf_poll_cb_t { |
146 | struct fence_cb cb; | 146 | struct dma_fence_cb cb; |
147 | wait_queue_head_t *poll; | 147 | wait_queue_head_t *poll; |
148 | 148 | ||
149 | unsigned long active; | 149 | unsigned long active; |
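For orientation (not part of this diff): the poll callback above is the dma-buf core's own use of the renamed callback type; drivers use the same pair of types with dma_fence_add_callback(). A minimal sketch; all example_* names are hypothetical:

struct example_waiter {
	struct dma_fence_cb cb;
	/* ... driver state needed once the fence signals ... */
};

static void example_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct example_waiter *w = container_of(cb, struct example_waiter, cb);

	/* runs from dma_fence_signal() context; kick the next pipeline step */
	(void)w;
}

static int example_arm_callback(struct dma_fence *fence, struct example_waiter *w)
{
	/* returns -ENOENT if the fence has already signaled */
	return dma_fence_add_callback(fence, &w->cb, example_fence_cb);
}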
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h new file mode 100644 index 000000000000..5900945f962d --- /dev/null +++ b/include/linux/dma-fence-array.h | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * fence-array: aggregates fence to be waited together | ||
3 | * | ||
4 | * Copyright (C) 2016 Collabora Ltd | ||
5 | * Copyright (C) 2016 Advanced Micro Devices, Inc. | ||
6 | * Authors: | ||
7 | * Gustavo Padovan <gustavo@padovan.org> | ||
8 | * Christian König <christian.koenig@amd.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License version 2 as published by | ||
12 | * the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | */ | ||
19 | |||
20 | #ifndef __LINUX_DMA_FENCE_ARRAY_H | ||
21 | #define __LINUX_DMA_FENCE_ARRAY_H | ||
22 | |||
23 | #include <linux/dma-fence.h> | ||
24 | |||
25 | /** | ||
26 | * struct dma_fence_array_cb - callback helper for fence array | ||
27 | * @cb: fence callback structure for signaling | ||
28 | * @array: reference to the parent fence array object | ||
29 | */ | ||
30 | struct dma_fence_array_cb { | ||
31 | struct dma_fence_cb cb; | ||
32 | struct dma_fence_array *array; | ||
33 | }; | ||
34 | |||
35 | /** | ||
36 | * struct dma_fence_array - fence to represent an array of fences | ||
37 | * @base: fence base class | ||
38 | * @lock: spinlock for fence handling | ||
39 | * @num_fences: number of fences in the array | ||
40 | * @num_pending: fences in the array still pending | ||
41 | * @fences: array of the fences | ||
42 | */ | ||
43 | struct dma_fence_array { | ||
44 | struct dma_fence base; | ||
45 | |||
46 | spinlock_t lock; | ||
47 | unsigned num_fences; | ||
48 | atomic_t num_pending; | ||
49 | struct dma_fence **fences; | ||
50 | }; | ||
51 | |||
52 | extern const struct dma_fence_ops dma_fence_array_ops; | ||
53 | |||
54 | /** | ||
55 | * dma_fence_is_array - check if a fence is from the array subclass | ||
56 | * @fence: fence to test | ||
57 | * | ||
58 | * Return true if it is a dma_fence_array and false otherwise. | ||
59 | */ | ||
60 | static inline bool dma_fence_is_array(struct dma_fence *fence) | ||
61 | { | ||
62 | return fence->ops == &dma_fence_array_ops; | ||
63 | } | ||
64 | |||
65 | /** | ||
66 | * to_dma_fence_array - cast a fence to a dma_fence_array | ||
67 | * @fence: fence to cast to a dma_fence_array | ||
68 | * | ||
69 | * Returns NULL if the fence is not a dma_fence_array, | ||
70 | * or the dma_fence_array otherwise. | ||
71 | */ | ||
72 | static inline struct dma_fence_array * | ||
73 | to_dma_fence_array(struct dma_fence *fence) | ||
74 | { | ||
75 | if (fence->ops != &dma_fence_array_ops) | ||
76 | return NULL; | ||
77 | |||
78 | return container_of(fence, struct dma_fence_array, base); | ||
79 | } | ||
80 | |||
81 | struct dma_fence_array *dma_fence_array_create(int num_fences, | ||
82 | struct dma_fence **fences, | ||
83 | u64 context, unsigned seqno, | ||
84 | bool signal_on_any); | ||
85 | |||
86 | #endif /* __LINUX_DMA_FENCE_ARRAY_H */ | ||
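For orientation (not part of this diff): a minimal sketch of using the renamed array API to collapse several fences into one that signals when all of them have signaled. The caller is assumed to hold a reference on each fence; on success the array takes ownership of the 'fences' array and those references:

static struct dma_fence *example_merge_fences(struct dma_fence **fences,
					       int count)
{
	struct dma_fence_array *array;

	/* signal_on_any = false: the array signals once all fences signal */
	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1), 1, false);
	if (!array)
		return NULL;

	return &array->base;
}

A consumer can later test with dma_fence_is_array() and use to_dma_fence_array() to get back at the individual fences.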
diff --git a/include/linux/fence.h b/include/linux/dma-fence.h index c9c5ba98c302..ba60c043a5d3 100644 --- a/include/linux/fence.h +++ b/include/linux/dma-fence.h | |||
@@ -18,8 +18,8 @@ | |||
18 | * more details. | 18 | * more details. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #ifndef __LINUX_FENCE_H | 21 | #ifndef __LINUX_DMA_FENCE_H |
22 | #define __LINUX_FENCE_H | 22 | #define __LINUX_DMA_FENCE_H |
23 | 23 | ||
24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/wait.h> | 25 | #include <linux/wait.h> |
@@ -30,48 +30,48 @@ | |||
30 | #include <linux/printk.h> | 30 | #include <linux/printk.h> |
31 | #include <linux/rcupdate.h> | 31 | #include <linux/rcupdate.h> |
32 | 32 | ||
33 | struct fence; | 33 | struct dma_fence; |
34 | struct fence_ops; | 34 | struct dma_fence_ops; |
35 | struct fence_cb; | 35 | struct dma_fence_cb; |
36 | 36 | ||
37 | /** | 37 | /** |
38 | * struct fence - software synchronization primitive | 38 | * struct dma_fence - software synchronization primitive |
39 | * @refcount: refcount for this fence | 39 | * @refcount: refcount for this fence |
40 | * @ops: fence_ops associated with this fence | 40 | * @ops: dma_fence_ops associated with this fence |
41 | * @rcu: used for releasing fence with kfree_rcu | 41 | * @rcu: used for releasing fence with kfree_rcu |
42 | * @cb_list: list of all callbacks to call | 42 | * @cb_list: list of all callbacks to call |
43 | * @lock: spin_lock_irqsave used for locking | 43 | * @lock: spin_lock_irqsave used for locking |
44 | * @context: execution context this fence belongs to, returned by | 44 | * @context: execution context this fence belongs to, returned by |
45 | * fence_context_alloc() | 45 | * dma_fence_context_alloc() |
46 | * @seqno: the sequence number of this fence inside the execution context, | 46 | * @seqno: the sequence number of this fence inside the execution context, |
47 | * can be compared to decide which fence would be signaled later. | 47 | * can be compared to decide which fence would be signaled later. |
48 | * @flags: A mask of FENCE_FLAG_* defined below | 48 | * @flags: A mask of DMA_FENCE_FLAG_* defined below |
49 | * @timestamp: Timestamp when the fence was signaled. | 49 | * @timestamp: Timestamp when the fence was signaled. |
50 | * @status: Optional, only valid if < 0, must be set before calling | 50 | * @status: Optional, only valid if < 0, must be set before calling |
51 | * fence_signal, indicates that the fence has completed with an error. | 51 | * dma_fence_signal, indicates that the fence has completed with an error. |
52 | * | 52 | * |
53 | * the flags member must be manipulated and read using the appropriate | 53 | * the flags member must be manipulated and read using the appropriate |
54 | * atomic ops (bit_*), so taking the spinlock will not be needed most | 54 | * atomic ops (bit_*), so taking the spinlock will not be needed most |
55 | * of the time. | 55 | * of the time. |
56 | * | 56 | * |
57 | * FENCE_FLAG_SIGNALED_BIT - fence is already signaled | 57 | * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled |
58 | * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* | 58 | * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called |
59 | * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the | 59 | * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the |
60 | * implementer of the fence for its own purposes. Can be used in different | 60 | * implementer of the fence for its own purposes. Can be used in different |
61 | * ways by different fence implementers, so do not rely on this. | 61 | * ways by different fence implementers, so do not rely on this. |
62 | * | 62 | * |
63 | * Since atomic bitops are used, this is not guaranteed to be the case. | 63 | * Since atomic bitops are used, this is not guaranteed to be the case. |
64 | * Particularly, if the bit was set, but fence_signal was called right | 64 | * Particularly, if the bit was set, but dma_fence_signal was called right |
65 | * before this bit was set, it would have been able to set the | 65 | * before this bit was set, it would have been able to set the |
66 | * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. | 66 | * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. |
67 | * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting | 67 | * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting |
68 | * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that | 68 | * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that |
69 | * after fence_signal was called, any enable_signaling call will have either | 69 | * after dma_fence_signal was called, any enable_signaling call will have either |
70 | * been completed, or never called at all. | 70 | * been completed, or never called at all. |
71 | */ | 71 | */ |
72 | struct fence { | 72 | struct dma_fence { |
73 | struct kref refcount; | 73 | struct kref refcount; |
74 | const struct fence_ops *ops; | 74 | const struct dma_fence_ops *ops; |
75 | struct rcu_head rcu; | 75 | struct rcu_head rcu; |
76 | struct list_head cb_list; | 76 | struct list_head cb_list; |
77 | spinlock_t *lock; | 77 | spinlock_t *lock; |
@@ -82,34 +82,35 @@ struct fence { | |||
82 | int status; | 82 | int status; |
83 | }; | 83 | }; |
84 | 84 | ||
85 | enum fence_flag_bits { | 85 | enum dma_fence_flag_bits { |
86 | FENCE_FLAG_SIGNALED_BIT, | 86 | DMA_FENCE_FLAG_SIGNALED_BIT, |
87 | FENCE_FLAG_ENABLE_SIGNAL_BIT, | 87 | DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, |
88 | FENCE_FLAG_USER_BITS, /* must always be last member */ | 88 | DMA_FENCE_FLAG_USER_BITS, /* must always be last member */ |
89 | }; | 89 | }; |
90 | 90 | ||
91 | typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); | 91 | typedef void (*dma_fence_func_t)(struct dma_fence *fence, |
92 | struct dma_fence_cb *cb); | ||
92 | 93 | ||
93 | /** | 94 | /** |
94 | * struct fence_cb - callback for fence_add_callback | 95 | * struct dma_fence_cb - callback for dma_fence_add_callback |
95 | * @node: used by fence_add_callback to append this struct to fence::cb_list | 96 | * @node: used by dma_fence_add_callback to append this struct to fence::cb_list |
96 | * @func: fence_func_t to call | 97 | * @func: dma_fence_func_t to call |
97 | * | 98 | * |
98 | * This struct will be initialized by fence_add_callback, additional | 99 | * This struct will be initialized by dma_fence_add_callback, additional |
99 | * data can be passed along by embedding fence_cb in another struct. | 100 | * data can be passed along by embedding dma_fence_cb in another struct. |
100 | */ | 101 | */ |
101 | struct fence_cb { | 102 | struct dma_fence_cb { |
102 | struct list_head node; | 103 | struct list_head node; |
103 | fence_func_t func; | 104 | dma_fence_func_t func; |
104 | }; | 105 | }; |
105 | 106 | ||
106 | /** | 107 | /** |
107 | * struct fence_ops - operations implemented for fence | 108 | * struct dma_fence_ops - operations implemented for fence |
108 | * @get_driver_name: returns the driver name. | 109 | * @get_driver_name: returns the driver name. |
109 | * @get_timeline_name: return the name of the context this fence belongs to. | 110 | * @get_timeline_name: return the name of the context this fence belongs to. |
110 | * @enable_signaling: enable software signaling of fence. | 111 | * @enable_signaling: enable software signaling of fence. |
111 | * @signaled: [optional] peek whether the fence is signaled, can be null. | 112 | * @signaled: [optional] peek whether the fence is signaled, can be null. |
112 | * @wait: custom wait implementation, or fence_default_wait. | 113 | * @wait: custom wait implementation, or dma_fence_default_wait. |
113 | * @release: [optional] called on destruction of fence, can be null | 114 | * @release: [optional] called on destruction of fence, can be null |
114 | * @fill_driver_data: [optional] callback to fill in free-form debug info | 115 | * @fill_driver_data: [optional] callback to fill in free-form debug info |
115 | * Returns amount of bytes filled, or -errno. | 116 | * Returns amount of bytes filled, or -errno. |
@@ -135,20 +136,20 @@ struct fence_cb { | |||
135 | * fence->status may be set in enable_signaling, but only when false is | 136 | * fence->status may be set in enable_signaling, but only when false is |
136 | * returned. | 137 | * returned. |
137 | * | 138 | * |
138 | * Calling fence_signal before enable_signaling is called allows | 139 | * Calling dma_fence_signal before enable_signaling is called allows |
139 | * for a tiny race window in which enable_signaling is called during, | 140 | * for a tiny race window in which enable_signaling is called during, |
140 | * before, or after fence_signal. To fight this, it is recommended | 141 | * before, or after dma_fence_signal. To fight this, it is recommended |
141 | * that before enable_signaling returns true an extra reference is | 142 | * that before enable_signaling returns true an extra reference is |
142 | * taken on the fence, to be released when the fence is signaled. | 143 | * taken on the fence, to be released when the fence is signaled. |
143 | * This will mean fence_signal will still be called twice, but | 144 | * This will mean dma_fence_signal will still be called twice, but |
144 | * the second time will be a noop since it was already signaled. | 145 | * the second time will be a noop since it was already signaled. |
145 | * | 146 | * |
146 | * Notes on signaled: | 147 | * Notes on signaled: |
147 | * May set fence->status if returning true. | 148 | * May set fence->status if returning true. |
148 | * | 149 | * |
149 | * Notes on wait: | 150 | * Notes on wait: |
150 | * Must not be NULL, set to fence_default_wait for default implementation. | 151 | * Must not be NULL, set to dma_fence_default_wait for default implementation. |
151 | * the fence_default_wait implementation should work for any fence, as long | 152 | * the dma_fence_default_wait implementation should work for any fence, as long |
152 | * as enable_signaling works correctly. | 153 | * as enable_signaling works correctly. |
153 | * | 154 | * |
154 | * Must return -ERESTARTSYS if the wait is intr = true and the wait was | 155 | * Must return -ERESTARTSYS if the wait is intr = true and the wait was |
@@ -163,42 +164,44 @@ struct fence_cb { | |||
163 | * If pointer is set to NULL, kfree will get called instead. | 164 | * If pointer is set to NULL, kfree will get called instead. |
164 | */ | 165 | */ |
165 | 166 | ||
166 | struct fence_ops { | 167 | struct dma_fence_ops { |
167 | const char * (*get_driver_name)(struct fence *fence); | 168 | const char * (*get_driver_name)(struct dma_fence *fence); |
168 | const char * (*get_timeline_name)(struct fence *fence); | 169 | const char * (*get_timeline_name)(struct dma_fence *fence); |
169 | bool (*enable_signaling)(struct fence *fence); | 170 | bool (*enable_signaling)(struct dma_fence *fence); |
170 | bool (*signaled)(struct fence *fence); | 171 | bool (*signaled)(struct dma_fence *fence); |
171 | signed long (*wait)(struct fence *fence, bool intr, signed long timeout); | 172 | signed long (*wait)(struct dma_fence *fence, |
172 | void (*release)(struct fence *fence); | 173 | bool intr, signed long timeout); |
173 | 174 | void (*release)(struct dma_fence *fence); | |
174 | int (*fill_driver_data)(struct fence *fence, void *data, int size); | 175 | |
175 | void (*fence_value_str)(struct fence *fence, char *str, int size); | 176 | int (*fill_driver_data)(struct dma_fence *fence, void *data, int size); |
176 | void (*timeline_value_str)(struct fence *fence, char *str, int size); | 177 | void (*fence_value_str)(struct dma_fence *fence, char *str, int size); |
178 | void (*timeline_value_str)(struct dma_fence *fence, | ||
179 | char *str, int size); | ||
177 | }; | 180 | }; |
178 | 181 | ||
179 | void fence_init(struct fence *fence, const struct fence_ops *ops, | 182 | void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, |
180 | spinlock_t *lock, u64 context, unsigned seqno); | 183 | spinlock_t *lock, u64 context, unsigned seqno); |
181 | 184 | ||
182 | void fence_release(struct kref *kref); | 185 | void dma_fence_release(struct kref *kref); |
183 | void fence_free(struct fence *fence); | 186 | void dma_fence_free(struct dma_fence *fence); |
184 | 187 | ||
185 | /** | 188 | /** |
186 | * fence_put - decreases refcount of the fence | 189 | * dma_fence_put - decreases refcount of the fence |
187 | * @fence: [in] fence to reduce refcount of | 190 | * @fence: [in] fence to reduce refcount of |
188 | */ | 191 | */ |
189 | static inline void fence_put(struct fence *fence) | 192 | static inline void dma_fence_put(struct dma_fence *fence) |
190 | { | 193 | { |
191 | if (fence) | 194 | if (fence) |
192 | kref_put(&fence->refcount, fence_release); | 195 | kref_put(&fence->refcount, dma_fence_release); |
193 | } | 196 | } |
194 | 197 | ||
195 | /** | 198 | /** |
196 | * fence_get - increases refcount of the fence | 199 | * dma_fence_get - increases refcount of the fence |
197 | * @fence: [in] fence to increase refcount of | 200 | * @fence: [in] fence to increase refcount of |
198 | * | 201 | * |
199 | * Returns the same fence, with refcount increased by 1. | 202 | * Returns the same fence, with refcount increased by 1. |
200 | */ | 203 | */ |
201 | static inline struct fence *fence_get(struct fence *fence) | 204 | static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) |
202 | { | 205 | { |
203 | if (fence) | 206 | if (fence) |
204 | kref_get(&fence->refcount); | 207 | kref_get(&fence->refcount); |
@@ -206,12 +209,13 @@ static inline struct fence *fence_get(struct fence *fence) | |||
206 | } | 209 | } |
207 | 210 | ||
208 | /** | 211 | /** |
209 | * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock | 212 | * dma_fence_get_rcu - get a fence from a reservation_object_list with |
213 | * rcu read lock | ||
210 | * @fence: [in] fence to increase refcount of | 214 | * @fence: [in] fence to increase refcount of |
211 | * | 215 | * |
212 | * Function returns NULL if no refcount could be obtained, or the fence. | 216 | * Function returns NULL if no refcount could be obtained, or the fence. |
213 | */ | 217 | */ |
214 | static inline struct fence *fence_get_rcu(struct fence *fence) | 218 | static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) |
215 | { | 219 | { |
216 | if (kref_get_unless_zero(&fence->refcount)) | 220 | if (kref_get_unless_zero(&fence->refcount)) |
217 | return fence; | 221 | return fence; |
@@ -220,7 +224,7 @@ static inline struct fence *fence_get_rcu(struct fence *fence) | |||
220 | } | 224 | } |
221 | 225 | ||
222 | /** | 226 | /** |
223 | * fence_get_rcu_safe - acquire a reference to an RCU tracked fence | 227 | * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence |
224 | * @fence: [in] pointer to fence to increase refcount of | 228 | * @fence: [in] pointer to fence to increase refcount of |
225 | * | 229 | * |
226 | * Function returns NULL if no refcount could be obtained, or the fence. | 230 | * Function returns NULL if no refcount could be obtained, or the fence. |
@@ -235,16 +239,17 @@ static inline struct fence *fence_get_rcu(struct fence *fence) | |||
235 | * | 239 | * |
236 | * The caller is required to hold the RCU read lock. | 240 | * The caller is required to hold the RCU read lock. |
237 | */ | 241 | */ |
238 | static inline struct fence *fence_get_rcu_safe(struct fence * __rcu *fencep) | 242 | static inline struct dma_fence * |
243 | dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) | ||
239 | { | 244 | { |
240 | do { | 245 | do { |
241 | struct fence *fence; | 246 | struct dma_fence *fence; |
242 | 247 | ||
243 | fence = rcu_dereference(*fencep); | 248 | fence = rcu_dereference(*fencep); |
244 | if (!fence || !fence_get_rcu(fence)) | 249 | if (!fence || !dma_fence_get_rcu(fence)) |
245 | return NULL; | 250 | return NULL; |
246 | 251 | ||
247 | /* The atomic_inc_not_zero() inside fence_get_rcu() | 252 | /* The atomic_inc_not_zero() inside dma_fence_get_rcu() |
248 | * provides a full memory barrier upon success (such as now). | 253 | * provides a full memory barrier upon success (such as now). |
249 | * This is paired with the write barrier from assigning | 254 | * This is paired with the write barrier from assigning |
250 | * to the __rcu protected fence pointer so that if that | 255 | * to the __rcu protected fence pointer so that if that |
@@ -261,37 +266,41 @@ static inline struct fence *fence_get_rcu_safe(struct fence * __rcu *fencep) | |||
261 | if (fence == rcu_access_pointer(*fencep)) | 266 | if (fence == rcu_access_pointer(*fencep)) |
262 | return rcu_pointer_handoff(fence); | 267 | return rcu_pointer_handoff(fence); |
263 | 268 | ||
264 | fence_put(fence); | 269 | dma_fence_put(fence); |
265 | } while (1); | 270 | } while (1); |
266 | } | 271 | } |
267 | 272 | ||
268 | int fence_signal(struct fence *fence); | 273 | int dma_fence_signal(struct dma_fence *fence); |
269 | int fence_signal_locked(struct fence *fence); | 274 | int dma_fence_signal_locked(struct dma_fence *fence); |
270 | signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); | 275 | signed long dma_fence_default_wait(struct dma_fence *fence, |
271 | int fence_add_callback(struct fence *fence, struct fence_cb *cb, | 276 | bool intr, signed long timeout); |
272 | fence_func_t func); | 277 | int dma_fence_add_callback(struct dma_fence *fence, |
273 | bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); | 278 | struct dma_fence_cb *cb, |
274 | void fence_enable_sw_signaling(struct fence *fence); | 279 | dma_fence_func_t func); |
280 | bool dma_fence_remove_callback(struct dma_fence *fence, | ||
281 | struct dma_fence_cb *cb); | ||
282 | void dma_fence_enable_sw_signaling(struct dma_fence *fence); | ||
275 | 283 | ||
276 | /** | 284 | /** |
277 | * fence_is_signaled_locked - Return an indication if the fence is signaled yet. | 285 | * dma_fence_is_signaled_locked - Return an indication if the fence |
286 | * is signaled yet. | ||
278 | * @fence: [in] the fence to check | 287 | * @fence: [in] the fence to check |
279 | * | 288 | * |
280 | * Returns true if the fence was already signaled, false if not. Since this | 289 | * Returns true if the fence was already signaled, false if not. Since this |
281 | * function doesn't enable signaling, it is not guaranteed to ever return | 290 | * function doesn't enable signaling, it is not guaranteed to ever return |
282 | * true if fence_add_callback, fence_wait or fence_enable_sw_signaling | 291 | * true if dma_fence_add_callback, dma_fence_wait or |
283 | * haven't been called before. | 292 | * dma_fence_enable_sw_signaling haven't been called before. |
284 | * | 293 | * |
285 | * This function requires fence->lock to be held. | 294 | * This function requires fence->lock to be held. |
286 | */ | 295 | */ |
287 | static inline bool | 296 | static inline bool |
288 | fence_is_signaled_locked(struct fence *fence) | 297 | dma_fence_is_signaled_locked(struct dma_fence *fence) |
289 | { | 298 | { |
290 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | 299 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
291 | return true; | 300 | return true; |
292 | 301 | ||
293 | if (fence->ops->signaled && fence->ops->signaled(fence)) { | 302 | if (fence->ops->signaled && fence->ops->signaled(fence)) { |
294 | fence_signal_locked(fence); | 303 | dma_fence_signal_locked(fence); |
295 | return true; | 304 | return true; |
296 | } | 305 | } |
297 | 306 | ||
@@ -299,27 +308,27 @@ fence_is_signaled_locked(struct fence *fence) | |||
299 | } | 308 | } |
300 | 309 | ||
301 | /** | 310 | /** |
302 | * fence_is_signaled - Return an indication if the fence is signaled yet. | 311 | * dma_fence_is_signaled - Return an indication if the fence is signaled yet. |
303 | * @fence: [in] the fence to check | 312 | * @fence: [in] the fence to check |
304 | * | 313 | * |
305 | * Returns true if the fence was already signaled, false if not. Since this | 314 | * Returns true if the fence was already signaled, false if not. Since this |
306 | * function doesn't enable signaling, it is not guaranteed to ever return | 315 | * function doesn't enable signaling, it is not guaranteed to ever return |
307 | * true if fence_add_callback, fence_wait or fence_enable_sw_signaling | 316 | * true if dma_fence_add_callback, dma_fence_wait or |
308 | * haven't been called before. | 317 | * dma_fence_enable_sw_signaling haven't been called before. |
309 | * | 318 | * |
310 | * It's recommended for seqno fences to call fence_signal when the | 319 | * It's recommended for seqno fences to call dma_fence_signal when the |
311 | * operation is complete, it makes it possible to prevent issues from | 320 | * operation is complete, it makes it possible to prevent issues from |
312 | * wraparound between time of issue and time of use by checking the return | 321 | * wraparound between time of issue and time of use by checking the return |
313 | * value of this function before calling hardware-specific wait instructions. | 322 | * value of this function before calling hardware-specific wait instructions. |
314 | */ | 323 | */ |
315 | static inline bool | 324 | static inline bool |
316 | fence_is_signaled(struct fence *fence) | 325 | dma_fence_is_signaled(struct dma_fence *fence) |
317 | { | 326 | { |
318 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | 327 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
319 | return true; | 328 | return true; |
320 | 329 | ||
321 | if (fence->ops->signaled && fence->ops->signaled(fence)) { | 330 | if (fence->ops->signaled && fence->ops->signaled(fence)) { |
322 | fence_signal(fence); | 331 | dma_fence_signal(fence); |
323 | return true; | 332 | return true; |
324 | } | 333 | } |
325 | 334 | ||
@@ -327,14 +336,15 @@ fence_is_signaled(struct fence *fence) | |||
327 | } | 336 | } |
328 | 337 | ||
329 | /** | 338 | /** |
330 | * fence_is_later - return if f1 is chronologically later than f2 | 339 | * dma_fence_is_later - return if f1 is chronologically later than f2 |
331 | * @f1: [in] the first fence from the same context | 340 | * @f1: [in] the first fence from the same context |
332 | * @f2: [in] the second fence from the same context | 341 | * @f2: [in] the second fence from the same context |
333 | * | 342 | * |
334 | * Returns true if f1 is chronologically later than f2. Both fences must be | 343 | * Returns true if f1 is chronologically later than f2. Both fences must be |
335 | * from the same context, since a seqno is not re-used across contexts. | 344 | * from the same context, since a seqno is not re-used across contexts. |
336 | */ | 345 | */ |
337 | static inline bool fence_is_later(struct fence *f1, struct fence *f2) | 346 | static inline bool dma_fence_is_later(struct dma_fence *f1, |
347 | struct dma_fence *f2) | ||
338 | { | 348 | { |
339 | if (WARN_ON(f1->context != f2->context)) | 349 | if (WARN_ON(f1->context != f2->context)) |
340 | return false; | 350 | return false; |
@@ -343,7 +353,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2) | |||
343 | } | 353 | } |
344 | 354 | ||
345 | /** | 355 | /** |
346 | * fence_later - return the chronologically later fence | 356 | * dma_fence_later - return the chronologically later fence |
347 | * @f1: [in] the first fence from the same context | 357 | * @f1: [in] the first fence from the same context |
348 | * @f2: [in] the second fence from the same context | 358 | * @f2: [in] the second fence from the same context |
349 | * | 359 | * |
@@ -351,28 +361,31 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2) | |||
351 | * signaled last. Both fences must be from the same context, since a seqno is | 361 | * signaled last. Both fences must be from the same context, since a seqno is |
352 | * not re-used across contexts. | 362 | * not re-used across contexts. |
353 | */ | 363 | */ |
354 | static inline struct fence *fence_later(struct fence *f1, struct fence *f2) | 364 | static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, |
365 | struct dma_fence *f2) | ||
355 | { | 366 | { |
356 | if (WARN_ON(f1->context != f2->context)) | 367 | if (WARN_ON(f1->context != f2->context)) |
357 | return NULL; | 368 | return NULL; |
358 | 369 | ||
359 | /* | 370 | /* |
360 | * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been | 371 | * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never |
361 | * set if enable_signaling wasn't called, and enabling that here is | 372 | * have been set if enable_signaling wasn't called, and enabling that |
362 | * overkill. | 373 | * here is overkill. |
363 | */ | 374 | */ |
364 | if (fence_is_later(f1, f2)) | 375 | if (dma_fence_is_later(f1, f2)) |
365 | return fence_is_signaled(f1) ? NULL : f1; | 376 | return dma_fence_is_signaled(f1) ? NULL : f1; |
366 | else | 377 | else |
367 | return fence_is_signaled(f2) ? NULL : f2; | 378 | return dma_fence_is_signaled(f2) ? NULL : f2; |
368 | } | 379 | } |
369 | 380 | ||
370 | signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); | 381 | signed long dma_fence_wait_timeout(struct dma_fence *, |
371 | signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, | ||
372 | bool intr, signed long timeout); | 382 | bool intr, signed long timeout); |
383 | signed long dma_fence_wait_any_timeout(struct dma_fence **fences, | ||
384 | uint32_t count, | ||
385 | bool intr, signed long timeout); | ||
373 | 386 | ||
374 | /** | 387 | /** |
375 | * fence_wait - sleep until the fence gets signaled | 388 | * dma_fence_wait - sleep until the fence gets signaled |
376 | * @fence: [in] the fence to wait on | 389 | * @fence: [in] the fence to wait on |
377 | * @intr: [in] if true, do an interruptible wait | 390 | * @intr: [in] if true, do an interruptible wait |
378 | * | 391 | * |
@@ -384,41 +397,41 @@ signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, | |||
384 | * directly or indirectly holds a reference to the fence, otherwise the | 397 | * directly or indirectly holds a reference to the fence, otherwise the |
385 | * fence might be freed before return, resulting in undefined behavior. | 398 | * fence might be freed before return, resulting in undefined behavior. |
386 | */ | 399 | */ |
387 | static inline signed long fence_wait(struct fence *fence, bool intr) | 400 | static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) |
388 | { | 401 | { |
389 | signed long ret; | 402 | signed long ret; |
390 | 403 | ||
391 | /* Since fence_wait_timeout cannot timeout with | 404 | /* Since dma_fence_wait_timeout cannot timeout with |
392 | * MAX_SCHEDULE_TIMEOUT, only valid return values are | 405 | * MAX_SCHEDULE_TIMEOUT, only valid return values are |
393 | * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. | 406 | * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. |
394 | */ | 407 | */ |
395 | ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); | 408 | ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); |
396 | 409 | ||
397 | return ret < 0 ? ret : 0; | 410 | return ret < 0 ? ret : 0; |
398 | } | 411 | } |
399 | 412 | ||
400 | u64 fence_context_alloc(unsigned num); | 413 | u64 dma_fence_context_alloc(unsigned num); |
401 | 414 | ||
402 | #define FENCE_TRACE(f, fmt, args...) \ | 415 | #define DMA_FENCE_TRACE(f, fmt, args...) \ |
403 | do { \ | 416 | do { \ |
404 | struct fence *__ff = (f); \ | 417 | struct dma_fence *__ff = (f); \ |
405 | if (IS_ENABLED(CONFIG_FENCE_TRACE)) \ | 418 | if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ |
406 | pr_info("f %llu#%u: " fmt, \ | 419 | pr_info("f %llu#%u: " fmt, \ |
407 | __ff->context, __ff->seqno, ##args); \ | 420 | __ff->context, __ff->seqno, ##args); \ |
408 | } while (0) | 421 | } while (0) |
409 | 422 | ||
410 | #define FENCE_WARN(f, fmt, args...) \ | 423 | #define DMA_FENCE_WARN(f, fmt, args...) \ |
411 | do { \ | 424 | do { \ |
412 | struct fence *__ff = (f); \ | 425 | struct dma_fence *__ff = (f); \ |
413 | pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ | 426 | pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ |
414 | ##args); \ | 427 | ##args); \ |
415 | } while (0) | 428 | } while (0) |
416 | 429 | ||
417 | #define FENCE_ERR(f, fmt, args...) \ | 430 | #define DMA_FENCE_ERR(f, fmt, args...) \ |
418 | do { \ | 431 | do { \ |
419 | struct fence *__ff = (f); \ | 432 | struct dma_fence *__ff = (f); \ |
420 | pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ | 433 | pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ |
421 | ##args); \ | 434 | ##args); \ |
422 | } while (0) | 435 | } while (0) |
423 | 436 | ||
424 | #endif /* __LINUX_FENCE_H */ | 437 | #endif /* __LINUX_DMA_FENCE_H */ |
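For orientation (not part of this diff): the smallest useful dma_fence provider after the rename needs the four mandatory ops plus dma_fence_init()/dma_fence_signal(). A hedged sketch; all example_* names are hypothetical and the locking is the bare minimum:

static const char *example_get_driver_name(struct dma_fence *f)
{
	return "example";
}

static const char *example_get_timeline_name(struct dma_fence *f)
{
	return "example-timeline";
}

static bool example_enable_signaling(struct dma_fence *f)
{
	return true;	/* this toy provider always signals from its IRQ path */
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_get_driver_name,
	.get_timeline_name = example_get_timeline_name,
	.enable_signaling = example_enable_signaling,
	.wait = dma_fence_default_wait,
};

struct example_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static void example_fence_start(struct example_fence *f, u64 context,
				unsigned seqno)
{
	spin_lock_init(&f->lock);
	dma_fence_init(&f->base, &example_fence_ops, &f->lock, context, seqno);
}

static void example_fence_complete(struct example_fence *f)
{
	dma_fence_signal(&f->base);	/* wakes waiters, runs callbacks */
	dma_fence_put(&f->base);	/* drop the provider's own reference */
}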
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h deleted file mode 100644 index a44794e508df..000000000000 --- a/include/linux/fence-array.h +++ /dev/null | |||
@@ -1,83 +0,0 @@ | |||
1 | /* | ||
2 | * fence-array: aggregates fence to be waited together | ||
3 | * | ||
4 | * Copyright (C) 2016 Collabora Ltd | ||
5 | * Copyright (C) 2016 Advanced Micro Devices, Inc. | ||
6 | * Authors: | ||
7 | * Gustavo Padovan <gustavo@padovan.org> | ||
8 | * Christian König <christian.koenig@amd.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License version 2 as published by | ||
12 | * the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | */ | ||
19 | |||
20 | #ifndef __LINUX_FENCE_ARRAY_H | ||
21 | #define __LINUX_FENCE_ARRAY_H | ||
22 | |||
23 | #include <linux/fence.h> | ||
24 | |||
25 | /** | ||
26 | * struct fence_array_cb - callback helper for fence array | ||
27 | * @cb: fence callback structure for signaling | ||
28 | * @array: reference to the parent fence array object | ||
29 | */ | ||
30 | struct fence_array_cb { | ||
31 | struct fence_cb cb; | ||
32 | struct fence_array *array; | ||
33 | }; | ||
34 | |||
35 | /** | ||
36 | * struct fence_array - fence to represent an array of fences | ||
37 | * @base: fence base class | ||
38 | * @lock: spinlock for fence handling | ||
39 | * @num_fences: number of fences in the array | ||
40 | * @num_pending: fences in the array still pending | ||
41 | * @fences: array of the fences | ||
42 | */ | ||
43 | struct fence_array { | ||
44 | struct fence base; | ||
45 | |||
46 | spinlock_t lock; | ||
47 | unsigned num_fences; | ||
48 | atomic_t num_pending; | ||
49 | struct fence **fences; | ||
50 | }; | ||
51 | |||
52 | extern const struct fence_ops fence_array_ops; | ||
53 | |||
54 | /** | ||
55 | * fence_is_array - check if a fence is from the array subsclass | ||
56 | * | ||
57 | * Return true if it is a fence_array and false otherwise. | ||
58 | */ | ||
59 | static inline bool fence_is_array(struct fence *fence) | ||
60 | { | ||
61 | return fence->ops == &fence_array_ops; | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * to_fence_array - cast a fence to a fence_array | ||
66 | * @fence: fence to cast to a fence_array | ||
67 | * | ||
68 | * Returns NULL if the fence is not a fence_array, | ||
69 | * or the fence_array otherwise. | ||
70 | */ | ||
71 | static inline struct fence_array *to_fence_array(struct fence *fence) | ||
72 | { | ||
73 | if (fence->ops != &fence_array_ops) | ||
74 | return NULL; | ||
75 | |||
76 | return container_of(fence, struct fence_array, base); | ||
77 | } | ||
78 | |||
79 | struct fence_array *fence_array_create(int num_fences, struct fence **fences, | ||
80 | u64 context, unsigned seqno, | ||
81 | bool signal_on_any); | ||
82 | |||
83 | #endif /* __LINUX_FENCE_ARRAY_H */ | ||
diff --git a/include/linux/reservation.h b/include/linux/reservation.h index b0f305e77b7f..2e313cca08f0 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #define _LINUX_RESERVATION_H | 40 | #define _LINUX_RESERVATION_H |
41 | 41 | ||
42 | #include <linux/ww_mutex.h> | 42 | #include <linux/ww_mutex.h> |
43 | #include <linux/fence.h> | 43 | #include <linux/dma-fence.h> |
44 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
45 | #include <linux/seqlock.h> | 45 | #include <linux/seqlock.h> |
46 | #include <linux/rcupdate.h> | 46 | #include <linux/rcupdate.h> |
@@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[]; | |||
59 | struct reservation_object_list { | 59 | struct reservation_object_list { |
60 | struct rcu_head rcu; | 60 | struct rcu_head rcu; |
61 | u32 shared_count, shared_max; | 61 | u32 shared_count, shared_max; |
62 | struct fence __rcu *shared[]; | 62 | struct dma_fence __rcu *shared[]; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | /** | 65 | /** |
@@ -74,7 +74,7 @@ struct reservation_object { | |||
74 | struct ww_mutex lock; | 74 | struct ww_mutex lock; |
75 | seqcount_t seq; | 75 | seqcount_t seq; |
76 | 76 | ||
77 | struct fence __rcu *fence_excl; | 77 | struct dma_fence __rcu *fence_excl; |
78 | struct reservation_object_list __rcu *fence; | 78 | struct reservation_object_list __rcu *fence; |
79 | struct reservation_object_list *staged; | 79 | struct reservation_object_list *staged; |
80 | }; | 80 | }; |
@@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj) | |||
107 | { | 107 | { |
108 | int i; | 108 | int i; |
109 | struct reservation_object_list *fobj; | 109 | struct reservation_object_list *fobj; |
110 | struct fence *excl; | 110 | struct dma_fence *excl; |
111 | 111 | ||
112 | /* | 112 | /* |
113 | * This object should be dead and all references must have | 113 | * This object should be dead and all references must have |
@@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj) | |||
115 | */ | 115 | */ |
116 | excl = rcu_dereference_protected(obj->fence_excl, 1); | 116 | excl = rcu_dereference_protected(obj->fence_excl, 1); |
117 | if (excl) | 117 | if (excl) |
118 | fence_put(excl); | 118 | dma_fence_put(excl); |
119 | 119 | ||
120 | fobj = rcu_dereference_protected(obj->fence, 1); | 120 | fobj = rcu_dereference_protected(obj->fence, 1); |
121 | if (fobj) { | 121 | if (fobj) { |
122 | for (i = 0; i < fobj->shared_count; ++i) | 122 | for (i = 0; i < fobj->shared_count; ++i) |
123 | fence_put(rcu_dereference_protected(fobj->shared[i], 1)); | 123 | dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1)); |
124 | 124 | ||
125 | kfree(fobj); | 125 | kfree(fobj); |
126 | } | 126 | } |
@@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj) | |||
155 | * RETURNS | 155 | * RETURNS |
156 | * The exclusive fence or NULL | 156 | * The exclusive fence or NULL |
157 | */ | 157 | */ |
158 | static inline struct fence * | 158 | static inline struct dma_fence * |
159 | reservation_object_get_excl(struct reservation_object *obj) | 159 | reservation_object_get_excl(struct reservation_object *obj) |
160 | { | 160 | { |
161 | return rcu_dereference_protected(obj->fence_excl, | 161 | return rcu_dereference_protected(obj->fence_excl, |
@@ -173,10 +173,10 @@ reservation_object_get_excl(struct reservation_object *obj) | |||
173 | * RETURNS | 173 | * RETURNS |
174 | * The exclusive fence or NULL if none | 174 | * The exclusive fence or NULL if none |
175 | */ | 175 | */ |
176 | static inline struct fence * | 176 | static inline struct dma_fence * |
177 | reservation_object_get_excl_rcu(struct reservation_object *obj) | 177 | reservation_object_get_excl_rcu(struct reservation_object *obj) |
178 | { | 178 | { |
179 | struct fence *fence; | 179 | struct dma_fence *fence; |
180 | unsigned seq; | 180 | unsigned seq; |
181 | retry: | 181 | retry: |
182 | seq = read_seqcount_begin(&obj->seq); | 182 | seq = read_seqcount_begin(&obj->seq); |
@@ -186,22 +186,22 @@ retry: | |||
186 | rcu_read_unlock(); | 186 | rcu_read_unlock(); |
187 | goto retry; | 187 | goto retry; |
188 | } | 188 | } |
189 | fence = fence_get(fence); | 189 | fence = dma_fence_get(fence); |
190 | rcu_read_unlock(); | 190 | rcu_read_unlock(); |
191 | return fence; | 191 | return fence; |
192 | } | 192 | } |
193 | 193 | ||
194 | int reservation_object_reserve_shared(struct reservation_object *obj); | 194 | int reservation_object_reserve_shared(struct reservation_object *obj); |
195 | void reservation_object_add_shared_fence(struct reservation_object *obj, | 195 | void reservation_object_add_shared_fence(struct reservation_object *obj, |
196 | struct fence *fence); | 196 | struct dma_fence *fence); |
197 | 197 | ||
198 | void reservation_object_add_excl_fence(struct reservation_object *obj, | 198 | void reservation_object_add_excl_fence(struct reservation_object *obj, |
199 | struct fence *fence); | 199 | struct dma_fence *fence); |
200 | 200 | ||
201 | int reservation_object_get_fences_rcu(struct reservation_object *obj, | 201 | int reservation_object_get_fences_rcu(struct reservation_object *obj, |
202 | struct fence **pfence_excl, | 202 | struct dma_fence **pfence_excl, |
203 | unsigned *pshared_count, | 203 | unsigned *pshared_count, |
204 | struct fence ***pshared); | 204 | struct dma_fence ***pshared); |
205 | 205 | ||
206 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, | 206 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, |
207 | bool wait_all, bool intr, | 207 | bool wait_all, bool intr, |
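For orientation (not part of this diff): the reservation object keeps the same locking rules, only the fence type changes. A minimal sketch of publishing an exclusive fence and waiting for it; 'resv' and 'fence' are assumed to come from surrounding driver code:

static void example_publish_fence(struct reservation_object *resv,
				  struct dma_fence *fence)
{
	ww_mutex_lock(&resv->lock, NULL);	/* resv must be held to add fences */
	reservation_object_add_excl_fence(resv, fence);
	ww_mutex_unlock(&resv->lock);
}

static long example_wait_idle(struct reservation_object *resv)
{
	struct dma_fence *excl;
	long ret = 0;

	excl = reservation_object_get_excl_rcu(resv);	/* takes a reference */
	if (excl) {
		ret = dma_fence_wait(excl, true);	/* interruptible wait */
		dma_fence_put(excl);
	}
	return ret;
}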
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h index a1ba6a5ccdd6..c58c535d12a8 100644 --- a/include/linux/seqno-fence.h +++ b/include/linux/seqno-fence.h | |||
@@ -20,7 +20,7 @@ | |||
20 | #ifndef __LINUX_SEQNO_FENCE_H | 20 | #ifndef __LINUX_SEQNO_FENCE_H |
21 | #define __LINUX_SEQNO_FENCE_H | 21 | #define __LINUX_SEQNO_FENCE_H |
22 | 22 | ||
23 | #include <linux/fence.h> | 23 | #include <linux/dma-fence.h> |
24 | #include <linux/dma-buf.h> | 24 | #include <linux/dma-buf.h> |
25 | 25 | ||
26 | enum seqno_fence_condition { | 26 | enum seqno_fence_condition { |
@@ -29,15 +29,15 @@ enum seqno_fence_condition { | |||
29 | }; | 29 | }; |
30 | 30 | ||
31 | struct seqno_fence { | 31 | struct seqno_fence { |
32 | struct fence base; | 32 | struct dma_fence base; |
33 | 33 | ||
34 | const struct fence_ops *ops; | 34 | const struct dma_fence_ops *ops; |
35 | struct dma_buf *sync_buf; | 35 | struct dma_buf *sync_buf; |
36 | uint32_t seqno_ofs; | 36 | uint32_t seqno_ofs; |
37 | enum seqno_fence_condition condition; | 37 | enum seqno_fence_condition condition; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | extern const struct fence_ops seqno_fence_ops; | 40 | extern const struct dma_fence_ops seqno_fence_ops; |
41 | 41 | ||
42 | /** | 42 | /** |
43 | * to_seqno_fence - cast a fence to a seqno_fence | 43 | * to_seqno_fence - cast a fence to a seqno_fence |
@@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops; | |||
47 | * or the seqno_fence otherwise. | 47 | * or the seqno_fence otherwise. |
48 | */ | 48 | */ |
49 | static inline struct seqno_fence * | 49 | static inline struct seqno_fence * |
50 | to_seqno_fence(struct fence *fence) | 50 | to_seqno_fence(struct dma_fence *fence) |
51 | { | 51 | { |
52 | if (fence->ops != &seqno_fence_ops) | 52 | if (fence->ops != &seqno_fence_ops) |
53 | return NULL; | 53 | return NULL; |
@@ -83,9 +83,9 @@ to_seqno_fence(struct fence *fence) | |||
83 | * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the | 83 | * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the |
84 | * device's vm can be expensive. | 84 | * device's vm can be expensive. |
85 | * | 85 | * |
86 | * It is recommended for creators of seqno_fence to call fence_signal | 86 | * It is recommended for creators of seqno_fence to call dma_fence_signal() |
87 | * before destruction. This will prevent possible issues from wraparound at | 87 | * before destruction. This will prevent possible issues from wraparound at |
88 | * time of issue vs time of check, since users can check fence_is_signaled | 88 | * time of issue vs time of check, since users can check dma_fence_is_signaled() |
89 | * before submitting instructions for the hardware to wait on the fence. | 89 | * before submitting instructions for the hardware to wait on the fence. |
90 | * However, when ops.enable_signaling is not called, it doesn't have to be | 90 | * However, when ops.enable_signaling is not called, it doesn't have to be |
91 | * done as soon as possible, just before there's any real danger of seqno | 91 | * done as soon as possible, just before there's any real danger of seqno |
@@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock, | |||
96 | struct dma_buf *sync_buf, uint32_t context, | 96 | struct dma_buf *sync_buf, uint32_t context, |
97 | uint32_t seqno_ofs, uint32_t seqno, | 97 | uint32_t seqno_ofs, uint32_t seqno, |
98 | enum seqno_fence_condition cond, | 98 | enum seqno_fence_condition cond, |
99 | const struct fence_ops *ops) | 99 | const struct dma_fence_ops *ops) |
100 | { | 100 | { |
101 | BUG_ON(!fence || !sync_buf || !ops); | 101 | BUG_ON(!fence || !sync_buf || !ops); |
102 | BUG_ON(!ops->wait || !ops->enable_signaling || | 102 | BUG_ON(!ops->wait || !ops->enable_signaling || |
103 | !ops->get_driver_name || !ops->get_timeline_name); | 103 | !ops->get_driver_name || !ops->get_timeline_name); |
104 | 104 | ||
105 | /* | 105 | /* |
106 | * ops is used in fence_init for get_driver_name, so needs to be | 106 | * ops is used in dma_fence_init for get_driver_name, so needs to be |
107 | * initialized first | 107 | * initialized first |
108 | */ | 108 | */ |
109 | fence->ops = ops; | 109 | fence->ops = ops; |
110 | fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); | 110 | dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); |
111 | get_dma_buf(sync_buf); | 111 | get_dma_buf(sync_buf); |
112 | fence->sync_buf = sync_buf; | 112 | fence->sync_buf = sync_buf; |
113 | fence->seqno_ofs = seqno_ofs; | 113 | fence->seqno_ofs = seqno_ofs; |
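For orientation (not part of this diff): seqno_fence_init() keeps its shape, only the ops type changes. A minimal sketch, assuming the SEQNO_FENCE_WAIT_GEQUAL value from enum seqno_fence_condition and caller-provided ops that satisfy the BUG_ON checks; all constants are illustrative:

static void example_seqno_fence_setup(struct seqno_fence *sf,
				      spinlock_t *lock,
				      struct dma_buf *sync_buf,
				      const struct dma_fence_ops *ops)
{
	/* context 42, seqno stored at offset 0 of sync_buf, first seqno 1 */
	seqno_fence_init(sf, lock, sync_buf, 42, 0, 1,
			 SEQNO_FENCE_WAIT_GEQUAL, ops);
}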
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index aa17ccfc2f57..3e3ab84fc4cd 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h | |||
@@ -18,8 +18,8 @@ | |||
18 | #include <linux/ktime.h> | 18 | #include <linux/ktime.h> |
19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/fence.h> | 21 | #include <linux/dma-fence.h> |
22 | #include <linux/fence-array.h> | 22 | #include <linux/dma-fence-array.h> |
23 | 23 | ||
24 | /** | 24 | /** |
25 | * struct sync_file - sync file to export to the userspace | 25 | * struct sync_file - sync file to export to the userspace |
@@ -41,13 +41,13 @@ struct sync_file { | |||
41 | 41 | ||
42 | wait_queue_head_t wq; | 42 | wait_queue_head_t wq; |
43 | 43 | ||
44 | struct fence *fence; | 44 | struct dma_fence *fence; |
45 | struct fence_cb cb; | 45 | struct dma_fence_cb cb; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | #define POLL_ENABLED FENCE_FLAG_USER_BITS | 48 | #define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS |
49 | 49 | ||
50 | struct sync_file *sync_file_create(struct fence *fence); | 50 | struct sync_file *sync_file_create(struct dma_fence *fence); |
51 | struct fence *sync_file_get_fence(int fd); | 51 | struct dma_fence *sync_file_get_fence(int fd); |
52 | 52 | ||
53 | #endif /* _LINUX_SYNC_H */ | 53 | #endif /* _LINUX_SYNC_H */ |
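Given the two declarations above, the usual export/import pattern in a driver looks roughly like the sketch below. The my_* wrappers and the fd plumbing (get_unused_fd_flags()/fd_install()) are ordinary kernel helpers used here only for illustration; sync_file_create() and sync_file_get_fence() are the interfaces from this header.

  #include <linux/sync_file.h>
  #include <linux/dma-fence.h>
  #include <linux/file.h>
  #include <linux/fcntl.h>
  #include <linux/errno.h>

  /* Export: wrap a dma_fence in a sync_file and return an fd for userspace. */
  static int my_export_out_fence(struct dma_fence *fence)
  {
  	struct sync_file *sync_file;
  	int fd;

  	fd = get_unused_fd_flags(O_CLOEXEC);
  	if (fd < 0)
  		return fd;

  	sync_file = sync_file_create(fence);
  	if (!sync_file) {
  		put_unused_fd(fd);
  		return -ENOMEM;
  	}

  	fd_install(fd, sync_file->file);
  	return fd;
  }

  /* Import: resolve an in-fence fd handed in from userspace. */
  static struct dma_fence *my_import_in_fence(int fd)
  {
  	return sync_file_get_fence(fd); /* NULL if fd is not a sync_file */
  }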
diff --git a/include/trace/events/fence.h b/include/trace/events/dma_fence.h index d6dfa05ba322..1157cb4c3c6f 100644 --- a/include/trace/events/fence.h +++ b/include/trace/events/dma_fence.h | |||
@@ -1,17 +1,17 @@ | |||
1 | #undef TRACE_SYSTEM | 1 | #undef TRACE_SYSTEM |
2 | #define TRACE_SYSTEM fence | 2 | #define TRACE_SYSTEM dma_fence |
3 | 3 | ||
4 | #if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) | 4 | #if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) |
5 | #define _TRACE_FENCE_H | 5 | #define _TRACE_DMA_FENCE_H |
6 | 6 | ||
7 | #include <linux/tracepoint.h> | 7 | #include <linux/tracepoint.h> |
8 | 8 | ||
9 | struct fence; | 9 | struct dma_fence; |
10 | 10 | ||
11 | TRACE_EVENT(fence_annotate_wait_on, | 11 | TRACE_EVENT(dma_fence_annotate_wait_on, |
12 | 12 | ||
13 | /* fence: the fence waiting on f1, f1: the fence to be waited on. */ | 13 | /* fence: the fence waiting on f1, f1: the fence to be waited on. */ |
14 | TP_PROTO(struct fence *fence, struct fence *f1), | 14 | TP_PROTO(struct dma_fence *fence, struct dma_fence *f1), |
15 | 15 | ||
16 | TP_ARGS(fence, f1), | 16 | TP_ARGS(fence, f1), |
17 | 17 | ||
@@ -48,9 +48,9 @@ TRACE_EVENT(fence_annotate_wait_on, | |||
48 | __entry->waiting_context, __entry->waiting_seqno) | 48 | __entry->waiting_context, __entry->waiting_seqno) |
49 | ); | 49 | ); |
50 | 50 | ||
51 | DECLARE_EVENT_CLASS(fence, | 51 | DECLARE_EVENT_CLASS(dma_fence, |
52 | 52 | ||
53 | TP_PROTO(struct fence *fence), | 53 | TP_PROTO(struct dma_fence *fence), |
54 | 54 | ||
55 | TP_ARGS(fence), | 55 | TP_ARGS(fence), |
56 | 56 | ||
@@ -73,56 +73,56 @@ DECLARE_EVENT_CLASS(fence, | |||
73 | __entry->seqno) | 73 | __entry->seqno) |
74 | ); | 74 | ); |
75 | 75 | ||
76 | DEFINE_EVENT(fence, fence_emit, | 76 | DEFINE_EVENT(dma_fence, dma_fence_emit, |
77 | 77 | ||
78 | TP_PROTO(struct fence *fence), | 78 | TP_PROTO(struct dma_fence *fence), |
79 | 79 | ||
80 | TP_ARGS(fence) | 80 | TP_ARGS(fence) |
81 | ); | 81 | ); |
82 | 82 | ||
83 | DEFINE_EVENT(fence, fence_init, | 83 | DEFINE_EVENT(dma_fence, dma_fence_init, |
84 | 84 | ||
85 | TP_PROTO(struct fence *fence), | 85 | TP_PROTO(struct dma_fence *fence), |
86 | 86 | ||
87 | TP_ARGS(fence) | 87 | TP_ARGS(fence) |
88 | ); | 88 | ); |
89 | 89 | ||
90 | DEFINE_EVENT(fence, fence_destroy, | 90 | DEFINE_EVENT(dma_fence, dma_fence_destroy, |
91 | 91 | ||
92 | TP_PROTO(struct fence *fence), | 92 | TP_PROTO(struct dma_fence *fence), |
93 | 93 | ||
94 | TP_ARGS(fence) | 94 | TP_ARGS(fence) |
95 | ); | 95 | ); |
96 | 96 | ||
97 | DEFINE_EVENT(fence, fence_enable_signal, | 97 | DEFINE_EVENT(dma_fence, dma_fence_enable_signal, |
98 | 98 | ||
99 | TP_PROTO(struct fence *fence), | 99 | TP_PROTO(struct dma_fence *fence), |
100 | 100 | ||
101 | TP_ARGS(fence) | 101 | TP_ARGS(fence) |
102 | ); | 102 | ); |
103 | 103 | ||
104 | DEFINE_EVENT(fence, fence_signaled, | 104 | DEFINE_EVENT(dma_fence, dma_fence_signaled, |
105 | 105 | ||
106 | TP_PROTO(struct fence *fence), | 106 | TP_PROTO(struct dma_fence *fence), |
107 | 107 | ||
108 | TP_ARGS(fence) | 108 | TP_ARGS(fence) |
109 | ); | 109 | ); |
110 | 110 | ||
111 | DEFINE_EVENT(fence, fence_wait_start, | 111 | DEFINE_EVENT(dma_fence, dma_fence_wait_start, |
112 | 112 | ||
113 | TP_PROTO(struct fence *fence), | 113 | TP_PROTO(struct dma_fence *fence), |
114 | 114 | ||
115 | TP_ARGS(fence) | 115 | TP_ARGS(fence) |
116 | ); | 116 | ); |
117 | 117 | ||
118 | DEFINE_EVENT(fence, fence_wait_end, | 118 | DEFINE_EVENT(dma_fence, dma_fence_wait_end, |
119 | 119 | ||
120 | TP_PROTO(struct fence *fence), | 120 | TP_PROTO(struct dma_fence *fence), |
121 | 121 | ||
122 | TP_ARGS(fence) | 122 | TP_ARGS(fence) |
123 | ); | 123 | ); |
124 | 124 | ||
125 | #endif /* _TRACE_FENCE_H */ | 125 | #endif /* _TRACE_DMA_FENCE_H */ |
126 | 126 | ||
127 | /* This part must be outside protection */ | 127 | /* This part must be outside protection */ |
128 | #include <trace/define_trace.h> | 128 | #include <trace/define_trace.h> |
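Since DEFINE_EVENT generates a trace_<event name>() helper per event, the rename changes every call site from trace_fence_*() to trace_dma_fence_*(). The fragment below only sketches the resulting names; the real call sites live in the dma-fence core, and example_trace_usage() is invented for illustration.

  #include <trace/events/dma_fence.h>

  static void example_trace_usage(struct dma_fence *fence)
  {
  	/* From DEFINE_EVENT(dma_fence, dma_fence_enable_signal, ...) */
  	trace_dma_fence_enable_signal(fence);
  	/* From DEFINE_EVENT(dma_fence, dma_fence_signaled, ...) */
  	trace_dma_fence_signaled(fence);
  }

With TRACE_SYSTEM renamed, the events should also show up under tracing/events/dma_fence/ rather than tracing/events/fence/ in tracefs.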
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index d6b5a21f3d3c..4684f378f046 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h | |||
@@ -81,6 +81,8 @@ extern "C" { | |||
81 | #define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3) | 81 | #define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3) |
82 | /* Flag that create shadow bo(GTT) while allocating vram bo */ | 82 | /* Flag that create shadow bo(GTT) while allocating vram bo */ |
83 | #define AMDGPU_GEM_CREATE_SHADOW (1 << 4) | 83 | #define AMDGPU_GEM_CREATE_SHADOW (1 << 4) |
84 | /* Flag that the buffer should be allocated in contiguous (linear) VRAM */ | ||
85 | #define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5) | ||
84 | 86 | ||
85 | struct drm_amdgpu_gem_create_in { | 87 | struct drm_amdgpu_gem_create_in { |
86 | /** the requested memory size */ | 88 | /** the requested memory size */ |
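For the new AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS flag, a userspace sketch might look like the following. It assumes the existing union drm_amdgpu_gem_create layout and the DRM_AMDGPU_GEM_CREATE command together with libdrm's drmCommandWriteRead(); the 4 KiB alignment and the helper name are arbitrary.

  #include <string.h>
  #include <xf86drm.h>
  #include <amdgpu_drm.h>

  static int alloc_contiguous_vram(int fd, __u64 size, __u32 *handle)
  {
  	union drm_amdgpu_gem_create args;
  	int ret;

  	memset(&args, 0, sizeof(args));
  	args.in.bo_size = size;
  	args.in.alignment = 4096;
  	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
  	/* New in this patch: ask for a physically linear VRAM allocation. */
  	args.in.domain_flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;

  	ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE,
  				  &args, sizeof(args));
  	if (ret == 0)
  		*handle = args.out.handle;
  	return ret;
  }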
@@ -436,6 +438,7 @@ struct drm_amdgpu_cs_chunk_data { | |||
436 | * | 438 | * |
437 | */ | 439 | */ |
438 | #define AMDGPU_IDS_FLAGS_FUSION 0x1 | 440 | #define AMDGPU_IDS_FLAGS_FUSION 0x1 |
441 | #define AMDGPU_IDS_FLAGS_PREEMPTION 0x2 | ||
439 | 442 | ||
440 | /* indicate if acceleration can be working */ | 443 | /* indicate if acceleration can be working */ |
441 | #define AMDGPU_INFO_ACCEL_WORKING 0x00 | 444 | #define AMDGPU_INFO_ACCEL_WORKING 0x00 |
@@ -487,6 +490,10 @@ struct drm_amdgpu_cs_chunk_data { | |||
487 | #define AMDGPU_INFO_VIS_VRAM_USAGE 0x17 | 490 | #define AMDGPU_INFO_VIS_VRAM_USAGE 0x17 |
488 | /* number of TTM buffer evictions */ | 491 | /* number of TTM buffer evictions */ |
489 | #define AMDGPU_INFO_NUM_EVICTIONS 0x18 | 492 | #define AMDGPU_INFO_NUM_EVICTIONS 0x18 |
493 | /* Query memory about VRAM and GTT domains */ | ||
494 | #define AMDGPU_INFO_MEMORY 0x19 | ||
495 | /* Query vce clock table */ | ||
496 | #define AMDGPU_INFO_VCE_CLOCK_TABLE 0x1A | ||
490 | 497 | ||
491 | #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 | 498 | #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 |
492 | #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff | 499 | #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff |
@@ -572,6 +579,34 @@ struct drm_amdgpu_info_vram_gtt { | |||
572 | __u64 gtt_size; | 579 | __u64 gtt_size; |
573 | }; | 580 | }; |
574 | 581 | ||
582 | struct drm_amdgpu_heap_info { | ||
583 | /** max. physical memory */ | ||
584 | __u64 total_heap_size; | ||
585 | |||
586 | /** Theoretical max. available memory in the given heap */ | ||
587 | __u64 usable_heap_size; | ||
588 | |||
589 | /** | ||
590 | * Number of bytes allocated in the heap. This includes all processes | ||
591 | * and private allocations in the kernel. It changes when new buffers | ||
592 | * are allocated, freed, and moved. It cannot be larger than | ||
593 | * heap_size. | ||
594 | */ | ||
595 | __u64 heap_usage; | ||
596 | |||
597 | /** | ||
598 | * Theoretical possible max. size of buffer which | ||
599 | * could be allocated in the given heap | ||
600 | */ | ||
601 | __u64 max_allocation; | ||
602 | }; | ||
603 | |||
604 | struct drm_amdgpu_memory_info { | ||
605 | struct drm_amdgpu_heap_info vram; | ||
606 | struct drm_amdgpu_heap_info cpu_accessible_vram; | ||
607 | struct drm_amdgpu_heap_info gtt; | ||
608 | }; | ||
609 | |||
575 | struct drm_amdgpu_info_firmware { | 610 | struct drm_amdgpu_info_firmware { |
576 | __u32 ver; | 611 | __u32 ver; |
577 | __u32 feature; | 612 | __u32 feature; |
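A userspace sketch for the new AMDGPU_INFO_MEMORY query follows. It assumes the existing struct drm_amdgpu_info request layout (return_pointer/return_size/query) and libdrm's drmCommandWrite(), neither of which appears in this hunk; the helper name is made up.

  #include <stdint.h>
  #include <string.h>
  #include <xf86drm.h>
  #include <amdgpu_drm.h>

  static int query_memory_info(int fd, struct drm_amdgpu_memory_info *mem)
  {
  	struct drm_amdgpu_info request;

  	memset(&request, 0, sizeof(request));
  	request.return_pointer = (uintptr_t)mem;
  	request.return_size = sizeof(*mem);
  	request.query = AMDGPU_INFO_MEMORY;

  	/* On success the kernel fills mem->vram, mem->cpu_accessible_vram
  	 * and mem->gtt with the heap info defined above. */
  	return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
  }

Each returned drm_amdgpu_heap_info then carries the total and usable heap sizes, the current usage and the largest single allocation for that domain.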
@@ -645,6 +680,24 @@ struct drm_amdgpu_info_hw_ip { | |||
645 | __u32 _pad; | 680 | __u32 _pad; |
646 | }; | 681 | }; |
647 | 682 | ||
683 | #define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6 | ||
684 | |||
685 | struct drm_amdgpu_info_vce_clock_table_entry { | ||
686 | /** System clock */ | ||
687 | __u32 sclk; | ||
688 | /** Memory clock */ | ||
689 | __u32 mclk; | ||
690 | /** VCE clock */ | ||
691 | __u32 eclk; | ||
692 | __u32 pad; | ||
693 | }; | ||
694 | |||
695 | struct drm_amdgpu_info_vce_clock_table { | ||
696 | struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES]; | ||
697 | __u32 num_valid_entries; | ||
698 | __u32 pad; | ||
699 | }; | ||
700 | |||
648 | /* | 701 | /* |
649 | * Supported GPU families | 702 | * Supported GPU families |
650 | */ | 703 | */ |
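Finally, a short sketch of consuming an AMDGPU_INFO_VCE_CLOCK_TABLE result: only entries below num_valid_entries are meaningful, and the table is bounded by AMDGPU_VCE_CLOCK_TABLE_ENTRIES. The print helper is illustrative, and the query itself is assumed to go through the same drm_amdgpu_info mechanism as above.

  #include <stdio.h>
  #include <amdgpu_drm.h>

  /* Dump the valid VCE clock states; sclk/mclk/eclk are printed in whatever
   * units the kernel reports, with no conversion attempted here. */
  static void print_vce_clock_table(const struct drm_amdgpu_info_vce_clock_table *t)
  {
  	__u32 i;

  	for (i = 0; i < t->num_valid_entries &&
  		    i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++)
  		printf("state %u: sclk %u, mclk %u, eclk %u\n",
  		       i, t->entries[i].sclk, t->entries[i].mclk,
  		       t->entries[i].eclk);
  }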