| author | Andrey Grodzovsky <Andrey.Grodzovsky@amd.com> | 2017-10-12 16:41:39 -0400 |
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2017-12-04 16:33:10 -0500 |
| commit | 27105db6c63a571b91d01e749d026105a1e63bcf | |
| tree | c1da4f3b9a28ec5ab48a796beb99c7dd1fb3e629 | |
| parent | a8a51a70416baab813606c6014c5f0746958dfb2 | |
drm/amdgpu: Add SPSC queue to scheduler.
It is intended to substitute the bounded FIFO we are currently using.
Signed-off-by: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
| -rw-r--r-- | drivers/gpu/drm/amd/scheduler/spsc_queue.h | 121 |
1 file changed, 121 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/spsc_queue.h b/drivers/gpu/drm/amd/scheduler/spsc_queue.h
new file mode 100644
index 000000000000..5902f35ce759
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/spsc_queue.h
@@ -0,0 +1,121 @@
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef AMD_SCHEDULER_SPSC_QUEUE_H_
#define AMD_SCHEDULER_SPSC_QUEUE_H_

#include <linux/atomic.h>
#include <linux/preempt.h>	/* preempt_disable()/preempt_enable() */

/** SPSC lockless queue */

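/*
 * The single consumer owns @head; the single producer owns @tail.
 * @tail stores the address of the last node's @next field (or of
 * @head itself when the queue is empty), so a push is one atomic
 * exchange plus one store.
 */
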
struct spsc_node {

	/* next node in the queue, NULL for the last one */
	struct spsc_node *next;
};

struct spsc_queue {

	struct spsc_node *head;

	/* atomic pointer to struct spsc_node* */
	atomic_long_t tail;

	atomic_t job_count;
};

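/* An empty queue has @head == NULL and @tail == &queue->head. */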
static inline void spsc_queue_init(struct spsc_queue *queue)
{
	queue->head = NULL;
	atomic_long_set(&queue->tail, (long)&queue->head);
	atomic_set(&queue->job_count, 0);
}

static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
{
	return queue->head;
}

static inline int spsc_queue_count(struct spsc_queue *queue)
{
	return atomic_read(&queue->job_count);
}

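/*
 * Producer side only. Returns true if the node was pushed onto an empty
 * queue, i.e. the caller should wake the consumer.
 */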
static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
{
	struct spsc_node **tail;

	node->next = NULL;

	preempt_disable();

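	/*
	 * The xchg() below atomically claims the tail slot: @tail moves to
	 * the new node's @next before the old tail slot is filled in by the
	 * WRITE_ONCE(). In that window the consumer may still read a NULL
	 * link; spsc_queue_pop() handles this in its slowpath.
	 */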
	tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
	WRITE_ONCE(*tail, node);
	atomic_inc(&queue->job_count);

	/*
	 * For the first element, make sure the new node is visible to the
	 * consumer thread before we wake the kernel thread to tell it there
	 * is new work to do.
	 */
	smp_wmb();

	preempt_enable();

	return tail == &queue->head;
}
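/* Consumer side only; returns NULL when the queue is empty. */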
static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
{
	struct spsc_node *next, *node;

	/* Pairs with the producer's smp_wmb() so the pushed node is visible */
	smp_rmb();

	node = READ_ONCE(queue->head);

	if (!node)
		return NULL;

	next = READ_ONCE(node->next);
	WRITE_ONCE(queue->head, next);
	if (unlikely(!next)) {
		/* slowpath for the last element in the queue */

		if (atomic_long_cmpxchg(&queue->tail,
				(long)&node->next, (long)&queue->head) != (long)&node->next) {
			/* Updating tail failed; wait for the new next to appear */
			do {
				smp_rmb();
			} while (unlikely(!(queue->head = READ_ONCE(node->next))));
		}
	}

	atomic_dec(&queue->job_count);
	return node;
}

#endif /* AMD_SCHEDULER_SPSC_QUEUE_H_ */
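
For reference, a minimal usage sketch (not from the patch itself; the `my_job` struct and the `producer_submit()`/`consumer_fetch()` names are hypothetical): the producer embeds an `spsc_node` in its job structure and pushes it, and the single consumer pops nodes and recovers the enclosing job with `container_of()`. A `true` return from `spsc_queue_push()` means the queue was empty, which is the producer's cue to wake the consumer thread.

```c
#include <linux/kernel.h>	/* container_of() */
#include "spsc_queue.h"

/* Hypothetical job wrapper embedding the queue node. */
struct my_job {
	struct spsc_node node;
	int id;
};

static struct spsc_queue queue;	/* spsc_queue_init(&queue) must run once first */

/* Single producer: push a job; on a true return the queue was empty,
 * so this is where the consumer thread would be woken. */
static void producer_submit(struct my_job *job)
{
	if (spsc_queue_push(&queue, &job->node)) {
		/* e.g. wake up the scheduler's kernel thread here */
	}
}

/* Single consumer: pop one job, or NULL if the queue is empty. */
static struct my_job *consumer_fetch(void)
{
	struct spsc_node *node = spsc_queue_pop(&queue);

	return node ? container_of(node, struct my_job, node) : NULL;
}
```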
