| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-11 21:12:22 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-11 21:12:22 -0400 |
| commit | 6b25e21fa6f26d0f0d45f161d169029411c84286 | |
| tree | fdff805ecd81ec46951f49577efe450ddb7d060a /drivers/dma-buf | |
| parent | a379f71a30dddbd2e7393624e455ce53c87965d1 | |
| parent | 69405d3da98b48633b78a49403e4f9cdb7c6a0f5 | |
Merge tag 'drm-for-v4.9' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"Core:
- Fence destaging work
- DRIVER_LEGACY to split off legacy drm drivers
- drm_mm refactoring
- Splitting drm_crtc.c into chunks and documenting better
- Display info fixes
- rbtree support for prime buffer lookup
- Simple VGA DAC driver
Panel:
- Add Nexus 7 panel
- More simple panels
i915:
- Refactoring GEM naming
- Refactored vma/active tracking
- Lockless request lookups
- Better stolen memory support
- FBC fixes
- SKL watermark fixes
- VGPU improvements
- dma-buf fencing support
- Better DP dongle support
amdgpu:
- Powerplay for Iceland asics
- Improved GPU reset support
- UVD/VCE powergating support for CZ/ST
- Preinitialised VRAM buffer support
- Virtual display support
- Initial SI support
- GTT rework
- PCI shutdown callback support
- HPD IRQ storm fixes
amdkfd:
- bugfixes
tilcdc:
- Atomic modesetting support
mediatek:
- AAL + GAMMA engine support
- Hook up gamma LUT
- Temporal dithering support
imx:
- Pixel clock from devicetree
- drm bridge support for LVDS bridges
- active plane reconfiguration
- VDIC deinterlacer support
- Frame synchronisation unit support
- Color space conversion support
analogix:
- PSR support
- Better panel on/off support
rockchip:
- rk3399 vop/crtc support
- PSR support
vc4:
- Interlaced vblank timing
- 3D rendering CPU overhead reduction
- HDMI output fixes
tda998x:
- HDMI audio ASoC support
sunxi:
- Allwinner A33 support
- better TCON support
msm:
- DT binding cleanups
- Explicit fence-fd support
sti:
- remove sti415/416 support
etnaviv:
- MMUv2 refactoring
- GC3000 support
exynos:
- Refactoring HDMI DDC/PHY
- G2D pm regression fix
- Page fault issues with wait for vblank
There is no nouveau work in this tree, as Ben didn't get a pull
request in, and he was fighting moving to atomic and adding mst
support, so maybe best it waits for a cycle"
* tag 'drm-for-v4.9' of git://people.freedesktop.org/~airlied/linux: (1412 commits)
drm/crtc: constify drm_crtc_index parameter
drm/i915: Fix conflict resolution from backmerge of v4.8-rc8 to drm-next
drm/i915/guc: Unwind GuC workqueue reservation if request construction fails
drm/i915: Reset the breadcrumbs IRQ more carefully
drm/i915: Force relocations via cpu if we run out of idle aperture
drm/i915: Distinguish last emitted request from last submitted request
drm/i915: Allow DP to work w/o EDID
drm/i915: Move long hpd handling into the hotplug work
drm/i915/execlists: Reinitialise context image after GPU hang
drm/i915: Use correct index for backtracking HUNG semaphores
drm/i915: Unalias obj->phys_handle and obj->userptr
drm/i915: Just clear the mmiodebug before a register access
drm/i915/gen9: only add the planes actually affected by ddb changes
drm/i915: Allow PCH DPLL sharing regardless of DPLL_SDVO_HIGH_SPEED
drm/i915/bxt: Fix HDMI DPLL configuration
drm/i915/gen9: fix the watermark res_blocks value
drm/i915/gen9: fix plane_blocks_per_line on watermarks calculations
drm/i915/gen9: minimum scanlines for Y tile is not always 4
drm/i915/gen9: fix the WaWmMemoryReadLatency implementation
drm/i915/kbl: KBL also needs to run the SAGV code
...
Diffstat (limited to 'drivers/dma-buf')
| file | lines changed |
|---|---|
| drivers/dma-buf/dma-buf.c | 23 |
| drivers/dma-buf/fence-array.c | 7 |
| drivers/dma-buf/reservation.c | 2 |
| drivers/dma-buf/sync_debug.c | 12 |
| drivers/dma-buf/sync_file.c | 204 |
5 files changed, 176 insertions, 72 deletions
```diff
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ddaee60ae52a..cf04d249a6a4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -586,6 +586,22 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
+static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+                                      enum dma_data_direction direction)
+{
+        bool write = (direction == DMA_BIDIRECTIONAL ||
+                      direction == DMA_TO_DEVICE);
+        struct reservation_object *resv = dmabuf->resv;
+        long ret;
+
+        /* Wait on any implicit rendering fences */
+        ret = reservation_object_wait_timeout_rcu(resv, write, true,
+                                                  MAX_SCHEDULE_TIMEOUT);
+        if (ret < 0)
+                return ret;
+
+        return 0;
+}
 
 /**
  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
@@ -608,6 +624,13 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
         if (dmabuf->ops->begin_cpu_access)
                 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
+        /* Ensure that all fences are waited upon - but we first allow
+         * the native handler the chance to do so more efficiently if it
+         * chooses. A double invocation here will be reasonably cheap no-op.
+         */
+        if (ret == 0)
+                ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
         return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
```
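With the helper above, `dma_buf_begin_cpu_access()` now waits on the buffer's implicit reservation-object fences before CPU access proceeds. A minimal sketch of the importer-side calling pattern that benefits from this (the function and buffer names are illustrative, not part of the patch):

```c
#include <linux/dma-buf.h>

/* Hypothetical importer: read back an imported buffer with the CPU. */
static int example_cpu_read(struct dma_buf *buf)
{
        int ret;

        /* Returns 0 only after any implicit rendering fences have signaled. */
        ret = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE);
        if (ret)
                return ret;

        /* ... CPU reads of the buffer contents go here ... */

        return dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
}
```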
```diff
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
index a8731c853da6..f1989fcaf354 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/fence-array.c
@@ -99,6 +99,7 @@ const struct fence_ops fence_array_ops = {
         .wait = fence_default_wait,
         .release = fence_array_release,
 };
+EXPORT_SYMBOL(fence_array_ops);
 
 /**
  * fence_array_create - Create a custom fence array
@@ -106,14 +107,14 @@ const struct fence_ops fence_array_ops = {
  * @fences: [in] array containing the fences
  * @context: [in] fence context to use
  * @seqno: [in] sequence number to use
- * @signal_on_any [in] signal on any fence in the array
+ * @signal_on_any: [in] signal on any fence in the array
  *
  * Allocate a fence_array object and initialize the base fence with fence_init().
  * In case of error it returns NULL.
  *
- * The caller should allocte the fences array with num_fences size
+ * The caller should allocate the fences array with num_fences size
  * and fill it with the fences it wants to add to the object. Ownership of this
- * array is take and fence_put() is used on each fence on release.
+ * array is taken and fence_put() is used on each fence on release.
  *
  * If @signal_on_any is true the fence array signals if any fence in the array
  * signals, otherwise it signals when all fences in the array signal.
```
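The kernel-doc fixes above describe `fence_array_create()`, which the sync_file rework below relies on to collapse several fences into one. A rough sketch of that use, mirroring `sync_file_set_fence()` further down (the wrapper name is illustrative only):

```c
#include <linux/fence-array.h>

/*
 * Illustrative only: wrap num_fences fences in a single fence_array that
 * signals once all of them have signaled. The array takes ownership of the
 * "fences" allocation and fence_put()s each entry on release.
 */
static struct fence *example_collapse_fences(struct fence **fences,
                                             int num_fences)
{
        struct fence_array *array;

        array = fence_array_create(num_fences, fences,
                                   fence_context_alloc(1), 1, false);
        if (!array)
                return NULL;

        return &array->base;
}
```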
```diff
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 9566a62ad8e3..723d8af988e5 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -205,7 +205,7 @@ done:
  * @fence: the shared fence to add
  *
  * Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared_fence has been called.
+ * reservation_object_reserve_shared() has been called.
  */
 void reservation_object_add_shared_fence(struct reservation_object *obj,
                                          struct fence *fence)
```
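The corrected comment points at `reservation_object_reserve_shared()`; the expected calling sequence is roughly the following sketch, with `obj->lock` (a ww_mutex) held across both calls (the wrapper name is illustrative, not from this patch):

```c
#include <linux/reservation.h>

/* Illustrative sketch of the reserve-then-add convention the comment names. */
static int example_add_shared_fence(struct reservation_object *obj,
                                    struct fence *fence)
{
        int ret;

        ww_mutex_lock(&obj->lock, NULL);

        ret = reservation_object_reserve_shared(obj);
        if (!ret)
                reservation_object_add_shared_fence(obj, fence);

        ww_mutex_unlock(&obj->lock);
        return ret;
}
```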
```diff
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index fab95204cf74..2dd4c3db6caa 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -135,10 +135,16 @@ static void sync_print_sync_file(struct seq_file *s,
         int i;
 
         seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
-                   sync_status_str(atomic_read(&sync_file->status)));
+                   sync_status_str(!fence_is_signaled(sync_file->fence)));
 
-        for (i = 0; i < sync_file->num_fences; ++i)
-                sync_print_fence(s, sync_file->cbs[i].fence, true);
+        if (fence_is_array(sync_file->fence)) {
+                struct fence_array *array = to_fence_array(sync_file->fence);
+
+                for (i = 0; i < array->num_fences; ++i)
+                        sync_print_fence(s, array->fences[i], true);
+        } else {
+                sync_print_fence(s, sync_file->fence, true);
+        }
 }
 
 static int sync_debugfs_show(struct seq_file *s, void *unused)
```
```diff
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 9aaa608dfe01..b29a9e817320 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -28,11 +28,11 @@
 
 static const struct file_operations sync_file_fops;
 
-static struct sync_file *sync_file_alloc(int size)
+static struct sync_file *sync_file_alloc(void)
 {
         struct sync_file *sync_file;
 
-        sync_file = kzalloc(size, GFP_KERNEL);
+        sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
         if (!sync_file)
                 return NULL;
 
@@ -45,6 +45,8 @@ static struct sync_file *sync_file_alloc(int size)
 
         init_waitqueue_head(&sync_file->wq);
 
+        INIT_LIST_HEAD(&sync_file->cb.node);
+
         return sync_file;
 
 err:
@@ -54,14 +56,11 @@ err:
 
 static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
 {
-        struct sync_file_cb *check;
         struct sync_file *sync_file;
 
-        check = container_of(cb, struct sync_file_cb, cb);
-        sync_file = check->sync_file;
+        sync_file = container_of(cb, struct sync_file, cb);
 
-        if (atomic_dec_and_test(&sync_file->status))
-                wake_up_all(&sync_file->wq);
+        wake_up_all(&sync_file->wq);
 }
 
 /**
@@ -76,23 +75,17 @@ struct sync_file *sync_file_create(struct fence *fence)
 {
         struct sync_file *sync_file;
 
-        sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]));
+        sync_file = sync_file_alloc();
         if (!sync_file)
                 return NULL;
 
-        sync_file->num_fences = 1;
-        atomic_set(&sync_file->status, 1);
+        sync_file->fence = fence;
+
         snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
                  fence->ops->get_driver_name(fence),
                  fence->ops->get_timeline_name(fence), fence->context,
                  fence->seqno);
 
-        sync_file->cbs[0].fence = fence;
-        sync_file->cbs[0].sync_file = sync_file;
-        if (fence_add_callback(fence, &sync_file->cbs[0].cb,
-                               fence_check_cb_func))
-                atomic_dec(&sync_file->status);
-
         return sync_file;
 }
 EXPORT_SYMBOL(sync_file_create);
@@ -121,14 +114,73 @@ err:
         return NULL;
 }
 
-static void sync_file_add_pt(struct sync_file *sync_file, int *i,
-                             struct fence *fence)
+/**
+ * sync_file_get_fence - get the fence related to the sync_file fd
+ * @fd: sync_file fd to get the fence from
+ *
+ * Ensures @fd references a valid sync_file and returns a fence that
+ * represents all fence in the sync_file. On error NULL is returned.
+ */
+struct fence *sync_file_get_fence(int fd)
+{
+        struct sync_file *sync_file;
+        struct fence *fence;
+
+        sync_file = sync_file_fdget(fd);
+        if (!sync_file)
+                return NULL;
+
+        fence = fence_get(sync_file->fence);
+        fput(sync_file->file);
+
+        return fence;
+}
+EXPORT_SYMBOL(sync_file_get_fence);
+
+static int sync_file_set_fence(struct sync_file *sync_file,
+                               struct fence **fences, int num_fences)
+{
+        struct fence_array *array;
+
+        /*
+         * The reference for the fences in the new sync_file and held
+         * in add_fence() during the merge procedure, so for num_fences == 1
+         * we already own a new reference to the fence. For num_fence > 1
+         * we own the reference of the fence_array creation.
+         */
+        if (num_fences == 1) {
+                sync_file->fence = fences[0];
+                kfree(fences);
+        } else {
+                array = fence_array_create(num_fences, fences,
+                                           fence_context_alloc(1), 1, false);
+                if (!array)
+                        return -ENOMEM;
+
+                sync_file->fence = &array->base;
+        }
+
+        return 0;
+}
+
+static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+{
+        if (fence_is_array(sync_file->fence)) {
+                struct fence_array *array = to_fence_array(sync_file->fence);
+
+                *num_fences = array->num_fences;
+                return array->fences;
+        }
+
+        *num_fences = 1;
+        return &sync_file->fence;
+}
+
+static void add_fence(struct fence **fences, int *i, struct fence *fence)
 {
-        sync_file->cbs[*i].fence = fence;
-        sync_file->cbs[*i].sync_file = sync_file;
+        fences[*i] = fence;
 
-        if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
-                                fence_check_cb_func)) {
+        if (!fence_is_signaled(fence)) {
                 fence_get(fence);
                 (*i)++;
         }
@@ -147,16 +199,24 @@ static void sync_file_add_pt(struct sync_file *sync_file, int *i,
 static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
                                          struct sync_file *b)
 {
-        int num_fences = a->num_fences + b->num_fences;
         struct sync_file *sync_file;
-        int i, i_a, i_b;
-        unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
+        struct fence **fences, **nfences, **a_fences, **b_fences;
+        int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
 
-        sync_file = sync_file_alloc(size);
+        sync_file = sync_file_alloc();
         if (!sync_file)
                 return NULL;
 
-        atomic_set(&sync_file->status, num_fences);
+        a_fences = get_fences(a, &a_num_fences);
+        b_fences = get_fences(b, &b_num_fences);
+        if (a_num_fences > INT_MAX - b_num_fences)
+                return NULL;
+
+        num_fences = a_num_fences + b_num_fences;
+
+        fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+        if (!fences)
+                goto err;
 
         /*
          * Assume sync_file a and b are both ordered and have no
@@ -165,55 +225,69 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
          * If a sync_file can only be created with sync_file_merge
          * and sync_file_create, this is a reasonable assumption.
          */
-        for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
-                struct fence *pt_a = a->cbs[i_a].fence;
-                struct fence *pt_b = b->cbs[i_b].fence;
+        for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+                struct fence *pt_a = a_fences[i_a];
+                struct fence *pt_b = b_fences[i_b];
 
                 if (pt_a->context < pt_b->context) {
-                        sync_file_add_pt(sync_file, &i, pt_a);
+                        add_fence(fences, &i, pt_a);
 
                         i_a++;
                 } else if (pt_a->context > pt_b->context) {
-                        sync_file_add_pt(sync_file, &i, pt_b);
+                        add_fence(fences, &i, pt_b);
 
                         i_b++;
                 } else {
                         if (pt_a->seqno - pt_b->seqno <= INT_MAX)
-                                sync_file_add_pt(sync_file, &i, pt_a);
+                                add_fence(fences, &i, pt_a);
                         else
-                                sync_file_add_pt(sync_file, &i, pt_b);
+                                add_fence(fences, &i, pt_b);
 
                         i_a++;
                         i_b++;
                 }
         }
 
-        for (; i_a < a->num_fences; i_a++)
-                sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
+        for (; i_a < a_num_fences; i_a++)
+                add_fence(fences, &i, a_fences[i_a]);
+
+        for (; i_b < b_num_fences; i_b++)
+                add_fence(fences, &i, b_fences[i_b]);
 
-        for (; i_b < b->num_fences; i_b++)
-                sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
+        if (i == 0)
+                fences[i++] = fence_get(a_fences[0]);
 
-        if (num_fences > i)
-                atomic_sub(num_fences - i, &sync_file->status);
-        sync_file->num_fences = i;
+        if (num_fences > i) {
+                nfences = krealloc(fences, i * sizeof(*fences),
+                                   GFP_KERNEL);
+                if (!nfences)
+                        goto err;
+
+                fences = nfences;
+        }
+
+        if (sync_file_set_fence(sync_file, fences, i) < 0) {
+                kfree(fences);
+                goto err;
+        }
 
         strlcpy(sync_file->name, name, sizeof(sync_file->name));
         return sync_file;
+
+err:
+        fput(sync_file->file);
+        return NULL;
+
 }
 
 static void sync_file_free(struct kref *kref)
 {
         struct sync_file *sync_file = container_of(kref, struct sync_file,
                                                      kref);
-        int i;
-
-        for (i = 0; i < sync_file->num_fences; ++i) {
-                fence_remove_callback(sync_file->cbs[i].fence,
-                                      &sync_file->cbs[i].cb);
-                fence_put(sync_file->cbs[i].fence);
-        }
 
+        if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+                fence_remove_callback(sync_file->fence, &sync_file->cb);
+        fence_put(sync_file->fence);
         kfree(sync_file);
 }
 
@@ -228,17 +302,17 @@ static int sync_file_release(struct inode *inode, struct file *file)
 static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 {
         struct sync_file *sync_file = file->private_data;
-        int status;
 
         poll_wait(file, &sync_file->wq, wait);
 
-        status = atomic_read(&sync_file->status);
+        if (!poll_does_not_wait(wait) &&
+            !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+                if (fence_add_callback(sync_file->fence, &sync_file->cb,
+                                       fence_check_cb_func) < 0)
+                        wake_up_all(&sync_file->wq);
+        }
 
-        if (!status)
-                return POLLIN;
-        if (status < 0)
-                return POLLERR;
-        return 0;
+        return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
 }
 
 static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -315,8 +389,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 {
         struct sync_file_info info;
         struct sync_fence_info *fence_info = NULL;
+        struct fence **fences;
         __u32 size;
-        int ret, i;
+        int num_fences, ret, i;
 
         if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
                 return -EFAULT;
@@ -324,6 +399,8 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
         if (info.flags || info.pad)
                 return -EINVAL;
 
+        fences = get_fences(sync_file, &num_fences);
+
         /*
          * Passing num_fences = 0 means that userspace doesn't want to
          * retrieve any sync_fence_info. If num_fences = 0 we skip filling
@@ -333,16 +410,16 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
         if (!info.num_fences)
                 goto no_fences;
 
-        if (info.num_fences < sync_file->num_fences)
+        if (info.num_fences < num_fences)
                 return -EINVAL;
 
-        size = sync_file->num_fences * sizeof(*fence_info);
+        size = num_fences * sizeof(*fence_info);
         fence_info = kzalloc(size, GFP_KERNEL);
         if (!fence_info)
                 return -ENOMEM;
 
-        for (i = 0; i < sync_file->num_fences; ++i)
-                sync_fill_fence_info(sync_file->cbs[i].fence, &fence_info[i]);
+        for (i = 0; i < num_fences; i++)
+                sync_fill_fence_info(fences[i], &fence_info[i]);
 
         if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
                          size)) {
@@ -352,11 +429,8 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 
 no_fences:
         strlcpy(info.name, sync_file->name, sizeof(info.name));
-        info.status = atomic_read(&sync_file->status);
-        if (info.status >= 0)
-                info.status = !info.status;
-
-        info.num_fences = sync_file->num_fences;
+        info.status = fence_is_signaled(sync_file->fence);
+        info.num_fences = num_fences;
 
         if (copy_to_user((void __user *)arg, &info, sizeof(info)))
                 ret = -EFAULT;
```
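Taken together, the sync_file changes replace the per-fence callback array with a single fence (possibly a fence_array) and add `sync_file_get_fence()`. A hedged sketch of the driver-side pattern this enables, exporting a fence as a sync_file fd and resolving an fd back into a fence (function names are illustrative and reference-ownership details are simplified):

```c
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

/* Export a fence to userspace as a pollable sync_file fd. */
static int example_export_fence(struct fence *fence)
{
        struct sync_file *sync_file;
        int fd;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        sync_file = sync_file_create(fence);
        if (!sync_file) {
                put_unused_fd(fd);
                return -ENOMEM;
        }

        fd_install(fd, sync_file->file);
        return fd;
}

/* Resolve a sync_file fd passed in by userspace into a single fence. */
static struct fence *example_import_fence(int fd)
{
        /* May be a plain fence or a fence_array base; NULL on error. */
        return sync_file_get_fence(fd);
}
```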
