Diffstat (limited to 'drivers/gpu')
55 files changed, 2045 insertions, 2102 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a6771cef85e2..bc6a16a3c36e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,7 +18,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
 		drm_encoder.o drm_mode_object.o drm_property.o \
 		drm_plane.o drm_color_mgmt.o drm_print.o \
 		drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
-		drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o
+		drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
+		drm_atomic_uapi.o
 
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_DRM_VM) += drm_vm.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b6e9df11115d..8f05e28607e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1098,7 +1098,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
 {
 	int r;
 	struct dma_fence *fence;
-	r = drm_syncobj_find_fence(p->filp, handle, &fence);
+	r = drm_syncobj_find_fence(p->filp, handle, 0, &fence);
 	if (r)
 		return r;
 
@@ -1187,7 +1187,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 	int i;
 
 	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
-		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
+		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence);
 }
 
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
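The two amdgpu hunks track an interface change in the DRM core: drm_syncobj_find_fence() and drm_syncobj_replace_fence() now carry an extra timeline-point argument, and callers that only deal with binary syncobjs pass 0. A minimal sketch of a caller after this change, assuming the signatures exactly as used in the hunks above (lookup_syncobj_fence and signal_syncobj are illustrative names, not functions from this patch):

#include <linux/dma-fence.h>
#include <drm/drm_syncobj.h>

/* Resolve a binary syncobj handle to its current fence; point 0 matches the
 * "no timeline point" convention used by the amdgpu calls above. */
static int lookup_syncobj_fence(struct drm_file *filp, u32 handle,
				struct dma_fence **fence)
{
	return drm_syncobj_find_fence(filp, handle, 0, fence);
}

/* Install a job's finished fence into a binary syncobj, again at point 0. */
static void signal_syncobj(struct drm_syncobj *syncobj,
			   struct dma_fence *fence)
{
	drm_syncobj_replace_fence(syncobj, 0, fence);
}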
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index eb7dfb65ef47..8d770641fcc4 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -8,6 +8,7 @@
  */
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/armada_drm.h>
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 375bf92cd04f..b4f6bb521900 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -51,11 +51,6 @@ enum bochs_types {
 	BOCHS_UNKNOWN,
 };
 
-struct bochs_framebuffer {
-	struct drm_framebuffer base;
-	struct drm_gem_object *obj;
-};
-
 struct bochs_device {
 	/* hw */
 	void __iomem *mmio;
@@ -88,15 +83,11 @@ struct bochs_device {
 
 	/* fbdev */
 	struct {
-		struct bochs_framebuffer gfb;
+		struct drm_framebuffer *fb;
 		struct drm_fb_helper helper;
-		int size;
-		bool initialized;
 	} fb;
 };
 
-#define to_bochs_framebuffer(x) container_of(x, struct bochs_framebuffer, base)
-
 struct bochs_bo {
 	struct ttm_buffer_object bo;
 	struct ttm_placement placement;
@@ -126,7 +117,7 @@ static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
 /* ---------------------------------------------------------------------- */
 
 /* bochs_hw.c */
-int bochs_hw_init(struct drm_device *dev, uint32_t flags);
+int bochs_hw_init(struct drm_device *dev);
 void bochs_hw_fini(struct drm_device *dev);
 
 void bochs_hw_setmode(struct bochs_device *bochs,
@@ -148,15 +139,9 @@ int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
 int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
 			   uint32_t handle, uint64_t *offset);
 
-int bochs_framebuffer_init(struct drm_device *dev,
-			   struct bochs_framebuffer *gfb,
-			   const struct drm_mode_fb_cmd2 *mode_cmd,
-			   struct drm_gem_object *obj);
 int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
 int bochs_bo_unpin(struct bochs_bo *bo);
 
-extern const struct drm_mode_config_funcs bochs_mode_funcs;
-
 /* bochs_kms.c */
 int bochs_kms_init(struct bochs_device *bochs);
 void bochs_kms_fini(struct bochs_device *bochs);
@@ -164,3 +149,5 @@ void bochs_kms_fini(struct bochs_device *bochs);
 /* bochs_fbdev.c */
 int bochs_fbdev_init(struct bochs_device *bochs);
 void bochs_fbdev_fini(struct bochs_device *bochs);
+
+extern const struct drm_mode_config_funcs bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index c61b40c72b62..f3dd66ae990a 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -35,7 +35,7 @@ static void bochs_unload(struct drm_device *dev)
 	dev->dev_private = NULL;
 }
 
-static int bochs_load(struct drm_device *dev, unsigned long flags)
+static int bochs_load(struct drm_device *dev)
 {
 	struct bochs_device *bochs;
 	int ret;
@@ -46,7 +46,7 @@ static int bochs_load(struct drm_device *dev, unsigned long flags)
 	dev->dev_private = bochs;
 	bochs->dev = dev;
 
-	ret = bochs_hw_init(dev, flags);
+	ret = bochs_hw_init(dev);
 	if (ret)
 		goto err;
 
@@ -82,8 +82,6 @@ static const struct file_operations bochs_fops = {
 
 static struct drm_driver bochs_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET,
-	.load = bochs_load,
-	.unload = bochs_unload,
 	.fops = &bochs_fops,
 	.name = "bochs-drm",
 	.desc = "bochs dispi vga interface (qemu stdvga)",
@@ -107,11 +105,7 @@ static int bochs_pm_suspend(struct device *dev)
 
 	drm_kms_helper_poll_disable(drm_dev);
 
-	if (bochs->fb.initialized) {
-		console_lock();
-		drm_fb_helper_set_suspend(&bochs->fb.helper, 1);
-		console_unlock();
-	}
+	drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 1);
 
 	return 0;
 }
@@ -124,11 +118,7 @@ static int bochs_pm_resume(struct device *dev)
 
 	drm_helper_resume_force_mode(drm_dev);
 
-	if (bochs->fb.initialized) {
-		console_lock();
-		drm_fb_helper_set_suspend(&bochs->fb.helper, 0);
-		console_unlock();
-	}
+	drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 0);
 
 	drm_kms_helper_poll_enable(drm_dev);
 	return 0;
@@ -146,6 +136,7 @@ static const struct dev_pm_ops bochs_pm_ops = {
 static int bochs_pci_probe(struct pci_dev *pdev,
 			   const struct pci_device_id *ent)
 {
+	struct drm_device *dev;
 	unsigned long fbsize;
 	int ret;
 
@@ -159,14 +150,37 @@ static int bochs_pci_probe(struct pci_dev *pdev,
 	if (ret)
 		return ret;
 
-	return drm_get_pci_dev(pdev, ent, &bochs_driver);
+	dev = drm_dev_alloc(&bochs_driver, &pdev->dev);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	dev->pdev = pdev;
+	pci_set_drvdata(pdev, dev);
+
+	ret = bochs_load(dev);
+	if (ret)
+		goto err_free_dev;
+
+	ret = drm_dev_register(dev, 0);
+	if (ret)
+		goto err_unload;
+
+	return ret;
+
+err_unload:
+	bochs_unload(dev);
+err_free_dev:
+	drm_dev_put(dev);
+	return ret;
 }
 
 static void bochs_pci_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
-	drm_put_dev(dev);
+	drm_dev_unregister(dev);
+	bochs_unload(dev);
+	drm_dev_put(dev);
 }
 
 static const struct pci_device_id bochs_pci_tbl[] = {
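The probe/remove rework above drops the deprecated drm_driver .load/.unload callbacks and drm_get_pci_dev()/drm_put_dev() in favour of explicit device management in the PCI driver. The general shape of that pattern, sketched with placeholder names (my_driver, my_load and my_unload are illustrative; the bochs-specific equivalents are in the hunk itself):

#include <linux/pci.h>
#include <drm/drm_drv.h>

static struct drm_driver my_driver;
static int my_load(struct drm_device *dev);
static void my_unload(struct drm_device *dev);

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_device *dev;
	int ret;

	/* Allocate the drm_device up front instead of via drm_get_pci_dev(). */
	dev = drm_dev_alloc(&my_driver, &pdev->dev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	/* Hardware/driver init that used to live in the .load callback. */
	ret = my_load(dev);
	if (ret)
		goto err_free_dev;

	/* Make the device visible to userspace only after init succeeded. */
	ret = drm_dev_register(dev, 0);
	if (ret)
		goto err_unload;

	return 0;

err_unload:
	my_unload(dev);
err_free_dev:
	drm_dev_put(dev);
	return ret;
}

static void my_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* Unregister first, then tear down, then drop the final reference. */
	drm_dev_unregister(dev);
	my_unload(dev);
	drm_dev_put(dev);
}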
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 14eb8d0d5a00..8f4d6c052f7b 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -6,6 +6,7 @@
  */
 
 #include "bochs.h"
+#include <drm/drm_gem_framebuffer_helper.h>
 
 /* ---------------------------------------------------------------------- */
 
@@ -13,9 +14,7 @@ static int bochsfb_mmap(struct fb_info *info,
 			struct vm_area_struct *vma)
 {
 	struct drm_fb_helper *fb_helper = info->par;
-	struct bochs_device *bochs =
-		container_of(fb_helper, struct bochs_device, fb.helper);
-	struct bochs_bo *bo = gem_to_bochs_bo(bochs->fb.gfb.obj);
+	struct bochs_bo *bo = gem_to_bochs_bo(fb_helper->fb->obj[0]);
 
 	return ttm_fbdev_mmap(vma, &bo->bo);
 }
@@ -101,19 +100,20 @@ static int bochsfb_create(struct drm_fb_helper *helper,
 
 	/* init fb device */
 	info = drm_fb_helper_alloc_fbi(helper);
-	if (IS_ERR(info))
+	if (IS_ERR(info)) {
+		DRM_ERROR("Failed to allocate fbi: %ld\n", PTR_ERR(info));
 		return PTR_ERR(info);
+	}
 
 	info->par = &bochs->fb.helper;
 
-	ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
-	if (ret)
-		return ret;
-
-	bochs->fb.size = size;
+	fb = drm_gem_fbdev_fb_create(bochs->dev, sizes, 0, gobj, NULL);
+	if (IS_ERR(fb)) {
+		DRM_ERROR("Failed to create framebuffer: %ld\n", PTR_ERR(fb));
+		return PTR_ERR(fb);
+	}
 
 	/* setup helper */
-	fb = &bochs->fb.gfb.base;
 	bochs->fb.helper.fb = fb;
 
 	strcpy(info->fix.id, "bochsdrmfb");
@@ -130,27 +130,6 @@ static int bochsfb_create(struct drm_fb_helper *helper,
 	drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
 	info->fix.smem_start = 0;
 	info->fix.smem_len = size;
-
-	bochs->fb.initialized = true;
-	return 0;
-}
-
-static int bochs_fbdev_destroy(struct bochs_device *bochs)
-{
-	struct bochs_framebuffer *gfb = &bochs->fb.gfb;
-
-	DRM_DEBUG_DRIVER("\n");
-
-	drm_fb_helper_unregister_fbi(&bochs->fb.helper);
-
-	if (gfb->obj) {
-		drm_gem_object_unreference_unlocked(gfb->obj);
-		gfb->obj = NULL;
-	}
-
-	drm_framebuffer_unregister_private(&gfb->base);
-	drm_framebuffer_cleanup(&gfb->base);
-
 	return 0;
 }
 
@@ -158,41 +137,17 @@ static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
 	.fb_probe = bochsfb_create,
 };
 
+const struct drm_mode_config_funcs bochs_mode_funcs = {
+	.fb_create = drm_gem_fb_create,
+};
+
 int bochs_fbdev_init(struct bochs_device *bochs)
 {
-	int ret;
-
-	drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
-			      &bochs_fb_helper_funcs);
-
-	ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1);
-	if (ret)
-		return ret;
-
-	ret = drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
-	if (ret)
-		goto fini;
-
-	drm_helper_disable_unused_functions(bochs->dev);
-
-	ret = drm_fb_helper_initial_config(&bochs->fb.helper, 32);
-	if (ret)
-		goto fini;
-
-	return 0;
-
-fini:
-	drm_fb_helper_fini(&bochs->fb.helper);
-	return ret;
+	return drm_fb_helper_fbdev_setup(bochs->dev, &bochs->fb.helper,
+					 &bochs_fb_helper_funcs, 32, 1);
 }
 
 void bochs_fbdev_fini(struct bochs_device *bochs)
 {
-	if (bochs->fb.initialized)
-		bochs_fbdev_destroy(bochs);
-
-	if (bochs->fb.helper.fbdev)
-		drm_fb_helper_fini(&bochs->fb.helper);
-
-	bochs->fb.initialized = false;
+	drm_fb_helper_fbdev_teardown(bochs->dev);
 }
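The fbdev conversion above swaps the driver-private bochs_framebuffer for a generic GEM-backed drm_framebuffer (created with drm_gem_fbdev_fb_create()) and collapses the hand-rolled init/teardown sequence into the fbdev helper one-liners. A condensed sketch of the resulting init/fini pair, assuming the helper signatures as called in the hunks (32 bpp, at most one connector for this virtual VGA device); the comments describe what the helpers cover:

int bochs_fbdev_init(struct bochs_device *bochs)
{
	/* Wraps drm_fb_helper_prepare/init, connector registration and the
	 * initial configuration that the removed code spelled out by hand. */
	return drm_fb_helper_fbdev_setup(bochs->dev, &bochs->fb.helper,
					 &bochs_fb_helper_funcs, 32, 1);
}

void bochs_fbdev_fini(struct bochs_device *bochs)
{
	/* Unregisters the emulated fbdev and releases the helper state,
	 * replacing bochs_fbdev_destroy() and the explicit fb cleanup. */
	drm_fb_helper_fbdev_teardown(bochs->dev);
}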
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index a39b0343c197..16e4f1caccca 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -47,7 +47,7 @@ static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val)
 	}
 }
 
-int bochs_hw_init(struct drm_device *dev, uint32_t flags)
+int bochs_hw_init(struct drm_device *dev)
 {
 	struct bochs_device *bochs = dev->dev_private;
 	struct pci_dev *pdev = dev->pdev;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index ca5a9afdd5cf..ea9a43d31bf1 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -35,14 +35,12 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 {
 	struct bochs_device *bochs =
 		container_of(crtc, struct bochs_device, crtc);
-	struct bochs_framebuffer *bochs_fb;
 	struct bochs_bo *bo;
 	u64 gpu_addr = 0;
 	int ret;
 
 	if (old_fb) {
-		bochs_fb = to_bochs_framebuffer(old_fb);
-		bo = gem_to_bochs_bo(bochs_fb->obj);
+		bo = gem_to_bochs_bo(old_fb->obj[0]);
 		ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
 		if (ret) {
 			DRM_ERROR("failed to reserve old_fb bo\n");
@@ -55,8 +53,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 	if (WARN_ON(crtc->primary->fb == NULL))
 		return -EINVAL;
 
-	bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
-	bo = gem_to_bochs_bo(bochs_fb->obj);
+	bo = gem_to_bochs_bo(crtc->primary->fb->obj[0]);
 	ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
 	if (ret)
 		return ret;
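With the bochs_framebuffer wrapper gone, the backing buffer object is reached through the generic drm_framebuffer::obj[] array, which is what the fb->obj[0] dereferences above do. A small sketch of that lookup; gem_to_bochs_bo() is the driver's container_of wrapper used in the hunks, while drm_gem_fb_get_obj() is the equivalent accessor offered by the GEM framebuffer helpers (using it here is a suggestion of mine, not something this patch does):

#include <drm/drm_gem_framebuffer_helper.h>

static struct bochs_bo *bochs_fb_to_bo(struct drm_framebuffer *fb)
{
	/* XRGB8888 has a single plane, so plane 0 holds the only GEM object. */
	struct drm_gem_object *obj = drm_gem_fb_get_obj(fb, 0);

	return gem_to_bochs_bo(obj);
}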
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index c9c7097030ca..a61c1ecb2bdc 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -457,77 +457,3 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
 	drm_gem_object_unreference_unlocked(obj);
 	return 0;
 }
-
-/* ---------------------------------------------------------------------- */
-
-static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-	struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
-
-	drm_gem_object_unreference_unlocked(bochs_fb->obj);
-	drm_framebuffer_cleanup(fb);
-	kfree(fb);
-}
-
-static const struct drm_framebuffer_funcs bochs_fb_funcs = {
-	.destroy = bochs_user_framebuffer_destroy,
-};
-
-int bochs_framebuffer_init(struct drm_device *dev,
-			   struct bochs_framebuffer *gfb,
-			   const struct drm_mode_fb_cmd2 *mode_cmd,
-			   struct drm_gem_object *obj)
-{
-	int ret;
-
-	drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
-	gfb->obj = obj;
-	ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs);
-	if (ret) {
-		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
-		return ret;
-	}
-	return 0;
-}
-
-static struct drm_framebuffer *
-bochs_user_framebuffer_create(struct drm_device *dev,
-			      struct drm_file *filp,
-			      const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-	struct drm_gem_object *obj;
-	struct bochs_framebuffer *bochs_fb;
-	int ret;
-
-	DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
-			 mode_cmd->width, mode_cmd->height,
-			 (mode_cmd->pixel_format) & 0xff,
-			 (mode_cmd->pixel_format >> 8) & 0xff,
-			 (mode_cmd->pixel_format >> 16) & 0xff,
-			 (mode_cmd->pixel_format >> 24) & 0xff);
-
-	if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
-		return ERR_PTR(-ENOENT);
-
-	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
-	if (obj == NULL)
-		return ERR_PTR(-ENOENT);
-
-	bochs_fb = kzalloc(sizeof(*bochs_fb), GFP_KERNEL);
-	if (!bochs_fb) {
-		drm_gem_object_unreference_unlocked(obj);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	ret = bochs_framebuffer_init(dev, bochs_fb, mode_cmd, obj);
-	if (ret) {
-		drm_gem_object_unreference_unlocked(obj);
-		kfree(bochs_fb);
-		return ERR_PTR(ret);
-	}
-	return &bochs_fb->base;
-}
-
-const struct drm_mode_config_funcs bochs_mode_funcs = {
-	.fb_create = bochs_user_framebuffer_create,
-};
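Everything removed above (the bochs_framebuffer users, the destroy callback and the hand-rolled fb_create implementation) is subsumed by the generic GEM framebuffer helper; the replacement, visible in the bochs_fbdev.c hunk, boils down to pointing the mode_config at drm_gem_fb_create(). Shown again as a sketch, with its include added for context:

#include <drm/drm_gem_framebuffer_helper.h>

/* drm_gem_fb_create() looks up the GEM objects named by the fb handles and
 * wraps them in a drm_framebuffer with generic destroy/create_handle funcs,
 * which is what bochs_user_framebuffer_create() used to do by hand. */
const struct drm_mode_config_funcs bochs_mode_funcs = {
	.fb_create = drm_gem_fb_create,
};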
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index d68986cea132..2f21d3b6850b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -554,7 +554,7 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
 		if (retval < 0)
 			return retval;
 
-		dev_info(dp->dev, "Link Training Clock Recovery success\n");
+		dev_dbg(dp->dev, "Link Training Clock Recovery success\n");
 		dp->link_train.lt_state = EQUALIZER_TRAINING;
 	} else {
 		for (lane = 0; lane < lane_count; lane++) {
@@ -634,7 +634,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
 		if (retval < 0)
 			return retval;
 
-		dev_info(dp->dev, "Link Training success!\n");
+		dev_dbg(dp->dev, "Link Training success!\n");
 		analogix_dp_get_link_bandwidth(dp, &reg);
 		dp->link_train.link_rate = reg;
 		dev_dbg(dp->dev, "final bandwidth = %.2x\n",
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index d0478abc01bd..7ada75919756 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -28,6 +28,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_print.h>
 #include <drm/drm_writeback.h>
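The remainder of the patch removes the uapi-facing property plumbing and the drm_atomic_set_*_for_* helpers from drm_atomic.c; they move into the new drm_atomic_uapi.c added by the Makefile hunk at the top, and drivers that call those helpers directly now pull in the new header, as the armada hunk does. A hedged sketch of a caller using two of the moved helpers (the surrounding function and its name are illustrative, not part of this patch):

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>

/* Illustrative: attach a plane to a CRTC and give it a framebuffer inside an
 * atomic state, using helpers that are now declared in drm_atomic_uapi.h. */
static int example_update_plane(struct drm_atomic_state *state,
				struct drm_plane *plane,
				struct drm_crtc *crtc,
				struct drm_framebuffer *fb)
{
	struct drm_plane_state *plane_state;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret)
		return ret;

	/* Takes a reference on @fb and drops the one on the old fb, if any. */
	drm_atomic_set_fb_for_plane(plane_state, fb);

	return 0;
}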
@@ -309,350 +310,6 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
| 309 | } | 310 | } |
| 310 | EXPORT_SYMBOL(drm_atomic_get_crtc_state); | 311 | EXPORT_SYMBOL(drm_atomic_get_crtc_state); |
| 311 | 312 | ||
| 312 | static void set_out_fence_for_crtc(struct drm_atomic_state *state, | ||
| 313 | struct drm_crtc *crtc, s32 __user *fence_ptr) | ||
| 314 | { | ||
| 315 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; | ||
| 316 | } | ||
| 317 | |||
| 318 | static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, | ||
| 319 | struct drm_crtc *crtc) | ||
| 320 | { | ||
| 321 | s32 __user *fence_ptr; | ||
| 322 | |||
| 323 | fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; | ||
| 324 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; | ||
| 325 | |||
| 326 | return fence_ptr; | ||
| 327 | } | ||
| 328 | |||
| 329 | static int set_out_fence_for_connector(struct drm_atomic_state *state, | ||
| 330 | struct drm_connector *connector, | ||
| 331 | s32 __user *fence_ptr) | ||
| 332 | { | ||
| 333 | unsigned int index = drm_connector_index(connector); | ||
| 334 | |||
| 335 | if (!fence_ptr) | ||
| 336 | return 0; | ||
| 337 | |||
| 338 | if (put_user(-1, fence_ptr)) | ||
| 339 | return -EFAULT; | ||
| 340 | |||
| 341 | state->connectors[index].out_fence_ptr = fence_ptr; | ||
| 342 | |||
| 343 | return 0; | ||
| 344 | } | ||
| 345 | |||
| 346 | static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state, | ||
| 347 | struct drm_connector *connector) | ||
| 348 | { | ||
| 349 | unsigned int index = drm_connector_index(connector); | ||
| 350 | s32 __user *fence_ptr; | ||
| 351 | |||
| 352 | fence_ptr = state->connectors[index].out_fence_ptr; | ||
| 353 | state->connectors[index].out_fence_ptr = NULL; | ||
| 354 | |||
| 355 | return fence_ptr; | ||
| 356 | } | ||
| 357 | |||
| 358 | /** | ||
| 359 | * drm_atomic_set_mode_for_crtc - set mode for CRTC | ||
| 360 | * @state: the CRTC whose incoming state to update | ||
| 361 | * @mode: kernel-internal mode to use for the CRTC, or NULL to disable | ||
| 362 | * | ||
| 363 | * Set a mode (originating from the kernel) on the desired CRTC state and update | ||
| 364 | * the enable property. | ||
| 365 | * | ||
| 366 | * RETURNS: | ||
| 367 | * Zero on success, error code on failure. Cannot return -EDEADLK. | ||
| 368 | */ | ||
| 369 | int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, | ||
| 370 | const struct drm_display_mode *mode) | ||
| 371 | { | ||
| 372 | struct drm_crtc *crtc = state->crtc; | ||
| 373 | struct drm_mode_modeinfo umode; | ||
| 374 | |||
| 375 | /* Early return for no change. */ | ||
| 376 | if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0) | ||
| 377 | return 0; | ||
| 378 | |||
| 379 | drm_property_blob_put(state->mode_blob); | ||
| 380 | state->mode_blob = NULL; | ||
| 381 | |||
| 382 | if (mode) { | ||
| 383 | drm_mode_convert_to_umode(&umode, mode); | ||
| 384 | state->mode_blob = | ||
| 385 | drm_property_create_blob(state->crtc->dev, | ||
| 386 | sizeof(umode), | ||
| 387 | &umode); | ||
| 388 | if (IS_ERR(state->mode_blob)) | ||
| 389 | return PTR_ERR(state->mode_blob); | ||
| 390 | |||
| 391 | drm_mode_copy(&state->mode, mode); | ||
| 392 | state->enable = true; | ||
| 393 | DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n", | ||
| 394 | mode->name, crtc->base.id, crtc->name, state); | ||
| 395 | } else { | ||
| 396 | memset(&state->mode, 0, sizeof(state->mode)); | ||
| 397 | state->enable = false; | ||
| 398 | DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n", | ||
| 399 | crtc->base.id, crtc->name, state); | ||
| 400 | } | ||
| 401 | |||
| 402 | return 0; | ||
| 403 | } | ||
| 404 | EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc); | ||
| 405 | |||
| 406 | /** | ||
| 407 | * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC | ||
| 408 | * @state: the CRTC whose incoming state to update | ||
| 409 | * @blob: pointer to blob property to use for mode | ||
| 410 | * | ||
| 411 | * Set a mode (originating from a blob property) on the desired CRTC state. | ||
| 412 | * This function will take a reference on the blob property for the CRTC state, | ||
| 413 | * and release the reference held on the state's existing mode property, if any | ||
| 414 | * was set. | ||
| 415 | * | ||
| 416 | * RETURNS: | ||
| 417 | * Zero on success, error code on failure. Cannot return -EDEADLK. | ||
| 418 | */ | ||
| 419 | int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, | ||
| 420 | struct drm_property_blob *blob) | ||
| 421 | { | ||
| 422 | struct drm_crtc *crtc = state->crtc; | ||
| 423 | |||
| 424 | if (blob == state->mode_blob) | ||
| 425 | return 0; | ||
| 426 | |||
| 427 | drm_property_blob_put(state->mode_blob); | ||
| 428 | state->mode_blob = NULL; | ||
| 429 | |||
| 430 | memset(&state->mode, 0, sizeof(state->mode)); | ||
| 431 | |||
| 432 | if (blob) { | ||
| 433 | int ret; | ||
| 434 | |||
| 435 | if (blob->length != sizeof(struct drm_mode_modeinfo)) { | ||
| 436 | DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n", | ||
| 437 | crtc->base.id, crtc->name, | ||
| 438 | blob->length); | ||
| 439 | return -EINVAL; | ||
| 440 | } | ||
| 441 | |||
| 442 | ret = drm_mode_convert_umode(crtc->dev, | ||
| 443 | &state->mode, blob->data); | ||
| 444 | if (ret) { | ||
| 445 | DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n", | ||
| 446 | crtc->base.id, crtc->name, | ||
| 447 | ret, drm_get_mode_status_name(state->mode.status)); | ||
| 448 | drm_mode_debug_printmodeline(&state->mode); | ||
| 449 | return -EINVAL; | ||
| 450 | } | ||
| 451 | |||
| 452 | state->mode_blob = drm_property_blob_get(blob); | ||
| 453 | state->enable = true; | ||
| 454 | DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n", | ||
| 455 | state->mode.name, crtc->base.id, crtc->name, | ||
| 456 | state); | ||
| 457 | } else { | ||
| 458 | state->enable = false; | ||
| 459 | DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n", | ||
| 460 | crtc->base.id, crtc->name, state); | ||
| 461 | } | ||
| 462 | |||
| 463 | return 0; | ||
| 464 | } | ||
| 465 | EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc); | ||
| 466 | |||
| 467 | /** | ||
| 468 | * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it | ||
| 469 | * @dev: DRM device | ||
| 470 | * @blob: a pointer to the member blob to be replaced | ||
| 471 | * @blob_id: ID of the new blob | ||
| 472 | * @expected_size: total expected size of the blob data (in bytes) | ||
| 473 | * @expected_elem_size: expected element size of the blob data (in bytes) | ||
| 474 | * @replaced: did the blob get replaced? | ||
| 475 | * | ||
| 476 | * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero | ||
| 477 | * @blob becomes NULL. | ||
| 478 | * | ||
| 479 | * If @expected_size is positive the new blob length is expected to be equal | ||
| 480 | * to @expected_size bytes. If @expected_elem_size is positive the new blob | ||
| 481 | * length is expected to be a multiple of @expected_elem_size bytes. Otherwise | ||
| 482 | * an error is returned. | ||
| 483 | * | ||
| 484 | * @replaced will indicate to the caller whether the blob was replaced or not. | ||
| 485 | * If the old and new blobs were in fact the same blob @replaced will be false | ||
| 486 | * otherwise it will be true. | ||
| 487 | * | ||
| 488 | * RETURNS: | ||
| 489 | * Zero on success, error code on failure. | ||
| 490 | */ | ||
| 491 | static int | ||
| 492 | drm_atomic_replace_property_blob_from_id(struct drm_device *dev, | ||
| 493 | struct drm_property_blob **blob, | ||
| 494 | uint64_t blob_id, | ||
| 495 | ssize_t expected_size, | ||
| 496 | ssize_t expected_elem_size, | ||
| 497 | bool *replaced) | ||
| 498 | { | ||
| 499 | struct drm_property_blob *new_blob = NULL; | ||
| 500 | |||
| 501 | if (blob_id != 0) { | ||
| 502 | new_blob = drm_property_lookup_blob(dev, blob_id); | ||
| 503 | if (new_blob == NULL) | ||
| 504 | return -EINVAL; | ||
| 505 | |||
| 506 | if (expected_size > 0 && | ||
| 507 | new_blob->length != expected_size) { | ||
| 508 | drm_property_blob_put(new_blob); | ||
| 509 | return -EINVAL; | ||
| 510 | } | ||
| 511 | if (expected_elem_size > 0 && | ||
| 512 | new_blob->length % expected_elem_size != 0) { | ||
| 513 | drm_property_blob_put(new_blob); | ||
| 514 | return -EINVAL; | ||
| 515 | } | ||
| 516 | } | ||
| 517 | |||
| 518 | *replaced |= drm_property_replace_blob(blob, new_blob); | ||
| 519 | drm_property_blob_put(new_blob); | ||
| 520 | |||
| 521 | return 0; | ||
| 522 | } | ||
| 523 | |||
| 524 | /** | ||
| 525 | * drm_atomic_crtc_set_property - set property on CRTC | ||
| 526 | * @crtc: the drm CRTC to set a property on | ||
| 527 | * @state: the state object to update with the new property value | ||
| 528 | * @property: the property to set | ||
| 529 | * @val: the new property value | ||
| 530 | * | ||
| 531 | * This function handles generic/core properties and calls out to driver's | ||
| 532 | * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure | ||
| 533 | * consistent behavior you must call this function rather than the driver hook | ||
| 534 | * directly. | ||
| 535 | * | ||
| 536 | * RETURNS: | ||
| 537 | * Zero on success, error code on failure | ||
| 538 | */ | ||
| 539 | int drm_atomic_crtc_set_property(struct drm_crtc *crtc, | ||
| 540 | struct drm_crtc_state *state, struct drm_property *property, | ||
| 541 | uint64_t val) | ||
| 542 | { | ||
| 543 | struct drm_device *dev = crtc->dev; | ||
| 544 | struct drm_mode_config *config = &dev->mode_config; | ||
| 545 | bool replaced = false; | ||
| 546 | int ret; | ||
| 547 | |||
| 548 | if (property == config->prop_active) | ||
| 549 | state->active = val; | ||
| 550 | else if (property == config->prop_mode_id) { | ||
| 551 | struct drm_property_blob *mode = | ||
| 552 | drm_property_lookup_blob(dev, val); | ||
| 553 | ret = drm_atomic_set_mode_prop_for_crtc(state, mode); | ||
| 554 | drm_property_blob_put(mode); | ||
| 555 | return ret; | ||
| 556 | } else if (property == config->degamma_lut_property) { | ||
| 557 | ret = drm_atomic_replace_property_blob_from_id(dev, | ||
| 558 | &state->degamma_lut, | ||
| 559 | val, | ||
| 560 | -1, sizeof(struct drm_color_lut), | ||
| 561 | &replaced); | ||
| 562 | state->color_mgmt_changed |= replaced; | ||
| 563 | return ret; | ||
| 564 | } else if (property == config->ctm_property) { | ||
| 565 | ret = drm_atomic_replace_property_blob_from_id(dev, | ||
| 566 | &state->ctm, | ||
| 567 | val, | ||
| 568 | sizeof(struct drm_color_ctm), -1, | ||
| 569 | &replaced); | ||
| 570 | state->color_mgmt_changed |= replaced; | ||
| 571 | return ret; | ||
| 572 | } else if (property == config->gamma_lut_property) { | ||
| 573 | ret = drm_atomic_replace_property_blob_from_id(dev, | ||
| 574 | &state->gamma_lut, | ||
| 575 | val, | ||
| 576 | -1, sizeof(struct drm_color_lut), | ||
| 577 | &replaced); | ||
| 578 | state->color_mgmt_changed |= replaced; | ||
| 579 | return ret; | ||
| 580 | } else if (property == config->prop_out_fence_ptr) { | ||
| 581 | s32 __user *fence_ptr = u64_to_user_ptr(val); | ||
| 582 | |||
| 583 | if (!fence_ptr) | ||
| 584 | return 0; | ||
| 585 | |||
| 586 | if (put_user(-1, fence_ptr)) | ||
| 587 | return -EFAULT; | ||
| 588 | |||
| 589 | set_out_fence_for_crtc(state->state, crtc, fence_ptr); | ||
| 590 | } else if (crtc->funcs->atomic_set_property) { | ||
| 591 | return crtc->funcs->atomic_set_property(crtc, state, property, val); | ||
| 592 | } else { | ||
| 593 | DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n", | ||
| 594 | crtc->base.id, crtc->name, | ||
| 595 | property->base.id, property->name); | ||
| 596 | return -EINVAL; | ||
| 597 | } | ||
| 598 | |||
| 599 | return 0; | ||
| 600 | } | ||
| 601 | EXPORT_SYMBOL(drm_atomic_crtc_set_property); | ||
| 602 | |||
| 603 | /** | ||
| 604 | * drm_atomic_crtc_get_property - get property value from CRTC state | ||
| 605 | * @crtc: the drm CRTC to set a property on | ||
| 606 | * @state: the state object to get the property value from | ||
| 607 | * @property: the property to set | ||
| 608 | * @val: return location for the property value | ||
| 609 | * | ||
| 610 | * This function handles generic/core properties and calls out to driver's | ||
| 611 | * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure | ||
| 612 | * consistent behavior you must call this function rather than the driver hook | ||
| 613 | * directly. | ||
| 614 | * | ||
| 615 | * RETURNS: | ||
| 616 | * Zero on success, error code on failure | ||
| 617 | */ | ||
| 618 | static int | ||
| 619 | drm_atomic_crtc_get_property(struct drm_crtc *crtc, | ||
| 620 | const struct drm_crtc_state *state, | ||
| 621 | struct drm_property *property, uint64_t *val) | ||
| 622 | { | ||
| 623 | struct drm_device *dev = crtc->dev; | ||
| 624 | struct drm_mode_config *config = &dev->mode_config; | ||
| 625 | |||
| 626 | if (property == config->prop_active) | ||
| 627 | *val = state->active; | ||
| 628 | else if (property == config->prop_mode_id) | ||
| 629 | *val = (state->mode_blob) ? state->mode_blob->base.id : 0; | ||
| 630 | else if (property == config->degamma_lut_property) | ||
| 631 | *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; | ||
| 632 | else if (property == config->ctm_property) | ||
| 633 | *val = (state->ctm) ? state->ctm->base.id : 0; | ||
| 634 | else if (property == config->gamma_lut_property) | ||
| 635 | *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0; | ||
| 636 | else if (property == config->prop_out_fence_ptr) | ||
| 637 | *val = 0; | ||
| 638 | else if (crtc->funcs->atomic_get_property) | ||
| 639 | return crtc->funcs->atomic_get_property(crtc, state, property, val); | ||
| 640 | else | ||
| 641 | return -EINVAL; | ||
| 642 | |||
| 643 | return 0; | ||
| 644 | } | ||
| 645 | |||
| 646 | /** | ||
| 647 | * drm_atomic_crtc_check - check crtc state | ||
| 648 | * @crtc: crtc to check | ||
| 649 | * @state: crtc state to check | ||
| 650 | * | ||
| 651 | * Provides core sanity checks for crtc state. | ||
| 652 | * | ||
| 653 | * RETURNS: | ||
| 654 | * Zero on success, error code on failure | ||
| 655 | */ | ||
| 656 | static int drm_atomic_crtc_check(struct drm_crtc *crtc, | 313 | static int drm_atomic_crtc_check(struct drm_crtc *crtc, |
| 657 | struct drm_crtc_state *state) | 314 | struct drm_crtc_state *state) |
| 658 | { | 315 | { |
@@ -728,16 +385,6 @@ static void drm_atomic_crtc_print_state(struct drm_printer *p,
| 728 | crtc->funcs->atomic_print_state(p, state); | 385 | crtc->funcs->atomic_print_state(p, state); |
| 729 | } | 386 | } |
| 730 | 387 | ||
| 731 | /** | ||
| 732 | * drm_atomic_connector_check - check connector state | ||
| 733 | * @connector: connector to check | ||
| 734 | * @state: connector state to check | ||
| 735 | * | ||
| 736 | * Provides core sanity checks for connector state. | ||
| 737 | * | ||
| 738 | * RETURNS: | ||
| 739 | * Zero on success, error code on failure | ||
| 740 | */ | ||
| 741 | static int drm_atomic_connector_check(struct drm_connector *connector, | 388 | static int drm_atomic_connector_check(struct drm_connector *connector, |
| 742 | struct drm_connector_state *state) | 389 | struct drm_connector_state *state) |
| 743 | { | 390 | { |
@@ -836,159 +483,6 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
| 836 | } | 483 | } |
| 837 | EXPORT_SYMBOL(drm_atomic_get_plane_state); | 484 | EXPORT_SYMBOL(drm_atomic_get_plane_state); |
| 838 | 485 | ||
| 839 | /** | ||
| 840 | * drm_atomic_plane_set_property - set property on plane | ||
| 841 | * @plane: the drm plane to set a property on | ||
| 842 | * @state: the state object to update with the new property value | ||
| 843 | * @property: the property to set | ||
| 844 | * @val: the new property value | ||
| 845 | * | ||
| 846 | * This function handles generic/core properties and calls out to driver's | ||
| 847 | * &drm_plane_funcs.atomic_set_property for driver properties. To ensure | ||
| 848 | * consistent behavior you must call this function rather than the driver hook | ||
| 849 | * directly. | ||
| 850 | * | ||
| 851 | * RETURNS: | ||
| 852 | * Zero on success, error code on failure | ||
| 853 | */ | ||
| 854 | static int drm_atomic_plane_set_property(struct drm_plane *plane, | ||
| 855 | struct drm_plane_state *state, struct drm_property *property, | ||
| 856 | uint64_t val) | ||
| 857 | { | ||
| 858 | struct drm_device *dev = plane->dev; | ||
| 859 | struct drm_mode_config *config = &dev->mode_config; | ||
| 860 | |||
| 861 | if (property == config->prop_fb_id) { | ||
| 862 | struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); | ||
| 863 | drm_atomic_set_fb_for_plane(state, fb); | ||
| 864 | if (fb) | ||
| 865 | drm_framebuffer_put(fb); | ||
| 866 | } else if (property == config->prop_in_fence_fd) { | ||
| 867 | if (state->fence) | ||
| 868 | return -EINVAL; | ||
| 869 | |||
| 870 | if (U642I64(val) == -1) | ||
| 871 | return 0; | ||
| 872 | |||
| 873 | state->fence = sync_file_get_fence(val); | ||
| 874 | if (!state->fence) | ||
| 875 | return -EINVAL; | ||
| 876 | |||
| 877 | } else if (property == config->prop_crtc_id) { | ||
| 878 | struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val); | ||
| 879 | return drm_atomic_set_crtc_for_plane(state, crtc); | ||
| 880 | } else if (property == config->prop_crtc_x) { | ||
| 881 | state->crtc_x = U642I64(val); | ||
| 882 | } else if (property == config->prop_crtc_y) { | ||
| 883 | state->crtc_y = U642I64(val); | ||
| 884 | } else if (property == config->prop_crtc_w) { | ||
| 885 | state->crtc_w = val; | ||
| 886 | } else if (property == config->prop_crtc_h) { | ||
| 887 | state->crtc_h = val; | ||
| 888 | } else if (property == config->prop_src_x) { | ||
| 889 | state->src_x = val; | ||
| 890 | } else if (property == config->prop_src_y) { | ||
| 891 | state->src_y = val; | ||
| 892 | } else if (property == config->prop_src_w) { | ||
| 893 | state->src_w = val; | ||
| 894 | } else if (property == config->prop_src_h) { | ||
| 895 | state->src_h = val; | ||
| 896 | } else if (property == plane->alpha_property) { | ||
| 897 | state->alpha = val; | ||
| 898 | } else if (property == plane->blend_mode_property) { | ||
| 899 | state->pixel_blend_mode = val; | ||
| 900 | } else if (property == plane->rotation_property) { | ||
| 901 | if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) { | ||
| 902 | DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n", | ||
| 903 | plane->base.id, plane->name, val); | ||
| 904 | return -EINVAL; | ||
| 905 | } | ||
| 906 | state->rotation = val; | ||
| 907 | } else if (property == plane->zpos_property) { | ||
| 908 | state->zpos = val; | ||
| 909 | } else if (property == plane->color_encoding_property) { | ||
| 910 | state->color_encoding = val; | ||
| 911 | } else if (property == plane->color_range_property) { | ||
| 912 | state->color_range = val; | ||
| 913 | } else if (plane->funcs->atomic_set_property) { | ||
| 914 | return plane->funcs->atomic_set_property(plane, state, | ||
| 915 | property, val); | ||
| 916 | } else { | ||
| 917 | DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n", | ||
| 918 | plane->base.id, plane->name, | ||
| 919 | property->base.id, property->name); | ||
| 920 | return -EINVAL; | ||
| 921 | } | ||
| 922 | |||
| 923 | return 0; | ||
| 924 | } | ||
| 925 | |||
| 926 | /** | ||
| 927 | * drm_atomic_plane_get_property - get property value from plane state | ||
| 928 | * @plane: the drm plane to set a property on | ||
| 929 | * @state: the state object to get the property value from | ||
| 930 | * @property: the property to set | ||
| 931 | * @val: return location for the property value | ||
| 932 | * | ||
| 933 | * This function handles generic/core properties and calls out to driver's | ||
| 934 | * &drm_plane_funcs.atomic_get_property for driver properties. To ensure | ||
| 935 | * consistent behavior you must call this function rather than the driver hook | ||
| 936 | * directly. | ||
| 937 | * | ||
| 938 | * RETURNS: | ||
| 939 | * Zero on success, error code on failure | ||
| 940 | */ | ||
| 941 | static int | ||
| 942 | drm_atomic_plane_get_property(struct drm_plane *plane, | ||
| 943 | const struct drm_plane_state *state, | ||
| 944 | struct drm_property *property, uint64_t *val) | ||
| 945 | { | ||
| 946 | struct drm_device *dev = plane->dev; | ||
| 947 | struct drm_mode_config *config = &dev->mode_config; | ||
| 948 | |||
| 949 | if (property == config->prop_fb_id) { | ||
| 950 | *val = (state->fb) ? state->fb->base.id : 0; | ||
| 951 | } else if (property == config->prop_in_fence_fd) { | ||
| 952 | *val = -1; | ||
| 953 | } else if (property == config->prop_crtc_id) { | ||
| 954 | *val = (state->crtc) ? state->crtc->base.id : 0; | ||
| 955 | } else if (property == config->prop_crtc_x) { | ||
| 956 | *val = I642U64(state->crtc_x); | ||
| 957 | } else if (property == config->prop_crtc_y) { | ||
| 958 | *val = I642U64(state->crtc_y); | ||
| 959 | } else if (property == config->prop_crtc_w) { | ||
| 960 | *val = state->crtc_w; | ||
| 961 | } else if (property == config->prop_crtc_h) { | ||
| 962 | *val = state->crtc_h; | ||
| 963 | } else if (property == config->prop_src_x) { | ||
| 964 | *val = state->src_x; | ||
| 965 | } else if (property == config->prop_src_y) { | ||
| 966 | *val = state->src_y; | ||
| 967 | } else if (property == config->prop_src_w) { | ||
| 968 | *val = state->src_w; | ||
| 969 | } else if (property == config->prop_src_h) { | ||
| 970 | *val = state->src_h; | ||
| 971 | } else if (property == plane->alpha_property) { | ||
| 972 | *val = state->alpha; | ||
| 973 | } else if (property == plane->blend_mode_property) { | ||
| 974 | *val = state->pixel_blend_mode; | ||
| 975 | } else if (property == plane->rotation_property) { | ||
| 976 | *val = state->rotation; | ||
| 977 | } else if (property == plane->zpos_property) { | ||
| 978 | *val = state->zpos; | ||
| 979 | } else if (property == plane->color_encoding_property) { | ||
| 980 | *val = state->color_encoding; | ||
| 981 | } else if (property == plane->color_range_property) { | ||
| 982 | *val = state->color_range; | ||
| 983 | } else if (plane->funcs->atomic_get_property) { | ||
| 984 | return plane->funcs->atomic_get_property(plane, state, property, val); | ||
| 985 | } else { | ||
| 986 | return -EINVAL; | ||
| 987 | } | ||
| 988 | |||
| 989 | return 0; | ||
| 990 | } | ||
| 991 | |||
| 992 | static bool | 486 | static bool |
| 993 | plane_switching_crtc(struct drm_atomic_state *state, | 487 | plane_switching_crtc(struct drm_atomic_state *state, |
| 994 | struct drm_plane *plane, | 488 | struct drm_plane *plane, |
@@ -1328,111 +822,6 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
| 1328 | } | 822 | } |
| 1329 | EXPORT_SYMBOL(drm_atomic_get_connector_state); | 823 | EXPORT_SYMBOL(drm_atomic_get_connector_state); |
| 1330 | 824 | ||
| 1331 | /** | ||
| 1332 | * drm_atomic_connector_set_property - set property on connector. | ||
| 1333 | * @connector: the drm connector to set a property on | ||
| 1334 | * @state: the state object to update with the new property value | ||
| 1335 | * @property: the property to set | ||
| 1336 | * @val: the new property value | ||
| 1337 | * | ||
| 1338 | * This function handles generic/core properties and calls out to driver's | ||
| 1339 | * &drm_connector_funcs.atomic_set_property for driver properties. To ensure | ||
| 1340 | * consistent behavior you must call this function rather than the driver hook | ||
| 1341 | * directly. | ||
| 1342 | * | ||
| 1343 | * RETURNS: | ||
| 1344 | * Zero on success, error code on failure | ||
| 1345 | */ | ||
| 1346 | static int drm_atomic_connector_set_property(struct drm_connector *connector, | ||
| 1347 | struct drm_connector_state *state, struct drm_property *property, | ||
| 1348 | uint64_t val) | ||
| 1349 | { | ||
| 1350 | struct drm_device *dev = connector->dev; | ||
| 1351 | struct drm_mode_config *config = &dev->mode_config; | ||
| 1352 | |||
| 1353 | if (property == config->prop_crtc_id) { | ||
| 1354 | struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val); | ||
| 1355 | return drm_atomic_set_crtc_for_connector(state, crtc); | ||
| 1356 | } else if (property == config->dpms_property) { | ||
| 1357 | /* setting DPMS property requires special handling, which | ||
| 1358 | * is done in legacy setprop path for us. Disallow (for | ||
| 1359 | * now?) atomic writes to DPMS property: | ||
| 1360 | */ | ||
| 1361 | return -EINVAL; | ||
| 1362 | } else if (property == config->tv_select_subconnector_property) { | ||
| 1363 | state->tv.subconnector = val; | ||
| 1364 | } else if (property == config->tv_left_margin_property) { | ||
| 1365 | state->tv.margins.left = val; | ||
| 1366 | } else if (property == config->tv_right_margin_property) { | ||
| 1367 | state->tv.margins.right = val; | ||
| 1368 | } else if (property == config->tv_top_margin_property) { | ||
| 1369 | state->tv.margins.top = val; | ||
| 1370 | } else if (property == config->tv_bottom_margin_property) { | ||
| 1371 | state->tv.margins.bottom = val; | ||
| 1372 | } else if (property == config->tv_mode_property) { | ||
| 1373 | state->tv.mode = val; | ||
| 1374 | } else if (property == config->tv_brightness_property) { | ||
| 1375 | state->tv.brightness = val; | ||
| 1376 | } else if (property == config->tv_contrast_property) { | ||
| 1377 | state->tv.contrast = val; | ||
| 1378 | } else if (property == config->tv_flicker_reduction_property) { | ||
| 1379 | state->tv.flicker_reduction = val; | ||
| 1380 | } else if (property == config->tv_overscan_property) { | ||
| 1381 | state->tv.overscan = val; | ||
| 1382 | } else if (property == config->tv_saturation_property) { | ||
| 1383 | state->tv.saturation = val; | ||
| 1384 | } else if (property == config->tv_hue_property) { | ||
| 1385 | state->tv.hue = val; | ||
| 1386 | } else if (property == config->link_status_property) { | ||
| 1387 | /* Never downgrade from GOOD to BAD on userspace's request here, | ||
| 1388 | * only hw issues can do that. | ||
| 1389 | * | ||
| 1390 | * For an atomic property the userspace doesn't need to be able | ||
| 1391 | * to understand all the properties, but needs to be able to | ||
| 1392 | * restore the state it wants on VT switch. So if the userspace | ||
| 1393 | * tries to change the link_status from GOOD to BAD, driver | ||
| 1394 | * silently rejects it and returns a 0. This prevents userspace | ||
| 1395 | * from accidently breaking the display when it restores the | ||
| 1396 | * state. | ||
| 1397 | */ | ||
| 1398 | if (state->link_status != DRM_LINK_STATUS_GOOD) | ||
| 1399 | state->link_status = val; | ||
| 1400 | } else if (property == config->aspect_ratio_property) { | ||
| 1401 | state->picture_aspect_ratio = val; | ||
| 1402 | } else if (property == config->content_type_property) { | ||
| 1403 | state->content_type = val; | ||
| 1404 | } else if (property == connector->scaling_mode_property) { | ||
| 1405 | state->scaling_mode = val; | ||
| 1406 | } else if (property == connector->content_protection_property) { | ||
| 1407 | if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) { | ||
| 1408 | DRM_DEBUG_KMS("only drivers can set CP Enabled\n"); | ||
| 1409 | return -EINVAL; | ||
| 1410 | } | ||
| 1411 | state->content_protection = val; | ||
| 1412 | } else if (property == config->writeback_fb_id_property) { | ||
| 1413 | struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); | ||
| 1414 | int ret = drm_atomic_set_writeback_fb_for_connector(state, fb); | ||
| 1415 | if (fb) | ||
| 1416 | drm_framebuffer_put(fb); | ||
| 1417 | return ret; | ||
| 1418 | } else if (property == config->writeback_out_fence_ptr_property) { | ||
| 1419 | s32 __user *fence_ptr = u64_to_user_ptr(val); | ||
| 1420 | |||
| 1421 | return set_out_fence_for_connector(state->state, connector, | ||
| 1422 | fence_ptr); | ||
| 1423 | } else if (connector->funcs->atomic_set_property) { | ||
| 1424 | return connector->funcs->atomic_set_property(connector, | ||
| 1425 | state, property, val); | ||
| 1426 | } else { | ||
| 1427 | DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n", | ||
| 1428 | connector->base.id, connector->name, | ||
| 1429 | property->base.id, property->name); | ||
| 1430 | return -EINVAL; | ||
| 1431 | } | ||
| 1432 | |||
| 1433 | return 0; | ||
| 1434 | } | ||
| 1435 | |||
| 1436 | static void drm_atomic_connector_print_state(struct drm_printer *p, | 825 | static void drm_atomic_connector_print_state(struct drm_printer *p, |
| 1437 | const struct drm_connector_state *state) | 826 | const struct drm_connector_state *state) |
| 1438 | { | 827 | { |
@@ -1450,360 +839,6 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
| 1450 | } | 839 | } |
| 1451 | 840 | ||
| 1452 | /** | 841 | /** |
| 1453 | * drm_atomic_connector_get_property - get property value from connector state | ||
| 1454 | * @connector: the drm connector to set a property on | ||
| 1455 | * @state: the state object to get the property value from | ||
| 1456 | * @property: the property to set | ||
| 1457 | * @val: return location for the property value | ||
| 1458 | * | ||
| 1459 | * This function handles generic/core properties and calls out to driver's | ||
| 1460 | * &drm_connector_funcs.atomic_get_property for driver properties. To ensure | ||
| 1461 | * consistent behavior you must call this function rather than the driver hook | ||
| 1462 | * directly. | ||
| 1463 | * | ||
| 1464 | * RETURNS: | ||
| 1465 | * Zero on success, error code on failure | ||
| 1466 | */ | ||
| 1467 | static int | ||
| 1468 | drm_atomic_connector_get_property(struct drm_connector *connector, | ||
| 1469 | const struct drm_connector_state *state, | ||
| 1470 | struct drm_property *property, uint64_t *val) | ||
| 1471 | { | ||
| 1472 | struct drm_device *dev = connector->dev; | ||
| 1473 | struct drm_mode_config *config = &dev->mode_config; | ||
| 1474 | |||
| 1475 | if (property == config->prop_crtc_id) { | ||
| 1476 | *val = (state->crtc) ? state->crtc->base.id : 0; | ||
| 1477 | } else if (property == config->dpms_property) { | ||
| 1478 | *val = connector->dpms; | ||
| 1479 | } else if (property == config->tv_select_subconnector_property) { | ||
| 1480 | *val = state->tv.subconnector; | ||
| 1481 | } else if (property == config->tv_left_margin_property) { | ||
| 1482 | *val = state->tv.margins.left; | ||
| 1483 | } else if (property == config->tv_right_margin_property) { | ||
| 1484 | *val = state->tv.margins.right; | ||
| 1485 | } else if (property == config->tv_top_margin_property) { | ||
| 1486 | *val = state->tv.margins.top; | ||
| 1487 | } else if (property == config->tv_bottom_margin_property) { | ||
| 1488 | *val = state->tv.margins.bottom; | ||
| 1489 | } else if (property == config->tv_mode_property) { | ||
| 1490 | *val = state->tv.mode; | ||
| 1491 | } else if (property == config->tv_brightness_property) { | ||
| 1492 | *val = state->tv.brightness; | ||
| 1493 | } else if (property == config->tv_contrast_property) { | ||
| 1494 | *val = state->tv.contrast; | ||
| 1495 | } else if (property == config->tv_flicker_reduction_property) { | ||
| 1496 | *val = state->tv.flicker_reduction; | ||
| 1497 | } else if (property == config->tv_overscan_property) { | ||
| 1498 | *val = state->tv.overscan; | ||
| 1499 | } else if (property == config->tv_saturation_property) { | ||
| 1500 | *val = state->tv.saturation; | ||
| 1501 | } else if (property == config->tv_hue_property) { | ||
| 1502 | *val = state->tv.hue; | ||
| 1503 | } else if (property == config->link_status_property) { | ||
| 1504 | *val = state->link_status; | ||
| 1505 | } else if (property == config->aspect_ratio_property) { | ||
| 1506 | *val = state->picture_aspect_ratio; | ||
| 1507 | } else if (property == config->content_type_property) { | ||
| 1508 | *val = state->content_type; | ||
| 1509 | } else if (property == connector->scaling_mode_property) { | ||
| 1510 | *val = state->scaling_mode; | ||
| 1511 | } else if (property == connector->content_protection_property) { | ||
| 1512 | *val = state->content_protection; | ||
| 1513 | } else if (property == config->writeback_fb_id_property) { | ||
| 1514 | /* Writeback framebuffer is one-shot, write and forget */ | ||
| 1515 | *val = 0; | ||
| 1516 | } else if (property == config->writeback_out_fence_ptr_property) { | ||
| 1517 | *val = 0; | ||
| 1518 | } else if (connector->funcs->atomic_get_property) { | ||
| 1519 | return connector->funcs->atomic_get_property(connector, | ||
| 1520 | state, property, val); | ||
| 1521 | } else { | ||
| 1522 | return -EINVAL; | ||
| 1523 | } | ||
| 1524 | |||
| 1525 | return 0; | ||
| 1526 | } | ||
| 1527 | |||
| 1528 | int drm_atomic_get_property(struct drm_mode_object *obj, | ||
| 1529 | struct drm_property *property, uint64_t *val) | ||
| 1530 | { | ||
| 1531 | struct drm_device *dev = property->dev; | ||
| 1532 | int ret; | ||
| 1533 | |||
| 1534 | switch (obj->type) { | ||
| 1535 | case DRM_MODE_OBJECT_CONNECTOR: { | ||
| 1536 | struct drm_connector *connector = obj_to_connector(obj); | ||
| 1537 | WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); | ||
| 1538 | ret = drm_atomic_connector_get_property(connector, | ||
| 1539 | connector->state, property, val); | ||
| 1540 | break; | ||
| 1541 | } | ||
| 1542 | case DRM_MODE_OBJECT_CRTC: { | ||
| 1543 | struct drm_crtc *crtc = obj_to_crtc(obj); | ||
| 1544 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); | ||
| 1545 | ret = drm_atomic_crtc_get_property(crtc, | ||
| 1546 | crtc->state, property, val); | ||
| 1547 | break; | ||
| 1548 | } | ||
| 1549 | case DRM_MODE_OBJECT_PLANE: { | ||
| 1550 | struct drm_plane *plane = obj_to_plane(obj); | ||
| 1551 | WARN_ON(!drm_modeset_is_locked(&plane->mutex)); | ||
| 1552 | ret = drm_atomic_plane_get_property(plane, | ||
| 1553 | plane->state, property, val); | ||
| 1554 | break; | ||
| 1555 | } | ||
| 1556 | default: | ||
| 1557 | ret = -EINVAL; | ||
| 1558 | break; | ||
| 1559 | } | ||
| 1560 | |||
| 1561 | return ret; | ||
| 1562 | } | ||
| 1563 | |||
| 1564 | /** | ||
| 1565 | * drm_atomic_set_crtc_for_plane - set crtc for plane | ||
| 1566 | * @plane_state: the plane whose incoming state to update | ||
| 1567 | * @crtc: crtc to use for the plane | ||
| 1568 | * | ||
| 1569 | * Changing the assigned crtc for a plane requires us to grab the lock and state | ||
| 1570 | * for the new crtc, as needed. This function takes care of all these details | ||
| 1571 | * besides updating the pointer in the state object itself. | ||
| 1572 | * | ||
| 1573 | * Returns: | ||
| 1574 | * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK | ||
| 1575 | * then the w/w mutex code has detected a deadlock and the entire atomic | ||
| 1576 | * sequence must be restarted. All other errors are fatal. | ||
| 1577 | */ | ||
| 1578 | int | ||
| 1579 | drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, | ||
| 1580 | struct drm_crtc *crtc) | ||
| 1581 | { | ||
| 1582 | struct drm_plane *plane = plane_state->plane; | ||
| 1583 | struct drm_crtc_state *crtc_state; | ||
| 1584 | /* Nothing to do for the same crtc */ | ||
| 1585 | if (plane_state->crtc == crtc) | ||
| 1586 | return 0; | ||
| 1587 | if (plane_state->crtc) { | ||
| 1588 | crtc_state = drm_atomic_get_crtc_state(plane_state->state, | ||
| 1589 | plane_state->crtc); | ||
| 1590 | if (WARN_ON(IS_ERR(crtc_state))) | ||
| 1591 | return PTR_ERR(crtc_state); | ||
| 1592 | |||
| 1593 | crtc_state->plane_mask &= ~drm_plane_mask(plane); | ||
| 1594 | } | ||
| 1595 | |||
| 1596 | plane_state->crtc = crtc; | ||
| 1597 | |||
| 1598 | if (crtc) { | ||
| 1599 | crtc_state = drm_atomic_get_crtc_state(plane_state->state, | ||
| 1600 | crtc); | ||
| 1601 | if (IS_ERR(crtc_state)) | ||
| 1602 | return PTR_ERR(crtc_state); | ||
| 1603 | crtc_state->plane_mask |= drm_plane_mask(plane); | ||
| 1604 | } | ||
| 1605 | |||
| 1606 | if (crtc) | ||
| 1607 | DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n", | ||
| 1608 | plane->base.id, plane->name, plane_state, | ||
| 1609 | crtc->base.id, crtc->name); | ||
| 1610 | else | ||
| 1611 | DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n", | ||
| 1612 | plane->base.id, plane->name, plane_state); | ||
| 1613 | |||
| 1614 | return 0; | ||
| 1615 | } | ||
| 1616 | EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane); | ||
| 1617 | |||
| 1618 | /** | ||
| 1619 | * drm_atomic_set_fb_for_plane - set framebuffer for plane | ||
| 1620 | * @plane_state: atomic state object for the plane | ||
| 1621 | * @fb: fb to use for the plane | ||
| 1622 | * | ||
| 1623 | * Changing the assigned framebuffer for a plane requires us to grab a reference | ||
| 1624 | * to the new fb and drop the reference to the old fb, if there is one. This | ||
| 1625 | * function takes care of all these details besides updating the pointer in the | ||
| 1626 | * state object itself. | ||
| 1627 | */ | ||
| 1628 | void | ||
| 1629 | drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, | ||
| 1630 | struct drm_framebuffer *fb) | ||
| 1631 | { | ||
| 1632 | struct drm_plane *plane = plane_state->plane; | ||
| 1633 | |||
| 1634 | if (fb) | ||
| 1635 | DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n", | ||
| 1636 | fb->base.id, plane->base.id, plane->name, | ||
| 1637 | plane_state); | ||
| 1638 | else | ||
| 1639 | DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n", | ||
| 1640 | plane->base.id, plane->name, plane_state); | ||
| 1641 | |||
| 1642 | drm_framebuffer_assign(&plane_state->fb, fb); | ||
| 1643 | } | ||
| 1644 | EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); | ||
| 1645 | |||
| 1646 | /** | ||
| 1647 | * drm_atomic_set_fence_for_plane - set fence for plane | ||
| 1648 | * @plane_state: atomic state object for the plane | ||
| 1649 | * @fence: dma_fence to use for the plane | ||
| 1650 | * | ||
| 1651 | * Helper to setup the plane_state fence in case it is not set yet. | ||
| 1652 | * By using this, drivers don't need to worry whether the user chose | ||
| 1653 | * implicit or explicit fencing. | ||
| 1654 | * | ||
| 1655 | * This function will not set the fence to the state if it was set | ||
| 1656 | * via explicit fencing interfaces on the atomic ioctl. In that case it will | ||
| 1657 | * drop the reference to the fence as we are not storing it anywhere. | ||
| 1658 | * Otherwise, if &drm_plane_state.fence is not set, this function just sets it | ||
| 1659 | * to the received implicit fence. In both cases this function consumes a | ||
| 1660 | * reference for @fence. | ||
| 1661 | * | ||
| 1662 | * This way explicit fencing can be used to overrule implicit fencing, which is | ||
| 1663 | * important to make explicit fencing use-cases work: One example is using one | ||
| 1664 | * buffer for 2 screens with different refresh rates. Implicit fencing will | ||
| 1665 | * clamp rendering to the refresh rate of the slower screen, whereas explicit | ||
| 1666 | * fence allows 2 independent render and display loops on a single buffer. If a | ||
| 1667 | * driver obeys both implicit and explicit fences for plane updates, then | ||
| 1668 | * it will break all the benefits of explicit fencing. | ||
| 1669 | */ | ||
| 1670 | void | ||
| 1671 | drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, | ||
| 1672 | struct dma_fence *fence) | ||
| 1673 | { | ||
| 1674 | if (plane_state->fence) { | ||
| 1675 | dma_fence_put(fence); | ||
| 1676 | return; | ||
| 1677 | } | ||
| 1678 | |||
| 1679 | plane_state->fence = fence; | ||
| 1680 | } | ||
| 1681 | EXPORT_SYMBOL(drm_atomic_set_fence_for_plane); | ||
| 1682 | |||
| 1683 | /** | ||
| 1684 | * drm_atomic_set_crtc_for_connector - set crtc for connector | ||
| 1685 | * @conn_state: atomic state object for the connector | ||
| 1686 | * @crtc: crtc to use for the connector | ||
| 1687 | * | ||
| 1688 | * Changing the assigned crtc for a connector requires us to grab the lock and | ||
| 1689 | * state for the new crtc, as needed. This function takes care of all these | ||
| 1690 | * details besides updating the pointer in the state object itself. | ||
| 1691 | * | ||
| 1692 | * Returns: | ||
| 1693 | * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK | ||
| 1694 | * then the w/w mutex code has detected a deadlock and the entire atomic | ||
| 1695 | * sequence must be restarted. All other errors are fatal. | ||
| 1696 | */ | ||
| 1697 | int | ||
| 1698 | drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, | ||
| 1699 | struct drm_crtc *crtc) | ||
| 1700 | { | ||
| 1701 | struct drm_connector *connector = conn_state->connector; | ||
| 1702 | struct drm_crtc_state *crtc_state; | ||
| 1703 | |||
| 1704 | if (conn_state->crtc == crtc) | ||
| 1705 | return 0; | ||
| 1706 | |||
| 1707 | if (conn_state->crtc) { | ||
| 1708 | crtc_state = drm_atomic_get_new_crtc_state(conn_state->state, | ||
| 1709 | conn_state->crtc); | ||
| 1710 | |||
| 1711 | crtc_state->connector_mask &= | ||
| 1712 | ~drm_connector_mask(conn_state->connector); | ||
| 1713 | |||
| 1714 | drm_connector_put(conn_state->connector); | ||
| 1715 | conn_state->crtc = NULL; | ||
| 1716 | } | ||
| 1717 | |||
| 1718 | if (crtc) { | ||
| 1719 | crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc); | ||
| 1720 | if (IS_ERR(crtc_state)) | ||
| 1721 | return PTR_ERR(crtc_state); | ||
| 1722 | |||
| 1723 | crtc_state->connector_mask |= | ||
| 1724 | drm_connector_mask(conn_state->connector); | ||
| 1725 | |||
| 1726 | drm_connector_get(conn_state->connector); | ||
| 1727 | conn_state->crtc = crtc; | ||
| 1728 | |||
| 1729 | DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n", | ||
| 1730 | connector->base.id, connector->name, | ||
| 1731 | conn_state, crtc->base.id, crtc->name); | ||
| 1732 | } else { | ||
| 1733 | DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n", | ||
| 1734 | connector->base.id, connector->name, | ||
| 1735 | conn_state); | ||
| 1736 | } | ||
| 1737 | |||
| 1738 | return 0; | ||
| 1739 | } | ||
| 1740 | EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector); | ||
| 1741 | |||
| 1742 | /* | ||
| 1743 | * drm_atomic_get_writeback_job - return or allocate a writeback job | ||
| 1744 | * @conn_state: Connector state to get the job for | ||
| 1745 | * | ||
| 1746 | * Writeback jobs have a different lifetime to the atomic state they are | ||
| 1747 | * associated with. This convenience function takes care of allocating a job | ||
| 1748 | * if there isn't yet one associated with the connector state, otherwise | ||
| 1749 | * it just returns the existing job. | ||
| 1750 | * | ||
| 1751 | * Returns: The writeback job for the given connector state | ||
| 1752 | */ | ||
| 1753 | static struct drm_writeback_job * | ||
| 1754 | drm_atomic_get_writeback_job(struct drm_connector_state *conn_state) | ||
| 1755 | { | ||
| 1756 | WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); | ||
| 1757 | |||
| 1758 | if (!conn_state->writeback_job) | ||
| 1759 | conn_state->writeback_job = | ||
| 1760 | kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL); | ||
| 1761 | |||
| 1762 | return conn_state->writeback_job; | ||
| 1763 | } | ||
| 1764 | |||
| 1765 | /** | ||
| 1766 | * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer | ||
| 1767 | * @conn_state: atomic state object for the connector | ||
| 1768 | * @fb: fb to use for the connector | ||
| 1769 | * | ||
| 1770 | * This is used to set the framebuffer for a writeback connector, which outputs | ||
| 1771 | * to a buffer instead of an actual physical connector. | ||
| 1772 | * Changing the assigned framebuffer requires us to grab a reference to the new | ||
| 1773 | * fb and drop the reference to the old fb, if there is one. This function | ||
| 1774 | * takes care of all these details besides updating the pointer in the | ||
| 1775 | * state object itself. | ||
| 1776 | * | ||
| 1777 | * Note: The only way conn_state can already have an fb set is if the commit | ||
| 1778 | * sets the property more than once. | ||
| 1779 | * | ||
| 1780 | * See also: drm_writeback_connector_init() | ||
| 1781 | * | ||
| 1782 | * Returns: 0 on success | ||
| 1783 | */ | ||
| 1784 | int drm_atomic_set_writeback_fb_for_connector( | ||
| 1785 | struct drm_connector_state *conn_state, | ||
| 1786 | struct drm_framebuffer *fb) | ||
| 1787 | { | ||
| 1788 | struct drm_writeback_job *job = | ||
| 1789 | drm_atomic_get_writeback_job(conn_state); | ||
| 1790 | if (!job) | ||
| 1791 | return -ENOMEM; | ||
| 1792 | |||
| 1793 | drm_framebuffer_assign(&job->fb, fb); | ||
| 1794 | |||
| 1795 | if (fb) | ||
| 1796 | DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n", | ||
| 1797 | fb->base.id, conn_state); | ||
| 1798 | else | ||
| 1799 | DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n", | ||
| 1800 | conn_state); | ||
| 1801 | |||
| 1802 | return 0; | ||
| 1803 | } | ||
| 1804 | EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector); | ||
| 1805 | |||
| 1806 | /** | ||
| 1807 | * drm_atomic_add_affected_connectors - add connectors for crtc | 842 | * drm_atomic_add_affected_connectors - add connectors for crtc |
| 1808 | * @state: atomic state | 843 | * @state: atomic state |
| 1809 | * @crtc: DRM crtc | 844 | * @crtc: DRM crtc |
| @@ -2039,7 +1074,7 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state) | |||
| 2039 | } | 1074 | } |
| 2040 | EXPORT_SYMBOL(drm_atomic_nonblocking_commit); | 1075 | EXPORT_SYMBOL(drm_atomic_nonblocking_commit); |
| 2041 | 1076 | ||
| 2042 | static void drm_atomic_print_state(const struct drm_atomic_state *state) | 1077 | void drm_atomic_print_state(const struct drm_atomic_state *state) |
| 2043 | { | 1078 | { |
| 2044 | struct drm_printer p = drm_info_printer(state->dev->dev); | 1079 | struct drm_printer p = drm_info_printer(state->dev->dev); |
| 2045 | struct drm_plane *plane; | 1080 | struct drm_plane *plane; |
| @@ -2146,544 +1181,3 @@ int drm_atomic_debugfs_init(struct drm_minor *minor) | |||
| 2146 | } | 1181 | } |
| 2147 | #endif | 1182 | #endif |
| 2148 | 1183 | ||
| 2149 | /* | ||
| 2150 | * The big monster ioctl | ||
| 2151 | */ | ||
| 2152 | |||
| 2153 | static struct drm_pending_vblank_event *create_vblank_event( | ||
| 2154 | struct drm_crtc *crtc, uint64_t user_data) | ||
| 2155 | { | ||
| 2156 | struct drm_pending_vblank_event *e = NULL; | ||
| 2157 | |||
| 2158 | e = kzalloc(sizeof *e, GFP_KERNEL); | ||
| 2159 | if (!e) | ||
| 2160 | return NULL; | ||
| 2161 | |||
| 2162 | e->event.base.type = DRM_EVENT_FLIP_COMPLETE; | ||
| 2163 | e->event.base.length = sizeof(e->event); | ||
| 2164 | e->event.vbl.crtc_id = crtc->base.id; | ||
| 2165 | e->event.vbl.user_data = user_data; | ||
| 2166 | |||
| 2167 | return e; | ||
| 2168 | } | ||
| 2169 | |||
| 2170 | int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, | ||
| 2171 | struct drm_connector *connector, | ||
| 2172 | int mode) | ||
| 2173 | { | ||
| 2174 | struct drm_connector *tmp_connector; | ||
| 2175 | struct drm_connector_state *new_conn_state; | ||
| 2176 | struct drm_crtc *crtc; | ||
| 2177 | struct drm_crtc_state *crtc_state; | ||
| 2178 | int i, ret, old_mode = connector->dpms; | ||
| 2179 | bool active = false; | ||
| 2180 | |||
| 2181 | ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, | ||
| 2182 | state->acquire_ctx); | ||
| 2183 | if (ret) | ||
| 2184 | return ret; | ||
| 2185 | |||
| 2186 | if (mode != DRM_MODE_DPMS_ON) | ||
| 2187 | mode = DRM_MODE_DPMS_OFF; | ||
| 2188 | connector->dpms = mode; | ||
| 2189 | |||
| 2190 | crtc = connector->state->crtc; | ||
| 2191 | if (!crtc) | ||
| 2192 | goto out; | ||
| 2193 | ret = drm_atomic_add_affected_connectors(state, crtc); | ||
| 2194 | if (ret) | ||
| 2195 | goto out; | ||
| 2196 | |||
| 2197 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | ||
| 2198 | if (IS_ERR(crtc_state)) { | ||
| 2199 | ret = PTR_ERR(crtc_state); | ||
| 2200 | goto out; | ||
| 2201 | } | ||
| 2202 | |||
| 2203 | for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) { | ||
| 2204 | if (new_conn_state->crtc != crtc) | ||
| 2205 | continue; | ||
| 2206 | if (tmp_connector->dpms == DRM_MODE_DPMS_ON) { | ||
| 2207 | active = true; | ||
| 2208 | break; | ||
| 2209 | } | ||
| 2210 | } | ||
| 2211 | |||
| 2212 | crtc_state->active = active; | ||
| 2213 | ret = drm_atomic_commit(state); | ||
| 2214 | out: | ||
| 2215 | if (ret != 0) | ||
| 2216 | connector->dpms = old_mode; | ||
| 2217 | return ret; | ||
| 2218 | } | ||
| 2219 | |||
| 2220 | int drm_atomic_set_property(struct drm_atomic_state *state, | ||
| 2221 | struct drm_mode_object *obj, | ||
| 2222 | struct drm_property *prop, | ||
| 2223 | uint64_t prop_value) | ||
| 2224 | { | ||
| 2225 | struct drm_mode_object *ref; | ||
| 2226 | int ret; | ||
| 2227 | |||
| 2228 | if (!drm_property_change_valid_get(prop, prop_value, &ref)) | ||
| 2229 | return -EINVAL; | ||
| 2230 | |||
| 2231 | switch (obj->type) { | ||
| 2232 | case DRM_MODE_OBJECT_CONNECTOR: { | ||
| 2233 | struct drm_connector *connector = obj_to_connector(obj); | ||
| 2234 | struct drm_connector_state *connector_state; | ||
| 2235 | |||
| 2236 | connector_state = drm_atomic_get_connector_state(state, connector); | ||
| 2237 | if (IS_ERR(connector_state)) { | ||
| 2238 | ret = PTR_ERR(connector_state); | ||
| 2239 | break; | ||
| 2240 | } | ||
| 2241 | |||
| 2242 | ret = drm_atomic_connector_set_property(connector, | ||
| 2243 | connector_state, prop, prop_value); | ||
| 2244 | break; | ||
| 2245 | } | ||
| 2246 | case DRM_MODE_OBJECT_CRTC: { | ||
| 2247 | struct drm_crtc *crtc = obj_to_crtc(obj); | ||
| 2248 | struct drm_crtc_state *crtc_state; | ||
| 2249 | |||
| 2250 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | ||
| 2251 | if (IS_ERR(crtc_state)) { | ||
| 2252 | ret = PTR_ERR(crtc_state); | ||
| 2253 | break; | ||
| 2254 | } | ||
| 2255 | |||
| 2256 | ret = drm_atomic_crtc_set_property(crtc, | ||
| 2257 | crtc_state, prop, prop_value); | ||
| 2258 | break; | ||
| 2259 | } | ||
| 2260 | case DRM_MODE_OBJECT_PLANE: { | ||
| 2261 | struct drm_plane *plane = obj_to_plane(obj); | ||
| 2262 | struct drm_plane_state *plane_state; | ||
| 2263 | |||
| 2264 | plane_state = drm_atomic_get_plane_state(state, plane); | ||
| 2265 | if (IS_ERR(plane_state)) { | ||
| 2266 | ret = PTR_ERR(plane_state); | ||
| 2267 | break; | ||
| 2268 | } | ||
| 2269 | |||
| 2270 | ret = drm_atomic_plane_set_property(plane, | ||
| 2271 | plane_state, prop, prop_value); | ||
| 2272 | break; | ||
| 2273 | } | ||
| 2274 | default: | ||
| 2275 | ret = -EINVAL; | ||
| 2276 | break; | ||
| 2277 | } | ||
| 2278 | |||
| 2279 | drm_property_change_valid_put(prop, ref); | ||
| 2280 | return ret; | ||
| 2281 | } | ||
| 2282 | |||
| 2283 | /** | ||
| 2284 | * DOC: explicit fencing properties | ||
| 2285 | * | ||
| 2286 | * Explicit fencing allows userspace to control the buffer synchronization | ||
| 2287 | * between devices. A fence or a group of fences is transferred to/from | ||
| 2288 | * userspace using Sync File fds and there are two DRM properties for that. | ||
| 2289 | * IN_FENCE_FD on each DRM Plane to send fences to the kernel and | ||
| 2290 | * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel. | ||
| 2291 | * | ||
| 2292 | * As a contrast, with implicit fencing the kernel keeps track of any | ||
| 2293 | * ongoing rendering, and automatically ensures that the atomic update waits | ||
| 2294 | * for any pending rendering to complete. For shared buffers represented with | ||
| 2295 | * a &struct dma_buf this is tracked in &struct reservation_object. | ||
| 2296 | * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), | ||
| 2297 | * whereas explicit fencing is what Android wants. | ||
| 2298 | * | ||
| 2299 | * "IN_FENCE_FD”: | ||
| 2300 | * Use this property to pass a fence that DRM should wait on before | ||
| 2301 | * proceeding with the Atomic Commit request and show the framebuffer for | ||
| 2302 | * the plane on the screen. The fence can be either a normal fence or a | ||
| 2303 | * merged one, the sync_file framework will handle both cases and use a | ||
| 2304 | * fence_array if a merged fence is received. Passing -1 here means no | ||
| 2305 | * fences to wait on. | ||
| 2306 | * | ||
| 2307 | * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag | ||
| 2308 | * it will only check if the Sync File is a valid one. | ||
| 2309 | * | ||
| 2310 | * On the driver side the fence is stored on the @fence parameter of | ||
| 2311 | * &struct drm_plane_state. Drivers which also support implicit fencing | ||
| 2312 | * should set the implicit fence using drm_atomic_set_fence_for_plane(), | ||
| 2313 | * to make sure there's consistent behaviour between drivers in precedence | ||
| 2314 | * of implicit vs. explicit fencing. | ||
| 2315 | * | ||
| 2316 | * "OUT_FENCE_PTR”: | ||
| 2317 | * Use this property to pass a file descriptor pointer to DRM. Once the | ||
| 2318 | * Atomic Commit request call returns OUT_FENCE_PTR will be filled with | ||
| 2319 | * the file descriptor number of a Sync File. This Sync File contains the | ||
| 2320 | * CRTC fence that will be signaled when all framebuffers present on the | ||
| 2321 | * Atomic Commit request for that given CRTC are scanned out on the | ||
| 2322 | * screen. | ||
| 2323 | * | ||
| 2324 | * The Atomic Commit request fails if an invalid pointer is passed. If the | ||
| 2325 | * Atomic Commit request fails for any other reason the out fence fd | ||
| 2326 | * returned will be -1. On an Atomic Commit with the | ||
| 2327 | * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1. | ||
| 2328 | * | ||
| 2329 | * Note that out-fences don't have a special interface to drivers and are | ||
| 2330 | * internally represented by a &struct drm_pending_vblank_event in struct | ||
| 2331 | * &drm_crtc_state, which is also used by the nonblocking atomic commit | ||
| 2332 | * helpers and for the DRM event handling for existing userspace. | ||
| 2333 | */ | ||
| 2334 | |||
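As a rough illustration of how these two properties are driven from userspace (not part of this patch), the sketch below builds an atomic commit with libdrm. The property IDs in_fence_fd_prop and out_fence_ptr_prop are assumptions supplied by the caller, looked up beforehand e.g. via drmModeObjectGetProperties(); error handling is deliberately minimal.

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/*
	 * Hedged userspace sketch: pass a render fence in via IN_FENCE_FD and
	 * collect the CRTC out-fence via OUT_FENCE_PTR. Property IDs are
	 * caller-supplied assumptions, not fixed numbers.
	 */
	int commit_with_fences(int fd, uint32_t plane_id, uint32_t crtc_id,
			       uint32_t in_fence_fd_prop, uint32_t out_fence_ptr_prop,
			       int in_fence_fd, int *out_fence_fd)
	{
		drmModeAtomicReq *req = drmModeAtomicAlloc();
		int32_t out_fence = -1;	/* the kernel put_user()s the sync_file fd here */
		int ret;

		if (!req)
			return -ENOMEM;

		/* A sign-extended -1 means "no fence to wait on". */
		drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop,
					 (uint64_t)(int64_t)in_fence_fd);
		/* The property value is the user pointer the kernel writes the fd to. */
		drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
					 (uint64_t)(uintptr_t)&out_fence);

		ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
		drmModeAtomicFree(req);

		if (!ret)
			*out_fence_fd = out_fence;
		return ret;
	}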
| 2335 | struct drm_out_fence_state { | ||
| 2336 | s32 __user *out_fence_ptr; | ||
| 2337 | struct sync_file *sync_file; | ||
| 2338 | int fd; | ||
| 2339 | }; | ||
| 2340 | |||
| 2341 | static int setup_out_fence(struct drm_out_fence_state *fence_state, | ||
| 2342 | struct dma_fence *fence) | ||
| 2343 | { | ||
| 2344 | fence_state->fd = get_unused_fd_flags(O_CLOEXEC); | ||
| 2345 | if (fence_state->fd < 0) | ||
| 2346 | return fence_state->fd; | ||
| 2347 | |||
| 2348 | if (put_user(fence_state->fd, fence_state->out_fence_ptr)) | ||
| 2349 | return -EFAULT; | ||
| 2350 | |||
| 2351 | fence_state->sync_file = sync_file_create(fence); | ||
| 2352 | if (!fence_state->sync_file) | ||
| 2353 | return -ENOMEM; | ||
| 2354 | |||
| 2355 | return 0; | ||
| 2356 | } | ||
| 2357 | |||
| 2358 | static int prepare_signaling(struct drm_device *dev, | ||
| 2359 | struct drm_atomic_state *state, | ||
| 2360 | struct drm_mode_atomic *arg, | ||
| 2361 | struct drm_file *file_priv, | ||
| 2362 | struct drm_out_fence_state **fence_state, | ||
| 2363 | unsigned int *num_fences) | ||
| 2364 | { | ||
| 2365 | struct drm_crtc *crtc; | ||
| 2366 | struct drm_crtc_state *crtc_state; | ||
| 2367 | struct drm_connector *conn; | ||
| 2368 | struct drm_connector_state *conn_state; | ||
| 2369 | int i, c = 0, ret; | ||
| 2370 | |||
| 2371 | if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) | ||
| 2372 | return 0; | ||
| 2373 | |||
| 2374 | for_each_new_crtc_in_state(state, crtc, crtc_state, i) { | ||
| 2375 | s32 __user *fence_ptr; | ||
| 2376 | |||
| 2377 | fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); | ||
| 2378 | |||
| 2379 | if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) { | ||
| 2380 | struct drm_pending_vblank_event *e; | ||
| 2381 | |||
| 2382 | e = create_vblank_event(crtc, arg->user_data); | ||
| 2383 | if (!e) | ||
| 2384 | return -ENOMEM; | ||
| 2385 | |||
| 2386 | crtc_state->event = e; | ||
| 2387 | } | ||
| 2388 | |||
| 2389 | if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { | ||
| 2390 | struct drm_pending_vblank_event *e = crtc_state->event; | ||
| 2391 | |||
| 2392 | if (!file_priv) | ||
| 2393 | continue; | ||
| 2394 | |||
| 2395 | ret = drm_event_reserve_init(dev, file_priv, &e->base, | ||
| 2396 | &e->event.base); | ||
| 2397 | if (ret) { | ||
| 2398 | kfree(e); | ||
| 2399 | crtc_state->event = NULL; | ||
| 2400 | return ret; | ||
| 2401 | } | ||
| 2402 | } | ||
| 2403 | |||
| 2404 | if (fence_ptr) { | ||
| 2405 | struct dma_fence *fence; | ||
| 2406 | struct drm_out_fence_state *f; | ||
| 2407 | |||
| 2408 | f = krealloc(*fence_state, sizeof(**fence_state) * | ||
| 2409 | (*num_fences + 1), GFP_KERNEL); | ||
| 2410 | if (!f) | ||
| 2411 | return -ENOMEM; | ||
| 2412 | |||
| 2413 | memset(&f[*num_fences], 0, sizeof(*f)); | ||
| 2414 | |||
| 2415 | f[*num_fences].out_fence_ptr = fence_ptr; | ||
| 2416 | *fence_state = f; | ||
| 2417 | |||
| 2418 | fence = drm_crtc_create_fence(crtc); | ||
| 2419 | if (!fence) | ||
| 2420 | return -ENOMEM; | ||
| 2421 | |||
| 2422 | ret = setup_out_fence(&f[(*num_fences)++], fence); | ||
| 2423 | if (ret) { | ||
| 2424 | dma_fence_put(fence); | ||
| 2425 | return ret; | ||
| 2426 | } | ||
| 2427 | |||
| 2428 | crtc_state->event->base.fence = fence; | ||
| 2429 | } | ||
| 2430 | |||
| 2431 | c++; | ||
| 2432 | } | ||
| 2433 | |||
| 2434 | for_each_new_connector_in_state(state, conn, conn_state, i) { | ||
| 2435 | struct drm_writeback_connector *wb_conn; | ||
| 2436 | struct drm_writeback_job *job; | ||
| 2437 | struct drm_out_fence_state *f; | ||
| 2438 | struct dma_fence *fence; | ||
| 2439 | s32 __user *fence_ptr; | ||
| 2440 | |||
| 2441 | fence_ptr = get_out_fence_for_connector(state, conn); | ||
| 2442 | if (!fence_ptr) | ||
| 2443 | continue; | ||
| 2444 | |||
| 2445 | job = drm_atomic_get_writeback_job(conn_state); | ||
| 2446 | if (!job) | ||
| 2447 | return -ENOMEM; | ||
| 2448 | |||
| 2449 | f = krealloc(*fence_state, sizeof(**fence_state) * | ||
| 2450 | (*num_fences + 1), GFP_KERNEL); | ||
| 2451 | if (!f) | ||
| 2452 | return -ENOMEM; | ||
| 2453 | |||
| 2454 | memset(&f[*num_fences], 0, sizeof(*f)); | ||
| 2455 | |||
| 2456 | f[*num_fences].out_fence_ptr = fence_ptr; | ||
| 2457 | *fence_state = f; | ||
| 2458 | |||
| 2459 | wb_conn = drm_connector_to_writeback(conn); | ||
| 2460 | fence = drm_writeback_get_out_fence(wb_conn); | ||
| 2461 | if (!fence) | ||
| 2462 | return -ENOMEM; | ||
| 2463 | |||
| 2464 | ret = setup_out_fence(&f[(*num_fences)++], fence); | ||
| 2465 | if (ret) { | ||
| 2466 | dma_fence_put(fence); | ||
| 2467 | return ret; | ||
| 2468 | } | ||
| 2469 | |||
| 2470 | job->out_fence = fence; | ||
| 2471 | } | ||
| 2472 | |||
| 2473 | /* | ||
| 2474 | * Having this flag means userspace is waiting for an event that will | ||
| 2475 | * never arrive, because there is no CRTC to signal it. | ||
| 2476 | */ | ||
| 2477 | if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) | ||
| 2478 | return -EINVAL; | ||
| 2479 | |||
| 2480 | return 0; | ||
| 2481 | } | ||
| 2482 | |||
| 2483 | static void complete_signaling(struct drm_device *dev, | ||
| 2484 | struct drm_atomic_state *state, | ||
| 2485 | struct drm_out_fence_state *fence_state, | ||
| 2486 | unsigned int num_fences, | ||
| 2487 | bool install_fds) | ||
| 2488 | { | ||
| 2489 | struct drm_crtc *crtc; | ||
| 2490 | struct drm_crtc_state *crtc_state; | ||
| 2491 | int i; | ||
| 2492 | |||
| 2493 | if (install_fds) { | ||
| 2494 | for (i = 0; i < num_fences; i++) | ||
| 2495 | fd_install(fence_state[i].fd, | ||
| 2496 | fence_state[i].sync_file->file); | ||
| 2497 | |||
| 2498 | kfree(fence_state); | ||
| 2499 | return; | ||
| 2500 | } | ||
| 2501 | |||
| 2502 | for_each_new_crtc_in_state(state, crtc, crtc_state, i) { | ||
| 2503 | struct drm_pending_vblank_event *event = crtc_state->event; | ||
| 2504 | /* | ||
| 2505 | * Free the allocated event. drm_atomic_helper_setup_commit | ||
| 2506 | * can allocate an event too, so only free it if it's ours | ||
| 2507 | * to prevent a double free in drm_atomic_state_clear. | ||
| 2508 | */ | ||
| 2509 | if (event && (event->base.fence || event->base.file_priv)) { | ||
| 2510 | drm_event_cancel_free(dev, &event->base); | ||
| 2511 | crtc_state->event = NULL; | ||
| 2512 | } | ||
| 2513 | } | ||
| 2514 | |||
| 2515 | if (!fence_state) | ||
| 2516 | return; | ||
| 2517 | |||
| 2518 | for (i = 0; i < num_fences; i++) { | ||
| 2519 | if (fence_state[i].sync_file) | ||
| 2520 | fput(fence_state[i].sync_file->file); | ||
| 2521 | if (fence_state[i].fd >= 0) | ||
| 2522 | put_unused_fd(fence_state[i].fd); | ||
| 2523 | |||
| 2524 | /* If this fails log error to the user */ | ||
| 2525 | if (fence_state[i].out_fence_ptr && | ||
| 2526 | put_user(-1, fence_state[i].out_fence_ptr)) | ||
| 2527 | DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n"); | ||
| 2528 | } | ||
| 2529 | |||
| 2530 | kfree(fence_state); | ||
| 2531 | } | ||
| 2532 | |||
| 2533 | int drm_mode_atomic_ioctl(struct drm_device *dev, | ||
| 2534 | void *data, struct drm_file *file_priv) | ||
| 2535 | { | ||
| 2536 | struct drm_mode_atomic *arg = data; | ||
| 2537 | uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr); | ||
| 2538 | uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr); | ||
| 2539 | uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr); | ||
| 2540 | uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr); | ||
| 2541 | unsigned int copied_objs, copied_props; | ||
| 2542 | struct drm_atomic_state *state; | ||
| 2543 | struct drm_modeset_acquire_ctx ctx; | ||
| 2544 | struct drm_out_fence_state *fence_state; | ||
| 2545 | int ret = 0; | ||
| 2546 | unsigned int i, j, num_fences; | ||
| 2547 | |||
| 2548 | /* disallow for drivers not supporting atomic: */ | ||
| 2549 | if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) | ||
| 2550 | return -EINVAL; | ||
| 2551 | |||
| 2552 | /* disallow for userspace that has not enabled atomic cap (even | ||
| 2553 | * though this may be a bit overkill, since legacy userspace | ||
| 2554 | * wouldn't know how to call this ioctl) | ||
| 2555 | */ | ||
| 2556 | if (!file_priv->atomic) | ||
| 2557 | return -EINVAL; | ||
| 2558 | |||
| 2559 | if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) | ||
| 2560 | return -EINVAL; | ||
| 2561 | |||
| 2562 | if (arg->reserved) | ||
| 2563 | return -EINVAL; | ||
| 2564 | |||
| 2565 | if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) && | ||
| 2566 | !dev->mode_config.async_page_flip) | ||
| 2567 | return -EINVAL; | ||
| 2568 | |||
| 2569 | /* can't test and expect an event at the same time. */ | ||
| 2570 | if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) && | ||
| 2571 | (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) | ||
| 2572 | return -EINVAL; | ||
| 2573 | |||
| 2574 | drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); | ||
| 2575 | |||
| 2576 | state = drm_atomic_state_alloc(dev); | ||
| 2577 | if (!state) | ||
| 2578 | return -ENOMEM; | ||
| 2579 | |||
| 2580 | state->acquire_ctx = &ctx; | ||
| 2581 | state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); | ||
| 2582 | |||
| 2583 | retry: | ||
| 2584 | copied_objs = 0; | ||
| 2585 | copied_props = 0; | ||
| 2586 | fence_state = NULL; | ||
| 2587 | num_fences = 0; | ||
| 2588 | |||
| 2589 | for (i = 0; i < arg->count_objs; i++) { | ||
| 2590 | uint32_t obj_id, count_props; | ||
| 2591 | struct drm_mode_object *obj; | ||
| 2592 | |||
| 2593 | if (get_user(obj_id, objs_ptr + copied_objs)) { | ||
| 2594 | ret = -EFAULT; | ||
| 2595 | goto out; | ||
| 2596 | } | ||
| 2597 | |||
| 2598 | obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY); | ||
| 2599 | if (!obj) { | ||
| 2600 | ret = -ENOENT; | ||
| 2601 | goto out; | ||
| 2602 | } | ||
| 2603 | |||
| 2604 | if (!obj->properties) { | ||
| 2605 | drm_mode_object_put(obj); | ||
| 2606 | ret = -ENOENT; | ||
| 2607 | goto out; | ||
| 2608 | } | ||
| 2609 | |||
| 2610 | if (get_user(count_props, count_props_ptr + copied_objs)) { | ||
| 2611 | drm_mode_object_put(obj); | ||
| 2612 | ret = -EFAULT; | ||
| 2613 | goto out; | ||
| 2614 | } | ||
| 2615 | |||
| 2616 | copied_objs++; | ||
| 2617 | |||
| 2618 | for (j = 0; j < count_props; j++) { | ||
| 2619 | uint32_t prop_id; | ||
| 2620 | uint64_t prop_value; | ||
| 2621 | struct drm_property *prop; | ||
| 2622 | |||
| 2623 | if (get_user(prop_id, props_ptr + copied_props)) { | ||
| 2624 | drm_mode_object_put(obj); | ||
| 2625 | ret = -EFAULT; | ||
| 2626 | goto out; | ||
| 2627 | } | ||
| 2628 | |||
| 2629 | prop = drm_mode_obj_find_prop_id(obj, prop_id); | ||
| 2630 | if (!prop) { | ||
| 2631 | drm_mode_object_put(obj); | ||
| 2632 | ret = -ENOENT; | ||
| 2633 | goto out; | ||
| 2634 | } | ||
| 2635 | |||
| 2636 | if (copy_from_user(&prop_value, | ||
| 2637 | prop_values_ptr + copied_props, | ||
| 2638 | sizeof(prop_value))) { | ||
| 2639 | drm_mode_object_put(obj); | ||
| 2640 | ret = -EFAULT; | ||
| 2641 | goto out; | ||
| 2642 | } | ||
| 2643 | |||
| 2644 | ret = drm_atomic_set_property(state, obj, prop, | ||
| 2645 | prop_value); | ||
| 2646 | if (ret) { | ||
| 2647 | drm_mode_object_put(obj); | ||
| 2648 | goto out; | ||
| 2649 | } | ||
| 2650 | |||
| 2651 | copied_props++; | ||
| 2652 | } | ||
| 2653 | |||
| 2654 | drm_mode_object_put(obj); | ||
| 2655 | } | ||
| 2656 | |||
| 2657 | ret = prepare_signaling(dev, state, arg, file_priv, &fence_state, | ||
| 2658 | &num_fences); | ||
| 2659 | if (ret) | ||
| 2660 | goto out; | ||
| 2661 | |||
| 2662 | if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { | ||
| 2663 | ret = drm_atomic_check_only(state); | ||
| 2664 | } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { | ||
| 2665 | ret = drm_atomic_nonblocking_commit(state); | ||
| 2666 | } else { | ||
| 2667 | if (unlikely(drm_debug & DRM_UT_STATE)) | ||
| 2668 | drm_atomic_print_state(state); | ||
| 2669 | |||
| 2670 | ret = drm_atomic_commit(state); | ||
| 2671 | } | ||
| 2672 | |||
| 2673 | out: | ||
| 2674 | complete_signaling(dev, state, fence_state, num_fences, !ret); | ||
| 2675 | |||
| 2676 | if (ret == -EDEADLK) { | ||
| 2677 | drm_atomic_state_clear(state); | ||
| 2678 | ret = drm_modeset_backoff(&ctx); | ||
| 2679 | if (!ret) | ||
| 2680 | goto retry; | ||
| 2681 | } | ||
| 2682 | |||
| 2683 | drm_atomic_state_put(state); | ||
| 2684 | |||
| 2685 | drm_modeset_drop_locks(&ctx); | ||
| 2686 | drm_modeset_acquire_fini(&ctx); | ||
| 2687 | |||
| 2688 | return ret; | ||
| 2689 | } | ||
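For reference, the flattened array layout that the copy loop above parses looks like this from the userspace side. This is a speculative sketch, not part of this patch; real applications would normally go through libdrm rather than driving DRM_IOCTL_MODE_ATOMIC directly, and the object/property IDs below are placeholders.

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>
	#include <drm/drm_mode.h>

	/*
	 * Hedged sketch of the raw ioctl layout: objs_ptr[i] names an object,
	 * count_props_ptr[i] says how many of the flattened props_ptr[] /
	 * prop_values_ptr[] entries belong to it. All IDs are placeholders.
	 */
	static int atomic_commit_one_obj(int fd, uint32_t obj_id,
					 uint32_t prop_a, uint64_t val_a,
					 uint32_t prop_b, uint64_t val_b)
	{
		uint32_t objs[1] = { obj_id };
		uint32_t count_props[1] = { 2 };
		uint32_t props[2] = { prop_a, prop_b };
		uint64_t prop_values[2] = { val_a, val_b };
		struct drm_mode_atomic arg = {
			.flags = DRM_MODE_ATOMIC_ALLOW_MODESET,
			.count_objs = 1,
			.objs_ptr = (uintptr_t)objs,
			.count_props_ptr = (uintptr_t)count_props,
			.props_ptr = (uintptr_t)props,
			.prop_values_ptr = (uintptr_t)prop_values,
		};

		return ioctl(fd, DRM_IOCTL_MODE_ATOMIC, &arg);
	}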
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2c23a48482da..3cf1aa132778 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | #include <drm/drmP.h> | 28 | #include <drm/drmP.h> |
| 29 | #include <drm/drm_atomic.h> | 29 | #include <drm/drm_atomic.h> |
| 30 | #include <drm/drm_atomic_uapi.h> | ||
| 30 | #include <drm/drm_plane_helper.h> | 31 | #include <drm/drm_plane_helper.h> |
| 31 | #include <drm/drm_crtc_helper.h> | 32 | #include <drm/drm_crtc_helper.h> |
| 32 | #include <drm/drm_atomic_helper.h> | 33 | #include <drm/drm_atomic_helper.h> |
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c new file mode 100644 index 000000000000..26690a664ec6 --- /dev/null +++ b/drivers/gpu/drm/drm_atomic_uapi.c | |||
| @@ -0,0 +1,1393 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2014 Red Hat | ||
| 3 | * Copyright (C) 2014 Intel Corp. | ||
| 4 | * Copyright (C) 2018 Intel Corp. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: | ||
| 25 | * Rob Clark <robdclark@gmail.com> | ||
| 26 | * Daniel Vetter <daniel.vetter@ffwll.ch> | ||
| 27 | */ | ||
| 28 | |||
| 29 | #include <drm/drm_atomic_uapi.h> | ||
| 30 | #include <drm/drm_atomic.h> | ||
| 31 | #include <drm/drm_print.h> | ||
| 32 | #include <drm/drm_drv.h> | ||
| 33 | #include <drm/drm_writeback.h> | ||
| 34 | #include <drm/drm_vblank.h> | ||
| 35 | |||
| 36 | #include <linux/dma-fence.h> | ||
| 37 | #include <linux/uaccess.h> | ||
| 38 | #include <linux/sync_file.h> | ||
| 39 | #include <linux/file.h> | ||
| 40 | |||
| 41 | #include "drm_crtc_internal.h" | ||
| 42 | |||
| 43 | /** | ||
| 44 | * DOC: overview | ||
| 45 | * | ||
| 46 | * This file contains the marshalling and demarshalling glue for the atomic UAPI | ||
| 47 | * in all its forms: the monster ATOMIC IOCTL itself, the code for the | ||
| 48 | * GET_PROPERTY and SET_PROPERTY IOCTLs, plus interface functions for | ||
| 49 | * compatibility helpers and drivers which need to construct their own atomic | ||
| 50 | * updates, e.g. for load detection or similar. | ||
| 51 | */ | ||
| 52 | |||
| 53 | /** | ||
| 54 | * drm_atomic_set_mode_for_crtc - set mode for CRTC | ||
| 55 | * @state: the CRTC whose incoming state to update | ||
| 56 | * @mode: kernel-internal mode to use for the CRTC, or NULL to disable | ||
| 57 | * | ||
| 58 | * Set a mode (originating from the kernel) on the desired CRTC state and update | ||
| 59 | * the enable property. | ||
| 60 | * | ||
| 61 | * RETURNS: | ||
| 62 | * Zero on success, error code on failure. Cannot return -EDEADLK. | ||
| 63 | */ | ||
| 64 | int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, | ||
| 65 | const struct drm_display_mode *mode) | ||
| 66 | { | ||
| 67 | struct drm_crtc *crtc = state->crtc; | ||
| 68 | struct drm_mode_modeinfo umode; | ||
| 69 | |||
| 70 | /* Early return for no change. */ | ||
| 71 | if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0) | ||
| 72 | return 0; | ||
| 73 | |||
| 74 | drm_property_blob_put(state->mode_blob); | ||
| 75 | state->mode_blob = NULL; | ||
| 76 | |||
| 77 | if (mode) { | ||
| 78 | drm_mode_convert_to_umode(&umode, mode); | ||
| 79 | state->mode_blob = | ||
| 80 | drm_property_create_blob(state->crtc->dev, | ||
| 81 | sizeof(umode), | ||
| 82 | &umode); | ||
| 83 | if (IS_ERR(state->mode_blob)) | ||
| 84 | return PTR_ERR(state->mode_blob); | ||
| 85 | |||
| 86 | drm_mode_copy(&state->mode, mode); | ||
| 87 | state->enable = true; | ||
| 88 | DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n", | ||
| 89 | mode->name, crtc->base.id, crtc->name, state); | ||
| 90 | } else { | ||
| 91 | memset(&state->mode, 0, sizeof(state->mode)); | ||
| 92 | state->enable = false; | ||
| 93 | DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n", | ||
| 94 | crtc->base.id, crtc->name, state); | ||
| 95 | } | ||
| 96 | |||
| 97 | return 0; | ||
| 98 | } | ||
| 99 | EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc); | ||
| 100 | |||
| 101 | /** | ||
| 102 | * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC | ||
| 103 | * @state: the CRTC whose incoming state to update | ||
| 104 | * @blob: pointer to blob property to use for mode | ||
| 105 | * | ||
| 106 | * Set a mode (originating from a blob property) on the desired CRTC state. | ||
| 107 | * This function will take a reference on the blob property for the CRTC state, | ||
| 108 | * and release the reference held on the state's existing mode property, if any | ||
| 109 | * was set. | ||
| 110 | * | ||
| 111 | * RETURNS: | ||
| 112 | * Zero on success, error code on failure. Cannot return -EDEADLK. | ||
| 113 | */ | ||
| 114 | int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, | ||
| 115 | struct drm_property_blob *blob) | ||
| 116 | { | ||
| 117 | struct drm_crtc *crtc = state->crtc; | ||
| 118 | |||
| 119 | if (blob == state->mode_blob) | ||
| 120 | return 0; | ||
| 121 | |||
| 122 | drm_property_blob_put(state->mode_blob); | ||
| 123 | state->mode_blob = NULL; | ||
| 124 | |||
| 125 | memset(&state->mode, 0, sizeof(state->mode)); | ||
| 126 | |||
| 127 | if (blob) { | ||
| 128 | int ret; | ||
| 129 | |||
| 130 | if (blob->length != sizeof(struct drm_mode_modeinfo)) { | ||
| 131 | DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n", | ||
| 132 | crtc->base.id, crtc->name, | ||
| 133 | blob->length); | ||
| 134 | return -EINVAL; | ||
| 135 | } | ||
| 136 | |||
| 137 | ret = drm_mode_convert_umode(crtc->dev, | ||
| 138 | &state->mode, blob->data); | ||
| 139 | if (ret) { | ||
| 140 | DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n", | ||
| 141 | crtc->base.id, crtc->name, | ||
| 142 | ret, drm_get_mode_status_name(state->mode.status)); | ||
| 143 | drm_mode_debug_printmodeline(&state->mode); | ||
| 144 | return -EINVAL; | ||
| 145 | } | ||
| 146 | |||
| 147 | state->mode_blob = drm_property_blob_get(blob); | ||
| 148 | state->enable = true; | ||
| 149 | DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n", | ||
| 150 | state->mode.name, crtc->base.id, crtc->name, | ||
| 151 | state); | ||
| 152 | } else { | ||
| 153 | state->enable = false; | ||
| 154 | DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n", | ||
| 155 | crtc->base.id, crtc->name, state); | ||
| 156 | } | ||
| 157 | |||
| 158 | return 0; | ||
| 159 | } | ||
| 160 | EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc); | ||
| 161 | |||
| 162 | /** | ||
| 163 | * drm_atomic_set_crtc_for_plane - set crtc for plane | ||
| 164 | * @plane_state: the plane whose incoming state to update | ||
| 165 | * @crtc: crtc to use for the plane | ||
| 166 | * | ||
| 167 | * Changing the assigned crtc for a plane requires us to grab the lock and state | ||
| 168 | * for the new crtc, as needed. This function takes care of all these details | ||
| 169 | * besides updating the pointer in the state object itself. | ||
| 170 | * | ||
| 171 | * Returns: | ||
| 172 | * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK | ||
| 173 | * then the w/w mutex code has detected a deadlock and the entire atomic | ||
| 174 | * sequence must be restarted. All other errors are fatal. | ||
| 175 | */ | ||
| 176 | int | ||
| 177 | drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, | ||
| 178 | struct drm_crtc *crtc) | ||
| 179 | { | ||
| 180 | struct drm_plane *plane = plane_state->plane; | ||
| 181 | struct drm_crtc_state *crtc_state; | ||
| 182 | /* Nothing to do for the same crtc */ | ||
| 183 | if (plane_state->crtc == crtc) | ||
| 184 | return 0; | ||
| 185 | if (plane_state->crtc) { | ||
| 186 | crtc_state = drm_atomic_get_crtc_state(plane_state->state, | ||
| 187 | plane_state->crtc); | ||
| 188 | if (WARN_ON(IS_ERR(crtc_state))) | ||
| 189 | return PTR_ERR(crtc_state); | ||
| 190 | |||
| 191 | crtc_state->plane_mask &= ~drm_plane_mask(plane); | ||
| 192 | } | ||
| 193 | |||
| 194 | plane_state->crtc = crtc; | ||
| 195 | |||
| 196 | if (crtc) { | ||
| 197 | crtc_state = drm_atomic_get_crtc_state(plane_state->state, | ||
| 198 | crtc); | ||
| 199 | if (IS_ERR(crtc_state)) | ||
| 200 | return PTR_ERR(crtc_state); | ||
| 201 | crtc_state->plane_mask |= drm_plane_mask(plane); | ||
| 202 | } | ||
| 203 | |||
| 204 | if (crtc) | ||
| 205 | DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n", | ||
| 206 | plane->base.id, plane->name, plane_state, | ||
| 207 | crtc->base.id, crtc->name); | ||
| 208 | else | ||
| 209 | DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n", | ||
| 210 | plane->base.id, plane->name, plane_state); | ||
| 211 | |||
| 212 | return 0; | ||
| 213 | } | ||
| 214 | EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane); | ||
| 215 | |||
| 216 | /** | ||
| 217 | * drm_atomic_set_fb_for_plane - set framebuffer for plane | ||
| 218 | * @plane_state: atomic state object for the plane | ||
| 219 | * @fb: fb to use for the plane | ||
| 220 | * | ||
| 221 | * Changing the assigned framebuffer for a plane requires us to grab a reference | ||
| 222 | * to the new fb and drop the reference to the old fb, if there is one. This | ||
| 223 | * function takes care of all these details besides updating the pointer in the | ||
| 224 | * state object itself. | ||
| 225 | */ | ||
| 226 | void | ||
| 227 | drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, | ||
| 228 | struct drm_framebuffer *fb) | ||
| 229 | { | ||
| 230 | struct drm_plane *plane = plane_state->plane; | ||
| 231 | |||
| 232 | if (fb) | ||
| 233 | DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n", | ||
| 234 | fb->base.id, plane->base.id, plane->name, | ||
| 235 | plane_state); | ||
| 236 | else | ||
| 237 | DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n", | ||
| 238 | plane->base.id, plane->name, plane_state); | ||
| 239 | |||
| 240 | drm_framebuffer_assign(&plane_state->fb, fb); | ||
| 241 | } | ||
| 242 | EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); | ||
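Taken together, drm_atomic_set_crtc_for_plane() and drm_atomic_set_fb_for_plane() are the building blocks for kernel-internal plane updates. A minimal sketch of such an update follows; the function name is made up for illustration, and the -EDEADLK backoff a real caller needs is omitted.

	#include <drm/drm_atomic.h>
	#include <drm/drm_atomic_uapi.h>

	/* Illustrative only: show @fb on @crtc through @plane in one atomic commit. */
	static int example_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
					struct drm_framebuffer *fb,
					struct drm_modeset_acquire_ctx *ctx)
	{
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;
		int ret;

		state = drm_atomic_state_alloc(plane->dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = ctx;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			goto out;
		}

		/* Grabs the new crtc's state and updates plane_mask as needed. */
		ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
		if (ret)
			goto out;
		/* Takes a reference on @fb and drops the old fb's reference, if any. */
		drm_atomic_set_fb_for_plane(plane_state, fb);

		ret = drm_atomic_commit(state);
	out:
		drm_atomic_state_put(state);
		return ret;
	}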
| 243 | |||
| 244 | /** | ||
| 245 | * drm_atomic_set_fence_for_plane - set fence for plane | ||
| 246 | * @plane_state: atomic state object for the plane | ||
| 247 | * @fence: dma_fence to use for the plane | ||
| 248 | * | ||
| 249 | * Helper to setup the plane_state fence in case it is not set yet. | ||
| 250 | * By using this, drivers don't need to worry whether the user chose | ||
| 251 | * implicit or explicit fencing. | ||
| 252 | * | ||
| 253 | * This function will not set the fence to the state if it was set | ||
| 254 | * via explicit fencing interfaces on the atomic ioctl. In that case it will | ||
| 255 | * drop the reference to the fence as we are not storing it anywhere. | ||
| 256 | * Otherwise, if &drm_plane_state.fence is not set, this function just sets it | ||
| 257 | * to the received implicit fence. In both cases this function consumes a | ||
| 258 | * reference for @fence. | ||
| 259 | * | ||
| 260 | * This way explicit fencing can be used to overrule implicit fencing, which is | ||
| 261 | * important to make explicit fencing use-cases work: One example is using one | ||
| 262 | * buffer for 2 screens with different refresh rates. Implicit fencing will | ||
| 263 | * clamp rendering to the refresh rate of the slower screen, whereas explicit | ||
| 264 | * fence allows 2 independent render and display loops on a single buffer. If a | ||
| 265 | * driver obeys both implicit and explicit fences for plane updates, then | ||
| 266 | * it will break all the benefits of explicit fencing. | ||
| 267 | */ | ||
| 268 | void | ||
| 269 | drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, | ||
| 270 | struct dma_fence *fence) | ||
| 271 | { | ||
| 272 | if (plane_state->fence) { | ||
| 273 | dma_fence_put(fence); | ||
| 274 | return; | ||
| 275 | } | ||
| 276 | |||
| 277 | plane_state->fence = fence; | ||
| 278 | } | ||
| 279 | EXPORT_SYMBOL(drm_atomic_set_fence_for_plane); | ||
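The precedence rule described above usually materializes in a driver's prepare_fb hook: feed only the implicit fence through this helper and let it be dropped when userspace already supplied an explicit one. The sketch below assumes a hypothetical driver helper, my_driver_get_implicit_fence(), standing in for however the driver extracts a referenced exclusive fence from the framebuffer's backing storage.

	/*
	 * Hedged sketch of a driver prepare_fb hook; my_driver_get_implicit_fence()
	 * is a made-up placeholder, not a real API.
	 */
	static int example_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
	{
		struct dma_fence *fence;

		if (!new_state->fb)
			return 0;

		fence = my_driver_get_implicit_fence(new_state->fb);

		/*
		 * If userspace already attached an explicit fence via IN_FENCE_FD,
		 * this drops our reference; otherwise it stores the implicit fence.
		 */
		drm_atomic_set_fence_for_plane(new_state, fence);

		return 0;
	}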
| 280 | |||
| 281 | /** | ||
| 282 | * drm_atomic_set_crtc_for_connector - set crtc for connector | ||
| 283 | * @conn_state: atomic state object for the connector | ||
| 284 | * @crtc: crtc to use for the connector | ||
| 285 | * | ||
| 286 | * Changing the assigned crtc for a connector requires us to grab the lock and | ||
| 287 | * state for the new crtc, as needed. This function takes care of all these | ||
| 288 | * details besides updating the pointer in the state object itself. | ||
| 289 | * | ||
| 290 | * Returns: | ||
| 291 | * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK | ||
| 292 | * then the w/w mutex code has detected a deadlock and the entire atomic | ||
| 293 | * sequence must be restarted. All other errors are fatal. | ||
| 294 | */ | ||
| 295 | int | ||
| 296 | drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, | ||
| 297 | struct drm_crtc *crtc) | ||
| 298 | { | ||
| 299 | struct drm_connector *connector = conn_state->connector; | ||
| 300 | struct drm_crtc_state *crtc_state; | ||
| 301 | |||
| 302 | if (conn_state->crtc == crtc) | ||
| 303 | return 0; | ||
| 304 | |||
| 305 | if (conn_state->crtc) { | ||
| 306 | crtc_state = drm_atomic_get_new_crtc_state(conn_state->state, | ||
| 307 | conn_state->crtc); | ||
| 308 | |||
| 309 | crtc_state->connector_mask &= | ||
| 310 | ~drm_connector_mask(conn_state->connector); | ||
| 311 | |||
| 312 | drm_connector_put(conn_state->connector); | ||
| 313 | conn_state->crtc = NULL; | ||
| 314 | } | ||
| 315 | |||
| 316 | if (crtc) { | ||
| 317 | crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc); | ||
| 318 | if (IS_ERR(crtc_state)) | ||
| 319 | return PTR_ERR(crtc_state); | ||
| 320 | |||
| 321 | crtc_state->connector_mask |= | ||
| 322 | drm_connector_mask(conn_state->connector); | ||
| 323 | |||
| 324 | drm_connector_get(conn_state->connector); | ||
| 325 | conn_state->crtc = crtc; | ||
| 326 | |||
| 327 | DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n", | ||
| 328 | connector->base.id, connector->name, | ||
| 329 | conn_state, crtc->base.id, crtc->name); | ||
| 330 | } else { | ||
| 331 | DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n", | ||
| 332 | connector->base.id, connector->name, | ||
| 333 | conn_state); | ||
| 334 | } | ||
| 335 | |||
| 336 | return 0; | ||
| 337 | } | ||
| 338 | EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector); | ||
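Together with drm_atomic_set_mode_for_crtc() earlier in this file, this helper is what kernel-internal modesets (fbdev emulation, load detection and similar) use to route a connector to a CRTC. A rough sketch follows; the function name is invented, plane setup is assumed to happen elsewhere, and the usual EDEADLK retry loop around the caller is omitted.

	#include <drm/drm_atomic.h>
	#include <drm/drm_atomic_uapi.h>

	/* Illustrative only: enable @connector on @crtc with @mode in one atomic state. */
	static int example_enable_output(struct drm_atomic_state *state,
					 struct drm_connector *connector,
					 struct drm_crtc *crtc,
					 const struct drm_display_mode *mode)
	{
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;
		int ret;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state))
			return PTR_ERR(conn_state);

		/* Grabs the CRTC state/lock and updates connector_mask for us. */
		ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
		if (ret)
			return ret;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
		if (ret)
			return ret;
		crtc_state->active = true;

		return drm_atomic_commit(state);
	}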
| 339 | |||
| 340 | static void set_out_fence_for_crtc(struct drm_atomic_state *state, | ||
| 341 | struct drm_crtc *crtc, s32 __user *fence_ptr) | ||
| 342 | { | ||
| 343 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; | ||
| 344 | } | ||
| 345 | |||
| 346 | static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, | ||
| 347 | struct drm_crtc *crtc) | ||
| 348 | { | ||
| 349 | s32 __user *fence_ptr; | ||
| 350 | |||
| 351 | fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; | ||
| 352 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; | ||
| 353 | |||
| 354 | return fence_ptr; | ||
| 355 | } | ||
| 356 | |||
| 357 | static int set_out_fence_for_connector(struct drm_atomic_state *state, | ||
| 358 | struct drm_connector *connector, | ||
| 359 | s32 __user *fence_ptr) | ||
| 360 | { | ||
| 361 | unsigned int index = drm_connector_index(connector); | ||
| 362 | |||
| 363 | if (!fence_ptr) | ||
| 364 | return 0; | ||
| 365 | |||
| 366 | if (put_user(-1, fence_ptr)) | ||
| 367 | return -EFAULT; | ||
| 368 | |||
| 369 | state->connectors[index].out_fence_ptr = fence_ptr; | ||
| 370 | |||
| 371 | return 0; | ||
| 372 | } | ||
| 373 | |||
| 374 | static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state, | ||
| 375 | struct drm_connector *connector) | ||
| 376 | { | ||
| 377 | unsigned int index = drm_connector_index(connector); | ||
| 378 | s32 __user *fence_ptr; | ||
| 379 | |||
| 380 | fence_ptr = state->connectors[index].out_fence_ptr; | ||
| 381 | state->connectors[index].out_fence_ptr = NULL; | ||
| 382 | |||
| 383 | return fence_ptr; | ||
| 384 | } | ||
| 385 | |||
| 386 | static int | ||
| 387 | drm_atomic_replace_property_blob_from_id(struct drm_device *dev, | ||
| 388 | struct drm_property_blob **blob, | ||
| 389 | uint64_t blob_id, | ||
| 390 | ssize_t expected_size, | ||
| 391 | ssize_t expected_elem_size, | ||
| 392 | bool *replaced) | ||
| 393 | { | ||
| 394 | struct drm_property_blob *new_blob = NULL; | ||
| 395 | |||
| 396 | if (blob_id != 0) { | ||
| 397 | new_blob = drm_property_lookup_blob(dev, blob_id); | ||
| 398 | if (new_blob == NULL) | ||
| 399 | return -EINVAL; | ||
| 400 | |||
| 401 | if (expected_size > 0 && | ||
| 402 | new_blob->length != expected_size) { | ||
| 403 | drm_property_blob_put(new_blob); | ||
| 404 | return -EINVAL; | ||
| 405 | } | ||
| 406 | if (expected_elem_size > 0 && | ||
| 407 | new_blob->length % expected_elem_size != 0) { | ||
| 408 | drm_property_blob_put(new_blob); | ||
| 409 | return -EINVAL; | ||
| 410 | } | ||
| 411 | } | ||
| 412 | |||
| 413 | *replaced |= drm_property_replace_blob(blob, new_blob); | ||
| 414 | drm_property_blob_put(new_blob); | ||
| 415 | |||
| 416 | return 0; | ||
| 417 | } | ||
| 418 | |||
| 419 | static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, | ||
| 420 | struct drm_crtc_state *state, struct drm_property *property, | ||
| 421 | uint64_t val) | ||
| 422 | { | ||
| 423 | struct drm_device *dev = crtc->dev; | ||
| 424 | struct drm_mode_config *config = &dev->mode_config; | ||
| 425 | bool replaced = false; | ||
| 426 | int ret; | ||
| 427 | |||
| 428 | if (property == config->prop_active) | ||
| 429 | state->active = val; | ||
| 430 | else if (property == config->prop_mode_id) { | ||
| 431 | struct drm_property_blob *mode = | ||
| 432 | drm_property_lookup_blob(dev, val); | ||
| 433 | ret = drm_atomic_set_mode_prop_for_crtc(state, mode); | ||
| 434 | drm_property_blob_put(mode); | ||
| 435 | return ret; | ||
| 436 | } else if (property == config->degamma_lut_property) { | ||
| 437 | ret = drm_atomic_replace_property_blob_from_id(dev, | ||
| 438 | &state->degamma_lut, | ||
| 439 | val, | ||
| 440 | -1, sizeof(struct drm_color_lut), | ||
| 441 | &replaced); | ||
| 442 | state->color_mgmt_changed |= replaced; | ||
| 443 | return ret; | ||
| 444 | } else if (property == config->ctm_property) { | ||
| 445 | ret = drm_atomic_replace_property_blob_from_id(dev, | ||
| 446 | &state->ctm, | ||
| 447 | val, | ||
| 448 | sizeof(struct drm_color_ctm), -1, | ||
| 449 | &replaced); | ||
| 450 | state->color_mgmt_changed |= replaced; | ||
| 451 | return ret; | ||
| 452 | } else if (property == config->gamma_lut_property) { | ||
| 453 | ret = drm_atomic_replace_property_blob_from_id(dev, | ||
| 454 | &state->gamma_lut, | ||
| 455 | val, | ||
| 456 | -1, sizeof(struct drm_color_lut), | ||
| 457 | &replaced); | ||
| 458 | state->color_mgmt_changed |= replaced; | ||
| 459 | return ret; | ||
| 460 | } else if (property == config->prop_out_fence_ptr) { | ||
| 461 | s32 __user *fence_ptr = u64_to_user_ptr(val); | ||
| 462 | |||
| 463 | if (!fence_ptr) | ||
| 464 | return 0; | ||
| 465 | |||
| 466 | if (put_user(-1, fence_ptr)) | ||
| 467 | return -EFAULT; | ||
| 468 | |||
| 469 | set_out_fence_for_crtc(state->state, crtc, fence_ptr); | ||
| 470 | } else if (crtc->funcs->atomic_set_property) { | ||
| 471 | return crtc->funcs->atomic_set_property(crtc, state, property, val); | ||
| 472 | } else { | ||
| 473 | DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n", | ||
| 474 | crtc->base.id, crtc->name, | ||
| 475 | property->base.id, property->name); | ||
| 476 | return -EINVAL; | ||
| 477 | } | ||
| 478 | |||
| 479 | return 0; | ||
| 480 | } | ||
| 481 | |||
| 482 | static int | ||
| 483 | drm_atomic_crtc_get_property(struct drm_crtc *crtc, | ||
| 484 | const struct drm_crtc_state *state, | ||
| 485 | struct drm_property *property, uint64_t *val) | ||
| 486 | { | ||
| 487 | struct drm_device *dev = crtc->dev; | ||
| 488 | struct drm_mode_config *config = &dev->mode_config; | ||
| 489 | |||
| 490 | if (property == config->prop_active) | ||
| 491 | *val = state->active; | ||
| 492 | else if (property == config->prop_mode_id) | ||
| 493 | *val = (state->mode_blob) ? state->mode_blob->base.id : 0; | ||
| 494 | else if (property == config->degamma_lut_property) | ||
| 495 | *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; | ||
| 496 | else if (property == config->ctm_property) | ||
| 497 | *val = (state->ctm) ? state->ctm->base.id : 0; | ||
| 498 | else if (property == config->gamma_lut_property) | ||
| 499 | *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0; | ||
| 500 | else if (property == config->prop_out_fence_ptr) | ||
| 501 | *val = 0; | ||
| 502 | else if (crtc->funcs->atomic_get_property) | ||
| 503 | return crtc->funcs->atomic_get_property(crtc, state, property, val); | ||
| 504 | else | ||
| 505 | return -EINVAL; | ||
| 506 | |||
| 507 | return 0; | ||
| 508 | } | ||
| 509 | |||
| 510 | static int drm_atomic_plane_set_property(struct drm_plane *plane, | ||
| 511 | struct drm_plane_state *state, struct drm_property *property, | ||
| 512 | uint64_t val) | ||
| 513 | { | ||
| 514 | struct drm_device *dev = plane->dev; | ||
| 515 | struct drm_mode_config *config = &dev->mode_config; | ||
| 516 | |||
| 517 | if (property == config->prop_fb_id) { | ||
| 518 | struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); | ||
| 519 | drm_atomic_set_fb_for_plane(state, fb); | ||
| 520 | if (fb) | ||
| 521 | drm_framebuffer_put(fb); | ||
| 522 | } else if (property == config->prop_in_fence_fd) { | ||
| 523 | if (state->fence) | ||
| 524 | return -EINVAL; | ||
| 525 | |||
| 526 | if (U642I64(val) == -1) | ||
| 527 | return 0; | ||
| 528 | |||
| 529 | state->fence = sync_file_get_fence(val); | ||
| 530 | if (!state->fence) | ||
| 531 | return -EINVAL; | ||
| 532 | |||
| 533 | } else if (property == config->prop_crtc_id) { | ||
| 534 | struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val); | ||
| 535 | return drm_atomic_set_crtc_for_plane(state, crtc); | ||
| 536 | } else if (property == config->prop_crtc_x) { | ||
| 537 | state->crtc_x = U642I64(val); | ||
| 538 | } else if (property == config->prop_crtc_y) { | ||
| 539 | state->crtc_y = U642I64(val); | ||
| 540 | } else if (property == config->prop_crtc_w) { | ||
| 541 | state->crtc_w = val; | ||
| 542 | } else if (property == config->prop_crtc_h) { | ||
| 543 | state->crtc_h = val; | ||
| 544 | } else if (property == config->prop_src_x) { | ||
| 545 | state->src_x = val; | ||
| 546 | } else if (property == config->prop_src_y) { | ||
| 547 | state->src_y = val; | ||
| 548 | } else if (property == config->prop_src_w) { | ||
| 549 | state->src_w = val; | ||
| 550 | } else if (property == config->prop_src_h) { | ||
| 551 | state->src_h = val; | ||
| 552 | } else if (property == plane->alpha_property) { | ||
| 553 | state->alpha = val; | ||
| 554 | } else if (property == plane->blend_mode_property) { | ||
| 555 | state->pixel_blend_mode = val; | ||
| 556 | } else if (property == plane->rotation_property) { | ||
| 557 | if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) { | ||
| 558 | DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n", | ||
| 559 | plane->base.id, plane->name, val); | ||
| 560 | return -EINVAL; | ||
| 561 | } | ||
| 562 | state->rotation = val; | ||
| 563 | } else if (property == plane->zpos_property) { | ||
| 564 | state->zpos = val; | ||
| 565 | } else if (property == plane->color_encoding_property) { | ||
| 566 | state->color_encoding = val; | ||
| 567 | } else if (property == plane->color_range_property) { | ||
| 568 | state->color_range = val; | ||
| 569 | } else if (plane->funcs->atomic_set_property) { | ||
| 570 | return plane->funcs->atomic_set_property(plane, state, | ||
| 571 | property, val); | ||
| 572 | } else { | ||
| 573 | DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]\n", | ||
| 574 | plane->base.id, plane->name, | ||
| 575 | property->base.id, property->name); | ||
| 576 | return -EINVAL; | ||
| 577 | } | ||
| 578 | |||
| 579 | return 0; | ||
| 580 | } | ||
| 581 | |||
| 582 | static int | ||
| 583 | drm_atomic_plane_get_property(struct drm_plane *plane, | ||
| 584 | const struct drm_plane_state *state, | ||
| 585 | struct drm_property *property, uint64_t *val) | ||
| 586 | { | ||
| 587 | struct drm_device *dev = plane->dev; | ||
| 588 | struct drm_mode_config *config = &dev->mode_config; | ||
| 589 | |||
| 590 | if (property == config->prop_fb_id) { | ||
| 591 | *val = (state->fb) ? state->fb->base.id : 0; | ||
| 592 | } else if (property == config->prop_in_fence_fd) { | ||
| 593 | *val = -1; | ||
| 594 | } else if (property == config->prop_crtc_id) { | ||
| 595 | *val = (state->crtc) ? state->crtc->base.id : 0; | ||
| 596 | } else if (property == config->prop_crtc_x) { | ||
| 597 | *val = I642U64(state->crtc_x); | ||
| 598 | } else if (property == config->prop_crtc_y) { | ||
| 599 | *val = I642U64(state->crtc_y); | ||
| 600 | } else if (property == config->prop_crtc_w) { | ||
| 601 | *val = state->crtc_w; | ||
| 602 | } else if (property == config->prop_crtc_h) { | ||
| 603 | *val = state->crtc_h; | ||
| 604 | } else if (property == config->prop_src_x) { | ||
| 605 | *val = state->src_x; | ||
| 606 | } else if (property == config->prop_src_y) { | ||
| 607 | *val = state->src_y; | ||
| 608 | } else if (property == config->prop_src_w) { | ||
| 609 | *val = state->src_w; | ||
| 610 | } else if (property == config->prop_src_h) { | ||
| 611 | *val = state->src_h; | ||
| 612 | } else if (property == plane->alpha_property) { | ||
| 613 | *val = state->alpha; | ||
| 614 | } else if (property == plane->blend_mode_property) { | ||
| 615 | *val = state->pixel_blend_mode; | ||
| 616 | } else if (property == plane->rotation_property) { | ||
| 617 | *val = state->rotation; | ||
| 618 | } else if (property == plane->zpos_property) { | ||
| 619 | *val = state->zpos; | ||
| 620 | } else if (property == plane->color_encoding_property) { | ||
| 621 | *val = state->color_encoding; | ||
| 622 | } else if (property == plane->color_range_property) { | ||
| 623 | *val = state->color_range; | ||
| 624 | } else if (plane->funcs->atomic_get_property) { | ||
| 625 | return plane->funcs->atomic_get_property(plane, state, property, val); | ||
| 626 | } else { | ||
| 627 | return -EINVAL; | ||
| 628 | } | ||
| 629 | |||
| 630 | return 0; | ||
| 631 | } | ||
| 632 | |||
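Driver-private plane properties fall through to the plane->funcs->atomic_set_property/atomic_get_property hooks used above. A hedged driver-side sketch of such a pair, assuming a hypothetical "foo" driver that subclasses the plane and plane state to carry a private sharpness property (all names below are illustrative, not an existing driver API):

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <drm/drm_plane.h>
	#include <drm/drm_property.h>

	struct foo_plane {
		struct drm_plane base;
		struct drm_property *sharpness_property;	/* hypothetical */
	};

	struct foo_plane_state {
		struct drm_plane_state base;
		u32 sharpness;
	};

	#define to_foo_plane(p) container_of(p, struct foo_plane, base)
	#define to_foo_plane_state(s) container_of(s, struct foo_plane_state, base)

	static int foo_plane_atomic_set_property(struct drm_plane *plane,
						 struct drm_plane_state *state,
						 struct drm_property *property,
						 uint64_t val)
	{
		struct foo_plane *fp = to_foo_plane(plane);

		if (property == fp->sharpness_property) {
			to_foo_plane_state(state)->sharpness = val;
			return 0;
		}

		return -EINVAL;	/* unknown property, mirrors the core's fallback */
	}

	static int foo_plane_atomic_get_property(struct drm_plane *plane,
						 const struct drm_plane_state *state,
						 struct drm_property *property,
						 uint64_t *val)
	{
		struct foo_plane *fp = to_foo_plane(plane);

		if (property == fp->sharpness_property) {
			*val = to_foo_plane_state((struct drm_plane_state *)state)->sharpness;
			return 0;
		}

		return -EINVAL;
	}

Both callbacks are wired up through &drm_plane_funcs, so the dispatchers above only call them for properties the core does not recognise itself.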
| 633 | static struct drm_writeback_job * | ||
| 634 | drm_atomic_get_writeback_job(struct drm_connector_state *conn_state) | ||
| 635 | { | ||
| 636 | WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); | ||
| 637 | |||
| 638 | if (!conn_state->writeback_job) | ||
| 639 | conn_state->writeback_job = | ||
| 640 | kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL); | ||
| 641 | |||
| 642 | return conn_state->writeback_job; | ||
| 643 | } | ||
| 644 | |||
| 645 | static int drm_atomic_set_writeback_fb_for_connector( | ||
| 646 | struct drm_connector_state *conn_state, | ||
| 647 | struct drm_framebuffer *fb) | ||
| 648 | { | ||
| 649 | struct drm_writeback_job *job = | ||
| 650 | drm_atomic_get_writeback_job(conn_state); | ||
| 651 | if (!job) | ||
| 652 | return -ENOMEM; | ||
| 653 | |||
| 654 | drm_framebuffer_assign(&job->fb, fb); | ||
| 655 | |||
| 656 | if (fb) | ||
| 657 | DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n", | ||
| 658 | fb->base.id, conn_state); | ||
| 659 | else | ||
| 660 | DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n", | ||
| 661 | conn_state); | ||
| 662 | |||
| 663 | return 0; | ||
| 664 | } | ||
| 665 | |||
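Writeback connectors consume the framebuffer attached here exactly once per commit. A hedged userspace sketch (libdrm) that queues one capture and collects the per-job out-fence; the connector/CRTC IDs and the WRITEBACK_FB_ID, CRTC_ID and WRITEBACK_OUT_FENCE_PTR property IDs are assumed to be resolved elsewhere:

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* Sketch only: returns the out-fence fd on success, a negative
	 * errno-style value on failure. */
	static int queue_writeback(int fd, uint32_t wb_conn_id, uint32_t crtc_id,
				   uint32_t fb_id, uint32_t prop_wb_fb_id,
				   uint32_t prop_crtc_id, uint32_t prop_out_fence)
	{
		drmModeAtomicReq *req = drmModeAtomicAlloc();
		int32_t out_fence = -1;
		int ret;

		if (!req)
			return -ENOMEM;

		drmModeAtomicAddProperty(req, wb_conn_id, prop_wb_fb_id, fb_id);
		drmModeAtomicAddProperty(req, wb_conn_id, prop_crtc_id, crtc_id);
		drmModeAtomicAddProperty(req, wb_conn_id, prop_out_fence,
					 (uint64_t)(uintptr_t)&out_fence);

		ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
		drmModeAtomicFree(req);

		/* The fd signals once this single frame has been written back;
		 * WRITEBACK_FB_ID itself always reads back as 0 (one-shot). */
		return ret ? ret : out_fence;
	}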
| 666 | static int drm_atomic_connector_set_property(struct drm_connector *connector, | ||
| 667 | struct drm_connector_state *state, struct drm_property *property, | ||
| 668 | uint64_t val) | ||
| 669 | { | ||
| 670 | struct drm_device *dev = connector->dev; | ||
| 671 | struct drm_mode_config *config = &dev->mode_config; | ||
| 672 | |||
| 673 | if (property == config->prop_crtc_id) { | ||
| 674 | struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val); | ||
| 675 | return drm_atomic_set_crtc_for_connector(state, crtc); | ||
| 676 | } else if (property == config->dpms_property) { | ||
| 677 | /* setting DPMS property requires special handling, which | ||
| 678 | * is done in legacy setprop path for us. Disallow (for | ||
| 679 | * now?) atomic writes to DPMS property: | ||
| 680 | */ | ||
| 681 | return -EINVAL; | ||
| 682 | } else if (property == config->tv_select_subconnector_property) { | ||
| 683 | state->tv.subconnector = val; | ||
| 684 | } else if (property == config->tv_left_margin_property) { | ||
| 685 | state->tv.margins.left = val; | ||
| 686 | } else if (property == config->tv_right_margin_property) { | ||
| 687 | state->tv.margins.right = val; | ||
| 688 | } else if (property == config->tv_top_margin_property) { | ||
| 689 | state->tv.margins.top = val; | ||
| 690 | } else if (property == config->tv_bottom_margin_property) { | ||
| 691 | state->tv.margins.bottom = val; | ||
| 692 | } else if (property == config->tv_mode_property) { | ||
| 693 | state->tv.mode = val; | ||
| 694 | } else if (property == config->tv_brightness_property) { | ||
| 695 | state->tv.brightness = val; | ||
| 696 | } else if (property == config->tv_contrast_property) { | ||
| 697 | state->tv.contrast = val; | ||
| 698 | } else if (property == config->tv_flicker_reduction_property) { | ||
| 699 | state->tv.flicker_reduction = val; | ||
| 700 | } else if (property == config->tv_overscan_property) { | ||
| 701 | state->tv.overscan = val; | ||
| 702 | } else if (property == config->tv_saturation_property) { | ||
| 703 | state->tv.saturation = val; | ||
| 704 | } else if (property == config->tv_hue_property) { | ||
| 705 | state->tv.hue = val; | ||
| 706 | } else if (property == config->link_status_property) { | ||
| 707 | /* Never downgrade from GOOD to BAD on userspace's request here; | ||
| 708 | * only hw issues can do that. | ||
| 709 | * | ||
| 710 | * For an atomic property the userspace doesn't need to be able | ||
| 711 | * to understand all the properties, but needs to be able to | ||
| 712 | * restore the state it wants on VT switch. So if the userspace | ||
| 713 | * tries to change the link_status from GOOD to BAD, the driver | ||
| 714 | * silently rejects it and returns 0. This prevents userspace | ||
| 715 | * from accidentally breaking the display when it restores the | ||
| 716 | * state. | ||
| 717 | */ | ||
| 718 | if (state->link_status != DRM_LINK_STATUS_GOOD) | ||
| 719 | state->link_status = val; | ||
| 720 | } else if (property == config->aspect_ratio_property) { | ||
| 721 | state->picture_aspect_ratio = val; | ||
| 722 | } else if (property == config->content_type_property) { | ||
| 723 | state->content_type = val; | ||
| 724 | } else if (property == connector->scaling_mode_property) { | ||
| 725 | state->scaling_mode = val; | ||
| 726 | } else if (property == connector->content_protection_property) { | ||
| 727 | if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) { | ||
| 728 | DRM_DEBUG_KMS("only drivers can set CP Enabled\n"); | ||
| 729 | return -EINVAL; | ||
| 730 | } | ||
| 731 | state->content_protection = val; | ||
| 732 | } else if (property == config->writeback_fb_id_property) { | ||
| 733 | struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); | ||
| 734 | int ret = drm_atomic_set_writeback_fb_for_connector(state, fb); | ||
| 735 | if (fb) | ||
| 736 | drm_framebuffer_put(fb); | ||
| 737 | return ret; | ||
| 738 | } else if (property == config->writeback_out_fence_ptr_property) { | ||
| 739 | s32 __user *fence_ptr = u64_to_user_ptr(val); | ||
| 740 | |||
| 741 | return set_out_fence_for_connector(state->state, connector, | ||
| 742 | fence_ptr); | ||
| 743 | } else if (connector->funcs->atomic_set_property) { | ||
| 744 | return connector->funcs->atomic_set_property(connector, | ||
| 745 | state, property, val); | ||
| 746 | } else { | ||
| 747 | DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n", | ||
| 748 | connector->base.id, connector->name, | ||
| 749 | property->base.id, property->name); | ||
| 750 | return -EINVAL; | ||
| 751 | } | ||
| 752 | |||
| 753 | return 0; | ||
| 754 | } | ||
| 755 | |||
| 756 | static int | ||
| 757 | drm_atomic_connector_get_property(struct drm_connector *connector, | ||
| 758 | const struct drm_connector_state *state, | ||
| 759 | struct drm_property *property, uint64_t *val) | ||
| 760 | { | ||
| 761 | struct drm_device *dev = connector->dev; | ||
| 762 | struct drm_mode_config *config = &dev->mode_config; | ||
| 763 | |||
| 764 | if (property == config->prop_crtc_id) { | ||
| 765 | *val = (state->crtc) ? state->crtc->base.id : 0; | ||
| 766 | } else if (property == config->dpms_property) { | ||
| 767 | *val = connector->dpms; | ||
| 768 | } else if (property == config->tv_select_subconnector_property) { | ||
| 769 | *val = state->tv.subconnector; | ||
| 770 | } else if (property == config->tv_left_margin_property) { | ||
| 771 | *val = state->tv.margins.left; | ||
| 772 | } else if (property == config->tv_right_margin_property) { | ||
| 773 | *val = state->tv.margins.right; | ||
| 774 | } else if (property == config->tv_top_margin_property) { | ||
| 775 | *val = state->tv.margins.top; | ||
| 776 | } else if (property == config->tv_bottom_margin_property) { | ||
| 777 | *val = state->tv.margins.bottom; | ||
| 778 | } else if (property == config->tv_mode_property) { | ||
| 779 | *val = state->tv.mode; | ||
| 780 | } else if (property == config->tv_brightness_property) { | ||
| 781 | *val = state->tv.brightness; | ||
| 782 | } else if (property == config->tv_contrast_property) { | ||
| 783 | *val = state->tv.contrast; | ||
| 784 | } else if (property == config->tv_flicker_reduction_property) { | ||
| 785 | *val = state->tv.flicker_reduction; | ||
| 786 | } else if (property == config->tv_overscan_property) { | ||
| 787 | *val = state->tv.overscan; | ||
| 788 | } else if (property == config->tv_saturation_property) { | ||
| 789 | *val = state->tv.saturation; | ||
| 790 | } else if (property == config->tv_hue_property) { | ||
| 791 | *val = state->tv.hue; | ||
| 792 | } else if (property == config->link_status_property) { | ||
| 793 | *val = state->link_status; | ||
| 794 | } else if (property == config->aspect_ratio_property) { | ||
| 795 | *val = state->picture_aspect_ratio; | ||
| 796 | } else if (property == config->content_type_property) { | ||
| 797 | *val = state->content_type; | ||
| 798 | } else if (property == connector->scaling_mode_property) { | ||
| 799 | *val = state->scaling_mode; | ||
| 800 | } else if (property == connector->content_protection_property) { | ||
| 801 | *val = state->content_protection; | ||
| 802 | } else if (property == config->writeback_fb_id_property) { | ||
| 803 | /* Writeback framebuffer is one-shot, write and forget */ | ||
| 804 | *val = 0; | ||
| 805 | } else if (property == config->writeback_out_fence_ptr_property) { | ||
| 806 | *val = 0; | ||
| 807 | } else if (connector->funcs->atomic_get_property) { | ||
| 808 | return connector->funcs->atomic_get_property(connector, | ||
| 809 | state, property, val); | ||
| 810 | } else { | ||
| 811 | return -EINVAL; | ||
| 812 | } | ||
| 813 | |||
| 814 | return 0; | ||
| 815 | } | ||
| 816 | |||
| 817 | int drm_atomic_get_property(struct drm_mode_object *obj, | ||
| 818 | struct drm_property *property, uint64_t *val) | ||
| 819 | { | ||
| 820 | struct drm_device *dev = property->dev; | ||
| 821 | int ret; | ||
| 822 | |||
| 823 | switch (obj->type) { | ||
| 824 | case DRM_MODE_OBJECT_CONNECTOR: { | ||
| 825 | struct drm_connector *connector = obj_to_connector(obj); | ||
| 826 | WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); | ||
| 827 | ret = drm_atomic_connector_get_property(connector, | ||
| 828 | connector->state, property, val); | ||
| 829 | break; | ||
| 830 | } | ||
| 831 | case DRM_MODE_OBJECT_CRTC: { | ||
| 832 | struct drm_crtc *crtc = obj_to_crtc(obj); | ||
| 833 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); | ||
| 834 | ret = drm_atomic_crtc_get_property(crtc, | ||
| 835 | crtc->state, property, val); | ||
| 836 | break; | ||
| 837 | } | ||
| 838 | case DRM_MODE_OBJECT_PLANE: { | ||
| 839 | struct drm_plane *plane = obj_to_plane(obj); | ||
| 840 | WARN_ON(!drm_modeset_is_locked(&plane->mutex)); | ||
| 841 | ret = drm_atomic_plane_get_property(plane, | ||
| 842 | plane->state, property, val); | ||
| 843 | break; | ||
| 844 | } | ||
| 845 | default: | ||
| 846 | ret = -EINVAL; | ||
| 847 | break; | ||
| 848 | } | ||
| 849 | |||
| 850 | return ret; | ||
| 851 | } | ||
| 852 | |||
| 853 | /* | ||
| 854 | * The big monster ioctl | ||
| 855 | */ | ||
| 856 | |||
| 857 | static struct drm_pending_vblank_event *create_vblank_event( | ||
| 858 | struct drm_crtc *crtc, uint64_t user_data) | ||
| 859 | { | ||
| 860 | struct drm_pending_vblank_event *e = NULL; | ||
| 861 | |||
| 862 | e = kzalloc(sizeof *e, GFP_KERNEL); | ||
| 863 | if (!e) | ||
| 864 | return NULL; | ||
| 865 | |||
| 866 | e->event.base.type = DRM_EVENT_FLIP_COMPLETE; | ||
| 867 | e->event.base.length = sizeof(e->event); | ||
| 868 | e->event.vbl.crtc_id = crtc->base.id; | ||
| 869 | e->event.vbl.user_data = user_data; | ||
| 870 | |||
| 871 | return e; | ||
| 872 | } | ||
| 873 | |||
| 874 | int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, | ||
| 875 | struct drm_connector *connector, | ||
| 876 | int mode) | ||
| 877 | { | ||
| 878 | struct drm_connector *tmp_connector; | ||
| 879 | struct drm_connector_state *new_conn_state; | ||
| 880 | struct drm_crtc *crtc; | ||
| 881 | struct drm_crtc_state *crtc_state; | ||
| 882 | int i, ret, old_mode = connector->dpms; | ||
| 883 | bool active = false; | ||
| 884 | |||
| 885 | ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, | ||
| 886 | state->acquire_ctx); | ||
| 887 | if (ret) | ||
| 888 | return ret; | ||
| 889 | |||
| 890 | if (mode != DRM_MODE_DPMS_ON) | ||
| 891 | mode = DRM_MODE_DPMS_OFF; | ||
| 892 | connector->dpms = mode; | ||
| 893 | |||
| 894 | crtc = connector->state->crtc; | ||
| 895 | if (!crtc) | ||
| 896 | goto out; | ||
| 897 | ret = drm_atomic_add_affected_connectors(state, crtc); | ||
| 898 | if (ret) | ||
| 899 | goto out; | ||
| 900 | |||
| 901 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | ||
| 902 | if (IS_ERR(crtc_state)) { | ||
| 903 | ret = PTR_ERR(crtc_state); | ||
| 904 | goto out; | ||
| 905 | } | ||
| 906 | |||
| 907 | for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) { | ||
| 908 | if (new_conn_state->crtc != crtc) | ||
| 909 | continue; | ||
| 910 | if (tmp_connector->dpms == DRM_MODE_DPMS_ON) { | ||
| 911 | active = true; | ||
| 912 | break; | ||
| 913 | } | ||
| 914 | } | ||
| 915 | |||
| 916 | crtc_state->active = active; | ||
| 917 | ret = drm_atomic_commit(state); | ||
| 918 | out: | ||
| 919 | if (ret != 0) | ||
| 920 | connector->dpms = old_mode; | ||
| 921 | return ret; | ||
| 922 | } | ||
| 923 | |||
| 924 | int drm_atomic_set_property(struct drm_atomic_state *state, | ||
| 925 | struct drm_mode_object *obj, | ||
| 926 | struct drm_property *prop, | ||
| 927 | uint64_t prop_value) | ||
| 928 | { | ||
| 929 | struct drm_mode_object *ref; | ||
| 930 | int ret; | ||
| 931 | |||
| 932 | if (!drm_property_change_valid_get(prop, prop_value, &ref)) | ||
| 933 | return -EINVAL; | ||
| 934 | |||
| 935 | switch (obj->type) { | ||
| 936 | case DRM_MODE_OBJECT_CONNECTOR: { | ||
| 937 | struct drm_connector *connector = obj_to_connector(obj); | ||
| 938 | struct drm_connector_state *connector_state; | ||
| 939 | |||
| 940 | connector_state = drm_atomic_get_connector_state(state, connector); | ||
| 941 | if (IS_ERR(connector_state)) { | ||
| 942 | ret = PTR_ERR(connector_state); | ||
| 943 | break; | ||
| 944 | } | ||
| 945 | |||
| 946 | ret = drm_atomic_connector_set_property(connector, | ||
| 947 | connector_state, prop, prop_value); | ||
| 948 | break; | ||
| 949 | } | ||
| 950 | case DRM_MODE_OBJECT_CRTC: { | ||
| 951 | struct drm_crtc *crtc = obj_to_crtc(obj); | ||
| 952 | struct drm_crtc_state *crtc_state; | ||
| 953 | |||
| 954 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | ||
| 955 | if (IS_ERR(crtc_state)) { | ||
| 956 | ret = PTR_ERR(crtc_state); | ||
| 957 | break; | ||
| 958 | } | ||
| 959 | |||
| 960 | ret = drm_atomic_crtc_set_property(crtc, | ||
| 961 | crtc_state, prop, prop_value); | ||
| 962 | break; | ||
| 963 | } | ||
| 964 | case DRM_MODE_OBJECT_PLANE: { | ||
| 965 | struct drm_plane *plane = obj_to_plane(obj); | ||
| 966 | struct drm_plane_state *plane_state; | ||
| 967 | |||
| 968 | plane_state = drm_atomic_get_plane_state(state, plane); | ||
| 969 | if (IS_ERR(plane_state)) { | ||
| 970 | ret = PTR_ERR(plane_state); | ||
| 971 | break; | ||
| 972 | } | ||
| 973 | |||
| 974 | ret = drm_atomic_plane_set_property(plane, | ||
| 975 | plane_state, prop, prop_value); | ||
| 976 | break; | ||
| 977 | } | ||
| 978 | default: | ||
| 979 | ret = -EINVAL; | ||
| 980 | break; | ||
| 981 | } | ||
| 982 | |||
| 983 | drm_property_change_valid_put(prop, ref); | ||
| 984 | return ret; | ||
| 985 | } | ||
| 986 | |||
| 987 | /** | ||
| 988 | * DOC: explicit fencing properties | ||
| 989 | * | ||
| 990 | * Explicit fencing allows userspace to control the buffer synchronization | ||
| 991 | * between devices. A fence or a group of fences is transferred to/from | ||
| 992 | * userspace using Sync File fds, and there are two DRM properties for that: | ||
| 993 | * IN_FENCE_FD on each DRM Plane to send fences to the kernel and | ||
| 994 | * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel. | ||
| 995 | * | ||
| 996 | * By contrast, with implicit fencing the kernel keeps track of any | ||
| 997 | * ongoing rendering, and automatically ensures that the atomic update waits | ||
| 998 | * for any pending rendering to complete. For shared buffers represented with | ||
| 999 | * a &struct dma_buf this is tracked in &struct reservation_object. | ||
| 1000 | * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), | ||
| 1001 | * whereas explicit fencing is what Android wants. | ||
| 1002 | * | ||
| 1003 | * "IN_FENCE_FD”: | ||
| 1004 | * Use this property to pass a fence that DRM should wait on before | ||
| 1005 | * proceeding with the Atomic Commit request and show the framebuffer for | ||
| 1006 | * the plane on the screen. The fence can be either a normal fence or a | ||
| 1007 | * merged one, the sync_file framework will handle both cases and use a | ||
| 1008 | * fence_array if a merged fence is received. Passing -1 here means no | ||
| 1009 | * fences to wait on. | ||
| 1010 | * | ||
| 1011 | * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag | ||
| 1012 | * it will only check if the Sync File is a valid one. | ||
| 1013 | * | ||
| 1014 | * On the driver side the fence is stored on the @fence parameter of | ||
| 1015 | * &struct drm_plane_state. Drivers which also support implicit fencing | ||
| 1016 | * should set the implicit fence using drm_atomic_set_fence_for_plane(), | ||
| 1017 | * to make sure there's consistent behaviour between drivers in precedence | ||
| 1018 | * of implicit vs. explicit fencing. | ||
| 1019 | * | ||
| 1020 | * "OUT_FENCE_PTR”: | ||
| 1021 | * Use this property to pass a file descriptor pointer to DRM. Once the | ||
| 1022 | * Atomic Commit request call returns, OUT_FENCE_PTR will be filled with | ||
| 1023 | * the file descriptor number of a Sync File. This Sync File contains the | ||
| 1024 | * CRTC fence that will be signaled when all framebuffers present on the | ||
| 1025 | * Atomic Commit request for that given CRTC are scanned out on the | ||
| 1026 | * screen. | ||
| 1027 | * | ||
| 1028 | * The Atomic Commit request fails if an invalid pointer is passed. If the | ||
| 1029 | * Atomic Commit request fails for any other reason, the out fence fd | ||
| 1030 | * returned will be -1. On an Atomic Commit with the | ||
| 1031 | * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1. | ||
| 1032 | * | ||
| 1033 | * Note that out-fences don't have a special interface to drivers and are | ||
| 1034 | * internally represented by a &struct drm_pending_vblank_event in struct | ||
| 1035 | * &drm_crtc_state, which is also used by the nonblocking atomic commit | ||
| 1036 | * helpers and for the DRM event handling for existing userspace. | ||
| 1037 | */ | ||
| 1038 | |||
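A hedged userspace sketch tying both properties together (libdrm; plane/CRTC handles and property IDs are assumed to be resolved elsewhere, and the in-fence comes from whatever rendering API exported a sync_file):

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	static int flip_with_fences(int fd, uint32_t plane_id, uint32_t crtc_id,
				    uint32_t fb_id, uint32_t prop_fb_id,
				    uint32_t prop_in_fence, uint32_t prop_out_fence,
				    int in_fence_fd, int *out_fence_fd)
	{
		drmModeAtomicReq *req = drmModeAtomicAlloc();
		int32_t out_fence = -1;
		int ret;

		if (!req)
			return -ENOMEM;

		drmModeAtomicAddProperty(req, plane_id, prop_fb_id, fb_id);
		/* IN_FENCE_FD: scanout of fb_id waits for this sync_file. */
		drmModeAtomicAddProperty(req, plane_id, prop_in_fence, in_fence_fd);
		/* OUT_FENCE_PTR: the kernel writes a fresh sync_file fd here. */
		drmModeAtomicAddProperty(req, crtc_id, prop_out_fence,
					 (uint64_t)(uintptr_t)&out_fence);

		ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
		drmModeAtomicFree(req);

		if (!ret)
			*out_fence_fd = out_fence; /* signals when fb_id hits the screen */
		return ret;
	}

On failure the kernel rewrites -1 through OUT_FENCE_PTR (see complete_signaling() below), so out_fence is only meaningful when the commit succeeded.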
| 1039 | struct drm_out_fence_state { | ||
| 1040 | s32 __user *out_fence_ptr; | ||
| 1041 | struct sync_file *sync_file; | ||
| 1042 | int fd; | ||
| 1043 | }; | ||
| 1044 | |||
| 1045 | static int setup_out_fence(struct drm_out_fence_state *fence_state, | ||
| 1046 | struct dma_fence *fence) | ||
| 1047 | { | ||
| 1048 | fence_state->fd = get_unused_fd_flags(O_CLOEXEC); | ||
| 1049 | if (fence_state->fd < 0) | ||
| 1050 | return fence_state->fd; | ||
| 1051 | |||
| 1052 | if (put_user(fence_state->fd, fence_state->out_fence_ptr)) | ||
| 1053 | return -EFAULT; | ||
| 1054 | |||
| 1055 | fence_state->sync_file = sync_file_create(fence); | ||
| 1056 | if (!fence_state->sync_file) | ||
| 1057 | return -ENOMEM; | ||
| 1058 | |||
| 1059 | return 0; | ||
| 1060 | } | ||
| 1061 | |||
| 1062 | static int prepare_signaling(struct drm_device *dev, | ||
| 1063 | struct drm_atomic_state *state, | ||
| 1064 | struct drm_mode_atomic *arg, | ||
| 1065 | struct drm_file *file_priv, | ||
| 1066 | struct drm_out_fence_state **fence_state, | ||
| 1067 | unsigned int *num_fences) | ||
| 1068 | { | ||
| 1069 | struct drm_crtc *crtc; | ||
| 1070 | struct drm_crtc_state *crtc_state; | ||
| 1071 | struct drm_connector *conn; | ||
| 1072 | struct drm_connector_state *conn_state; | ||
| 1073 | int i, c = 0, ret; | ||
| 1074 | |||
| 1075 | if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) | ||
| 1076 | return 0; | ||
| 1077 | |||
| 1078 | for_each_new_crtc_in_state(state, crtc, crtc_state, i) { | ||
| 1079 | s32 __user *fence_ptr; | ||
| 1080 | |||
| 1081 | fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); | ||
| 1082 | |||
| 1083 | if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) { | ||
| 1084 | struct drm_pending_vblank_event *e; | ||
| 1085 | |||
| 1086 | e = create_vblank_event(crtc, arg->user_data); | ||
| 1087 | if (!e) | ||
| 1088 | return -ENOMEM; | ||
| 1089 | |||
| 1090 | crtc_state->event = e; | ||
| 1091 | } | ||
| 1092 | |||
| 1093 | if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { | ||
| 1094 | struct drm_pending_vblank_event *e = crtc_state->event; | ||
| 1095 | |||
| 1096 | if (!file_priv) | ||
| 1097 | continue; | ||
| 1098 | |||
| 1099 | ret = drm_event_reserve_init(dev, file_priv, &e->base, | ||
| 1100 | &e->event.base); | ||
| 1101 | if (ret) { | ||
| 1102 | kfree(e); | ||
| 1103 | crtc_state->event = NULL; | ||
| 1104 | return ret; | ||
| 1105 | } | ||
| 1106 | } | ||
| 1107 | |||
| 1108 | if (fence_ptr) { | ||
| 1109 | struct dma_fence *fence; | ||
| 1110 | struct drm_out_fence_state *f; | ||
| 1111 | |||
| 1112 | f = krealloc(*fence_state, sizeof(**fence_state) * | ||
| 1113 | (*num_fences + 1), GFP_KERNEL); | ||
| 1114 | if (!f) | ||
| 1115 | return -ENOMEM; | ||
| 1116 | |||
| 1117 | memset(&f[*num_fences], 0, sizeof(*f)); | ||
| 1118 | |||
| 1119 | f[*num_fences].out_fence_ptr = fence_ptr; | ||
| 1120 | *fence_state = f; | ||
| 1121 | |||
| 1122 | fence = drm_crtc_create_fence(crtc); | ||
| 1123 | if (!fence) | ||
| 1124 | return -ENOMEM; | ||
| 1125 | |||
| 1126 | ret = setup_out_fence(&f[(*num_fences)++], fence); | ||
| 1127 | if (ret) { | ||
| 1128 | dma_fence_put(fence); | ||
| 1129 | return ret; | ||
| 1130 | } | ||
| 1131 | |||
| 1132 | crtc_state->event->base.fence = fence; | ||
| 1133 | } | ||
| 1134 | |||
| 1135 | c++; | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | for_each_new_connector_in_state(state, conn, conn_state, i) { | ||
| 1139 | struct drm_writeback_connector *wb_conn; | ||
| 1140 | struct drm_writeback_job *job; | ||
| 1141 | struct drm_out_fence_state *f; | ||
| 1142 | struct dma_fence *fence; | ||
| 1143 | s32 __user *fence_ptr; | ||
| 1144 | |||
| 1145 | fence_ptr = get_out_fence_for_connector(state, conn); | ||
| 1146 | if (!fence_ptr) | ||
| 1147 | continue; | ||
| 1148 | |||
| 1149 | job = drm_atomic_get_writeback_job(conn_state); | ||
| 1150 | if (!job) | ||
| 1151 | return -ENOMEM; | ||
| 1152 | |||
| 1153 | f = krealloc(*fence_state, sizeof(**fence_state) * | ||
| 1154 | (*num_fences + 1), GFP_KERNEL); | ||
| 1155 | if (!f) | ||
| 1156 | return -ENOMEM; | ||
| 1157 | |||
| 1158 | memset(&f[*num_fences], 0, sizeof(*f)); | ||
| 1159 | |||
| 1160 | f[*num_fences].out_fence_ptr = fence_ptr; | ||
| 1161 | *fence_state = f; | ||
| 1162 | |||
| 1163 | wb_conn = drm_connector_to_writeback(conn); | ||
| 1164 | fence = drm_writeback_get_out_fence(wb_conn); | ||
| 1165 | if (!fence) | ||
| 1166 | return -ENOMEM; | ||
| 1167 | |||
| 1168 | ret = setup_out_fence(&f[(*num_fences)++], fence); | ||
| 1169 | if (ret) { | ||
| 1170 | dma_fence_put(fence); | ||
| 1171 | return ret; | ||
| 1172 | } | ||
| 1173 | |||
| 1174 | job->out_fence = fence; | ||
| 1175 | } | ||
| 1176 | |||
| 1177 | /* | ||
| 1178 | * Having this flag means userspace is waiting for an event that will | ||
| 1179 | * never arrive, because the commit touches no CRTC that could signal it | ||
| 1180 | */ | ||
| 1181 | if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) | ||
| 1182 | return -EINVAL; | ||
| 1183 | |||
| 1184 | return 0; | ||
| 1185 | } | ||
| 1186 | |||
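The DRM_MODE_PAGE_FLIP_EVENT half of prepare_signaling() queues one &drm_pending_vblank_event per CRTC; userspace usually consumes it through libdrm's event loop. A hedged sketch:

	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* crtc_id tells us which CRTC completed its flip; user_data is
	 * whatever the commit passed as arg->user_data. */
	static void page_flip_handler2(int fd, unsigned int sequence,
				       unsigned int tv_sec, unsigned int tv_usec,
				       unsigned int crtc_id, void *user_data)
	{
		/* The previous frame's buffers can be reused or freed now. */
	}

	static void wait_for_flip(int fd)
	{
		drmEventContext ev = {
			.version = 3,	/* page_flip_handler2 needs context v3+ */
			.page_flip_handler2 = page_flip_handler2,
		};

		/* Blocks until the DRM_EVENT_FLIP_COMPLETE event built by
		 * create_vblank_event() is read back from the DRM fd. */
		drmHandleEvent(fd, &ev);
	}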
| 1187 | static void complete_signaling(struct drm_device *dev, | ||
| 1188 | struct drm_atomic_state *state, | ||
| 1189 | struct drm_out_fence_state *fence_state, | ||
| 1190 | unsigned int num_fences, | ||
| 1191 | bool install_fds) | ||
| 1192 | { | ||
| 1193 | struct drm_crtc *crtc; | ||
| 1194 | struct drm_crtc_state *crtc_state; | ||
| 1195 | int i; | ||
| 1196 | |||
| 1197 | if (install_fds) { | ||
| 1198 | for (i = 0; i < num_fences; i++) | ||
| 1199 | fd_install(fence_state[i].fd, | ||
| 1200 | fence_state[i].sync_file->file); | ||
| 1201 | |||
| 1202 | kfree(fence_state); | ||
| 1203 | return; | ||
| 1204 | } | ||
| 1205 | |||
| 1206 | for_each_new_crtc_in_state(state, crtc, crtc_state, i) { | ||
| 1207 | struct drm_pending_vblank_event *event = crtc_state->event; | ||
| 1208 | /* | ||
| 1209 | * Free the allocated event. drm_atomic_helper_setup_commit | ||
| 1210 | * can allocate an event too, so only free it if it's ours | ||
| 1211 | * to prevent a double free in drm_atomic_state_clear. | ||
| 1212 | */ | ||
| 1213 | if (event && (event->base.fence || event->base.file_priv)) { | ||
| 1214 | drm_event_cancel_free(dev, &event->base); | ||
| 1215 | crtc_state->event = NULL; | ||
| 1216 | } | ||
| 1217 | } | ||
| 1218 | |||
| 1219 | if (!fence_state) | ||
| 1220 | return; | ||
| 1221 | |||
| 1222 | for (i = 0; i < num_fences; i++) { | ||
| 1223 | if (fence_state[i].sync_file) | ||
| 1224 | fput(fence_state[i].sync_file->file); | ||
| 1225 | if (fence_state[i].fd >= 0) | ||
| 1226 | put_unused_fd(fence_state[i].fd); | ||
| 1227 | |||
| 1228 | /* If this fails log error to the user */ | ||
| 1229 | if (fence_state[i].out_fence_ptr && | ||
| 1230 | put_user(-1, fence_state[i].out_fence_ptr)) | ||
| 1231 | DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n"); | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | kfree(fence_state); | ||
| 1235 | } | ||
| 1236 | |||
| 1237 | int drm_mode_atomic_ioctl(struct drm_device *dev, | ||
| 1238 | void *data, struct drm_file *file_priv) | ||
| 1239 | { | ||
| 1240 | struct drm_mode_atomic *arg = data; | ||
| 1241 | uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr); | ||
| 1242 | uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr); | ||
| 1243 | uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr); | ||
| 1244 | uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr); | ||
| 1245 | unsigned int copied_objs, copied_props; | ||
| 1246 | struct drm_atomic_state *state; | ||
| 1247 | struct drm_modeset_acquire_ctx ctx; | ||
| 1248 | struct drm_out_fence_state *fence_state; | ||
| 1249 | int ret = 0; | ||
| 1250 | unsigned int i, j, num_fences; | ||
| 1251 | |||
| 1252 | /* disallow for drivers not supporting atomic: */ | ||
| 1253 | if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) | ||
| 1254 | return -EINVAL; | ||
| 1255 | |||
| 1256 | /* disallow for userspace that has not enabled atomic cap (even | ||
| 1257 | * though this may be a bit overkill, since legacy userspace | ||
| 1258 | * wouldn't know how to call this ioctl) | ||
| 1259 | */ | ||
| 1260 | if (!file_priv->atomic) | ||
| 1261 | return -EINVAL; | ||
| 1262 | |||
| 1263 | if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) | ||
| 1264 | return -EINVAL; | ||
| 1265 | |||
| 1266 | if (arg->reserved) | ||
| 1267 | return -EINVAL; | ||
| 1268 | |||
| 1269 | if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) && | ||
| 1270 | !dev->mode_config.async_page_flip) | ||
| 1271 | return -EINVAL; | ||
| 1272 | |||
| 1273 | /* can't test and expect an event at the same time. */ | ||
| 1274 | if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) && | ||
| 1275 | (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) | ||
| 1276 | return -EINVAL; | ||
| 1277 | |||
| 1278 | drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); | ||
| 1279 | |||
| 1280 | state = drm_atomic_state_alloc(dev); | ||
| 1281 | if (!state) | ||
| 1282 | return -ENOMEM; | ||
| 1283 | |||
| 1284 | state->acquire_ctx = &ctx; | ||
| 1285 | state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); | ||
| 1286 | |||
| 1287 | retry: | ||
| 1288 | copied_objs = 0; | ||
| 1289 | copied_props = 0; | ||
| 1290 | fence_state = NULL; | ||
| 1291 | num_fences = 0; | ||
| 1292 | |||
| 1293 | for (i = 0; i < arg->count_objs; i++) { | ||
| 1294 | uint32_t obj_id, count_props; | ||
| 1295 | struct drm_mode_object *obj; | ||
| 1296 | |||
| 1297 | if (get_user(obj_id, objs_ptr + copied_objs)) { | ||
| 1298 | ret = -EFAULT; | ||
| 1299 | goto out; | ||
| 1300 | } | ||
| 1301 | |||
| 1302 | obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY); | ||
| 1303 | if (!obj) { | ||
| 1304 | ret = -ENOENT; | ||
| 1305 | goto out; | ||
| 1306 | } | ||
| 1307 | |||
| 1308 | if (!obj->properties) { | ||
| 1309 | drm_mode_object_put(obj); | ||
| 1310 | ret = -ENOENT; | ||
| 1311 | goto out; | ||
| 1312 | } | ||
| 1313 | |||
| 1314 | if (get_user(count_props, count_props_ptr + copied_objs)) { | ||
| 1315 | drm_mode_object_put(obj); | ||
| 1316 | ret = -EFAULT; | ||
| 1317 | goto out; | ||
| 1318 | } | ||
| 1319 | |||
| 1320 | copied_objs++; | ||
| 1321 | |||
| 1322 | for (j = 0; j < count_props; j++) { | ||
| 1323 | uint32_t prop_id; | ||
| 1324 | uint64_t prop_value; | ||
| 1325 | struct drm_property *prop; | ||
| 1326 | |||
| 1327 | if (get_user(prop_id, props_ptr + copied_props)) { | ||
| 1328 | drm_mode_object_put(obj); | ||
| 1329 | ret = -EFAULT; | ||
| 1330 | goto out; | ||
| 1331 | } | ||
| 1332 | |||
| 1333 | prop = drm_mode_obj_find_prop_id(obj, prop_id); | ||
| 1334 | if (!prop) { | ||
| 1335 | drm_mode_object_put(obj); | ||
| 1336 | ret = -ENOENT; | ||
| 1337 | goto out; | ||
| 1338 | } | ||
| 1339 | |||
| 1340 | if (copy_from_user(&prop_value, | ||
| 1341 | prop_values_ptr + copied_props, | ||
| 1342 | sizeof(prop_value))) { | ||
| 1343 | drm_mode_object_put(obj); | ||
| 1344 | ret = -EFAULT; | ||
| 1345 | goto out; | ||
| 1346 | } | ||
| 1347 | |||
| 1348 | ret = drm_atomic_set_property(state, obj, prop, | ||
| 1349 | prop_value); | ||
| 1350 | if (ret) { | ||
| 1351 | drm_mode_object_put(obj); | ||
| 1352 | goto out; | ||
| 1353 | } | ||
| 1354 | |||
| 1355 | copied_props++; | ||
| 1356 | } | ||
| 1357 | |||
| 1358 | drm_mode_object_put(obj); | ||
| 1359 | } | ||
| 1360 | |||
| 1361 | ret = prepare_signaling(dev, state, arg, file_priv, &fence_state, | ||
| 1362 | &num_fences); | ||
| 1363 | if (ret) | ||
| 1364 | goto out; | ||
| 1365 | |||
| 1366 | if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { | ||
| 1367 | ret = drm_atomic_check_only(state); | ||
| 1368 | } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { | ||
| 1369 | ret = drm_atomic_nonblocking_commit(state); | ||
| 1370 | } else { | ||
| 1371 | if (unlikely(drm_debug & DRM_UT_STATE)) | ||
| 1372 | drm_atomic_print_state(state); | ||
| 1373 | |||
| 1374 | ret = drm_atomic_commit(state); | ||
| 1375 | } | ||
| 1376 | |||
| 1377 | out: | ||
| 1378 | complete_signaling(dev, state, fence_state, num_fences, !ret); | ||
| 1379 | |||
| 1380 | if (ret == -EDEADLK) { | ||
| 1381 | drm_atomic_state_clear(state); | ||
| 1382 | ret = drm_modeset_backoff(&ctx); | ||
| 1383 | if (!ret) | ||
| 1384 | goto retry; | ||
| 1385 | } | ||
| 1386 | |||
| 1387 | drm_atomic_state_put(state); | ||
| 1388 | |||
| 1389 | drm_modeset_drop_locks(&ctx); | ||
| 1390 | drm_modeset_acquire_fini(&ctx); | ||
| 1391 | |||
| 1392 | return ret; | ||
| 1393 | } | ||
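The copy loop in drm_mode_atomic_ioctl() walks four parallel user arrays: one object ID and one property count per object, plus a flat list of property IDs and values. A hedged raw-ioctl sketch (no libdrm wrapper) of that layout for a single plane with two properties; it assumes DRM_CLIENT_CAP_ATOMIC was already enabled, matching the file_priv->atomic check above:

	#include <stdint.h>
	#include <xf86drm.h>	/* pulls in the drm.h/drm_mode.h uapi headers */

	static int atomic_commit_raw(int fd, uint32_t plane_id,
				     uint32_t prop_fb_id, uint32_t fb_id,
				     uint32_t prop_crtc_id, uint32_t crtc_id)
	{
		uint32_t objs[]        = { plane_id };
		uint32_t count_props[] = { 2 };	/* both props belong to objs[0] */
		uint32_t props[]       = { prop_fb_id, prop_crtc_id };
		uint64_t values[]      = { fb_id, crtc_id };

		struct drm_mode_atomic arg = {
			.flags           = DRM_MODE_ATOMIC_NONBLOCK,
			.count_objs      = 1,
			.objs_ptr        = (uintptr_t)objs,
			.count_props_ptr = (uintptr_t)count_props,
			.props_ptr       = (uintptr_t)props,
			.prop_values_ptr = (uintptr_t)values,
		};

		return drmIoctl(fd, DRM_IOCTL_MODE_ATOMIC, &arg);
	}

In practice most userspace goes through drmModeAtomicCommit(), which builds exactly this structure from the accumulated drmModeAtomicAddProperty() calls.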
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 402b62d3f072..0c78ca386cbe 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c | |||
| @@ -101,6 +101,28 @@ | |||
| 101 | * Without this property the rectangle is only scaled, but not rotated or | 101 | * Without this property the rectangle is only scaled, but not rotated or |
| 102 | * reflected. | 102 | * reflected. |
| 103 | * | 103 | * |
| 104 | * Possible values: | ||
| 105 | * | ||
| 106 | * "rotate-<degrees>": | ||
| 107 | * Signals that a drm plane is rotated <degrees> degrees in the | ||
| 108 | * counter-clockwise direction. | ||
| 109 | * | ||
| 110 | * "reflect-<axis>": | ||
| 111 | * Signals that the contents of a drm plane are reflected along the | ||
| 112 | * <axis> axis, in the same way as mirroring. | ||
| 113 | * | ||
| 114 | * reflect-x:: | ||
| 115 | * | ||
| 116 | * |o | | o| | ||
| 117 | * | | -> | | | ||
| 118 | * | v| |v | | ||
| 119 | * | ||
| 120 | * reflect-y:: | ||
| 121 | * | ||
| 122 | * |o | | ^| | ||
| 123 | * | | -> | | | ||
| 124 | * | v| |o | | ||
| 125 | * | ||
| 104 | * zpos: | 126 | * zpos: |
| 105 | * Z position is set up with drm_plane_create_zpos_immutable_property() and | 127 | * Z position is set up with drm_plane_create_zpos_immutable_property() and |
| 106 | * drm_plane_create_zpos_property(). It controls the visibility of overlapping | 128 | * drm_plane_create_zpos_property(). It controls the visibility of overlapping |
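The rotation values documented above are exposed per-plane as a bitmask property: exactly one DRM_MODE_ROTATE_* bit, optionally OR'ed with DRM_MODE_REFLECT_* bits (the is_power_of_2() check in drm_atomic_plane_set_property() enforces the former). A hedged driver-side sketch using the existing helper, with a placeholder function name:

	#include <drm/drm_blend.h>
	#include <drm/drm_plane.h>

	/* Sketch: advertise all four rotations plus X/Y reflection on a plane.
	 * Userspace can then request e.g. DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X. */
	static int foo_plane_init_rotation(struct drm_plane *plane)
	{
		return drm_plane_create_rotation_property(plane,
				DRM_MODE_ROTATE_0,
				DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
				DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
				DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
	}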
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 1638bfe9627c..ba7025041e46 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c | |||
| @@ -104,6 +104,10 @@ EXPORT_SYMBOL(drm_bridge_remove); | |||
| 104 | * If non-NULL the previous bridge must be already attached by a call to this | 104 | * If non-NULL the previous bridge must be already attached by a call to this |
| 105 | * function. | 105 | * function. |
| 106 | * | 106 | * |
| 107 | * Note that bridges attached to encoders are auto-detached during encoder | ||
| 108 | * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally | ||
| 109 | * *not* be balanced with a drm_bridge_detach() in driver code. | ||
| 110 | * | ||
| 107 | * RETURNS: | 111 | * RETURNS: |
| 108 | * Zero on success, error code on failure | 112 | * Zero on success, error code on failure |
| 109 | */ | 113 | */ |
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 6011d769d50b..526619f963e5 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c | |||
| @@ -20,11 +20,15 @@ | |||
| 20 | * OF THIS SOFTWARE. | 20 | * OF THIS SOFTWARE. |
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #include <drm/drmP.h> | ||
| 24 | #include <drm/drm_connector.h> | 23 | #include <drm/drm_connector.h> |
| 25 | #include <drm/drm_edid.h> | 24 | #include <drm/drm_edid.h> |
| 26 | #include <drm/drm_encoder.h> | 25 | #include <drm/drm_encoder.h> |
| 27 | #include <drm/drm_utils.h> | 26 | #include <drm/drm_utils.h> |
| 27 | #include <drm/drm_print.h> | ||
| 28 | #include <drm/drm_drv.h> | ||
| 29 | #include <drm/drm_file.h> | ||
| 30 | |||
| 31 | #include <linux/uaccess.h> | ||
| 28 | 32 | ||
| 29 | #include "drm_crtc_internal.h" | 33 | #include "drm_crtc_internal.h" |
| 30 | #include "drm_internal.h" | 34 | #include "drm_internal.h" |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index bae43938c8f6..2f6c877299e4 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
| 35 | #include <linux/export.h> | 35 | #include <linux/export.h> |
| 36 | #include <linux/dma-fence.h> | 36 | #include <linux/dma-fence.h> |
| 37 | #include <drm/drmP.h> | 37 | #include <linux/uaccess.h> |
| 38 | #include <drm/drm_crtc.h> | 38 | #include <drm/drm_crtc.h> |
| 39 | #include <drm/drm_edid.h> | 39 | #include <drm/drm_edid.h> |
| 40 | #include <drm/drm_fourcc.h> | 40 | #include <drm/drm_fourcc.h> |
| @@ -42,6 +42,9 @@ | |||
| 42 | #include <drm/drm_atomic.h> | 42 | #include <drm/drm_atomic.h> |
| 43 | #include <drm/drm_auth.h> | 43 | #include <drm/drm_auth.h> |
| 44 | #include <drm/drm_debugfs_crc.h> | 44 | #include <drm/drm_debugfs_crc.h> |
| 45 | #include <drm/drm_drv.h> | ||
| 46 | #include <drm/drm_print.h> | ||
| 47 | #include <drm/drm_file.h> | ||
| 45 | 48 | ||
| 46 | #include "drm_crtc_internal.h" | 49 | #include "drm_crtc_internal.h" |
| 47 | #include "drm_internal.h" | 50 | #include "drm_internal.h" |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 5a84c3bc915d..ce75e9506e85 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | 35 | ||
| 36 | #include <drm/drmP.h> | 36 | #include <drm/drmP.h> |
| 37 | #include <drm/drm_atomic.h> | 37 | #include <drm/drm_atomic.h> |
| 38 | #include <drm/drm_atomic_uapi.h> | ||
| 38 | #include <drm/drm_crtc.h> | 39 | #include <drm/drm_crtc.h> |
| 39 | #include <drm/drm_encoder.h> | 40 | #include <drm/drm_encoder.h> |
| 40 | #include <drm/drm_fourcc.h> | 41 | #include <drm/drm_fourcc.h> |
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index b61322763394..86893448f486 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h | |||
| @@ -31,6 +31,14 @@ | |||
| 31 | * and are not exported to drivers. | 31 | * and are not exported to drivers. |
| 32 | */ | 32 | */ |
| 33 | 33 | ||
| 34 | enum drm_mode_status; | ||
| 35 | enum drm_connector_force; | ||
| 36 | |||
| 37 | struct drm_display_mode; | ||
| 38 | struct work_struct; | ||
| 39 | struct drm_connector; | ||
| 40 | struct drm_bridge; | ||
| 41 | struct edid; | ||
| 34 | 42 | ||
| 35 | /* drm_crtc.c */ | 43 | /* drm_crtc.c */ |
| 36 | int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj, | 44 | int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj, |
| @@ -174,6 +182,8 @@ void drm_fb_release(struct drm_file *file_priv); | |||
| 174 | 182 | ||
| 175 | int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or, | 183 | int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or, |
| 176 | struct drm_file *file_priv); | 184 | struct drm_file *file_priv); |
| 185 | int drm_mode_addfb2(struct drm_device *dev, | ||
| 186 | void *data, struct drm_file *file_priv); | ||
| 177 | int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, | 187 | int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, |
| 178 | struct drm_file *file_priv); | 188 | struct drm_file *file_priv); |
| 179 | 189 | ||
| @@ -181,8 +191,8 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, | |||
| 181 | /* IOCTL */ | 191 | /* IOCTL */ |
| 182 | int drm_mode_addfb_ioctl(struct drm_device *dev, | 192 | int drm_mode_addfb_ioctl(struct drm_device *dev, |
| 183 | void *data, struct drm_file *file_priv); | 193 | void *data, struct drm_file *file_priv); |
| 184 | int drm_mode_addfb2(struct drm_device *dev, | 194 | int drm_mode_addfb2_ioctl(struct drm_device *dev, |
| 185 | void *data, struct drm_file *file_priv); | 195 | void *data, struct drm_file *file_priv); |
| 186 | int drm_mode_rmfb_ioctl(struct drm_device *dev, | 196 | int drm_mode_rmfb_ioctl(struct drm_device *dev, |
| 187 | void *data, struct drm_file *file_priv); | 197 | void *data, struct drm_file *file_priv); |
| 188 | int drm_mode_getfb(struct drm_device *dev, | 198 | int drm_mode_getfb(struct drm_device *dev, |
| @@ -196,6 +206,9 @@ struct drm_minor; | |||
| 196 | int drm_atomic_debugfs_init(struct drm_minor *minor); | 206 | int drm_atomic_debugfs_init(struct drm_minor *minor); |
| 197 | #endif | 207 | #endif |
| 198 | 208 | ||
| 209 | void drm_atomic_print_state(const struct drm_atomic_state *state); | ||
| 210 | |||
| 211 | /* drm_atomic_uapi.c */ | ||
| 199 | int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, | 212 | int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, |
| 200 | struct drm_connector *connector, | 213 | struct drm_connector *connector, |
| 201 | int mode); | 214 | int mode); |
| @@ -205,6 +218,8 @@ int drm_atomic_set_property(struct drm_atomic_state *state, | |||
| 205 | uint64_t prop_value); | 218 | uint64_t prop_value); |
| 206 | int drm_atomic_get_property(struct drm_mode_object *obj, | 219 | int drm_atomic_get_property(struct drm_mode_object *obj, |
| 207 | struct drm_property *property, uint64_t *val); | 220 | struct drm_property *property, uint64_t *val); |
| 221 | |||
| 222 | /* IOCTL */ | ||
| 208 | int drm_mode_atomic_ioctl(struct drm_device *dev, | 223 | int drm_mode_atomic_ioctl(struct drm_device *dev, |
| 209 | void *data, struct drm_file *file_priv); | 224 | void *data, struct drm_file *file_priv); |
| 210 | 225 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 4b0dd20bccb8..8e95d0f7c71d 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -2673,6 +2673,8 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper, | |||
| 2673 | 2673 | ||
| 2674 | info = fb_helper->fbdev; | 2674 | info = fb_helper->fbdev; |
| 2675 | info->var.pixclock = 0; | 2675 | info->var.pixclock = 0; |
| 2676 | /* don't leak any physical addresses to userspace */ | ||
| 2677 | info->flags |= FBINFO_HIDE_SMEM_START; | ||
| 2676 | 2678 | ||
| 2677 | /* Need to drop locks to avoid recursive deadlock in | 2679 | /* Need to drop locks to avoid recursive deadlock in |
| 2678 | * register_framebuffer. This is ok because the only thing left to do is | 2680 | * register_framebuffer. This is ok because the only thing left to do is |
| @@ -2821,7 +2823,9 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event); | |||
| 2821 | * The caller must provide a &drm_fb_helper_funcs->fb_probe callback | 2823 | * The caller must provide a &drm_fb_helper_funcs->fb_probe callback |
| 2822 | * function. | 2824 | * function. |
| 2823 | * | 2825 | * |
| 2824 | * See also: drm_fb_helper_initial_config() | 2826 | * Use drm_fb_helper_fbdev_teardown() to destroy the fbdev. |
| 2827 | * | ||
| 2828 | * See also: drm_fb_helper_initial_config(), drm_fbdev_generic_setup(). | ||
| 2825 | * | 2829 | * |
| 2826 | * Returns: | 2830 | * Returns: |
| 2827 | * Zero on success or negative error code on failure. | 2831 | * Zero on success or negative error code on failure. |
| @@ -3037,7 +3041,7 @@ static struct fb_deferred_io drm_fbdev_defio = { | |||
| 3037 | * @fb_helper: fbdev helper structure | 3041 | * @fb_helper: fbdev helper structure |
| 3038 | * @sizes: describes fbdev size and scanout surface size | 3042 | * @sizes: describes fbdev size and scanout surface size |
| 3039 | * | 3043 | * |
| 3040 | * This function uses the client API to crate a framebuffer backed by a dumb buffer. | 3044 | * This function uses the client API to create a framebuffer backed by a dumb buffer. |
| 3041 | * | 3045 | * |
| 3042 | * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect, | 3046 | * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect, |
| 3043 | * fb_copyarea, fb_imageblit. | 3047 | * fb_copyarea, fb_imageblit. |
| @@ -3165,8 +3169,10 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client) | |||
| 3165 | if (dev->fb_helper) | 3169 | if (dev->fb_helper) |
| 3166 | return drm_fb_helper_hotplug_event(dev->fb_helper); | 3170 | return drm_fb_helper_hotplug_event(dev->fb_helper); |
| 3167 | 3171 | ||
| 3168 | if (!dev->mode_config.num_connector) | 3172 | if (!dev->mode_config.num_connector) { |
| 3173 | DRM_DEV_DEBUG(dev->dev, "No connectors found, will not create framebuffer!\n"); | ||
| 3169 | return 0; | 3174 | return 0; |
| 3175 | } | ||
| 3170 | 3176 | ||
| 3171 | ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs, | 3177 | ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs, |
| 3172 | fb_helper->preferred_bpp, 0); | 3178 | fb_helper->preferred_bpp, 0); |
| @@ -3187,13 +3193,14 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = { | |||
| 3187 | }; | 3193 | }; |
| 3188 | 3194 | ||
| 3189 | /** | 3195 | /** |
| 3190 | * drm_fb_helper_generic_fbdev_setup() - Setup generic fbdev emulation | 3196 | * drm_fbdev_generic_setup() - Setup generic fbdev emulation |
| 3191 | * @dev: DRM device | 3197 | * @dev: DRM device |
| 3192 | * @preferred_bpp: Preferred bits per pixel for the device. | 3198 | * @preferred_bpp: Preferred bits per pixel for the device. |
| 3193 | * @dev->mode_config.preferred_depth is used if this is zero. | 3199 | * @dev->mode_config.preferred_depth is used if this is zero. |
| 3194 | * | 3200 | * |
| 3195 | * This function sets up generic fbdev emulation for drivers that support | 3201 | * This function sets up generic fbdev emulation for drivers that support |
| 3196 | * dumb buffers with a virtual address and that can be mmap'ed. | 3202 | * dumb buffers with a virtual address and that can be mmap'ed. If the driver |
| 3203 | * does not support these functions, it could use drm_fb_helper_fbdev_setup(). | ||
| 3197 | * | 3204 | * |
| 3198 | * Restore, hotplug events and teardown are all taken care of. Drivers that do | 3205 | * Restore, hotplug events and teardown are all taken care of. Drivers that do |
| 3199 | * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. | 3206 | * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. |
| @@ -3206,6 +3213,8 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = { | |||
| 3206 | * This function is safe to call even when there are no connectors present. | 3213 | * This function is safe to call even when there are no connectors present. |
| 3207 | * Setup will be retried on the next hotplug event. | 3214 | * Setup will be retried on the next hotplug event. |
| 3208 | * | 3215 | * |
| 3216 | * The fbdev is destroyed by drm_dev_unregister(). | ||
| 3217 | * | ||
| 3209 | * Returns: | 3218 | * Returns: |
| 3210 | * Zero on success or negative error code on failure. | 3219 | * Zero on success or negative error code on failure. |
| 3211 | */ | 3220 | */ |
| @@ -3214,6 +3223,8 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) | |||
| 3214 | struct drm_fb_helper *fb_helper; | 3223 | struct drm_fb_helper *fb_helper; |
| 3215 | int ret; | 3224 | int ret; |
| 3216 | 3225 | ||
| 3226 | WARN(dev->fb_helper, "fb_helper is already set!\n"); | ||
| 3227 | |||
| 3217 | if (!drm_fbdev_emulation) | 3228 | if (!drm_fbdev_emulation) |
| 3218 | return 0; | 3229 | return 0; |
| 3219 | 3230 | ||
| @@ -3224,12 +3235,15 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) | |||
| 3224 | ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); | 3235 | ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); |
| 3225 | if (ret) { | 3236 | if (ret) { |
| 3226 | kfree(fb_helper); | 3237 | kfree(fb_helper); |
| 3238 | DRM_DEV_ERROR(dev->dev, "Failed to register client: %d\n", ret); | ||
| 3227 | return ret; | 3239 | return ret; |
| 3228 | } | 3240 | } |
| 3229 | 3241 | ||
| 3230 | fb_helper->preferred_bpp = preferred_bpp; | 3242 | fb_helper->preferred_bpp = preferred_bpp; |
| 3231 | 3243 | ||
| 3232 | drm_fbdev_client_hotplug(&fb_helper->client); | 3244 | ret = drm_fbdev_client_hotplug(&fb_helper->client); |
| 3245 | if (ret) | ||
| 3246 | DRM_DEV_DEBUG(dev->dev, "client hotplug ret=%d\n", ret); | ||
| 3233 | 3247 | ||
| 3234 | return 0; | 3248 | return 0; |
| 3235 | } | 3249 | } |
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 35c1e2742c27..be1d6aaef651 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c | |||
| @@ -45,32 +45,49 @@ static char printable_char(int c) | |||
| 45 | */ | 45 | */ |
| 46 | uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) | 46 | uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) |
| 47 | { | 47 | { |
| 48 | uint32_t fmt; | 48 | uint32_t fmt = DRM_FORMAT_INVALID; |
| 49 | 49 | ||
| 50 | switch (bpp) { | 50 | switch (bpp) { |
| 51 | case 8: | 51 | case 8: |
| 52 | fmt = DRM_FORMAT_C8; | 52 | if (depth == 8) |
| 53 | fmt = DRM_FORMAT_C8; | ||
| 53 | break; | 54 | break; |
| 55 | |||
| 54 | case 16: | 56 | case 16: |
| 55 | if (depth == 15) | 57 | switch (depth) { |
| 58 | case 15: | ||
| 56 | fmt = DRM_FORMAT_XRGB1555; | 59 | fmt = DRM_FORMAT_XRGB1555; |
| 57 | else | 60 | break; |
| 61 | case 16: | ||
| 58 | fmt = DRM_FORMAT_RGB565; | 62 | fmt = DRM_FORMAT_RGB565; |
| 63 | break; | ||
| 64 | default: | ||
| 65 | break; | ||
| 66 | } | ||
| 59 | break; | 67 | break; |
| 68 | |||
| 60 | case 24: | 69 | case 24: |
| 61 | fmt = DRM_FORMAT_RGB888; | 70 | if (depth == 24) |
| 71 | fmt = DRM_FORMAT_RGB888; | ||
| 62 | break; | 72 | break; |
| 73 | |||
| 63 | case 32: | 74 | case 32: |
| 64 | if (depth == 24) | 75 | switch (depth) { |
| 76 | case 24: | ||
| 65 | fmt = DRM_FORMAT_XRGB8888; | 77 | fmt = DRM_FORMAT_XRGB8888; |
| 66 | else if (depth == 30) | 78 | break; |
| 79 | case 30: | ||
| 67 | fmt = DRM_FORMAT_XRGB2101010; | 80 | fmt = DRM_FORMAT_XRGB2101010; |
| 68 | else | 81 | break; |
| 82 | case 32: | ||
| 69 | fmt = DRM_FORMAT_ARGB8888; | 83 | fmt = DRM_FORMAT_ARGB8888; |
| 84 | break; | ||
| 85 | default: | ||
| 86 | break; | ||
| 87 | } | ||
| 70 | break; | 88 | break; |
| 89 | |||
| 71 | default: | 90 | default: |
| 72 | DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n"); | ||
| 73 | fmt = DRM_FORMAT_XRGB8888; | ||
| 74 | break; | 91 | break; |
| 75 | } | 92 | } |
| 76 | 93 | ||
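With this rework drm_mode_legacy_fb_format() only returns a format for bpp/depth pairs it actually recognises and hands back DRM_FORMAT_INVALID otherwise, instead of silently guessing XRGB8888. A minimal caller-side sketch of the new contract (placeholder function name):

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <drm/drm_fourcc.h>

	/* Sketch: reject unknown legacy bpp/depth combinations up front. */
	static int foo_check_legacy_format(u32 bpp, u32 depth)
	{
		u32 fmt = drm_mode_legacy_fb_format(bpp, depth);

		if (fmt == DRM_FORMAT_INVALID)	/* e.g. bpp=16, depth=24 */
			return -EINVAL;

		return 0;
	}

This is exactly the check drm_mode_addfb() grows in the drm_framebuffer.c hunk below.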
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 781af1d42d76..6eaacd4eb8cc 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <drm/drm_auth.h> | 25 | #include <drm/drm_auth.h> |
| 26 | #include <drm/drm_framebuffer.h> | 26 | #include <drm/drm_framebuffer.h> |
| 27 | #include <drm/drm_atomic.h> | 27 | #include <drm/drm_atomic.h> |
| 28 | #include <drm/drm_atomic_uapi.h> | ||
| 28 | #include <drm/drm_print.h> | 29 | #include <drm/drm_print.h> |
| 29 | 30 | ||
| 30 | #include "drm_internal.h" | 31 | #include "drm_internal.h" |
| @@ -112,18 +113,34 @@ int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or, | |||
| 112 | struct drm_mode_fb_cmd2 r = {}; | 113 | struct drm_mode_fb_cmd2 r = {}; |
| 113 | int ret; | 114 | int ret; |
| 114 | 115 | ||
| 116 | r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); | ||
| 117 | if (r.pixel_format == DRM_FORMAT_INVALID) { | ||
| 118 | DRM_DEBUG("bad {bpp:%d, depth:%d}\n", or->bpp, or->depth); | ||
| 119 | return -EINVAL; | ||
| 120 | } | ||
| 121 | |||
| 115 | /* convert to new format and call new ioctl */ | 122 | /* convert to new format and call new ioctl */ |
| 116 | r.fb_id = or->fb_id; | 123 | r.fb_id = or->fb_id; |
| 117 | r.width = or->width; | 124 | r.width = or->width; |
| 118 | r.height = or->height; | 125 | r.height = or->height; |
| 119 | r.pitches[0] = or->pitch; | 126 | r.pitches[0] = or->pitch; |
| 120 | r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); | ||
| 121 | r.handles[0] = or->handle; | 127 | r.handles[0] = or->handle; |
| 122 | 128 | ||
| 123 | if (r.pixel_format == DRM_FORMAT_XRGB2101010 && | 129 | if (dev->mode_config.quirk_addfb_prefer_xbgr_30bpp && |
| 124 | dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP) | 130 | r.pixel_format == DRM_FORMAT_XRGB2101010) |
| 125 | r.pixel_format = DRM_FORMAT_XBGR2101010; | 131 | r.pixel_format = DRM_FORMAT_XBGR2101010; |
| 126 | 132 | ||
| 133 | if (dev->mode_config.quirk_addfb_prefer_host_byte_order) { | ||
| 134 | if (r.pixel_format == DRM_FORMAT_XRGB8888) | ||
| 135 | r.pixel_format = DRM_FORMAT_HOST_XRGB8888; | ||
| 136 | if (r.pixel_format == DRM_FORMAT_ARGB8888) | ||
| 137 | r.pixel_format = DRM_FORMAT_HOST_ARGB8888; | ||
| 138 | if (r.pixel_format == DRM_FORMAT_RGB565) | ||
| 139 | r.pixel_format = DRM_FORMAT_HOST_RGB565; | ||
| 140 | if (r.pixel_format == DRM_FORMAT_XRGB1555) | ||
| 141 | r.pixel_format = DRM_FORMAT_HOST_XRGB1555; | ||
| 142 | } | ||
| 143 | |||
| 127 | ret = drm_mode_addfb2(dev, &r, file_priv); | 144 | ret = drm_mode_addfb2(dev, &r, file_priv); |
| 128 | if (ret) | 145 | if (ret) |
| 129 | return ret; | 146 | return ret; |
| @@ -164,7 +181,7 @@ static int framebuffer_check(struct drm_device *dev, | |||
| 164 | int i; | 181 | int i; |
| 165 | 182 | ||
| 166 | /* check if the format is supported at all */ | 183 | /* check if the format is supported at all */ |
| 167 | info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN); | 184 | info = __drm_format_info(r->pixel_format); |
| 168 | if (!info) { | 185 | if (!info) { |
| 169 | struct drm_format_name_buf format_name; | 186 | struct drm_format_name_buf format_name; |
| 170 | 187 | ||
| @@ -352,6 +369,30 @@ int drm_mode_addfb2(struct drm_device *dev, | |||
| 352 | return 0; | 369 | return 0; |
| 353 | } | 370 | } |
| 354 | 371 | ||
| 372 | int drm_mode_addfb2_ioctl(struct drm_device *dev, | ||
| 373 | void *data, struct drm_file *file_priv) | ||
| 374 | { | ||
| 375 | #ifdef __BIG_ENDIAN | ||
| 376 | if (!dev->mode_config.quirk_addfb_prefer_host_byte_order) { | ||
| 377 | /* | ||
| 378 | * Drivers must set the | ||
| 379 | * quirk_addfb_prefer_host_byte_order quirk to make | ||
| 380 | * the drm_mode_addfb() compat code work correctly on | ||
| 381 | * bigendian machines. | ||
| 382 | * | ||
| 383 | * If they don't, they interpret pixel_format values | ||
| 384 | * incorrectly for bug compatibility, which in turn | ||
| 385 | * implies the ADDFB2 ioctl does not work correctly | ||
| 386 | * either. So block it to make userspace fall back | ||
| 387 | * to ADDFB. | ||
| 388 | */ | ||
| 389 | DRM_DEBUG_KMS("addfb2 broken on bigendian\n"); | ||
| 390 | return -EINVAL; | ||
| 391 | } | ||
| 392 | #endif | ||
| 393 | return drm_mode_addfb2(dev, data, file_priv); | ||
| 394 | } | ||
| 395 | |||
| 355 | struct drm_mode_rmfb_work { | 396 | struct drm_mode_rmfb_work { |
| 356 | struct work_struct work; | 397 | struct work_struct work; |
| 357 | struct list_head fbs; | 398 | struct list_head fbs; |
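
drm_mode_addfb() now validates the legacy bpp/depth up front and consults two new mode_config quirks instead of the DRIVER_PREFER_XBGR_30BPP feature flag, while the new drm_mode_addfb2_ioctl() wrapper refuses ADDFB2 on big-endian hosts unless the host-byte-order quirk is set. A minimal sketch of how a driver opts in; foo_modeset_init() is a hypothetical name, only the two quirk assignments are taken from this patch:

#include <drm/drm_device.h>
#include <drm/drm_mode_config.h>

static int foo_modeset_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	/* Have the ADDFB compat code pick XBGR2101010 for 30 bpp. */
	dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;

	/* Map legacy bpp/depth to DRM_FORMAT_HOST_* so ADDFB2 keeps working on big-endian. */
	dev->mode_config.quirk_addfb_prefer_host_byte_order = true;

	return 0;
}

Nouveau flips the first quirk the same way further down in this series.
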
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 2810d4131411..7607f9cd6f77 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | 16 | ||
| 17 | #include <drm/drmP.h> | 17 | #include <drm/drmP.h> |
| 18 | #include <drm/drm_atomic.h> | 18 | #include <drm/drm_atomic.h> |
| 19 | #include <drm/drm_atomic_uapi.h> | ||
| 19 | #include <drm/drm_fb_helper.h> | 20 | #include <drm/drm_fb_helper.h> |
| 20 | #include <drm/drm_fourcc.h> | 21 | #include <drm/drm_fourcc.h> |
| 21 | #include <drm/drm_framebuffer.h> | 22 | #include <drm/drm_framebuffer.h> |
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 40179c5fc6b8..0c4eb4a9ab31 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h | |||
| @@ -21,9 +21,14 @@ | |||
| 21 | * OTHER DEALINGS IN THE SOFTWARE. | 21 | * OTHER DEALINGS IN THE SOFTWARE. |
| 22 | */ | 22 | */ |
| 23 | 23 | ||
| 24 | #include <drm/drm_ioctl.h> | ||
| 25 | |||
| 24 | #define DRM_IF_MAJOR 1 | 26 | #define DRM_IF_MAJOR 1 |
| 25 | #define DRM_IF_MINOR 4 | 27 | #define DRM_IF_MINOR 4 |
| 26 | 28 | ||
| 29 | struct drm_prime_file_private; | ||
| 30 | struct dma_buf; | ||
| 31 | |||
| 27 | /* drm_file.c */ | 32 | /* drm_file.c */ |
| 28 | extern struct mutex drm_global_mutex; | 33 | extern struct mutex drm_global_mutex; |
| 29 | struct drm_file *drm_file_alloc(struct drm_minor *minor); | 34 | struct drm_file *drm_file_alloc(struct drm_minor *minor); |
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index ea10e9a26aad..6b4a633b4240 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c | |||
| @@ -645,7 +645,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { | |||
| 645 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED), | 645 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED), |
| 646 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED), | 646 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED), |
| 647 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED), | 647 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED), |
| 648 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED), | 648 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, DRM_UNLOCKED), |
| 649 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED), | 649 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED), |
| 650 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED), | 650 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED), |
| 651 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED), | 651 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED), |
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 6153cbda239f..4a72c6829d73 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c | |||
| @@ -20,8 +20,17 @@ | |||
| 20 | * OF THIS SOFTWARE. | 20 | * OF THIS SOFTWARE. |
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #include <drm/drmP.h> | 23 | #include <linux/slab.h> |
| 24 | #include <linux/uaccess.h> | ||
| 25 | |||
| 24 | #include <drm/drm_plane.h> | 26 | #include <drm/drm_plane.h> |
| 27 | #include <drm/drm_drv.h> | ||
| 28 | #include <drm/drm_print.h> | ||
| 29 | #include <drm/drm_framebuffer.h> | ||
| 30 | #include <drm/drm_file.h> | ||
| 31 | #include <drm/drm_crtc.h> | ||
| 32 | #include <drm/drm_fourcc.h> | ||
| 33 | #include <drm/drm_vblank.h> | ||
| 25 | 34 | ||
| 26 | #include "drm_crtc_internal.h" | 35 | #include "drm_crtc_internal.h" |
| 27 | 36 | ||
| @@ -463,7 +472,6 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data, | |||
| 463 | struct drm_file *file_priv) | 472 | struct drm_file *file_priv) |
| 464 | { | 473 | { |
| 465 | struct drm_mode_get_plane_res *plane_resp = data; | 474 | struct drm_mode_get_plane_res *plane_resp = data; |
| 466 | struct drm_mode_config *config; | ||
| 467 | struct drm_plane *plane; | 475 | struct drm_plane *plane; |
| 468 | uint32_t __user *plane_ptr; | 476 | uint32_t __user *plane_ptr; |
| 469 | int count = 0; | 477 | int count = 0; |
| @@ -471,7 +479,6 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data, | |||
| 471 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 479 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
| 472 | return -EINVAL; | 480 | return -EINVAL; |
| 473 | 481 | ||
| 474 | config = &dev->mode_config; | ||
| 475 | plane_ptr = u64_to_user_ptr(plane_resp->plane_id_ptr); | 482 | plane_ptr = u64_to_user_ptr(plane_resp->plane_id_ptr); |
| 476 | 483 | ||
| 477 | /* | 484 | /* |
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 621f17643bb0..a393756b664e 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <drm/drm_plane_helper.h> | 28 | #include <drm/drm_plane_helper.h> |
| 29 | #include <drm/drm_rect.h> | 29 | #include <drm/drm_rect.h> |
| 30 | #include <drm/drm_atomic.h> | 30 | #include <drm/drm_atomic.h> |
| 31 | #include <drm/drm_atomic_uapi.h> | ||
| 31 | #include <drm/drm_crtc_helper.h> | 32 | #include <drm/drm_crtc_helper.h> |
| 32 | #include <drm/drm_encoder.h> | 33 | #include <drm/drm_encoder.h> |
| 33 | #include <drm/drm_atomic_helper.h> | 34 | #include <drm/drm_atomic_helper.h> |
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 3a8837c49639..e9ce623d049e 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c | |||
| @@ -56,6 +56,33 @@ | |||
| 56 | #include "drm_internal.h" | 56 | #include "drm_internal.h" |
| 57 | #include <drm/drm_syncobj.h> | 57 | #include <drm/drm_syncobj.h> |
| 58 | 58 | ||
| 59 | struct drm_syncobj_stub_fence { | ||
| 60 | struct dma_fence base; | ||
| 61 | spinlock_t lock; | ||
| 62 | }; | ||
| 63 | |||
| 64 | static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence) | ||
| 65 | { | ||
| 66 | return "syncobjstub"; | ||
| 67 | } | ||
| 68 | |||
| 69 | static bool drm_syncobj_stub_fence_enable_signaling(struct dma_fence *fence) | ||
| 70 | { | ||
| 71 | return !dma_fence_is_signaled(fence); | ||
| 72 | } | ||
| 73 | |||
| 74 | static void drm_syncobj_stub_fence_release(struct dma_fence *f) | ||
| 75 | { | ||
| 76 | kfree(f); | ||
| 77 | } | ||
| 78 | static const struct dma_fence_ops drm_syncobj_stub_fence_ops = { | ||
| 79 | .get_driver_name = drm_syncobj_stub_fence_get_name, | ||
| 80 | .get_timeline_name = drm_syncobj_stub_fence_get_name, | ||
| 81 | .enable_signaling = drm_syncobj_stub_fence_enable_signaling, | ||
| 82 | .release = drm_syncobj_stub_fence_release, | ||
| 83 | }; | ||
| 84 | |||
| 85 | |||
| 59 | /** | 86 | /** |
| 60 | * drm_syncobj_find - lookup and reference a sync object. | 87 | * drm_syncobj_find - lookup and reference a sync object. |
| 61 | * @file_private: drm file private pointer | 88 | * @file_private: drm file private pointer |
| @@ -140,11 +167,13 @@ void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, | |||
| 140 | /** | 167 | /** |
| 141 | * drm_syncobj_replace_fence - replace fence in a sync object. | 168 | * drm_syncobj_replace_fence - replace fence in a sync object. |
| 142 | * @syncobj: Sync object to replace fence in | 169 | * @syncobj: Sync object to replace fence in |
| 170 | * @point: timeline point | ||
| 143 | * @fence: fence to install in sync file. | 171 | * @fence: fence to install in sync file. |
| 144 | * | 172 | * |
| 145 | * This replaces the fence on a sync object. | 173 | * This replaces the fence on a sync object, or the fence at a timeline point. |
| 146 | */ | 174 | */ |
| 147 | void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, | 175 | void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, |
| 176 | u64 point, | ||
| 148 | struct dma_fence *fence) | 177 | struct dma_fence *fence) |
| 149 | { | 178 | { |
| 150 | struct dma_fence *old_fence; | 179 | struct dma_fence *old_fence; |
| @@ -172,42 +201,19 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, | |||
| 172 | } | 201 | } |
| 173 | EXPORT_SYMBOL(drm_syncobj_replace_fence); | 202 | EXPORT_SYMBOL(drm_syncobj_replace_fence); |
| 174 | 203 | ||
| 175 | struct drm_syncobj_null_fence { | ||
| 176 | struct dma_fence base; | ||
| 177 | spinlock_t lock; | ||
| 178 | }; | ||
| 179 | |||
| 180 | static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence) | ||
| 181 | { | ||
| 182 | return "syncobjnull"; | ||
| 183 | } | ||
| 184 | |||
| 185 | static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence) | ||
| 186 | { | ||
| 187 | dma_fence_enable_sw_signaling(fence); | ||
| 188 | return !dma_fence_is_signaled(fence); | ||
| 189 | } | ||
| 190 | |||
| 191 | static const struct dma_fence_ops drm_syncobj_null_fence_ops = { | ||
| 192 | .get_driver_name = drm_syncobj_null_fence_get_name, | ||
| 193 | .get_timeline_name = drm_syncobj_null_fence_get_name, | ||
| 194 | .enable_signaling = drm_syncobj_null_fence_enable_signaling, | ||
| 195 | .release = NULL, | ||
| 196 | }; | ||
| 197 | |||
| 198 | static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) | 204 | static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) |
| 199 | { | 205 | { |
| 200 | struct drm_syncobj_null_fence *fence; | 206 | struct drm_syncobj_stub_fence *fence; |
| 201 | fence = kzalloc(sizeof(*fence), GFP_KERNEL); | 207 | fence = kzalloc(sizeof(*fence), GFP_KERNEL); |
| 202 | if (fence == NULL) | 208 | if (fence == NULL) |
| 203 | return -ENOMEM; | 209 | return -ENOMEM; |
| 204 | 210 | ||
| 205 | spin_lock_init(&fence->lock); | 211 | spin_lock_init(&fence->lock); |
| 206 | dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops, | 212 | dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops, |
| 207 | &fence->lock, 0, 0); | 213 | &fence->lock, 0, 0); |
| 208 | dma_fence_signal(&fence->base); | 214 | dma_fence_signal(&fence->base); |
| 209 | 215 | ||
| 210 | drm_syncobj_replace_fence(syncobj, &fence->base); | 216 | drm_syncobj_replace_fence(syncobj, 0, &fence->base); |
| 211 | 217 | ||
| 212 | dma_fence_put(&fence->base); | 218 | dma_fence_put(&fence->base); |
| 213 | 219 | ||
| @@ -218,6 +224,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) | |||
| 218 | * drm_syncobj_find_fence - lookup and reference the fence in a sync object | 224 | * drm_syncobj_find_fence - lookup and reference the fence in a sync object |
| 219 | * @file_private: drm file private pointer | 225 | * @file_private: drm file private pointer |
| 220 | * @handle: sync object handle to lookup. | 226 | * @handle: sync object handle to lookup. |
| 227 | * @point: timeline point | ||
| 221 | * @fence: out parameter for the fence | 228 | * @fence: out parameter for the fence |
| 222 | * | 229 | * |
| 223 | * This is just a convenience function that combines drm_syncobj_find() and | 230 | * This is just a convenience function that combines drm_syncobj_find() and |
| @@ -228,7 +235,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) | |||
| 228 | * dma_fence_put(). | 235 | * dma_fence_put(). |
| 229 | */ | 236 | */ |
| 230 | int drm_syncobj_find_fence(struct drm_file *file_private, | 237 | int drm_syncobj_find_fence(struct drm_file *file_private, |
| 231 | u32 handle, | 238 | u32 handle, u64 point, |
| 232 | struct dma_fence **fence) | 239 | struct dma_fence **fence) |
| 233 | { | 240 | { |
| 234 | struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); | 241 | struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); |
| @@ -257,7 +264,7 @@ void drm_syncobj_free(struct kref *kref) | |||
| 257 | struct drm_syncobj *syncobj = container_of(kref, | 264 | struct drm_syncobj *syncobj = container_of(kref, |
| 258 | struct drm_syncobj, | 265 | struct drm_syncobj, |
| 259 | refcount); | 266 | refcount); |
| 260 | drm_syncobj_replace_fence(syncobj, NULL); | 267 | drm_syncobj_replace_fence(syncobj, 0, NULL); |
| 261 | kfree(syncobj); | 268 | kfree(syncobj); |
| 262 | } | 269 | } |
| 263 | EXPORT_SYMBOL(drm_syncobj_free); | 270 | EXPORT_SYMBOL(drm_syncobj_free); |
| @@ -297,7 +304,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, | |||
| 297 | } | 304 | } |
| 298 | 305 | ||
| 299 | if (fence) | 306 | if (fence) |
| 300 | drm_syncobj_replace_fence(syncobj, fence); | 307 | drm_syncobj_replace_fence(syncobj, 0, fence); |
| 301 | 308 | ||
| 302 | *out_syncobj = syncobj; | 309 | *out_syncobj = syncobj; |
| 303 | return 0; | 310 | return 0; |
| @@ -482,7 +489,7 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private, | |||
| 482 | return -ENOENT; | 489 | return -ENOENT; |
| 483 | } | 490 | } |
| 484 | 491 | ||
| 485 | drm_syncobj_replace_fence(syncobj, fence); | 492 | drm_syncobj_replace_fence(syncobj, 0, fence); |
| 486 | dma_fence_put(fence); | 493 | dma_fence_put(fence); |
| 487 | drm_syncobj_put(syncobj); | 494 | drm_syncobj_put(syncobj); |
| 488 | return 0; | 495 | return 0; |
| @@ -499,7 +506,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private, | |||
| 499 | if (fd < 0) | 506 | if (fd < 0) |
| 500 | return fd; | 507 | return fd; |
| 501 | 508 | ||
| 502 | ret = drm_syncobj_find_fence(file_private, handle, &fence); | 509 | ret = drm_syncobj_find_fence(file_private, handle, 0, &fence); |
| 503 | if (ret) | 510 | if (ret) |
| 504 | goto err_put_fd; | 511 | goto err_put_fd; |
| 505 | 512 | ||
| @@ -964,7 +971,7 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, | |||
| 964 | return ret; | 971 | return ret; |
| 965 | 972 | ||
| 966 | for (i = 0; i < args->count_handles; i++) | 973 | for (i = 0; i < args->count_handles; i++) |
| 967 | drm_syncobj_replace_fence(syncobjs[i], NULL); | 974 | drm_syncobj_replace_fence(syncobjs[i], 0, NULL); |
| 968 | 975 | ||
| 969 | drm_syncobj_array_free(syncobjs, args->count_handles); | 976 | drm_syncobj_array_free(syncobjs, args->count_handles); |
| 970 | 977 | ||
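
drm_syncobj_replace_fence() and drm_syncobj_find_fence() gain a timeline 'point' argument, which every caller in this series passes as 0 (plain binary behaviour), and the old null fence becomes a reusable stub fence. A short sketch of an updated caller, assuming binary semantics; example_signal_syncobj() is hypothetical:

#include <linux/errno.h>
#include <linux/dma-fence.h>
#include <drm/drm_syncobj.h>

/* Hypothetical caller, updated for the extra 'point' argument. */
static int example_signal_syncobj(struct drm_file *file_priv, u32 handle,
				  struct dma_fence *fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_priv, handle);

	if (!syncobj)
		return -ENOENT;

	/* Install the fence at timeline point 0, i.e. binary behaviour. */
	drm_syncobj_replace_fence(syncobj, 0, fence);
	drm_syncobj_put(syncobj);
	return 0;
}
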
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2ccb982a5dba..7ea442033a57 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -52,6 +52,7 @@ | |||
| 52 | #include <drm/drm_gem.h> | 52 | #include <drm/drm_gem.h> |
| 53 | #include <drm/drm_auth.h> | 53 | #include <drm/drm_auth.h> |
| 54 | #include <drm/drm_cache.h> | 54 | #include <drm/drm_cache.h> |
| 55 | #include <drm/drm_util.h> | ||
| 55 | 56 | ||
| 56 | #include "i915_params.h" | 57 | #include "i915_params.h" |
| 57 | #include "i915_reg.h" | 58 | #include "i915_reg.h" |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 7d0b3a2c30e2..22b4cb775576 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -2181,7 +2181,7 @@ signal_fence_array(struct i915_execbuffer *eb, | |||
| 2181 | if (!(flags & I915_EXEC_FENCE_SIGNAL)) | 2181 | if (!(flags & I915_EXEC_FENCE_SIGNAL)) |
| 2182 | continue; | 2182 | continue; |
| 2183 | 2183 | ||
| 2184 | drm_syncobj_replace_fence(syncobj, fence); | 2184 | drm_syncobj_replace_fence(syncobj, 0, fence); |
| 2185 | } | 2185 | } |
| 2186 | } | 2186 | } |
| 2187 | 2187 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1bd14c61dab5..b2bab57cd113 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | #include <drm/drm_crtc_helper.h> | 46 | #include <drm/drm_crtc_helper.h> |
| 47 | #include <drm/drm_plane_helper.h> | 47 | #include <drm/drm_plane_helper.h> |
| 48 | #include <drm/drm_rect.h> | 48 | #include <drm/drm_rect.h> |
| 49 | #include <drm/drm_atomic_uapi.h> | ||
| 49 | #include <linux/dma_remapping.h> | 50 | #include <linux/dma_remapping.h> |
| 50 | #include <linux/reservation.h> | 51 | #include <linux/reservation.h> |
| 51 | 52 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index e20e6a36a748..ed474da6c200 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h | |||
| @@ -25,6 +25,8 @@ | |||
| 25 | #ifndef _INTEL_DISPLAY_H_ | 25 | #ifndef _INTEL_DISPLAY_H_ |
| 26 | #define _INTEL_DISPLAY_H_ | 26 | #define _INTEL_DISPLAY_H_ |
| 27 | 27 | ||
| 28 | #include <drm/drm_util.h> | ||
| 29 | |||
| 28 | enum i915_gpio { | 30 | enum i915_gpio { |
| 29 | GPIOA, | 31 | GPIOA, |
| 30 | GPIOB, | 32 | GPIOB, |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 3f6920dd7880..2dfa585712c2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
| @@ -2,6 +2,8 @@ | |||
| 2 | #ifndef _INTEL_RINGBUFFER_H_ | 2 | #ifndef _INTEL_RINGBUFFER_H_ |
| 3 | #define _INTEL_RINGBUFFER_H_ | 3 | #define _INTEL_RINGBUFFER_H_ |
| 4 | 4 | ||
| 5 | #include <drm/drm_util.h> | ||
| 6 | |||
| 5 | #include <linux/hashtable.h> | 7 | #include <linux/hashtable.h> |
| 6 | #include <linux/seqlock.h> | 8 | #include <linux/seqlock.h> |
| 7 | 9 | ||
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index b640e39ebaca..015341e2dd4c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | |||
| @@ -21,6 +21,8 @@ | |||
| 21 | #include <linux/debugfs.h> | 21 | #include <linux/debugfs.h> |
| 22 | #include <linux/dma-buf.h> | 22 | #include <linux/dma-buf.h> |
| 23 | 23 | ||
| 24 | #include <drm/drm_atomic_uapi.h> | ||
| 25 | |||
| 24 | #include "msm_drv.h" | 26 | #include "msm_drv.h" |
| 25 | #include "dpu_kms.h" | 27 | #include "dpu_kms.h" |
| 26 | #include "dpu_formats.h" | 28 | #include "dpu_formats.h" |
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index c1f1779c980f..4bcdeca7479d 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c | |||
| @@ -15,6 +15,8 @@ | |||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #include <drm/drm_atomic_uapi.h> | ||
| 19 | |||
| 18 | #include "msm_drv.h" | 20 | #include "msm_drv.h" |
| 19 | #include "msm_gem.h" | 21 | #include "msm_gem.h" |
| 20 | #include "msm_kms.h" | 22 | #include "msm_kms.h" |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 8412119bd940..a9bb656058e5 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c | |||
| @@ -2174,7 +2174,7 @@ nv50_display_create(struct drm_device *dev) | |||
| 2174 | nouveau_display(dev)->fini = nv50_display_fini; | 2174 | nouveau_display(dev)->fini = nv50_display_fini; |
| 2175 | disp->disp = &nouveau_display(dev)->disp; | 2175 | disp->disp = &nouveau_display(dev)->disp; |
| 2176 | dev->mode_config.funcs = &nv50_disp_func; | 2176 | dev->mode_config.funcs = &nv50_disp_func; |
| 2177 | dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; | 2177 | dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true; |
| 2178 | 2178 | ||
| 2179 | /* small shared memory area we use for notifiers and semaphores */ | 2179 | /* small shared memory area we use for notifiers and semaphores */ |
| 2180 | ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 2180 | ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index dc7454e7f19a..0acc07555bcd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h | |||
| @@ -32,6 +32,8 @@ | |||
| 32 | #include <drm/drm_edid.h> | 32 | #include <drm/drm_edid.h> |
| 33 | #include <drm/drm_encoder.h> | 33 | #include <drm/drm_encoder.h> |
| 34 | #include <drm/drm_dp_helper.h> | 34 | #include <drm/drm_dp_helper.h> |
| 35 | #include <drm/drm_util.h> | ||
| 36 | |||
| 35 | #include "nouveau_crtc.h" | 37 | #include "nouveau_crtc.h" |
| 36 | #include "nouveau_encoder.h" | 38 | #include "nouveau_encoder.h" |
| 37 | 39 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 844498c4267c..20a260887be3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -379,7 +379,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, | |||
| 379 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | | 379 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | |
| 380 | FBINFO_HWACCEL_FILLRECT | | 380 | FBINFO_HWACCEL_FILLRECT | |
| 381 | FBINFO_HWACCEL_IMAGEBLIT; | 381 | FBINFO_HWACCEL_IMAGEBLIT; |
| 382 | info->flags |= FBINFO_CAN_FORCE_OUTPUT; | ||
| 383 | info->fbops = &nouveau_fbcon_sw_ops; | 382 | info->fbops = &nouveau_fbcon_sw_ops; |
| 384 | info->fix.smem_start = fb->nvbo->bo.mem.bus.base + | 383 | info->fix.smem_start = fb->nvbo->bo.mem.bus.base + |
| 385 | fb->nvbo->bo.mem.bus.offset; | 384 | fb->nvbo->bo.mem.bus.offset; |
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 01704a7f07cb..87d16a0ce01e 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <drm/drm_plane_helper.h> | 28 | #include <drm/drm_plane_helper.h> |
| 29 | #include <drm/drm_atomic_helper.h> | 29 | #include <drm/drm_atomic_helper.h> |
| 30 | #include <drm/drm_atomic.h> | 30 | #include <drm/drm_atomic.h> |
| 31 | #include <drm/drm_gem_framebuffer_helper.h> | ||
| 31 | 32 | ||
| 32 | #include "qxl_drv.h" | 33 | #include "qxl_drv.h" |
| 33 | #include "qxl_object.h" | 34 | #include "qxl_object.h" |
| @@ -388,17 +389,6 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = { | |||
| 388 | .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, | 389 | .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, |
| 389 | }; | 390 | }; |
| 390 | 391 | ||
| 391 | void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb) | ||
| 392 | { | ||
| 393 | struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); | ||
| 394 | struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj); | ||
| 395 | |||
| 396 | WARN_ON(bo->shadow); | ||
| 397 | drm_gem_object_put_unlocked(qxl_fb->obj); | ||
| 398 | drm_framebuffer_cleanup(fb); | ||
| 399 | kfree(qxl_fb); | ||
| 400 | } | ||
| 401 | |||
| 402 | static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, | 392 | static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, |
| 403 | struct drm_file *file_priv, | 393 | struct drm_file *file_priv, |
| 404 | unsigned flags, unsigned color, | 394 | unsigned flags, unsigned color, |
| @@ -406,15 +396,14 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, | |||
| 406 | unsigned num_clips) | 396 | unsigned num_clips) |
| 407 | { | 397 | { |
| 408 | /* TODO: vmwgfx where this was cribbed from had locking. Why? */ | 398 | /* TODO: vmwgfx where this was cribbed from had locking. Why? */ |
| 409 | struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); | 399 | struct qxl_device *qdev = fb->dev->dev_private; |
| 410 | struct qxl_device *qdev = qxl_fb->base.dev->dev_private; | ||
| 411 | struct drm_clip_rect norect; | 400 | struct drm_clip_rect norect; |
| 412 | struct qxl_bo *qobj; | 401 | struct qxl_bo *qobj; |
| 413 | int inc = 1; | 402 | int inc = 1; |
| 414 | 403 | ||
| 415 | drm_modeset_lock_all(fb->dev); | 404 | drm_modeset_lock_all(fb->dev); |
| 416 | 405 | ||
| 417 | qobj = gem_to_qxl_bo(qxl_fb->obj); | 406 | qobj = gem_to_qxl_bo(fb->obj[0]); |
| 418 | /* if we aren't primary surface ignore this */ | 407 | /* if we aren't primary surface ignore this */ |
| 419 | if (!qobj->is_primary) { | 408 | if (!qobj->is_primary) { |
| 420 | drm_modeset_unlock_all(fb->dev); | 409 | drm_modeset_unlock_all(fb->dev); |
| @@ -432,7 +421,7 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, | |||
| 432 | inc = 2; /* skip source rects */ | 421 | inc = 2; /* skip source rects */ |
| 433 | } | 422 | } |
| 434 | 423 | ||
| 435 | qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color, | 424 | qxl_draw_dirty_fb(qdev, fb, qobj, flags, color, |
| 436 | clips, num_clips, inc); | 425 | clips, num_clips, inc); |
| 437 | 426 | ||
| 438 | drm_modeset_unlock_all(fb->dev); | 427 | drm_modeset_unlock_all(fb->dev); |
| @@ -441,31 +430,11 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, | |||
| 441 | } | 430 | } |
| 442 | 431 | ||
| 443 | static const struct drm_framebuffer_funcs qxl_fb_funcs = { | 432 | static const struct drm_framebuffer_funcs qxl_fb_funcs = { |
| 444 | .destroy = qxl_user_framebuffer_destroy, | 433 | .destroy = drm_gem_fb_destroy, |
| 445 | .dirty = qxl_framebuffer_surface_dirty, | 434 | .dirty = qxl_framebuffer_surface_dirty, |
| 446 | /* TODO? | 435 | .create_handle = drm_gem_fb_create_handle, |
| 447 | * .create_handle = qxl_user_framebuffer_create_handle, */ | ||
| 448 | }; | 436 | }; |
| 449 | 437 | ||
| 450 | int | ||
| 451 | qxl_framebuffer_init(struct drm_device *dev, | ||
| 452 | struct qxl_framebuffer *qfb, | ||
| 453 | const struct drm_mode_fb_cmd2 *mode_cmd, | ||
| 454 | struct drm_gem_object *obj, | ||
| 455 | const struct drm_framebuffer_funcs *funcs) | ||
| 456 | { | ||
| 457 | int ret; | ||
| 458 | |||
| 459 | qfb->obj = obj; | ||
| 460 | drm_helper_mode_fill_fb_struct(dev, &qfb->base, mode_cmd); | ||
| 461 | ret = drm_framebuffer_init(dev, &qfb->base, funcs); | ||
| 462 | if (ret) { | ||
| 463 | qfb->obj = NULL; | ||
| 464 | return ret; | ||
| 465 | } | ||
| 466 | return 0; | ||
| 467 | } | ||
| 468 | |||
| 469 | static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, | 438 | static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, |
| 470 | struct drm_crtc_state *old_state) | 439 | struct drm_crtc_state *old_state) |
| 471 | { | 440 | { |
| @@ -488,14 +457,12 @@ static int qxl_primary_atomic_check(struct drm_plane *plane, | |||
| 488 | struct drm_plane_state *state) | 457 | struct drm_plane_state *state) |
| 489 | { | 458 | { |
| 490 | struct qxl_device *qdev = plane->dev->dev_private; | 459 | struct qxl_device *qdev = plane->dev->dev_private; |
| 491 | struct qxl_framebuffer *qfb; | ||
| 492 | struct qxl_bo *bo; | 460 | struct qxl_bo *bo; |
| 493 | 461 | ||
| 494 | if (!state->crtc || !state->fb) | 462 | if (!state->crtc || !state->fb) |
| 495 | return 0; | 463 | return 0; |
| 496 | 464 | ||
| 497 | qfb = to_qxl_framebuffer(state->fb); | 465 | bo = gem_to_qxl_bo(state->fb->obj[0]); |
| 498 | bo = gem_to_qxl_bo(qfb->obj); | ||
| 499 | 466 | ||
| 500 | if (bo->surf.stride * bo->surf.height > qdev->vram_size) { | 467 | if (bo->surf.stride * bo->surf.height > qdev->vram_size) { |
| 501 | DRM_ERROR("Mode doesn't fit in vram size (vgamem)"); | 468 | DRM_ERROR("Mode doesn't fit in vram size (vgamem)"); |
| @@ -556,23 +523,19 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, | |||
| 556 | struct drm_plane_state *old_state) | 523 | struct drm_plane_state *old_state) |
| 557 | { | 524 | { |
| 558 | struct qxl_device *qdev = plane->dev->dev_private; | 525 | struct qxl_device *qdev = plane->dev->dev_private; |
| 559 | struct qxl_framebuffer *qfb = | 526 | struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]); |
| 560 | to_qxl_framebuffer(plane->state->fb); | ||
| 561 | struct qxl_framebuffer *qfb_old; | ||
| 562 | struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); | ||
| 563 | struct qxl_bo *bo_old; | 527 | struct qxl_bo *bo_old; |
| 564 | struct drm_clip_rect norect = { | 528 | struct drm_clip_rect norect = { |
| 565 | .x1 = 0, | 529 | .x1 = 0, |
| 566 | .y1 = 0, | 530 | .y1 = 0, |
| 567 | .x2 = qfb->base.width, | 531 | .x2 = plane->state->fb->width, |
| 568 | .y2 = qfb->base.height | 532 | .y2 = plane->state->fb->height |
| 569 | }; | 533 | }; |
| 570 | int ret; | 534 | int ret; |
| 571 | bool same_shadow = false; | 535 | bool same_shadow = false; |
| 572 | 536 | ||
| 573 | if (old_state->fb) { | 537 | if (old_state->fb) { |
| 574 | qfb_old = to_qxl_framebuffer(old_state->fb); | 538 | bo_old = gem_to_qxl_bo(old_state->fb->obj[0]); |
| 575 | bo_old = gem_to_qxl_bo(qfb_old->obj); | ||
| 576 | } else { | 539 | } else { |
| 577 | bo_old = NULL; | 540 | bo_old = NULL; |
| 578 | } | 541 | } |
| @@ -602,7 +565,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, | |||
| 602 | bo->is_primary = true; | 565 | bo->is_primary = true; |
| 603 | } | 566 | } |
| 604 | 567 | ||
| 605 | qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); | 568 | qxl_draw_dirty_fb(qdev, plane->state->fb, bo, 0, 0, &norect, 1, 1); |
| 606 | } | 569 | } |
| 607 | 570 | ||
| 608 | static void qxl_primary_atomic_disable(struct drm_plane *plane, | 571 | static void qxl_primary_atomic_disable(struct drm_plane *plane, |
| @@ -611,9 +574,7 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane, | |||
| 611 | struct qxl_device *qdev = plane->dev->dev_private; | 574 | struct qxl_device *qdev = plane->dev->dev_private; |
| 612 | 575 | ||
| 613 | if (old_state->fb) { | 576 | if (old_state->fb) { |
| 614 | struct qxl_framebuffer *qfb = | 577 | struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]); |
| 615 | to_qxl_framebuffer(old_state->fb); | ||
| 616 | struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); | ||
| 617 | 578 | ||
| 618 | if (bo->is_primary) { | 579 | if (bo->is_primary) { |
| 619 | qxl_io_destroy_primary(qdev); | 580 | qxl_io_destroy_primary(qdev); |
| @@ -645,7 +606,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, | |||
| 645 | return; | 606 | return; |
| 646 | 607 | ||
| 647 | if (fb != old_state->fb) { | 608 | if (fb != old_state->fb) { |
| 648 | obj = to_qxl_framebuffer(fb)->obj; | 609 | obj = fb->obj[0]; |
| 649 | user_bo = gem_to_qxl_bo(obj); | 610 | user_bo = gem_to_qxl_bo(obj); |
| 650 | 611 | ||
| 651 | /* pinning is done in the prepare/cleanup framebuffer */ | 612 | /* pinning is done in the prepare/cleanup framebuffer */ |
| @@ -765,13 +726,13 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane, | |||
| 765 | if (!new_state->fb) | 726 | if (!new_state->fb) |
| 766 | return 0; | 727 | return 0; |
| 767 | 728 | ||
| 768 | obj = to_qxl_framebuffer(new_state->fb)->obj; | 729 | obj = new_state->fb->obj[0]; |
| 769 | user_bo = gem_to_qxl_bo(obj); | 730 | user_bo = gem_to_qxl_bo(obj); |
| 770 | 731 | ||
| 771 | if (plane->type == DRM_PLANE_TYPE_PRIMARY && | 732 | if (plane->type == DRM_PLANE_TYPE_PRIMARY && |
| 772 | user_bo->is_dumb && !user_bo->shadow) { | 733 | user_bo->is_dumb && !user_bo->shadow) { |
| 773 | if (plane->state->fb) { | 734 | if (plane->state->fb) { |
| 774 | obj = to_qxl_framebuffer(plane->state->fb)->obj; | 735 | obj = plane->state->fb->obj[0]; |
| 775 | old_bo = gem_to_qxl_bo(obj); | 736 | old_bo = gem_to_qxl_bo(obj); |
| 776 | } | 737 | } |
| 777 | if (old_bo && old_bo->shadow && | 738 | if (old_bo && old_bo->shadow && |
| @@ -815,7 +776,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane, | |||
| 815 | return; | 776 | return; |
| 816 | } | 777 | } |
| 817 | 778 | ||
| 818 | obj = to_qxl_framebuffer(old_state->fb)->obj; | 779 | obj = old_state->fb->obj[0]; |
| 819 | user_bo = gem_to_qxl_bo(obj); | 780 | user_bo = gem_to_qxl_bo(obj); |
| 820 | qxl_bo_unpin(user_bo); | 781 | qxl_bo_unpin(user_bo); |
| 821 | 782 | ||
| @@ -1115,26 +1076,8 @@ qxl_user_framebuffer_create(struct drm_device *dev, | |||
| 1115 | struct drm_file *file_priv, | 1076 | struct drm_file *file_priv, |
| 1116 | const struct drm_mode_fb_cmd2 *mode_cmd) | 1077 | const struct drm_mode_fb_cmd2 *mode_cmd) |
| 1117 | { | 1078 | { |
| 1118 | struct drm_gem_object *obj; | 1079 | return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd, |
| 1119 | struct qxl_framebuffer *qxl_fb; | 1080 | &qxl_fb_funcs); |
| 1120 | int ret; | ||
| 1121 | |||
| 1122 | obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); | ||
| 1123 | if (!obj) | ||
| 1124 | return NULL; | ||
| 1125 | |||
| 1126 | qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL); | ||
| 1127 | if (qxl_fb == NULL) | ||
| 1128 | return NULL; | ||
| 1129 | |||
| 1130 | ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj, &qxl_fb_funcs); | ||
| 1131 | if (ret) { | ||
| 1132 | kfree(qxl_fb); | ||
| 1133 | drm_gem_object_put_unlocked(obj); | ||
| 1134 | return NULL; | ||
| 1135 | } | ||
| 1136 | |||
| 1137 | return &qxl_fb->base; | ||
| 1138 | } | 1081 | } |
| 1139 | 1082 | ||
| 1140 | static const struct drm_mode_config_funcs qxl_mode_funcs = { | 1083 | static const struct drm_mode_config_funcs qxl_mode_funcs = { |
| @@ -1221,7 +1164,6 @@ int qxl_modeset_init(struct qxl_device *qdev) | |||
| 1221 | } | 1164 | } |
| 1222 | 1165 | ||
| 1223 | qxl_display_read_client_monitors_config(qdev); | 1166 | qxl_display_read_client_monitors_config(qdev); |
| 1224 | qdev->mode_info.mode_config_initialized = true; | ||
| 1225 | 1167 | ||
| 1226 | drm_mode_config_reset(&qdev->ddev); | 1168 | drm_mode_config_reset(&qdev->ddev); |
| 1227 | 1169 | ||
| @@ -1237,8 +1179,5 @@ void qxl_modeset_fini(struct qxl_device *qdev) | |||
| 1237 | qxl_fbdev_fini(qdev); | 1179 | qxl_fbdev_fini(qdev); |
| 1238 | 1180 | ||
| 1239 | qxl_destroy_monitors_object(qdev); | 1181 | qxl_destroy_monitors_object(qdev); |
| 1240 | if (qdev->mode_info.mode_config_initialized) { | 1182 | drm_mode_config_cleanup(&qdev->ddev); |
| 1241 | drm_mode_config_cleanup(&qdev->ddev); | ||
| 1242 | qdev->mode_info.mode_config_initialized = false; | ||
| 1243 | } | ||
| 1244 | } | 1183 | } |
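
The qxl changes drop the driver-private struct qxl_framebuffer and qxl_framebuffer_init() in favour of the core drm_framebuffer, whose backing GEM object sits in fb->obj[0], with the drm_gem_fb helpers supplying destroy/create_handle/fb_create. A generic sketch of that pattern under the same assumptions; the foo_* names are hypothetical:

#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>

static int foo_fb_dirty(struct drm_framebuffer *fb, struct drm_file *file_priv,
			unsigned int flags, unsigned int color,
			struct drm_clip_rect *clips, unsigned int num_clips)
{
	struct drm_gem_object *gobj = fb->obj[0];	/* backing object */

	/* Flush 'clips' of 'gobj' to the hardware here. */
	(void)gobj;
	return 0;
}

static const struct drm_framebuffer_funcs foo_fb_funcs = {
	.destroy	= drm_gem_fb_destroy,
	.create_handle	= drm_gem_fb_create_handle,
	.dirty		= foo_fb_dirty,
};

static struct drm_framebuffer *
foo_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv,
			    const struct drm_mode_fb_cmd2 *mode_cmd)
{
	return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd,
					    &foo_fb_funcs);
}
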
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 4d8681e84e68..cc5b32e749ce 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c | |||
| @@ -262,7 +262,7 @@ out_free_drawable: | |||
| 262 | * by treating them differently in the server. | 262 | * by treating them differently in the server. |
| 263 | */ | 263 | */ |
| 264 | void qxl_draw_dirty_fb(struct qxl_device *qdev, | 264 | void qxl_draw_dirty_fb(struct qxl_device *qdev, |
| 265 | struct qxl_framebuffer *qxl_fb, | 265 | struct drm_framebuffer *fb, |
| 266 | struct qxl_bo *bo, | 266 | struct qxl_bo *bo, |
| 267 | unsigned flags, unsigned color, | 267 | unsigned flags, unsigned color, |
| 268 | struct drm_clip_rect *clips, | 268 | struct drm_clip_rect *clips, |
| @@ -281,9 +281,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
| 281 | struct qxl_drawable *drawable; | 281 | struct qxl_drawable *drawable; |
| 282 | struct qxl_rect drawable_rect; | 282 | struct qxl_rect drawable_rect; |
| 283 | struct qxl_rect *rects; | 283 | struct qxl_rect *rects; |
| 284 | int stride = qxl_fb->base.pitches[0]; | 284 | int stride = fb->pitches[0]; |
| 285 | /* depth is not actually interesting, we don't mask with it */ | 285 | /* depth is not actually interesting, we don't mask with it */ |
| 286 | int depth = qxl_fb->base.format->cpp[0] * 8; | 286 | int depth = fb->format->cpp[0] * 8; |
| 287 | uint8_t *surface_base; | 287 | uint8_t *surface_base; |
| 288 | struct qxl_release *release; | 288 | struct qxl_release *release; |
| 289 | struct qxl_bo *clips_bo; | 289 | struct qxl_bo *clips_bo; |
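
With the wrapper gone, qxl_draw_dirty_fb() reads stride and depth straight off the core framebuffer. A trivial, hypothetical helper showing where those values now come from:

#include <drm/drm_framebuffer.h>

/* Hypothetical helper: geometry comes straight from the core framebuffer. */
static void example_fb_geometry(const struct drm_framebuffer *fb,
				unsigned int *stride, unsigned int *depth)
{
	*stride = fb->pitches[0];		/* bytes per scanline */
	*depth = fb->format->cpp[0] * 8;	/* bits per pixel */
}
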
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 01220d386b0a..8ff70a7281a7 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | 38 | ||
| 39 | #include <drm/drm_crtc.h> | 39 | #include <drm/drm_crtc.h> |
| 40 | #include <drm/drm_encoder.h> | 40 | #include <drm/drm_encoder.h> |
| 41 | #include <drm/drm_fb_helper.h> | ||
| 41 | #include <drm/drm_gem.h> | 42 | #include <drm/drm_gem.h> |
| 42 | #include <drm/drmP.h> | 43 | #include <drm/drmP.h> |
| 43 | #include <drm/ttm/ttm_bo_api.h> | 44 | #include <drm/ttm/ttm_bo_api.h> |
| @@ -121,15 +122,9 @@ struct qxl_output { | |||
| 121 | struct drm_encoder enc; | 122 | struct drm_encoder enc; |
| 122 | }; | 123 | }; |
| 123 | 124 | ||
| 124 | struct qxl_framebuffer { | ||
| 125 | struct drm_framebuffer base; | ||
| 126 | struct drm_gem_object *obj; | ||
| 127 | }; | ||
| 128 | |||
| 129 | #define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base) | 125 | #define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base) |
| 130 | #define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base) | 126 | #define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base) |
| 131 | #define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc) | 127 | #define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc) |
| 132 | #define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base) | ||
| 133 | 128 | ||
| 134 | struct qxl_mman { | 129 | struct qxl_mman { |
| 135 | struct ttm_bo_global_ref bo_global_ref; | 130 | struct ttm_bo_global_ref bo_global_ref; |
| @@ -138,13 +133,6 @@ struct qxl_mman { | |||
| 138 | struct ttm_bo_device bdev; | 133 | struct ttm_bo_device bdev; |
| 139 | }; | 134 | }; |
| 140 | 135 | ||
| 141 | struct qxl_mode_info { | ||
| 142 | bool mode_config_initialized; | ||
| 143 | |||
| 144 | /* pointer to fbdev info structure */ | ||
| 145 | struct qxl_fbdev *qfbdev; | ||
| 146 | }; | ||
| 147 | |||
| 148 | 136 | ||
| 149 | struct qxl_memslot { | 137 | struct qxl_memslot { |
| 150 | uint8_t generation; | 138 | uint8_t generation; |
| @@ -232,10 +220,9 @@ struct qxl_device { | |||
| 232 | void *ram; | 220 | void *ram; |
| 233 | struct qxl_mman mman; | 221 | struct qxl_mman mman; |
| 234 | struct qxl_gem gem; | 222 | struct qxl_gem gem; |
| 235 | struct qxl_mode_info mode_info; | ||
| 236 | 223 | ||
| 237 | struct fb_info *fbdev_info; | 224 | struct drm_fb_helper fb_helper; |
| 238 | struct qxl_framebuffer *fbdev_qfb; | 225 | |
| 239 | void *ram_physical; | 226 | void *ram_physical; |
| 240 | 227 | ||
| 241 | struct qxl_ring *release_ring; | 228 | struct qxl_ring *release_ring; |
| @@ -349,19 +336,8 @@ qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo, | |||
| 349 | 336 | ||
| 350 | int qxl_fbdev_init(struct qxl_device *qdev); | 337 | int qxl_fbdev_init(struct qxl_device *qdev); |
| 351 | void qxl_fbdev_fini(struct qxl_device *qdev); | 338 | void qxl_fbdev_fini(struct qxl_device *qdev); |
| 352 | int qxl_get_handle_for_primary_fb(struct qxl_device *qdev, | ||
| 353 | struct drm_file *file_priv, | ||
| 354 | uint32_t *handle); | ||
| 355 | void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state); | ||
| 356 | 339 | ||
| 357 | /* qxl_display.c */ | 340 | /* qxl_display.c */ |
| 358 | void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb); | ||
| 359 | int | ||
| 360 | qxl_framebuffer_init(struct drm_device *dev, | ||
| 361 | struct qxl_framebuffer *rfb, | ||
| 362 | const struct drm_mode_fb_cmd2 *mode_cmd, | ||
| 363 | struct drm_gem_object *obj, | ||
| 364 | const struct drm_framebuffer_funcs *funcs); | ||
| 365 | void qxl_display_read_client_monitors_config(struct qxl_device *qdev); | 341 | void qxl_display_read_client_monitors_config(struct qxl_device *qdev); |
| 366 | int qxl_create_monitors_object(struct qxl_device *qdev); | 342 | int qxl_create_monitors_object(struct qxl_device *qdev); |
| 367 | int qxl_destroy_monitors_object(struct qxl_device *qdev); | 343 | int qxl_destroy_monitors_object(struct qxl_device *qdev); |
| @@ -471,7 +447,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, | |||
| 471 | int stride /* filled in if 0 */); | 447 | int stride /* filled in if 0 */); |
| 472 | 448 | ||
| 473 | void qxl_draw_dirty_fb(struct qxl_device *qdev, | 449 | void qxl_draw_dirty_fb(struct qxl_device *qdev, |
| 474 | struct qxl_framebuffer *qxl_fb, | 450 | struct drm_framebuffer *fb, |
| 475 | struct qxl_bo *bo, | 451 | struct qxl_bo *bo, |
| 476 | unsigned flags, unsigned color, | 452 | unsigned flags, unsigned color, |
| 477 | struct drm_clip_rect *clips, | 453 | struct drm_clip_rect *clips, |
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index ca465c0d49fa..2294b7f14fdf 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c | |||
| @@ -30,24 +30,12 @@ | |||
| 30 | #include <drm/drm_crtc.h> | 30 | #include <drm/drm_crtc.h> |
| 31 | #include <drm/drm_crtc_helper.h> | 31 | #include <drm/drm_crtc_helper.h> |
| 32 | #include <drm/drm_fb_helper.h> | 32 | #include <drm/drm_fb_helper.h> |
| 33 | #include <drm/drm_gem_framebuffer_helper.h> | ||
| 33 | 34 | ||
| 34 | #include "qxl_drv.h" | 35 | #include "qxl_drv.h" |
| 35 | 36 | ||
| 36 | #include "qxl_object.h" | 37 | #include "qxl_object.h" |
| 37 | 38 | ||
| 38 | #define QXL_DIRTY_DELAY (HZ / 30) | ||
| 39 | |||
| 40 | struct qxl_fbdev { | ||
| 41 | struct drm_fb_helper helper; | ||
| 42 | struct qxl_framebuffer qfb; | ||
| 43 | struct qxl_device *qdev; | ||
| 44 | |||
| 45 | spinlock_t delayed_ops_lock; | ||
| 46 | struct list_head delayed_ops; | ||
| 47 | void *shadow; | ||
| 48 | int size; | ||
| 49 | }; | ||
| 50 | |||
| 51 | static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image, | 39 | static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image, |
| 52 | struct qxl_device *qdev, struct fb_info *info, | 40 | struct qxl_device *qdev, struct fb_info *info, |
| 53 | const struct fb_image *image) | 41 | const struct fb_image *image) |
| @@ -73,13 +61,6 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image, | |||
| 73 | } | 61 | } |
| 74 | } | 62 | } |
| 75 | 63 | ||
| 76 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
| 77 | static struct fb_deferred_io qxl_defio = { | ||
| 78 | .delay = QXL_DIRTY_DELAY, | ||
| 79 | .deferred_io = drm_fb_helper_deferred_io, | ||
| 80 | }; | ||
| 81 | #endif | ||
| 82 | |||
| 83 | static struct fb_ops qxlfb_ops = { | 64 | static struct fb_ops qxlfb_ops = { |
| 84 | .owner = THIS_MODULE, | 65 | .owner = THIS_MODULE, |
| 85 | DRM_FB_HELPER_DEFAULT_OPS, | 66 | DRM_FB_HELPER_DEFAULT_OPS, |
| @@ -98,26 +79,10 @@ static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj) | |||
| 98 | drm_gem_object_put_unlocked(gobj); | 79 | drm_gem_object_put_unlocked(gobj); |
| 99 | } | 80 | } |
| 100 | 81 | ||
| 101 | int qxl_get_handle_for_primary_fb(struct qxl_device *qdev, | 82 | static int qxlfb_create_pinned_object(struct qxl_device *qdev, |
| 102 | struct drm_file *file_priv, | ||
| 103 | uint32_t *handle) | ||
| 104 | { | ||
| 105 | int r; | ||
| 106 | struct drm_gem_object *gobj = qdev->fbdev_qfb->obj; | ||
| 107 | |||
| 108 | BUG_ON(!gobj); | ||
| 109 | /* drm_get_handle_create adds a reference - good */ | ||
| 110 | r = drm_gem_handle_create(file_priv, gobj, handle); | ||
| 111 | if (r) | ||
| 112 | return r; | ||
| 113 | return 0; | ||
| 114 | } | ||
| 115 | |||
| 116 | static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev, | ||
| 117 | const struct drm_mode_fb_cmd2 *mode_cmd, | 83 | const struct drm_mode_fb_cmd2 *mode_cmd, |
| 118 | struct drm_gem_object **gobj_p) | 84 | struct drm_gem_object **gobj_p) |
| 119 | { | 85 | { |
| 120 | struct qxl_device *qdev = qfbdev->qdev; | ||
| 121 | struct drm_gem_object *gobj = NULL; | 86 | struct drm_gem_object *gobj = NULL; |
| 122 | struct qxl_bo *qbo = NULL; | 87 | struct qxl_bo *qbo = NULL; |
| 123 | int ret; | 88 | int ret; |
| @@ -174,13 +139,12 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb, | |||
| 174 | unsigned num_clips) | 139 | unsigned num_clips) |
| 175 | { | 140 | { |
| 176 | struct qxl_device *qdev = fb->dev->dev_private; | 141 | struct qxl_device *qdev = fb->dev->dev_private; |
| 177 | struct fb_info *info = qdev->fbdev_info; | 142 | struct fb_info *info = qdev->fb_helper.fbdev; |
| 178 | struct qxl_fbdev *qfbdev = info->par; | ||
| 179 | struct qxl_fb_image qxl_fb_image; | 143 | struct qxl_fb_image qxl_fb_image; |
| 180 | struct fb_image *image = &qxl_fb_image.fb_image; | 144 | struct fb_image *image = &qxl_fb_image.fb_image; |
| 181 | 145 | ||
| 182 | /* TODO: hard coding 32 bpp */ | 146 | /* TODO: hard coding 32 bpp */ |
| 183 | int stride = qfbdev->qfb.base.pitches[0]; | 147 | int stride = fb->pitches[0]; |
| 184 | 148 | ||
| 185 | /* | 149 | /* |
| 186 | * we are using a shadow draw buffer, at qdev->surface0_shadow | 150 | * we are using a shadow draw buffer, at qdev->surface0_shadow |
| @@ -199,7 +163,7 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb, | |||
| 199 | image->cmap.green = NULL; | 163 | image->cmap.green = NULL; |
| 200 | image->cmap.blue = NULL; | 164 | image->cmap.blue = NULL; |
| 201 | image->cmap.transp = NULL; | 165 | image->cmap.transp = NULL; |
| 202 | image->data = qfbdev->shadow + (clips->x1 * 4) + (stride * clips->y1); | 166 | image->data = info->screen_base + (clips->x1 * 4) + (stride * clips->y1); |
| 203 | 167 | ||
| 204 | qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL); | 168 | qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL); |
| 205 | qxl_draw_opaque_fb(&qxl_fb_image, stride); | 169 | qxl_draw_opaque_fb(&qxl_fb_image, stride); |
| @@ -208,21 +172,22 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb, | |||
| 208 | } | 172 | } |
| 209 | 173 | ||
| 210 | static const struct drm_framebuffer_funcs qxlfb_fb_funcs = { | 174 | static const struct drm_framebuffer_funcs qxlfb_fb_funcs = { |
| 211 | .destroy = qxl_user_framebuffer_destroy, | 175 | .destroy = drm_gem_fb_destroy, |
| 176 | .create_handle = drm_gem_fb_create_handle, | ||
| 212 | .dirty = qxlfb_framebuffer_dirty, | 177 | .dirty = qxlfb_framebuffer_dirty, |
| 213 | }; | 178 | }; |
| 214 | 179 | ||
| 215 | static int qxlfb_create(struct qxl_fbdev *qfbdev, | 180 | static int qxlfb_create(struct drm_fb_helper *helper, |
| 216 | struct drm_fb_helper_surface_size *sizes) | 181 | struct drm_fb_helper_surface_size *sizes) |
| 217 | { | 182 | { |
| 218 | struct qxl_device *qdev = qfbdev->qdev; | 183 | struct qxl_device *qdev = |
| 184 | container_of(helper, struct qxl_device, fb_helper); | ||
| 219 | struct fb_info *info; | 185 | struct fb_info *info; |
| 220 | struct drm_framebuffer *fb = NULL; | 186 | struct drm_framebuffer *fb = NULL; |
| 221 | struct drm_mode_fb_cmd2 mode_cmd; | 187 | struct drm_mode_fb_cmd2 mode_cmd; |
| 222 | struct drm_gem_object *gobj = NULL; | 188 | struct drm_gem_object *gobj = NULL; |
| 223 | struct qxl_bo *qbo = NULL; | 189 | struct qxl_bo *qbo = NULL; |
| 224 | int ret; | 190 | int ret; |
| 225 | int size; | ||
| 226 | int bpp = sizes->surface_bpp; | 191 | int bpp = sizes->surface_bpp; |
| 227 | int depth = sizes->surface_depth; | 192 | int depth = sizes->surface_depth; |
| 228 | void *shadow; | 193 | void *shadow; |
| @@ -233,7 +198,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, | |||
| 233 | mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64); | 198 | mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64); |
| 234 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); | 199 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); |
| 235 | 200 | ||
| 236 | ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj); | 201 | ret = qxlfb_create_pinned_object(qdev, &mode_cmd, &gobj); |
| 237 | if (ret < 0) | 202 | if (ret < 0) |
| 238 | return ret; | 203 | return ret; |
| 239 | 204 | ||
| @@ -247,25 +212,26 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, | |||
| 247 | DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n", | 212 | DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n", |
| 248 | qxl_bo_gpu_offset(qbo), qxl_bo_mmap_offset(qbo), | 213 | qxl_bo_gpu_offset(qbo), qxl_bo_mmap_offset(qbo), |
| 249 | qbo->kptr, shadow); | 214 | qbo->kptr, shadow); |
| 250 | size = mode_cmd.pitches[0] * mode_cmd.height; | ||
| 251 | 215 | ||
| 252 | info = drm_fb_helper_alloc_fbi(&qfbdev->helper); | 216 | info = drm_fb_helper_alloc_fbi(helper); |
| 253 | if (IS_ERR(info)) { | 217 | if (IS_ERR(info)) { |
| 254 | ret = PTR_ERR(info); | 218 | ret = PTR_ERR(info); |
| 255 | goto out_unref; | 219 | goto out_unref; |
| 256 | } | 220 | } |
| 257 | 221 | ||
| 258 | info->par = qfbdev; | 222 | info->par = helper; |
| 259 | |||
| 260 | qxl_framebuffer_init(&qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj, | ||
| 261 | &qxlfb_fb_funcs); | ||
| 262 | 223 | ||
| 263 | fb = &qfbdev->qfb.base; | 224 | fb = drm_gem_fbdev_fb_create(&qdev->ddev, sizes, 64, gobj, |
| 225 | &qxlfb_fb_funcs); | ||
| 226 | if (IS_ERR(fb)) { | ||
| 227 | DRM_ERROR("Failed to create framebuffer: %ld\n", PTR_ERR(fb)); | ||
| 228 | ret = PTR_ERR(fb); | ||
| 229 | goto out_unref; | ||
| 230 | } | ||
| 264 | 231 | ||
| 265 | /* setup helper with fb data */ | 232 | /* setup helper with fb data */ |
| 266 | qfbdev->helper.fb = fb; | 233 | qdev->fb_helper.fb = fb; |
| 267 | 234 | ||
| 268 | qfbdev->shadow = shadow; | ||
| 269 | strcpy(info->fix.id, "qxldrmfb"); | 235 | strcpy(info->fix.id, "qxldrmfb"); |
| 270 | 236 | ||
| 271 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); | 237 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); |
| @@ -278,10 +244,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, | |||
| 278 | */ | 244 | */ |
| 279 | info->fix.smem_start = qdev->vram_base; /* TODO - correct? */ | 245 | info->fix.smem_start = qdev->vram_base; /* TODO - correct? */ |
| 280 | info->fix.smem_len = gobj->size; | 246 | info->fix.smem_len = gobj->size; |
| 281 | info->screen_base = qfbdev->shadow; | 247 | info->screen_base = shadow; |
| 282 | info->screen_size = gobj->size; | 248 | info->screen_size = gobj->size; |
| 283 | 249 | ||
| 284 | drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width, | 250 | drm_fb_helper_fill_var(info, &qdev->fb_helper, sizes->fb_width, |
| 285 | sizes->fb_height); | 251 | sizes->fb_height); |
| 286 | 252 | ||
| 287 | /* setup aperture base/size for vesafb takeover */ | 253 | /* setup aperture base/size for vesafb takeover */ |
| @@ -296,13 +262,9 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, | |||
| 296 | goto out_unref; | 262 | goto out_unref; |
| 297 | } | 263 | } |
| 298 | 264 | ||
| 299 | #ifdef CONFIG_DRM_FBDEV_EMULATION | 265 | /* XXX error handling. */ |
| 300 | info->fbdefio = &qxl_defio; | 266 | drm_fb_helper_defio_init(helper); |
| 301 | fb_deferred_io_init(info); | ||
| 302 | #endif | ||
| 303 | 267 | ||
| 304 | qdev->fbdev_info = info; | ||
| 305 | qdev->fbdev_qfb = &qfbdev->qfb; | ||
| 306 | DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size); | 268 | DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size); |
| 307 | DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", | 269 | DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", |
| 308 | fb->format->depth, fb->pitches[0], fb->width, fb->height); | 270 | fb->format->depth, fb->pitches[0], fb->width, fb->height); |
| @@ -313,119 +275,26 @@ out_unref: | |||
| 313 | qxl_bo_kunmap(qbo); | 275 | qxl_bo_kunmap(qbo); |
| 314 | qxl_bo_unpin(qbo); | 276 | qxl_bo_unpin(qbo); |
| 315 | } | 277 | } |
| 316 | if (fb && ret) { | ||
| 317 | drm_gem_object_put_unlocked(gobj); | ||
| 318 | drm_framebuffer_cleanup(fb); | ||
| 319 | kfree(fb); | ||
| 320 | } | ||
| 321 | drm_gem_object_put_unlocked(gobj); | 278 | drm_gem_object_put_unlocked(gobj); |
| 322 | return ret; | 279 | return ret; |
| 323 | } | 280 | } |
| 324 | 281 | ||
| 325 | static int qxl_fb_find_or_create_single( | ||
| 326 | struct drm_fb_helper *helper, | ||
| 327 | struct drm_fb_helper_surface_size *sizes) | ||
| 328 | { | ||
| 329 | struct qxl_fbdev *qfbdev = | ||
| 330 | container_of(helper, struct qxl_fbdev, helper); | ||
| 331 | int new_fb = 0; | ||
| 332 | int ret; | ||
| 333 | |||
| 334 | if (!helper->fb) { | ||
| 335 | ret = qxlfb_create(qfbdev, sizes); | ||
| 336 | if (ret) | ||
| 337 | return ret; | ||
| 338 | new_fb = 1; | ||
| 339 | } | ||
| 340 | return new_fb; | ||
| 341 | } | ||
| 342 | |||
| 343 | static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev) | ||
| 344 | { | ||
| 345 | struct qxl_framebuffer *qfb = &qfbdev->qfb; | ||
| 346 | |||
| 347 | drm_fb_helper_unregister_fbi(&qfbdev->helper); | ||
| 348 | |||
| 349 | if (qfb->obj) { | ||
| 350 | qxlfb_destroy_pinned_object(qfb->obj); | ||
| 351 | qfb->obj = NULL; | ||
| 352 | } | ||
| 353 | drm_fb_helper_fini(&qfbdev->helper); | ||
| 354 | vfree(qfbdev->shadow); | ||
| 355 | drm_framebuffer_cleanup(&qfb->base); | ||
| 356 | |||
| 357 | return 0; | ||
| 358 | } | ||
| 359 | |||
| 360 | static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = { | 282 | static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = { |
| 361 | .fb_probe = qxl_fb_find_or_create_single, | 283 | .fb_probe = qxlfb_create, |
| 362 | }; | 284 | }; |
| 363 | 285 | ||
| 364 | int qxl_fbdev_init(struct qxl_device *qdev) | 286 | int qxl_fbdev_init(struct qxl_device *qdev) |
| 365 | { | 287 | { |
| 366 | int ret = 0; | 288 | return drm_fb_helper_fbdev_setup(&qdev->ddev, &qdev->fb_helper, |
| 367 | 289 | &qxl_fb_helper_funcs, 32, | |
| 368 | #ifdef CONFIG_DRM_FBDEV_EMULATION | 290 | QXLFB_CONN_LIMIT); |
| 369 | struct qxl_fbdev *qfbdev; | ||
| 370 | int bpp_sel = 32; /* TODO: parameter from somewhere? */ | ||
| 371 | |||
| 372 | qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL); | ||
| 373 | if (!qfbdev) | ||
| 374 | return -ENOMEM; | ||
| 375 | |||
| 376 | qfbdev->qdev = qdev; | ||
| 377 | qdev->mode_info.qfbdev = qfbdev; | ||
| 378 | spin_lock_init(&qfbdev->delayed_ops_lock); | ||
| 379 | INIT_LIST_HEAD(&qfbdev->delayed_ops); | ||
| 380 | |||
| 381 | drm_fb_helper_prepare(&qdev->ddev, &qfbdev->helper, | ||
| 382 | &qxl_fb_helper_funcs); | ||
| 383 | |||
| 384 | ret = drm_fb_helper_init(&qdev->ddev, &qfbdev->helper, | ||
| 385 | QXLFB_CONN_LIMIT); | ||
| 386 | if (ret) | ||
| 387 | goto free; | ||
| 388 | |||
| 389 | ret = drm_fb_helper_single_add_all_connectors(&qfbdev->helper); | ||
| 390 | if (ret) | ||
| 391 | goto fini; | ||
| 392 | |||
| 393 | ret = drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel); | ||
| 394 | if (ret) | ||
| 395 | goto fini; | ||
| 396 | |||
| 397 | return 0; | ||
| 398 | |||
| 399 | fini: | ||
| 400 | drm_fb_helper_fini(&qfbdev->helper); | ||
| 401 | free: | ||
| 402 | kfree(qfbdev); | ||
| 403 | #endif | ||
| 404 | |||
| 405 | return ret; | ||
| 406 | } | 291 | } |
| 407 | 292 | ||
| 408 | void qxl_fbdev_fini(struct qxl_device *qdev) | 293 | void qxl_fbdev_fini(struct qxl_device *qdev) |
| 409 | { | 294 | { |
| 410 | if (!qdev->mode_info.qfbdev) | 295 | struct fb_info *fbi = qdev->fb_helper.fbdev; |
| 411 | return; | 296 | void *shadow = fbi ? fbi->screen_buffer : NULL; |
| 412 | 297 | ||
| 413 | qxl_fbdev_destroy(&qdev->ddev, qdev->mode_info.qfbdev); | 298 | drm_fb_helper_fbdev_teardown(&qdev->ddev); |
| 414 | kfree(qdev->mode_info.qfbdev); | 299 | vfree(shadow); |
| 415 | qdev->mode_info.qfbdev = NULL; | ||
| 416 | } | ||
| 417 | |||
| 418 | void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state) | ||
| 419 | { | ||
| 420 | if (!qdev->mode_info.qfbdev) | ||
| 421 | return; | ||
| 422 | |||
| 423 | drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state); | ||
| 424 | } | ||
| 425 | |||
| 426 | bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj) | ||
| 427 | { | ||
| 428 | if (qobj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj)) | ||
| 429 | return true; | ||
| 430 | return false; | ||
| 431 | } | 300 | } |
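The qxl hunks above collapse the driver's hand-rolled fbdev bring-up into the generic drm_fb_helper_fbdev_setup()/drm_fb_helper_fbdev_teardown() pair; the only driver-specific work left in teardown is freeing the shadow buffer it stashed behind fbi->screen_buffer. Below is a minimal sketch of that pattern under hypothetical names (struct my_device, my_fb_create() and MY_CONN_LIMIT are illustrative, not qxl symbols):

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>

#define MY_CONN_LIMIT 1		/* how many connectors fbdev may use */

struct my_device {
	struct drm_device ddev;
	struct drm_fb_helper fb_helper;	/* embedded, one per device */
};

/* .fb_probe callback: allocate backing storage and fill helper->fb/fbdev */
static int my_fb_create(struct drm_fb_helper *helper,
			struct drm_fb_helper_surface_size *sizes)
{
	/* allocate a BO, wrap it in a framebuffer, register the fb_info */
	return -ENOSYS;	/* sketch only */
}

static const struct drm_fb_helper_funcs my_fb_helper_funcs = {
	.fb_probe = my_fb_create,
};

static int my_fbdev_init(struct my_device *mdev)
{
	/* prepares the helper, adds connectors, sets the initial config */
	return drm_fb_helper_fbdev_setup(&mdev->ddev, &mdev->fb_helper,
					 &my_fb_helper_funcs, 32,
					 MY_CONN_LIMIT);
}

static void my_fbdev_fini(struct my_device *mdev)
{
	/* unregisters the fb_info and finalizes the helper */
	drm_fb_helper_fbdev_teardown(&mdev->ddev);
}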
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 5864cb452c5c..941f35233b1f 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c | |||
| @@ -448,6 +448,11 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev) | |||
| 448 | return 0; | 448 | return 0; |
| 449 | } | 449 | } |
| 450 | 450 | ||
| 451 | static void rockchip_drm_platform_shutdown(struct platform_device *pdev) | ||
| 452 | { | ||
| 453 | rockchip_drm_platform_remove(pdev); | ||
| 454 | } | ||
| 455 | |||
| 451 | static const struct of_device_id rockchip_drm_dt_ids[] = { | 456 | static const struct of_device_id rockchip_drm_dt_ids[] = { |
| 452 | { .compatible = "rockchip,display-subsystem", }, | 457 | { .compatible = "rockchip,display-subsystem", }, |
| 453 | { /* sentinel */ }, | 458 | { /* sentinel */ }, |
| @@ -457,6 +462,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids); | |||
| 457 | static struct platform_driver rockchip_drm_platform_driver = { | 462 | static struct platform_driver rockchip_drm_platform_driver = { |
| 458 | .probe = rockchip_drm_platform_probe, | 463 | .probe = rockchip_drm_platform_probe, |
| 459 | .remove = rockchip_drm_platform_remove, | 464 | .remove = rockchip_drm_platform_remove, |
| 465 | .shutdown = rockchip_drm_platform_shutdown, | ||
| 460 | .driver = { | 466 | .driver = { |
| 461 | .name = "rockchip-drm", | 467 | .name = "rockchip-drm", |
| 462 | .of_match_table = rockchip_drm_dt_ids, | 468 | .of_match_table = rockchip_drm_dt_ids, |
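The rockchip hunk above adds a .shutdown handler that simply reuses the remove path, presumably so the display pipeline is unbound and the controller disabled before a reboot or kexec. A minimal sketch of the same wiring, with hypothetical my_drm_* names:

#include <linux/platform_device.h>

static int my_drm_platform_probe(struct platform_device *pdev);
static int my_drm_platform_remove(struct platform_device *pdev);

/* Reuse the remove path at shutdown so nothing is left scanning out. */
static void my_drm_platform_shutdown(struct platform_device *pdev)
{
	my_drm_platform_remove(pdev);
}

static struct platform_driver my_drm_platform_driver = {
	.probe    = my_drm_platform_probe,
	.remove   = my_drm_platform_remove,
	.shutdown = my_drm_platform_shutdown,
	.driver = {
		.name = "my-drm",
	},
};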
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 0cebb2db5b99..c78cd35a1294 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | #include <drm/drmP.h> | 13 | #include <drm/drmP.h> |
| 14 | #include <drm/drm_atomic_helper.h> | 14 | #include <drm/drm_atomic_helper.h> |
| 15 | #include <drm/drm_connector.h> | ||
| 15 | #include <drm/drm_crtc.h> | 16 | #include <drm/drm_crtc.h> |
| 16 | #include <drm/drm_crtc_helper.h> | 17 | #include <drm/drm_crtc_helper.h> |
| 17 | #include <drm/drm_encoder.h> | 18 | #include <drm/drm_encoder.h> |
| @@ -277,10 +278,64 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon, | |||
| 277 | SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay)); | 278 | SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay)); |
| 278 | } | 279 | } |
| 279 | 280 | ||
| 281 | static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon, | ||
| 282 | const struct drm_connector *connector) | ||
| 283 | { | ||
| 284 | u32 bus_format = 0; | ||
| 285 | u32 val = 0; | ||
| 286 | |||
| 287 | /* XXX Would this ever happen? */ | ||
| 288 | if (!connector) | ||
| 289 | return; | ||
| 290 | |||
| 291 | /* | ||
| 292 | * FIXME: Undocumented bits | ||
| 293 | * | ||
| 294 | * The whole dithering process and these parameters are not | ||
| 295 | * explained in the vendor documents or BSP kernel code. | ||
| 296 | */ | ||
| 297 | regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PR_REG, 0x11111111); | ||
| 298 | regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PG_REG, 0x11111111); | ||
| 299 | regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PB_REG, 0x11111111); | ||
| 300 | regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LR_REG, 0x11111111); | ||
| 301 | regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LG_REG, 0x11111111); | ||
| 302 | regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LB_REG, 0x11111111); | ||
| 303 | regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL0_REG, 0x01010000); | ||
| 304 | regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL1_REG, 0x15151111); | ||
| 305 | regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL2_REG, 0x57575555); | ||
| 306 | regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL3_REG, 0x7f7f7777); | ||
| 307 | |||
| 308 | /* Do dithering if panel only supports 6 bits per color */ | ||
| 309 | if (connector->display_info.bpc == 6) | ||
| 310 | val |= SUN4I_TCON0_FRM_CTL_EN; | ||
| 311 | |||
| 312 | if (connector->display_info.num_bus_formats == 1) | ||
| 313 | bus_format = connector->display_info.bus_formats[0]; | ||
| 314 | |||
| 315 | /* Check the connection format */ | ||
| 316 | switch (bus_format) { | ||
| 317 | case MEDIA_BUS_FMT_RGB565_1X16: | ||
| 318 | /* R and B components are only 5 bits deep */ | ||
| 319 | val |= SUN4I_TCON0_FRM_CTL_MODE_R; | ||
| 320 | val |= SUN4I_TCON0_FRM_CTL_MODE_B; | ||
| 321 | case MEDIA_BUS_FMT_RGB666_1X18: | ||
| 322 | case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: | ||
| 323 | /* Fall through: enable dithering */ | ||
| 324 | val |= SUN4I_TCON0_FRM_CTL_EN; | ||
| 325 | break; | ||
| 326 | } | ||
| 327 | |||
| 328 | /* Write dithering settings */ | ||
| 329 | regmap_write(tcon->regs, SUN4I_TCON_FRM_CTL_REG, val); | ||
| 330 | } | ||
| 331 | |||
| 280 | static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon, | 332 | static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon, |
| 281 | struct mipi_dsi_device *device, | 333 | const struct drm_encoder *encoder, |
| 282 | const struct drm_display_mode *mode) | 334 | const struct drm_display_mode *mode) |
| 283 | { | 335 | { |
| 336 | /* TODO support normal CPU interface modes */ | ||
| 337 | struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder); | ||
| 338 | struct mipi_dsi_device *device = dsi->device; | ||
| 284 | u8 bpp = mipi_dsi_pixel_format_to_bpp(device->format); | 339 | u8 bpp = mipi_dsi_pixel_format_to_bpp(device->format); |
| 285 | u8 lanes = device->lanes; | 340 | u8 lanes = device->lanes; |
| 286 | u32 block_space, start_delay; | 341 | u32 block_space, start_delay; |
| @@ -291,6 +346,9 @@ static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon, | |||
| 291 | 346 | ||
| 292 | sun4i_tcon0_mode_set_common(tcon, mode); | 347 | sun4i_tcon0_mode_set_common(tcon, mode); |
| 293 | 348 | ||
| 349 | /* Set dithering if needed */ | ||
| 350 | sun4i_tcon0_mode_set_dithering(tcon, sun4i_tcon_get_connector(encoder)); | ||
| 351 | |||
| 294 | regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, | 352 | regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, |
| 295 | SUN4I_TCON0_CTL_IF_MASK, | 353 | SUN4I_TCON0_CTL_IF_MASK, |
| 296 | SUN4I_TCON0_CTL_IF_8080); | 354 | SUN4I_TCON0_CTL_IF_8080); |
| @@ -356,6 +414,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon, | |||
| 356 | tcon->dclk_max_div = 7; | 414 | tcon->dclk_max_div = 7; |
| 357 | sun4i_tcon0_mode_set_common(tcon, mode); | 415 | sun4i_tcon0_mode_set_common(tcon, mode); |
| 358 | 416 | ||
| 417 | /* Set dithering if needed */ | ||
| 418 | sun4i_tcon0_mode_set_dithering(tcon, sun4i_tcon_get_connector(encoder)); | ||
| 419 | |||
| 359 | /* Adjust clock delay */ | 420 | /* Adjust clock delay */ |
| 360 | clk_delay = sun4i_tcon_get_clk_delay(mode, 0); | 421 | clk_delay = sun4i_tcon_get_clk_delay(mode, 0); |
| 361 | regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, | 422 | regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, |
| @@ -429,6 +490,9 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, | |||
| 429 | tcon->dclk_max_div = 127; | 490 | tcon->dclk_max_div = 127; |
| 430 | sun4i_tcon0_mode_set_common(tcon, mode); | 491 | sun4i_tcon0_mode_set_common(tcon, mode); |
| 431 | 492 | ||
| 493 | /* Set dithering if needed */ | ||
| 494 | sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector); | ||
| 495 | |||
| 432 | /* Adjust clock delay */ | 496 | /* Adjust clock delay */ |
| 433 | clk_delay = sun4i_tcon_get_clk_delay(mode, 0); | 497 | clk_delay = sun4i_tcon_get_clk_delay(mode, 0); |
| 434 | regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, | 498 | regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, |
| @@ -610,16 +674,10 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon, | |||
| 610 | const struct drm_encoder *encoder, | 674 | const struct drm_encoder *encoder, |
| 611 | const struct drm_display_mode *mode) | 675 | const struct drm_display_mode *mode) |
| 612 | { | 676 | { |
| 613 | struct sun6i_dsi *dsi; | ||
| 614 | |||
| 615 | switch (encoder->encoder_type) { | 677 | switch (encoder->encoder_type) { |
| 616 | case DRM_MODE_ENCODER_DSI: | 678 | case DRM_MODE_ENCODER_DSI: |
| 617 | /* | 679 | /* DSI is tied to special case of CPU interface */ |
| 618 | * This is not really elegant, but it's the "cleaner" | 680 | sun4i_tcon0_mode_set_cpu(tcon, encoder, mode); |
| 619 | * way I could think of... | ||
| 620 | */ | ||
| 621 | dsi = encoder_to_sun6i_dsi(encoder); | ||
| 622 | sun4i_tcon0_mode_set_cpu(tcon, dsi->device, mode); | ||
| 623 | break; | 681 | break; |
| 624 | case DRM_MODE_ENCODER_LVDS: | 682 | case DRM_MODE_ENCODER_LVDS: |
| 625 | sun4i_tcon0_mode_set_lvds(tcon, encoder, mode); | 683 | sun4i_tcon0_mode_set_lvds(tcon, encoder, mode); |
| @@ -916,7 +974,8 @@ static bool sun4i_tcon_connected_to_tcon_top(struct device_node *node) | |||
| 916 | 974 | ||
| 917 | remote = of_graph_get_remote_node(node, 0, -1); | 975 | remote = of_graph_get_remote_node(node, 0, -1); |
| 918 | if (remote) { | 976 | if (remote) { |
| 919 | ret = !!of_match_node(sun8i_tcon_top_of_table, remote); | 977 | ret = !!(IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) && |
| 978 | of_match_node(sun8i_tcon_top_of_table, remote)); | ||
| 920 | of_node_put(remote); | 979 | of_node_put(remote); |
| 921 | } | 980 | } |
| 922 | 981 | ||
| @@ -1344,13 +1403,20 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon, | |||
| 1344 | if (!pdev) | 1403 | if (!pdev) |
| 1345 | return -EINVAL; | 1404 | return -EINVAL; |
| 1346 | 1405 | ||
| 1347 | if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) { | 1406 | if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) && |
| 1407 | encoder->encoder_type == DRM_MODE_ENCODER_TMDS) { | ||
| 1348 | ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id); | 1408 | ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id); |
| 1349 | if (ret) | 1409 | if (ret) |
| 1350 | return ret; | 1410 | return ret; |
| 1351 | } | 1411 | } |
| 1352 | 1412 | ||
| 1353 | return sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id); | 1413 | if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) { |
| 1414 | ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id); | ||
| 1415 | if (ret) | ||
| 1416 | return ret; | ||
| 1417 | } | ||
| 1418 | |||
| 1419 | return 0; | ||
| 1354 | } | 1420 | } |
| 1355 | 1421 | ||
| 1356 | static const struct sun4i_tcon_quirks sun4i_a10_quirks = { | 1422 | static const struct sun4i_tcon_quirks sun4i_a10_quirks = { |
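The TCON_TOP call sites above are now wrapped in IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP), which lets the compiler discard those calls when the optional TCON_TOP support is not configured while keeping them type-checked. A minimal sketch of the idiom; CONFIG_MY_OPTIONAL_HELPER and my_optional_setup() are hypothetical:

#include <linux/kconfig.h>
#include <linux/device.h>
#include <linux/errno.h>

int my_optional_setup(struct device *dev);	/* built only with the option */

static int my_bind(struct device *dev)
{
	int ret;

	/*
	 * IS_ENABLED() expands to a compile-time 0 or 1, so with the option
	 * disabled this branch is dead code and the optimizer drops the call.
	 */
	if (IS_ENABLED(CONFIG_MY_OPTIONAL_HELPER)) {
		ret = my_optional_setup(dev);
		if (ret)
			return ret;
	}

	return 0;
}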
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index f6a071cd5a6f..3d492c8be1fc 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h | |||
| @@ -37,18 +37,21 @@ | |||
| 37 | #define SUN4I_TCON_GINT1_REG 0x8 | 37 | #define SUN4I_TCON_GINT1_REG 0x8 |
| 38 | 38 | ||
| 39 | #define SUN4I_TCON_FRM_CTL_REG 0x10 | 39 | #define SUN4I_TCON_FRM_CTL_REG 0x10 |
| 40 | #define SUN4I_TCON_FRM_CTL_EN BIT(31) | 40 | #define SUN4I_TCON0_FRM_CTL_EN BIT(31) |
| 41 | 41 | #define SUN4I_TCON0_FRM_CTL_MODE_R BIT(6) | |
| 42 | #define SUN4I_TCON_FRM_SEED_PR_REG 0x14 | 42 | #define SUN4I_TCON0_FRM_CTL_MODE_G BIT(5) |
| 43 | #define SUN4I_TCON_FRM_SEED_PG_REG 0x18 | 43 | #define SUN4I_TCON0_FRM_CTL_MODE_B BIT(4) |
| 44 | #define SUN4I_TCON_FRM_SEED_PB_REG 0x1c | 44 | |
| 45 | #define SUN4I_TCON_FRM_SEED_LR_REG 0x20 | 45 | #define SUN4I_TCON0_FRM_SEED_PR_REG 0x14 |
| 46 | #define SUN4I_TCON_FRM_SEED_LG_REG 0x24 | 46 | #define SUN4I_TCON0_FRM_SEED_PG_REG 0x18 |
| 47 | #define SUN4I_TCON_FRM_SEED_LB_REG 0x28 | 47 | #define SUN4I_TCON0_FRM_SEED_PB_REG 0x1c |
| 48 | #define SUN4I_TCON_FRM_TBL0_REG 0x2c | 48 | #define SUN4I_TCON0_FRM_SEED_LR_REG 0x20 |
| 49 | #define SUN4I_TCON_FRM_TBL1_REG 0x30 | 49 | #define SUN4I_TCON0_FRM_SEED_LG_REG 0x24 |
| 50 | #define SUN4I_TCON_FRM_TBL2_REG 0x34 | 50 | #define SUN4I_TCON0_FRM_SEED_LB_REG 0x28 |
| 51 | #define SUN4I_TCON_FRM_TBL3_REG 0x38 | 51 | #define SUN4I_TCON0_FRM_TBL0_REG 0x2c |
| 52 | #define SUN4I_TCON0_FRM_TBL1_REG 0x30 | ||
| 53 | #define SUN4I_TCON0_FRM_TBL2_REG 0x34 | ||
| 54 | #define SUN4I_TCON0_FRM_TBL3_REG 0x38 | ||
| 52 | 55 | ||
| 53 | #define SUN4I_TCON0_CTL_REG 0x40 | 56 | #define SUN4I_TCON0_CTL_REG 0x40 |
| 54 | #define SUN4I_TCON0_CTL_TCON_ENABLE BIT(31) | 57 | #define SUN4I_TCON0_CTL_TCON_ENABLE BIT(31) |
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 5ce24098a5fd..70c54774400b 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c | |||
| @@ -521,12 +521,12 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, | |||
| 521 | kref_init(&exec->refcount); | 521 | kref_init(&exec->refcount); |
| 522 | 522 | ||
| 523 | ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl, | 523 | ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl, |
| 524 | &exec->bin.in_fence); | 524 | 0, &exec->bin.in_fence); |
| 525 | if (ret == -EINVAL) | 525 | if (ret == -EINVAL) |
| 526 | goto fail; | 526 | goto fail; |
| 527 | 527 | ||
| 528 | ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl, | 528 | ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl, |
| 529 | &exec->render.in_fence); | 529 | 0, &exec->render.in_fence); |
| 530 | if (ret == -EINVAL) | 530 | if (ret == -EINVAL) |
| 531 | goto fail; | 531 | goto fail; |
| 532 | 532 | ||
| @@ -584,7 +584,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, | |||
| 584 | /* Update the return sync object for the */ | 584 | /* Update the return sync object for the */ |
| 585 | sync_out = drm_syncobj_find(file_priv, args->out_sync); | 585 | sync_out = drm_syncobj_find(file_priv, args->out_sync); |
| 586 | if (sync_out) { | 586 | if (sync_out) { |
| 587 | drm_syncobj_replace_fence(sync_out, | 587 | drm_syncobj_replace_fence(sync_out, 0, |
| 588 | &exec->render.base.s_fence->finished); | 588 | &exec->render.base.s_fence->finished); |
| 589 | drm_syncobj_put(sync_out); | 589 | drm_syncobj_put(sync_out); |
| 590 | } | 590 | } |
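The v3d change above (and the vc4 changes below) track a new parameter in drm_syncobj_find_fence() and drm_syncobj_replace_fence(); every call site here passes 0 for it, which keeps the existing binary-syncobj behaviour (the extra argument appears to be a sync point for future timeline use). A condensed sketch of the updated calling pattern around a job submission; struct my_job and my_attach_syncobjs() are illustrative:

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <linux/dma-fence.h>

struct my_job {
	struct dma_fence *in_fence;	/* wait on this before running */
	struct dma_fence *done_fence;	/* signalled when the job finishes */
};

static int my_attach_syncobjs(struct drm_file *file_priv, u32 in_handle,
			      u32 out_handle, struct my_job *job)
{
	struct drm_syncobj *out_sync;
	int ret;

	/* third argument is the new one; 0 = plain binary syncobj */
	ret = drm_syncobj_find_fence(file_priv, in_handle, 0, &job->in_fence);
	if (ret)
		return ret;

	/* ... queue the job here, producing job->done_fence ... */

	out_sync = drm_syncobj_find(file_priv, out_handle);
	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, 0, job->done_fence);
		drm_syncobj_put(out_sync);
	}

	return 0;
}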
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 0e6a121858d1..3ce136ba8791 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <drm/drm_atomic.h> | 35 | #include <drm/drm_atomic.h> |
| 36 | #include <drm/drm_atomic_helper.h> | 36 | #include <drm/drm_atomic_helper.h> |
| 37 | #include <drm/drm_crtc_helper.h> | 37 | #include <drm/drm_crtc_helper.h> |
| 38 | #include <drm/drm_atomic_uapi.h> | ||
| 38 | #include <linux/clk.h> | 39 | #include <linux/clk.h> |
| 39 | #include <drm/drm_fb_cma_helper.h> | 40 | #include <drm/drm_fb_cma_helper.h> |
| 40 | #include <linux/component.h> | 41 | #include <linux/component.h> |
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 7910b9acedd6..5b22e996af6c 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c | |||
| @@ -681,7 +681,7 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec, | |||
| 681 | exec->fence = &fence->base; | 681 | exec->fence = &fence->base; |
| 682 | 682 | ||
| 683 | if (out_sync) | 683 | if (out_sync) |
| 684 | drm_syncobj_replace_fence(out_sync, exec->fence); | 684 | drm_syncobj_replace_fence(out_sync, 0, exec->fence); |
| 685 | 685 | ||
| 686 | vc4_update_bo_seqnos(exec, seqno); | 686 | vc4_update_bo_seqnos(exec, seqno); |
| 687 | 687 | ||
| @@ -1173,7 +1173,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, | |||
| 1173 | 1173 | ||
| 1174 | if (args->in_sync) { | 1174 | if (args->in_sync) { |
| 1175 | ret = drm_syncobj_find_fence(file_priv, args->in_sync, | 1175 | ret = drm_syncobj_find_fence(file_priv, args->in_sync, |
| 1176 | &in_fence); | 1176 | 0, &in_fence); |
| 1177 | if (ret) | 1177 | if (ret) |
| 1178 | goto fail; | 1178 | goto fail; |
| 1179 | 1179 | ||
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index cf78f74bb87f..f39ee212412d 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <drm/drm_atomic_helper.h> | 22 | #include <drm/drm_atomic_helper.h> |
| 23 | #include <drm/drm_fb_cma_helper.h> | 23 | #include <drm/drm_fb_cma_helper.h> |
| 24 | #include <drm/drm_plane_helper.h> | 24 | #include <drm/drm_plane_helper.h> |
| 25 | #include <drm/drm_atomic_uapi.h> | ||
| 25 | 26 | ||
| 26 | #include "uapi/drm/vc4_drm.h" | 27 | #include "uapi/drm/vc4_drm.h" |
| 27 | #include "vc4_drv.h" | 28 | #include "vc4_drv.h" |
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 9f1e0a669d4c..0379d6897659 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c | |||
| @@ -75,12 +75,9 @@ virtio_gpu_framebuffer_init(struct drm_device *dev, | |||
| 75 | struct drm_gem_object *obj) | 75 | struct drm_gem_object *obj) |
| 76 | { | 76 | { |
| 77 | int ret; | 77 | int ret; |
| 78 | struct virtio_gpu_object *bo; | ||
| 79 | 78 | ||
| 80 | vgfb->base.obj[0] = obj; | 79 | vgfb->base.obj[0] = obj; |
| 81 | 80 | ||
| 82 | bo = gem_to_virtio_gpu_obj(obj); | ||
| 83 | |||
| 84 | drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd); | 81 | drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd); |
| 85 | 82 | ||
| 86 | ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs); | 83 | ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs); |
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c index 68db42f15086..0a2745646dfa 100644 --- a/drivers/gpu/drm/vkms/vkms_crc.c +++ b/drivers/gpu/drm/vkms/vkms_crc.c | |||
| @@ -1,36 +1,143 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | #include "vkms_drv.h" | 2 | #include "vkms_drv.h" |
| 3 | #include <linux/crc32.h> | 3 | #include <linux/crc32.h> |
| 4 | #include <drm/drm_atomic.h> | ||
| 5 | #include <drm/drm_atomic_helper.h> | ||
| 4 | #include <drm/drm_gem_framebuffer_helper.h> | 6 | #include <drm/drm_gem_framebuffer_helper.h> |
| 5 | 7 | ||
| 6 | static uint32_t _vkms_get_crc(struct vkms_crc_data *crc_data) | 8 | /** |
| 9 | * compute_crc - Compute CRC value on output frame | ||
| 10 | * | ||
| 11 | * @vaddr_out: address of the final framebuffer | ||
| 12 | * @crc_out: framebuffer's metadata | ||
| 13 | * | ||
| 14 | * Returns the CRC value computed using crc32 on the visible portion of | ||
| 15 | * the final framebuffer at vaddr_out | ||
| 16 | */ | ||
| 17 | static uint32_t compute_crc(void *vaddr_out, struct vkms_crc_data *crc_out) | ||
| 18 | { | ||
| 19 | int i, j, src_offset; | ||
| 20 | int x_src = crc_out->src.x1 >> 16; | ||
| 21 | int y_src = crc_out->src.y1 >> 16; | ||
| 22 | int h_src = drm_rect_height(&crc_out->src) >> 16; | ||
| 23 | int w_src = drm_rect_width(&crc_out->src) >> 16; | ||
| 24 | u32 crc = 0; | ||
| 25 | |||
| 26 | for (i = y_src; i < y_src + h_src; ++i) { | ||
| 27 | for (j = x_src; j < x_src + w_src; ++j) { | ||
| 28 | src_offset = crc_out->offset | ||
| 29 | + (i * crc_out->pitch) | ||
| 30 | + (j * crc_out->cpp); | ||
| 31 | /* XRGB format ignores Alpha channel */ | ||
| 32 | memset(vaddr_out + src_offset + 24, 0, 8); | ||
| 33 | crc = crc32_le(crc, vaddr_out + src_offset, | ||
| 34 | sizeof(u32)); | ||
| 35 | } | ||
| 36 | } | ||
| 37 | |||
| 38 | return crc; | ||
| 39 | } | ||
| 40 | |||
| 41 | /** | ||
| 42 | * blend - blend value at vaddr_src with value at vaddr_dst | ||
| 43 | * @vaddr_dst: destination address | ||
| 44 | * @vaddr_src: source address | ||
| 45 | * @crc_dst: destination framebuffer's metadata | ||
| 46 | * @crc_src: source framebuffer's metadata | ||
| 47 | * | ||
| 48 | * Blend value at vaddr_src with value at vaddr_dst. | ||
| 49 | * Currently, this function writes the value at vaddr_src over the | ||
| 50 | * value at vaddr_dst, using the buffers' metadata to locate the new | ||
| 51 | * values at vaddr_src and their destination at vaddr_dst. | ||
| 52 | * | ||
| 53 | * Todo: Use the alpha value to blend vaddr_src with vaddr_dst | ||
| 54 | * instead of overwriting it. | ||
| 55 | */ | ||
| 56 | static void blend(void *vaddr_dst, void *vaddr_src, | ||
| 57 | struct vkms_crc_data *crc_dst, | ||
| 58 | struct vkms_crc_data *crc_src) | ||
| 7 | { | 59 | { |
| 8 | struct drm_framebuffer *fb = &crc_data->fb; | 60 | int i, j, j_dst, i_dst; |
| 61 | int offset_src, offset_dst; | ||
| 62 | |||
| 63 | int x_src = crc_src->src.x1 >> 16; | ||
| 64 | int y_src = crc_src->src.y1 >> 16; | ||
| 65 | |||
| 66 | int x_dst = crc_src->dst.x1; | ||
| 67 | int y_dst = crc_src->dst.y1; | ||
| 68 | int h_dst = drm_rect_height(&crc_src->dst); | ||
| 69 | int w_dst = drm_rect_width(&crc_src->dst); | ||
| 70 | |||
| 71 | int y_limit = y_src + h_dst; | ||
| 72 | int x_limit = x_src + w_dst; | ||
| 73 | |||
| 74 | for (i = y_src, i_dst = y_dst; i < y_limit; ++i) { | ||
| 75 | for (j = x_src, j_dst = x_dst; j < x_limit; ++j) { | ||
| 76 | offset_dst = crc_dst->offset | ||
| 77 | + (i_dst * crc_dst->pitch) | ||
| 78 | + (j_dst++ * crc_dst->cpp); | ||
| 79 | offset_src = crc_src->offset | ||
| 80 | + (i * crc_src->pitch) | ||
| 81 | + (j * crc_src->cpp); | ||
| 82 | |||
| 83 | memcpy(vaddr_dst + offset_dst, | ||
| 84 | vaddr_src + offset_src, sizeof(u32)); | ||
| 85 | } | ||
| 86 | i_dst++; | ||
| 87 | } | ||
| 88 | } | ||
| 89 | |||
| 90 | static void compose_cursor(struct vkms_crc_data *cursor_crc, | ||
| 91 | struct vkms_crc_data *primary_crc, void *vaddr_out) | ||
| 92 | { | ||
| 93 | struct drm_gem_object *cursor_obj; | ||
| 94 | struct vkms_gem_object *cursor_vkms_obj; | ||
| 95 | |||
| 96 | cursor_obj = drm_gem_fb_get_obj(&cursor_crc->fb, 0); | ||
| 97 | cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj); | ||
| 98 | |||
| 99 | mutex_lock(&cursor_vkms_obj->pages_lock); | ||
| 100 | if (!cursor_vkms_obj->vaddr) { | ||
| 101 | DRM_WARN("cursor plane vaddr is NULL"); | ||
| 102 | goto out; | ||
| 103 | } | ||
| 104 | |||
| 105 | blend(vaddr_out, cursor_vkms_obj->vaddr, primary_crc, cursor_crc); | ||
| 106 | |||
| 107 | out: | ||
| 108 | mutex_unlock(&cursor_vkms_obj->pages_lock); | ||
| 109 | } | ||
| 110 | |||
| 111 | static uint32_t _vkms_get_crc(struct vkms_crc_data *primary_crc, | ||
| 112 | struct vkms_crc_data *cursor_crc) | ||
| 113 | { | ||
| 114 | struct drm_framebuffer *fb = &primary_crc->fb; | ||
| 9 | struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0); | 115 | struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0); |
| 10 | struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj); | 116 | struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj); |
| 117 | void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL); | ||
| 11 | u32 crc = 0; | 118 | u32 crc = 0; |
| 12 | int i = 0; | ||
| 13 | unsigned int x = crc_data->src.x1 >> 16; | ||
| 14 | unsigned int y = crc_data->src.y1 >> 16; | ||
| 15 | unsigned int height = drm_rect_height(&crc_data->src) >> 16; | ||
| 16 | unsigned int width = drm_rect_width(&crc_data->src) >> 16; | ||
| 17 | unsigned int cpp = fb->format->cpp[0]; | ||
| 18 | unsigned int src_offset; | ||
| 19 | unsigned int size_byte = width * cpp; | ||
| 20 | void *vaddr; | ||
| 21 | 119 | ||
| 22 | mutex_lock(&vkms_obj->pages_lock); | 120 | if (!vaddr_out) { |
| 23 | vaddr = vkms_obj->vaddr; | 121 | DRM_ERROR("Failed to allocate memory for output frame."); |
| 24 | if (WARN_ON(!vaddr)) | 122 | return 0; |
| 25 | goto out; | 123 | } |
| 26 | 124 | ||
| 27 | for (i = y; i < y + height; i++) { | 125 | mutex_lock(&vkms_obj->pages_lock); |
| 28 | src_offset = fb->offsets[0] + (i * fb->pitches[0]) + (x * cpp); | 126 | if (WARN_ON(!vkms_obj->vaddr)) { |
| 29 | crc = crc32_le(crc, vaddr + src_offset, size_byte); | 127 | mutex_unlock(&vkms_obj->pages_lock); |
| 128 | return crc; | ||
| 30 | } | 129 | } |
| 31 | 130 | ||
| 32 | out: | 131 | memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size); |
| 33 | mutex_unlock(&vkms_obj->pages_lock); | 132 | mutex_unlock(&vkms_obj->pages_lock); |
| 133 | |||
| 134 | if (cursor_crc) | ||
| 135 | compose_cursor(cursor_crc, primary_crc, vaddr_out); | ||
| 136 | |||
| 137 | crc = compute_crc(vaddr_out, primary_crc); | ||
| 138 | |||
| 139 | kfree(vaddr_out); | ||
| 140 | |||
| 34 | return crc; | 141 | return crc; |
| 35 | } | 142 | } |
| 36 | 143 | ||
| @@ -53,6 +160,7 @@ void vkms_crc_work_handle(struct work_struct *work) | |||
| 53 | struct vkms_device *vdev = container_of(out, struct vkms_device, | 160 | struct vkms_device *vdev = container_of(out, struct vkms_device, |
| 54 | output); | 161 | output); |
| 55 | struct vkms_crc_data *primary_crc = NULL; | 162 | struct vkms_crc_data *primary_crc = NULL; |
| 163 | struct vkms_crc_data *cursor_crc = NULL; | ||
| 56 | struct drm_plane *plane; | 164 | struct drm_plane *plane; |
| 57 | u32 crc32 = 0; | 165 | u32 crc32 = 0; |
| 58 | u64 frame_start, frame_end; | 166 | u64 frame_start, frame_end; |
| @@ -77,14 +185,14 @@ void vkms_crc_work_handle(struct work_struct *work) | |||
| 77 | if (drm_framebuffer_read_refcount(&crc_data->fb) == 0) | 185 | if (drm_framebuffer_read_refcount(&crc_data->fb) == 0) |
| 78 | continue; | 186 | continue; |
| 79 | 187 | ||
| 80 | if (plane->type == DRM_PLANE_TYPE_PRIMARY) { | 188 | if (plane->type == DRM_PLANE_TYPE_PRIMARY) |
| 81 | primary_crc = crc_data; | 189 | primary_crc = crc_data; |
| 82 | break; | 190 | else |
| 83 | } | 191 | cursor_crc = crc_data; |
| 84 | } | 192 | } |
| 85 | 193 | ||
| 86 | if (primary_crc) | 194 | if (primary_crc) |
| 87 | crc32 = _vkms_get_crc(primary_crc); | 195 | crc32 = _vkms_get_crc(primary_crc, cursor_crc); |
| 88 | 196 | ||
| 89 | frame_end = drm_crtc_accurate_vblank_count(crtc); | 197 | frame_end = drm_crtc_accurate_vblank_count(crtc); |
| 90 | 198 | ||
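The reworked vkms CRC path above now snapshots the primary framebuffer into a scratch buffer, overlays the cursor through blend() (currently a straight copy rather than real alpha blending), masks the alpha channel of each XRGB pixel and then hashes the visible rectangle with crc32. The inner walk reduces to the sketch below; crc_of_rect() is an illustrative name, while the crc32_le() call and the offset + y * pitch + x * cpp addressing follow the hunk:

#include <linux/crc32.h>
#include <linux/types.h>

/*
 * offset/pitch/cpp are the framebuffer metadata cached at atomic_update
 * time; x/y/w/h are the visible rect in whole pixels (plane src coords are
 * 16.16 fixed point, hence the >> 16 in the driver before calling this).
 */
static u32 crc_of_rect(const void *vaddr, unsigned int offset,
		       unsigned int pitch, unsigned int cpp,
		       int x, int y, int w, int h)
{
	u32 crc = 0;
	int i, j;

	for (i = y; i < y + h; i++)
		for (j = x; j < x + w; j++)
			crc = crc32_le(crc, vaddr + offset + i * pitch + j * cpp,
				       sizeof(u32));

	return crc;
}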
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index bd9d4b2389bd..07cfde1b4132 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c | |||
| @@ -5,6 +5,15 @@ | |||
| 5 | * (at your option) any later version. | 5 | * (at your option) any later version. |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | /** | ||
| 9 | * DOC: vkms (Virtual Kernel Modesetting) | ||
| 10 | * | ||
| 11 | * vkms is a software-only model of a kms driver that is useful for testing, | ||
| 12 | * or for running X (or similar) on headless machines while still being | ||
| 13 | * able to use the GPU. vkms aims to enable a virtual display without the | ||
| 14 | * need for a hardware display capability. | ||
| 15 | */ | ||
| 16 | |||
| 8 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 9 | #include <drm/drm_gem.h> | 18 | #include <drm/drm_gem.h> |
| 10 | #include <drm/drm_crtc_helper.h> | 19 | #include <drm/drm_crtc_helper.h> |
| @@ -21,6 +30,10 @@ | |||
| 21 | 30 | ||
| 22 | static struct vkms_device *vkms_device; | 31 | static struct vkms_device *vkms_device; |
| 23 | 32 | ||
| 33 | bool enable_cursor; | ||
| 34 | module_param_named(enable_cursor, enable_cursor, bool, 0444); | ||
| 35 | MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support"); | ||
| 36 | |||
| 24 | static const struct file_operations vkms_driver_fops = { | 37 | static const struct file_operations vkms_driver_fops = { |
| 25 | .owner = THIS_MODULE, | 38 | .owner = THIS_MODULE, |
| 26 | .open = drm_open, | 39 | .open = drm_open, |
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 80af6d3a65e7..1c93990693e3 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h | |||
| @@ -7,8 +7,8 @@ | |||
| 7 | #include <drm/drm_encoder.h> | 7 | #include <drm/drm_encoder.h> |
| 8 | #include <linux/hrtimer.h> | 8 | #include <linux/hrtimer.h> |
| 9 | 9 | ||
| 10 | #define XRES_MIN 32 | 10 | #define XRES_MIN 20 |
| 11 | #define YRES_MIN 32 | 11 | #define YRES_MIN 20 |
| 12 | 12 | ||
| 13 | #define XRES_DEF 1024 | 13 | #define XRES_DEF 1024 |
| 14 | #define YRES_DEF 768 | 14 | #define YRES_DEF 768 |
| @@ -16,13 +16,22 @@ | |||
| 16 | #define XRES_MAX 8192 | 16 | #define XRES_MAX 8192 |
| 17 | #define YRES_MAX 8192 | 17 | #define YRES_MAX 8192 |
| 18 | 18 | ||
| 19 | extern bool enable_cursor; | ||
| 20 | |||
| 19 | static const u32 vkms_formats[] = { | 21 | static const u32 vkms_formats[] = { |
| 20 | DRM_FORMAT_XRGB8888, | 22 | DRM_FORMAT_XRGB8888, |
| 21 | }; | 23 | }; |
| 22 | 24 | ||
| 25 | static const u32 vkms_cursor_formats[] = { | ||
| 26 | DRM_FORMAT_ARGB8888, | ||
| 27 | }; | ||
| 28 | |||
| 23 | struct vkms_crc_data { | 29 | struct vkms_crc_data { |
| 24 | struct drm_rect src; | ||
| 25 | struct drm_framebuffer fb; | 30 | struct drm_framebuffer fb; |
| 31 | struct drm_rect src, dst; | ||
| 32 | unsigned int offset; | ||
| 33 | unsigned int pitch; | ||
| 34 | unsigned int cpp; | ||
| 26 | }; | 35 | }; |
| 27 | 36 | ||
| 28 | /** | 37 | /** |
| @@ -104,7 +113,8 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, | |||
| 104 | 113 | ||
| 105 | int vkms_output_init(struct vkms_device *vkmsdev); | 114 | int vkms_output_init(struct vkms_device *vkmsdev); |
| 106 | 115 | ||
| 107 | struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev); | 116 | struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev, |
| 117 | enum drm_plane_type type); | ||
| 108 | 118 | ||
| 109 | /* Gem stuff */ | 119 | /* Gem stuff */ |
| 110 | struct drm_gem_object *vkms_gem_create(struct drm_device *dev, | 120 | struct drm_gem_object *vkms_gem_create(struct drm_device *dev, |
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 901012cb1af1..271a0eb9042c 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c | |||
| @@ -49,14 +49,22 @@ int vkms_output_init(struct vkms_device *vkmsdev) | |||
| 49 | struct drm_connector *connector = &output->connector; | 49 | struct drm_connector *connector = &output->connector; |
| 50 | struct drm_encoder *encoder = &output->encoder; | 50 | struct drm_encoder *encoder = &output->encoder; |
| 51 | struct drm_crtc *crtc = &output->crtc; | 51 | struct drm_crtc *crtc = &output->crtc; |
| 52 | struct drm_plane *primary; | 52 | struct drm_plane *primary, *cursor = NULL; |
| 53 | int ret; | 53 | int ret; |
| 54 | 54 | ||
| 55 | primary = vkms_plane_init(vkmsdev); | 55 | primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY); |
| 56 | if (IS_ERR(primary)) | 56 | if (IS_ERR(primary)) |
| 57 | return PTR_ERR(primary); | 57 | return PTR_ERR(primary); |
| 58 | 58 | ||
| 59 | ret = vkms_crtc_init(dev, crtc, primary, NULL); | 59 | if (enable_cursor) { |
| 60 | cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR); | ||
| 61 | if (IS_ERR(cursor)) { | ||
| 62 | ret = PTR_ERR(cursor); | ||
| 63 | goto err_cursor; | ||
| 64 | } | ||
| 65 | } | ||
| 66 | |||
| 67 | ret = vkms_crtc_init(dev, crtc, primary, cursor); | ||
| 60 | if (ret) | 68 | if (ret) |
| 61 | goto err_crtc; | 69 | goto err_crtc; |
| 62 | 70 | ||
| @@ -106,6 +114,11 @@ err_connector: | |||
| 106 | drm_crtc_cleanup(crtc); | 114 | drm_crtc_cleanup(crtc); |
| 107 | 115 | ||
| 108 | err_crtc: | 116 | err_crtc: |
| 117 | if (enable_cursor) | ||
| 118 | drm_plane_cleanup(cursor); | ||
| 119 | |||
| 120 | err_cursor: | ||
| 109 | drm_plane_cleanup(primary); | 121 | drm_plane_cleanup(primary); |
| 122 | |||
| 110 | return ret; | 123 | return ret; |
| 111 | } | 124 | } |
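The vkms output change above creates the cursor plane only when the new enable_cursor parameter is set, and unwinds in reverse order if the CRTC setup fails afterwards. A condensed sketch of that shape; the my_* names are illustrative stand-ins for the vkms functions:

#include <linux/err.h>
#include <drm/drm_plane.h>

extern bool enable_cursor;	/* the new vkms module parameter */

struct my_device;
struct drm_plane *my_plane_init(struct my_device *mdev, enum drm_plane_type type);
int my_crtc_init(struct my_device *mdev, struct drm_plane *primary,
		 struct drm_plane *cursor);

static int my_output_init(struct my_device *mdev)
{
	struct drm_plane *primary, *cursor = NULL;
	int ret;

	primary = my_plane_init(mdev, DRM_PLANE_TYPE_PRIMARY);
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	if (enable_cursor) {
		cursor = my_plane_init(mdev, DRM_PLANE_TYPE_CURSOR);
		if (IS_ERR(cursor)) {
			ret = PTR_ERR(cursor);
			goto err_cursor;
		}
	}

	ret = my_crtc_init(mdev, primary, cursor);
	if (ret)
		goto err_crtc;

	return 0;

err_crtc:
	if (cursor)
		drm_plane_cleanup(cursor);
err_cursor:
	drm_plane_cleanup(primary);
	return ret;
}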
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index c91661631c76..7041007396ae 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c | |||
| @@ -81,26 +81,33 @@ static const struct drm_plane_funcs vkms_plane_funcs = { | |||
| 81 | .atomic_destroy_state = vkms_plane_destroy_state, | 81 | .atomic_destroy_state = vkms_plane_destroy_state, |
| 82 | }; | 82 | }; |
| 83 | 83 | ||
| 84 | static void vkms_primary_plane_update(struct drm_plane *plane, | 84 | static void vkms_plane_atomic_update(struct drm_plane *plane, |
| 85 | struct drm_plane_state *old_state) | 85 | struct drm_plane_state *old_state) |
| 86 | { | 86 | { |
| 87 | struct vkms_plane_state *vkms_plane_state; | 87 | struct vkms_plane_state *vkms_plane_state; |
| 88 | struct drm_framebuffer *fb = plane->state->fb; | ||
| 88 | struct vkms_crc_data *crc_data; | 89 | struct vkms_crc_data *crc_data; |
| 89 | 90 | ||
| 90 | if (!plane->state->crtc || !plane->state->fb) | 91 | if (!plane->state->crtc || !fb) |
| 91 | return; | 92 | return; |
| 92 | 93 | ||
| 93 | vkms_plane_state = to_vkms_plane_state(plane->state); | 94 | vkms_plane_state = to_vkms_plane_state(plane->state); |
| 95 | |||
| 94 | crc_data = vkms_plane_state->crc_data; | 96 | crc_data = vkms_plane_state->crc_data; |
| 95 | memcpy(&crc_data->src, &plane->state->src, sizeof(struct drm_rect)); | 97 | memcpy(&crc_data->src, &plane->state->src, sizeof(struct drm_rect)); |
| 96 | memcpy(&crc_data->fb, plane->state->fb, sizeof(struct drm_framebuffer)); | 98 | memcpy(&crc_data->dst, &plane->state->dst, sizeof(struct drm_rect)); |
| 99 | memcpy(&crc_data->fb, fb, sizeof(struct drm_framebuffer)); | ||
| 97 | drm_framebuffer_get(&crc_data->fb); | 100 | drm_framebuffer_get(&crc_data->fb); |
| 101 | crc_data->offset = fb->offsets[0]; | ||
| 102 | crc_data->pitch = fb->pitches[0]; | ||
| 103 | crc_data->cpp = fb->format->cpp[0]; | ||
| 98 | } | 104 | } |
| 99 | 105 | ||
| 100 | static int vkms_plane_atomic_check(struct drm_plane *plane, | 106 | static int vkms_plane_atomic_check(struct drm_plane *plane, |
| 101 | struct drm_plane_state *state) | 107 | struct drm_plane_state *state) |
| 102 | { | 108 | { |
| 103 | struct drm_crtc_state *crtc_state; | 109 | struct drm_crtc_state *crtc_state; |
| 110 | bool can_position = false; | ||
| 104 | int ret; | 111 | int ret; |
| 105 | 112 | ||
| 106 | if (!state->fb | !state->crtc) | 113 | if (!state->fb | !state->crtc) |
| @@ -110,15 +117,18 @@ static int vkms_plane_atomic_check(struct drm_plane *plane, | |||
| 110 | if (IS_ERR(crtc_state)) | 117 | if (IS_ERR(crtc_state)) |
| 111 | return PTR_ERR(crtc_state); | 118 | return PTR_ERR(crtc_state); |
| 112 | 119 | ||
| 120 | if (plane->type == DRM_PLANE_TYPE_CURSOR) | ||
| 121 | can_position = true; | ||
| 122 | |||
| 113 | ret = drm_atomic_helper_check_plane_state(state, crtc_state, | 123 | ret = drm_atomic_helper_check_plane_state(state, crtc_state, |
| 114 | DRM_PLANE_HELPER_NO_SCALING, | 124 | DRM_PLANE_HELPER_NO_SCALING, |
| 115 | DRM_PLANE_HELPER_NO_SCALING, | 125 | DRM_PLANE_HELPER_NO_SCALING, |
| 116 | false, true); | 126 | can_position, true); |
| 117 | if (ret != 0) | 127 | if (ret != 0) |
| 118 | return ret; | 128 | return ret; |
| 119 | 129 | ||
| 120 | /* for now primary plane must be visible and full screen */ | 130 | /* for now primary plane must be visible and full screen */ |
| 121 | if (!state->visible) | 131 | if (!state->visible && !can_position) |
| 122 | return -EINVAL; | 132 | return -EINVAL; |
| 123 | 133 | ||
| 124 | return 0; | 134 | return 0; |
| @@ -156,15 +166,17 @@ static void vkms_cleanup_fb(struct drm_plane *plane, | |||
| 156 | } | 166 | } |
| 157 | 167 | ||
| 158 | static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { | 168 | static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { |
| 159 | .atomic_update = vkms_primary_plane_update, | 169 | .atomic_update = vkms_plane_atomic_update, |
| 160 | .atomic_check = vkms_plane_atomic_check, | 170 | .atomic_check = vkms_plane_atomic_check, |
| 161 | .prepare_fb = vkms_prepare_fb, | 171 | .prepare_fb = vkms_prepare_fb, |
| 162 | .cleanup_fb = vkms_cleanup_fb, | 172 | .cleanup_fb = vkms_cleanup_fb, |
| 163 | }; | 173 | }; |
| 164 | 174 | ||
| 165 | struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev) | 175 | struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev, |
| 176 | enum drm_plane_type type) | ||
| 166 | { | 177 | { |
| 167 | struct drm_device *dev = &vkmsdev->drm; | 178 | struct drm_device *dev = &vkmsdev->drm; |
| 179 | const struct drm_plane_helper_funcs *funcs; | ||
| 168 | struct drm_plane *plane; | 180 | struct drm_plane *plane; |
| 169 | const u32 *formats; | 181 | const u32 *formats; |
| 170 | int ret, nformats; | 182 | int ret, nformats; |
| @@ -173,19 +185,26 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev) | |||
| 173 | if (!plane) | 185 | if (!plane) |
| 174 | return ERR_PTR(-ENOMEM); | 186 | return ERR_PTR(-ENOMEM); |
| 175 | 187 | ||
| 176 | formats = vkms_formats; | 188 | if (type == DRM_PLANE_TYPE_CURSOR) { |
| 177 | nformats = ARRAY_SIZE(vkms_formats); | 189 | formats = vkms_cursor_formats; |
| 190 | nformats = ARRAY_SIZE(vkms_cursor_formats); | ||
| 191 | funcs = &vkms_primary_helper_funcs; | ||
| 192 | } else { | ||
| 193 | formats = vkms_formats; | ||
| 194 | nformats = ARRAY_SIZE(vkms_formats); | ||
| 195 | funcs = &vkms_primary_helper_funcs; | ||
| 196 | } | ||
| 178 | 197 | ||
| 179 | ret = drm_universal_plane_init(dev, plane, 0, | 198 | ret = drm_universal_plane_init(dev, plane, 0, |
| 180 | &vkms_plane_funcs, | 199 | &vkms_plane_funcs, |
| 181 | formats, nformats, | 200 | formats, nformats, |
| 182 | NULL, DRM_PLANE_TYPE_PRIMARY, NULL); | 201 | NULL, type, NULL); |
| 183 | if (ret) { | 202 | if (ret) { |
| 184 | kfree(plane); | 203 | kfree(plane); |
| 185 | return ERR_PTR(ret); | 204 | return ERR_PTR(ret); |
| 186 | } | 205 | } |
| 187 | 206 | ||
| 188 | drm_plane_helper_add(plane, &vkms_primary_helper_funcs); | 207 | drm_plane_helper_add(plane, funcs); |
| 189 | 208 | ||
| 190 | return plane; | 209 | return plane; |
| 191 | } | 210 | } |
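The plane rework above lets a single atomic_check serve both plane types: the cursor may be positioned anywhere (including partially offscreen), while the primary still has to be unscaled and fully visible. A condensed sketch of that check; the drm_atomic_get_crtc_state() lookup is assumed from context, the rest mirrors the helpers used in the hunk:

#include <linux/err.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>

static int my_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state;
	bool can_position = plane->type == DRM_PLANE_TYPE_CURSOR;

	if (!state->fb || !state->crtc)
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* no scaling for either plane; only the cursor may be positioned */
	return drm_atomic_helper_check_plane_state(state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   can_position, true);
}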
