diff options
| author | Joonyoung Shim <jy0922.shim@samsung.com> | 2015-08-16 01:38:49 -0400 |
|---|---|---|
| committer | Inki Dae <inki.dae@samsung.com> | 2015-08-16 01:39:32 -0400 |
| commit | 2a8cb48945408984cd04c850b293f467b32ec5af (patch) | |
| tree | 28e936ef468bdecd33be038de59a64e8c5f94b51 /drivers | |
| parent | 01ed50ddbd6f7b4fafcf366994949d5a1a8356c0 (diff) | |
drm/exynos: merge exynos_drm_buf.c to exynos_drm_gem.c
The struct exynos_drm_gem_obj can absorb the fields of the struct
exynos_drm_gem_buf, so the exynos_drm_buf.c file is no longer needed.
Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Diffstat (limited to 'drivers')
| -rw-r--r-- | drivers/gpu/drm/exynos/Makefile | 4 | ||||
| -rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_buf.c | 170 | ||||
| -rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_buf.h | 33 | ||||
| -rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_fb.c | 14 | ||||
| -rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_fb.h | 4 | ||||
| -rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 30 | ||||
| -rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_gem.c | 209 | ||||
| -rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_gem.h | 40 | ||||
| -rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_plane.c | 10 |
9 files changed, 180 insertions(+), 334 deletions(-)
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile index 6916b22a7382..02aecfed6354 100644 --- a/drivers/gpu/drm/exynos/Makefile +++ b/drivers/gpu/drm/exynos/Makefile | |||
| @@ -4,8 +4,8 @@ | |||
| 4 | 4 | ||
| 5 | ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos | 5 | ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos |
| 6 | exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \ | 6 | exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \ |
| 7 | exynos_drm_fb.o exynos_drm_buf.o exynos_drm_gem.o \ | 7 | exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \ |
| 8 | exynos_drm_core.o exynos_drm_plane.o | 8 | exynos_drm_plane.o |
| 9 | 9 | ||
| 10 | exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o | 10 | exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o |
| 11 | exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o | 11 | exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c deleted file mode 100644 index 9260dfb3b7e5..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ /dev/null | |||
| @@ -1,170 +0,0 @@ | |||
| 1 | /* exynos_drm_buf.c | ||
| 2 | * | ||
| 3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
| 4 | * Author: Inki Dae <inki.dae@samsung.com> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the | ||
| 8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
| 9 | * option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <drm/drmP.h> | ||
| 13 | #include <drm/exynos_drm.h> | ||
| 14 | |||
| 15 | #include "exynos_drm_drv.h" | ||
| 16 | #include "exynos_drm_gem.h" | ||
| 17 | #include "exynos_drm_buf.h" | ||
| 18 | #include "exynos_drm_iommu.h" | ||
| 19 | |||
| 20 | static int lowlevel_buffer_allocate(struct drm_device *dev, | ||
| 21 | unsigned int flags, struct exynos_drm_gem_buf *buf) | ||
| 22 | { | ||
| 23 | int ret = 0; | ||
| 24 | enum dma_attr attr; | ||
| 25 | unsigned int nr_pages; | ||
| 26 | |||
| 27 | if (buf->dma_addr) { | ||
| 28 | DRM_DEBUG_KMS("already allocated.\n"); | ||
| 29 | return 0; | ||
| 30 | } | ||
| 31 | |||
| 32 | init_dma_attrs(&buf->dma_attrs); | ||
| 33 | |||
| 34 | /* | ||
| 35 | * if EXYNOS_BO_CONTIG, fully physically contiguous memory | ||
| 36 | * region will be allocated else physically contiguous | ||
| 37 | * as possible. | ||
| 38 | */ | ||
| 39 | if (!(flags & EXYNOS_BO_NONCONTIG)) | ||
| 40 | dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs); | ||
| 41 | |||
| 42 | /* | ||
| 43 | * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping | ||
| 44 | * else cachable mapping. | ||
| 45 | */ | ||
| 46 | if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE)) | ||
| 47 | attr = DMA_ATTR_WRITE_COMBINE; | ||
| 48 | else | ||
| 49 | attr = DMA_ATTR_NON_CONSISTENT; | ||
| 50 | |||
| 51 | dma_set_attr(attr, &buf->dma_attrs); | ||
| 52 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs); | ||
| 53 | |||
| 54 | nr_pages = buf->size >> PAGE_SHIFT; | ||
| 55 | |||
| 56 | if (!is_drm_iommu_supported(dev)) { | ||
| 57 | dma_addr_t start_addr; | ||
| 58 | unsigned int i = 0; | ||
| 59 | |||
| 60 | buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); | ||
| 61 | if (!buf->pages) { | ||
| 62 | DRM_ERROR("failed to allocate pages.\n"); | ||
| 63 | return -ENOMEM; | ||
| 64 | } | ||
| 65 | |||
| 66 | buf->cookie = dma_alloc_attrs(dev->dev, | ||
| 67 | buf->size, | ||
| 68 | &buf->dma_addr, GFP_KERNEL, | ||
| 69 | &buf->dma_attrs); | ||
| 70 | if (!buf->cookie) { | ||
| 71 | DRM_ERROR("failed to allocate buffer.\n"); | ||
| 72 | ret = -ENOMEM; | ||
| 73 | goto err_free; | ||
| 74 | } | ||
| 75 | |||
| 76 | start_addr = buf->dma_addr; | ||
| 77 | while (i < nr_pages) { | ||
| 78 | buf->pages[i] = phys_to_page(start_addr); | ||
| 79 | start_addr += PAGE_SIZE; | ||
| 80 | i++; | ||
| 81 | } | ||
| 82 | } else { | ||
| 83 | |||
| 84 | buf->pages = dma_alloc_attrs(dev->dev, buf->size, | ||
| 85 | &buf->dma_addr, GFP_KERNEL, | ||
| 86 | &buf->dma_attrs); | ||
| 87 | if (!buf->pages) { | ||
| 88 | DRM_ERROR("failed to allocate buffer.\n"); | ||
| 89 | return -ENOMEM; | ||
| 90 | } | ||
| 91 | } | ||
| 92 | |||
| 93 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", | ||
| 94 | (unsigned long)buf->dma_addr, | ||
| 95 | buf->size); | ||
| 96 | |||
| 97 | return ret; | ||
| 98 | |||
| 99 | err_free: | ||
| 100 | if (!is_drm_iommu_supported(dev)) | ||
| 101 | drm_free_large(buf->pages); | ||
| 102 | |||
| 103 | return ret; | ||
| 104 | } | ||
| 105 | |||
| 106 | static void lowlevel_buffer_deallocate(struct drm_device *dev, | ||
| 107 | unsigned int flags, struct exynos_drm_gem_buf *buf) | ||
| 108 | { | ||
| 109 | if (!buf->dma_addr) { | ||
| 110 | DRM_DEBUG_KMS("dma_addr is invalid.\n"); | ||
| 111 | return; | ||
| 112 | } | ||
| 113 | |||
| 114 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", | ||
| 115 | (unsigned long)buf->dma_addr, | ||
| 116 | buf->size); | ||
| 117 | |||
| 118 | if (!is_drm_iommu_supported(dev)) { | ||
| 119 | dma_free_attrs(dev->dev, buf->size, buf->cookie, | ||
| 120 | (dma_addr_t)buf->dma_addr, &buf->dma_attrs); | ||
| 121 | drm_free_large(buf->pages); | ||
| 122 | } else | ||
| 123 | dma_free_attrs(dev->dev, buf->size, buf->pages, | ||
| 124 | (dma_addr_t)buf->dma_addr, &buf->dma_attrs); | ||
| 125 | |||
| 126 | buf->dma_addr = (dma_addr_t)NULL; | ||
| 127 | } | ||
| 128 | |||
| 129 | struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, | ||
| 130 | unsigned int size) | ||
| 131 | { | ||
| 132 | struct exynos_drm_gem_buf *buffer; | ||
| 133 | |||
| 134 | DRM_DEBUG_KMS("desired size = 0x%x\n", size); | ||
| 135 | |||
| 136 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); | ||
| 137 | if (!buffer) | ||
| 138 | return NULL; | ||
| 139 | |||
| 140 | buffer->size = size; | ||
| 141 | return buffer; | ||
| 142 | } | ||
| 143 | |||
| 144 | void exynos_drm_fini_buf(struct drm_device *dev, | ||
| 145 | struct exynos_drm_gem_buf *buffer) | ||
| 146 | { | ||
| 147 | kfree(buffer); | ||
| 148 | buffer = NULL; | ||
| 149 | } | ||
| 150 | |||
| 151 | int exynos_drm_alloc_buf(struct drm_device *dev, | ||
| 152 | struct exynos_drm_gem_buf *buf, unsigned int flags) | ||
| 153 | { | ||
| 154 | |||
| 155 | /* | ||
| 156 | * allocate memory region and set the memory information | ||
| 157 | * to vaddr and dma_addr of a buffer object. | ||
| 158 | */ | ||
| 159 | if (lowlevel_buffer_allocate(dev, flags, buf) < 0) | ||
| 160 | return -ENOMEM; | ||
| 161 | |||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | |||
| 165 | void exynos_drm_free_buf(struct drm_device *dev, | ||
| 166 | unsigned int flags, struct exynos_drm_gem_buf *buffer) | ||
| 167 | { | ||
| 168 | |||
| 169 | lowlevel_buffer_deallocate(dev, flags, buffer); | ||
| 170 | } | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h deleted file mode 100644 index a6412f19673c..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.h +++ /dev/null | |||
| @@ -1,33 +0,0 @@ | |||
| 1 | /* exynos_drm_buf.h | ||
| 2 | * | ||
| 3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
| 4 | * Author: Inki Dae <inki.dae@samsung.com> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the | ||
| 8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
| 9 | * option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #ifndef _EXYNOS_DRM_BUF_H_ | ||
| 13 | #define _EXYNOS_DRM_BUF_H_ | ||
| 14 | |||
| 15 | /* create and initialize buffer object. */ | ||
| 16 | struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, | ||
| 17 | unsigned int size); | ||
| 18 | |||
| 19 | /* destroy buffer object. */ | ||
| 20 | void exynos_drm_fini_buf(struct drm_device *dev, | ||
| 21 | struct exynos_drm_gem_buf *buffer); | ||
| 22 | |||
| 23 | /* allocate physical memory region and setup sgt. */ | ||
| 24 | int exynos_drm_alloc_buf(struct drm_device *dev, | ||
| 25 | struct exynos_drm_gem_buf *buf, | ||
| 26 | unsigned int flags); | ||
| 27 | |||
| 28 | /* release physical memory region, and sgt. */ | ||
| 29 | void exynos_drm_free_buf(struct drm_device *dev, | ||
| 30 | unsigned int flags, | ||
| 31 | struct exynos_drm_gem_buf *buffer); | ||
| 32 | |||
| 33 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 2b6320e6eae2..9738f4e0c6eb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c | |||
| @@ -238,22 +238,22 @@ err_free: | |||
| 238 | return ERR_PTR(ret); | 238 | return ERR_PTR(ret); |
| 239 | } | 239 | } |
| 240 | 240 | ||
| 241 | struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, | 241 | struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb, |
| 242 | int index) | 242 | int index) |
| 243 | { | 243 | { |
| 244 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); | 244 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); |
| 245 | struct exynos_drm_gem_buf *buffer; | 245 | struct exynos_drm_gem_obj *obj; |
| 246 | 246 | ||
| 247 | if (index >= MAX_FB_BUFFER) | 247 | if (index >= MAX_FB_BUFFER) |
| 248 | return NULL; | 248 | return NULL; |
| 249 | 249 | ||
| 250 | buffer = exynos_fb->exynos_gem_obj[index]->buffer; | 250 | obj = exynos_fb->exynos_gem_obj[index]; |
| 251 | if (!buffer) | 251 | if (!obj) |
| 252 | return NULL; | 252 | return NULL; |
| 253 | 253 | ||
| 254 | DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr); | 254 | DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)obj->dma_addr); |
| 255 | 255 | ||
| 256 | return buffer; | 256 | return obj; |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | static void exynos_drm_output_poll_changed(struct drm_device *dev) | 259 | static void exynos_drm_output_poll_changed(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h index 517471b37566..1c9e27c32cd1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h | |||
| @@ -19,8 +19,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev, | |||
| 19 | struct drm_mode_fb_cmd2 *mode_cmd, | 19 | struct drm_mode_fb_cmd2 *mode_cmd, |
| 20 | struct drm_gem_object *obj); | 20 | struct drm_gem_object *obj); |
| 21 | 21 | ||
| 22 | /* get memory information of a drm framebuffer */ | 22 | /* get gem object of a drm framebuffer */ |
| 23 | struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, | 23 | struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb, |
| 24 | int index); | 24 | int index); |
| 25 | 25 | ||
| 26 | void exynos_drm_mode_config_init(struct drm_device *dev); | 26 | void exynos_drm_mode_config_init(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index dd64bc04ffbb..624595afbce0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c | |||
| @@ -40,8 +40,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info, | |||
| 40 | { | 40 | { |
| 41 | struct drm_fb_helper *helper = info->par; | 41 | struct drm_fb_helper *helper = info->par; |
| 42 | struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper); | 42 | struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper); |
| 43 | struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; | 43 | struct exynos_drm_gem_obj *obj = exynos_fbd->exynos_gem_obj; |
| 44 | struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer; | ||
| 45 | unsigned long vm_size; | 44 | unsigned long vm_size; |
| 46 | int ret; | 45 | int ret; |
| 47 | 46 | ||
| @@ -49,11 +48,11 @@ static int exynos_drm_fb_mmap(struct fb_info *info, | |||
| 49 | 48 | ||
| 50 | vm_size = vma->vm_end - vma->vm_start; | 49 | vm_size = vma->vm_end - vma->vm_start; |
| 51 | 50 | ||
| 52 | if (vm_size > buffer->size) | 51 | if (vm_size > obj->size) |
| 53 | return -EINVAL; | 52 | return -EINVAL; |
| 54 | 53 | ||
| 55 | ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages, | 54 | ret = dma_mmap_attrs(helper->dev->dev, vma, obj->pages, obj->dma_addr, |
| 56 | buffer->dma_addr, buffer->size, &buffer->dma_attrs); | 55 | obj->size, &obj->dma_attrs); |
| 57 | if (ret < 0) { | 56 | if (ret < 0) { |
| 58 | DRM_ERROR("failed to mmap.\n"); | 57 | DRM_ERROR("failed to mmap.\n"); |
| 59 | return ret; | 58 | return ret; |
| @@ -80,7 +79,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, | |||
| 80 | struct drm_framebuffer *fb) | 79 | struct drm_framebuffer *fb) |
| 81 | { | 80 | { |
| 82 | struct fb_info *fbi = helper->fbdev; | 81 | struct fb_info *fbi = helper->fbdev; |
| 83 | struct exynos_drm_gem_buf *buffer; | 82 | struct exynos_drm_gem_obj *obj; |
| 84 | unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); | 83 | unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); |
| 85 | unsigned int nr_pages; | 84 | unsigned int nr_pages; |
| 86 | unsigned long offset; | 85 | unsigned long offset; |
| @@ -89,18 +88,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, | |||
| 89 | drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); | 88 | drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); |
| 90 | 89 | ||
| 91 | /* RGB formats use only one buffer */ | 90 | /* RGB formats use only one buffer */ |
| 92 | buffer = exynos_drm_fb_buffer(fb, 0); | 91 | obj = exynos_drm_fb_gem_obj(fb, 0); |
| 93 | if (!buffer) { | 92 | if (!obj) { |
| 94 | DRM_DEBUG_KMS("buffer is null.\n"); | 93 | DRM_DEBUG_KMS("gem object is null.\n"); |
| 95 | return -EFAULT; | 94 | return -EFAULT; |
| 96 | } | 95 | } |
| 97 | 96 | ||
| 98 | nr_pages = buffer->size >> PAGE_SHIFT; | 97 | nr_pages = obj->size >> PAGE_SHIFT; |
| 99 | 98 | ||
| 100 | buffer->kvaddr = (void __iomem *) vmap(buffer->pages, | 99 | obj->kvaddr = (void __iomem *) vmap(obj->pages, nr_pages, VM_MAP, |
| 101 | nr_pages, VM_MAP, | ||
| 102 | pgprot_writecombine(PAGE_KERNEL)); | 100 | pgprot_writecombine(PAGE_KERNEL)); |
| 103 | if (!buffer->kvaddr) { | 101 | if (!obj->kvaddr) { |
| 104 | DRM_ERROR("failed to map pages to kernel space.\n"); | 102 | DRM_ERROR("failed to map pages to kernel space.\n"); |
| 105 | return -EIO; | 103 | return -EIO; |
| 106 | } | 104 | } |
| @@ -111,7 +109,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, | |||
| 111 | offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); | 109 | offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); |
| 112 | offset += fbi->var.yoffset * fb->pitches[0]; | 110 | offset += fbi->var.yoffset * fb->pitches[0]; |
| 113 | 111 | ||
| 114 | fbi->screen_base = buffer->kvaddr + offset; | 112 | fbi->screen_base = obj->kvaddr + offset; |
| 115 | fbi->screen_size = size; | 113 | fbi->screen_size = size; |
| 116 | fbi->fix.smem_len = size; | 114 | fbi->fix.smem_len = size; |
| 117 | 115 | ||
| @@ -290,8 +288,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev, | |||
| 290 | struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; | 288 | struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; |
| 291 | struct drm_framebuffer *fb; | 289 | struct drm_framebuffer *fb; |
| 292 | 290 | ||
| 293 | if (exynos_gem_obj->buffer->kvaddr) | 291 | if (exynos_gem_obj->kvaddr) |
| 294 | vunmap(exynos_gem_obj->buffer->kvaddr); | 292 | vunmap(exynos_gem_obj->kvaddr); |
| 295 | 293 | ||
| 296 | /* release drm framebuffer and real buffer */ | 294 | /* release drm framebuffer and real buffer */ |
| 297 | if (fb_helper->fb && fb_helper->fb->funcs) { | 295 | if (fb_helper->fb && fb_helper->fb->funcs) { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 768a632dfa2a..67461b77f040 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
| @@ -18,9 +18,109 @@ | |||
| 18 | 18 | ||
| 19 | #include "exynos_drm_drv.h" | 19 | #include "exynos_drm_drv.h" |
| 20 | #include "exynos_drm_gem.h" | 20 | #include "exynos_drm_gem.h" |
| 21 | #include "exynos_drm_buf.h" | ||
| 22 | #include "exynos_drm_iommu.h" | 21 | #include "exynos_drm_iommu.h" |
| 23 | 22 | ||
| 23 | static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj) | ||
| 24 | { | ||
| 25 | struct drm_device *dev = obj->base.dev; | ||
| 26 | enum dma_attr attr; | ||
| 27 | unsigned int nr_pages; | ||
| 28 | |||
| 29 | if (obj->dma_addr) { | ||
| 30 | DRM_DEBUG_KMS("already allocated.\n"); | ||
| 31 | return 0; | ||
| 32 | } | ||
| 33 | |||
| 34 | init_dma_attrs(&obj->dma_attrs); | ||
| 35 | |||
| 36 | /* | ||
| 37 | * if EXYNOS_BO_CONTIG, fully physically contiguous memory | ||
| 38 | * region will be allocated else physically contiguous | ||
| 39 | * as possible. | ||
| 40 | */ | ||
| 41 | if (!(obj->flags & EXYNOS_BO_NONCONTIG)) | ||
| 42 | dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs); | ||
| 43 | |||
| 44 | /* | ||
| 45 | * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping | ||
| 46 | * else cachable mapping. | ||
| 47 | */ | ||
| 48 | if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE)) | ||
| 49 | attr = DMA_ATTR_WRITE_COMBINE; | ||
| 50 | else | ||
| 51 | attr = DMA_ATTR_NON_CONSISTENT; | ||
| 52 | |||
| 53 | dma_set_attr(attr, &obj->dma_attrs); | ||
| 54 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs); | ||
| 55 | |||
| 56 | nr_pages = obj->size >> PAGE_SHIFT; | ||
| 57 | |||
| 58 | if (!is_drm_iommu_supported(dev)) { | ||
| 59 | dma_addr_t start_addr; | ||
| 60 | unsigned int i = 0; | ||
| 61 | |||
| 62 | obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); | ||
| 63 | if (!obj->pages) { | ||
| 64 | DRM_ERROR("failed to allocate pages.\n"); | ||
| 65 | return -ENOMEM; | ||
| 66 | } | ||
| 67 | |||
| 68 | obj->cookie = dma_alloc_attrs(dev->dev, | ||
| 69 | obj->size, | ||
| 70 | &obj->dma_addr, GFP_KERNEL, | ||
| 71 | &obj->dma_attrs); | ||
| 72 | if (!obj->cookie) { | ||
| 73 | DRM_ERROR("failed to allocate buffer.\n"); | ||
| 74 | drm_free_large(obj->pages); | ||
| 75 | return -ENOMEM; | ||
| 76 | } | ||
| 77 | |||
| 78 | start_addr = obj->dma_addr; | ||
| 79 | while (i < nr_pages) { | ||
| 80 | obj->pages[i] = phys_to_page(start_addr); | ||
| 81 | start_addr += PAGE_SIZE; | ||
| 82 | i++; | ||
| 83 | } | ||
| 84 | } else { | ||
| 85 | obj->pages = dma_alloc_attrs(dev->dev, obj->size, | ||
| 86 | &obj->dma_addr, GFP_KERNEL, | ||
| 87 | &obj->dma_attrs); | ||
| 88 | if (!obj->pages) { | ||
| 89 | DRM_ERROR("failed to allocate buffer.\n"); | ||
| 90 | return -ENOMEM; | ||
| 91 | } | ||
| 92 | } | ||
| 93 | |||
| 94 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", | ||
| 95 | (unsigned long)obj->dma_addr, | ||
| 96 | obj->size); | ||
| 97 | |||
| 98 | return 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj) | ||
| 102 | { | ||
| 103 | struct drm_device *dev = obj->base.dev; | ||
| 104 | |||
| 105 | if (!obj->dma_addr) { | ||
| 106 | DRM_DEBUG_KMS("dma_addr is invalid.\n"); | ||
| 107 | return; | ||
| 108 | } | ||
| 109 | |||
| 110 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", | ||
| 111 | (unsigned long)obj->dma_addr, obj->size); | ||
| 112 | |||
| 113 | if (!is_drm_iommu_supported(dev)) { | ||
| 114 | dma_free_attrs(dev->dev, obj->size, obj->cookie, | ||
| 115 | (dma_addr_t)obj->dma_addr, &obj->dma_attrs); | ||
| 116 | drm_free_large(obj->pages); | ||
| 117 | } else | ||
| 118 | dma_free_attrs(dev->dev, obj->size, obj->pages, | ||
| 119 | (dma_addr_t)obj->dma_addr, &obj->dma_attrs); | ||
| 120 | |||
| 121 | obj->dma_addr = (dma_addr_t)NULL; | ||
| 122 | } | ||
| 123 | |||
| 24 | static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, | 124 | static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, |
| 25 | struct drm_file *file_priv, | 125 | struct drm_file *file_priv, |
| 26 | unsigned int *handle) | 126 | unsigned int *handle) |
| @@ -45,11 +145,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, | |||
| 45 | 145 | ||
| 46 | void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) | 146 | void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) |
| 47 | { | 147 | { |
| 48 | struct drm_gem_object *obj; | 148 | struct drm_gem_object *obj = &exynos_gem_obj->base; |
| 49 | struct exynos_drm_gem_buf *buf; | ||
| 50 | |||
| 51 | obj = &exynos_gem_obj->base; | ||
| 52 | buf = exynos_gem_obj->buffer; | ||
| 53 | 149 | ||
| 54 | DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count); | 150 | DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count); |
| 55 | 151 | ||
| @@ -62,12 +158,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) | |||
| 62 | if (obj->import_attach) | 158 | if (obj->import_attach) |
| 63 | goto out; | 159 | goto out; |
| 64 | 160 | ||
| 65 | exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf); | 161 | exynos_drm_free_buf(exynos_gem_obj); |
| 66 | 162 | ||
| 67 | out: | 163 | out: |
| 68 | exynos_drm_fini_buf(obj->dev, buf); | ||
| 69 | exynos_gem_obj->buffer = NULL; | ||
| 70 | |||
| 71 | drm_gem_free_mmap_offset(obj); | 164 | drm_gem_free_mmap_offset(obj); |
| 72 | 165 | ||
| 73 | /* release file pointer to gem object. */ | 166 | /* release file pointer to gem object. */ |
| @@ -94,7 +187,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, | |||
| 94 | 187 | ||
| 95 | drm_gem_object_unreference_unlocked(obj); | 188 | drm_gem_object_unreference_unlocked(obj); |
| 96 | 189 | ||
| 97 | return exynos_gem_obj->buffer->size; | 190 | return exynos_gem_obj->size; |
| 98 | } | 191 | } |
| 99 | 192 | ||
| 100 | 193 | ||
| @@ -129,7 +222,6 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, | |||
| 129 | unsigned long size) | 222 | unsigned long size) |
| 130 | { | 223 | { |
| 131 | struct exynos_drm_gem_obj *exynos_gem_obj; | 224 | struct exynos_drm_gem_obj *exynos_gem_obj; |
| 132 | struct exynos_drm_gem_buf *buf; | ||
| 133 | int ret; | 225 | int ret; |
| 134 | 226 | ||
| 135 | if (flags & ~(EXYNOS_BO_MASK)) { | 227 | if (flags & ~(EXYNOS_BO_MASK)) { |
| @@ -144,33 +236,21 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, | |||
| 144 | 236 | ||
| 145 | size = roundup(size, PAGE_SIZE); | 237 | size = roundup(size, PAGE_SIZE); |
| 146 | 238 | ||
| 147 | buf = exynos_drm_init_buf(dev, size); | ||
| 148 | if (!buf) | ||
| 149 | return ERR_PTR(-ENOMEM); | ||
| 150 | |||
| 151 | exynos_gem_obj = exynos_drm_gem_init(dev, size); | 239 | exynos_gem_obj = exynos_drm_gem_init(dev, size); |
| 152 | if (IS_ERR(exynos_gem_obj)) { | 240 | if (IS_ERR(exynos_gem_obj)) |
| 153 | ret = PTR_ERR(exynos_gem_obj); | 241 | return exynos_gem_obj; |
| 154 | goto err_fini_buf; | ||
| 155 | } | ||
| 156 | |||
| 157 | exynos_gem_obj->buffer = buf; | ||
| 158 | 242 | ||
| 159 | /* set memory type and cache attribute from user side. */ | 243 | /* set memory type and cache attribute from user side. */ |
| 160 | exynos_gem_obj->flags = flags; | 244 | exynos_gem_obj->flags = flags; |
| 161 | 245 | ||
| 162 | ret = exynos_drm_alloc_buf(dev, buf, flags); | 246 | ret = exynos_drm_alloc_buf(exynos_gem_obj); |
| 163 | if (ret < 0) | 247 | if (ret < 0) { |
| 164 | goto err_gem_fini; | 248 | drm_gem_object_release(&exynos_gem_obj->base); |
| 249 | kfree(exynos_gem_obj); | ||
| 250 | return ERR_PTR(ret); | ||
| 251 | } | ||
| 165 | 252 | ||
| 166 | return exynos_gem_obj; | 253 | return exynos_gem_obj; |
| 167 | |||
| 168 | err_gem_fini: | ||
| 169 | drm_gem_object_release(&exynos_gem_obj->base); | ||
| 170 | kfree(exynos_gem_obj); | ||
| 171 | err_fini_buf: | ||
| 172 | exynos_drm_fini_buf(dev, buf); | ||
| 173 | return ERR_PTR(ret); | ||
| 174 | } | 254 | } |
| 175 | 255 | ||
| 176 | int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, | 256 | int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, |
| @@ -209,7 +289,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, | |||
| 209 | 289 | ||
| 210 | exynos_gem_obj = to_exynos_gem_obj(obj); | 290 | exynos_gem_obj = to_exynos_gem_obj(obj); |
| 211 | 291 | ||
| 212 | return &exynos_gem_obj->buffer->dma_addr; | 292 | return &exynos_gem_obj->dma_addr; |
| 213 | } | 293 | } |
| 214 | 294 | ||
| 215 | void exynos_drm_gem_put_dma_addr(struct drm_device *dev, | 295 | void exynos_drm_gem_put_dma_addr(struct drm_device *dev, |
| @@ -237,7 +317,6 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, | |||
| 237 | struct vm_area_struct *vma) | 317 | struct vm_area_struct *vma) |
| 238 | { | 318 | { |
| 239 | struct drm_device *drm_dev = exynos_gem_obj->base.dev; | 319 | struct drm_device *drm_dev = exynos_gem_obj->base.dev; |
| 240 | struct exynos_drm_gem_buf *buffer; | ||
| 241 | unsigned long vm_size; | 320 | unsigned long vm_size; |
| 242 | int ret; | 321 | int ret; |
| 243 | 322 | ||
| @@ -246,19 +325,13 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, | |||
| 246 | 325 | ||
| 247 | vm_size = vma->vm_end - vma->vm_start; | 326 | vm_size = vma->vm_end - vma->vm_start; |
| 248 | 327 | ||
| 249 | /* | ||
| 250 | * a buffer contains information to physically continuous memory | ||
| 251 | * allocated by user request or at framebuffer creation. | ||
| 252 | */ | ||
| 253 | buffer = exynos_gem_obj->buffer; | ||
| 254 | |||
| 255 | /* check if user-requested size is valid. */ | 328 | /* check if user-requested size is valid. */ |
| 256 | if (vm_size > buffer->size) | 329 | if (vm_size > exynos_gem_obj->size) |
| 257 | return -EINVAL; | 330 | return -EINVAL; |
| 258 | 331 | ||
| 259 | ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages, | 332 | ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages, |
| 260 | buffer->dma_addr, buffer->size, | 333 | exynos_gem_obj->dma_addr, exynos_gem_obj->size, |
| 261 | &buffer->dma_attrs); | 334 | &exynos_gem_obj->dma_attrs); |
| 262 | if (ret < 0) { | 335 | if (ret < 0) { |
| 263 | DRM_ERROR("failed to mmap.\n"); | 336 | DRM_ERROR("failed to mmap.\n"); |
| 264 | return ret; | 337 | return ret; |
| @@ -418,12 +491,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, | |||
| 418 | 491 | ||
| 419 | void exynos_drm_gem_free_object(struct drm_gem_object *obj) | 492 | void exynos_drm_gem_free_object(struct drm_gem_object *obj) |
| 420 | { | 493 | { |
| 421 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
| 422 | struct exynos_drm_gem_buf *buf; | ||
| 423 | |||
| 424 | exynos_gem_obj = to_exynos_gem_obj(obj); | ||
| 425 | buf = exynos_gem_obj->buffer; | ||
| 426 | |||
| 427 | exynos_drm_gem_destroy(to_exynos_gem_obj(obj)); | 494 | exynos_drm_gem_destroy(to_exynos_gem_obj(obj)); |
| 428 | } | 495 | } |
| 429 | 496 | ||
| @@ -508,7 +575,6 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 508 | { | 575 | { |
| 509 | struct drm_gem_object *obj = vma->vm_private_data; | 576 | struct drm_gem_object *obj = vma->vm_private_data; |
| 510 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | 577 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); |
| 511 | struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; | ||
| 512 | unsigned long pfn; | 578 | unsigned long pfn; |
| 513 | pgoff_t page_offset; | 579 | pgoff_t page_offset; |
| 514 | int ret; | 580 | int ret; |
| @@ -516,13 +582,13 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 516 | page_offset = ((unsigned long)vmf->virtual_address - | 582 | page_offset = ((unsigned long)vmf->virtual_address - |
| 517 | vma->vm_start) >> PAGE_SHIFT; | 583 | vma->vm_start) >> PAGE_SHIFT; |
| 518 | 584 | ||
| 519 | if (page_offset >= (buf->size >> PAGE_SHIFT)) { | 585 | if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) { |
| 520 | DRM_ERROR("invalid page offset\n"); | 586 | DRM_ERROR("invalid page offset\n"); |
| 521 | ret = -EINVAL; | 587 | ret = -EINVAL; |
| 522 | goto out; | 588 | goto out; |
| 523 | } | 589 | } |
| 524 | 590 | ||
| 525 | pfn = page_to_pfn(buf->pages[page_offset]); | 591 | pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]); |
| 526 | ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); | 592 | ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); |
| 527 | 593 | ||
| 528 | out: | 594 | out: |
| @@ -583,12 +649,11 @@ err_close_vm: | |||
| 583 | struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj) | 649 | struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj) |
| 584 | { | 650 | { |
| 585 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | 651 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); |
| 586 | struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; | ||
| 587 | int npages; | 652 | int npages; |
| 588 | 653 | ||
| 589 | npages = buf->size >> PAGE_SHIFT; | 654 | npages = exynos_gem_obj->size >> PAGE_SHIFT; |
| 590 | 655 | ||
| 591 | return drm_prime_pages_to_sg(buf->pages, npages); | 656 | return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages); |
| 592 | } | 657 | } |
| 593 | 658 | ||
| 594 | struct drm_gem_object * | 659 | struct drm_gem_object * |
| @@ -597,34 +662,29 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, | |||
| 597 | struct sg_table *sgt) | 662 | struct sg_table *sgt) |
| 598 | { | 663 | { |
| 599 | struct exynos_drm_gem_obj *exynos_gem_obj; | 664 | struct exynos_drm_gem_obj *exynos_gem_obj; |
| 600 | struct exynos_drm_gem_buf *buf; | ||
| 601 | int npages; | 665 | int npages; |
| 602 | int ret; | 666 | int ret; |
| 603 | 667 | ||
| 604 | buf = kzalloc(sizeof(*buf), GFP_KERNEL); | 668 | exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size); |
| 605 | if (!buf) | 669 | if (IS_ERR(exynos_gem_obj)) { |
| 606 | return ERR_PTR(-ENOMEM); | 670 | ret = PTR_ERR(exynos_gem_obj); |
| 671 | goto err; | ||
| 672 | } | ||
| 607 | 673 | ||
| 608 | buf->size = attach->dmabuf->size; | 674 | exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl); |
| 609 | buf->dma_addr = sg_dma_address(sgt->sgl); | ||
| 610 | 675 | ||
| 611 | npages = buf->size >> PAGE_SHIFT; | 676 | npages = exynos_gem_obj->size >> PAGE_SHIFT; |
| 612 | buf->pages = drm_malloc_ab(npages, sizeof(struct page *)); | 677 | exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); |
| 613 | if (!buf->pages) { | 678 | if (!exynos_gem_obj->pages) { |
| 614 | ret = -ENOMEM; | 679 | ret = -ENOMEM; |
| 615 | goto err; | 680 | goto err; |
| 616 | } | 681 | } |
| 617 | 682 | ||
| 618 | ret = drm_prime_sg_to_page_addr_arrays(sgt, buf->pages, NULL, npages); | 683 | ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL, |
| 684 | npages); | ||
| 619 | if (ret < 0) | 685 | if (ret < 0) |
| 620 | goto err_free_large; | 686 | goto err_free_large; |
| 621 | 687 | ||
| 622 | exynos_gem_obj = exynos_drm_gem_init(dev, buf->size); | ||
| 623 | if (IS_ERR(exynos_gem_obj)) { | ||
| 624 | ret = PTR_ERR(exynos_gem_obj); | ||
| 625 | goto err; | ||
| 626 | } | ||
| 627 | |||
| 628 | if (sgt->nents == 1) { | 688 | if (sgt->nents == 1) { |
| 629 | /* always physically continuous memory if sgt->nents is 1. */ | 689 | /* always physically continuous memory if sgt->nents is 1. */ |
| 630 | exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; | 690 | exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; |
| @@ -641,9 +701,10 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, | |||
| 641 | return &exynos_gem_obj->base; | 701 | return &exynos_gem_obj->base; |
| 642 | 702 | ||
| 643 | err_free_large: | 703 | err_free_large: |
| 644 | drm_free_large(buf->pages); | 704 | drm_free_large(exynos_gem_obj->pages); |
| 645 | err: | 705 | err: |
| 646 | kfree(buf); | 706 | drm_gem_object_release(&exynos_gem_obj->base); |
| 707 | kfree(exynos_gem_obj); | ||
| 647 | return ERR_PTR(ret); | 708 | return ERR_PTR(ret); |
| 648 | } | 709 | } |
| 649 | 710 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 5e20da6e094e..cd62f8410d1e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
| @@ -20,26 +20,6 @@ | |||
| 20 | #define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG) | 20 | #define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG) |
| 21 | 21 | ||
| 22 | /* | 22 | /* |
| 23 | * exynos drm gem buffer structure. | ||
| 24 | * | ||
| 25 | * @cookie: cookie returned by dma_alloc_attrs | ||
| 26 | * @kvaddr: kernel virtual address to allocated memory region. | ||
| 27 | * @dma_addr: bus address(accessed by dma) to allocated memory region. | ||
| 28 | * - this address could be physical address without IOMMU and | ||
| 29 | * device address with IOMMU. | ||
| 30 | * @pages: Array of backing pages. | ||
| 31 | * @size: size of allocated memory region. | ||
| 32 | */ | ||
| 33 | struct exynos_drm_gem_buf { | ||
| 34 | void *cookie; | ||
| 35 | void __iomem *kvaddr; | ||
| 36 | dma_addr_t dma_addr; | ||
| 37 | struct dma_attrs dma_attrs; | ||
| 38 | struct page **pages; | ||
| 39 | unsigned long size; | ||
| 40 | }; | ||
| 41 | |||
| 42 | /* | ||
| 43 | * exynos drm buffer structure. | 23 | * exynos drm buffer structure. |
| 44 | * | 24 | * |
| 45 | * @base: a gem object. | 25 | * @base: a gem object. |
| @@ -50,18 +30,28 @@ struct exynos_drm_gem_buf { | |||
| 50 | * by user request or at framebuffer creation. | 30 | * by user request or at framebuffer creation. |
| 51 | * continuous memory region allocated by user request | 31 | * continuous memory region allocated by user request |
| 52 | * or at framebuffer creation. | 32 | * or at framebuffer creation. |
| 33 | * @flags: indicate memory type to allocated buffer and cache attruibute. | ||
| 53 | * @size: size requested from user, in bytes and this size is aligned | 34 | * @size: size requested from user, in bytes and this size is aligned |
| 54 | * in page unit. | 35 | * in page unit. |
| 55 | * @flags: indicate memory type to allocated buffer and cache attruibute. | 36 | * @cookie: cookie returned by dma_alloc_attrs |
| 37 | * @kvaddr: kernel virtual address to allocated memory region. | ||
| 38 | * @dma_addr: bus address(accessed by dma) to allocated memory region. | ||
| 39 | * - this address could be physical address without IOMMU and | ||
| 40 | * device address with IOMMU. | ||
| 41 | * @pages: Array of backing pages. | ||
| 56 | * | 42 | * |
| 57 | * P.S. this object would be transferred to user as kms_bo.handle so | 43 | * P.S. this object would be transferred to user as kms_bo.handle so |
| 58 | * user can access the buffer through kms_bo.handle. | 44 | * user can access the buffer through kms_bo.handle. |
| 59 | */ | 45 | */ |
| 60 | struct exynos_drm_gem_obj { | 46 | struct exynos_drm_gem_obj { |
| 61 | struct drm_gem_object base; | 47 | struct drm_gem_object base; |
| 62 | struct exynos_drm_gem_buf *buffer; | 48 | unsigned int flags; |
| 63 | unsigned long size; | 49 | unsigned long size; |
| 64 | unsigned int flags; | 50 | void *cookie; |
| 51 | void __iomem *kvaddr; | ||
| 52 | dma_addr_t dma_addr; | ||
| 53 | struct dma_attrs dma_attrs; | ||
| 54 | struct page **pages; | ||
| 65 | }; | 55 | }; |
| 66 | 56 | ||
| 67 | struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); | 57 | struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index bebc9572d404..d9a68fd83120 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c | |||
| @@ -134,15 +134,15 @@ static int exynos_plane_atomic_check(struct drm_plane *plane, | |||
| 134 | 134 | ||
| 135 | nr = exynos_drm_fb_get_buf_cnt(state->fb); | 135 | nr = exynos_drm_fb_get_buf_cnt(state->fb); |
| 136 | for (i = 0; i < nr; i++) { | 136 | for (i = 0; i < nr; i++) { |
| 137 | struct exynos_drm_gem_buf *buffer = | 137 | struct exynos_drm_gem_obj *obj = |
| 138 | exynos_drm_fb_buffer(state->fb, i); | 138 | exynos_drm_fb_gem_obj(state->fb, i); |
| 139 | 139 | ||
| 140 | if (!buffer) { | 140 | if (!obj) { |
| 141 | DRM_DEBUG_KMS("buffer is null\n"); | 141 | DRM_DEBUG_KMS("gem object is null\n"); |
| 142 | return -EFAULT; | 142 | return -EFAULT; |
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | exynos_plane->dma_addr[i] = buffer->dma_addr + | 145 | exynos_plane->dma_addr[i] = obj->dma_addr + |
| 146 | state->fb->offsets[i]; | 146 | state->fb->offsets[i]; |
| 147 | 147 | ||
| 148 | DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n", | 148 | DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n", |
