author     Chris Wilson <chris@chris-wilson.co.uk>	2016-06-23 10:35:32 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>	2016-07-12 06:41:36 -0400
commit     5ba6c9ff961a79809ec0daf2713e8d39e3f77199 (patch)
tree       9c2eb224abb10f2168e627f5243e2c8d4c461007
parent     2ae995887830b335f9bdab3040018071da54bcdb (diff)
drm/vgem: Fix mmaping
The vGEM mmap code has bitrotted slightly and now immediately BUGs.
Since vGEM was last updated, there are new core GEM facilities to
provide more common functions, so let's use those here.

v2: drm_gem_free_mmap_offset() is performed from drm_gem_object_release()
so we can remove the redundant call.

Testcase: igt/vgem_basic/mmap
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=96603
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Zach Reizner <zachr@google.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Tested-by: Humberto Israel Perez Rodriguez <humberto.i.perez.rodriguez@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1466692534-28303-1-git-send-email-chris@chris-wilson.co.uk
-rw-r--r--	drivers/gpu/drm/vgem/vgem_drv.c	| 164
-rw-r--r--	drivers/gpu/drm/vgem/vgem_drv.h	|   6
2 files changed, 61 insertions(+), 109 deletions(-)
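For reference, the mmap path fixed below is reached from userspace through the standard dumb-buffer ioctls: create a dumb buffer, ask for its fake mmap offset, then mmap() the DRM fd and touch the pages. The following is a minimal sketch in the spirit of igt/vgem_basic/mmap, not the igt test itself; the /dev/dri/card0 node, the buffer dimensions, and the use of the raw uapi header <drm/drm.h> are illustrative assumptions.

/*
 * Minimal userspace sketch (assumptions: /dev/dri/card0 is the vgem node,
 * kernel uapi headers are installed as <drm/drm.h>).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/drm.h>

int main(void)
{
	struct drm_mode_create_dumb create = {
		.width = 1024, .height = 1024, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *ptr;
	int fd;

	fd = open("/dev/dri/card0", O_RDWR);	/* assumed to be the vgem node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Allocate backing storage via the dumb-buffer interface (vgem_gem_dumb_create). */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create)) {
		perror("DRM_IOCTL_MODE_CREATE_DUMB");
		return 1;
	}

	/* Request the fake mmap offset for the handle (vgem_gem_dumb_map). */
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map)) {
		perror("DRM_IOCTL_MODE_MAP_DUMB");
		return 1;
	}

	/* Faulting these pages in goes through vgem_gem_fault(). */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
	if (ptr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(ptr, 0xa5, create.size);

	munmap(ptr, create.size);
	close(fd);
	return 0;
}

On kernels without this patch the memset above hits the BUG described in the commit message; with it, the pages are faulted in from shmem via the new vgem_gem_fault() below.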
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 35ea5d02a827..c161b6d7e427 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -42,81 +42,38 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
-void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
-{
-	drm_gem_put_pages(&obj->base, obj->pages, false, false);
-	obj->pages = NULL;
-}
-
 static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
-	drm_gem_free_mmap_offset(obj);
-
-	if (vgem_obj->use_dma_buf && obj->dma_buf) {
-		dma_buf_put(obj->dma_buf);
-		obj->dma_buf = NULL;
-	}
-
 	drm_gem_object_release(obj);
-
-	if (vgem_obj->pages)
-		vgem_gem_put_pages(vgem_obj);
-
-	vgem_obj->pages = NULL;
-
 	kfree(vgem_obj);
 }
 
-int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
-{
-	struct page **pages;
-
-	if (obj->pages || obj->use_dma_buf)
-		return 0;
-
-	pages = drm_gem_get_pages(&obj->base);
-	if (IS_ERR(pages)) {
-		return PTR_ERR(pages);
-	}
-
-	obj->pages = pages;
-
-	return 0;
-}
-
 static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
-	loff_t num_pages;
-	pgoff_t page_offset;
-	int ret;
-
 	/* We don't use vmf->pgoff since that has the fake offset */
-	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
-		PAGE_SHIFT;
-
-	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
-	if (page_offset > num_pages)
-		return VM_FAULT_SIGBUS;
-
-	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
-			     obj->pages[page_offset]);
-	switch (ret) {
-	case 0:
-		return VM_FAULT_NOPAGE;
-	case -ENOMEM:
-		return VM_FAULT_OOM;
-	case -EBUSY:
-		return VM_FAULT_RETRY;
-	case -EFAULT:
-	case -EINVAL:
-		return VM_FAULT_SIGBUS;
-	default:
-		WARN_ON(1);
-		return VM_FAULT_SIGBUS;
-	}
+	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	struct page *page;
+
+	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
+				       (vaddr - vma->vm_start) >> PAGE_SHIFT);
+	if (!IS_ERR(page)) {
+		vmf->page = page;
+		return 0;
+	} else switch (PTR_ERR(page)) {
+		case -ENOSPC:
+		case -ENOMEM:
+			return VM_FAULT_OOM;
+		case -EBUSY:
+			return VM_FAULT_RETRY;
+		case -EFAULT:
+		case -EINVAL:
+			return VM_FAULT_SIGBUS;
+		default:
+			WARN_ON_ONCE(PTR_ERR(page));
+			return VM_FAULT_SIGBUS;
+	}
 }
 
@@ -134,57 +91,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
 					      unsigned long size)
 {
 	struct drm_vgem_gem_object *obj;
-	struct drm_gem_object *gem_object;
-	int err;
-
-	size = roundup(size, PAGE_SIZE);
+	int ret;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
-	gem_object = &obj->base;
-
-	err = drm_gem_object_init(dev, gem_object, size);
-	if (err)
-		goto out;
-
-	err = vgem_gem_get_pages(obj);
-	if (err)
-		goto out;
-
-	err = drm_gem_handle_create(file, gem_object, handle);
-	if (err)
-		goto handle_out;
+	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
+	if (ret)
+		goto err_free;
 
-	drm_gem_object_unreference_unlocked(gem_object);
+	ret = drm_gem_handle_create(file, &obj->base, handle);
+	drm_gem_object_unreference_unlocked(&obj->base);
+	if (ret)
+		goto err;
 
-	return gem_object;
+	return &obj->base;
 
-handle_out:
-	drm_gem_object_release(gem_object);
-out:
+err_free:
 	kfree(obj);
-	return ERR_PTR(err);
+err:
+	return ERR_PTR(ret);
 }
 
 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 				struct drm_mode_create_dumb *args)
 {
 	struct drm_gem_object *gem_object;
-	uint64_t size;
-	uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+	u64 pitch, size;
 
+	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 	size = args->height * pitch;
 	if (size == 0)
 		return -EINVAL;
 
 	gem_object = vgem_gem_create(dev, file, &args->handle, size);
-
-	if (IS_ERR(gem_object)) {
-		DRM_DEBUG_DRIVER("object creation failed\n");
+	if (IS_ERR(gem_object))
 		return PTR_ERR(gem_object);
-	}
 
 	args->size = gem_object->size;
 	args->pitch = pitch;
@@ -194,26 +137,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 	return 0;
 }
 
-int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
-		      uint32_t handle, uint64_t *offset)
+static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+			     uint32_t handle, uint64_t *offset)
 {
-	int ret = 0;
 	struct drm_gem_object *obj;
+	int ret;
 
 	obj = drm_gem_object_lookup(file, handle);
 	if (!obj)
 		return -ENOENT;
 
+	if (!obj->filp) {
+		ret = -EINVAL;
+		goto unref;
+	}
+
 	ret = drm_gem_create_mmap_offset(obj);
 	if (ret)
 		goto unref;
 
-	BUG_ON(!obj->filp);
-
-	obj->filp->private_data = obj;
-
 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
-
 unref:
 	drm_gem_object_unreference_unlocked(obj);
 
@@ -223,10 +166,26 @@ unref:
 static struct drm_ioctl_desc vgem_ioctls[] = {
 };
 
+static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long flags = vma->vm_flags;
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	/* Keep the WC mmaping set by drm_gem_mmap() but our pages
+	 * are ordinary and not special.
+	 */
+	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
+	return 0;
+}
+
 static const struct file_operations vgem_driver_fops = {
 	.owner		= THIS_MODULE,
 	.open		= drm_open,
-	.mmap		= drm_gem_mmap,
+	.mmap		= vgem_mmap,
 	.poll		= drm_poll,
 	.read		= drm_read,
 	.unlocked_ioctl = drm_ioctl,
@@ -248,7 +207,7 @@ static struct drm_driver vgem_driver = {
 	.minor	= DRIVER_MINOR,
 };
 
-struct drm_device *vgem_device;
+static struct drm_device *vgem_device;
 
 static int __init vgem_init(void)
 {
@@ -261,7 +220,6 @@ static int __init vgem_init(void)
 	}
 
 	ret = drm_dev_register(vgem_device, 0);
-
 	if (ret)
 		goto out_unref;
 
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index e9f92f7ee275..988cbaae7588 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -35,12 +35,6 @@
 #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
 struct drm_vgem_gem_object {
 	struct drm_gem_object base;
-	struct page **pages;
-	bool use_dma_buf;
 };
 
-/* vgem_drv.c */
-extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
-extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
-
 #endif