path: root/drivers/gpu/drm/udl/udl_main.c
author    Dave Airlie <airlied@gmail.com>   2010-12-14 16:14:24 -0500
committer Dave Airlie <airlied@redhat.com>  2012-03-15 09:35:34 -0400
commit    5320918b9a87865223fd6b228e530bf30bc64d9d
tree      2bc55de1fc03c57851fd86d0cfaa7377d34cdc25 /drivers/gpu/drm/udl/udl_main.c
parent    2c07a21d6fb0be47fda696a618b726ea258ed1dd
drm/udl: initial UDL driver (v4)
This is an initial drm/kms driver for the DisplayLink devices.

Supports fb_defio and the KMS dumb interface; supports 24bpp via
conversion to 16bpp (the hw can do this better); supports hot unplug
using the new drm core features.

On an unplug, it disables connector polling, unplugs connectors from
sysfs, unplugs the fbdev layer (using Kay's API), drops all the USB
device URBs, and calls the drm core to unplug the device.

This driver is based in large parts on udlfb.c, so I've licensed it
under GPLv2.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/udl/udl_main.c')
-rw-r--r--   drivers/gpu/drm/udl/udl_main.c   338
1 file changed, 338 insertions, 0 deletions
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
new file mode 100644
index 000000000000..a8d5f09428c7
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -0,0 +1,338 @@
/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */
#include "drmP.h"
#include "udl_drv.h"

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

#define GET_URB_TIMEOUT	HZ
#define FREE_URB_TIMEOUT (HZ*2)

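/*
 * Read the DisplayLink vendor-specific descriptor and walk its key/length
 * records.  The only key handled here is 0x0200 (max_area), whose value is
 * stored in udl->sku_pixel_limit.  An unrecognized descriptor is logged but
 * is not treated as fatal.
 */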
static int udl_parse_vendor_descriptor(struct drm_device *dev,
				       struct usb_device *usbdev)
{
	struct udl_device *udl = dev->dev_private;
	char *desc;
	char *buf;
	char *desc_end;

	u8 total_len = 0;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
				       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (total_len > 5) {
		DRM_INFO("vendor descriptor length:%x data:%02x %02x %02x %02x"
			 " %02x %02x %02x %02x %02x %02x %02x\n",
			 total_len, desc[0],
			 desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
			 desc[7], desc[8], desc[9], desc[10]);

		if ((desc[0] != total_len) ||	/* descriptor length */
		    (desc[1] != 0x5f) ||	/* vendor descriptor type */
		    (desc[2] != 0x01) ||	/* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		while (desc < desc_end) {
			u8 length;
			u16 key;

			key = *((u16 *) desc);
			desc += sizeof(u16);
			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;
				max_area = le32_to_cpu(*((u32 *)desc));
				DRM_DEBUG("DL chip limited to %d pixel modes\n",
					  max_area);
				udl->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}

static void udl_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}

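/*
 * Bulk URB completion handler: log anything other than the expected
 * unlink/shutdown status codes, put the urb_node back on the free list and
 * release the semaphore so a waiter in udl_get_urb() can proceed.
 */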
void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				  __func__, urb->status);
			atomic_set(&udl->lost_pixels, 1);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

#if 0
	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
#endif
		up(&udl->urbs.limit_sem);
}

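/*
 * Wait for every outstanding URB to complete, then free its coherent
 * transfer buffer, the URB itself and the tracking urb_node.
 */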
static void udl_free_urb_list(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int count = udl->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;
	int ret;
	unsigned long flags;

	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {

		/* Getting interrupted means a leak, but ok at shutdown */
		ret = down_interruptible(&udl->urbs.limit_sem);
		if (ret)
			break;

		spin_lock_irqsave(&udl->urbs.lock, flags);

		node = udl->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irqrestore(&udl->urbs.lock, flags);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}
	udl->urbs.count = 0;
}

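/*
 * Pre-allocate a pool of 'count' bulk URBs, each backed by a 'size' byte
 * coherent buffer, and set up the free list, spinlock and counting semaphore
 * that manage them.  Returns the number of URBs actually allocated.
 */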
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
	struct udl_device *udl = dev->dev_private;
	int i = 0;
	struct urb *urb;
	struct urb_node *unode;
	char *buf;

	spin_lock_init(&udl->urbs.lock);

	udl->urbs.size = size;
	INIT_LIST_HEAD(&udl->urbs.list);

	while (i < count) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		INIT_DELAYED_WORK(&unode->release_urb_work,
				  udl_release_urb_work);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udl->ddev->usbdev, MAX_TRANSFER,
					 GFP_KERNEL, &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udl->ddev->usbdev,
				  usb_sndbulkpipe(udl->ddev->usbdev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		i++;
	}

	sema_init(&udl->urbs.limit_sem, i);
	udl->urbs.count = i;
	udl->urbs.available = i;

	DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);

	return i;
}

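/*
 * Take a free URB off the pool, waiting up to GET_URB_TIMEOUT for an
 * in-flight one to complete if none is available.  Returns NULL on timeout.
 */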
struct urb *udl_get_urb(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int ret = 0;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;
	unsigned long flags;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&udl->lost_pixels, 1);
		DRM_INFO("wait for urb interrupted: %x available: %d\n",
			 ret, udl->urbs.available);
		goto error;
	}

	spin_lock_irqsave(&udl->urbs.lock, flags);

	BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
	entry = udl->urbs.list.next;
	list_del_init(entry);
	udl->urbs.available--;

	spin_unlock_irqrestore(&udl->urbs.lock, flags);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}

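/*
 * Submit a URB obtained from udl_get_urb() with 'len' bytes of payload.
 * On failure the URB is recycled immediately via udl_urb_completion().
 */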
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
	struct udl_device *udl = dev->dev_private;
	int ret;

	BUG_ON(len > udl->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		atomic_set(&udl->lost_pixels, 1);
		DRM_ERROR("usb_submit_urb error %x\n", ret);
	}
	return ret;
}

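/*
 * DRM load callback: allocate the per-device structure, validate the vendor
 * descriptor, build the URB pool and bring up modesetting and fbdev.
 */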
int udl_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct udl_device *udl;
	int ret;

	DRM_DEBUG("\n");
	udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
	if (!udl)
		return -ENOMEM;

	udl->ddev = dev;
	dev->dev_private = udl;

	if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) {
		ret = -ENODEV;
		DRM_ERROR("firmware not recognized. Assume incompatible device\n");
		goto err;
	}

	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		ret = -ENOMEM;
		DRM_ERROR("udl_alloc_urb_list failed\n");
		goto err;
	}

	DRM_DEBUG("\n");
	ret = udl_modeset_init(dev);

	ret = udl_fbdev_init(dev);
	return 0;
err:
	kfree(udl);
	DRM_ERROR("%d\n", ret);
	return ret;
}

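/* Release the URB pool when the USB device goes away (hot unplug). */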
int udl_drop_usb(struct drm_device *dev)
{
	udl_free_urb_list(dev);
	return 0;
}

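/*
 * DRM unload callback: free any remaining URBs and tear down the fbdev and
 * modeset state created in udl_driver_load().
 */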
int udl_driver_unload(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;

	if (udl->urbs.count)
		udl_free_urb_list(dev);

	udl_fbdev_cleanup(dev);
	udl_modeset_cleanup(dev);
	kfree(udl);
	return 0;
}