Diffstat (limited to 'drivers/video/udlfb.c')
-rw-r--r-- | drivers/video/udlfb.c | 1915
1 files changed, 1915 insertions, 0 deletions
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
new file mode 100644
index 000000000000..0cca4873d490
--- /dev/null
+++ b/drivers/video/udlfb.c
@@ -0,0 +1,1915 @@
1 | /* | ||
2 | * udlfb.c -- Framebuffer driver for DisplayLink USB controller | ||
3 | * | ||
4 | * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> | ||
5 | * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> | ||
6 | * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License v2. See the file COPYING in the main directory of this archive for | ||
10 | * more details. | ||
11 | * | ||
12 | * Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven, | ||
13 | * usb-skeleton by GregKH. | ||
14 | * | ||
15 | * Device-specific portions based on information from Displaylink, with work | ||
16 | * from Florian Echtler, Henrik Bjerregaard Pedersen, and others. | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/usb.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/fb.h> | ||
26 | #include <linux/vmalloc.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <video/udlfb.h> | ||
30 | |||
31 | static struct fb_fix_screeninfo dlfb_fix = { | ||
32 | .id = "udlfb", | ||
33 | .type = FB_TYPE_PACKED_PIXELS, | ||
34 | .visual = FB_VISUAL_TRUECOLOR, | ||
35 | .xpanstep = 0, | ||
36 | .ypanstep = 0, | ||
37 | .ywrapstep = 0, | ||
38 | .accel = FB_ACCEL_NONE, | ||
39 | }; | ||
40 | |||
41 | static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST | | ||
42 | #ifdef FBINFO_VIRTFB | ||
43 | FBINFO_VIRTFB | | ||
44 | #endif | ||
45 | FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT | | ||
46 | FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR; | ||
47 | |||
48 | /* | ||
49 | * There are many DisplayLink-based products, all with unique PIDs. We are able | ||
50 | * to support all volume ones (circa 2009) with a single driver, so we match | ||
51 | * globally on VID. TODO: Probe() needs to detect when we might be running | ||
52 | * "future" chips, and bail on those, so a compatible driver can match. | ||
53 | */ | ||
54 | static struct usb_device_id id_table[] = { | ||
55 | {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,}, | ||
56 | {}, | ||
57 | }; | ||
58 | MODULE_DEVICE_TABLE(usb, id_table); | ||
59 | |||
60 | /* module options */ | ||
61 | static int console; /* Optionally allow fbcon to consume first framebuffer */ | ||
62 | static int fb_defio; /* Optionally enable experimental fb_defio mmap support */ | ||
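/*
 * Editorial usage note (not part of this patch): assuming the matching
 * module_param() declarations appear later in this file, these options
 * would typically be enabled at load time, e.g.
 *
 *   modprobe udlfb console=1 fb_defio=1
 *
 * Only the two variables above are visible in this hunk; treat the exact
 * parameter plumbing as an assumption.
 */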
63 | |||
64 | /* dlfb keeps a list of urbs for efficient bulk transfers */ | ||
65 | static void dlfb_urb_completion(struct urb *urb); | ||
66 | static struct urb *dlfb_get_urb(struct dlfb_data *dev); | ||
67 | static int dlfb_submit_urb(struct dlfb_data *dev, struct urb * urb, size_t len); | ||
68 | static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size); | ||
69 | static void dlfb_free_urb_list(struct dlfb_data *dev); | ||
70 | |||
71 | /* | ||
72 | * All DisplayLink bulk operations start with 0xAF, followed by specific code | ||
73 | * All operations are written to buffers which then later get sent to device | ||
74 | */ | ||
75 | static char *dlfb_set_register(char *buf, u8 reg, u8 val) | ||
76 | { | ||
77 | *buf++ = 0xAF; | ||
78 | *buf++ = 0x20; | ||
79 | *buf++ = reg; | ||
80 | *buf++ = val; | ||
81 | return buf; | ||
82 | } | ||
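/*
 * Illustrative example (editorial, not part of this patch): a single
 * register write produced by dlfb_set_register() above is four bytes on
 * the wire. For instance, dlfb_set_register(buf, 0x1F, 0x00) emits:
 *
 *   0xAF 0x20 0x1F 0x00
 *    |    |    |    +-- value written to the register
 *    |    |    +------- register address
 *    |    +------------ "set register" opcode
 *    +----------------- DisplayLink command prefix
 */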
83 | |||
84 | static char *dlfb_vidreg_lock(char *buf) | ||
85 | { | ||
86 | return dlfb_set_register(buf, 0xFF, 0x00); | ||
87 | } | ||
88 | |||
89 | static char *dlfb_vidreg_unlock(char *buf) | ||
90 | { | ||
91 | return dlfb_set_register(buf, 0xFF, 0xFF); | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * On/Off for driving the DisplayLink framebuffer to the display | ||
96 | * 0x00 H and V sync on | ||
97 | * 0x01 H and V sync off (screen blank but powered) | ||
98 | * 0x07 DPMS powerdown (requires modeset to come back) | ||
99 | */ | ||
100 | static char *dlfb_enable_hvsync(char *buf, bool enable) | ||
101 | { | ||
102 | if (enable) | ||
103 | return dlfb_set_register(buf, 0x1F, 0x00); | ||
104 | else | ||
105 | return dlfb_set_register(buf, 0x1F, 0x07); | ||
106 | } | ||
107 | |||
108 | static char *dlfb_set_color_depth(char *buf, u8 selection) | ||
109 | { | ||
110 | return dlfb_set_register(buf, 0x00, selection); | ||
111 | } | ||
112 | |||
113 | static char *dlfb_set_base16bpp(char *wrptr, u32 base) | ||
114 | { | ||
115 | /* the base address spans three registers; 0x20 is the hi byte. */ | ||
116 | wrptr = dlfb_set_register(wrptr, 0x20, base >> 16); | ||
117 | wrptr = dlfb_set_register(wrptr, 0x21, base >> 8); | ||
118 | return dlfb_set_register(wrptr, 0x22, base); | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * DisplayLink HW has separate 16bpp and 8bpp framebuffers. | ||
123 | * In 24bpp modes, the low-order 3:2:3 RGB bits go in the 8bpp framebuffer | ||
124 | */ | ||
125 | static char *dlfb_set_base8bpp(char *wrptr, u32 base) | ||
126 | { | ||
127 | wrptr = dlfb_set_register(wrptr, 0x26, base >> 16); | ||
128 | wrptr = dlfb_set_register(wrptr, 0x27, base >> 8); | ||
129 | return dlfb_set_register(wrptr, 0x28, base); | ||
130 | } | ||
131 | |||
132 | static char *dlfb_set_register_16(char *wrptr, u8 reg, u16 value) | ||
133 | { | ||
134 | wrptr = dlfb_set_register(wrptr, reg, value >> 8); | ||
135 | return dlfb_set_register(wrptr, reg+1, value); | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * This is kind of weird because the controller takes some | ||
140 | * register values in a different byte order than other registers. | ||
141 | */ | ||
142 | static char *dlfb_set_register_16be(char *wrptr, u8 reg, u16 value) | ||
143 | { | ||
144 | wrptr = dlfb_set_register(wrptr, reg, value); | ||
145 | return dlfb_set_register(wrptr, reg+1, value >> 8); | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * LFSR is a linear feedback shift register. The display controller uses | ||
150 | * LFSR counters rather than plain binary counters in the display path to | ||
151 | * minimize their clock/logic depth, so a desired count cannot be written | ||
152 | * to the hardware directly. This code runs the 16-bit LFSR from its | ||
153 | * hardware start value (0xFFFF) for the requested number of steps and | ||
154 | * returns the value that must be programmed into the hardware comparator | ||
155 | * so that the hardware performs the same actual count. | ||
156 | */ | ||
157 | static u16 dlfb_lfsr16(u16 actual_count) | ||
158 | { | ||
159 | u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */ | ||
160 | |||
161 | while (actual_count--) { | ||
162 | lv = ((lv << 1) | | ||
163 | (((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1)) | ||
164 | & 0xFFFF; | ||
165 | } | ||
166 | |||
167 | return (u16) lv; | ||
168 | } | ||
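/*
 * Editorial sketch (not part of this patch): the conversion above can be
 * checked from user space by compiling a copy of the routine stand-alone.
 * The first few conversions it produces are 0 -> 0xFFFF, 1 -> 0xFFFE,
 * 2 -> 0xFFFC, 3 -> 0xFFF9.
 */
#if 0	/* stand-alone user-space test, not built with the driver */
#include <stdio.h>
#include <stdint.h>

static uint16_t lfsr16(uint16_t actual_count)
{
	uint32_t lv = 0xFFFF;	/* hardware start value, as above */

	while (actual_count--)
		lv = ((lv << 1) |
		      (((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
		      & 0xFFFF;

	return (uint16_t) lv;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 8; i++)
		printf("count %u -> comparator 0x%04X\n", i, lfsr16(i));
	return 0;
}
#endif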
169 | |||
170 | /* | ||
171 | * This does LFSR conversion on the value that is to be written. | ||
172 | * See LFSR explanation above for more detail. | ||
173 | */ | ||
174 | static char *dlfb_set_register_lfsr16(char *wrptr, u8 reg, u16 value) | ||
175 | { | ||
176 | return dlfb_set_register_16(wrptr, reg, dlfb_lfsr16(value)); | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * This takes a standard fbdev screeninfo struct and all of its monitor mode | ||
181 | * details and converts them into the DisplayLink equivalent register commands. | ||
182 | */ | ||
183 | static char *dlfb_set_vid_cmds(char *wrptr, struct fb_var_screeninfo *var) | ||
184 | { | ||
185 | u16 xds, yds; | ||
186 | u16 xde, yde; | ||
187 | u16 yec; | ||
188 | |||
189 | /* x display start */ | ||
190 | xds = var->left_margin + var->hsync_len; | ||
191 | wrptr = dlfb_set_register_lfsr16(wrptr, 0x01, xds); | ||
192 | /* x display end */ | ||
193 | xde = xds + var->xres; | ||
194 | wrptr = dlfb_set_register_lfsr16(wrptr, 0x03, xde); | ||
195 | |||
196 | /* y display start */ | ||
197 | yds = var->upper_margin + var->vsync_len; | ||
198 | wrptr = dlfb_set_register_lfsr16(wrptr, 0x05, yds); | ||
199 | /* y display end */ | ||
200 | yde = yds + var->yres; | ||
201 | wrptr = dlfb_set_register_lfsr16(wrptr, 0x07, yde); | ||
202 | |||
203 | /* x end count is active + blanking - 1 */ | ||
204 | wrptr = dlfb_set_register_lfsr16(wrptr, 0x09, | ||
205 | xde + var->right_margin - 1); | ||
206 | |||
207 | /* libdlo hardcodes hsync start to 1 */ | ||
208 | wrptr = dlfb_set_register_lfsr16(wrptr, 0x0B, 1); | ||
209 | |||
210 | /* hsync end is width of sync pulse + 1 */ | ||
211 | wrptr = dlfb_set_register_lfsr16(wrptr, 0x0D, var->hsync_len + 1); | ||
212 | |||
213 | /* hpixels is active pixels */ | ||
214 | wrptr = dlfb_set_register_16(wrptr, 0x0F, var->xres); | ||
215 | |||
216 | /* yendcount is vertical active + vertical blanking */ | ||
217 | yec = var->yres + var->upper_margin + var->lower_margin + | ||
218 | var->vsync_len; | ||
219 | wrptr = dlfb_set_register_lfsr16(wrptr, 0x11, yec); | ||
220 | |||
221 | /* libdlo hardcodes vsync start to 0 */ | ||
222 | wrptr = dlfb_set_register_lfsr16(wrptr, 0x13, 0); | ||
223 | |||
224 | /* vsync end is width of vsync pulse */ | ||
225 | wrptr = dlfb_set_register_lfsr16(wrptr, 0x15, var->vsync_len); | ||
226 | |||
227 | /* vpixels is active pixels */ | ||
228 | wrptr = dlfb_set_register_16(wrptr, 0x17, var->yres); | ||
229 | |||
230 | /* convert the pixclock period (ps) to a 5 kHz multiple: 10^12 / (pixclock * 5000) */ | ||
231 | wrptr = dlfb_set_register_16be(wrptr, 0x1B, | ||
232 | 200*1000*1000/var->pixclock); | ||
233 | |||
234 | return wrptr; | ||
235 | } | ||
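/*
 * Editorial worked example (not part of this patch), using the standard
 * VESA 1024x768@60 timing where var->pixclock is 15384 ps (a 65 MHz dot
 * clock): 200*1000*1000 / 15384 = 13000, and 13000 * 5 kHz = 65 MHz.
 * dlfb_set_register_16be() then writes that 13000 into registers
 * 0x1B/0x1C in the controller's byte order.
 */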
236 | |||
237 | /* | ||
238 | * This takes a standard fbdev screeninfo struct that was fetched or prepared | ||
239 | * and then generates the appropriate command sequence that then drives the | ||
240 | * display controller. | ||
241 | */ | ||
242 | static int dlfb_set_video_mode(struct dlfb_data *dev, | ||
243 | struct fb_var_screeninfo *var) | ||
244 | { | ||
245 | char *buf; | ||
246 | char *wrptr; | ||
247 | int retval = 0; | ||
248 | int writesize; | ||
249 | struct urb *urb; | ||
250 | |||
251 | if (!atomic_read(&dev->usb_active)) | ||
252 | return -EPERM; | ||
253 | |||
254 | urb = dlfb_get_urb(dev); | ||
255 | if (!urb) | ||
256 | return -ENOMEM; | ||
257 | |||
258 | buf = (char *) urb->transfer_buffer; | ||
259 | |||
260 | /* | ||
261 | * This first section sets the base addresses on the controller that are | ||
262 | * associated with the display. There are 2 base pointers; currently we | ||
263 | * only use the 16 bpp segment. | ||
264 | */ | ||
265 | wrptr = dlfb_vidreg_lock(buf); | ||
266 | wrptr = dlfb_set_color_depth(wrptr, 0x00); | ||
267 | /* set base for 16bpp segment to 0 */ | ||
268 | wrptr = dlfb_set_base16bpp(wrptr, 0); | ||
269 | /* set base for 8bpp segment to end of fb */ | ||
270 | wrptr = dlfb_set_base8bpp(wrptr, dev->info->fix.smem_len); | ||
271 | |||
272 | wrptr = dlfb_set_vid_cmds(wrptr, var); | ||
273 | wrptr = dlfb_enable_hvsync(wrptr, true); | ||
274 | wrptr = dlfb_vidreg_unlock(wrptr); | ||
275 | |||
276 | writesize = wrptr - buf; | ||
277 | |||
278 | retval = dlfb_submit_urb(dev, urb, writesize); | ||
279 | |||
280 | return retval; | ||
281 | } | ||
282 | |||
283 | static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma) | ||
284 | { | ||
285 | unsigned long start = vma->vm_start; | ||
286 | unsigned long size = vma->vm_end - vma->vm_start; | ||
287 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | ||
288 | unsigned long page, pos; | ||
289 | |||
290 | if (offset + size > info->fix.smem_len) | ||
291 | return -EINVAL; | ||
292 | |||
293 | pos = (unsigned long)info->fix.smem_start + offset; | ||
294 | |||
295 | dl_notice("mmap() framebuffer addr:%lu size:%lu\n", | ||
296 | pos, size); | ||
297 | |||
298 | while (size > 0) { | ||
299 | page = vmalloc_to_pfn((void *)pos); | ||
300 | if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) | ||
301 | return -EAGAIN; | ||
302 | |||
303 | start += PAGE_SIZE; | ||
304 | pos += PAGE_SIZE; | ||
305 | if (size > PAGE_SIZE) | ||
306 | size -= PAGE_SIZE; | ||
307 | else | ||
308 | size = 0; | ||
309 | } | ||
310 | |||
311 | vma->vm_flags |= VM_RESERVED; /* avoid swapping out this VMA */ | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * Trims identical data from front and back of line | ||
317 | * Sets new front buffer address and width | ||
318 | * And returns byte count of identical pixels | ||
319 | * Assumes CPU natural alignment (unsigned long) | ||
320 | * for back and front buffer ptrs and width | ||
321 | */ | ||
322 | static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes) | ||
323 | { | ||
324 | int j, k; | ||
325 | const unsigned long *back = (const unsigned long *) bback; | ||
326 | const unsigned long *front = (const unsigned long *) *bfront; | ||
327 | const int width = *width_bytes / sizeof(unsigned long); | ||
328 | int identical = width; | ||
329 | int start = width; | ||
330 | int end = width; | ||
331 | |||
332 | prefetch((void *) front); | ||
333 | prefetch((void *) back); | ||
334 | |||
335 | for (j = 0; j < width; j++) { | ||
336 | if (back[j] != front[j]) { | ||
337 | start = j; | ||
338 | break; | ||
339 | } | ||
340 | } | ||
341 | |||
342 | for (k = width - 1; k > j; k--) { | ||
343 | if (back[k] != front[k]) { | ||
344 | end = k+1; | ||
345 | break; | ||
346 | } | ||
347 | } | ||
348 | |||
349 | identical = start + (width - end); | ||
350 | *bfront = (u8 *) &front[start]; | ||
351 | *width_bytes = (end - start) * sizeof(unsigned long); | ||
352 | |||
353 | return identical * sizeof(unsigned long); | ||
354 | } | ||
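/*
 * Editorial worked example (not part of this patch): on a 64-bit CPU
 * (sizeof(unsigned long) == 8), take a 1024-pixel, 16 bpp line, i.e.
 * 2048 bytes or 256 words, where only bytes 96..127 (words 12..15)
 * differ from the back buffer. The first loop stops at word 12 and the
 * second at word 15, so the function returns with *bfront advanced by
 * 96 bytes, *width_bytes set to (16 - 12) * 8 = 32, and a return value
 * of (12 + (256 - 16)) * 8 = 2016 identical bytes.
 */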
355 | |||
356 | /* | ||
357 | * Render a command stream for an encoded horizontal line segment of pixels. | ||
358 | * | ||
359 | * A command buffer holds several commands. | ||
360 | * It always begins with a fresh command header | ||
361 | * (the protocol doesn't require this, but we enforce it to allow | ||
362 | * multiple buffers to be potentially encoded and sent in parallel). | ||
363 | * A single command encodes one contiguous horizontal line of pixels | ||
364 | * | ||
365 | * The function relies on the client to do all allocation, so that | ||
366 | * rendering can be done directly to output buffers (e.g. USB URBs). | ||
367 | * The function fills the supplied command buffer, providing information | ||
368 | * on where it left off, so the client may call in again with additional | ||
369 | * buffers if the line will take several buffers to complete. | ||
370 | * | ||
371 | * A single command can transmit a maximum of 256 pixels, | ||
372 | * regardless of the compression ratio (protocol design limit). | ||
373 | * To the hardware, 0 for a size byte means 256 | ||
374 | * | ||
375 | * Rather than 256 pixel commands which are either rl or raw encoded, | ||
376 | * the rlx command simply assumes alternating raw and rl spans within one cmd. | ||
377 | * This has a slightly larger header overhead, but produces more even results. | ||
378 | * It also processes all data (read and write) in a single pass. | ||
379 | * Performance benchmarks of common cases show it having just slightly better | ||
380 | * compression than 256 pixel raw or rle commands, with similar CPU consumption. | ||
381 | * But for very rl-friendly data, it will not compress quite as well. | ||
382 | */ | ||
383 | static void dlfb_compress_hline( | ||
384 | const uint16_t **pixel_start_ptr, | ||
385 | const uint16_t *const pixel_end, | ||
386 | uint32_t *device_address_ptr, | ||
387 | uint8_t **command_buffer_ptr, | ||
388 | const uint8_t *const cmd_buffer_end) | ||
389 | { | ||
390 | const uint16_t *pixel = *pixel_start_ptr; | ||
391 | uint32_t dev_addr = *device_address_ptr; | ||
392 | uint8_t *cmd = *command_buffer_ptr; | ||
393 | const int bpp = 2; | ||
394 | |||
395 | while ((pixel_end > pixel) && | ||
396 | (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) { | ||
397 | uint8_t *raw_pixels_count_byte = 0; | ||
398 | uint8_t *cmd_pixels_count_byte = 0; | ||
399 | const uint16_t *raw_pixel_start = 0; | ||
400 | const uint16_t *cmd_pixel_start, *cmd_pixel_end = 0; | ||
401 | |||
402 | prefetchw((void *) cmd); /* pull in one cache line at least */ | ||
403 | |||
404 | *cmd++ = 0xAF; | ||
405 | *cmd++ = 0x6B; | ||
406 | *cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF); | ||
407 | *cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF); | ||
408 | *cmd++ = (uint8_t) ((dev_addr) & 0xFF); | ||
409 | |||
410 | cmd_pixels_count_byte = cmd++; /* we'll know this later */ | ||
411 | cmd_pixel_start = pixel; | ||
412 | |||
413 | raw_pixels_count_byte = cmd++; /* we'll know this later */ | ||
414 | raw_pixel_start = pixel; | ||
415 | |||
416 | cmd_pixel_end = pixel + min(MAX_CMD_PIXELS + 1, | ||
417 | min((int)(pixel_end - pixel), | ||
418 | (int)(cmd_buffer_end - cmd) / bpp)); | ||
419 | |||
420 | prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); | ||
421 | |||
422 | while (pixel < cmd_pixel_end) { | ||
423 | const uint16_t * const repeating_pixel = pixel; | ||
424 | |||
425 | *(uint16_t *)cmd = cpu_to_be16p(pixel); | ||
426 | cmd += 2; | ||
427 | pixel++; | ||
428 | |||
429 | if (unlikely((pixel < cmd_pixel_end) && | ||
430 | (*pixel == *repeating_pixel))) { | ||
431 | /* go back and fill in raw pixel count */ | ||
432 | *raw_pixels_count_byte = ((repeating_pixel - | ||
433 | raw_pixel_start) + 1) & 0xFF; | ||
434 | |||
435 | while ((pixel < cmd_pixel_end) | ||
436 | && (*pixel == *repeating_pixel)) { | ||
437 | pixel++; | ||
438 | } | ||
439 | |||
440 | /* immediately after raw data is repeat byte */ | ||
441 | *cmd++ = ((pixel - repeating_pixel) - 1) & 0xFF; | ||
442 | |||
443 | /* Then start another raw pixel span */ | ||
444 | raw_pixel_start = pixel; | ||
445 | raw_pixels_count_byte = cmd++; | ||
446 | } | ||
447 | } | ||
448 | |||
449 | if (pixel > raw_pixel_start) { | ||
450 | /* finalize last RAW span */ | ||
451 | *raw_pixels_count_byte = (pixel-raw_pixel_start) & 0xFF; | ||
452 | } | ||
453 | |||
454 | *cmd_pixels_count_byte = (pixel - cmd_pixel_start) & 0xFF; | ||
455 | dev_addr += (pixel - cmd_pixel_start) * bpp; | ||
456 | } | ||
457 | |||
458 | if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) { | ||
459 | /* Fill leftover bytes with no-ops */ | ||
460 | if (cmd_buffer_end > cmd) | ||
461 | memset(cmd, 0xAF, cmd_buffer_end - cmd); | ||
462 | cmd = (uint8_t *) cmd_buffer_end; | ||
463 | } | ||
464 | |||
465 | *command_buffer_ptr = cmd; | ||
466 | *pixel_start_ptr = pixel; | ||
467 | *device_address_ptr = dev_addr; | ||
468 | |||
469 | return; | ||
470 | } | ||
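/*
 * Editorial worked example (not part of this patch): feeding the routine
 * above the 16 bpp pixel sequence A B B B C at device address 0x001000
 * yields a single rlx command with this layout:
 *
 *   AF 6B          command prefix + rlx opcode
 *   00 10 00       24-bit device address
 *   05             total pixels covered by this command
 *   02 [A] [B]     raw span: count 2, then pixels A and the first B
 *   02             repeat byte: previous pixel (B) repeated 2 more times
 *   01 [C]         raw span: count 1, then pixel C
 *
 * where [A], [B], [C] stand for the two big-endian bytes of each pixel.
 */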
471 | |||
472 | /* | ||
473 | * There are 3 copies of every pixel: The front buffer that the fbdev | ||
474 | * client renders to, the actual framebuffer across the USB bus in hardware | ||
475 | * (that we can only write to, slowly, and can never read), and (optionally) | ||
476 | * our shadow copy that tracks what's been sent to that hardware buffer. | ||
477 | */ | ||
478 | static int dlfb_render_hline(struct dlfb_data *dev, struct urb **urb_ptr, | ||
479 | const char *front, char **urb_buf_ptr, | ||
480 | u32 byte_offset, u32 byte_width, | ||
481 | int *ident_ptr, int *sent_ptr) | ||
482 | { | ||
483 | const u8 *line_start, *line_end, *next_pixel; | ||
484 | u32 dev_addr = dev->base16 + byte_offset; | ||
485 | struct urb *urb = *urb_ptr; | ||
486 | u8 *cmd = *urb_buf_ptr; | ||
487 | u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length; | ||
488 | |||
489 | line_start = (u8 *) (front + byte_offset); | ||
490 | next_pixel = line_start; | ||
491 | line_end = next_pixel + byte_width; | ||
492 | |||
493 | if (dev->backing_buffer) { | ||
494 | int offset; | ||
495 | const u8 *back_start = (u8 *) (dev->backing_buffer | ||
496 | + byte_offset); | ||
497 | |||
498 | *ident_ptr += dlfb_trim_hline(back_start, &next_pixel, | ||
499 | &byte_width); | ||
500 | |||
501 | offset = next_pixel - line_start; | ||
502 | line_end = next_pixel + byte_width; | ||
503 | dev_addr += offset; | ||
504 | back_start += offset; | ||
505 | line_start += offset; | ||
506 | |||
507 | memcpy((char *)back_start, (char *) line_start, | ||
508 | byte_width); | ||
509 | } | ||
510 | |||
511 | while (next_pixel < line_end) { | ||
512 | |||
513 | dlfb_compress_hline((const uint16_t **) &next_pixel, | ||
514 | (const uint16_t *) line_end, &dev_addr, | ||
515 | (u8 **) &cmd, (u8 *) cmd_end); | ||
516 | |||
517 | if (cmd >= cmd_end) { | ||
518 | int len = cmd - (u8 *) urb->transfer_buffer; | ||
519 | if (dlfb_submit_urb(dev, urb, len)) | ||
520 | return 1; /* lost pixels is set */ | ||
521 | *sent_ptr += len; | ||
522 | urb = dlfb_get_urb(dev); | ||
523 | if (!urb) | ||
524 | return 1; /* lost_pixels is set */ | ||
525 | *urb_ptr = urb; | ||
526 | cmd = urb->transfer_buffer; | ||
527 | cmd_end = &cmd[urb->transfer_buffer_length]; | ||
528 | } | ||
529 | } | ||
530 | |||
531 | *urb_buf_ptr = cmd; | ||
532 | |||
533 | return 0; | ||
534 | } | ||
535 | |||
536 | int dlfb_handle_damage(struct dlfb_data *dev, int x, int y, | ||
537 | int width, int height, char *data) | ||
538 | { | ||
539 | int i, ret; | ||
540 | char *cmd; | ||
541 | cycles_t start_cycles, end_cycles; | ||
542 | int bytes_sent = 0; | ||
543 | int bytes_identical = 0; | ||
544 | struct urb *urb; | ||
545 | int aligned_x; | ||
546 | |||
547 | start_cycles = get_cycles(); | ||
548 | |||
549 | aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long)); | ||
550 | width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long)); | ||
551 | x = aligned_x; | ||
552 | |||
553 | if ((width <= 0) || | ||
554 | (x + width > dev->info->var.xres) || | ||
555 | (y + height > dev->info->var.yres)) | ||
556 | return -EINVAL; | ||
557 | |||
558 | if (!atomic_read(&dev->usb_active)) | ||
559 | return 0; | ||
560 | |||
561 | urb = dlfb_get_urb(dev); | ||
562 | if (!urb) | ||
563 | return 0; | ||
564 | cmd = urb->transfer_buffer; | ||
565 | |||
566 | for (i = y; i < y + height ; i++) { | ||
567 | const int line_offset = dev->info->fix.line_length * i; | ||
568 | const int byte_offset = line_offset + (x * BPP); | ||
569 | |||
570 | if (dlfb_render_hline(dev, &urb, | ||
571 | (char *) dev->info->fix.smem_start, | ||
572 | &cmd, byte_offset, width * BPP, | ||
573 | &bytes_identical, &bytes_sent)) | ||
574 | goto error; | ||
575 | } | ||
576 | |||
577 | if (cmd > (char *) urb->transfer_buffer) { | ||
578 | /* Send partial buffer remaining before exiting */ | ||
579 | int len = cmd - (char *) urb->transfer_buffer; | ||
580 | ret = dlfb_submit_urb(dev, urb, len); | ||
581 | bytes_sent += len; | ||
582 | } else | ||
583 | dlfb_urb_completion(urb); | ||
584 | |||
585 | error: | ||
586 | atomic_add(bytes_sent, &dev->bytes_sent); | ||
587 | atomic_add(bytes_identical, &dev->bytes_identical); | ||
588 | atomic_add(width*height*2, &dev->bytes_rendered); | ||
589 | end_cycles = get_cycles(); | ||
590 | atomic_add(((unsigned int) ((end_cycles - start_cycles) | ||
591 | >> 10)), /* Kcycles */ | ||
592 | &dev->cpu_kcycles_used); | ||
593 | |||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | static ssize_t dlfb_ops_read(struct fb_info *info, char __user *buf, | ||
598 | size_t count, loff_t *ppos) | ||
599 | { | ||
600 | ssize_t result = -ENOSYS; | ||
601 | |||
602 | #if defined CONFIG_FB_SYS_FOPS || defined CONFIG_FB_SYS_FOPS_MODULE | ||
603 | result = fb_sys_read(info, buf, count, ppos); | ||
604 | #endif | ||
605 | |||
606 | return result; | ||
607 | } | ||
608 | |||
609 | /* | ||
610 | * Path triggered by usermode clients who write to filesystem | ||
611 | * e.g. cat filename > /dev/fb1 | ||
612 | * Not used by X Windows or text-mode console. But useful for testing. | ||
613 | * Slow because of extra copy and we must assume all pixels dirty. | ||
614 | */ | ||
615 | static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf, | ||
616 | size_t count, loff_t *ppos) | ||
617 | { | ||
618 | ssize_t result = -ENOSYS; | ||
619 | struct dlfb_data *dev = info->par; | ||
620 | u32 offset = (u32) *ppos; | ||
621 | |||
622 | #if defined CONFIG_FB_SYS_FOPS || defined CONFIG_FB_SYS_FOPS_MODULE | ||
623 | |||
624 | result = fb_sys_write(info, buf, count, ppos); | ||
625 | |||
626 | if (result > 0) { | ||
627 | int start = max((int)(offset / info->fix.line_length) - 1, 0); | ||
628 | int lines = min((u32)((result / info->fix.line_length) + 1), | ||
629 | (u32)info->var.yres); | ||
630 | |||
631 | dlfb_handle_damage(dev, 0, start, info->var.xres, | ||
632 | lines, info->screen_base); | ||
633 | } | ||
634 | #endif | ||
635 | |||
636 | return result; | ||
637 | } | ||
638 | |||
639 | /* hardware has native COPY command (see libdlo), but not worth it for fbcon */ | ||
640 | static void dlfb_ops_copyarea(struct fb_info *info, | ||
641 | const struct fb_copyarea *area) | ||
642 | { | ||
643 | |||
644 | struct dlfb_data *dev = info->par; | ||
645 | |||
646 | #if defined CONFIG_FB_SYS_COPYAREA || defined CONFIG_FB_SYS_COPYAREA_MODULE | ||
647 | |||
648 | sys_copyarea(info, area); | ||
649 | |||
650 | dlfb_handle_damage(dev, area->dx, area->dy, | ||
651 | area->width, area->height, info->screen_base); | ||
652 | #endif | ||
653 | |||
654 | } | ||
655 | |||
656 | static void dlfb_ops_imageblit(struct fb_info *info, | ||
657 | const struct fb_image *image) | ||
658 | { | ||
659 | struct dlfb_data *dev = info->par; | ||
660 | |||
661 | #if defined CONFIG_FB_SYS_IMAGEBLIT || defined CONFIG_FB_SYS_IMAGEBLIT_MODULE | ||
662 | |||
663 | sys_imageblit(info, image); | ||
664 | |||
665 | dlfb_handle_damage(dev, image->dx, image->dy, | ||
666 | image->width, image->height, info->screen_base); | ||
667 | |||
668 | #endif | ||
669 | |||
670 | } | ||
671 | |||
672 | static void dlfb_ops_fillrect(struct fb_info *info, | ||
673 | const struct fb_fillrect *rect) | ||
674 | { | ||
675 | struct dlfb_data *dev = info->par; | ||
676 | |||
677 | #if defined CONFIG_FB_SYS_FILLRECT || defined CONFIG_FB_SYS_FILLRECT_MODULE | ||
678 | |||
679 | sys_fillrect(info, rect); | ||
680 | |||
681 | dlfb_handle_damage(dev, rect->dx, rect->dy, rect->width, | ||
682 | rect->height, info->screen_base); | ||
683 | #endif | ||
684 | |||
685 | } | ||
686 | |||
687 | #ifdef CONFIG_FB_DEFERRED_IO | ||
688 | /* | ||
689 | * NOTE: fb_defio.c is holding info->fbdefio.mutex | ||
690 | * Touching ANY framebuffer memory that triggers a page fault | ||
691 | * in fb_defio will deadlock, because fb_defio will then try to | ||
692 | * grab the same mutex it is already holding. | ||
693 | */ | ||
694 | static void dlfb_dpy_deferred_io(struct fb_info *info, | ||
695 | struct list_head *pagelist) | ||
696 | { | ||
697 | struct page *cur; | ||
698 | struct fb_deferred_io *fbdefio = info->fbdefio; | ||
699 | struct dlfb_data *dev = info->par; | ||
700 | struct urb *urb; | ||
701 | char *cmd; | ||
702 | cycles_t start_cycles, end_cycles; | ||
703 | int bytes_sent = 0; | ||
704 | int bytes_identical = 0; | ||
705 | int bytes_rendered = 0; | ||
706 | |||
707 | if (!fb_defio) | ||
708 | return; | ||
709 | |||
710 | if (!atomic_read(&dev->usb_active)) | ||
711 | return; | ||
712 | |||
713 | start_cycles = get_cycles(); | ||
714 | |||
715 | urb = dlfb_get_urb(dev); | ||
716 | if (!urb) | ||
717 | return; | ||
718 | |||
719 | cmd = urb->transfer_buffer; | ||
720 | |||
721 | /* walk the written page list and render each to device */ | ||
722 | list_for_each_entry(cur, &fbdefio->pagelist, lru) { | ||
723 | |||
724 | if (dlfb_render_hline(dev, &urb, (char *) info->fix.smem_start, | ||
725 | &cmd, cur->index << PAGE_SHIFT, | ||
726 | PAGE_SIZE, &bytes_identical, &bytes_sent)) | ||
727 | goto error; | ||
728 | bytes_rendered += PAGE_SIZE; | ||
729 | } | ||
730 | |||
731 | if (cmd > (char *) urb->transfer_buffer) { | ||
732 | /* Send partial buffer remaining before exiting */ | ||
733 | int len = cmd - (char *) urb->transfer_buffer; | ||
734 | dlfb_submit_urb(dev, urb, len); | ||
735 | bytes_sent += len; | ||
736 | } else | ||
737 | dlfb_urb_completion(urb); | ||
738 | |||
739 | error: | ||
740 | atomic_add(bytes_sent, &dev->bytes_sent); | ||
741 | atomic_add(bytes_identical, &dev->bytes_identical); | ||
742 | atomic_add(bytes_rendered, &dev->bytes_rendered); | ||
743 | end_cycles = get_cycles(); | ||
744 | atomic_add(((unsigned int) ((end_cycles - start_cycles) | ||
745 | >> 10)), /* Kcycles */ | ||
746 | &dev->cpu_kcycles_used); | ||
747 | } | ||
748 | |||
749 | #endif | ||
750 | |||
751 | static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len) | ||
752 | { | ||
753 | int i; | ||
754 | int ret; | ||
755 | char *rbuf; | ||
756 | |||
757 | rbuf = kmalloc(2, GFP_KERNEL); | ||
758 | if (!rbuf) | ||
759 | return 0; | ||
760 | |||
761 | for (i = 0; i < len; i++) { | ||
762 | ret = usb_control_msg(dev->udev, | ||
763 | usb_rcvctrlpipe(dev->udev, 0), (0x02), | ||
764 | (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2, | ||
765 | HZ); | ||
766 | if (ret < 1) { | ||
767 | dl_err("Read EDID byte %d failed err %x\n", i, ret); | ||
768 | i--; | ||
769 | break; | ||
770 | } | ||
771 | edid[i] = rbuf[1]; | ||
772 | } | ||
773 | |||
774 | kfree(rbuf); | ||
775 | |||
776 | return i; | ||
777 | } | ||
778 | |||
779 | static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd, | ||
780 | unsigned long arg) | ||
781 | { | ||
782 | |||
783 | struct dlfb_data *dev = info->par; | ||
784 | struct dloarea *area = NULL; | ||
785 | |||
786 | if (!atomic_read(&dev->usb_active)) | ||
787 | return 0; | ||
788 | |||
789 | /* TODO: Update X server to get this from sysfs instead */ | ||
790 | if (cmd == DLFB_IOCTL_RETURN_EDID) { | ||
791 | char *edid = (char *)arg; | ||
792 | if (copy_to_user(edid, dev->edid, dev->edid_size)) | ||
793 | return -EFAULT; | ||
794 | return 0; | ||
795 | } | ||
796 | |||
797 | /* TODO: Help propose a standard fb.h ioctl to report mmap damage */ | ||
798 | if (cmd == DLFB_IOCTL_REPORT_DAMAGE) { | ||
799 | |||
800 | /* | ||
801 | * If we have a damage-aware client, turn fb_defio "off" | ||
802 | * to avoid the performance impact of unnecessary page fault handling. | ||
803 | * Done by resetting the delay for this fb_info to a very | ||
804 | * long period. Pages will become writable and stay that way. | ||
805 | * Reset to normal value when all clients have closed this fb. | ||
806 | */ | ||
807 | if (info->fbdefio) | ||
808 | info->fbdefio->delay = DL_DEFIO_WRITE_DISABLE; | ||
809 | |||
810 | area = (struct dloarea *)arg; | ||
811 | |||
812 | if (area->x < 0) | ||
813 | area->x = 0; | ||
814 | |||
815 | if (area->x > info->var.xres) | ||
816 | area->x = info->var.xres; | ||
817 | |||
818 | if (area->y < 0) | ||
819 | area->y = 0; | ||
820 | |||
821 | if (area->y > info->var.yres) | ||
822 | area->y = info->var.yres; | ||
823 | |||
824 | dlfb_handle_damage(dev, area->x, area->y, area->w, area->h, | ||
825 | info->screen_base); | ||
826 | } | ||
827 | |||
828 | return 0; | ||
829 | } | ||
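/*
 * Editorial sketch (not part of this patch): a damage-aware user-space
 * client could use the ioctl above roughly as follows. The ioctl number
 * DLFB_IOCTL_REPORT_DAMAGE and the struct dloarea layout must be copied
 * from video/udlfb.h; both are assumptions here, not a stable public ABI.
 */
#if 0	/* stand-alone user-space example, not built with the driver */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
/* struct dloarea and DLFB_IOCTL_REPORT_DAMAGE: copy from video/udlfb.h */

static int report_damage(int fb_fd, int x, int y, int w, int h)
{
	struct dloarea area = { .x = x, .y = y, .w = w, .h = h };

	/* tell udlfb which rectangle of the mmap()ed framebuffer changed */
	return ioctl(fb_fd, DLFB_IOCTL_REPORT_DAMAGE, &area);
}
#endif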
830 | |||
831 | /* taken from vesafb */ | ||
832 | static int | ||
833 | dlfb_ops_setcolreg(unsigned regno, unsigned red, unsigned green, | ||
834 | unsigned blue, unsigned transp, struct fb_info *info) | ||
835 | { | ||
836 | int err = 0; | ||
837 | |||
838 | if (regno >= info->cmap.len) | ||
839 | return 1; | ||
840 | |||
841 | if (regno < 16) { | ||
842 | if (info->var.red.offset == 10) { | ||
843 | /* 1:5:5:5 */ | ||
844 | ((u32 *) (info->pseudo_palette))[regno] = | ||
845 | ((red & 0xf800) >> 1) | | ||
846 | ((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11); | ||
847 | } else { | ||
848 | /* 0:5:6:5 */ | ||
849 | ((u32 *) (info->pseudo_palette))[regno] = | ||
850 | ((red & 0xf800)) | | ||
851 | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11); | ||
852 | } | ||
853 | } | ||
854 | |||
855 | return err; | ||
856 | } | ||
857 | |||
858 | /* | ||
859 | * It's common for several clients to have the framebuffer open simultaneously, | ||
860 | * e.g. both fbcon and X, which makes things interesting. | ||
861 | * Assumes caller is holding info->lock (for open and release at least) | ||
862 | */ | ||
863 | static int dlfb_ops_open(struct fb_info *info, int user) | ||
864 | { | ||
865 | struct dlfb_data *dev = info->par; | ||
866 | |||
867 | /* | ||
868 | * fbcon aggressively connects to first framebuffer it finds, | ||
869 | * preventing other clients (X) from working properly. Usually | ||
870 | * not what the user wants. Fail by default with option to enable. | ||
871 | */ | ||
872 | if ((user == 0) && (!console)) | ||
873 | return -EBUSY; | ||
874 | |||
875 | /* If the USB device is gone, we don't accept new opens */ | ||
876 | if (dev->virtualized) | ||
877 | return -ENODEV; | ||
878 | |||
879 | dev->fb_count++; | ||
880 | |||
881 | kref_get(&dev->kref); | ||
882 | |||
883 | #ifdef CONFIG_FB_DEFERRED_IO | ||
884 | if (fb_defio && (info->fbdefio == NULL)) { | ||
885 | /* enable defio at last moment if not disabled by client */ | ||
886 | |||
887 | struct fb_deferred_io *fbdefio; | ||
888 | |||
889 | fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); | ||
890 | |||
891 | if (fbdefio) { | ||
892 | fbdefio->delay = DL_DEFIO_WRITE_DELAY; | ||
893 | fbdefio->deferred_io = dlfb_dpy_deferred_io; | ||
894 | } | ||
895 | |||
896 | info->fbdefio = fbdefio; | ||
897 | fb_deferred_io_init(info); | ||
898 | } | ||
899 | #endif | ||
900 | |||
901 | dl_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n", | ||
902 | info->node, user, info, dev->fb_count); | ||
903 | |||
904 | return 0; | ||
905 | } | ||
906 | |||
907 | /* | ||
908 | * Called when all client interfaces to start transactions have been disabled, | ||
909 | * and all references to our device instance (dlfb_data) are released. | ||
910 | * Every transaction must have a reference, so we know we are fully spun down | ||
911 | */ | ||
912 | static void dlfb_free(struct kref *kref) | ||
913 | { | ||
914 | struct dlfb_data *dev = container_of(kref, struct dlfb_data, kref); | ||
915 | |||
916 | /* this function will wait for all in-flight urbs to complete */ | ||
917 | if (dev->urbs.count > 0) | ||
918 | dlfb_free_urb_list(dev); | ||
919 | |||
920 | if (dev->backing_buffer) | ||
921 | vfree(dev->backing_buffer); | ||
922 | |||
923 | kfree(dev->edid); | ||
924 | |||
925 | dl_warn("freeing dlfb_data %p\n", dev); | ||
926 | |||
927 | kfree(dev); | ||
928 | } | ||
929 | |||
930 | static void dlfb_release_urb_work(struct work_struct *work) | ||
931 | { | ||
932 | struct urb_node *unode = container_of(work, struct urb_node, | ||
933 | release_urb_work.work); | ||
934 | |||
935 | up(&unode->dev->urbs.limit_sem); | ||
936 | } | ||
937 | |||
938 | static void dlfb_free_framebuffer_work(struct work_struct *work) | ||
939 | { | ||
940 | struct dlfb_data *dev = container_of(work, struct dlfb_data, | ||
941 | free_framebuffer_work.work); | ||
942 | struct fb_info *info = dev->info; | ||
943 | int node = info->node; | ||
944 | |||
945 | unregister_framebuffer(info); | ||
946 | |||
947 | if (info->cmap.len != 0) | ||
948 | fb_dealloc_cmap(&info->cmap); | ||
949 | if (info->monspecs.modedb) | ||
950 | fb_destroy_modedb(info->monspecs.modedb); | ||
951 | if (info->screen_base) | ||
952 | vfree(info->screen_base); | ||
953 | |||
954 | fb_destroy_modelist(&info->modelist); | ||
955 | |||
956 | dev->info = 0; | ||
957 | |||
958 | /* Assume info structure is freed after this point */ | ||
959 | framebuffer_release(info); | ||
960 | |||
961 | dl_warn("fb_info for /dev/fb%d has been freed\n", node); | ||
962 | |||
963 | /* ref taken in probe() as part of registering framebuffer */ | ||
964 | kref_put(&dev->kref, dlfb_free); | ||
965 | } | ||
966 | |||
967 | /* | ||
968 | * Assumes caller is holding info->lock mutex (for open and release at least) | ||
969 | */ | ||
970 | static int dlfb_ops_release(struct fb_info *info, int user) | ||
971 | { | ||
972 | struct dlfb_data *dev = info->par; | ||
973 | |||
974 | dev->fb_count--; | ||
975 | |||
976 | /* We can't free fb_info here - fbmem will touch it when we return */ | ||
977 | if (dev->virtualized && (dev->fb_count == 0)) | ||
978 | schedule_delayed_work(&dev->free_framebuffer_work, HZ); | ||
979 | |||
980 | #ifdef CONFIG_FB_DEFERRED_IO | ||
981 | if ((dev->fb_count == 0) && (info->fbdefio)) { | ||
982 | fb_deferred_io_cleanup(info); | ||
983 | kfree(info->fbdefio); | ||
984 | info->fbdefio = NULL; | ||
985 | info->fbops->fb_mmap = dlfb_ops_mmap; | ||
986 | } | ||
987 | #endif | ||
988 | |||
989 | dl_warn("released /dev/fb%d user=%d count=%d\n", | ||
990 | info->node, user, dev->fb_count); | ||
991 | |||
992 | kref_put(&dev->kref, dlfb_free); | ||
993 | |||
994 | return 0; | ||
995 | } | ||
996 | |||
997 | /* | ||
998 | * Check whether a video mode is supported by the DisplayLink chip | ||
999 | * We start from the monitor's modes, so we don't need to filter those here | ||
1000 | */ | ||
1001 | static int dlfb_is_valid_mode(struct fb_videomode *mode, | ||
1002 | struct fb_info *info) | ||
1003 | { | ||
1004 | struct dlfb_data *dev = info->par; | ||
1005 | |||
1006 | if (mode->xres * mode->yres > dev->sku_pixel_limit) { | ||
1007 | dl_warn("%dx%d beyond chip capabilities\n", | ||
1008 | mode->xres, mode->yres); | ||
1009 | return 0; | ||
1010 | } | ||
1011 | |||
1012 | dl_info("%dx%d valid mode\n", mode->xres, mode->yres); | ||
1013 | |||
1014 | return 1; | ||
1015 | } | ||
1016 | |||
1017 | static void dlfb_var_color_format(struct fb_var_screeninfo *var) | ||
1018 | { | ||
1019 | const struct fb_bitfield red = { 11, 5, 0 }; | ||
1020 | const struct fb_bitfield green = { 5, 6, 0 }; | ||
1021 | const struct fb_bitfield blue = { 0, 5, 0 }; | ||
1022 | |||
1023 | var->bits_per_pixel = 16; | ||
1024 | var->red = red; | ||
1025 | var->green = green; | ||
1026 | var->blue = blue; | ||
1027 | } | ||
1028 | |||
1029 | static int dlfb_ops_check_var(struct fb_var_screeninfo *var, | ||
1030 | struct fb_info *info) | ||
1031 | { | ||
1032 | struct fb_videomode mode; | ||
1033 | |||
1034 | /* TODO: support dynamically changing framebuffer size */ | ||
1035 | if ((var->xres * var->yres * 2) > info->fix.smem_len) | ||
1036 | return -EINVAL; | ||
1037 | |||
1038 | /* set device-specific elements of var unrelated to mode */ | ||
1039 | dlfb_var_color_format(var); | ||
1040 | |||
1041 | fb_var_to_videomode(&mode, var); | ||
1042 | |||
1043 | if (!dlfb_is_valid_mode(&mode, info)) | ||
1044 | return -EINVAL; | ||
1045 | |||
1046 | return 0; | ||
1047 | } | ||
1048 | |||
1049 | static int dlfb_ops_set_par(struct fb_info *info) | ||
1050 | { | ||
1051 | struct dlfb_data *dev = info->par; | ||
1052 | int result; | ||
1053 | u16 *pix_framebuffer; | ||
1054 | int i; | ||
1055 | |||
1056 | dl_notice("set_par mode %dx%d\n", info->var.xres, info->var.yres); | ||
1057 | |||
1058 | result = dlfb_set_video_mode(dev, &info->var); | ||
1059 | |||
1060 | if ((result == 0) && (dev->fb_count == 0)) { | ||
1061 | |||
1062 | /* paint greenscreen */ | ||
1063 | |||
1064 | pix_framebuffer = (u16 *) info->screen_base; | ||
1065 | for (i = 0; i < info->fix.smem_len / 2; i++) | ||
1066 | pix_framebuffer[i] = 0x37e6; | ||
1067 | |||
1068 | dlfb_handle_damage(dev, 0, 0, info->var.xres, info->var.yres, | ||
1069 | info->screen_base); | ||
1070 | } | ||
1071 | |||
1072 | return result; | ||
1073 | } | ||
1074 | |||
1075 | /* | ||
1076 | * In order to come back from full DPMS off, we need to set the mode again | ||
1077 | */ | ||
1078 | static int dlfb_ops_blank(int blank_mode, struct fb_info *info) | ||
1079 | { | ||
1080 | struct dlfb_data *dev = info->par; | ||
1081 | |||
1082 | if (blank_mode != FB_BLANK_UNBLANK) { | ||
1083 | char *bufptr; | ||
1084 | struct urb *urb; | ||
1085 | |||
1086 | urb = dlfb_get_urb(dev); | ||
1087 | if (!urb) | ||
1088 | return 0; | ||
1089 | |||
1090 | bufptr = (char *) urb->transfer_buffer; | ||
1091 | bufptr = dlfb_vidreg_lock(bufptr); | ||
1092 | bufptr = dlfb_enable_hvsync(bufptr, false); | ||
1093 | bufptr = dlfb_vidreg_unlock(bufptr); | ||
1094 | |||
1095 | dlfb_submit_urb(dev, urb, bufptr - | ||
1096 | (char *) urb->transfer_buffer); | ||
1097 | } else { | ||
1098 | dlfb_set_video_mode(dev, &info->var); | ||
1099 | } | ||
1100 | |||
1101 | return 0; | ||
1102 | } | ||
1103 | |||
1104 | static struct fb_ops dlfb_ops = { | ||
1105 | .owner = THIS_MODULE, | ||
1106 | .fb_read = dlfb_ops_read, | ||
1107 | .fb_write = dlfb_ops_write, | ||
1108 | .fb_setcolreg = dlfb_ops_setcolreg, | ||
1109 | .fb_fillrect = dlfb_ops_fillrect, | ||
1110 | .fb_copyarea = dlfb_ops_copyarea, | ||
1111 | .fb_imageblit = dlfb_ops_imageblit, | ||
1112 | .fb_mmap = dlfb_ops_mmap, | ||
1113 | .fb_ioctl = dlfb_ops_ioctl, | ||
1114 | .fb_open = dlfb_ops_open, | ||
1115 | .fb_release = dlfb_ops_release, | ||
1116 | .fb_blank = dlfb_ops_blank, | ||
1117 | .fb_check_var = dlfb_ops_check_var, | ||
1118 | .fb_set_par = dlfb_ops_set_par, | ||
1119 | }; | ||
1120 | |||
1121 | |||
1122 | /* | ||
1123 | * Assumes &info->lock held by caller | ||
1124 | * Assumes no active clients have framebuffer open | ||
1125 | */ | ||
1126 | static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info) | ||
1127 | { | ||
1128 | int retval = -ENOMEM; | ||
1129 | int old_len = info->fix.smem_len; | ||
1130 | int new_len; | ||
1131 | unsigned char *old_fb = info->screen_base; | ||
1132 | unsigned char *new_fb; | ||
1133 | unsigned char *new_back; | ||
1134 | |||
1135 | dl_warn("Reallocating framebuffer. Addresses will change!\n"); | ||
1136 | |||
1137 | new_len = info->fix.line_length * info->var.yres; | ||
1138 | |||
1139 | if (PAGE_ALIGN(new_len) > old_len) { | ||
1140 | /* | ||
1141 | * Alloc system memory for virtual framebuffer | ||
1142 | */ | ||
1143 | new_fb = vmalloc(new_len); | ||
1144 | if (!new_fb) { | ||
1145 | dl_err("Virtual framebuffer alloc failed\n"); | ||
1146 | goto error; | ||
1147 | } | ||
1148 | |||
1149 | if (info->screen_base) { | ||
1150 | memcpy(new_fb, old_fb, old_len); | ||
1151 | vfree(info->screen_base); | ||
1152 | } | ||
1153 | |||
1154 | info->screen_base = new_fb; | ||
1155 | info->fix.smem_len = PAGE_ALIGN(new_len); | ||
1156 | info->fix.smem_start = (unsigned long) new_fb; | ||
1157 | info->flags = udlfb_info_flags; | ||
1158 | |||
1159 | /* | ||
1160 | * Second framebuffer copy to mirror the framebuffer state | ||
1161 | * on the physical USB device. We can function without this. | ||
1162 | * But with imperfect damage info we may send pixels over USB | ||
1163 | * that were, in fact, unchanged - wasting limited USB bandwidth | ||
1164 | */ | ||
1165 | new_back = vmalloc(new_len); | ||
1166 | if (!new_back) | ||
1167 | dl_info("No shadow/backing buffer allocated\n"); | ||
1168 | else { | ||
1169 | if (dev->backing_buffer) | ||
1170 | vfree(dev->backing_buffer); | ||
1171 | dev->backing_buffer = new_back; | ||
1172 | memset(dev->backing_buffer, 0, new_len); | ||
1173 | } | ||
1174 | } | ||
1175 | |||
1176 | retval = 0; | ||
1177 | |||
1178 | error: | ||
1179 | return retval; | ||
1180 | } | ||
1181 | |||
1182 | /* | ||
1183 | * 1) Get EDID from hw, or use sw default | ||
1184 | * 2) Parse into various fb_info structs | ||
1185 | * 3) Allocate virtual framebuffer memory to back highest res mode | ||
1186 | * | ||
1187 | * Parses EDID into three places used by various parts of fbdev: | ||
1188 | * fb_var_screeninfo contains the timing of the monitor's preferred mode | ||
1189 | * fb_info.monspecs is full parsed EDID info, including monspecs.modedb | ||
1190 | * fb_info.modelist is a linked list of all monitor & VESA modes which work | ||
1191 | * | ||
1192 | * If EDID is not readable/valid, then modelist is all VESA modes, | ||
1193 | * monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode | ||
1194 | * Returns 0 if successful | ||
1195 | */ | ||
1196 | static int dlfb_setup_modes(struct dlfb_data *dev, | ||
1197 | struct fb_info *info, | ||
1198 | char *default_edid, size_t default_edid_size) | ||
1199 | { | ||
1200 | int i; | ||
1201 | const struct fb_videomode *default_vmode = NULL; | ||
1202 | int result = 0; | ||
1203 | char *edid; | ||
1204 | int tries = 3; | ||
1205 | |||
1206 | if (info->dev) /* only use mutex if info has been registered */ | ||
1207 | mutex_lock(&info->lock); | ||
1208 | |||
1209 | edid = kmalloc(MAX_EDID_SIZE, GFP_KERNEL); | ||
1210 | if (!edid) { | ||
1211 | result = -ENOMEM; | ||
1212 | goto error; | ||
1213 | } | ||
1214 | |||
1215 | fb_destroy_modelist(&info->modelist); | ||
1216 | memset(&info->monspecs, 0, sizeof(info->monspecs)); | ||
1217 | |||
1218 | /* | ||
1219 | * Try to (re)read EDID from hardware first | ||
1220 | * EDID data may be returned, yet fail to parse as valid | ||
1221 | * Try again a few times, in case of e.g. analog cable noise | ||
1222 | */ | ||
1223 | while (tries--) { | ||
1224 | |||
1225 | i = dlfb_get_edid(dev, edid, MAX_EDID_SIZE); | ||
1226 | |||
1227 | if (i >= MIN_EDID_SIZE) | ||
1228 | fb_edid_to_monspecs(edid, &info->monspecs); | ||
1229 | |||
1230 | if (info->monspecs.modedb_len > 0) { | ||
1231 | dev->edid = edid; | ||
1232 | dev->edid_size = i; | ||
1233 | break; | ||
1234 | } | ||
1235 | } | ||
1236 | |||
1237 | /* If that fails, use a previously returned EDID if available */ | ||
1238 | if (info->monspecs.modedb_len == 0) { | ||
1239 | |||
1240 | dl_err("Unable to get valid EDID from device/display\n"); | ||
1241 | |||
1242 | if (dev->edid) { | ||
1243 | fb_edid_to_monspecs(dev->edid, &info->monspecs); | ||
1244 | if (info->monspecs.modedb_len > 0) | ||
1245 | dl_err("Using previously queried EDID\n"); | ||
1246 | } | ||
1247 | } | ||
1248 | |||
1249 | /* If that fails, use the default EDID we were handed */ | ||
1250 | if (info->monspecs.modedb_len == 0) { | ||
1251 | if (default_edid_size >= MIN_EDID_SIZE) { | ||
1252 | fb_edid_to_monspecs(default_edid, &info->monspecs); | ||
1253 | if (info->monspecs.modedb_len > 0) { | ||
1254 | memcpy(edid, default_edid, default_edid_size); | ||
1255 | dev->edid = edid; | ||
1256 | dev->edid_size = default_edid_size; | ||
1257 | dl_err("Using default/backup EDID\n"); | ||
1258 | } | ||
1259 | } | ||
1260 | } | ||
1261 | |||
1262 | /* If we've got modes, let's pick a best default mode */ | ||
1263 | if (info->monspecs.modedb_len > 0) { | ||
1264 | |||
1265 | for (i = 0; i < info->monspecs.modedb_len; i++) { | ||
1266 | if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info)) | ||
1267 | fb_add_videomode(&info->monspecs.modedb[i], | ||
1268 | &info->modelist); | ||
1269 | else /* if we've removed top/best mode */ | ||
1270 | info->monspecs.misc &= ~FB_MISC_1ST_DETAIL; | ||
1271 | } | ||
1272 | |||
1273 | default_vmode = fb_find_best_display(&info->monspecs, | ||
1274 | &info->modelist); | ||
1275 | } | ||
1276 | |||
1277 | /* If everything else has failed, fall back to safe default mode */ | ||
1278 | if (default_vmode == NULL) { | ||
1279 | |||
1280 | struct fb_videomode fb_vmode = {0}; | ||
1281 | |||
1282 | /* | ||
1283 | * Add the standard VESA modes to our modelist | ||
1284 | * Since we don't have EDID, some modes may overspec the | ||
1285 | * monitor and/or have an incorrect aspect ratio, etc. | ||
1286 | * But at least the user has a chance to choose | ||
1287 | */ | ||
1288 | for (i = 0; i < VESA_MODEDB_SIZE; i++) { | ||
1289 | if (dlfb_is_valid_mode((struct fb_videomode *) | ||
1290 | &vesa_modes[i], info)) | ||
1291 | fb_add_videomode(&vesa_modes[i], | ||
1292 | &info->modelist); | ||
1293 | } | ||
1294 | |||
1295 | /* | ||
1296 | * default to a resolution that is safe for projectors | ||
1297 | * (since they are the most common case without EDID) | ||
1298 | */ | ||
1299 | fb_vmode.xres = 800; | ||
1300 | fb_vmode.yres = 600; | ||
1301 | fb_vmode.refresh = 60; | ||
1302 | default_vmode = fb_find_nearest_mode(&fb_vmode, | ||
1303 | &info->modelist); | ||
1304 | } | ||
1305 | |||
1306 | /* If we have a good mode and no active clients */ | ||
1307 | if ((default_vmode != NULL) && (dev->fb_count == 0)) { | ||
1308 | |||
1309 | fb_videomode_to_var(&info->var, default_vmode); | ||
1310 | dlfb_var_color_format(&info->var); | ||
1311 | |||
1312 | /* | ||
1313 | * with mode size info, we can now alloc our framebuffer. | ||
1314 | */ | ||
1315 | memcpy(&info->fix, &dlfb_fix, sizeof(dlfb_fix)); | ||
1316 | info->fix.line_length = info->var.xres * | ||
1317 | (info->var.bits_per_pixel / 8); | ||
1318 | |||
1319 | result = dlfb_realloc_framebuffer(dev, info); | ||
1320 | |||
1321 | } else | ||
1322 | result = -EINVAL; | ||
1323 | |||
1324 | error: | ||
1325 | if (edid && (dev->edid != edid)) | ||
1326 | kfree(edid); | ||
1327 | |||
1328 | if (info->dev) | ||
1329 | mutex_unlock(&info->lock); | ||
1330 | |||
1331 | return result; | ||
1332 | } | ||
1333 | |||
1334 | static ssize_t metrics_bytes_rendered_show(struct device *fbdev, | ||
1335 | struct device_attribute *a, char *buf) { | ||
1336 | struct fb_info *fb_info = dev_get_drvdata(fbdev); | ||
1337 | struct dlfb_data *dev = fb_info->par; | ||
1338 | return snprintf(buf, PAGE_SIZE, "%u\n", | ||
1339 | atomic_read(&dev->bytes_rendered)); | ||
1340 | } | ||
1341 | |||
1342 | static ssize_t metrics_bytes_identical_show(struct device *fbdev, | ||
1343 | struct device_attribute *a, char *buf) { | ||
1344 | struct fb_info *fb_info = dev_get_drvdata(fbdev); | ||
1345 | struct dlfb_data *dev = fb_info->par; | ||
1346 | return snprintf(buf, PAGE_SIZE, "%u\n", | ||
1347 | atomic_read(&dev->bytes_identical)); | ||
1348 | } | ||
1349 | |||
1350 | static ssize_t metrics_bytes_sent_show(struct device *fbdev, | ||
1351 | struct device_attribute *a, char *buf) { | ||
1352 | struct fb_info *fb_info = dev_get_drvdata(fbdev); | ||
1353 | struct dlfb_data *dev = fb_info->par; | ||
1354 | return snprintf(buf, PAGE_SIZE, "%u\n", | ||
1355 | atomic_read(&dev->bytes_sent)); | ||
1356 | } | ||
1357 | |||
1358 | static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev, | ||
1359 | struct device_attribute *a, char *buf) { | ||
1360 | struct fb_info *fb_info = dev_get_drvdata(fbdev); | ||
1361 | struct dlfb_data *dev = fb_info->par; | ||
1362 | return snprintf(buf, PAGE_SIZE, "%u\n", | ||
1363 | atomic_read(&dev->cpu_kcycles_used)); | ||
1364 | } | ||
1365 | |||
1366 | static ssize_t edid_show( | ||
1367 | struct file *filp, | ||
1368 | struct kobject *kobj, struct bin_attribute *a, | ||
1369 | char *buf, loff_t off, size_t count) { | ||
1370 | struct device *fbdev = container_of(kobj, struct device, kobj); | ||
1371 | struct fb_info *fb_info = dev_get_drvdata(fbdev); | ||
1372 | struct dlfb_data *dev = fb_info->par; | ||
1373 | |||
1374 | if (dev->edid == NULL) | ||
1375 | return 0; | ||
1376 | |||
1377 | if ((off >= dev->edid_size) || (count > dev->edid_size)) | ||
1378 | return 0; | ||
1379 | |||
1380 | if (off + count > dev->edid_size) | ||
1381 | count = dev->edid_size - off; | ||
1382 | |||
1383 | dl_info("sysfs edid copy %p to %p, %d bytes\n", | ||
1384 | dev->edid, buf, (int) count); | ||
1385 | |||
1386 | memcpy(buf, dev->edid, count); | ||
1387 | |||
1388 | return count; | ||
1389 | } | ||
1390 | |||
1391 | static ssize_t edid_store( | ||
1392 | struct file *filp, | ||
1393 | struct kobject *kobj, struct bin_attribute *a, | ||
1394 | char *src, loff_t src_off, size_t src_size) { | ||
1395 | struct device *fbdev = container_of(kobj, struct device, kobj); | ||
1396 | struct fb_info *fb_info = dev_get_drvdata(fbdev); | ||
1397 | struct dlfb_data *dev = fb_info->par; | ||
1398 | |||
1399 | /* We only support writing the entire EDID at once, no offset */ | ||
1400 | if ((src_size < MIN_EDID_SIZE) || | ||
1401 | (src_size > MAX_EDID_SIZE) || | ||
1402 | (src_off != 0)) | ||
1403 | return 0; | ||
1404 | |||
1405 | dlfb_setup_modes(dev, fb_info, src, src_size); | ||
1406 | |||
1407 | if (dev->edid && (memcmp(src, dev->edid, src_size) == 0)) { | ||
1408 | dl_info("sysfs written EDID is new default\n"); | ||
1409 | dlfb_ops_set_par(fb_info); | ||
1410 | return src_size; | ||
1411 | } else | ||
1412 | return 0; | ||
1413 | } | ||
1414 | |||
1415 | static ssize_t metrics_reset_store(struct device *fbdev, | ||
1416 | struct device_attribute *attr, | ||
1417 | const char *buf, size_t count) | ||
1418 | { | ||
1419 | struct fb_info *fb_info = dev_get_drvdata(fbdev); | ||
1420 | struct dlfb_data *dev = fb_info->par; | ||
1421 | |||
1422 | atomic_set(&dev->bytes_rendered, 0); | ||
1423 | atomic_set(&dev->bytes_identical, 0); | ||
1424 | atomic_set(&dev->bytes_sent, 0); | ||
1425 | atomic_set(&dev->cpu_kcycles_used, 0); | ||
1426 | |||
1427 | return count; | ||
1428 | } | ||
1429 | |||
1430 | static struct bin_attribute edid_attr = { | ||
1431 | .attr.name = "edid", | ||
1432 | .attr.mode = 0666, | ||
1433 | .size = MAX_EDID_SIZE, | ||
1434 | .read = edid_show, | ||
1435 | .write = edid_store | ||
1436 | }; | ||
1437 | |||
1438 | static struct device_attribute fb_device_attrs[] = { | ||
1439 | __ATTR_RO(metrics_bytes_rendered), | ||
1440 | __ATTR_RO(metrics_bytes_identical), | ||
1441 | __ATTR_RO(metrics_bytes_sent), | ||
1442 | __ATTR_RO(metrics_cpu_kcycles_used), | ||
1443 | __ATTR(metrics_reset, S_IWUGO, NULL, metrics_reset_store), | ||
1444 | }; | ||
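/*
 * Editorial usage note (not part of this patch): once the framebuffer is
 * registered, these attributes hang off the fb class device, so the
 * counters can be read and reset from user space with something like
 *
 *   cat /sys/class/graphics/fb1/metrics_bytes_sent
 *   echo 1 > /sys/class/graphics/fb1/metrics_reset
 *
 * The fb index and exact sysfs path depend on the system and are an
 * assumption here.
 */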
1445 | |||
1446 | /* | ||
1447 | * This is necessary before we can communicate with the display controller. | ||
1448 | */ | ||
1449 | static int dlfb_select_std_channel(struct dlfb_data *dev) | ||
1450 | { | ||
1451 | int ret; | ||
1452 | u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7, | ||
1453 | 0x1C, 0x88, 0x5E, 0x15, | ||
1454 | 0x60, 0xFE, 0xC6, 0x97, | ||
1455 | 0x16, 0x3D, 0x47, 0xF2 }; | ||
1456 | |||
1457 | ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), | ||
1458 | NR_USB_REQUEST_CHANNEL, | ||
1459 | (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, | ||
1460 | set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); | ||
1461 | return ret; | ||
1462 | } | ||
1463 | |||
1464 | static int dlfb_parse_vendor_descriptor(struct dlfb_data *dev, | ||
1465 | struct usb_device *usbdev) | ||
1466 | { | ||
1467 | char *desc; | ||
1468 | char *buf; | ||
1469 | char *desc_end; | ||
1470 | |||
1471 | u8 total_len = 0; | ||
1472 | |||
1473 | buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL); | ||
1474 | if (!buf) | ||
1475 | return false; | ||
1476 | desc = buf; | ||
1477 | |||
1478 | total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */ | ||
1479 | 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE); | ||
1480 | if (total_len > 5) { | ||
1481 | dl_info("vendor descriptor length:%x data:%02x %02x %02x %02x" \ | ||
1482 | "%02x %02x %02x %02x %02x %02x %02x\n", | ||
1483 | total_len, desc[0], | ||
1484 | desc[1], desc[2], desc[3], desc[4], desc[5], desc[6], | ||
1485 | desc[7], desc[8], desc[9], desc[10]); | ||
1486 | |||
1487 | if ((desc[0] != total_len) || /* descriptor length */ | ||
1488 | (desc[1] != 0x5f) || /* vendor descriptor type */ | ||
1489 | (desc[2] != 0x01) || /* version (2 bytes) */ | ||
1490 | (desc[3] != 0x00) || | ||
1491 | (desc[4] != total_len - 2)) /* length after type */ | ||
1492 | goto unrecognized; | ||
1493 | |||
1494 | desc_end = desc + total_len; | ||
1495 | desc += 5; /* the fixed header we've already parsed */ | ||
1496 | |||
1497 | while (desc < desc_end) { | ||
1498 | u8 length; | ||
1499 | u16 key; | ||
1500 | |||
1501 | key = *((u16 *) desc); | ||
1502 | desc += sizeof(u16); | ||
1503 | length = *desc; | ||
1504 | desc++; | ||
1505 | |||
1506 | switch (key) { | ||
1507 | case 0x0200: { /* max_area */ | ||
1508 | u32 max_area; | ||
1509 | max_area = le32_to_cpu(*((u32 *)desc)); | ||
1510 | dl_warn("DL chip limited to %d pixel modes\n", | ||
1511 | max_area); | ||
1512 | dev->sku_pixel_limit = max_area; | ||
1513 | break; | ||
1514 | } | ||
1515 | default: | ||
1516 | break; | ||
1517 | } | ||
1518 | desc += length; | ||
1519 | } | ||
1520 | } | ||
1521 | |||
1522 | goto success; | ||
1523 | |||
1524 | unrecognized: | ||
1525 | /* allow udlfb to load for now even if firmware unrecognized */ | ||
1526 | dl_err("Unrecognized vendor firmware descriptor\n"); | ||
1527 | |||
1528 | success: | ||
1529 | kfree(buf); | ||
1530 | return true; | ||
1531 | } | ||
1532 | static int dlfb_usb_probe(struct usb_interface *interface, | ||
1533 | const struct usb_device_id *id) | ||
1534 | { | ||
1535 | struct usb_device *usbdev; | ||
1536 | struct dlfb_data *dev = 0; | ||
1537 | struct fb_info *info = 0; | ||
1538 | int retval = -ENOMEM; | ||
1539 | int i; | ||
1540 | |||
1541 | /* usb initialization */ | ||
1542 | |||
1543 | usbdev = interface_to_usbdev(interface); | ||
1544 | |||
1545 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
1546 | if (dev == NULL) { | ||
1547 | err("dlfb_usb_probe: failed alloc of dev struct\n"); | ||
1548 | goto error; | ||
1549 | } | ||
1550 | |||
1551 | /* we need to wait for both usb and fbdev to spin down on disconnect */ | ||
1552 | kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */ | ||
1553 | kref_get(&dev->kref); /* matching kref_put in free_framebuffer_work */ | ||
1554 | |||
1555 | dev->udev = usbdev; | ||
1556 | dev->gdev = &usbdev->dev; /* our generic struct device * */ | ||
1557 | usb_set_intfdata(interface, dev); | ||
1558 | |||
1559 | dl_info("%s %s - serial #%s\n", | ||
1560 | usbdev->manufacturer, usbdev->product, usbdev->serial); | ||
	dl_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n",
		le16_to_cpu(usbdev->descriptor.idVendor),
		le16_to_cpu(usbdev->descriptor.idProduct),
		le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
1564 | dl_info("console enable=%d\n", console); | ||
1565 | dl_info("fb_defio enable=%d\n", fb_defio); | ||
1566 | |||
1567 | dev->sku_pixel_limit = 2048 * 1152; /* default to maximum */ | ||
1568 | |||
1569 | if (!dlfb_parse_vendor_descriptor(dev, usbdev)) { | ||
1570 | dl_err("firmware not recognized. Assume incompatible device\n"); | ||
1571 | goto error; | ||
1572 | } | ||
1573 | |||
1574 | if (!dlfb_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) { | ||
1575 | retval = -ENOMEM; | ||
1576 | dl_err("dlfb_alloc_urb_list failed\n"); | ||
1577 | goto error; | ||
1578 | } | ||
1579 | |||
1580 | /* We don't register a new USB class. Our client interface is fbdev */ | ||
1581 | |||
1582 | /* allocates framebuffer driver structure, not framebuffer memory */ | ||
1583 | info = framebuffer_alloc(0, &usbdev->dev); | ||
1584 | if (!info) { | ||
1585 | retval = -ENOMEM; | ||
1586 | dl_err("framebuffer_alloc failed\n"); | ||
1587 | goto error; | ||
1588 | } | ||
1589 | |||
1590 | dev->info = info; | ||
1591 | info->par = dev; | ||
1592 | info->pseudo_palette = dev->pseudo_palette; | ||
1593 | info->fbops = &dlfb_ops; | ||
1594 | |||
1595 | retval = fb_alloc_cmap(&info->cmap, 256, 0); | ||
1596 | if (retval < 0) { | ||
		dl_err("fb_alloc_cmap failed %d\n", retval);
1598 | goto error; | ||
1599 | } | ||
1600 | |||
1601 | INIT_DELAYED_WORK(&dev->free_framebuffer_work, | ||
1602 | dlfb_free_framebuffer_work); | ||
1603 | |||
1604 | INIT_LIST_HEAD(&info->modelist); | ||
1605 | |||
1606 | retval = dlfb_setup_modes(dev, info, NULL, 0); | ||
1607 | if (retval != 0) { | ||
1608 | dl_err("unable to find common mode for display and adapter\n"); | ||
1609 | goto error; | ||
1610 | } | ||
1611 | |||
1612 | /* ready to begin using device */ | ||
1613 | |||
1614 | atomic_set(&dev->usb_active, 1); | ||
1615 | dlfb_select_std_channel(dev); | ||
1616 | |||
1617 | dlfb_ops_check_var(&info->var, info); | ||
1618 | dlfb_ops_set_par(info); | ||
1619 | |||
1620 | retval = register_framebuffer(info); | ||
1621 | if (retval < 0) { | ||
1622 | dl_err("register_framebuffer failed %d\n", retval); | ||
1623 | goto error; | ||
1624 | } | ||
1625 | |||
	for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) {
		if (device_create_file(info->dev, &fb_device_attrs[i]))
			dl_warn("device_create_file %d failed\n", i);
	}

	if (device_create_bin_file(info->dev, &edid_attr))
		dl_warn("device_create_bin_file failed\n");
1630 | |||
1631 | dl_info("DisplayLink USB device /dev/fb%d attached. %dx%d resolution." | ||
1632 | " Using %dK framebuffer memory\n", info->node, | ||
1633 | info->var.xres, info->var.yres, | ||
1634 | ((dev->backing_buffer) ? | ||
1635 | info->fix.smem_len * 2 : info->fix.smem_len) >> 10); | ||
1636 | return 0; | ||
1637 | |||
1638 | error: | ||
1639 | if (dev) { | ||
1640 | |||
1641 | if (info) { | ||
1642 | if (info->cmap.len != 0) | ||
1643 | fb_dealloc_cmap(&info->cmap); | ||
1644 | if (info->monspecs.modedb) | ||
1645 | fb_destroy_modedb(info->monspecs.modedb); | ||
			vfree(info->screen_base);
1648 | |||
1649 | fb_destroy_modelist(&info->modelist); | ||
1650 | |||
1651 | framebuffer_release(info); | ||
1652 | } | ||
1653 | |||
		vfree(dev->backing_buffer);
1656 | |||
1657 | kref_put(&dev->kref, dlfb_free); /* ref for framebuffer */ | ||
1658 | kref_put(&dev->kref, dlfb_free); /* last ref from kref_init */ | ||
1659 | |||
1660 | /* dev has been deallocated. Do not dereference */ | ||
1661 | } | ||
1662 | |||
1663 | return retval; | ||
1664 | } | ||
1665 | |||
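/*
 * Teardown is two-staged: on USB disconnect the device is only marked
 * virtualized and further urb traffic is stopped, so open fbdev clients
 * can keep drawing into the virtual framebuffer.  The actual free runs
 * from free_framebuffer_work once the last client closes (or right away
 * here if nothing has the framebuffer open).
 */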
1666 | static void dlfb_usb_disconnect(struct usb_interface *interface) | ||
1667 | { | ||
1668 | struct dlfb_data *dev; | ||
1669 | struct fb_info *info; | ||
1670 | int i; | ||
1671 | |||
1672 | dev = usb_get_intfdata(interface); | ||
1673 | info = dev->info; | ||
1674 | |||
1675 | dl_info("USB disconnect starting\n"); | ||
1676 | |||
1677 | /* we virtualize until all fb clients release. Then we free */ | ||
1678 | dev->virtualized = true; | ||
1679 | |||
1680 | /* When non-active we'll update virtual framebuffer, but no new urbs */ | ||
1681 | atomic_set(&dev->usb_active, 0); | ||
1682 | |||
1683 | /* remove udlfb's sysfs interfaces */ | ||
1684 | for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) | ||
1685 | device_remove_file(info->dev, &fb_device_attrs[i]); | ||
1686 | device_remove_bin_file(info->dev, &edid_attr); | ||
1687 | |||
1688 | usb_set_intfdata(interface, NULL); | ||
1689 | |||
1690 | /* if clients still have us open, will be freed on last close */ | ||
1691 | if (dev->fb_count == 0) | ||
1692 | schedule_delayed_work(&dev->free_framebuffer_work, 0); | ||
1693 | |||
1694 | /* release reference taken by kref_init in probe() */ | ||
1695 | kref_put(&dev->kref, dlfb_free); | ||
1696 | |||
1697 | /* consider dlfb_data freed */ | ||
1698 | |||
1699 | return; | ||
1700 | } | ||
1701 | |||
1702 | static struct usb_driver dlfb_driver = { | ||
1703 | .name = "udlfb", | ||
1704 | .probe = dlfb_usb_probe, | ||
1705 | .disconnect = dlfb_usb_disconnect, | ||
1706 | .id_table = id_table, | ||
1707 | }; | ||
1708 | |||
1709 | static int __init dlfb_module_init(void) | ||
1710 | { | ||
1711 | int res; | ||
1712 | |||
1713 | res = usb_register(&dlfb_driver); | ||
1714 | if (res) | ||
1715 | err("usb_register failed. Error number %d", res); | ||
1716 | |||
1717 | return res; | ||
1718 | } | ||
1719 | |||
1720 | static void __exit dlfb_module_exit(void) | ||
1721 | { | ||
1722 | usb_deregister(&dlfb_driver); | ||
1723 | } | ||
1724 | |||
1725 | module_init(dlfb_module_init); | ||
1726 | module_exit(dlfb_module_exit); | ||
1727 | |||
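/*
 * Bulk-write completion: put the urb back on the free list and release
 * the limit semaphore.  Anything other than an unlink status is logged
 * and flagged via dev->lost_pixels.  Under fb_defio the semaphore is
 * released from a workqueue to avoid the deadlock described below.
 */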
1728 | static void dlfb_urb_completion(struct urb *urb) | ||
1729 | { | ||
1730 | struct urb_node *unode = urb->context; | ||
1731 | struct dlfb_data *dev = unode->dev; | ||
1732 | unsigned long flags; | ||
1733 | |||
1734 | /* sync/async unlink faults aren't errors */ | ||
1735 | if (urb->status) { | ||
1736 | if (!(urb->status == -ENOENT || | ||
1737 | urb->status == -ECONNRESET || | ||
1738 | urb->status == -ESHUTDOWN)) { | ||
1739 | dl_err("%s - nonzero write bulk status received: %d\n", | ||
1740 | __func__, urb->status); | ||
1741 | atomic_set(&dev->lost_pixels, 1); | ||
1742 | } | ||
1743 | } | ||
1744 | |||
1745 | urb->transfer_buffer_length = dev->urbs.size; /* reset to actual */ | ||
1746 | |||
1747 | spin_lock_irqsave(&dev->urbs.lock, flags); | ||
1748 | list_add_tail(&unode->entry, &dev->urbs.list); | ||
1749 | dev->urbs.available++; | ||
1750 | spin_unlock_irqrestore(&dev->urbs.lock, flags); | ||
1751 | |||
1752 | /* | ||
1753 | * When using fb_defio, we deadlock if up() is called | ||
1754 | * while another is waiting. So queue to another process. | ||
1755 | */ | ||
1756 | if (fb_defio) | ||
1757 | schedule_delayed_work(&unode->release_urb_work, 0); | ||
1758 | else | ||
1759 | up(&dev->urbs.limit_sem); | ||
1760 | } | ||
1761 | |||
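/*
 * Free the whole urb pool.  Each down_interruptible() reserves one urb
 * that is known to have completed, so in-flight transfers are waited
 * for rather than freed out from under the hardware.  An interrupted
 * wait leaks the remaining urbs, which is tolerated at shutdown.
 */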
1762 | static void dlfb_free_urb_list(struct dlfb_data *dev) | ||
1763 | { | ||
1764 | int count = dev->urbs.count; | ||
1765 | struct list_head *node; | ||
1766 | struct urb_node *unode; | ||
1767 | struct urb *urb; | ||
1768 | int ret; | ||
1769 | unsigned long flags; | ||
1770 | |||
	dl_notice("Waiting for pending urb completions and freeing all render urbs\n");
1772 | |||
1773 | /* keep waiting and freeing, until we've got 'em all */ | ||
1774 | while (count--) { | ||
1775 | |||
		/* Getting interrupted means a leak, but ok at shutdown */
1777 | ret = down_interruptible(&dev->urbs.limit_sem); | ||
1778 | if (ret) | ||
1779 | break; | ||
1780 | |||
1781 | spin_lock_irqsave(&dev->urbs.lock, flags); | ||
1782 | |||
1783 | node = dev->urbs.list.next; /* have reserved one with sem */ | ||
1784 | list_del_init(node); | ||
1785 | |||
1786 | spin_unlock_irqrestore(&dev->urbs.lock, flags); | ||
1787 | |||
1788 | unode = list_entry(node, struct urb_node, entry); | ||
1789 | urb = unode->urb; | ||
1790 | |||
1791 | /* Free each separately allocated piece */ | ||
1792 | usb_free_coherent(urb->dev, dev->urbs.size, | ||
1793 | urb->transfer_buffer, urb->transfer_dma); | ||
1794 | usb_free_urb(urb); | ||
		kfree(unode);
1796 | } | ||
1797 | |||
1798 | } | ||
1799 | |||
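/*
 * Pre-allocate 'count' bulk urbs, each with a 'size'-byte DMA-coherent
 * transfer buffer, and seed the counting semaphore with however many
 * allocations succeeded.  Returns the number actually allocated; the
 * caller treats 0 as failure.
 */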
1800 | static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size) | ||
1801 | { | ||
1802 | int i = 0; | ||
1803 | struct urb *urb; | ||
1804 | struct urb_node *unode; | ||
1805 | char *buf; | ||
1806 | |||
1807 | spin_lock_init(&dev->urbs.lock); | ||
1808 | |||
1809 | dev->urbs.size = size; | ||
1810 | INIT_LIST_HEAD(&dev->urbs.list); | ||
1811 | |||
1812 | while (i < count) { | ||
1813 | unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL); | ||
1814 | if (!unode) | ||
1815 | break; | ||
1816 | unode->dev = dev; | ||
1817 | |||
1818 | INIT_DELAYED_WORK(&unode->release_urb_work, | ||
1819 | dlfb_release_urb_work); | ||
1820 | |||
1821 | urb = usb_alloc_urb(0, GFP_KERNEL); | ||
1822 | if (!urb) { | ||
1823 | kfree(unode); | ||
1824 | break; | ||
1825 | } | ||
1826 | unode->urb = urb; | ||
1827 | |||
		buf = usb_alloc_coherent(dev->udev, size, GFP_KERNEL,
1829 | &urb->transfer_dma); | ||
1830 | if (!buf) { | ||
1831 | kfree(unode); | ||
1832 | usb_free_urb(urb); | ||
1833 | break; | ||
1834 | } | ||
1835 | |||
1836 | /* urb->transfer_buffer_length set to actual before submit */ | ||
1837 | usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1), | ||
1838 | buf, size, dlfb_urb_completion, unode); | ||
1839 | urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | ||
1840 | |||
1841 | list_add_tail(&unode->entry, &dev->urbs.list); | ||
1842 | |||
1843 | i++; | ||
1844 | } | ||
1845 | |||
1846 | sema_init(&dev->urbs.limit_sem, i); | ||
1847 | dev->urbs.count = i; | ||
1848 | dev->urbs.available = i; | ||
1849 | |||
1850 | dl_notice("allocated %d %d byte urbs\n", i, (int) size); | ||
1851 | |||
1852 | return i; | ||
1853 | } | ||
1854 | |||
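/*
 * Typical usage by the rendering paths elsewhere in this driver
 * (illustrative sketch only):
 *
 *	urb = dlfb_get_urb(dev);
 *	if (!urb)
 *		return 0;
 *	cmd = urb->transfer_buffer;
 *	... fill cmd with 0xAF-prefixed commands ...
 *	dlfb_submit_urb(dev, urb, cmd - (char *) urb->transfer_buffer);
 *
 * dlfb_get_urb() blocks (up to GET_URB_TIMEOUT) on the counting
 * semaphore until a pre-allocated urb is free, then removes it from
 * the free list.  On timeout it sets lost_pixels and returns NULL.
 */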
1855 | static struct urb *dlfb_get_urb(struct dlfb_data *dev) | ||
1856 | { | ||
1857 | int ret = 0; | ||
1858 | struct list_head *entry; | ||
1859 | struct urb_node *unode; | ||
1860 | struct urb *urb = NULL; | ||
1861 | unsigned long flags; | ||
1862 | |||
1863 | /* Wait for an in-flight buffer to complete and get re-queued */ | ||
1864 | ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT); | ||
1865 | if (ret) { | ||
1866 | atomic_set(&dev->lost_pixels, 1); | ||
		dl_warn("wait for urb interrupted: %d available: %d\n",
			ret, dev->urbs.available);
1869 | goto error; | ||
1870 | } | ||
1871 | |||
1872 | spin_lock_irqsave(&dev->urbs.lock, flags); | ||
1873 | |||
1874 | BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */ | ||
1875 | entry = dev->urbs.list.next; | ||
1876 | list_del_init(entry); | ||
1877 | dev->urbs.available--; | ||
1878 | |||
1879 | spin_unlock_irqrestore(&dev->urbs.lock, flags); | ||
1880 | |||
1881 | unode = list_entry(entry, struct urb_node, entry); | ||
1882 | urb = unode->urb; | ||
1883 | |||
1884 | error: | ||
1885 | return urb; | ||
1886 | } | ||
1887 | |||
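/*
 * Submit a urb obtained from dlfb_get_urb() with 'len' bytes of payload.
 * On submission failure the completion handler is called by hand so the
 * urb goes back on the free list and the semaphore count stays balanced.
 */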
1888 | static int dlfb_submit_urb(struct dlfb_data *dev, struct urb *urb, size_t len) | ||
1889 | { | ||
1890 | int ret; | ||
1891 | |||
1892 | BUG_ON(len > dev->urbs.size); | ||
1893 | |||
1894 | urb->transfer_buffer_length = len; /* set to actual payload len */ | ||
1895 | ret = usb_submit_urb(urb, GFP_KERNEL); | ||
1896 | if (ret) { | ||
1897 | dlfb_urb_completion(urb); /* because no one else will */ | ||
1898 | atomic_set(&dev->lost_pixels, 1); | ||
		dl_err("usb_submit_urb error %d\n", ret);
1900 | } | ||
1901 | return ret; | ||
1902 | } | ||
1903 | |||
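/*
 * Module parameters.  For example (illustrative invocation only):
 *
 *	modprobe udlfb console=1 fb_defio=1
 *
 * would let fbcon bind to the first DisplayLink framebuffer found and
 * enable the experimental fb_defio mmap path.
 */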
1904 | module_param(console, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); | ||
1905 | MODULE_PARM_DESC(console, "Allow fbcon to consume first framebuffer found"); | ||
1906 | |||
1907 | module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); | ||
1908 | MODULE_PARM_DESC(fb_defio, "Enable fb_defio mmap support. *Experimental*"); | ||
1909 | |||
1910 | MODULE_AUTHOR("Roberto De Ioris <roberto@unbit.it>, " | ||
1911 | "Jaya Kumar <jayakumar.lkml@gmail.com>, " | ||
1912 | "Bernie Thompson <bernie@plugable.com>"); | ||
1913 | MODULE_DESCRIPTION("DisplayLink kernel framebuffer driver"); | ||
1914 | MODULE_LICENSE("GPL"); | ||
1915 | |||