Diffstat (limited to 'drivers/gpu/drm/udl')
-rw-r--r--  drivers/gpu/drm/udl/Kconfig          12
-rw-r--r--  drivers/gpu/drm/udl/Makefile          6
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c  141
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c         99
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h        141
-rw-r--r--  drivers/gpu/drm/udl/udl_encoder.c     80
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c         611
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c        227
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c       338
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c    414
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c   253
11 files changed, 2322 insertions, 0 deletions
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
new file mode 100644
index 000000000000..0b5e096d39a6
--- /dev/null
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -0,0 +1,12 @@
1config DRM_UDL
2 tristate "DisplayLink"
3 depends on DRM && EXPERIMENTAL
4 select DRM_USB
5 select FB_SYS_FILLRECT
6 select FB_SYS_COPYAREA
7 select FB_SYS_IMAGEBLIT
8 select FB_DEFERRED_IO
9 select DRM_KMS_HELPER
10 help
11	  This is a KMS driver for the USB DisplayLink video adapters.
12 Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
new file mode 100644
index 000000000000..05c7481bfd40
--- /dev/null
+++ b/drivers/gpu/drm/udl/Makefile
@@ -0,0 +1,6 @@
1
2ccflags-y := -Iinclude/drm
3
4udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o
5
6obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
new file mode 100644
index 000000000000..ba055e9ca007
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2012 Red Hat
3 * based in parts on udlfb.c:
4 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
5 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
6 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License v2. See the file COPYING in the main directory of this archive for
10 * more details.
11 */
12
13#include "drmP.h"
14#include "drm_crtc.h"
15#include "drm_edid.h"
16#include "drm_crtc_helper.h"
17#include "udl_drv.h"
18
19/* dummy connector to just get EDID;
20   all UDL devices appear to have a DVI-D connector */
21
22static u8 *udl_get_edid(struct udl_device *udl)
23{
24 u8 *block;
25 char rbuf[3];
26 int ret, i;
27
28 block = kmalloc(EDID_LENGTH, GFP_KERNEL);
29 if (block == NULL)
30 return NULL;
31
32 for (i = 0; i < EDID_LENGTH; i++) {
33 ret = usb_control_msg(udl->ddev->usbdev,
34 usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
35 (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
36 HZ);
37 if (ret < 1) {
38 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
39 i--;
40 goto error;
41 }
42 block[i] = rbuf[1];
43 }
44
45 return block;
46
47error:
48 kfree(block);
49 return NULL;
50}
51
52static int udl_get_modes(struct drm_connector *connector)
53{
54 struct udl_device *udl = connector->dev->dev_private;
55 struct edid *edid;
56 int ret;
57
58 edid = (struct edid *)udl_get_edid(udl);
59
60 connector->display_info.raw_edid = (char *)edid;
61
62 drm_mode_connector_update_edid_property(connector, edid);
63 ret = drm_add_edid_modes(connector, edid);
64 connector->display_info.raw_edid = NULL;
65 kfree(edid);
66 return ret;
67}
68
69static int udl_mode_valid(struct drm_connector *connector,
70 struct drm_display_mode *mode)
71{
72 return 0;
73}
74
75static enum drm_connector_status
76udl_detect(struct drm_connector *connector, bool force)
77{
78 if (drm_device_is_unplugged(connector->dev))
79 return connector_status_disconnected;
80 return connector_status_connected;
81}
82
83struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
84{
85 int enc_id = connector->encoder_ids[0];
86 struct drm_mode_object *obj;
87 struct drm_encoder *encoder;
88
89 obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
90 if (!obj)
91 return NULL;
92 encoder = obj_to_encoder(obj);
93 return encoder;
94}
95
96int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property,
97 uint64_t val)
98{
99 return 0;
100}
101
102static void udl_connector_destroy(struct drm_connector *connector)
103{
104 drm_sysfs_connector_remove(connector);
105 drm_connector_cleanup(connector);
106 kfree(connector);
107}
108
109struct drm_connector_helper_funcs udl_connector_helper_funcs = {
110 .get_modes = udl_get_modes,
111 .mode_valid = udl_mode_valid,
112 .best_encoder = udl_best_single_encoder,
113};
114
115struct drm_connector_funcs udl_connector_funcs = {
116 .dpms = drm_helper_connector_dpms,
117 .detect = udl_detect,
118 .fill_modes = drm_helper_probe_single_connector_modes,
119 .destroy = udl_connector_destroy,
120 .set_property = udl_connector_set_property,
121};
122
123int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
124{
125 struct drm_connector *connector;
126
127 connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
128 if (!connector)
129 return -ENOMEM;
130
131 drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
132 drm_connector_helper_add(connector, &udl_connector_helper_funcs);
133
134 drm_sysfs_connector_add(connector);
135 drm_mode_connector_attach_encoder(connector, encoder);
136
137 drm_connector_attach_property(connector,
138 dev->mode_config.dirty_info_property,
139 1);
140 return 0;
141}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
new file mode 100644
index 000000000000..5340c5f3987b
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright (C) 2012 Red Hat
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License v2. See the file COPYING in the main directory of this archive for
6 * more details.
7 */
8
9#include <linux/module.h>
10#include "drm_usb.h"
11#include "drm_crtc_helper.h"
12#include "udl_drv.h"
13
14static struct drm_driver driver;
15
16static struct usb_device_id id_table[] = {
17 {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
18 {},
19};
20MODULE_DEVICE_TABLE(usb, id_table);
21
22MODULE_LICENSE("GPL");
23
24static int udl_usb_probe(struct usb_interface *interface,
25 const struct usb_device_id *id)
26{
27 return drm_get_usb_dev(interface, id, &driver);
28}
29
30static void udl_usb_disconnect(struct usb_interface *interface)
31{
32 struct drm_device *dev = usb_get_intfdata(interface);
33
34 drm_kms_helper_poll_disable(dev);
35 drm_connector_unplug_all(dev);
36 udl_fbdev_unplug(dev);
37 udl_drop_usb(dev);
38 drm_unplug_dev(dev);
39}
40
41static struct vm_operations_struct udl_gem_vm_ops = {
42 .fault = udl_gem_fault,
43 .open = drm_gem_vm_open,
44 .close = drm_gem_vm_close,
45};
46
47static const struct file_operations udl_driver_fops = {
48 .owner = THIS_MODULE,
49 .open = drm_open,
50 .mmap = drm_gem_mmap,
51 .poll = drm_poll,
52 .read = drm_read,
53 .unlocked_ioctl = drm_ioctl,
54 .release = drm_release,
55 .fasync = drm_fasync,
56 .llseek = noop_llseek,
57};
58
59static struct drm_driver driver = {
60 .driver_features = DRIVER_MODESET | DRIVER_GEM,
61 .load = udl_driver_load,
62 .unload = udl_driver_unload,
63
64 /* gem hooks */
65 .gem_init_object = udl_gem_init_object,
66 .gem_free_object = udl_gem_free_object,
67 .gem_vm_ops = &udl_gem_vm_ops,
68
69 .dumb_create = udl_dumb_create,
70 .dumb_map_offset = udl_gem_mmap,
71 .dumb_destroy = udl_dumb_destroy,
72 .fops = &udl_driver_fops,
73 .name = DRIVER_NAME,
74 .desc = DRIVER_DESC,
75 .date = DRIVER_DATE,
76 .major = DRIVER_MAJOR,
77 .minor = DRIVER_MINOR,
78 .patchlevel = DRIVER_PATCHLEVEL,
79};
80
81static struct usb_driver udl_driver = {
82 .name = "udl",
83 .probe = udl_usb_probe,
84 .disconnect = udl_usb_disconnect,
85 .id_table = id_table,
86};
87
88static int __init udl_init(void)
89{
90 return drm_usb_init(&driver, &udl_driver);
91}
92
93static void __exit udl_exit(void)
94{
95 drm_usb_exit(&driver, &udl_driver);
96}
97
98module_init(udl_init);
99module_exit(udl_exit);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
new file mode 100644
index 000000000000..1612954a5bc4
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2012 Red Hat
3 *
4 * based in parts on udlfb.c:
5 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
6 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
7 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License v2. See the file COPYING in the main directory of this archive for
11 * more details.
12 */
13
14#ifndef UDL_DRV_H
15#define UDL_DRV_H
16
17#include <linux/usb.h>
18
19#define DRIVER_NAME "udl"
20#define DRIVER_DESC "DisplayLink"
21#define DRIVER_DATE "20120220"
22
23#define DRIVER_MAJOR 0
24#define DRIVER_MINOR 0
25#define DRIVER_PATCHLEVEL 1
26
27struct udl_device;
28
29struct urb_node {
30 struct list_head entry;
31 struct udl_device *dev;
32 struct delayed_work release_urb_work;
33 struct urb *urb;
34};
35
36struct urb_list {
37 struct list_head list;
38 spinlock_t lock;
39 struct semaphore limit_sem;
40 int available;
41 int count;
42 size_t size;
43};
44
45struct udl_fbdev;
46
47struct udl_device {
48 struct device *dev;
49 struct drm_device *ddev;
50
51 int sku_pixel_limit;
52
53 struct urb_list urbs;
54 atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
55
56 struct udl_fbdev *fbdev;
57 char mode_buf[1024];
58 uint32_t mode_buf_len;
59 atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
60 atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61 atomic_t bytes_sent; /* to usb, after compression including overhead */
62 atomic_t cpu_kcycles_used; /* transpired during pixel processing */
63};
64
65struct udl_gem_object {
66 struct drm_gem_object base;
67 struct page **pages;
68 void *vmapping;
69};
70
71#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
72
73struct udl_framebuffer {
74 struct drm_framebuffer base;
75 struct udl_gem_object *obj;
76 bool active_16; /* active on the 16-bit channel */
77};
78
79#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
80
81/* modeset */
82int udl_modeset_init(struct drm_device *dev);
83void udl_modeset_cleanup(struct drm_device *dev);
84int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
85
86struct drm_encoder *udl_encoder_init(struct drm_device *dev);
87
88struct urb *udl_get_urb(struct drm_device *dev);
89
90int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
91void udl_urb_completion(struct urb *urb);
92
93int udl_driver_load(struct drm_device *dev, unsigned long flags);
94int udl_driver_unload(struct drm_device *dev);
95
96int udl_fbdev_init(struct drm_device *dev);
97void udl_fbdev_cleanup(struct drm_device *dev);
98void udl_fbdev_unplug(struct drm_device *dev);
99struct drm_framebuffer *
100udl_fb_user_fb_create(struct drm_device *dev,
101 struct drm_file *file,
102 struct drm_mode_fb_cmd2 *mode_cmd);
103
104int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
105 const char *front, char **urb_buf_ptr,
106 u32 byte_offset, u32 byte_width,
107 int *ident_ptr, int *sent_ptr);
108
109int udl_dumb_create(struct drm_file *file_priv,
110 struct drm_device *dev,
111 struct drm_mode_create_dumb *args);
112int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
113 uint32_t handle, uint64_t *offset);
114int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
115 uint32_t handle);
116
117int udl_gem_init_object(struct drm_gem_object *obj);
118void udl_gem_free_object(struct drm_gem_object *gem_obj);
119struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
120 size_t size);
121
122int udl_gem_vmap(struct udl_gem_object *obj);
123void udl_gem_vunmap(struct udl_gem_object *obj);
124int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
125
126int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
127 int width, int height);
128
129int udl_drop_usb(struct drm_device *dev);
130
131#define CMD_WRITE_RAW8 "\xAF\x60" /**< 8 bit raw write command. */
132#define CMD_WRITE_RL8 "\xAF\x61" /**< 8 bit run length command. */
133#define CMD_WRITE_COPY8 "\xAF\x62" /**< 8 bit copy command. */
134#define CMD_WRITE_RLX8 "\xAF\x63" /**< 8 bit extended run length command. */
135
136#define CMD_WRITE_RAW16 "\xAF\x68" /**< 16 bit raw write command. */
137#define CMD_WRITE_RL16 "\xAF\x69" /**< 16 bit run length command. */
138#define CMD_WRITE_COPY16 "\xAF\x6A" /**< 16 bit copy command. */
139#define CMD_WRITE_RLX16 "\xAF\x6B" /**< 16 bit extended run length command. */
140
141#endif
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
new file mode 100644
index 000000000000..56e75f0f1df5
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -0,0 +1,80 @@
1/*
2 * Copyright (C) 2012 Red Hat
3 * based in parts on udlfb.c:
4 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
5 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
6 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License v2. See the file COPYING in the main directory of this archive for
10 * more details.
11 */
12
13#include "drmP.h"
14#include "drm_crtc.h"
15#include "drm_crtc_helper.h"
16#include "udl_drv.h"
17
18/* dummy encoder */
19void udl_enc_destroy(struct drm_encoder *encoder)
20{
21 drm_encoder_cleanup(encoder);
22 kfree(encoder);
23}
24
25static void udl_encoder_disable(struct drm_encoder *encoder)
26{
27}
28
29static bool udl_mode_fixup(struct drm_encoder *encoder,
30 struct drm_display_mode *mode,
31 struct drm_display_mode *adjusted_mode)
32{
33 return true;
34}
35
36static void udl_encoder_prepare(struct drm_encoder *encoder)
37{
38}
39
40static void udl_encoder_commit(struct drm_encoder *encoder)
41{
42}
43
44static void udl_encoder_mode_set(struct drm_encoder *encoder,
45 struct drm_display_mode *mode,
46 struct drm_display_mode *adjusted_mode)
47{
48}
49
50static void
51udl_encoder_dpms(struct drm_encoder *encoder, int mode)
52{
53}
54
55static const struct drm_encoder_helper_funcs udl_helper_funcs = {
56 .dpms = udl_encoder_dpms,
57 .mode_fixup = udl_mode_fixup,
58 .prepare = udl_encoder_prepare,
59 .mode_set = udl_encoder_mode_set,
60 .commit = udl_encoder_commit,
61 .disable = udl_encoder_disable,
62};
63
64static const struct drm_encoder_funcs udl_enc_funcs = {
65 .destroy = udl_enc_destroy,
66};
67
68struct drm_encoder *udl_encoder_init(struct drm_device *dev)
69{
70 struct drm_encoder *encoder;
71
72 encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
73 if (!encoder)
74 return NULL;
75
76 drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS);
77 drm_encoder_helper_add(encoder, &udl_helper_funcs);
78 encoder->possible_crtcs = 1;
79 return encoder;
80}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
new file mode 100644
index 000000000000..4d9c3a5d8a45
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -0,0 +1,611 @@
1/*
2 * Copyright (C) 2012 Red Hat
3 *
4 * based in parts on udlfb.c:
5 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
6 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
7 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License v2. See the file COPYING in the main directory of this archive for
11 * more details.
12 */
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/fb.h>
16
17#include "drmP.h"
18#include "drm.h"
19#include "drm_crtc.h"
20#include "drm_crtc_helper.h"
21#include "udl_drv.h"
22
23#include "drm_fb_helper.h"
24
25#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
26
27static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */
28static int fb_bpp = 16;
29
30module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
31module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
32
33struct udl_fbdev {
34 struct drm_fb_helper helper;
35 struct udl_framebuffer ufb;
36 struct list_head fbdev_list;
37 int fb_count;
38};
39
40#define DL_ALIGN_UP(x, a) ALIGN(x, a)
41#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
42
43/** Read the red component (0..255) of a 32 bpp colour. */
44#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)
45
46/** Read the green component (0..255) of a 32 bpp colour. */
47#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)
48
49/** Read the blue component (0..255) of a 32 bpp colour. */
50#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)
51
52/** Return red/green component of a 16 bpp colour number. */
53#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)
54
55/** Return green/blue component of a 16 bpp colour number. */
56#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)
57
58/** Return 8 bpp colour number from red, green and blue components. */
59#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)
60
61#if 0
62static uint8_t rgb8(uint32_t col)
63{
64 uint8_t red = DLO_RGB_GETRED(col);
65 uint8_t grn = DLO_RGB_GETGRN(col);
66 uint8_t blu = DLO_RGB_GETBLU(col);
67
68 return DLO_RGB8(red, grn, blu);
69}
70
71static uint16_t rgb16(uint32_t col)
72{
73 uint8_t red = DLO_RGB_GETRED(col);
74 uint8_t grn = DLO_RGB_GETGRN(col);
75 uint8_t blu = DLO_RGB_GETBLU(col);
76
77 return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
78}
79#endif
80
81/*
82 * NOTE: fb_defio.c is holding info->fbdefio.mutex
83 * Touching ANY framebuffer memory that triggers a page fault
84 * in fb_defio will cause a deadlock, when it also tries to
85 * grab the same mutex.
86 */
87static void udlfb_dpy_deferred_io(struct fb_info *info,
88 struct list_head *pagelist)
89{
90 struct page *cur;
91 struct fb_deferred_io *fbdefio = info->fbdefio;
92 struct udl_fbdev *ufbdev = info->par;
93 struct drm_device *dev = ufbdev->ufb.base.dev;
94 struct udl_device *udl = dev->dev_private;
95 struct urb *urb;
96 char *cmd;
97 cycles_t start_cycles, end_cycles;
98 int bytes_sent = 0;
99 int bytes_identical = 0;
100 int bytes_rendered = 0;
101
102 if (!fb_defio)
103 return;
104
105 start_cycles = get_cycles();
106
107 urb = udl_get_urb(dev);
108 if (!urb)
109 return;
110
111 cmd = urb->transfer_buffer;
112
113 /* walk the written page list and render each to device */
114 list_for_each_entry(cur, &fbdefio->pagelist, lru) {
115
116 if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
117 &urb, (char *) info->fix.smem_start,
118 &cmd, cur->index << PAGE_SHIFT,
119 PAGE_SIZE, &bytes_identical, &bytes_sent))
120 goto error;
121 bytes_rendered += PAGE_SIZE;
122 }
123
124 if (cmd > (char *) urb->transfer_buffer) {
125 /* Send partial buffer remaining before exiting */
126 int len = cmd - (char *) urb->transfer_buffer;
127 udl_submit_urb(dev, urb, len);
128 bytes_sent += len;
129 } else
130 udl_urb_completion(urb);
131
132error:
133 atomic_add(bytes_sent, &udl->bytes_sent);
134 atomic_add(bytes_identical, &udl->bytes_identical);
135 atomic_add(bytes_rendered, &udl->bytes_rendered);
136 end_cycles = get_cycles();
137 atomic_add(((unsigned int) ((end_cycles - start_cycles)
138 >> 10)), /* Kcycles */
139 &udl->cpu_kcycles_used);
140}
141
142int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
143 int width, int height)
144{
145 struct drm_device *dev = fb->base.dev;
146 struct udl_device *udl = dev->dev_private;
147 int i, ret;
148 char *cmd;
149 cycles_t start_cycles, end_cycles;
150 int bytes_sent = 0;
151 int bytes_identical = 0;
152 struct urb *urb;
153 int aligned_x;
154 int bpp = (fb->base.bits_per_pixel / 8);
155
156 if (!fb->active_16)
157 return 0;
158
159 if (!fb->obj->vmapping)
160 udl_gem_vmap(fb->obj);
161
162 start_cycles = get_cycles();
163
164 aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
165 width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
166 x = aligned_x;
167
168 if ((width <= 0) ||
169 (x + width > fb->base.width) ||
170 (y + height > fb->base.height))
171 return -EINVAL;
172
173 urb = udl_get_urb(dev);
174 if (!urb)
175 return 0;
176 cmd = urb->transfer_buffer;
177
178 for (i = y; i < y + height ; i++) {
179 const int line_offset = fb->base.pitches[0] * i;
180 const int byte_offset = line_offset + (x * bpp);
181
182 if (udl_render_hline(dev, bpp, &urb,
183 (char *) fb->obj->vmapping,
184 &cmd, byte_offset, width * bpp,
185 &bytes_identical, &bytes_sent))
186 goto error;
187 }
188
189 if (cmd > (char *) urb->transfer_buffer) {
190 /* Send partial buffer remaining before exiting */
191 int len = cmd - (char *) urb->transfer_buffer;
192 ret = udl_submit_urb(dev, urb, len);
193 bytes_sent += len;
194 } else
195 udl_urb_completion(urb);
196
197error:
198 atomic_add(bytes_sent, &udl->bytes_sent);
199 atomic_add(bytes_identical, &udl->bytes_identical);
200 atomic_add(width*height*bpp, &udl->bytes_rendered);
201 end_cycles = get_cycles();
202 atomic_add(((unsigned int) ((end_cycles - start_cycles)
203 >> 10)), /* Kcycles */
204 &udl->cpu_kcycles_used);
205
206 return 0;
207}
208
209static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
210{
211 unsigned long start = vma->vm_start;
212 unsigned long size = vma->vm_end - vma->vm_start;
213 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
214 unsigned long page, pos;
215
216 if (offset + size > info->fix.smem_len)
217 return -EINVAL;
218
219 pos = (unsigned long)info->fix.smem_start + offset;
220
221 pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
222 pos, size);
223
224 while (size > 0) {
225 page = vmalloc_to_pfn((void *)pos);
226 if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
227 return -EAGAIN;
228
229 start += PAGE_SIZE;
230 pos += PAGE_SIZE;
231 if (size > PAGE_SIZE)
232 size -= PAGE_SIZE;
233 else
234 size = 0;
235 }
236
237 vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
238 return 0;
239}
240
241static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
242{
243 struct udl_fbdev *ufbdev = info->par;
244
245 sys_fillrect(info, rect);
246
247 udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
248 rect->height);
249}
250
251static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
252{
253 struct udl_fbdev *ufbdev = info->par;
254
255 sys_copyarea(info, region);
256
257 udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
258 region->height);
259}
260
261static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
262{
263 struct udl_fbdev *ufbdev = info->par;
264
265 sys_imageblit(info, image);
266
267 udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
268 image->height);
269}
270
271/*
272 * It's common for several clients to have the framebuffer open simultaneously,
273 * e.g. both fbcon and X, which makes things interesting.
274 * Assumes caller is holding info->lock (for open and release at least)
275 */
276static int udl_fb_open(struct fb_info *info, int user)
277{
278 struct udl_fbdev *ufbdev = info->par;
279 struct drm_device *dev = ufbdev->ufb.base.dev;
280 struct udl_device *udl = dev->dev_private;
281
282 /* If the USB device is gone, we don't accept new opens */
283 if (drm_device_is_unplugged(udl->ddev))
284 return -ENODEV;
285
286 ufbdev->fb_count++;
287
288 if (fb_defio && (info->fbdefio == NULL)) {
289 /* enable defio at last moment if not disabled by client */
290
291 struct fb_deferred_io *fbdefio;
292
293 fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
294
295 if (fbdefio) {
296 fbdefio->delay = DL_DEFIO_WRITE_DELAY;
297 fbdefio->deferred_io = udlfb_dpy_deferred_io;
298 }
299
300 info->fbdefio = fbdefio;
301 fb_deferred_io_init(info);
302 }
303
304 pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
305 info->node, user, info, ufbdev->fb_count);
306
307 return 0;
308}
309
310
311/*
312 * Assumes caller is holding info->lock mutex (for open and release at least)
313 */
314static int udl_fb_release(struct fb_info *info, int user)
315{
316 struct udl_fbdev *ufbdev = info->par;
317
318 ufbdev->fb_count--;
319
320 if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
321 fb_deferred_io_cleanup(info);
322 kfree(info->fbdefio);
323 info->fbdefio = NULL;
324 info->fbops->fb_mmap = udl_fb_mmap;
325 }
326
327 pr_warn("released /dev/fb%d user=%d count=%d\n",
328 info->node, user, ufbdev->fb_count);
329
330 return 0;
331}
332
333static struct fb_ops udlfb_ops = {
334 .owner = THIS_MODULE,
335 .fb_check_var = drm_fb_helper_check_var,
336 .fb_set_par = drm_fb_helper_set_par,
337 .fb_fillrect = udl_fb_fillrect,
338 .fb_copyarea = udl_fb_copyarea,
339 .fb_imageblit = udl_fb_imageblit,
340 .fb_pan_display = drm_fb_helper_pan_display,
341 .fb_blank = drm_fb_helper_blank,
342 .fb_setcmap = drm_fb_helper_setcmap,
343 .fb_debug_enter = drm_fb_helper_debug_enter,
344 .fb_debug_leave = drm_fb_helper_debug_leave,
345 .fb_mmap = udl_fb_mmap,
346 .fb_open = udl_fb_open,
347 .fb_release = udl_fb_release,
348};
349
350void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
351 u16 blue, int regno)
352{
353}
354
355void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
356 u16 *blue, int regno)
357{
358 *red = 0;
359 *green = 0;
360 *blue = 0;
361}
362
363static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
364 struct drm_file *file,
365 unsigned flags, unsigned color,
366 struct drm_clip_rect *clips,
367 unsigned num_clips)
368{
369 struct udl_framebuffer *ufb = to_udl_fb(fb);
370 int i;
371
372 if (!ufb->active_16)
373 return 0;
374
375 for (i = 0; i < num_clips; i++) {
376 udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
377 clips[i].x2 - clips[i].x1,
378 clips[i].y2 - clips[i].y1);
379 }
380 return 0;
381}
382
383static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
384{
385 struct udl_framebuffer *ufb = to_udl_fb(fb);
386
387 if (ufb->obj)
388 drm_gem_object_unreference_unlocked(&ufb->obj->base);
389
390 drm_framebuffer_cleanup(fb);
391 kfree(ufb);
392}
393
394static const struct drm_framebuffer_funcs udlfb_funcs = {
395 .destroy = udl_user_framebuffer_destroy,
396 .dirty = udl_user_framebuffer_dirty,
397 .create_handle = NULL,
398};
399
400
401static int
402udl_framebuffer_init(struct drm_device *dev,
403 struct udl_framebuffer *ufb,
404 struct drm_mode_fb_cmd2 *mode_cmd,
405 struct udl_gem_object *obj)
406{
407 int ret;
408
409 ufb->obj = obj;
410 ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
411 drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
412 return ret;
413}
414
415
416static int udlfb_create(struct udl_fbdev *ufbdev,
417 struct drm_fb_helper_surface_size *sizes)
418{
419 struct drm_device *dev = ufbdev->helper.dev;
420 struct fb_info *info;
421 struct device *device = &dev->usbdev->dev;
422 struct drm_framebuffer *fb;
423 struct drm_mode_fb_cmd2 mode_cmd;
424 struct udl_gem_object *obj;
425 uint32_t size;
426 int ret = 0;
427
428 if (sizes->surface_bpp == 24)
429 sizes->surface_bpp = 32;
430
431 mode_cmd.width = sizes->surface_width;
432 mode_cmd.height = sizes->surface_height;
433 mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
434
435 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
436 sizes->surface_depth);
437
438 size = mode_cmd.pitches[0] * mode_cmd.height;
439 size = ALIGN(size, PAGE_SIZE);
440
441 obj = udl_gem_alloc_object(dev, size);
442 if (!obj)
443 goto out;
444
445 ret = udl_gem_vmap(obj);
446 if (ret) {
447 DRM_ERROR("failed to vmap fb\n");
448 goto out_gfree;
449 }
450
451 info = framebuffer_alloc(0, device);
452 if (!info) {
453 ret = -ENOMEM;
454 goto out_gfree;
455 }
456 info->par = ufbdev;
457
458 ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
459 if (ret)
460 goto out_gfree;
461
462 fb = &ufbdev->ufb.base;
463
464 ufbdev->helper.fb = fb;
465 ufbdev->helper.fbdev = info;
466
467 strcpy(info->fix.id, "udldrmfb");
468
469 info->screen_base = ufbdev->ufb.obj->vmapping;
470 info->fix.smem_len = size;
471 info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
472
473 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
474 info->fbops = &udlfb_ops;
475 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
476 drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
477
478 ret = fb_alloc_cmap(&info->cmap, 256, 0);
479 if (ret) {
480 ret = -ENOMEM;
481 goto out_gfree;
482 }
483
484
485 DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
486 fb->width, fb->height,
487 ufbdev->ufb.obj->vmapping);
488
489 return ret;
490out_gfree:
491 drm_gem_object_unreference(&ufbdev->ufb.obj->base);
492out:
493 return ret;
494}
495
496static int udl_fb_find_or_create_single(struct drm_fb_helper *helper,
497 struct drm_fb_helper_surface_size *sizes)
498{
499 struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
500 int new_fb = 0;
501 int ret;
502
503 if (!helper->fb) {
504 ret = udlfb_create(ufbdev, sizes);
505 if (ret)
506 return ret;
507
508 new_fb = 1;
509 }
510 return new_fb;
511}
512
513static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
514 .gamma_set = udl_crtc_fb_gamma_set,
515 .gamma_get = udl_crtc_fb_gamma_get,
516 .fb_probe = udl_fb_find_or_create_single,
517};
518
519static void udl_fbdev_destroy(struct drm_device *dev,
520 struct udl_fbdev *ufbdev)
521{
522 struct fb_info *info;
523 if (ufbdev->helper.fbdev) {
524 info = ufbdev->helper.fbdev;
525 unregister_framebuffer(info);
526 if (info->cmap.len)
527 fb_dealloc_cmap(&info->cmap);
528 framebuffer_release(info);
529 }
530 drm_fb_helper_fini(&ufbdev->helper);
531 drm_framebuffer_cleanup(&ufbdev->ufb.base);
532 drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
533}
534
535int udl_fbdev_init(struct drm_device *dev)
536{
537 struct udl_device *udl = dev->dev_private;
538 int bpp_sel = fb_bpp;
539 struct udl_fbdev *ufbdev;
540 int ret;
541
542 ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
543 if (!ufbdev)
544 return -ENOMEM;
545
546 udl->fbdev = ufbdev;
547 ufbdev->helper.funcs = &udl_fb_helper_funcs;
548
549 ret = drm_fb_helper_init(dev, &ufbdev->helper,
550 1, 1);
551 if (ret) {
552 kfree(ufbdev);
553 return ret;
554
555 }
556
557 drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
558 drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
559 return 0;
560}
561
562void udl_fbdev_cleanup(struct drm_device *dev)
563{
564 struct udl_device *udl = dev->dev_private;
565 if (!udl->fbdev)
566 return;
567
568 udl_fbdev_destroy(dev, udl->fbdev);
569 kfree(udl->fbdev);
570 udl->fbdev = NULL;
571}
572
573void udl_fbdev_unplug(struct drm_device *dev)
574{
575 struct udl_device *udl = dev->dev_private;
576 struct udl_fbdev *ufbdev;
577 if (!udl->fbdev)
578 return;
579
580 ufbdev = udl->fbdev;
581 if (ufbdev->helper.fbdev) {
582 struct fb_info *info;
583 info = ufbdev->helper.fbdev;
584 unlink_framebuffer(info);
585 }
586}
587
588struct drm_framebuffer *
589udl_fb_user_fb_create(struct drm_device *dev,
590 struct drm_file *file,
591 struct drm_mode_fb_cmd2 *mode_cmd)
592{
593 struct drm_gem_object *obj;
594 struct udl_framebuffer *ufb;
595 int ret;
596
597 obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
598 if (obj == NULL)
599 return ERR_PTR(-ENOENT);
600
601 ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
602 if (ufb == NULL)
603 return ERR_PTR(-ENOMEM);
604
605 ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
606 if (ret) {
607 kfree(ufb);
608 return ERR_PTR(-EINVAL);
609 }
610 return &ufb->base;
611}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
new file mode 100644
index 000000000000..852642dc1187
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -0,0 +1,227 @@
1/*
2 * Copyright (C) 2012 Red Hat
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License v2. See the file COPYING in the main directory of this archive for
6 * more details.
7 */
8
9#include "drmP.h"
10#include "udl_drv.h"
11#include <linux/shmem_fs.h>
12
13struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
14 size_t size)
15{
16 struct udl_gem_object *obj;
17
18 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
19 if (obj == NULL)
20 return NULL;
21
22 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
23 kfree(obj);
24 return NULL;
25 }
26
27 return obj;
28}
29
30static int
31udl_gem_create(struct drm_file *file,
32 struct drm_device *dev,
33 uint64_t size,
34 uint32_t *handle_p)
35{
36 struct udl_gem_object *obj;
37 int ret;
38 u32 handle;
39
40 size = roundup(size, PAGE_SIZE);
41
42 obj = udl_gem_alloc_object(dev, size);
43 if (obj == NULL)
44 return -ENOMEM;
45
46 ret = drm_gem_handle_create(file, &obj->base, &handle);
47 if (ret) {
48 drm_gem_object_release(&obj->base);
49 kfree(obj);
50 return ret;
51 }
52
53 drm_gem_object_unreference(&obj->base);
54 *handle_p = handle;
55 return 0;
56}
57
58int udl_dumb_create(struct drm_file *file,
59 struct drm_device *dev,
60 struct drm_mode_create_dumb *args)
61{
62 args->pitch = args->width * ((args->bpp + 1) / 8);
63 args->size = args->pitch * args->height;
64 return udl_gem_create(file, dev,
65 args->size, &args->handle);
66}
67
68int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
69 uint32_t handle)
70{
71 return drm_gem_handle_delete(file, handle);
72}
73
74int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
75{
76 struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
77 struct page *page;
78 unsigned int page_offset;
79 int ret = 0;
80
81 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
82 PAGE_SHIFT;
83
84 if (!obj->pages)
85 return VM_FAULT_SIGBUS;
86
87 page = obj->pages[page_offset];
88 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
89 switch (ret) {
90 case -EAGAIN:
91 set_need_resched();
92 case 0:
93 case -ERESTARTSYS:
94 return VM_FAULT_NOPAGE;
95 case -ENOMEM:
96 return VM_FAULT_OOM;
97 default:
98 return VM_FAULT_SIGBUS;
99 }
100}
101
102int udl_gem_init_object(struct drm_gem_object *obj)
103{
104 BUG();
105
106 return 0;
107}
108
109static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
110{
111 int page_count, i;
112 struct page *page;
113 struct inode *inode;
114 struct address_space *mapping;
115
116 if (obj->pages)
117 return 0;
118
119 page_count = obj->base.size / PAGE_SIZE;
120 BUG_ON(obj->pages != NULL);
121 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
122 if (obj->pages == NULL)
123 return -ENOMEM;
124
125 inode = obj->base.filp->f_path.dentry->d_inode;
126 mapping = inode->i_mapping;
127 gfpmask |= mapping_gfp_mask(mapping);
128
129 for (i = 0; i < page_count; i++) {
130 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
131 if (IS_ERR(page))
132 goto err_pages;
133 obj->pages[i] = page;
134 }
135
136 return 0;
137err_pages:
138 while (i--)
139 page_cache_release(obj->pages[i]);
140 drm_free_large(obj->pages);
141 obj->pages = NULL;
142 return PTR_ERR(page);
143}
144
145static void udl_gem_put_pages(struct udl_gem_object *obj)
146{
147 int page_count = obj->base.size / PAGE_SIZE;
148 int i;
149
150 for (i = 0; i < page_count; i++)
151 page_cache_release(obj->pages[i]);
152
153 drm_free_large(obj->pages);
154 obj->pages = NULL;
155}
156
157int udl_gem_vmap(struct udl_gem_object *obj)
158{
159 int page_count = obj->base.size / PAGE_SIZE;
160 int ret;
161
162 ret = udl_gem_get_pages(obj, GFP_KERNEL);
163 if (ret)
164 return ret;
165
166 obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
167 if (!obj->vmapping)
168 return -ENOMEM;
169 return 0;
170}
171
172void udl_gem_vunmap(struct udl_gem_object *obj)
173{
174 if (obj->vmapping)
175 vunmap(obj->vmapping);
176
177 udl_gem_put_pages(obj);
178}
179
180void udl_gem_free_object(struct drm_gem_object *gem_obj)
181{
182 struct udl_gem_object *obj = to_udl_bo(gem_obj);
183
184 if (obj->vmapping)
185 udl_gem_vunmap(obj);
186
187 if (obj->pages)
188 udl_gem_put_pages(obj);
189
190 if (gem_obj->map_list.map)
191 drm_gem_free_mmap_offset(gem_obj);
192}
193
194/* the dumb interface doesn't work with the straight GEM mmap
195   interface; it expects to do mmap on the drm fd, like normal */
196int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
197 uint32_t handle, uint64_t *offset)
198{
199 struct udl_gem_object *gobj;
200 struct drm_gem_object *obj;
201 int ret = 0;
202
203 mutex_lock(&dev->struct_mutex);
204 obj = drm_gem_object_lookup(dev, file, handle);
205 if (obj == NULL) {
206 ret = -ENOENT;
207 goto unlock;
208 }
209 gobj = to_udl_bo(obj);
210
211 ret = udl_gem_get_pages(gobj, GFP_KERNEL);
212 if (ret)
213 return ret;
214 if (!gobj->base.map_list.map) {
215 ret = drm_gem_create_mmap_offset(obj);
216 if (ret)
217 goto out;
218 }
219
220 *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
221
222out:
223 drm_gem_object_unreference(&gobj->base);
224unlock:
225 mutex_unlock(&dev->struct_mutex);
226 return ret;
227}
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
new file mode 100644
index 000000000000..a8d5f09428c7
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -0,0 +1,338 @@
1/*
2 * Copyright (C) 2012 Red Hat
3 *
4 * based in parts on udlfb.c:
5 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
6 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
7 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License v2. See the file COPYING in the main directory of this archive for
11 * more details.
12 */
13#include "drmP.h"
14#include "udl_drv.h"
15
16/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
17#define BULK_SIZE 512
18
19#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
20#define WRITES_IN_FLIGHT (4)
21#define MAX_VENDOR_DESCRIPTOR_SIZE 256
22
23#define GET_URB_TIMEOUT HZ
24#define FREE_URB_TIMEOUT (HZ*2)
25
26static int udl_parse_vendor_descriptor(struct drm_device *dev,
27 struct usb_device *usbdev)
28{
29 struct udl_device *udl = dev->dev_private;
30 char *desc;
31 char *buf;
32 char *desc_end;
33
34 u8 total_len = 0;
35
36 buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
37 if (!buf)
38 return false;
39 desc = buf;
40
41 total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
42 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
43 if (total_len > 5) {
44 DRM_INFO("vendor descriptor length:%x data:%02x %02x %02x %02x" \
45			 " %02x %02x %02x %02x %02x %02x %02x\n",
46 total_len, desc[0],
47 desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
48 desc[7], desc[8], desc[9], desc[10]);
49
50 if ((desc[0] != total_len) || /* descriptor length */
51 (desc[1] != 0x5f) || /* vendor descriptor type */
52 (desc[2] != 0x01) || /* version (2 bytes) */
53 (desc[3] != 0x00) ||
54 (desc[4] != total_len - 2)) /* length after type */
55 goto unrecognized;
56
57 desc_end = desc + total_len;
58 desc += 5; /* the fixed header we've already parsed */
59
60 while (desc < desc_end) {
61 u8 length;
62 u16 key;
63
64 key = *((u16 *) desc);
65 desc += sizeof(u16);
66 length = *desc;
67 desc++;
68
69 switch (key) {
70 case 0x0200: { /* max_area */
71 u32 max_area;
72 max_area = le32_to_cpu(*((u32 *)desc));
73 DRM_DEBUG("DL chip limited to %d pixel modes\n",
74 max_area);
75 udl->sku_pixel_limit = max_area;
76 break;
77 }
78 default:
79 break;
80 }
81 desc += length;
82 }
83 }
84
85 goto success;
86
87unrecognized:
88 /* allow udlfb to load for now even if firmware unrecognized */
89 DRM_ERROR("Unrecognized vendor firmware descriptor\n");
90
91success:
92 kfree(buf);
93 return true;
94}
95
96static void udl_release_urb_work(struct work_struct *work)
97{
98 struct urb_node *unode = container_of(work, struct urb_node,
99 release_urb_work.work);
100
101 up(&unode->dev->urbs.limit_sem);
102}
103
104void udl_urb_completion(struct urb *urb)
105{
106 struct urb_node *unode = urb->context;
107 struct udl_device *udl = unode->dev;
108 unsigned long flags;
109
110 /* sync/async unlink faults aren't errors */
111 if (urb->status) {
112 if (!(urb->status == -ENOENT ||
113 urb->status == -ECONNRESET ||
114 urb->status == -ESHUTDOWN)) {
115 DRM_ERROR("%s - nonzero write bulk status received: %d\n",
116 __func__, urb->status);
117 atomic_set(&udl->lost_pixels, 1);
118 }
119 }
120
121 urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */
122
123 spin_lock_irqsave(&udl->urbs.lock, flags);
124 list_add_tail(&unode->entry, &udl->urbs.list);
125 udl->urbs.available++;
126 spin_unlock_irqrestore(&udl->urbs.lock, flags);
127
128#if 0
129 /*
130 * When using fb_defio, we deadlock if up() is called
131 * while another is waiting. So queue to another process.
132 */
133 if (fb_defio)
134 schedule_delayed_work(&unode->release_urb_work, 0);
135 else
136#endif
137 up(&udl->urbs.limit_sem);
138}
139
140static void udl_free_urb_list(struct drm_device *dev)
141{
142 struct udl_device *udl = dev->dev_private;
143 int count = udl->urbs.count;
144 struct list_head *node;
145 struct urb_node *unode;
146 struct urb *urb;
147 int ret;
148 unsigned long flags;
149
150 DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
151
152 /* keep waiting and freeing, until we've got 'em all */
153 while (count--) {
154
155		/* Getting interrupted means a leak, but ok at shutdown */
156 ret = down_interruptible(&udl->urbs.limit_sem);
157 if (ret)
158 break;
159
160 spin_lock_irqsave(&udl->urbs.lock, flags);
161
162 node = udl->urbs.list.next; /* have reserved one with sem */
163 list_del_init(node);
164
165 spin_unlock_irqrestore(&udl->urbs.lock, flags);
166
167 unode = list_entry(node, struct urb_node, entry);
168 urb = unode->urb;
169
170 /* Free each separately allocated piece */
171 usb_free_coherent(urb->dev, udl->urbs.size,
172 urb->transfer_buffer, urb->transfer_dma);
173 usb_free_urb(urb);
174 kfree(node);
175 }
176 udl->urbs.count = 0;
177}
178
179static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
180{
181 struct udl_device *udl = dev->dev_private;
182 int i = 0;
183 struct urb *urb;
184 struct urb_node *unode;
185 char *buf;
186
187 spin_lock_init(&udl->urbs.lock);
188
189 udl->urbs.size = size;
190 INIT_LIST_HEAD(&udl->urbs.list);
191
192 while (i < count) {
193 unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
194 if (!unode)
195 break;
196 unode->dev = udl;
197
198 INIT_DELAYED_WORK(&unode->release_urb_work,
199 udl_release_urb_work);
200
201 urb = usb_alloc_urb(0, GFP_KERNEL);
202 if (!urb) {
203 kfree(unode);
204 break;
205 }
206 unode->urb = urb;
207
208 buf = usb_alloc_coherent(udl->ddev->usbdev, MAX_TRANSFER, GFP_KERNEL,
209 &urb->transfer_dma);
210 if (!buf) {
211 kfree(unode);
212 usb_free_urb(urb);
213 break;
214 }
215
216 /* urb->transfer_buffer_length set to actual before submit */
217 usb_fill_bulk_urb(urb, udl->ddev->usbdev, usb_sndbulkpipe(udl->ddev->usbdev, 1),
218 buf, size, udl_urb_completion, unode);
219 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
220
221 list_add_tail(&unode->entry, &udl->urbs.list);
222
223 i++;
224 }
225
226 sema_init(&udl->urbs.limit_sem, i);
227 udl->urbs.count = i;
228 udl->urbs.available = i;
229
230 DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
231
232 return i;
233}
234
235struct urb *udl_get_urb(struct drm_device *dev)
236{
237 struct udl_device *udl = dev->dev_private;
238 int ret = 0;
239 struct list_head *entry;
240 struct urb_node *unode;
241 struct urb *urb = NULL;
242 unsigned long flags;
243
244 /* Wait for an in-flight buffer to complete and get re-queued */
245 ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
246 if (ret) {
247 atomic_set(&udl->lost_pixels, 1);
248 DRM_INFO("wait for urb interrupted: %x available: %d\n",
249 ret, udl->urbs.available);
250 goto error;
251 }
252
253 spin_lock_irqsave(&udl->urbs.lock, flags);
254
255 BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
256 entry = udl->urbs.list.next;
257 list_del_init(entry);
258 udl->urbs.available--;
259
260 spin_unlock_irqrestore(&udl->urbs.lock, flags);
261
262 unode = list_entry(entry, struct urb_node, entry);
263 urb = unode->urb;
264
265error:
266 return urb;
267}
268
269int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
270{
271 struct udl_device *udl = dev->dev_private;
272 int ret;
273
274 BUG_ON(len > udl->urbs.size);
275
276 urb->transfer_buffer_length = len; /* set to actual payload len */
277 ret = usb_submit_urb(urb, GFP_ATOMIC);
278 if (ret) {
279 udl_urb_completion(urb); /* because no one else will */
280 atomic_set(&udl->lost_pixels, 1);
281 DRM_ERROR("usb_submit_urb error %x\n", ret);
282 }
283 return ret;
284}
285
286int udl_driver_load(struct drm_device *dev, unsigned long flags)
287{
288 struct udl_device *udl;
289 int ret;
290
291 DRM_DEBUG("\n");
292 udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
293 if (!udl)
294 return -ENOMEM;
295
296 udl->ddev = dev;
297 dev->dev_private = udl;
298
299 if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) {
300 DRM_ERROR("firmware not recognized. Assume incompatible device\n");
301 goto err;
302 }
303
304 if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
305 ret = -ENOMEM;
306 DRM_ERROR("udl_alloc_urb_list failed\n");
307 goto err;
308 }
309
310 DRM_DEBUG("\n");
311 ret = udl_modeset_init(dev);
312
313 ret = udl_fbdev_init(dev);
314 return 0;
315err:
316 kfree(udl);
317 DRM_ERROR("%d\n", ret);
318 return ret;
319}
320
321int udl_drop_usb(struct drm_device *dev)
322{
323 udl_free_urb_list(dev);
324 return 0;
325}
326
327int udl_driver_unload(struct drm_device *dev)
328{
329 struct udl_device *udl = dev->dev_private;
330
331 if (udl->urbs.count)
332 udl_free_urb_list(dev);
333
334 udl_fbdev_cleanup(dev);
335 udl_modeset_cleanup(dev);
336 kfree(udl);
337 return 0;
338}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
new file mode 100644
index 000000000000..b3ecb3d12a1d
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -0,0 +1,414 @@
1/*
2 * Copyright (C) 2012 Red Hat
3 *
4 * based in parts on udlfb.c:
5 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
6 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
7 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License v2. See the file COPYING in the main directory of this archive for
11 * more details.
12 */
13
14#include "drmP.h"
15#include "drm_crtc.h"
16#include "drm_crtc_helper.h"
17#include "udl_drv.h"
18
19/*
20 * All DisplayLink bulk operations start with 0xAF, followed by a specific command code.
21 * All operations are written to buffers which are later sent to the device.
22 */
23static char *udl_set_register(char *buf, u8 reg, u8 val)
24{
25 *buf++ = 0xAF;
26 *buf++ = 0x20;
27 *buf++ = reg;
28 *buf++ = val;
29 return buf;
30}
31
32static char *udl_vidreg_lock(char *buf)
33{
34 return udl_set_register(buf, 0xFF, 0x00);
35}
36
37static char *udl_vidreg_unlock(char *buf)
38{
39 return udl_set_register(buf, 0xFF, 0xFF);
40}
41
42/*
43 * On/Off for driving the DisplayLink framebuffer to the display
44 * 0x00 H and V sync on
45 * 0x01 H and V sync off (screen blank but powered)
46 * 0x07 DPMS powerdown (requires modeset to come back)
47 */
48static char *udl_enable_hvsync(char *buf, bool enable)
49{
50 if (enable)
51 return udl_set_register(buf, 0x1F, 0x00);
52 else
53 return udl_set_register(buf, 0x1F, 0x07);
54}
55
56static char *udl_set_color_depth(char *buf, u8 selection)
57{
58 return udl_set_register(buf, 0x00, selection);
59}
60
61static char *udl_set_base16bpp(char *wrptr, u32 base)
62{
63	/* the 16bpp base pointer is 24 bits wide; 0x20 is the hi byte. */
64 wrptr = udl_set_register(wrptr, 0x20, base >> 16);
65 wrptr = udl_set_register(wrptr, 0x21, base >> 8);
66 return udl_set_register(wrptr, 0x22, base);
67}
68
69/*
70 * DisplayLink HW has separate 16bpp and 8bpp framebuffers.
71 * In 24bpp modes, the low 323 RGB bits go in the 8bpp framebuffer
72 */
73static char *udl_set_base8bpp(char *wrptr, u32 base)
74{
75 wrptr = udl_set_register(wrptr, 0x26, base >> 16);
76 wrptr = udl_set_register(wrptr, 0x27, base >> 8);
77 return udl_set_register(wrptr, 0x28, base);
78}
79
80static char *udl_set_register_16(char *wrptr, u8 reg, u16 value)
81{
82 wrptr = udl_set_register(wrptr, reg, value >> 8);
83 return udl_set_register(wrptr, reg+1, value);
84}
85
86/*
87 * This is kind of weird because the controller takes some
88 * register values in a different byte order than other registers.
89 */
90static char *udl_set_register_16be(char *wrptr, u8 reg, u16 value)
91{
92 wrptr = udl_set_register(wrptr, reg, value);
93 return udl_set_register(wrptr, reg+1, value >> 8);
94}
95
96/*
97 * LFSR is linear feedback shift register. The reason we have this is
98 * because the display controller needs to minimize the clock depth of
99 * various counters used in the display path. So this code reverses the
100 * provided value into the lfsr16 value by counting backwards to get
101 * the value that needs to be set in the hardware comparator to get the
102 * same actual count. This makes sense once you read above a couple of
103 * times and think about it from a hardware perspective.
104 */
105static u16 udl_lfsr16(u16 actual_count)
106{
107 u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */
108
109 while (actual_count--) {
110 lv = ((lv << 1) |
111 (((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
112 & 0xFFFF;
113 }
114
115 return (u16) lv;
116}
117
118/*
119 * This does LFSR conversion on the value that is to be written.
120 * See LFSR explanation above for more detail.
121 */
122static char *udl_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
123{
124 return udl_set_register_16(wrptr, reg, udl_lfsr16(value));
125}
126
127/*
128 * This takes a standard fbdev screeninfo struct and all of its monitor mode
129 * details and converts them into the DisplayLink equivalent register commands.
130 ERR(vreg(dev, 0x00, (color_depth == 16) ? 0 : 1));
131 ERR(vreg_lfsr16(dev, 0x01, xDisplayStart));
132 ERR(vreg_lfsr16(dev, 0x03, xDisplayEnd));
133 ERR(vreg_lfsr16(dev, 0x05, yDisplayStart));
134 ERR(vreg_lfsr16(dev, 0x07, yDisplayEnd));
135 ERR(vreg_lfsr16(dev, 0x09, xEndCount));
136 ERR(vreg_lfsr16(dev, 0x0B, hSyncStart));
137 ERR(vreg_lfsr16(dev, 0x0D, hSyncEnd));
138 ERR(vreg_big_endian(dev, 0x0F, hPixels));
139 ERR(vreg_lfsr16(dev, 0x11, yEndCount));
140 ERR(vreg_lfsr16(dev, 0x13, vSyncStart));
141 ERR(vreg_lfsr16(dev, 0x15, vSyncEnd));
142 ERR(vreg_big_endian(dev, 0x17, vPixels));
143 ERR(vreg_little_endian(dev, 0x1B, pixelClock5KHz));
144
145 ERR(vreg(dev, 0x1F, 0));
146
147 ERR(vbuf(dev, WRITE_VIDREG_UNLOCK, DSIZEOF(WRITE_VIDREG_UNLOCK)));
148 */
149static char *udl_set_vid_cmds(char *wrptr, struct drm_display_mode *mode)
150{
151 u16 xds, yds;
152 u16 xde, yde;
153 u16 yec;
154
155 /* x display start */
156 xds = mode->crtc_htotal - mode->crtc_hsync_start;
157 wrptr = udl_set_register_lfsr16(wrptr, 0x01, xds);
158 /* x display end */
159 xde = xds + mode->crtc_hdisplay;
160 wrptr = udl_set_register_lfsr16(wrptr, 0x03, xde);
161
162 /* y display start */
163 yds = mode->crtc_vtotal - mode->crtc_vsync_start;
164 wrptr = udl_set_register_lfsr16(wrptr, 0x05, yds);
165 /* y display end */
166 yde = yds + mode->crtc_vdisplay;
167 wrptr = udl_set_register_lfsr16(wrptr, 0x07, yde);
168
169 /* x end count is active + blanking - 1 */
170 wrptr = udl_set_register_lfsr16(wrptr, 0x09,
171 mode->crtc_htotal - 1);
172
173 /* libdlo hardcodes hsync start to 1 */
174 wrptr = udl_set_register_lfsr16(wrptr, 0x0B, 1);
175
176 /* hsync end is width of sync pulse + 1 */
177 wrptr = udl_set_register_lfsr16(wrptr, 0x0D,
178 mode->crtc_hsync_end - mode->crtc_hsync_start + 1);
179
180 /* hpixels is active pixels */
181 wrptr = udl_set_register_16(wrptr, 0x0F, mode->hdisplay);
182
183 /* yendcount is vertical active + vertical blanking */
184 yec = mode->crtc_vtotal;
185 wrptr = udl_set_register_lfsr16(wrptr, 0x11, yec);
186
187 /* libdlo hardcodes vsync start to 0 */
188 wrptr = udl_set_register_lfsr16(wrptr, 0x13, 0);
189
190 /* vsync end is width of vsync pulse */
191 wrptr = udl_set_register_lfsr16(wrptr, 0x15, mode->crtc_vsync_end - mode->crtc_vsync_start);
192
193 /* vpixels is active pixels */
194 wrptr = udl_set_register_16(wrptr, 0x17, mode->crtc_vdisplay);
195
196 wrptr = udl_set_register_16be(wrptr, 0x1B,
197 mode->clock / 5);
198
199 return wrptr;
200}
201
202static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
203{
204 struct drm_device *dev = crtc->dev;
205 struct udl_device *udl = dev->dev_private;
206 struct urb *urb;
207 char *buf;
208 int retval;
209
210 urb = udl_get_urb(dev);
211 if (!urb)
212 return -ENOMEM;
213
214 buf = (char *)urb->transfer_buffer;
215
216 memcpy(buf, udl->mode_buf, udl->mode_buf_len);
217 retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
218 DRM_INFO("write mode info %d\n", udl->mode_buf_len);
219 return retval;
220}
221
222
223static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
224{
225 struct drm_device *dev = crtc->dev;
226 struct udl_device *udl = dev->dev_private;
227 int retval;
228
229 if (mode == DRM_MODE_DPMS_OFF) {
230 char *buf;
231 struct urb *urb;
232 urb = udl_get_urb(dev);
233 if (!urb)
234 return;
235
236 buf = (char *)urb->transfer_buffer;
237 buf = udl_vidreg_lock(buf);
238 buf = udl_enable_hvsync(buf, false);
239 buf = udl_vidreg_unlock(buf);
240
241 retval = udl_submit_urb(dev, urb, buf - (char *)
242 urb->transfer_buffer);
243 } else {
244 if (udl->mode_buf_len == 0) {
245 DRM_ERROR("Trying to enable DPMS with no mode\n");
246 return;
247 }
248 udl_crtc_write_mode_to_hw(crtc);
249 }
250
251}
252
253static bool udl_crtc_mode_fixup(struct drm_crtc *crtc,
254 struct drm_display_mode *mode,
255 struct drm_display_mode *adjusted_mode)
256
257{
258 return true;
259}
260
261#if 0
262static int
263udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
264 int x, int y, enum mode_set_atomic state)
265{
266 return 0;
267}
268
269static int
270udl_pipe_set_base(struct drm_crtc *crtc, int x, int y,
271 struct drm_framebuffer *old_fb)
272{
273 return 0;
274}
275#endif
276
277static int udl_crtc_mode_set(struct drm_crtc *crtc,
278 struct drm_display_mode *mode,
279 struct drm_display_mode *adjusted_mode,
280 int x, int y,
281 struct drm_framebuffer *old_fb)
282
283{
284 struct drm_device *dev = crtc->dev;
285 struct udl_framebuffer *ufb = to_udl_fb(crtc->fb);
286 struct udl_device *udl = dev->dev_private;
287 char *buf;
288 char *wrptr;
289 int color_depth = 0;
290
291 buf = (char *)udl->mode_buf;
292
293	/* for now we just clip 24 -> 16; if we fix that, fix this too */
294 /*if (crtc->fb->bits_per_pixel != 16)
295 color_depth = 1; */
296
297	/* This first section has to do with setting the base address on the
298	 * controller associated with the display. There are 2 base
299	 * pointers; currently we only use the 16bpp segment.
300 */
301 wrptr = udl_vidreg_lock(buf);
302 wrptr = udl_set_color_depth(wrptr, color_depth);
303 /* set base for 16bpp segment to 0 */
304 wrptr = udl_set_base16bpp(wrptr, 0);
305 /* set base for 8bpp segment to end of fb */
306 wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
307
308 wrptr = udl_set_vid_cmds(wrptr, adjusted_mode);
309 wrptr = udl_enable_hvsync(wrptr, true);
310 wrptr = udl_vidreg_unlock(wrptr);
311
312 ufb->active_16 = true;
313 if (old_fb) {
314 struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
315 uold_fb->active_16 = false;
316 }
317 udl->mode_buf_len = wrptr - buf;
318
319 /* damage all of it */
320 udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
321 return 0;
322}
323
324
325static void udl_crtc_disable(struct drm_crtc *crtc)
326{
327
328
329}
330
331static void udl_crtc_destroy(struct drm_crtc *crtc)
332{
333 drm_crtc_cleanup(crtc);
334 kfree(crtc);
335}
336
337static void udl_load_lut(struct drm_crtc *crtc)
338{
339}
340
341static void udl_crtc_prepare(struct drm_crtc *crtc)
342{
343}
344
345static void udl_crtc_commit(struct drm_crtc *crtc)
346{
347 udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
348}
349
350static struct drm_crtc_helper_funcs udl_helper_funcs = {
351 .dpms = udl_crtc_dpms,
352 .mode_fixup = udl_crtc_mode_fixup,
353 .mode_set = udl_crtc_mode_set,
354 .prepare = udl_crtc_prepare,
355 .commit = udl_crtc_commit,
356 .disable = udl_crtc_disable,
357 .load_lut = udl_load_lut,
358};
359
360static const struct drm_crtc_funcs udl_crtc_funcs = {
361 .set_config = drm_crtc_helper_set_config,
362 .destroy = udl_crtc_destroy,
363};
364
365int udl_crtc_init(struct drm_device *dev)
366{
367 struct drm_crtc *crtc;
368
369 crtc = kzalloc(sizeof(struct drm_crtc) + sizeof(struct drm_connector *), GFP_KERNEL);
370 if (crtc == NULL)
371 return -ENOMEM;
372
373 drm_crtc_init(dev, crtc, &udl_crtc_funcs);
374 drm_crtc_helper_add(crtc, &udl_helper_funcs);
375
376 return 0;
377}
378
379static const struct drm_mode_config_funcs udl_mode_funcs = {
380 .fb_create = udl_fb_user_fb_create,
381 .output_poll_changed = NULL,
382};
383
384int udl_modeset_init(struct drm_device *dev)
385{
386 struct drm_encoder *encoder;
387 drm_mode_config_init(dev);
388
389 dev->mode_config.min_width = 640;
390 dev->mode_config.min_height = 480;
391
392 dev->mode_config.max_width = 2048;
393 dev->mode_config.max_height = 2048;
394
395 dev->mode_config.prefer_shadow = 0;
396 dev->mode_config.preferred_depth = 24;
397
398 dev->mode_config.funcs = (void *)&udl_mode_funcs;
399
400 drm_mode_create_dirty_info_property(dev);
401
402 udl_crtc_init(dev);
403
404 encoder = udl_encoder_init(dev);
405
406 udl_connector_init(dev, encoder);
407
408 return 0;
409}
410
411void udl_modeset_cleanup(struct drm_device *dev)
412{
413 drm_mode_config_cleanup(dev);
414}
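A minimal sketch, assuming a hypothetical driver load/unload path (function
names invented for illustration, not part of this patch), of how the two
entry points above are meant to be paired:

	static int example_driver_load(struct drm_device *dev)
	{
		int ret;

		ret = udl_modeset_init(dev);
		if (ret)
			return ret;
		/* fbdev and GEM setup would follow here */
		return 0;
	}

	static void example_driver_unload(struct drm_device *dev)
	{
		udl_modeset_cleanup(dev);
	}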
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
new file mode 100644
index 000000000000..b9320e2608dd
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -0,0 +1,253 @@
1/*
2 * Copyright (C) 2012 Red Hat
3 * based in parts on udlfb.c:
4 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
5 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
6 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License v2. See the file COPYING in the main directory of this archive for
10 * more details.
11 */
12
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/fb.h>
16#include <linux/prefetch.h>
17
18#include "drmP.h"
19#include "udl_drv.h"
20
21#define MAX_CMD_PIXELS 255
22
23#define RLX_HEADER_BYTES 7
24#define MIN_RLX_PIX_BYTES 4
25#define MIN_RLX_CMD_BYTES (RLX_HEADER_BYTES + MIN_RLX_PIX_BYTES)
26
27#define RLE_HEADER_BYTES 6
28#define MIN_RLE_PIX_BYTES 3
29#define MIN_RLE_CMD_BYTES (RLE_HEADER_BYTES + MIN_RLE_PIX_BYTES)
30
31#define RAW_HEADER_BYTES 6
32#define MIN_RAW_PIX_BYTES 2
33#define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
34
35/*
36 * Trims identical data from the front and back of a line,
37 * sets the new front buffer address and width,
38 * and returns the byte count of identical pixels.
39 * Assumes CPU natural alignment (unsigned long)
40 * for the back and front buffer pointers and the width.
41 */
42#if 0
43static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
44{
45 int j, k;
46 const unsigned long *back = (const unsigned long *) bback;
47 const unsigned long *front = (const unsigned long *) *bfront;
48 const int width = *width_bytes / sizeof(unsigned long);
49 int identical = width;
50 int start = width;
51 int end = width;
52
53 prefetch((void *) front);
54 prefetch((void *) back);
55
56 for (j = 0; j < width; j++) {
57 if (back[j] != front[j]) {
58 start = j;
59 break;
60 }
61 }
62
63 for (k = width - 1; k > j; k--) {
64 if (back[k] != front[k]) {
65 end = k+1;
66 break;
67 }
68 }
69
70 identical = start + (width - end);
71 *bfront = (u8 *) &front[start];
72 *width_bytes = (end - start) * sizeof(unsigned long);
73
74 return identical * sizeof(unsigned long);
75}
76#endif
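/*
 * Minimal usage sketch for the (currently #if 0) helper above, assuming a
 * hypothetical caller that keeps a shadow copy of each scanline; everything
 * below other than udl_trim_hline() is invented for illustration.
 */
#if 0
static int example_trim_line(const u8 *shadow_line, const u8 **front_line,
			     int *line_bytes, u32 *dev_addr16, int bpp)
{
	const u8 *orig_front = *front_line;

	/* narrow *front_line / *line_bytes to the differing middle span */
	int identical = udl_trim_hline(shadow_line, front_line, line_bytes);

	/* advance the device address past the bytes trimmed off the front */
	*dev_addr16 += ((*front_line - orig_front) / bpp) * 2;
	return identical;	/* bytes already matching the shadow */
}
#endif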
77
78static inline u16 pixel32_to_be16p(const uint8_t *pixel)
79{
80 uint32_t pix = *(uint32_t *)pixel;
81 u16 retval;
82
83 retval = (((pix >> 3) & 0x001f) |
84 ((pix >> 5) & 0x07e0) |
85 ((pix >> 8) & 0xf800));
86 return retval;
87}
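/*
 * Worked example: an XRGB8888 pixel read as the 32-bit value 0x00ff8040
 * (R = 0xff, G = 0x80, B = 0x40) becomes RGB565 0xfc08:
 *   R: 0xff >> 3 = 0x1f -> bits 15-11 (0xf800)
 *   G: 0x80 >> 2 = 0x20 -> bits 10-5  (0x0400)
 *   B: 0x40 >> 3 = 0x08 -> bits 4-0   (0x0008)
 * The caller then byte-swaps the result to big endian for the wire.
 */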
88
89/*
90 * Render a command stream for an encoded horizontal line segment of pixels.
91 *
92 * A command buffer holds several commands.
93 * It always begins with a fresh command header
94 * (the protocol doesn't require this, but we enforce it to allow
95 * multiple buffers to be potentially encoded and sent in parallel).
96 * A single command encodes one contiguous horizontal line of pixels
97 *
98 * The function relies on the client to do all allocation, so that
99 * rendering can be done directly to output buffers (e.g. USB URBs).
100 * The function fills the supplied command buffer, providing information
101 * on where it left off, so the client may call in again with additional
102 * buffers if the line will take several buffers to complete.
103 *
104 * A single command can transmit a maximum of 256 pixels,
105 * regardless of the compression ratio (protocol design limit).
106 * To the hardware, 0 for a size byte means 256
107 *
108 * Rather than 256 pixel commands which are either rl or raw encoded,
109 * the rlx command simply assumes alternating raw and rl spans within one cmd.
110 * This has a slightly larger header overhead, but produces more even results.
111 * It also processes all data (read and write) in a single pass.
112 * Performance benchmarks of common cases show it having just slightly better
113 * compression than 256 pixel raw or rle commands, with similar CPU consumption.
114 * For very rl-friendly data, however, it will not compress quite as well.
115 */
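/*
 * Illustrative trace of one rlx command as emitted by the loop below.
 * Assume the span starts at device address 0x001000 and holds four
 * identical RGB565 pixels 0x1234 followed by two distinct pixels A and B:
 *
 *   af 6b 00 10 00   command header + 24-bit device address
 *   06               total pixels covered by this command
 *   01 12 34         raw span: 1 pixel, value 0x1234 (big endian)
 *   03               repeat the previous pixel 3 more times
 *   02 AA aa BB bb   raw span: the 2 remaining pixels A and B
 *
 * Raw spans and repeat counts alternate until the command covers up to
 * 256 pixels (a count byte of 0 means 256 to the hardware).
 */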
116static void udl_compress_hline16(
117 const u8 **pixel_start_ptr,
118 const u8 *const pixel_end,
119 uint32_t *device_address_ptr,
120 uint8_t **command_buffer_ptr,
121 const uint8_t *const cmd_buffer_end, int bpp)
122{
123 const u8 *pixel = *pixel_start_ptr;
124 uint32_t dev_addr = *device_address_ptr;
125 uint8_t *cmd = *command_buffer_ptr;
126
127 while ((pixel_end > pixel) &&
128 (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
129 uint8_t *raw_pixels_count_byte = NULL;
130 uint8_t *cmd_pixels_count_byte = NULL;
131 const u8 *raw_pixel_start = NULL;
132 const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
133
134 prefetchw((void *) cmd); /* pull in one cache line at least */
135
136 *cmd++ = 0xaf;
137 *cmd++ = 0x6b;
138 *cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
139 *cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
140 *cmd++ = (uint8_t) ((dev_addr) & 0xFF);
141
142 cmd_pixels_count_byte = cmd++; /* we'll know this later */
143 cmd_pixel_start = pixel;
144
145 raw_pixels_count_byte = cmd++; /* we'll know this later */
146 raw_pixel_start = pixel;
147
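		/* clamp this command to the smallest of: the 256-pixel
		 * protocol limit, the input pixels left on this line, and
		 * the output space left at 2 bytes per encoded pixel */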
148 cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
149 min((int)(pixel_end - pixel) / bpp,
150 (int)(cmd_buffer_end - cmd) / 2))) * bpp;
151
152 prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
153
154 while (pixel < cmd_pixel_end) {
155 const u8 * const repeating_pixel = pixel;
156
157 if (bpp == 2)
158 *(uint16_t *)cmd = cpu_to_be16p((uint16_t *)pixel);
159 else if (bpp == 4)
160 *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16p(pixel));
161
162 cmd += 2;
163 pixel += bpp;
164
165 if (unlikely((pixel < cmd_pixel_end) &&
166 (!memcmp(pixel, repeating_pixel, bpp)))) {
167 /* go back and fill in raw pixel count */
168 *raw_pixels_count_byte = (((repeating_pixel -
169 raw_pixel_start) / bpp) + 1) & 0xFF;
170
171 while ((pixel < cmd_pixel_end)
172 && (!memcmp(pixel, repeating_pixel, bpp))) {
173 pixel += bpp;
174 }
175
176 /* immediately after raw data is repeat byte */
177 *cmd++ = (((pixel - repeating_pixel) / bpp) - 1) & 0xFF;
178
179 /* Then start another raw pixel span */
180 raw_pixel_start = pixel;
181 raw_pixels_count_byte = cmd++;
182 }
183 }
184
185 if (pixel > raw_pixel_start) {
186 /* finalize last RAW span */
187 *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
188 }
189
190 *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
191 dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
192 }
193
194 if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
195 /* Fill leftover bytes with no-ops */
196 if (cmd_buffer_end > cmd)
197 memset(cmd, 0xAF, cmd_buffer_end - cmd);
198 cmd = (uint8_t *) cmd_buffer_end;
199 }
200
201 *command_buffer_ptr = cmd;
202 *pixel_start_ptr = pixel;
203 *device_address_ptr = dev_addr;
204
205 return;
206}
207
208/*
209 * There are 3 copies of every pixel: The front buffer that the fbdev
210 * client renders to, the actual framebuffer across the USB bus in hardware
211 * (that we can only write to, slowly, and can never read), and (optionally)
212 * our shadow copy that tracks what's been sent to that hardware buffer.
213 */
214int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
215 const char *front, char **urb_buf_ptr,
216 u32 byte_offset, u32 byte_width,
217 int *ident_ptr, int *sent_ptr)
218{
219 const u8 *line_start, *line_end, *next_pixel;
220 u32 base16 = 0 + (byte_offset / bpp) * 2;
221 struct urb *urb = *urb_ptr;
222 u8 *cmd = *urb_buf_ptr;
223 u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
224
225 line_start = (u8 *) (front + byte_offset);
226 next_pixel = line_start;
227 line_end = next_pixel + byte_width;
228
229 while (next_pixel < line_end) {
230
231 udl_compress_hline16(&next_pixel,
232 line_end, &base16,
233 (u8 **) &cmd, (u8 *) cmd_end, bpp);
234
235 if (cmd >= cmd_end) {
236 int len = cmd - (u8 *) urb->transfer_buffer;
237 if (udl_submit_urb(dev, urb, len))
238 return 1; /* lost_pixels is set */
239 *sent_ptr += len;
240 urb = udl_get_urb(dev);
241 if (!urb)
242 return 1; /* lost_pixels is set */
243 *urb_ptr = urb;
244 cmd = urb->transfer_buffer;
245 cmd_end = &cmd[urb->transfer_buffer_length];
246 }
247 }
248
249 *urb_buf_ptr = cmd;
250
251 return 0;
252}
253
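For orientation, a minimal sketch of how a dirty rectangle might be pushed
through udl_render_hline() one scanline at a time; the caller and its names
are invented for illustration (the driver's own damage path,
udl_handle_damage(), does something similar in udl_fb.c):

	static int example_flush_rect(struct drm_device *dev, const char *vaddr,
				      int bpp, int pitch, int x, int y,
				      int width, int height)
	{
		struct urb *urb = udl_get_urb(dev);
		char *cmd;
		int line, identical = 0, sent = 0;

		if (!urb)
			return -ENOMEM;
		cmd = urb->transfer_buffer;

		for (line = y; line < y + height; line++) {
			u32 offset = line * pitch + x * bpp;

			/* splits the line across URBs as they fill up */
			if (udl_render_hline(dev, bpp, &urb, vaddr, &cmd,
					     offset, width * bpp,
					     &identical, &sent))
				return -EIO;	/* lost_pixels already flagged */
		}

		/* flush whatever is left in the current URB */
		if (cmd > (char *)urb->transfer_buffer)
			udl_submit_urb(dev, urb,
				       cmd - (char *)urb->transfer_buffer);
		return 0;
	}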