Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c  |   3
-rw-r--r--  drivers/gpu/drm/drm_crtc.c        |  14
-rw-r--r--  drivers/gpu/drm/drm_drv.c         |   4
-rw-r--r--  drivers/gpu/drm/drm_edid.c        |   2
-rw-r--r--  drivers/gpu/drm/drm_irq.c         | 161
-rw-r--r--  drivers/gpu/drm/drm_stub.c        |   8
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c   |   7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c   |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c  |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c |  15
10 files changed, 67 insertions, 155 deletions
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d33b8252b58..14796594e5d9 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -33,10 +33,11 @@
 
 #include "drmP.h"
 #include <linux/module.h>
-#include <asm/agp.h>
 
 #if __OS_HAS_AGP
 
+#include <asm/agp.h>
+
 /**
  * Get AGP information.
  *
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5b2cbb778162..bfce0992fefb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -194,7 +194,6 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
  * @type: object type
  *
  * LOCKING:
- * Caller must hold DRM mode_config lock.
  *
  * Create a unique identifier based on @ptr in @dev's identifier space. Used
  * for tracking modes, CRTCs and connectors.
@@ -209,15 +208,15 @@ static int drm_mode_object_get(struct drm_device *dev,
 	int new_id = 0;
 	int ret;
 
-	WARN(!mutex_is_locked(&dev->mode_config.mutex),
-	     "%s called w/o mode_config lock\n", __func__);
 again:
 	if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
 		DRM_ERROR("Ran out memory getting a mode number\n");
 		return -EINVAL;
 	}
 
+	mutex_lock(&dev->mode_config.idr_mutex);
 	ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+	mutex_unlock(&dev->mode_config.idr_mutex);
 	if (ret == -EAGAIN)
 		goto again;
 
@@ -239,16 +238,20 @@ again:
 static void drm_mode_object_put(struct drm_device *dev,
 				struct drm_mode_object *object)
 {
+	mutex_lock(&dev->mode_config.idr_mutex);
 	idr_remove(&dev->mode_config.crtc_idr, object->id);
+	mutex_unlock(&dev->mode_config.idr_mutex);
 }
 
 void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
 {
-	struct drm_mode_object *obj;
+	struct drm_mode_object *obj = NULL;
 
+	mutex_lock(&dev->mode_config.idr_mutex);
 	obj = idr_find(&dev->mode_config.crtc_idr, id);
 	if (!obj || (obj->type != type) || (obj->id != id))
-		return NULL;
+		obj = NULL;
+	mutex_unlock(&dev->mode_config.idr_mutex);
 
 	return obj;
 }
@@ -786,6 +789,7 @@ EXPORT_SYMBOL(drm_mode_create_dithering_property);
 void drm_mode_config_init(struct drm_device *dev)
 {
 	mutex_init(&dev->mode_config.mutex);
+	mutex_init(&dev->mode_config.idr_mutex);
 	INIT_LIST_HEAD(&dev->mode_config.fb_list);
 	INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
 	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 5ff88d952226..14c7a23dc157 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -294,6 +294,7 @@ EXPORT_SYMBOL(drm_init);
  */
 static void drm_cleanup(struct drm_device * dev)
 {
+	struct drm_map_list *r_list, *list_temp;
 	DRM_DEBUG("\n");
 
 	if (!dev) {
@@ -325,6 +326,9 @@ static void drm_cleanup(struct drm_device * dev)
 	drm_ht_remove(&dev->map_hash);
 	drm_ctxbitmap_cleanup(dev);
 
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+		drm_rmmap(dev, r_list->map);
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_put_minor(&dev->control);
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0fbb0da342cb..5a4d3244758a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -660,7 +660,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 
 	edid = (struct edid *)drm_ddc_read(adapter);
 	if (!edid) {
-		dev_warn(&connector->dev->pdev->dev, "%s: no EDID data\n",
+		dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
 			 drm_get_connector_name(connector));
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 477caa1b1e4b..69aa0ab28403 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -106,8 +106,6 @@ void drm_vblank_cleanup(struct drm_device *dev)
 
 	drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
 		 DRM_MEM_DRIVER);
-	drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
-		 DRM_MEM_DRIVER);
 	drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
 		 dev->num_crtcs, DRM_MEM_DRIVER);
 	drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
@@ -132,7 +130,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
 		    (unsigned long)dev);
 	spin_lock_init(&dev->vbl_lock);
-	atomic_set(&dev->vbl_signal_pending, 0);
 	dev->num_crtcs = num_crtcs;
 
 	dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
@@ -140,11 +137,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 	if (!dev->vbl_queue)
 		goto err;
 
-	dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
-				  DRM_MEM_DRIVER);
-	if (!dev->vbl_sigs)
-		goto err;
-
 	dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
 				       DRM_MEM_DRIVER);
 	if (!dev->_vblank_count)
@@ -177,7 +169,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 	/* Zero per-crtc vblank stuff */
 	for (i = 0; i < num_crtcs; i++) {
 		init_waitqueue_head(&dev->vbl_queue[i]);
-		INIT_LIST_HEAD(&dev->vbl_sigs[i]);
 		atomic_set(&dev->_vblank_count[i], 0);
 		atomic_set(&dev->vblank_refcount[i], 0);
 	}
@@ -540,15 +531,10 @@ out:
  * \param data user argument, pointing to a drm_wait_vblank structure.
  * \return zero on success or a negative number on failure.
  *
- * Verifies the IRQ is installed.
- *
- * If a signal is requested checks if this task has already scheduled the same signal
- * for the same vblank sequence number - nothing to be done in
- * that case. If the number of tasks waiting for the interrupt exceeds 100 the
- * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
- * task.
- *
- * If a signal is not requested, then calls vblank_wait().
+ * This function enables the vblank interrupt on the pipe requested, then
+ * sleeps waiting for the requested sequence number to occur, and drops
+ * the vblank interrupt refcount afterwards. (vblank irq disable follows that
+ * after a timeout with no further vblank waits scheduled).
  */
 int drm_wait_vblank(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
@@ -560,6 +546,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 	if ((!dev->pdev->irq) || (!dev->irq_enabled))
 		return -EINVAL;
 
+	if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+		return -EINVAL;
+
 	if (vblwait->request.type &
 	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
 		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
@@ -597,89 +586,26 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		vblwait->request.sequence = seq + 1;
 	}
 
-	if (flags & _DRM_VBLANK_SIGNAL) {
-		unsigned long irqflags;
-		struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
-		struct drm_vbl_sig *vbl_sig;
-
-		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-
-		/* Check if this task has already scheduled the same signal
-		 * for the same vblank sequence number; nothing to be done in
-		 * that case
-		 */
-		list_for_each_entry(vbl_sig, vbl_sigs, head) {
-			if (vbl_sig->sequence == vblwait->request.sequence
-			    && vbl_sig->info.si_signo ==
-			    vblwait->request.signal
-			    && vbl_sig->task == current) {
-				spin_unlock_irqrestore(&dev->vbl_lock,
-						       irqflags);
-				vblwait->reply.sequence = seq;
-				goto done;
-			}
-		}
-
-		if (atomic_read(&dev->vbl_signal_pending) >= 100) {
-			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-			ret = -EBUSY;
-			goto done;
-		}
-
-		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
-		vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
-				     DRM_MEM_DRIVER);
-		if (!vbl_sig) {
-			ret = -ENOMEM;
-			goto done;
-		}
-
-		/* Get a refcount on the vblank, which will be released by
-		 * drm_vbl_send_signals().
-		 */
-		ret = drm_vblank_get(dev, crtc);
-		if (ret) {
-			drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
-				 DRM_MEM_DRIVER);
-			goto done;
-		}
-
-		atomic_inc(&dev->vbl_signal_pending);
-
-		vbl_sig->sequence = vblwait->request.sequence;
-		vbl_sig->info.si_signo = vblwait->request.signal;
-		vbl_sig->task = current;
-
-		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-
-		list_add_tail(&vbl_sig->head, vbl_sigs);
-
-		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
-		vblwait->reply.sequence = seq;
-	} else {
-		DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
-			  vblwait->request.sequence, crtc);
-		dev->last_vblank_wait[crtc] = vblwait->request.sequence;
-		DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
-			    (((drm_vblank_count(dev, crtc) -
-			      vblwait->request.sequence) <= (1 << 23)) ||
-			    !dev->irq_enabled));
-
-		if (ret != -EINTR) {
-			struct timeval now;
-
-			do_gettimeofday(&now);
-
-			vblwait->reply.tval_sec = now.tv_sec;
-			vblwait->reply.tval_usec = now.tv_usec;
-			vblwait->reply.sequence = drm_vblank_count(dev, crtc);
-			DRM_DEBUG("returning %d to client\n",
-				  vblwait->reply.sequence);
-		} else {
-			DRM_DEBUG("vblank wait interrupted by signal\n");
-		}
+	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+		  vblwait->request.sequence, crtc);
+	dev->last_vblank_wait[crtc] = vblwait->request.sequence;
+	DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+		    (((drm_vblank_count(dev, crtc) -
+		      vblwait->request.sequence) <= (1 << 23)) ||
+		    !dev->irq_enabled));
+
+	if (ret != -EINTR) {
+		struct timeval now;
+
+		do_gettimeofday(&now);
+
+		vblwait->reply.tval_sec = now.tv_sec;
+		vblwait->reply.tval_usec = now.tv_usec;
+		vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+		DRM_DEBUG("returning %d to client\n",
+			  vblwait->reply.sequence);
+	} else {
+		DRM_DEBUG("vblank wait interrupted by signal\n");
 	}
 
 done:
@@ -688,46 +614,6 @@ done:
 }
 
 /**
- * Send the VBLANK signals.
- *
- * \param dev DRM device.
- * \param crtc CRTC where the vblank event occurred
- *
- * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
- *
- * If a signal is not requested, then calls vblank_wait().
- */
-static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
-{
-	struct drm_vbl_sig *vbl_sig, *tmp;
-	struct list_head *vbl_sigs;
-	unsigned int vbl_seq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->vbl_lock, flags);
-
-	vbl_sigs = &dev->vbl_sigs[crtc];
-	vbl_seq = drm_vblank_count(dev, crtc);
-
-	list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
-		if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
-			vbl_sig->info.si_code = vbl_seq;
-			send_sig_info(vbl_sig->info.si_signo,
-				      &vbl_sig->info, vbl_sig->task);
-
-			list_del(&vbl_sig->head);
-
-			drm_free(vbl_sig, sizeof(*vbl_sig),
-				 DRM_MEM_DRIVER);
-			atomic_dec(&dev->vbl_signal_pending);
-			drm_vblank_put(dev, crtc);
-		}
-	}
-
-	spin_unlock_irqrestore(&dev->vbl_lock, flags);
-}
-
-/**
  * drm_handle_vblank - handle a vblank event
  * @dev: DRM device
  * @crtc: where this event occurred
@@ -739,6 +625,5 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
 {
 	atomic_inc(&dev->_vblank_count[crtc]);
 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
-	drm_vbl_send_signals(dev, crtc);
 }
 EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 5ca132afa4f2..46bb923b097c 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -118,12 +118,20 @@ static void drm_master_destroy(struct kref *kref)
 	struct drm_master *master = container_of(kref, struct drm_master, refcount);
 	struct drm_magic_entry *pt, *next;
 	struct drm_device *dev = master->minor->dev;
+	struct drm_map_list *r_list, *list_temp;
 
 	list_del(&master->head);
 
 	if (dev->driver->master_destroy)
 		dev->driver->master_destroy(dev, master);
 
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+		if (r_list->master == master) {
+			drm_rmmap_locked(dev, r_list->map);
+			r_list = NULL;
+		}
+	}
+
 	if (master->unique) {
 		drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER);
 		master->unique = NULL;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index bbadf1c04142..ee64b7301f67 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -944,13 +944,14 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
 		0xff000000;
 
-	DRM_DEBUG("*** fb base 0x%08lx\n", dev->mode_config.fb_base);
-
-	if (IS_MOBILE(dev) || (IS_I9XX(dev) && !IS_I965G(dev) && !IS_G33(dev)))
+	if (IS_MOBILE(dev) || IS_I9XX(dev))
 		dev_priv->cursor_needs_physical = true;
 	else
 		dev_priv->cursor_needs_physical = false;
 
+	if (IS_I965G(dev) || IS_G33(dev))
+		dev_priv->cursor_needs_physical = false;
+
 	ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
 	if (ret)
 		goto kfree_devname;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 96316fd47233..debad5c04cc0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3364,7 +3364,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
 {
 	int i;
 
-	for (i = 0; i < I915_MAX_PHYS_OBJECT; i++)
+	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
 		i915_gem_free_phys_object(dev, i);
 }
 
@@ -3427,7 +3427,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 		ret = i915_gem_init_phys_object(dev, id,
 						obj->size);
 		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %d\n", id, obj->size);
+			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
 			goto out;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index a5a2f5339e9e..5ee9d4c25753 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -137,10 +137,6 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
 	chan->reg = reg;
 	snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
 	chan->adapter.owner = THIS_MODULE;
-#ifndef I2C_HW_B_INTELFB
-#define I2C_HW_B_INTELFB I2C_HW_B_I810
-#endif
-	chan->adapter.id = I2C_HW_B_INTELFB;
 	chan->adapter.algo_data = &chan->algo;
 	chan->adapter.dev.parent = &dev->pdev->dev;
 	chan->algo.setsda = set_data;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 2fafdcc108fe..b36a5214d8df 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -311,7 +311,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
 	if (dev_priv->panel_fixed_mode != NULL) {
 		struct drm_display_mode *mode;
 
-		mutex_unlock(&dev->mode_config.mutex);
+		mutex_lock(&dev->mode_config.mutex);
 		mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
 		drm_mode_probed_add(connector, mode);
 		mutex_unlock(&dev->mode_config.mutex);
@@ -340,6 +340,18 @@ static void intel_lvds_destroy(struct drm_connector *connector)
 	kfree(connector);
 }
 
+static int intel_lvds_set_property(struct drm_connector *connector,
+				   struct drm_property *property,
+				   uint64_t value)
+{
+	struct drm_device *dev = connector->dev;
+
+	if (property == dev->mode_config.dpms_property && connector->encoder)
+		intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf));
+
+	return 0;
+}
+
 static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
 	.dpms = intel_lvds_dpms,
 	.mode_fixup = intel_lvds_mode_fixup,
@@ -359,6 +371,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
 	.restore = intel_lvds_restore,
 	.detect = intel_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_lvds_set_property,
 	.destroy = intel_lvds_destroy,
 };
 