author     Linus Torvalds <torvalds@linux-foundation.org>    2010-07-07 14:43:28 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2010-07-07 14:43:28 -0400
commit     78178c7d6e127fff6dba027315fd6914304b05cf (patch)
tree       9b207197911feb57753bd9f1a9371851c8ca0991 /drivers
parent     140236b4b1c749c9b795ea3d11558a0eb5a3a080 (diff)
parent     023eb571a1d0eae738326042dcffa974257eb8c8 (diff)
Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm: correctly update connector DPMS status in drm_fb_helper
  drm/radeon/kms: fix shared ddc handling
  drm/ttm: Allocate the page pool manager in the heap.
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c              23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c    4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c         68
3 files changed, 56 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1f2cc6b09623..719662034bbf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -315,8 +315,9 @@ static void drm_fb_helper_on(struct fb_info *info)
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_crtc *crtc;
 	struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_connector *connector;
 	struct drm_encoder *encoder;
-	int i;
+	int i, j;
 
 	/*
 	 * For each CRTC in this fb, turn the crtc on then,
@@ -332,7 +333,14 @@ static void drm_fb_helper_on(struct fb_info *info)
 
 		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
 
-
+		/* Walk the connectors & encoders on this fb turning them on */
+		for (j = 0; j < fb_helper->connector_count; j++) {
+			connector = fb_helper->connector_info[j]->connector;
+			connector->dpms = DRM_MODE_DPMS_ON;
+			drm_connector_property_set_value(connector,
+							 dev->mode_config.dpms_property,
+							 DRM_MODE_DPMS_ON);
+		}
 		/* Found a CRTC on this fb, now find encoders */
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			if (encoder->crtc == crtc) {
@@ -352,8 +360,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_crtc *crtc;
 	struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_connector *connector;
 	struct drm_encoder *encoder;
-	int i;
+	int i, j;
 
 	/*
 	 * For each CRTC in this fb, find all associated encoders
@@ -367,6 +376,14 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
 		if (!crtc->enabled)
 			continue;
 
+		/* Walk the connectors on this fb and mark them off */
+		for (j = 0; j < fb_helper->connector_count; j++) {
+			connector = fb_helper->connector_info[j]->connector;
+			connector->dpms = dpms_mode;
+			drm_connector_property_set_value(connector,
+							 dev->mode_config.dpms_property,
+							 dpms_mode);
+		}
 		/* Found a CRTC on this fb, now find encoders */
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			if (encoder->crtc == crtc) {
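The drm_fb_helper change above keeps a connector's cached dpms field and its exported DPMS property in step whenever fbcon turns the display on or off, so a later property query from userspace reports the true state. A minimal sketch of the loop both hunks repeat, pulled into a hypothetical helper purely for illustration (the merged patch open-codes it in drm_fb_helper_on() and drm_fb_helper_off(); only fields and calls that appear in the hunks are used):

/*
 * Hypothetical helper mirroring the connector walk added in both hunks
 * above: update the cached DPMS state and the exported DPMS property
 * for every connector bound to this fb_helper.
 */
static void drm_fb_helper_set_connector_dpms(struct drm_fb_helper *fb_helper,
					     int dpms_mode)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_connector *connector;
	int j;

	for (j = 0; j < fb_helper->connector_count; j++) {
		connector = fb_helper->connector_info[j]->connector;
		connector->dpms = dpms_mode;		/* cached state */
		drm_connector_property_set_value(connector,
						 dev->mode_config.dpms_property,
						 dpms_mode);	/* exported property */
	}
}

drm_fb_helper_on() would call this with DRM_MODE_DPMS_ON, and drm_fb_helper_off() with the requested dpms_mode.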
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 0c7ccc6961a3..f58f8bd8f77b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -785,7 +785,9 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
 			if (connector == list_connector)
 				continue;
 			list_radeon_connector = to_radeon_connector(list_connector);
-			if (radeon_connector->devices == list_radeon_connector->devices) {
+			if (list_radeon_connector->shared_ddc &&
+			    (list_radeon_connector->ddc_bus->rec.i2c_id ==
+			     radeon_connector->ddc_bus->rec.i2c_id)) {
 				if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
 					if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
 						kfree(radeon_connector->edid);
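The radeon change narrows the duplicate-EDID check in radeon_dvi_detect(): instead of treating any connector with the same devices bitmask as a duplicate, it only considers a connector that is flagged as sharing DDC and whose ddc_bus sits on the same physical i2c line. A small sketch of the new test, under a hypothetical helper name and assuming the radeon_connector fields shown in the hunk:

/*
 * Hypothetical predicate illustrating the new test from the hunk above:
 * 'other' only shares an EDID with 'conn' when DDC sharing is flagged
 * and both connectors use the same i2c line.
 */
static bool radeon_connector_shares_ddc(struct radeon_connector *conn,
					struct radeon_connector *other)
{
	return other->shared_ddc &&
	       other->ddc_bus->rec.i2c_id == conn->ddc_bus->rec.i2c_id;
}

This keeps the EDID hand-off between DVI/HDMI connector pairs limited to connectors that genuinely share a DDC line.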
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 2f047577b1e3..b1d67dc973dc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -104,7 +104,6 @@ struct ttm_pool_opts {
 struct ttm_pool_manager {
 	struct kobject kobj;
 	struct shrinker mm_shrink;
-	atomic_t page_alloc_inited;
 	struct ttm_pool_opts options;
 
 	union {
@@ -142,7 +141,7 @@ static void ttm_pool_kobj_release(struct kobject *kobj)
 {
 	struct ttm_pool_manager *m =
 		container_of(kobj, struct ttm_pool_manager, kobj);
-	(void)m;
+	kfree(m);
 }
 
 static ssize_t ttm_pool_store(struct kobject *kobj,
@@ -214,9 +213,7 @@ static struct kobj_type ttm_pool_kobj_type = {
 	.default_attrs = ttm_pool_attrs,
 };
 
-static struct ttm_pool_manager _manager = {
-	.page_alloc_inited	= ATOMIC_INIT(0)
-};
+static struct ttm_pool_manager *_manager;
 
 #ifndef CONFIG_X86
 static int set_pages_array_wb(struct page **pages, int addrinarray)
@@ -271,7 +268,7 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
 	if (flags & TTM_PAGE_FLAG_DMA32)
 		pool_index |= 0x2;
 
-	return &_manager.pools[pool_index];
+	return &_manager->pools[pool_index];
 }
 
 /* set memory back to wb and free the pages. */
@@ -387,7 +384,7 @@ static int ttm_pool_get_num_unused_pages(void)
 	unsigned i;
 	int total = 0;
 	for (i = 0; i < NUM_POOLS; ++i)
-		total += _manager.pools[i].npages;
+		total += _manager->pools[i].npages;
 
 	return total;
 }
@@ -408,7 +405,7 @@ static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
 		unsigned nr_free = shrink_pages;
 		if (shrink_pages == 0)
 			break;
-		pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
 		shrink_pages = ttm_page_pool_free(pool, nr_free);
 	}
 	/* return estimated number of unused pages in pool */
@@ -576,10 +573,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 
 	/* If allocation request is small and there is not enough
 	 * pages in pool we fill the pool first */
-	if (count < _manager.options.small
+	if (count < _manager->options.small
 		&& count > pool->npages) {
 		struct list_head new_pages;
-		unsigned alloc_size = _manager.options.alloc_size;
+		unsigned alloc_size = _manager->options.alloc_size;
 
 		/**
 		 * Can't change page caching if in irqsave context. We have to
@@ -759,8 +756,8 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
 	pool->npages += page_count;
 	/* Check that we don't go over the pool limit */
 	page_count = 0;
-	if (pool->npages > _manager.options.max_size) {
-		page_count = pool->npages - _manager.options.max_size;
+	if (pool->npages > _manager->options.max_size) {
+		page_count = pool->npages - _manager->options.max_size;
 		/* free at least NUM_PAGES_TO_ALLOC number of pages
 		 * to reduce calls to set_memory_wb */
 		if (page_count < NUM_PAGES_TO_ALLOC)
@@ -785,33 +782,36 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
 	int ret;
-	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
-		return 0;
+
+	WARN_ON(_manager);
 
 	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
 
-	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 
-	ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
 
-	ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
-				  "wc dma");
+	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
 
-	ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
-				  "uc dma");
+	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
+				  GFP_USER | GFP_DMA32, "wc dma");
 
-	_manager.options.max_size = max_pages;
-	_manager.options.small = SMALL_ALLOCATION;
-	_manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
+				  GFP_USER | GFP_DMA32, "uc dma");
 
-	kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
-	ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+	_manager->options.max_size = max_pages;
+	_manager->options.small = SMALL_ALLOCATION;
+	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+				   &glob->kobj, "pool");
 	if (unlikely(ret != 0)) {
-		kobject_put(&_manager.kobj);
+		kobject_put(&_manager->kobj);
+		_manager = NULL;
 		return ret;
 	}
 
-	ttm_pool_mm_shrink_init(&_manager);
+	ttm_pool_mm_shrink_init(_manager);
 
 	return 0;
 }
@@ -820,16 +820,14 @@ void ttm_page_alloc_fini()
 {
 	int i;
 
-	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
-		return;
-
 	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
-	ttm_pool_mm_shrink_fini(&_manager);
+	ttm_pool_mm_shrink_fini(_manager);
 
 	for (i = 0; i < NUM_POOLS; ++i)
-		ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
 
-	kobject_put(&_manager.kobj);
+	kobject_put(&_manager->kobj);
+	_manager = NULL;
 }
 
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
@@ -837,14 +835,14 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 	struct ttm_page_pool *p;
 	unsigned i;
 	char *h[] = {"pool", "refills", "pages freed", "size"};
-	if (atomic_read(&_manager.page_alloc_inited) == 0) {
+	if (!_manager) {
 		seq_printf(m, "No pool allocator running.\n");
 		return 0;
 	}
 	seq_printf(m, "%6s %12s %13s %8s\n",
 			h[0], h[1], h[2], h[3]);
 	for (i = 0; i < NUM_POOLS; ++i) {
-		p = &_manager.pools[i];
+		p = &_manager->pools[i];
 
 		seq_printf(m, "%6s %12ld %13ld %8d\n",
 				p->name, p->nrefills,
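The TTM change replaces the static, refcount-inited _manager with a heap allocation whose lifetime is tied to its embedded kobject: kobject_init_and_add() takes the reference, and the final kobject_put() ends up in ttm_pool_kobj_release(), which now kfree()s the containing structure. A minimal sketch of that ownership pattern, using the names from the diff; the pool_manager_create() wrapper is hypothetical, and unlike the merged patch it also checks the kzalloc() result:

/*
 * Hypothetical wrapper showing the heap + kobject ownership pattern the
 * patch adopts.  Assumes ttm_pool_kobj_type's ->release points at
 * ttm_pool_kobj_release(), which kfree()s the manager (see hunk above).
 */
static struct ttm_pool_manager *pool_manager_create(struct ttm_mem_global *glob)
{
	struct ttm_pool_manager *m;
	int ret;

	m = kzalloc(sizeof(*m), GFP_KERNEL);	/* zeroed, as in the patch */
	if (!m)
		return NULL;

	ret = kobject_init_and_add(&m->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (ret) {
		/* init_and_add took a reference; dropping it frees m via ->release */
		kobject_put(&m->kobj);
		return NULL;
	}
	return m;
}

Teardown is then a single kobject_put(&m->kobj) followed by clearing the pointer, exactly as ttm_page_alloc_fini() does above; the manager must never be kfree()d directly once its kobject is live.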