diff options
81 files changed, 2984 insertions, 1224 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index be5aa7d5206b..2583ddfcc33e 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
| @@ -9,6 +9,7 @@ menuconfig DRM | |||
| 9 | depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU | 9 | depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU |
| 10 | select I2C | 10 | select I2C |
| 11 | select I2C_ALGOBIT | 11 | select I2C_ALGOBIT |
| 12 | select SLOW_WORK | ||
| 12 | help | 13 | help |
| 13 | Kernel-level support for the Direct Rendering Infrastructure (DRI) | 14 | Kernel-level support for the Direct Rendering Infrastructure (DRI) |
| 14 | introduced in XFree86 4.0. If you say Y here, you need to select | 15 | introduced in XFree86 4.0. If you say Y here, you need to select |
| @@ -23,7 +24,6 @@ config DRM_KMS_HELPER | |||
| 23 | depends on DRM | 24 | depends on DRM |
| 24 | select FB | 25 | select FB |
| 25 | select FRAMEBUFFER_CONSOLE if !EMBEDDED | 26 | select FRAMEBUFFER_CONSOLE if !EMBEDDED |
| 26 | select SLOW_WORK | ||
| 27 | help | 27 | help |
| 28 | FB and CRTC helpers for KMS drivers. | 28 | FB and CRTC helpers for KMS drivers. |
| 29 | 29 | ||
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 932b5aa96a67..3f46772f0cb2 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c | |||
| @@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv, | |||
| 79 | struct drm_device *dev = master->minor->dev; | 79 | struct drm_device *dev = master->minor->dev; |
| 80 | DRM_DEBUG("%d\n", magic); | 80 | DRM_DEBUG("%d\n", magic); |
| 81 | 81 | ||
| 82 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | 82 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); |
| 83 | if (!entry) | 83 | if (!entry) |
| 84 | return -ENOMEM; | 84 | return -ENOMEM; |
| 85 | memset(entry, 0, sizeof(*entry)); | ||
| 86 | entry->priv = priv; | 85 | entry->priv = priv; |
| 87 | entry->hash_item.key = (unsigned long)magic; | 86 | entry->hash_item.key = (unsigned long)magic; |
| 88 | mutex_lock(&dev->struct_mutex); | 87 | mutex_lock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index b142ac260d97..764401951041 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
| @@ -807,3 +807,98 @@ int drm_helper_resume_force_mode(struct drm_device *dev) | |||
| 807 | return 0; | 807 | return 0; |
| 808 | } | 808 | } |
| 809 | EXPORT_SYMBOL(drm_helper_resume_force_mode); | 809 | EXPORT_SYMBOL(drm_helper_resume_force_mode); |
| 810 | |||
| 811 | static struct slow_work_ops output_poll_ops; | ||
| 812 | |||
| 813 | #define DRM_OUTPUT_POLL_PERIOD (10*HZ) | ||
| 814 | static void output_poll_execute(struct slow_work *work) | ||
| 815 | { | ||
| 816 | struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work); | ||
| 817 | struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work); | ||
| 818 | struct drm_connector *connector; | ||
| 819 | enum drm_connector_status old_status, status; | ||
| 820 | bool repoll = false, changed = false; | ||
| 821 | int ret; | ||
| 822 | |||
| 823 | mutex_lock(&dev->mode_config.mutex); | ||
| 824 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 825 | |||
| 826 | /* if this is HPD or polled don't check it - | ||
| 827 | TV out for instance */ | ||
| 828 | if (!connector->polled) | ||
| 829 | continue; | ||
| 830 | |||
| 831 | else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) | ||
| 832 | repoll = true; | ||
| 833 | |||
| 834 | old_status = connector->status; | ||
| 835 | /* if we are connected and don't want to poll for disconnect | ||
| 836 | skip it */ | ||
| 837 | if (old_status == connector_status_connected && | ||
| 838 | !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) && | ||
| 839 | !(connector->polled & DRM_CONNECTOR_POLL_HPD)) | ||
| 840 | continue; | ||
| 841 | |||
| 842 | status = connector->funcs->detect(connector); | ||
| 843 | if (old_status != status) | ||
| 844 | changed = true; | ||
| 845 | } | ||
| 846 | |||
| 847 | mutex_unlock(&dev->mode_config.mutex); | ||
| 848 | |||
| 849 | if (changed) { | ||
| 850 | /* send a uevent + call fbdev */ | ||
| 851 | drm_sysfs_hotplug_event(dev); | ||
| 852 | if (dev->mode_config.funcs->output_poll_changed) | ||
| 853 | dev->mode_config.funcs->output_poll_changed(dev); | ||
| 854 | } | ||
| 855 | |||
| 856 | if (repoll) { | ||
| 857 | ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD); | ||
| 858 | if (ret) | ||
| 859 | DRM_ERROR("delayed enqueue failed %d\n", ret); | ||
| 860 | } | ||
| 861 | } | ||
| 862 | |||
| 863 | void drm_kms_helper_poll_init(struct drm_device *dev) | ||
| 864 | { | ||
| 865 | struct drm_connector *connector; | ||
| 866 | bool poll = false; | ||
| 867 | int ret; | ||
| 868 | |||
| 869 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 870 | if (connector->polled) | ||
| 871 | poll = true; | ||
| 872 | } | ||
| 873 | slow_work_register_user(THIS_MODULE); | ||
| 874 | delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, | ||
| 875 | &output_poll_ops); | ||
| 876 | |||
| 877 | if (poll) { | ||
| 878 | ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); | ||
| 879 | if (ret) | ||
| 880 | DRM_ERROR("delayed enqueue failed %d\n", ret); | ||
| 881 | } | ||
| 882 | } | ||
| 883 | EXPORT_SYMBOL(drm_kms_helper_poll_init); | ||
| 884 | |||
| 885 | void drm_kms_helper_poll_fini(struct drm_device *dev) | ||
| 886 | { | ||
| 887 | delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); | ||
| 888 | slow_work_unregister_user(THIS_MODULE); | ||
| 889 | } | ||
| 890 | EXPORT_SYMBOL(drm_kms_helper_poll_fini); | ||
| 891 | |||
| 892 | void drm_helper_hpd_irq_event(struct drm_device *dev) | ||
| 893 | { | ||
| 894 | if (!dev->mode_config.poll_enabled) | ||
| 895 | return; | ||
| 896 | delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); | ||
| 897 | /* schedule a slow work asap */ | ||
| 898 | delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0); | ||
| 899 | } | ||
| 900 | EXPORT_SYMBOL(drm_helper_hpd_irq_event); | ||
| 901 | |||
| 902 | static struct slow_work_ops output_poll_ops = { | ||
| 903 | .execute = output_poll_execute, | ||
| 904 | }; | ||
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c index 13f1537413fb..252cbd74df0e 100644 --- a/drivers/gpu/drm/drm_dma.c +++ b/drivers/gpu/drm/drm_dma.c | |||
| @@ -47,12 +47,10 @@ int drm_dma_setup(struct drm_device *dev) | |||
| 47 | { | 47 | { |
| 48 | int i; | 48 | int i; |
| 49 | 49 | ||
| 50 | dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL); | 50 | dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); |
| 51 | if (!dev->dma) | 51 | if (!dev->dma) |
| 52 | return -ENOMEM; | 52 | return -ENOMEM; |
| 53 | 53 | ||
| 54 | memset(dev->dma, 0, sizeof(*dev->dma)); | ||
| 55 | |||
| 56 | for (i = 0; i <= DRM_MAX_ORDER; i++) | 54 | for (i = 0; i <= DRM_MAX_ORDER; i++) |
| 57 | memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0])); | 55 | memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0])); |
| 58 | 56 | ||
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 71886749fa2c..dfd4f3677f3b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
| @@ -496,7 +496,7 @@ static struct drm_display_mode drm_dmt_modes[] = { | |||
| 496 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | 496 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
| 497 | /* 1024x768@85Hz */ | 497 | /* 1024x768@85Hz */ |
| 498 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, | 498 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, |
| 499 | 1072, 1376, 0, 768, 769, 772, 808, 0, | 499 | 1168, 1376, 0, 768, 769, 772, 808, 0, |
| 500 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | 500 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
| 501 | /* 1152x864@75Hz */ | 501 | /* 1152x864@75Hz */ |
| 502 | { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, | 502 | { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, |
| @@ -658,8 +658,8 @@ static struct drm_display_mode drm_dmt_modes[] = { | |||
| 658 | static const int drm_num_dmt_modes = | 658 | static const int drm_num_dmt_modes = |
| 659 | sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); | 659 | sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); |
| 660 | 660 | ||
| 661 | static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, | 661 | struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, |
| 662 | int hsize, int vsize, int fresh) | 662 | int hsize, int vsize, int fresh) |
| 663 | { | 663 | { |
| 664 | int i; | 664 | int i; |
| 665 | struct drm_display_mode *ptr, *mode; | 665 | struct drm_display_mode *ptr, *mode; |
| @@ -677,6 +677,7 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, | |||
| 677 | } | 677 | } |
| 678 | return mode; | 678 | return mode; |
| 679 | } | 679 | } |
| 680 | EXPORT_SYMBOL(drm_mode_find_dmt); | ||
| 680 | 681 | ||
| 681 | typedef void detailed_cb(struct detailed_timing *timing, void *closure); | 682 | typedef void detailed_cb(struct detailed_timing *timing, void *closure); |
| 682 | 683 | ||
| @@ -866,7 +867,7 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid, | |||
| 866 | } | 867 | } |
| 867 | 868 | ||
| 868 | /* check whether it can be found in default mode table */ | 869 | /* check whether it can be found in default mode table */ |
| 869 | mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate); | 870 | mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate); |
| 870 | if (mode) | 871 | if (mode) |
| 871 | return mode; | 872 | return mode; |
| 872 | 873 | ||
| @@ -1383,14 +1384,14 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing) | |||
| 1383 | for (i = 0; i < 6; i++) { | 1384 | for (i = 0; i < 6; i++) { |
| 1384 | for (j = 7; j > 0; j--) { | 1385 | for (j = 7; j > 0; j--) { |
| 1385 | m = (i * 8) + (7 - j); | 1386 | m = (i * 8) + (7 - j); |
| 1386 | if (m > num_est3_modes) | 1387 | if (m >= num_est3_modes) |
| 1387 | break; | 1388 | break; |
| 1388 | if (est[i] & (1 << j)) { | 1389 | if (est[i] & (1 << j)) { |
| 1389 | mode = drm_find_dmt(connector->dev, | 1390 | mode = drm_mode_find_dmt(connector->dev, |
| 1390 | est3_modes[m].w, | 1391 | est3_modes[m].w, |
| 1391 | est3_modes[m].h, | 1392 | est3_modes[m].h, |
| 1392 | est3_modes[m].r | 1393 | est3_modes[m].r |
| 1393 | /*, est3_modes[m].rb */); | 1394 | /*, est3_modes[m].rb */); |
| 1394 | if (mode) { | 1395 | if (mode) { |
| 1395 | drm_mode_probed_add(connector, mode); | 1396 | drm_mode_probed_add(connector, mode); |
| 1396 | modes++; | 1397 | modes++; |
| @@ -1509,7 +1510,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector, | |||
| 1509 | char *edid_ext = NULL; | 1510 | char *edid_ext = NULL; |
| 1510 | struct detailed_timing *timing; | 1511 | struct detailed_timing *timing; |
| 1511 | int start_offset, end_offset; | 1512 | int start_offset, end_offset; |
| 1512 | int timing_level; | ||
| 1513 | 1513 | ||
| 1514 | if (edid->version == 1 && edid->revision < 3) | 1514 | if (edid->version == 1 && edid->revision < 3) |
| 1515 | return 0; | 1515 | return 0; |
| @@ -1536,7 +1536,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector, | |||
| 1536 | return 0; | 1536 | return 0; |
| 1537 | } | 1537 | } |
| 1538 | 1538 | ||
| 1539 | timing_level = standard_timing_level(edid); | ||
| 1540 | end_offset = EDID_LENGTH; | 1539 | end_offset = EDID_LENGTH; |
| 1541 | end_offset -= sizeof(struct detailed_timing); | 1540 | end_offset -= sizeof(struct detailed_timing); |
| 1542 | for (i = start_offset; i < end_offset; | 1541 | for (i = start_offset; i < end_offset; |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index b28e56382e86..b3779d243aef 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -42,8 +42,6 @@ MODULE_LICENSE("GPL and additional rights"); | |||
| 42 | 42 | ||
| 43 | static LIST_HEAD(kernel_fb_helper_list); | 43 | static LIST_HEAD(kernel_fb_helper_list); |
| 44 | 44 | ||
| 45 | static struct slow_work_ops output_status_change_ops; | ||
| 46 | |||
| 47 | /* simple single crtc case helper function */ | 45 | /* simple single crtc case helper function */ |
| 48 | int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) | 46 | int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) |
| 49 | { | 47 | { |
| @@ -425,19 +423,13 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) | |||
| 425 | 423 | ||
| 426 | int drm_fb_helper_init(struct drm_device *dev, | 424 | int drm_fb_helper_init(struct drm_device *dev, |
| 427 | struct drm_fb_helper *fb_helper, | 425 | struct drm_fb_helper *fb_helper, |
| 428 | int crtc_count, int max_conn_count, | 426 | int crtc_count, int max_conn_count) |
| 429 | bool polled) | ||
| 430 | { | 427 | { |
| 431 | struct drm_crtc *crtc; | 428 | struct drm_crtc *crtc; |
| 432 | int ret = 0; | 429 | int ret = 0; |
| 433 | int i; | 430 | int i; |
| 434 | 431 | ||
| 435 | fb_helper->dev = dev; | 432 | fb_helper->dev = dev; |
| 436 | fb_helper->poll_enabled = polled; | ||
| 437 | |||
| 438 | slow_work_register_user(THIS_MODULE); | ||
| 439 | delayed_slow_work_init(&fb_helper->output_status_change_slow_work, | ||
| 440 | &output_status_change_ops); | ||
| 441 | 433 | ||
| 442 | INIT_LIST_HEAD(&fb_helper->kernel_fb_list); | 434 | INIT_LIST_HEAD(&fb_helper->kernel_fb_list); |
| 443 | 435 | ||
| @@ -485,7 +477,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) | |||
| 485 | if (!list_empty(&fb_helper->kernel_fb_list)) { | 477 | if (!list_empty(&fb_helper->kernel_fb_list)) { |
| 486 | list_del(&fb_helper->kernel_fb_list); | 478 | list_del(&fb_helper->kernel_fb_list); |
| 487 | if (list_empty(&kernel_fb_helper_list)) { | 479 | if (list_empty(&kernel_fb_helper_list)) { |
| 488 | printk(KERN_INFO "unregistered panic notifier\n"); | 480 | printk(KERN_INFO "drm: unregistered panic notifier\n"); |
| 489 | atomic_notifier_chain_unregister(&panic_notifier_list, | 481 | atomic_notifier_chain_unregister(&panic_notifier_list, |
| 490 | &paniced); | 482 | &paniced); |
| 491 | unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); | 483 | unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); |
| @@ -494,8 +486,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) | |||
| 494 | 486 | ||
| 495 | drm_fb_helper_crtc_free(fb_helper); | 487 | drm_fb_helper_crtc_free(fb_helper); |
| 496 | 488 | ||
| 497 | delayed_slow_work_cancel(&fb_helper->output_status_change_slow_work); | ||
| 498 | slow_work_unregister_user(THIS_MODULE); | ||
| 499 | } | 489 | } |
| 500 | EXPORT_SYMBOL(drm_fb_helper_fini); | 490 | EXPORT_SYMBOL(drm_fb_helper_fini); |
| 501 | 491 | ||
| @@ -713,7 +703,7 @@ int drm_fb_helper_set_par(struct fb_info *info) | |||
| 713 | 703 | ||
| 714 | if (fb_helper->delayed_hotplug) { | 704 | if (fb_helper->delayed_hotplug) { |
| 715 | fb_helper->delayed_hotplug = false; | 705 | fb_helper->delayed_hotplug = false; |
| 716 | delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 0); | 706 | drm_fb_helper_hotplug_event(fb_helper); |
| 717 | } | 707 | } |
| 718 | return 0; | 708 | return 0; |
| 719 | } | 709 | } |
| @@ -826,7 +816,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, | |||
| 826 | if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { | 816 | if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { |
| 827 | /* hmm everyone went away - assume VGA cable just fell out | 817 | /* hmm everyone went away - assume VGA cable just fell out |
| 828 | and will come back later. */ | 818 | and will come back later. */ |
| 829 | DRM_ERROR("Cannot find any crtc or sizes - going 1024x768\n"); | 819 | DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n"); |
| 830 | sizes.fb_width = sizes.surface_width = 1024; | 820 | sizes.fb_width = sizes.surface_width = 1024; |
| 831 | sizes.fb_height = sizes.surface_height = 768; | 821 | sizes.fb_height = sizes.surface_height = 768; |
| 832 | } | 822 | } |
| @@ -859,7 +849,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, | |||
| 859 | /* Switch back to kernel console on panic */ | 849 | /* Switch back to kernel console on panic */ |
| 860 | /* multi card linked list maybe */ | 850 | /* multi card linked list maybe */ |
| 861 | if (list_empty(&kernel_fb_helper_list)) { | 851 | if (list_empty(&kernel_fb_helper_list)) { |
| 862 | printk(KERN_INFO "registered panic notifier\n"); | 852 | printk(KERN_INFO "drm: registered panic notifier\n"); |
| 863 | atomic_notifier_chain_register(&panic_notifier_list, | 853 | atomic_notifier_chain_register(&panic_notifier_list, |
| 864 | &paniced); | 854 | &paniced); |
| 865 | register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); | 855 | register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); |
| @@ -1080,6 +1070,79 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper, | |||
| 1080 | } | 1070 | } |
| 1081 | } | 1071 | } |
| 1082 | 1072 | ||
| 1073 | static bool drm_target_cloned(struct drm_fb_helper *fb_helper, | ||
| 1074 | struct drm_display_mode **modes, | ||
| 1075 | bool *enabled, int width, int height) | ||
| 1076 | { | ||
| 1077 | int count, i, j; | ||
| 1078 | bool can_clone = false; | ||
| 1079 | struct drm_fb_helper_connector *fb_helper_conn; | ||
| 1080 | struct drm_display_mode *dmt_mode, *mode; | ||
| 1081 | |||
| 1082 | /* only contemplate cloning in the single crtc case */ | ||
| 1083 | if (fb_helper->crtc_count > 1) | ||
| 1084 | return false; | ||
| 1085 | |||
| 1086 | count = 0; | ||
| 1087 | for (i = 0; i < fb_helper->connector_count; i++) { | ||
| 1088 | if (enabled[i]) | ||
| 1089 | count++; | ||
| 1090 | } | ||
| 1091 | |||
| 1092 | /* only contemplate cloning if more than one connector is enabled */ | ||
| 1093 | if (count <= 1) | ||
| 1094 | return false; | ||
| 1095 | |||
| 1096 | /* check the command line or if nothing common pick 1024x768 */ | ||
| 1097 | can_clone = true; | ||
| 1098 | for (i = 0; i < fb_helper->connector_count; i++) { | ||
| 1099 | if (!enabled[i]) | ||
| 1100 | continue; | ||
| 1101 | fb_helper_conn = fb_helper->connector_info[i]; | ||
| 1102 | modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); | ||
| 1103 | if (!modes[i]) { | ||
| 1104 | can_clone = false; | ||
| 1105 | break; | ||
| 1106 | } | ||
| 1107 | for (j = 0; j < i; j++) { | ||
| 1108 | if (!enabled[j]) | ||
| 1109 | continue; | ||
| 1110 | if (!drm_mode_equal(modes[j], modes[i])) | ||
| 1111 | can_clone = false; | ||
| 1112 | } | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | if (can_clone) { | ||
| 1116 | DRM_DEBUG_KMS("can clone using command line\n"); | ||
| 1117 | return true; | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | /* try and find a 1024x768 mode on each connector */ | ||
| 1121 | can_clone = true; | ||
| 1122 | dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60); | ||
| 1123 | |||
| 1124 | for (i = 0; i < fb_helper->connector_count; i++) { | ||
| 1125 | |||
| 1126 | if (!enabled[i]) | ||
| 1127 | continue; | ||
| 1128 | |||
| 1129 | fb_helper_conn = fb_helper->connector_info[i]; | ||
| 1130 | list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { | ||
| 1131 | if (drm_mode_equal(mode, dmt_mode)) | ||
| 1132 | modes[i] = mode; | ||
| 1133 | } | ||
| 1134 | if (!modes[i]) | ||
| 1135 | can_clone = false; | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | if (can_clone) { | ||
| 1139 | DRM_DEBUG_KMS("can clone using 1024x768\n"); | ||
| 1140 | return true; | ||
| 1141 | } | ||
| 1142 | DRM_INFO("kms: can't enable cloning when we probably wanted to.\n"); | ||
| 1143 | return false; | ||
| 1144 | } | ||
| 1145 | |||
| 1083 | static bool drm_target_preferred(struct drm_fb_helper *fb_helper, | 1146 | static bool drm_target_preferred(struct drm_fb_helper *fb_helper, |
| 1084 | struct drm_display_mode **modes, | 1147 | struct drm_display_mode **modes, |
| 1085 | bool *enabled, int width, int height) | 1148 | bool *enabled, int width, int height) |
| @@ -1173,8 +1236,12 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, | |||
| 1173 | break; | 1236 | break; |
| 1174 | 1237 | ||
| 1175 | if (o < n) { | 1238 | if (o < n) { |
| 1176 | /* ignore cloning for now */ | 1239 | /* ignore cloning unless only a single crtc */ |
| 1177 | continue; | 1240 | if (fb_helper->crtc_count > 1) |
| 1241 | continue; | ||
| 1242 | |||
| 1243 | if (!drm_mode_equal(modes[o], modes[n])) | ||
| 1244 | continue; | ||
| 1178 | } | 1245 | } |
| 1179 | 1246 | ||
| 1180 | crtcs[n] = crtc; | 1247 | crtcs[n] = crtc; |
| @@ -1224,9 +1291,12 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) | |||
| 1224 | 1291 | ||
| 1225 | drm_enable_connectors(fb_helper, enabled); | 1292 | drm_enable_connectors(fb_helper, enabled); |
| 1226 | 1293 | ||
| 1227 | ret = drm_target_preferred(fb_helper, modes, enabled, width, height); | 1294 | ret = drm_target_cloned(fb_helper, modes, enabled, width, height); |
| 1228 | if (!ret) | 1295 | if (!ret) { |
| 1229 | DRM_ERROR("Unable to find initial modes\n"); | 1296 | ret = drm_target_preferred(fb_helper, modes, enabled, width, height); |
| 1297 | if (!ret) | ||
| 1298 | DRM_ERROR("Unable to find initial modes\n"); | ||
| 1299 | } | ||
| 1230 | 1300 | ||
| 1231 | DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height); | 1301 | DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height); |
| 1232 | 1302 | ||
| @@ -1292,12 +1362,7 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) | |||
| 1292 | * we shouldn't end up with no modes here. | 1362 | * we shouldn't end up with no modes here. |
| 1293 | */ | 1363 | */ |
| 1294 | if (count == 0) { | 1364 | if (count == 0) { |
| 1295 | if (fb_helper->poll_enabled) { | 1365 | printk(KERN_INFO "No connectors reported connected with modes\n"); |
| 1296 | delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, | ||
| 1297 | 5*HZ); | ||
| 1298 | printk(KERN_INFO "No connectors reported connected with modes - started polling\n"); | ||
| 1299 | } else | ||
| 1300 | printk(KERN_INFO "No connectors reported connected with modes\n"); | ||
| 1301 | } | 1366 | } |
| 1302 | drm_setup_crtcs(fb_helper); | 1367 | drm_setup_crtcs(fb_helper); |
| 1303 | 1368 | ||
| @@ -1305,71 +1370,16 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) | |||
| 1305 | } | 1370 | } |
| 1306 | EXPORT_SYMBOL(drm_fb_helper_initial_config); | 1371 | EXPORT_SYMBOL(drm_fb_helper_initial_config); |
| 1307 | 1372 | ||
| 1308 | /* we got a hotplug irq - need to update fbcon */ | 1373 | bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) |
| 1309 | void drm_helper_fb_hpd_irq_event(struct drm_fb_helper *fb_helper) | ||
| 1310 | { | ||
| 1311 | /* if we don't have the fbdev registered yet do nothing */ | ||
| 1312 | if (!fb_helper->fbdev) | ||
| 1313 | return; | ||
| 1314 | |||
| 1315 | /* schedule a slow work asap */ | ||
| 1316 | delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 0); | ||
| 1317 | } | ||
| 1318 | EXPORT_SYMBOL(drm_helper_fb_hpd_irq_event); | ||
| 1319 | |||
| 1320 | bool drm_helper_fb_hotplug_event(struct drm_fb_helper *fb_helper, bool polled) | ||
| 1321 | { | 1374 | { |
| 1322 | int count = 0; | 1375 | int count = 0; |
| 1323 | int ret; | ||
| 1324 | u32 max_width, max_height, bpp_sel; | 1376 | u32 max_width, max_height, bpp_sel; |
| 1325 | |||
| 1326 | if (!fb_helper->fb) | ||
| 1327 | return false; | ||
| 1328 | DRM_DEBUG_KMS("\n"); | ||
| 1329 | |||
| 1330 | max_width = fb_helper->fb->width; | ||
| 1331 | max_height = fb_helper->fb->height; | ||
| 1332 | bpp_sel = fb_helper->fb->bits_per_pixel; | ||
| 1333 | |||
| 1334 | count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, | ||
| 1335 | max_height); | ||
| 1336 | if (fb_helper->poll_enabled && !polled) { | ||
| 1337 | if (count) { | ||
| 1338 | delayed_slow_work_cancel(&fb_helper->output_status_change_slow_work); | ||
| 1339 | } else { | ||
| 1340 | ret = delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 5*HZ); | ||
| 1341 | } | ||
| 1342 | } | ||
| 1343 | drm_setup_crtcs(fb_helper); | ||
| 1344 | |||
| 1345 | return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); | ||
| 1346 | } | ||
| 1347 | EXPORT_SYMBOL(drm_helper_fb_hotplug_event); | ||
| 1348 | |||
| 1349 | /* | ||
| 1350 | * delayed work queue execution function | ||
| 1351 | * - check if fbdev is actually in use on the gpu | ||
| 1352 | * - if not set delayed flag and repoll if necessary | ||
| 1353 | * - check for connector status change | ||
| 1354 | * - repoll if 0 modes found | ||
| 1355 | *- call driver output status changed notifier | ||
| 1356 | */ | ||
| 1357 | static void output_status_change_execute(struct slow_work *work) | ||
| 1358 | { | ||
| 1359 | struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work); | ||
| 1360 | struct drm_fb_helper *fb_helper = container_of(delayed_work, struct drm_fb_helper, output_status_change_slow_work); | ||
| 1361 | struct drm_connector *connector; | ||
| 1362 | enum drm_connector_status old_status, status; | ||
| 1363 | bool repoll, changed = false; | ||
| 1364 | int ret; | ||
| 1365 | int i; | ||
| 1366 | bool bound = false, crtcs_bound = false; | 1377 | bool bound = false, crtcs_bound = false; |
| 1367 | struct drm_crtc *crtc; | 1378 | struct drm_crtc *crtc; |
| 1368 | 1379 | ||
| 1369 | repoll = fb_helper->poll_enabled; | 1380 | if (!fb_helper->fb) |
| 1381 | return false; | ||
| 1370 | 1382 | ||
| 1371 | /* first of all check the fbcon framebuffer is actually bound to any crtc */ | ||
| 1372 | /* take into account that no crtc at all maybe bound */ | ||
| 1373 | list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { | 1383 | list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { |
| 1374 | if (crtc->fb) | 1384 | if (crtc->fb) |
| 1375 | crtcs_bound = true; | 1385 | crtcs_bound = true; |
| @@ -1377,38 +1387,21 @@ static void output_status_change_execute(struct slow_work *work) | |||
| 1377 | bound = true; | 1387 | bound = true; |
| 1378 | } | 1388 | } |
| 1379 | 1389 | ||
| 1380 | if (bound == false && crtcs_bound) { | 1390 | if (!bound && crtcs_bound) { |
| 1381 | fb_helper->delayed_hotplug = true; | 1391 | fb_helper->delayed_hotplug = true; |
| 1382 | goto requeue; | 1392 | return false; |
| 1383 | } | 1393 | } |
| 1394 | DRM_DEBUG_KMS("\n"); | ||
| 1384 | 1395 | ||
| 1385 | for (i = 0; i < fb_helper->connector_count; i++) { | 1396 | max_width = fb_helper->fb->width; |
| 1386 | connector = fb_helper->connector_info[i]->connector; | 1397 | max_height = fb_helper->fb->height; |
| 1387 | old_status = connector->status; | 1398 | bpp_sel = fb_helper->fb->bits_per_pixel; |
| 1388 | status = connector->funcs->detect(connector); | ||
| 1389 | if (old_status != status) { | ||
| 1390 | changed = true; | ||
| 1391 | } | ||
| 1392 | if (status == connector_status_connected && repoll) { | ||
| 1393 | DRM_DEBUG("%s is connected - stop polling\n", drm_get_connector_name(connector)); | ||
| 1394 | repoll = false; | ||
| 1395 | } | ||
| 1396 | } | ||
| 1397 | 1399 | ||
| 1398 | if (changed) { | 1400 | count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, |
| 1399 | if (fb_helper->funcs->fb_output_status_changed) | 1401 | max_height); |
| 1400 | fb_helper->funcs->fb_output_status_changed(fb_helper); | 1402 | drm_setup_crtcs(fb_helper); |
| 1401 | } | ||
| 1402 | 1403 | ||
| 1403 | requeue: | 1404 | return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); |
| 1404 | if (repoll) { | ||
| 1405 | ret = delayed_slow_work_enqueue(delayed_work, 5*HZ); | ||
| 1406 | if (ret) | ||
| 1407 | DRM_ERROR("delayed enqueue failed %d\n", ret); | ||
| 1408 | } | ||
| 1409 | } | 1405 | } |
| 1410 | 1406 | EXPORT_SYMBOL(drm_fb_helper_hotplug_event); | |
| 1411 | static struct slow_work_ops output_status_change_ops = { | ||
| 1412 | .execute = output_status_change_execute, | ||
| 1413 | }; | ||
| 1414 | 1407 | ||
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 9d532d7fdf59..e7aace20981f 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
| @@ -243,11 +243,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
| 243 | 243 | ||
| 244 | DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); | 244 | DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); |
| 245 | 245 | ||
| 246 | priv = kmalloc(sizeof(*priv), GFP_KERNEL); | 246 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
| 247 | if (!priv) | 247 | if (!priv) |
| 248 | return -ENOMEM; | 248 | return -ENOMEM; |
| 249 | 249 | ||
| 250 | memset(priv, 0, sizeof(*priv)); | ||
| 251 | filp->private_data = priv; | 250 | filp->private_data = priv; |
| 252 | priv->filp = filp; | 251 | priv->filp = filp; |
| 253 | priv->uid = current_euid(); | 252 | priv->uid = current_euid(); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 851a2f8ed6e6..2a6b5de5ae5d 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -1504,7 +1504,7 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
| 1504 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); | 1504 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); |
| 1505 | 1505 | ||
| 1506 | intel_fbdev_init(dev); | 1506 | intel_fbdev_init(dev); |
| 1507 | 1507 | drm_kms_helper_poll_init(dev); | |
| 1508 | return 0; | 1508 | return 0; |
| 1509 | 1509 | ||
| 1510 | destroy_ringbuffer: | 1510 | destroy_ringbuffer: |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a7e4b1f27497..26792af7e1a1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -271,8 +271,7 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
| 271 | } | 271 | } |
| 272 | } | 272 | } |
| 273 | /* Just fire off a uevent and let userspace tell us what to do */ | 273 | /* Just fire off a uevent and let userspace tell us what to do */ |
| 274 | intelfb_hotplug(dev, false); | 274 | drm_helper_hpd_irq_event(dev); |
| 275 | drm_sysfs_hotplug_event(dev); | ||
| 276 | } | 275 | } |
| 277 | 276 | ||
| 278 | static void i915_handle_rps_change(struct drm_device *dev) | 277 | static void i915_handle_rps_change(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 26756cd34e3c..e16ac5a28c3c 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -577,5 +577,10 @@ void intel_crt_init(struct drm_device *dev) | |||
| 577 | 577 | ||
| 578 | drm_sysfs_connector_add(connector); | 578 | drm_sysfs_connector_add(connector); |
| 579 | 579 | ||
| 580 | if (I915_HAS_HOTPLUG(dev)) | ||
| 581 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 582 | else | ||
| 583 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
| 584 | |||
| 580 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; | 585 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; |
| 581 | } | 586 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e775ce67be33..8c668e3122a5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -4984,6 +4984,7 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
| 4984 | 4984 | ||
| 4985 | static const struct drm_mode_config_funcs intel_mode_funcs = { | 4985 | static const struct drm_mode_config_funcs intel_mode_funcs = { |
| 4986 | .fb_create = intel_user_framebuffer_create, | 4986 | .fb_create = intel_user_framebuffer_create, |
| 4987 | .output_poll_changed = intel_fb_output_poll_changed, | ||
| 4987 | }; | 4988 | }; |
| 4988 | 4989 | ||
| 4989 | static struct drm_gem_object * | 4990 | static struct drm_gem_object * |
| @@ -5371,6 +5372,7 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
| 5371 | 5372 | ||
| 5372 | mutex_lock(&dev->struct_mutex); | 5373 | mutex_lock(&dev->struct_mutex); |
| 5373 | 5374 | ||
| 5375 | drm_kms_helper_poll_fini(dev); | ||
| 5374 | intel_fbdev_fini(dev); | 5376 | intel_fbdev_fini(dev); |
| 5375 | 5377 | ||
| 5376 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 5378 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f6299bb788e5..6b1c9a27c27a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -1392,6 +1392,8 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
| 1392 | DRM_MODE_CONNECTOR_DisplayPort); | 1392 | DRM_MODE_CONNECTOR_DisplayPort); |
| 1393 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 1393 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
| 1394 | 1394 | ||
| 1395 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 1396 | |||
| 1395 | if (output_reg == DP_A) | 1397 | if (output_reg == DP_A) |
| 1396 | intel_encoder->type = INTEL_OUTPUT_EDP; | 1398 | intel_encoder->type = INTEL_OUTPUT_EDP; |
| 1397 | else | 1399 | else |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3230e8d2ea43..df931f787665 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -235,5 +235,5 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
| 235 | extern int intel_overlay_attrs(struct drm_device *dev, void *data, | 235 | extern int intel_overlay_attrs(struct drm_device *dev, void *data, |
| 236 | struct drm_file *file_priv); | 236 | struct drm_file *file_priv); |
| 237 | 237 | ||
| 238 | void intelfb_hotplug(struct drm_device *dev, bool polled); | 238 | extern void intel_fb_output_poll_changed(struct drm_device *dev); |
| 239 | #endif /* __INTEL_DRV_H__ */ | 239 | #endif /* __INTEL_DRV_H__ */ |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index b04e0a86bf9a..6f53cf7fbc50 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
| @@ -128,11 +128,16 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
| 128 | info->fbops = &intelfb_ops; | 128 | info->fbops = &intelfb_ops; |
| 129 | 129 | ||
| 130 | /* setup aperture base/size for vesafb takeover */ | 130 | /* setup aperture base/size for vesafb takeover */ |
| 131 | info->aperture_base = dev->mode_config.fb_base; | 131 | info->apertures = alloc_apertures(1); |
| 132 | if (!info->apertures) { | ||
| 133 | ret = -ENOMEM; | ||
| 134 | goto out_unpin; | ||
| 135 | } | ||
| 136 | info->apertures->ranges[0].base = dev->mode_config.fb_base; | ||
| 132 | if (IS_I9XX(dev)) | 137 | if (IS_I9XX(dev)) |
| 133 | info->aperture_size = pci_resource_len(dev->pdev, 2); | 138 | info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); |
| 134 | else | 139 | else |
| 135 | info->aperture_size = pci_resource_len(dev->pdev, 0); | 140 | info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); |
| 136 | 141 | ||
| 137 | info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; | 142 | info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; |
| 138 | info->fix.smem_len = size; | 143 | info->fix.smem_len = size; |
| @@ -202,12 +207,6 @@ static int intel_fb_find_or_create_single(struct drm_fb_helper *helper, | |||
| 202 | return new_fb; | 207 | return new_fb; |
| 203 | } | 208 | } |
| 204 | 209 | ||
| 205 | void intelfb_hotplug(struct drm_device *dev, bool polled) | ||
| 206 | { | ||
| 207 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 208 | drm_helper_fb_hpd_irq_event(&dev_priv->fbdev->helper); | ||
| 209 | } | ||
| 210 | |||
| 211 | static struct drm_fb_helper_funcs intel_fb_helper_funcs = { | 210 | static struct drm_fb_helper_funcs intel_fb_helper_funcs = { |
| 212 | .gamma_set = intel_crtc_fb_gamma_set, | 211 | .gamma_set = intel_crtc_fb_gamma_set, |
| 213 | .gamma_get = intel_crtc_fb_gamma_get, | 212 | .gamma_get = intel_crtc_fb_gamma_get, |
| @@ -251,7 +250,7 @@ int intel_fbdev_init(struct drm_device *dev) | |||
| 251 | ifbdev->helper.funcs = &intel_fb_helper_funcs; | 250 | ifbdev->helper.funcs = &intel_fb_helper_funcs; |
| 252 | 251 | ||
| 253 | drm_fb_helper_init(dev, &ifbdev->helper, 2, | 252 | drm_fb_helper_init(dev, &ifbdev->helper, 2, |
| 254 | INTELFB_CONN_LIMIT, false); | 253 | INTELFB_CONN_LIMIT); |
| 255 | 254 | ||
| 256 | drm_fb_helper_single_add_all_connectors(&ifbdev->helper); | 255 | drm_fb_helper_single_add_all_connectors(&ifbdev->helper); |
| 257 | drm_fb_helper_initial_config(&ifbdev->helper, 32); | 256 | drm_fb_helper_initial_config(&ifbdev->helper, 32); |
| @@ -269,3 +268,9 @@ void intel_fbdev_fini(struct drm_device *dev) | |||
| 269 | dev_priv->fbdev = NULL; | 268 | dev_priv->fbdev = NULL; |
| 270 | } | 269 | } |
| 271 | MODULE_LICENSE("GPL and additional rights"); | 270 | MODULE_LICENSE("GPL and additional rights"); |
| 271 | |||
| 272 | void intel_fb_output_poll_changed(struct drm_device *dev) | ||
| 273 | { | ||
| 274 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 275 | drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); | ||
| 276 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 8a1c4eddc030..65727f0a79a3 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -237,6 +237,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
| 237 | 237 | ||
| 238 | intel_encoder->type = INTEL_OUTPUT_HDMI; | 238 | intel_encoder->type = INTEL_OUTPUT_HDMI; |
| 239 | 239 | ||
| 240 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 240 | connector->interlace_allowed = 0; | 241 | connector->interlace_allowed = 0; |
| 241 | connector->doublescan_allowed = 0; | 242 | connector->doublescan_allowed = 0; |
| 242 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 243 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 5ad5a098b5bb..aba72c489a2f 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
| @@ -2218,6 +2218,7 @@ intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device) | |||
| 2218 | } | 2218 | } |
| 2219 | 2219 | ||
| 2220 | connector = &intel_connector->base; | 2220 | connector = &intel_connector->base; |
| 2221 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | ||
| 2221 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2222 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; |
| 2222 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2223 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
| 2223 | 2224 | ||
| @@ -2284,6 +2285,7 @@ intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device) | |||
| 2284 | return false; | 2285 | return false; |
| 2285 | 2286 | ||
| 2286 | connector = &intel_connector->base; | 2287 | connector = &intel_connector->base; |
| 2288 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
| 2287 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2289 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
| 2288 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2290 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
| 2289 | sdvo_connector = intel_connector->dev_priv; | 2291 | sdvo_connector = intel_connector->dev_priv; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 14afe1e47e57..7e663a79829f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
| @@ -843,6 +843,7 @@ nouveau_connector_create(struct drm_device *dev, | |||
| 843 | 843 | ||
| 844 | switch (dcb->type) { | 844 | switch (dcb->type) { |
| 845 | case DCB_CONNECTOR_VGA: | 845 | case DCB_CONNECTOR_VGA: |
| 846 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
| 846 | if (dev_priv->card_type >= NV_50) { | 847 | if (dev_priv->card_type >= NV_50) { |
| 847 | drm_connector_attach_property(connector, | 848 | drm_connector_attach_property(connector, |
| 848 | dev->mode_config.scaling_mode_property, | 849 | dev->mode_config.scaling_mode_property, |
| @@ -854,6 +855,17 @@ nouveau_connector_create(struct drm_device *dev, | |||
| 854 | case DCB_CONNECTOR_TV_3: | 855 | case DCB_CONNECTOR_TV_3: |
| 855 | nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; | 856 | nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; |
| 856 | break; | 857 | break; |
| 858 | case DCB_CONNECTOR_DP: | ||
| 859 | case DCB_CONNECTOR_eDP: | ||
| 860 | case DCB_CONNECTOR_HDMI_0: | ||
| 861 | case DCB_CONNECTOR_HDMI_1: | ||
| 862 | case DCB_CONNECTOR_DVI_I: | ||
| 863 | case DCB_CONNECTOR_DVI_D: | ||
| 864 | if (dev_priv->card_type >= NV_50) | ||
| 865 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 866 | else | ||
| 867 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
| 868 | /* fall-through */ | ||
| 857 | default: | 869 | default: |
| 858 | nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; | 870 | nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; |
| 859 | 871 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 9d7928f40fdf..74e6b4ed12c0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
| @@ -101,5 +101,6 @@ nouveau_user_framebuffer_create(struct drm_device *dev, | |||
| 101 | 101 | ||
| 102 | const struct drm_mode_config_funcs nouveau_mode_config_funcs = { | 102 | const struct drm_mode_config_funcs nouveau_mode_config_funcs = { |
| 103 | .fb_create = nouveau_user_framebuffer_create, | 103 | .fb_create = nouveau_user_framebuffer_create, |
| 104 | .output_poll_changed = nouveau_fbcon_output_poll_changed, | ||
| 104 | }; | 105 | }; |
| 105 | 106 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 5b47b79f45e8..94d8dd27bde8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -624,6 +624,7 @@ struct drm_nouveau_private { | |||
| 624 | } debugfs; | 624 | } debugfs; |
| 625 | 625 | ||
| 626 | struct nouveau_fbdev *nfbdev; | 626 | struct nouveau_fbdev *nfbdev; |
| 627 | struct apertures_struct *apertures; | ||
| 627 | }; | 628 | }; |
| 628 | 629 | ||
| 629 | static inline struct drm_nouveau_private * | 630 | static inline struct drm_nouveau_private * |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index f29fa8c117ce..fd4a2df715e9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -152,44 +152,6 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
| 152 | *blue = nv_crtc->lut.b[regno]; | 152 | *blue = nv_crtc->lut.b[regno]; |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | #if defined(__i386__) || defined(__x86_64__) | ||
| 156 | static bool | ||
| 157 | nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev) | ||
| 158 | { | ||
| 159 | struct pci_dev *pdev = dev->pdev; | ||
| 160 | int ramin; | ||
| 161 | |||
| 162 | if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB && | ||
| 163 | screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) | ||
| 164 | return false; | ||
| 165 | |||
| 166 | if (screen_info.lfb_base < pci_resource_start(pdev, 1)) | ||
| 167 | goto not_fb; | ||
| 168 | |||
| 169 | if (screen_info.lfb_base + screen_info.lfb_size >= | ||
| 170 | pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1)) | ||
| 171 | goto not_fb; | ||
| 172 | |||
| 173 | return true; | ||
| 174 | not_fb: | ||
| 175 | ramin = 2; | ||
| 176 | if (pci_resource_len(pdev, ramin) == 0) { | ||
| 177 | ramin = 3; | ||
| 178 | if (pci_resource_len(pdev, ramin) == 0) | ||
| 179 | return false; | ||
| 180 | } | ||
| 181 | |||
| 182 | if (screen_info.lfb_base < pci_resource_start(pdev, ramin)) | ||
| 183 | return false; | ||
| 184 | |||
| 185 | if (screen_info.lfb_base + screen_info.lfb_size >= | ||
| 186 | pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin)) | ||
| 187 | return false; | ||
| 188 | |||
| 189 | return true; | ||
| 190 | } | ||
| 191 | #endif | ||
| 192 | |||
| 193 | static void | 155 | static void |
| 194 | nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev) | 156 | nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev) |
| 195 | { | 157 | { |
| @@ -219,7 +181,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, | |||
| 219 | struct nouveau_framebuffer *nouveau_fb; | 181 | struct nouveau_framebuffer *nouveau_fb; |
| 220 | struct nouveau_bo *nvbo; | 182 | struct nouveau_bo *nvbo; |
| 221 | struct drm_mode_fb_cmd mode_cmd; | 183 | struct drm_mode_fb_cmd mode_cmd; |
| 222 | struct device *device = &dev->pdev->dev; | 184 | struct pci_dev *pdev = dev->pdev; |
| 185 | struct device *device = &pdev->dev; | ||
| 223 | int size, ret; | 186 | int size, ret; |
| 224 | 187 | ||
| 225 | mode_cmd.width = sizes->surface_width; | 188 | mode_cmd.width = sizes->surface_width; |
| @@ -299,28 +262,14 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, | |||
| 299 | drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); | 262 | drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); |
| 300 | 263 | ||
| 301 | /* FIXME: we really shouldn't expose mmio space at all */ | 264 | /* FIXME: we really shouldn't expose mmio space at all */ |
| 302 | info->fix.mmio_start = pci_resource_start(dev->pdev, 1); | 265 | info->fix.mmio_start = pci_resource_start(pdev, 1); |
| 303 | info->fix.mmio_len = pci_resource_len(dev->pdev, 1); | 266 | info->fix.mmio_len = pci_resource_len(pdev, 1); |
| 304 | 267 | ||
| 305 | /* Set aperture base/size for vesafb takeover */ | 268 | /* Set aperture base/size for vesafb takeover */ |
| 306 | #if defined(__i386__) || defined(__x86_64__) | 269 | info->apertures = dev_priv->apertures; |
| 307 | if (nouveau_fbcon_has_vesafb_or_efifb(dev)) { | 270 | if (!info->apertures) { |
| 308 | /* Some NVIDIA VBIOS' are stupid and decide to put the | 271 | ret = -ENOMEM; |
| 309 | * framebuffer in the middle of the PRAMIN BAR for | 272 | goto out_unref; |
| 310 | * whatever reason. We need to know the exact lfb_base | ||
| 311 | * to get vesafb kicked off, and the only reliable way | ||
| 312 | * we have left is to find out lfb_base the same way | ||
| 313 | * vesafb did. | ||
| 314 | */ | ||
| 315 | info->aperture_base = screen_info.lfb_base; | ||
| 316 | info->aperture_size = screen_info.lfb_size; | ||
| 317 | if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB) | ||
| 318 | info->aperture_size *= 65536; | ||
| 319 | } else | ||
| 320 | #endif | ||
| 321 | { | ||
| 322 | info->aperture_base = info->fix.mmio_start; | ||
| 323 | info->aperture_size = info->fix.mmio_len; | ||
| 324 | } | 273 | } |
| 325 | 274 | ||
| 326 | info->pixmap.size = 64*1024; | 275 | info->pixmap.size = 64*1024; |
| @@ -377,15 +326,11 @@ nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper, | |||
| 377 | return new_fb; | 326 | return new_fb; |
| 378 | } | 327 | } |
| 379 | 328 | ||
| 380 | void nouveau_fbcon_hotplug(struct drm_device *dev) | 329 | void |
| 330 | nouveau_fbcon_output_poll_changed(struct drm_device *dev) | ||
| 381 | { | 331 | { |
| 382 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 332 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 383 | drm_helper_fb_hpd_irq_event(&dev_priv->nfbdev->helper); | 333 | drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper); |
| 384 | } | ||
| 385 | |||
| 386 | static void nouveau_fbcon_output_status_changed(struct drm_fb_helper *fb_helper) | ||
| 387 | { | ||
| 388 | drm_helper_fb_hotplug_event(fb_helper, true); | ||
| 389 | } | 334 | } |
| 390 | 335 | ||
| 391 | int | 336 | int |
| @@ -425,7 +370,6 @@ static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { | |||
| 425 | .gamma_set = nouveau_fbcon_gamma_set, | 370 | .gamma_set = nouveau_fbcon_gamma_set, |
| 426 | .gamma_get = nouveau_fbcon_gamma_get, | 371 | .gamma_get = nouveau_fbcon_gamma_get, |
| 427 | .fb_probe = nouveau_fbcon_find_or_create_single, | 372 | .fb_probe = nouveau_fbcon_find_or_create_single, |
| 428 | .fb_output_status_changed = nouveau_fbcon_output_status_changed, | ||
| 429 | }; | 373 | }; |
| 430 | 374 | ||
| 431 | 375 | ||
| @@ -442,8 +386,7 @@ int nouveau_fbcon_init(struct drm_device *dev) | |||
| 442 | dev_priv->nfbdev = nfbdev; | 386 | dev_priv->nfbdev = nfbdev; |
| 443 | nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; | 387 | nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; |
| 444 | 388 | ||
| 445 | drm_fb_helper_init(dev, &nfbdev->helper, | 389 | drm_fb_helper_init(dev, &nfbdev->helper, 2, 4); |
| 446 | 2, 4, true); | ||
| 447 | drm_fb_helper_single_add_all_connectors(&nfbdev->helper); | 390 | drm_fb_helper_single_add_all_connectors(&nfbdev->helper); |
| 448 | drm_fb_helper_initial_config(&nfbdev->helper, 32); | 391 | drm_fb_helper_initial_config(&nfbdev->helper, 32); |
| 449 | return 0; | 392 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index bf8e00d4de65..e7e12684c37e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h | |||
| @@ -58,6 +58,6 @@ void nouveau_fbcon_zfill_all(struct drm_device *dev); | |||
| 58 | void nouveau_fbcon_save_disable_accel(struct drm_device *dev); | 58 | void nouveau_fbcon_save_disable_accel(struct drm_device *dev); |
| 59 | void nouveau_fbcon_restore_accel(struct drm_device *dev); | 59 | void nouveau_fbcon_restore_accel(struct drm_device *dev); |
| 60 | 60 | ||
| 61 | void nouveau_fbcon_hotplug(struct drm_device *dev); | 61 | void nouveau_fbcon_output_poll_changed(struct drm_device *dev); |
| 62 | #endif /* __NV50_FBCON_H__ */ | 62 | #endif /* __NV50_FBCON_H__ */ |
| 63 | 63 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c index 32f0e495464c..f731c5f60536 100644 --- a/drivers/gpu/drm/nouveau/nouveau_grctx.c +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c | |||
| @@ -68,13 +68,12 @@ nouveau_grctx_prog_load(struct drm_device *dev) | |||
| 68 | return ret; | 68 | return ret; |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL); | 71 | pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL); |
| 72 | if (!pgraph->ctxprog) { | 72 | if (!pgraph->ctxprog) { |
| 73 | NV_ERROR(dev, "OOM copying ctxprog\n"); | 73 | NV_ERROR(dev, "OOM copying ctxprog\n"); |
| 74 | release_firmware(fw); | 74 | release_firmware(fw); |
| 75 | return -ENOMEM; | 75 | return -ENOMEM; |
| 76 | } | 76 | } |
| 77 | memcpy(pgraph->ctxprog, fw->data, fw->size); | ||
| 78 | 77 | ||
| 79 | cp = pgraph->ctxprog; | 78 | cp = pgraph->ctxprog; |
| 80 | if (le32_to_cpu(cp->signature) != 0x5043564e || | 79 | if (le32_to_cpu(cp->signature) != 0x5043564e || |
| @@ -97,14 +96,13 @@ nouveau_grctx_prog_load(struct drm_device *dev) | |||
| 97 | return ret; | 96 | return ret; |
| 98 | } | 97 | } |
| 99 | 98 | ||
| 100 | pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); | 99 | pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL); |
| 101 | if (!pgraph->ctxvals) { | 100 | if (!pgraph->ctxvals) { |
| 102 | NV_ERROR(dev, "OOM copying ctxvals\n"); | 101 | NV_ERROR(dev, "OOM copying ctxvals\n"); |
| 103 | release_firmware(fw); | 102 | release_firmware(fw); |
| 104 | nouveau_grctx_fini(dev); | 103 | nouveau_grctx_fini(dev); |
| 105 | return -ENOMEM; | 104 | return -ENOMEM; |
| 106 | } | 105 | } |
| 107 | memcpy(pgraph->ctxvals, fw->data, fw->size); | ||
| 108 | 106 | ||
| 109 | cv = (void *)pgraph->ctxvals; | 107 | cv = (void *)pgraph->ctxvals; |
| 110 | if (le32_to_cpu(cv->signature) != 0x5643564e || | 108 | if (le32_to_cpu(cv->signature) != 0x5643564e || |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 92100a9678ba..e632339c323e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
| @@ -516,8 +516,10 @@ nouveau_card_init(struct drm_device *dev) | |||
| 516 | 516 | ||
| 517 | dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; | 517 | dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; |
| 518 | 518 | ||
| 519 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 519 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
| 520 | nouveau_fbcon_init(dev); | 520 | nouveau_fbcon_init(dev); |
| 521 | drm_kms_helper_poll_init(dev); | ||
| 522 | } | ||
| 521 | 523 | ||
| 522 | return 0; | 524 | return 0; |
| 523 | 525 | ||
| @@ -639,6 +641,48 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev) | |||
| 639 | #endif | 641 | #endif |
| 640 | } | 642 | } |
| 641 | 643 | ||
| 644 | static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev) | ||
| 645 | { | ||
| 646 | struct pci_dev *pdev = dev->pdev; | ||
| 647 | struct apertures_struct *aper = alloc_apertures(3); | ||
| 648 | if (!aper) | ||
| 649 | return NULL; | ||
| 650 | |||
| 651 | aper->ranges[0].base = pci_resource_start(pdev, 1); | ||
| 652 | aper->ranges[0].size = pci_resource_len(pdev, 1); | ||
| 653 | aper->count = 1; | ||
| 654 | |||
| 655 | if (pci_resource_len(pdev, 2)) { | ||
| 656 | aper->ranges[aper->count].base = pci_resource_start(pdev, 2); | ||
| 657 | aper->ranges[aper->count].size = pci_resource_len(pdev, 2); | ||
| 658 | aper->count++; | ||
| 659 | } | ||
| 660 | |||
| 661 | if (pci_resource_len(pdev, 3)) { | ||
| 662 | aper->ranges[aper->count].base = pci_resource_start(pdev, 3); | ||
| 663 | aper->ranges[aper->count].size = pci_resource_len(pdev, 3); | ||
| 664 | aper->count++; | ||
| 665 | } | ||
| 666 | |||
| 667 | return aper; | ||
| 668 | } | ||
| 669 | |||
| 670 | static int nouveau_remove_conflicting_drivers(struct drm_device *dev) | ||
| 671 | { | ||
| 672 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 673 | bool primary = false; | ||
| 674 | dev_priv->apertures = nouveau_get_apertures(dev); | ||
| 675 | if (!dev_priv->apertures) | ||
| 676 | return -ENOMEM; | ||
| 677 | |||
| 678 | #ifdef CONFIG_X86 | ||
| 679 | primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; | ||
| 680 | #endif | ||
| 681 | |||
| 682 | remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary); | ||
| 683 | return 0; | ||
| 684 | } | ||
| 685 | |||
| 642 | int nouveau_load(struct drm_device *dev, unsigned long flags) | 686 | int nouveau_load(struct drm_device *dev, unsigned long flags) |
| 643 | { | 687 | { |
| 644 | struct drm_nouveau_private *dev_priv; | 688 | struct drm_nouveau_private *dev_priv; |
| @@ -726,6 +770,12 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
| 726 | NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", | 770 | NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", |
| 727 | dev_priv->card_type, reg0); | 771 | dev_priv->card_type, reg0); |
| 728 | 772 | ||
| 773 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
| 774 | int ret = nouveau_remove_conflicting_drivers(dev); | ||
| 775 | if (ret) | ||
| 776 | return ret; | ||
| 777 | } | ||
| 778 | |||
| 729 | /* map larger RAMIN aperture on NV40 cards */ | 779 | /* map larger RAMIN aperture on NV40 cards */ |
| 730 | dev_priv->ramin = NULL; | 780 | dev_priv->ramin = NULL; |
| 731 | if (dev_priv->card_type >= NV_40) { | 781 | if (dev_priv->card_type >= NV_40) { |
| @@ -796,6 +846,7 @@ int nouveau_unload(struct drm_device *dev) | |||
| 796 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 846 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 797 | 847 | ||
| 798 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 848 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
| 849 | drm_kms_helper_poll_fini(dev); | ||
| 799 | nouveau_fbcon_fini(dev); | 850 | nouveau_fbcon_fini(dev); |
| 800 | if (dev_priv->card_type >= NV_50) | 851 | if (dev_priv->card_type >= NV_50) |
| 801 | nv50_display_destroy(dev); | 852 | nv50_display_destroy(dev); |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index f9b304866e66..34156b69594f 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -947,7 +947,7 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) | |||
| 947 | if (dev_priv->chipset >= 0x90) | 947 | if (dev_priv->chipset >= 0x90) |
| 948 | nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); | 948 | nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); |
| 949 | 949 | ||
| 950 | nouveau_fbcon_hotplug(dev); | 950 | drm_helper_hpd_irq_event(dev); |
| 951 | } | 951 | } |
| 952 | 952 | ||
| 953 | void | 953 | void |
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index 1c02d23f6fcc..80c5b3ea28b4 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config DRM_RADEON_KMS | 1 | config DRM_RADEON_KMS |
| 2 | bool "Enable modesetting on radeon by default - NEW DRIVER" | 2 | bool "Enable modesetting on radeon by default - NEW DRIVER" |
| 3 | depends on DRM_RADEON | 3 | depends on DRM_RADEON |
| 4 | depends on POWER_SUPPLY | ||
| 4 | help | 5 | help |
| 5 | Choose this option if you want kernel modesetting enabled by default. | 6 | Choose this option if you want kernel modesetting enabled by default. |
| 6 | 7 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 3feca6aec4c4..03dd6c41dc19 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | #include <drm/drmP.h> | 26 | #include <drm/drmP.h> |
| 27 | #include <drm/drm_crtc_helper.h> | 27 | #include <drm/drm_crtc_helper.h> |
| 28 | #include <drm/radeon_drm.h> | 28 | #include <drm/radeon_drm.h> |
| 29 | #include "radeon_fixed.h" | 29 | #include <drm/drm_fixed.h> |
| 30 | #include "radeon.h" | 30 | #include "radeon.h" |
| 31 | #include "atom.h" | 31 | #include "atom.h" |
| 32 | #include "atom-bits.h" | 32 | #include "atom-bits.h" |
| @@ -245,6 +245,9 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 245 | 245 | ||
| 246 | switch (mode) { | 246 | switch (mode) { |
| 247 | case DRM_MODE_DPMS_ON: | 247 | case DRM_MODE_DPMS_ON: |
| 248 | radeon_crtc->enabled = true; | ||
| 249 | /* adjust pm to dpms changes BEFORE enabling crtcs */ | ||
| 250 | radeon_pm_compute_clocks(rdev); | ||
| 248 | atombios_enable_crtc(crtc, ATOM_ENABLE); | 251 | atombios_enable_crtc(crtc, ATOM_ENABLE); |
| 249 | if (ASIC_IS_DCE3(rdev)) | 252 | if (ASIC_IS_DCE3(rdev)) |
| 250 | atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); | 253 | atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); |
| @@ -260,6 +263,9 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 260 | if (ASIC_IS_DCE3(rdev)) | 263 | if (ASIC_IS_DCE3(rdev)) |
| 261 | atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); | 264 | atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); |
| 262 | atombios_enable_crtc(crtc, ATOM_DISABLE); | 265 | atombios_enable_crtc(crtc, ATOM_DISABLE); |
| 266 | radeon_crtc->enabled = false; | ||
| 267 | /* adjust pm to dpms changes AFTER disabling crtcs */ | ||
| 268 | radeon_pm_compute_clocks(rdev); | ||
| 263 | break; | 269 | break; |
| 264 | } | 270 | } |
| 265 | } | 271 | } |
| @@ -1156,6 +1162,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, | |||
| 1156 | struct drm_display_mode *mode, | 1162 | struct drm_display_mode *mode, |
| 1157 | struct drm_display_mode *adjusted_mode) | 1163 | struct drm_display_mode *adjusted_mode) |
| 1158 | { | 1164 | { |
| 1165 | struct drm_device *dev = crtc->dev; | ||
| 1166 | struct radeon_device *rdev = dev->dev_private; | ||
| 1167 | |||
| 1168 | /* adjust pm to upcoming mode change */ | ||
| 1169 | radeon_pm_compute_clocks(rdev); | ||
| 1170 | |||
| 1159 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | 1171 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) |
| 1160 | return false; | 1172 | return false; |
| 1161 | return true; | 1173 | return true; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index b3d168fb89e5..8c8e4d3cbaa3 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -39,6 +39,47 @@ | |||
| 39 | static void evergreen_gpu_init(struct radeon_device *rdev); | 39 | static void evergreen_gpu_init(struct radeon_device *rdev); |
| 40 | void evergreen_fini(struct radeon_device *rdev); | 40 | void evergreen_fini(struct radeon_device *rdev); |
| 41 | 41 | ||
| 42 | void evergreen_pm_misc(struct radeon_device *rdev) | ||
| 43 | { | ||
| 44 | |||
| 45 | } | ||
| 46 | |||
| 47 | void evergreen_pm_prepare(struct radeon_device *rdev) | ||
| 48 | { | ||
| 49 | struct drm_device *ddev = rdev->ddev; | ||
| 50 | struct drm_crtc *crtc; | ||
| 51 | struct radeon_crtc *radeon_crtc; | ||
| 52 | u32 tmp; | ||
| 53 | |||
| 54 | /* disable any active CRTCs */ | ||
| 55 | list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { | ||
| 56 | radeon_crtc = to_radeon_crtc(crtc); | ||
| 57 | if (radeon_crtc->enabled) { | ||
| 58 | tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset); | ||
| 59 | tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; | ||
| 60 | WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); | ||
| 61 | } | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | void evergreen_pm_finish(struct radeon_device *rdev) | ||
| 66 | { | ||
| 67 | struct drm_device *ddev = rdev->ddev; | ||
| 68 | struct drm_crtc *crtc; | ||
| 69 | struct radeon_crtc *radeon_crtc; | ||
| 70 | u32 tmp; | ||
| 71 | |||
| 72 | /* enable any active CRTCs */ | ||
| 73 | list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { | ||
| 74 | radeon_crtc = to_radeon_crtc(crtc); | ||
| 75 | if (radeon_crtc->enabled) { | ||
| 76 | tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset); | ||
| 77 | tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; | ||
| 78 | WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); | ||
| 79 | } | ||
| 80 | } | ||
| 81 | } | ||
| 82 | |||
| 42 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) | 83 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) |
| 43 | { | 84 | { |
| 44 | bool connected = false; | 85 | bool connected = false; |
| @@ -1418,6 +1459,7 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
| 1418 | u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; | 1459 | u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; |
| 1419 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; | 1460 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; |
| 1420 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; | 1461 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; |
| 1462 | u32 grbm_int_cntl = 0; | ||
| 1421 | 1463 | ||
| 1422 | if (!rdev->irq.installed) { | 1464 | if (!rdev->irq.installed) { |
| 1423 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 1465 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); |
| @@ -1490,8 +1532,13 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
| 1490 | DRM_DEBUG("evergreen_irq_set: hpd 6\n"); | 1532 | DRM_DEBUG("evergreen_irq_set: hpd 6\n"); |
| 1491 | hpd6 |= DC_HPDx_INT_EN; | 1533 | hpd6 |= DC_HPDx_INT_EN; |
| 1492 | } | 1534 | } |
| 1535 | if (rdev->irq.gui_idle) { | ||
| 1536 | DRM_DEBUG("gui idle\n"); | ||
| 1537 | grbm_int_cntl |= GUI_IDLE_INT_ENABLE; | ||
| 1538 | } | ||
| 1493 | 1539 | ||
| 1494 | WREG32(CP_INT_CNTL, cp_int_cntl); | 1540 | WREG32(CP_INT_CNTL, cp_int_cntl); |
| 1541 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); | ||
| 1495 | 1542 | ||
| 1496 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); | 1543 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); |
| 1497 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); | 1544 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); |
| @@ -1853,6 +1900,11 @@ restart_ih: | |||
| 1853 | case 181: /* CP EOP event */ | 1900 | case 181: /* CP EOP event */ |
| 1854 | DRM_DEBUG("IH: CP EOP\n"); | 1901 | DRM_DEBUG("IH: CP EOP\n"); |
| 1855 | break; | 1902 | break; |
| 1903 | case 233: /* GUI IDLE */ | ||
| 1904 | DRM_DEBUG("IH: CP EOP\n"); | ||
| 1905 | rdev->pm.gui_idle = true; | ||
| 1906 | wake_up(&rdev->irq.idle_queue); | ||
| 1907 | break; | ||
| 1856 | default: | 1908 | default: |
| 1857 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | 1909 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
| 1858 | break; | 1910 | break; |
| @@ -2063,8 +2115,6 @@ int evergreen_init(struct radeon_device *rdev) | |||
| 2063 | r = radeon_clocks_init(rdev); | 2115 | r = radeon_clocks_init(rdev); |
| 2064 | if (r) | 2116 | if (r) |
| 2065 | return r; | 2117 | return r; |
| 2066 | /* Initialize power management */ | ||
| 2067 | radeon_pm_init(rdev); | ||
| 2068 | /* Fence driver */ | 2118 | /* Fence driver */ |
| 2069 | r = radeon_fence_driver_init(rdev); | 2119 | r = radeon_fence_driver_init(rdev); |
| 2070 | if (r) | 2120 | if (r) |
| @@ -2126,7 +2176,6 @@ int evergreen_init(struct radeon_device *rdev) | |||
| 2126 | 2176 | ||
| 2127 | void evergreen_fini(struct radeon_device *rdev) | 2177 | void evergreen_fini(struct radeon_device *rdev) |
| 2128 | { | 2178 | { |
| 2129 | radeon_pm_fini(rdev); | ||
| 2130 | /*r600_blit_fini(rdev);*/ | 2179 | /*r600_blit_fini(rdev);*/ |
| 2131 | r700_cp_fini(rdev); | 2180 | r700_cp_fini(rdev); |
| 2132 | r600_wb_fini(rdev); | 2181 | r600_wb_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index f7c7c9643433..af86af836f13 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h | |||
| @@ -164,8 +164,12 @@ | |||
| 164 | #define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0) | 164 | #define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0) |
| 165 | 165 | ||
| 166 | /* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */ | 166 | /* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */ |
| 167 | #define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34 | ||
| 167 | #define EVERGREEN_CRTC_CONTROL 0x6e70 | 168 | #define EVERGREEN_CRTC_CONTROL 0x6e70 |
| 168 | # define EVERGREEN_CRTC_MASTER_EN (1 << 0) | 169 | # define EVERGREEN_CRTC_MASTER_EN (1 << 0) |
| 170 | # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) | ||
| 171 | #define EVERGREEN_CRTC_STATUS 0x6e8c | ||
| 172 | #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 | ||
| 169 | #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 | 173 | #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 |
| 170 | 174 | ||
| 171 | #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 | 175 | #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 4de41b0ad5ce..cc004b05d63e 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #include "rs100d.h" | 37 | #include "rs100d.h" |
| 38 | #include "rv200d.h" | 38 | #include "rv200d.h" |
| 39 | #include "rv250d.h" | 39 | #include "rv250d.h" |
| 40 | #include "atom.h" | ||
| 40 | 41 | ||
| 41 | #include <linux/firmware.h> | 42 | #include <linux/firmware.h> |
| 42 | #include <linux/platform_device.h> | 43 | #include <linux/platform_device.h> |
| @@ -67,6 +68,264 @@ MODULE_FIRMWARE(FIRMWARE_R520); | |||
| 67 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 68 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
| 68 | */ | 69 | */ |
| 69 | 70 | ||
| 71 | void r100_pm_get_dynpm_state(struct radeon_device *rdev) | ||
| 72 | { | ||
| 73 | int i; | ||
| 74 | rdev->pm.dynpm_can_upclock = true; | ||
| 75 | rdev->pm.dynpm_can_downclock = true; | ||
| 76 | |||
| 77 | switch (rdev->pm.dynpm_planned_action) { | ||
| 78 | case DYNPM_ACTION_MINIMUM: | ||
| 79 | rdev->pm.requested_power_state_index = 0; | ||
| 80 | rdev->pm.dynpm_can_downclock = false; | ||
| 81 | break; | ||
| 82 | case DYNPM_ACTION_DOWNCLOCK: | ||
| 83 | if (rdev->pm.current_power_state_index == 0) { | ||
| 84 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; | ||
| 85 | rdev->pm.dynpm_can_downclock = false; | ||
| 86 | } else { | ||
| 87 | if (rdev->pm.active_crtc_count > 1) { | ||
| 88 | for (i = 0; i < rdev->pm.num_power_states; i++) { | ||
| 89 | if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) | ||
| 90 | continue; | ||
| 91 | else if (i >= rdev->pm.current_power_state_index) { | ||
| 92 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; | ||
| 93 | break; | ||
| 94 | } else { | ||
| 95 | rdev->pm.requested_power_state_index = i; | ||
| 96 | break; | ||
| 97 | } | ||
| 98 | } | ||
| 99 | } else | ||
| 100 | rdev->pm.requested_power_state_index = | ||
| 101 | rdev->pm.current_power_state_index - 1; | ||
| 102 | } | ||
| 103 | /* don't use the power state if crtcs are active and no display flag is set */ | ||
| 104 | if ((rdev->pm.active_crtc_count > 0) && | ||
| 105 | (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags & | ||
| 106 | RADEON_PM_MODE_NO_DISPLAY)) { | ||
| 107 | rdev->pm.requested_power_state_index++; | ||
| 108 | } | ||
| 109 | break; | ||
| 110 | case DYNPM_ACTION_UPCLOCK: | ||
| 111 | if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { | ||
| 112 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; | ||
| 113 | rdev->pm.dynpm_can_upclock = false; | ||
| 114 | } else { | ||
| 115 | if (rdev->pm.active_crtc_count > 1) { | ||
| 116 | for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { | ||
| 117 | if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) | ||
| 118 | continue; | ||
| 119 | else if (i <= rdev->pm.current_power_state_index) { | ||
| 120 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; | ||
| 121 | break; | ||
| 122 | } else { | ||
| 123 | rdev->pm.requested_power_state_index = i; | ||
| 124 | break; | ||
| 125 | } | ||
| 126 | } | ||
| 127 | } else | ||
| 128 | rdev->pm.requested_power_state_index = | ||
| 129 | rdev->pm.current_power_state_index + 1; | ||
| 130 | } | ||
| 131 | break; | ||
| 132 | case DYNPM_ACTION_DEFAULT: | ||
| 133 | rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; | ||
| 134 | rdev->pm.dynpm_can_upclock = false; | ||
| 135 | break; | ||
| 136 | case DYNPM_ACTION_NONE: | ||
| 137 | default: | ||
| 138 | DRM_ERROR("Requested mode for not defined action\n"); | ||
| 139 | return; | ||
| 140 | } | ||
| 141 | /* only one clock mode per power state */ | ||
| 142 | rdev->pm.requested_clock_mode_index = 0; | ||
| 143 | |||
| 144 | DRM_DEBUG("Requested: e: %d m: %d p: %d\n", | ||
| 145 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. | ||
| 146 | clock_info[rdev->pm.requested_clock_mode_index].sclk, | ||
| 147 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. | ||
| 148 | clock_info[rdev->pm.requested_clock_mode_index].mclk, | ||
| 149 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. | ||
| 150 | pcie_lanes); | ||
| 151 | } | ||
| 152 | |||
| 153 | void r100_pm_init_profile(struct radeon_device *rdev) | ||
| 154 | { | ||
| 155 | /* default */ | ||
| 156 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 157 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 158 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | ||
| 159 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | ||
| 160 | /* low sh */ | ||
| 161 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; | ||
| 162 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; | ||
| 163 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
| 164 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | ||
| 165 | /* high sh */ | ||
| 166 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; | ||
| 167 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 168 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | ||
| 169 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; | ||
| 170 | /* low mh */ | ||
| 171 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; | ||
| 172 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 173 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
| 174 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | ||
| 175 | /* high mh */ | ||
| 176 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; | ||
| 177 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 178 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | ||
| 179 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; | ||
| 180 | } | ||
| 181 | |||
| 182 | void r100_pm_misc(struct radeon_device *rdev) | ||
| 183 | { | ||
| 184 | int requested_index = rdev->pm.requested_power_state_index; | ||
| 185 | struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; | ||
| 186 | struct radeon_voltage *voltage = &ps->clock_info[0].voltage; | ||
| 187 | u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl; | ||
| 188 | |||
| 189 | if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { | ||
| 190 | if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { | ||
| 191 | tmp = RREG32(voltage->gpio.reg); | ||
| 192 | if (voltage->active_high) | ||
| 193 | tmp |= voltage->gpio.mask; | ||
| 194 | else | ||
| 195 | tmp &= ~(voltage->gpio.mask); | ||
| 196 | WREG32(voltage->gpio.reg, tmp); | ||
| 197 | if (voltage->delay) | ||
| 198 | udelay(voltage->delay); | ||
| 199 | } else { | ||
| 200 | tmp = RREG32(voltage->gpio.reg); | ||
| 201 | if (voltage->active_high) | ||
| 202 | tmp &= ~voltage->gpio.mask; | ||
| 203 | else | ||
| 204 | tmp |= voltage->gpio.mask; | ||
| 205 | WREG32(voltage->gpio.reg, tmp); | ||
| 206 | if (voltage->delay) | ||
| 207 | udelay(voltage->delay); | ||
| 208 | } | ||
| 209 | } | ||
| 210 | |||
| 211 | sclk_cntl = RREG32_PLL(SCLK_CNTL); | ||
| 212 | sclk_cntl2 = RREG32_PLL(SCLK_CNTL2); | ||
| 213 | sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3); | ||
| 214 | sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL); | ||
| 215 | sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3); | ||
| 216 | if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { | ||
| 217 | sclk_more_cntl |= REDUCED_SPEED_SCLK_EN; | ||
| 218 | if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE) | ||
| 219 | sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE; | ||
| 220 | else | ||
| 221 | sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE; | ||
| 222 | if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) | ||
| 223 | sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0); | ||
| 224 | else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) | ||
| 225 | sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2); | ||
| 226 | } else | ||
| 227 | sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN; | ||
| 228 | |||
| 229 | if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { | ||
| 230 | sclk_more_cntl |= IO_CG_VOLTAGE_DROP; | ||
| 231 | if (voltage->delay) { | ||
| 232 | sclk_more_cntl |= VOLTAGE_DROP_SYNC; | ||
| 233 | switch (voltage->delay) { | ||
| 234 | case 33: | ||
| 235 | sclk_more_cntl |= VOLTAGE_DELAY_SEL(0); | ||
| 236 | break; | ||
| 237 | case 66: | ||
| 238 | sclk_more_cntl |= VOLTAGE_DELAY_SEL(1); | ||
| 239 | break; | ||
| 240 | case 99: | ||
| 241 | sclk_more_cntl |= VOLTAGE_DELAY_SEL(2); | ||
| 242 | break; | ||
| 243 | case 132: | ||
| 244 | sclk_more_cntl |= VOLTAGE_DELAY_SEL(3); | ||
| 245 | break; | ||
| 246 | } | ||
| 247 | } else | ||
| 248 | sclk_more_cntl &= ~VOLTAGE_DROP_SYNC; | ||
| 249 | } else | ||
| 250 | sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP; | ||
| 251 | |||
| 252 | if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) | ||
| 253 | sclk_cntl &= ~FORCE_HDP; | ||
| 254 | else | ||
| 255 | sclk_cntl |= FORCE_HDP; | ||
| 256 | |||
| 257 | WREG32_PLL(SCLK_CNTL, sclk_cntl); | ||
| 258 | WREG32_PLL(SCLK_CNTL2, sclk_cntl2); | ||
| 259 | WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl); | ||
| 260 | |||
| 261 | /* set pcie lanes */ | ||
| 262 | if ((rdev->flags & RADEON_IS_PCIE) && | ||
| 263 | !(rdev->flags & RADEON_IS_IGP) && | ||
| 264 | rdev->asic->set_pcie_lanes && | ||
| 265 | (ps->pcie_lanes != | ||
| 266 | rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { | ||
| 267 | radeon_set_pcie_lanes(rdev, | ||
| 268 | ps->pcie_lanes); | ||
| 269 | DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes); | ||
| 270 | } | ||
| 271 | } | ||
| 272 | |||
| 273 | void r100_pm_prepare(struct radeon_device *rdev) | ||
| 274 | { | ||
| 275 | struct drm_device *ddev = rdev->ddev; | ||
| 276 | struct drm_crtc *crtc; | ||
| 277 | struct radeon_crtc *radeon_crtc; | ||
| 278 | u32 tmp; | ||
| 279 | |||
| 280 | /* disable any active CRTCs */ | ||
| 281 | list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { | ||
| 282 | radeon_crtc = to_radeon_crtc(crtc); | ||
| 283 | if (radeon_crtc->enabled) { | ||
| 284 | if (radeon_crtc->crtc_id) { | ||
| 285 | tmp = RREG32(RADEON_CRTC2_GEN_CNTL); | ||
| 286 | tmp |= RADEON_CRTC2_DISP_REQ_EN_B; | ||
| 287 | WREG32(RADEON_CRTC2_GEN_CNTL, tmp); | ||
| 288 | } else { | ||
| 289 | tmp = RREG32(RADEON_CRTC_GEN_CNTL); | ||
| 290 | tmp |= RADEON_CRTC_DISP_REQ_EN_B; | ||
| 291 | WREG32(RADEON_CRTC_GEN_CNTL, tmp); | ||
| 292 | } | ||
| 293 | } | ||
| 294 | } | ||
| 295 | } | ||
| 296 | |||
| 297 | void r100_pm_finish(struct radeon_device *rdev) | ||
| 298 | { | ||
| 299 | struct drm_device *ddev = rdev->ddev; | ||
| 300 | struct drm_crtc *crtc; | ||
| 301 | struct radeon_crtc *radeon_crtc; | ||
| 302 | u32 tmp; | ||
| 303 | |||
| 304 | /* enable any active CRTCs */ | ||
| 305 | list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { | ||
| 306 | radeon_crtc = to_radeon_crtc(crtc); | ||
| 307 | if (radeon_crtc->enabled) { | ||
| 308 | if (radeon_crtc->crtc_id) { | ||
| 309 | tmp = RREG32(RADEON_CRTC2_GEN_CNTL); | ||
| 310 | tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B; | ||
| 311 | WREG32(RADEON_CRTC2_GEN_CNTL, tmp); | ||
| 312 | } else { | ||
| 313 | tmp = RREG32(RADEON_CRTC_GEN_CNTL); | ||
| 314 | tmp &= ~RADEON_CRTC_DISP_REQ_EN_B; | ||
| 315 | WREG32(RADEON_CRTC_GEN_CNTL, tmp); | ||
| 316 | } | ||
| 317 | } | ||
| 318 | } | ||
| 319 | } | ||
| 320 | |||
| 321 | bool r100_gui_idle(struct radeon_device *rdev) | ||
| 322 | { | ||
| 323 | if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE) | ||
| 324 | return false; | ||
| 325 | else | ||
| 326 | return true; | ||
| 327 | } | ||
| 328 | |||
| 70 | /* hpd for digital panel detect/disconnect */ | 329 | /* hpd for digital panel detect/disconnect */ |
| 71 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) | 330 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) |
| 72 | { | 331 | { |
| @@ -254,6 +513,9 @@ int r100_irq_set(struct radeon_device *rdev) | |||
| 254 | if (rdev->irq.sw_int) { | 513 | if (rdev->irq.sw_int) { |
| 255 | tmp |= RADEON_SW_INT_ENABLE; | 514 | tmp |= RADEON_SW_INT_ENABLE; |
| 256 | } | 515 | } |
| 516 | if (rdev->irq.gui_idle) { | ||
| 517 | tmp |= RADEON_GUI_IDLE_MASK; | ||
| 518 | } | ||
| 257 | if (rdev->irq.crtc_vblank_int[0]) { | 519 | if (rdev->irq.crtc_vblank_int[0]) { |
| 258 | tmp |= RADEON_CRTC_VBLANK_MASK; | 520 | tmp |= RADEON_CRTC_VBLANK_MASK; |
| 259 | } | 521 | } |
| @@ -288,6 +550,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev) | |||
| 288 | RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | | 550 | RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | |
| 289 | RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; | 551 | RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; |
| 290 | 552 | ||
| 553 | /* the interrupt works, but the status bit is permanently asserted */ | ||
| 554 | if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { | ||
| 555 | if (!rdev->irq.gui_idle_acked) | ||
| 556 | irq_mask |= RADEON_GUI_IDLE_STAT; | ||
| 557 | } | ||
| 558 | |||
| 291 | if (irqs) { | 559 | if (irqs) { |
| 292 | WREG32(RADEON_GEN_INT_STATUS, irqs); | 560 | WREG32(RADEON_GEN_INT_STATUS, irqs); |
| 293 | } | 561 | } |
| @@ -299,6 +567,9 @@ int r100_irq_process(struct radeon_device *rdev) | |||
| 299 | uint32_t status, msi_rearm; | 567 | uint32_t status, msi_rearm; |
| 300 | bool queue_hotplug = false; | 568 | bool queue_hotplug = false; |
| 301 | 569 | ||
| 570 | /* reset gui idle ack. the status bit is broken */ | ||
| 571 | rdev->irq.gui_idle_acked = false; | ||
| 572 | |||
| 302 | status = r100_irq_ack(rdev); | 573 | status = r100_irq_ack(rdev); |
| 303 | if (!status) { | 574 | if (!status) { |
| 304 | return IRQ_NONE; | 575 | return IRQ_NONE; |
| @@ -311,6 +582,12 @@ int r100_irq_process(struct radeon_device *rdev) | |||
| 311 | if (status & RADEON_SW_INT_TEST) { | 582 | if (status & RADEON_SW_INT_TEST) { |
| 312 | radeon_fence_process(rdev); | 583 | radeon_fence_process(rdev); |
| 313 | } | 584 | } |
| 585 | /* gui idle interrupt */ | ||
| 586 | if (status & RADEON_GUI_IDLE_STAT) { | ||
| 587 | rdev->irq.gui_idle_acked = true; | ||
| 588 | rdev->pm.gui_idle = true; | ||
| 589 | wake_up(&rdev->irq.idle_queue); | ||
| 590 | } | ||
| 314 | /* Vertical blank interrupts */ | 591 | /* Vertical blank interrupts */ |
| 315 | if (status & RADEON_CRTC_VBLANK_STAT) { | 592 | if (status & RADEON_CRTC_VBLANK_STAT) { |
| 316 | drm_handle_vblank(rdev->ddev, 0); | 593 | drm_handle_vblank(rdev->ddev, 0); |
| @@ -332,6 +609,8 @@ int r100_irq_process(struct radeon_device *rdev) | |||
| 332 | } | 609 | } |
| 333 | status = r100_irq_ack(rdev); | 610 | status = r100_irq_ack(rdev); |
| 334 | } | 611 | } |
| 612 | /* reset gui idle ack. the status bit is broken */ | ||
| 613 | rdev->irq.gui_idle_acked = false; | ||
| 335 | if (queue_hotplug) | 614 | if (queue_hotplug) |
| 336 | queue_work(rdev->wq, &rdev->hotplug_work); | 615 | queue_work(rdev->wq, &rdev->hotplug_work); |
| 337 | if (rdev->msi_enabled) { | 616 | if (rdev->msi_enabled) { |
| @@ -2364,53 +2643,53 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2364 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; | 2643 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; |
| 2365 | uint32_t temp, data, mem_trcd, mem_trp, mem_tras; | 2644 | uint32_t temp, data, mem_trcd, mem_trp, mem_tras; |
| 2366 | fixed20_12 memtcas_ff[8] = { | 2645 | fixed20_12 memtcas_ff[8] = { |
| 2367 | fixed_init(1), | 2646 | dfixed_init(1), |
| 2368 | fixed_init(2), | 2647 | dfixed_init(2), |
| 2369 | fixed_init(3), | 2648 | dfixed_init(3), |
| 2370 | fixed_init(0), | 2649 | dfixed_init(0), |
| 2371 | fixed_init_half(1), | 2650 | dfixed_init_half(1), |
| 2372 | fixed_init_half(2), | 2651 | dfixed_init_half(2), |
| 2373 | fixed_init(0), | 2652 | dfixed_init(0), |
| 2374 | }; | 2653 | }; |
| 2375 | fixed20_12 memtcas_rs480_ff[8] = { | 2654 | fixed20_12 memtcas_rs480_ff[8] = { |
| 2376 | fixed_init(0), | 2655 | dfixed_init(0), |
| 2377 | fixed_init(1), | 2656 | dfixed_init(1), |
| 2378 | fixed_init(2), | 2657 | dfixed_init(2), |
| 2379 | fixed_init(3), | 2658 | dfixed_init(3), |
| 2380 | fixed_init(0), | 2659 | dfixed_init(0), |
| 2381 | fixed_init_half(1), | 2660 | dfixed_init_half(1), |
| 2382 | fixed_init_half(2), | 2661 | dfixed_init_half(2), |
| 2383 | fixed_init_half(3), | 2662 | dfixed_init_half(3), |
| 2384 | }; | 2663 | }; |
| 2385 | fixed20_12 memtcas2_ff[8] = { | 2664 | fixed20_12 memtcas2_ff[8] = { |
| 2386 | fixed_init(0), | 2665 | dfixed_init(0), |
| 2387 | fixed_init(1), | 2666 | dfixed_init(1), |
| 2388 | fixed_init(2), | 2667 | dfixed_init(2), |
| 2389 | fixed_init(3), | 2668 | dfixed_init(3), |
| 2390 | fixed_init(4), | 2669 | dfixed_init(4), |
| 2391 | fixed_init(5), | 2670 | dfixed_init(5), |
| 2392 | fixed_init(6), | 2671 | dfixed_init(6), |
| 2393 | fixed_init(7), | 2672 | dfixed_init(7), |
| 2394 | }; | 2673 | }; |
| 2395 | fixed20_12 memtrbs[8] = { | 2674 | fixed20_12 memtrbs[8] = { |
| 2396 | fixed_init(1), | 2675 | dfixed_init(1), |
| 2397 | fixed_init_half(1), | 2676 | dfixed_init_half(1), |
| 2398 | fixed_init(2), | 2677 | dfixed_init(2), |
| 2399 | fixed_init_half(2), | 2678 | dfixed_init_half(2), |
| 2400 | fixed_init(3), | 2679 | dfixed_init(3), |
| 2401 | fixed_init_half(3), | 2680 | dfixed_init_half(3), |
| 2402 | fixed_init(4), | 2681 | dfixed_init(4), |
| 2403 | fixed_init_half(4) | 2682 | dfixed_init_half(4) |
| 2404 | }; | 2683 | }; |
| 2405 | fixed20_12 memtrbs_r4xx[8] = { | 2684 | fixed20_12 memtrbs_r4xx[8] = { |
| 2406 | fixed_init(4), | 2685 | dfixed_init(4), |
| 2407 | fixed_init(5), | 2686 | dfixed_init(5), |
| 2408 | fixed_init(6), | 2687 | dfixed_init(6), |
| 2409 | fixed_init(7), | 2688 | dfixed_init(7), |
| 2410 | fixed_init(8), | 2689 | dfixed_init(8), |
| 2411 | fixed_init(9), | 2690 | dfixed_init(9), |
| 2412 | fixed_init(10), | 2691 | dfixed_init(10), |
| 2413 | fixed_init(11) | 2692 | dfixed_init(11) |
| 2414 | }; | 2693 | }; |
| 2415 | fixed20_12 min_mem_eff; | 2694 | fixed20_12 min_mem_eff; |
| 2416 | fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; | 2695 | fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; |
| @@ -2441,7 +2720,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2441 | } | 2720 | } |
| 2442 | } | 2721 | } |
| 2443 | 2722 | ||
| 2444 | min_mem_eff.full = rfixed_const_8(0); | 2723 | min_mem_eff.full = dfixed_const_8(0); |
| 2445 | /* get modes */ | 2724 | /* get modes */ |
| 2446 | if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { | 2725 | if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { |
| 2447 | uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); | 2726 | uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); |
| @@ -2462,28 +2741,28 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2462 | mclk_ff = rdev->pm.mclk; | 2741 | mclk_ff = rdev->pm.mclk; |
| 2463 | 2742 | ||
| 2464 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | 2743 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); |
| 2465 | temp_ff.full = rfixed_const(temp); | 2744 | temp_ff.full = dfixed_const(temp); |
| 2466 | mem_bw.full = rfixed_mul(mclk_ff, temp_ff); | 2745 | mem_bw.full = dfixed_mul(mclk_ff, temp_ff); |
| 2467 | 2746 | ||
| 2468 | pix_clk.full = 0; | 2747 | pix_clk.full = 0; |
| 2469 | pix_clk2.full = 0; | 2748 | pix_clk2.full = 0; |
| 2470 | peak_disp_bw.full = 0; | 2749 | peak_disp_bw.full = 0; |
| 2471 | if (mode1) { | 2750 | if (mode1) { |
| 2472 | temp_ff.full = rfixed_const(1000); | 2751 | temp_ff.full = dfixed_const(1000); |
| 2473 | pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ | 2752 | pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ |
| 2474 | pix_clk.full = rfixed_div(pix_clk, temp_ff); | 2753 | pix_clk.full = dfixed_div(pix_clk, temp_ff); |
| 2475 | temp_ff.full = rfixed_const(pixel_bytes1); | 2754 | temp_ff.full = dfixed_const(pixel_bytes1); |
| 2476 | peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); | 2755 | peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); |
| 2477 | } | 2756 | } |
| 2478 | if (mode2) { | 2757 | if (mode2) { |
| 2479 | temp_ff.full = rfixed_const(1000); | 2758 | temp_ff.full = dfixed_const(1000); |
| 2480 | pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ | 2759 | pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ |
| 2481 | pix_clk2.full = rfixed_div(pix_clk2, temp_ff); | 2760 | pix_clk2.full = dfixed_div(pix_clk2, temp_ff); |
| 2482 | temp_ff.full = rfixed_const(pixel_bytes2); | 2761 | temp_ff.full = dfixed_const(pixel_bytes2); |
| 2483 | peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); | 2762 | peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); |
| 2484 | } | 2763 | } |
| 2485 | 2764 | ||
| 2486 | mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); | 2765 | mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); |
| 2487 | if (peak_disp_bw.full >= mem_bw.full) { | 2766 | if (peak_disp_bw.full >= mem_bw.full) { |
| 2488 | DRM_ERROR("You may not have enough display bandwidth for current mode\n" | 2767 | DRM_ERROR("You may not have enough display bandwidth for current mode\n" |
| 2489 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); | 2768 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); |
| @@ -2525,9 +2804,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2525 | mem_tras = ((temp >> 12) & 0xf) + 4; | 2804 | mem_tras = ((temp >> 12) & 0xf) + 4; |
| 2526 | } | 2805 | } |
| 2527 | /* convert to FF */ | 2806 | /* convert to FF */ |
| 2528 | trcd_ff.full = rfixed_const(mem_trcd); | 2807 | trcd_ff.full = dfixed_const(mem_trcd); |
| 2529 | trp_ff.full = rfixed_const(mem_trp); | 2808 | trp_ff.full = dfixed_const(mem_trp); |
| 2530 | tras_ff.full = rfixed_const(mem_tras); | 2809 | tras_ff.full = dfixed_const(mem_tras); |
| 2531 | 2810 | ||
| 2532 | /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ | 2811 | /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ |
| 2533 | temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); | 2812 | temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); |
| @@ -2545,7 +2824,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2545 | /* extra cas latency stored in bits 23-25 0-4 clocks */ | 2824 | /* extra cas latency stored in bits 23-25 0-4 clocks */ |
| 2546 | data = (temp >> 23) & 0x7; | 2825 | data = (temp >> 23) & 0x7; |
| 2547 | if (data < 5) | 2826 | if (data < 5) |
| 2548 | tcas_ff.full += rfixed_const(data); | 2827 | tcas_ff.full += dfixed_const(data); |
| 2549 | } | 2828 | } |
| 2550 | 2829 | ||
| 2551 | if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { | 2830 | if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { |
| @@ -2582,72 +2861,72 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2582 | 2861 | ||
| 2583 | if (rdev->flags & RADEON_IS_AGP) { | 2862 | if (rdev->flags & RADEON_IS_AGP) { |
| 2584 | fixed20_12 agpmode_ff; | 2863 | fixed20_12 agpmode_ff; |
| 2585 | agpmode_ff.full = rfixed_const(radeon_agpmode); | 2864 | agpmode_ff.full = dfixed_const(radeon_agpmode); |
| 2586 | temp_ff.full = rfixed_const_666(16); | 2865 | temp_ff.full = dfixed_const_666(16); |
| 2587 | sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); | 2866 | sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); |
| 2588 | } | 2867 | } |
| 2589 | /* TODO PCIE lanes may affect this - agpmode == 16?? */ | 2868 | /* TODO PCIE lanes may affect this - agpmode == 16?? */ |
| 2590 | 2869 | ||
| 2591 | if (ASIC_IS_R300(rdev)) { | 2870 | if (ASIC_IS_R300(rdev)) { |
| 2592 | sclk_delay_ff.full = rfixed_const(250); | 2871 | sclk_delay_ff.full = dfixed_const(250); |
| 2593 | } else { | 2872 | } else { |
| 2594 | if ((rdev->family == CHIP_RV100) || | 2873 | if ((rdev->family == CHIP_RV100) || |
| 2595 | rdev->flags & RADEON_IS_IGP) { | 2874 | rdev->flags & RADEON_IS_IGP) { |
| 2596 | if (rdev->mc.vram_is_ddr) | 2875 | if (rdev->mc.vram_is_ddr) |
| 2597 | sclk_delay_ff.full = rfixed_const(41); | 2876 | sclk_delay_ff.full = dfixed_const(41); |
| 2598 | else | 2877 | else |
| 2599 | sclk_delay_ff.full = rfixed_const(33); | 2878 | sclk_delay_ff.full = dfixed_const(33); |
| 2600 | } else { | 2879 | } else { |
| 2601 | if (rdev->mc.vram_width == 128) | 2880 | if (rdev->mc.vram_width == 128) |
| 2602 | sclk_delay_ff.full = rfixed_const(57); | 2881 | sclk_delay_ff.full = dfixed_const(57); |
| 2603 | else | 2882 | else |
| 2604 | sclk_delay_ff.full = rfixed_const(41); | 2883 | sclk_delay_ff.full = dfixed_const(41); |
| 2605 | } | 2884 | } |
| 2606 | } | 2885 | } |
| 2607 | 2886 | ||
| 2608 | mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); | 2887 | mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); |
| 2609 | 2888 | ||
| 2610 | if (rdev->mc.vram_is_ddr) { | 2889 | if (rdev->mc.vram_is_ddr) { |
| 2611 | if (rdev->mc.vram_width == 32) { | 2890 | if (rdev->mc.vram_width == 32) { |
| 2612 | k1.full = rfixed_const(40); | 2891 | k1.full = dfixed_const(40); |
| 2613 | c = 3; | 2892 | c = 3; |
| 2614 | } else { | 2893 | } else { |
| 2615 | k1.full = rfixed_const(20); | 2894 | k1.full = dfixed_const(20); |
| 2616 | c = 1; | 2895 | c = 1; |
| 2617 | } | 2896 | } |
| 2618 | } else { | 2897 | } else { |
| 2619 | k1.full = rfixed_const(40); | 2898 | k1.full = dfixed_const(40); |
| 2620 | c = 3; | 2899 | c = 3; |
| 2621 | } | 2900 | } |
| 2622 | 2901 | ||
| 2623 | temp_ff.full = rfixed_const(2); | 2902 | temp_ff.full = dfixed_const(2); |
| 2624 | mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); | 2903 | mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); |
| 2625 | temp_ff.full = rfixed_const(c); | 2904 | temp_ff.full = dfixed_const(c); |
| 2626 | mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); | 2905 | mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); |
| 2627 | temp_ff.full = rfixed_const(4); | 2906 | temp_ff.full = dfixed_const(4); |
| 2628 | mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); | 2907 | mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); |
| 2629 | mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); | 2908 | mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); |
| 2630 | mc_latency_mclk.full += k1.full; | 2909 | mc_latency_mclk.full += k1.full; |
| 2631 | 2910 | ||
| 2632 | mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); | 2911 | mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); |
| 2633 | mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); | 2912 | mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); |
| 2634 | 2913 | ||
| 2635 | /* | 2914 | /* |
| 2636 | HW cursor time assuming worst case of full size colour cursor. | 2915 | HW cursor time assuming worst case of full size colour cursor. |
| 2637 | */ | 2916 | */ |
| 2638 | temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); | 2917 | temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); |
| 2639 | temp_ff.full += trcd_ff.full; | 2918 | temp_ff.full += trcd_ff.full; |
| 2640 | if (temp_ff.full < tras_ff.full) | 2919 | if (temp_ff.full < tras_ff.full) |
| 2641 | temp_ff.full = tras_ff.full; | 2920 | temp_ff.full = tras_ff.full; |
| 2642 | cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); | 2921 | cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); |
| 2643 | 2922 | ||
| 2644 | temp_ff.full = rfixed_const(cur_size); | 2923 | temp_ff.full = dfixed_const(cur_size); |
| 2645 | cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); | 2924 | cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); |
| 2646 | /* | 2925 | /* |
| 2647 | Find the total latency for the display data. | 2926 | Find the total latency for the display data. |
| 2648 | */ | 2927 | */ |
| 2649 | disp_latency_overhead.full = rfixed_const(8); | 2928 | disp_latency_overhead.full = dfixed_const(8); |
| 2650 | disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); | 2929 | disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); |
| 2651 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; | 2930 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; |
| 2652 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; | 2931 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; |
| 2653 | 2932 | ||
| @@ -2675,16 +2954,16 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2675 | /* | 2954 | /* |
| 2676 | Find the drain rate of the display buffer. | 2955 | Find the drain rate of the display buffer. |
| 2677 | */ | 2956 | */ |
| 2678 | temp_ff.full = rfixed_const((16/pixel_bytes1)); | 2957 | temp_ff.full = dfixed_const((16/pixel_bytes1)); |
| 2679 | disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); | 2958 | disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); |
| 2680 | 2959 | ||
| 2681 | /* | 2960 | /* |
| 2682 | Find the critical point of the display buffer. | 2961 | Find the critical point of the display buffer. |
| 2683 | */ | 2962 | */ |
| 2684 | crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); | 2963 | crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); |
| 2685 | crit_point_ff.full += rfixed_const_half(0); | 2964 | crit_point_ff.full += dfixed_const_half(0); |
| 2686 | 2965 | ||
| 2687 | critical_point = rfixed_trunc(crit_point_ff); | 2966 | critical_point = dfixed_trunc(crit_point_ff); |
| 2688 | 2967 | ||
| 2689 | if (rdev->disp_priority == 2) { | 2968 | if (rdev->disp_priority == 2) { |
| 2690 | critical_point = 0; | 2969 | critical_point = 0; |
| @@ -2755,8 +3034,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2755 | /* | 3034 | /* |
| 2756 | Find the drain rate of the display buffer. | 3035 | Find the drain rate of the display buffer. |
| 2757 | */ | 3036 | */ |
| 2758 | temp_ff.full = rfixed_const((16/pixel_bytes2)); | 3037 | temp_ff.full = dfixed_const((16/pixel_bytes2)); |
| 2759 | disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); | 3038 | disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); |
| 2760 | 3039 | ||
| 2761 | grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); | 3040 | grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); |
| 2762 | grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); | 3041 | grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); |
| @@ -2777,8 +3056,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2777 | critical_point2 = 0; | 3056 | critical_point2 = 0; |
| 2778 | else { | 3057 | else { |
| 2779 | temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; | 3058 | temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; |
| 2780 | temp_ff.full = rfixed_const(temp); | 3059 | temp_ff.full = dfixed_const(temp); |
| 2781 | temp_ff.full = rfixed_mul(mclk_ff, temp_ff); | 3060 | temp_ff.full = dfixed_mul(mclk_ff, temp_ff); |
| 2782 | if (sclk_ff.full < temp_ff.full) | 3061 | if (sclk_ff.full < temp_ff.full) |
| 2783 | temp_ff.full = sclk_ff.full; | 3062 | temp_ff.full = sclk_ff.full; |
| 2784 | 3063 | ||
| @@ -2786,15 +3065,15 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2786 | 3065 | ||
| 2787 | if (mode1) { | 3066 | if (mode1) { |
| 2788 | temp_ff.full = read_return_rate.full - disp_drain_rate.full; | 3067 | temp_ff.full = read_return_rate.full - disp_drain_rate.full; |
| 2789 | time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); | 3068 | time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); |
| 2790 | } else { | 3069 | } else { |
| 2791 | time_disp1_drop_priority.full = 0; | 3070 | time_disp1_drop_priority.full = 0; |
| 2792 | } | 3071 | } |
| 2793 | crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; | 3072 | crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; |
| 2794 | crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); | 3073 | crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); |
| 2795 | crit_point_ff.full += rfixed_const_half(0); | 3074 | crit_point_ff.full += dfixed_const_half(0); |
| 2796 | 3075 | ||
| 2797 | critical_point2 = rfixed_trunc(crit_point_ff); | 3076 | critical_point2 = dfixed_trunc(crit_point_ff); |
| 2798 | 3077 | ||
| 2799 | if (rdev->disp_priority == 2) { | 3078 | if (rdev->disp_priority == 2) { |
| 2800 | critical_point2 = 0; | 3079 | critical_point2 = 0; |
| @@ -3491,7 +3770,6 @@ int r100_suspend(struct radeon_device *rdev) | |||
| 3491 | 3770 | ||
| 3492 | void r100_fini(struct radeon_device *rdev) | 3771 | void r100_fini(struct radeon_device *rdev) |
| 3493 | { | 3772 | { |
| 3494 | radeon_pm_fini(rdev); | ||
| 3495 | r100_cp_fini(rdev); | 3773 | r100_cp_fini(rdev); |
| 3496 | r100_wb_fini(rdev); | 3774 | r100_wb_fini(rdev); |
| 3497 | r100_ib_fini(rdev); | 3775 | r100_ib_fini(rdev); |
| @@ -3547,8 +3825,6 @@ int r100_init(struct radeon_device *rdev) | |||
| 3547 | r100_errata(rdev); | 3825 | r100_errata(rdev); |
| 3548 | /* Initialize clocks */ | 3826 | /* Initialize clocks */ |
| 3549 | radeon_get_clock_info(rdev->ddev); | 3827 | radeon_get_clock_info(rdev->ddev); |
| 3550 | /* Initialize power management */ | ||
| 3551 | radeon_pm_init(rdev); | ||
| 3552 | /* initialize AGP */ | 3828 | /* initialize AGP */ |
| 3553 | if (rdev->flags & RADEON_IS_AGP) { | 3829 | if (rdev->flags & RADEON_IS_AGP) { |
| 3554 | r = radeon_agp_init(rdev); | 3830 | r = radeon_agp_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h index de8abd104ab7..d016b16fa116 100644 --- a/drivers/gpu/drm/radeon/r100d.h +++ b/drivers/gpu/drm/radeon/r100d.h | |||
| @@ -838,5 +838,41 @@ | |||
| 838 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | 838 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) |
| 839 | #define C_00000D_FORCE_RB 0xEFFFFFFF | 839 | #define C_00000D_FORCE_RB 0xEFFFFFFF |
| 840 | 840 | ||
| 841 | /* PLL regs */ | ||
| 842 | #define SCLK_CNTL 0xd | ||
| 843 | #define FORCE_HDP (1 << 17) | ||
| 844 | #define CLK_PWRMGT_CNTL 0x14 | ||
| 845 | #define GLOBAL_PMAN_EN (1 << 10) | ||
| 846 | #define DISP_PM (1 << 20) | ||
| 847 | #define PLL_PWRMGT_CNTL 0x15 | ||
| 848 | #define MPLL_TURNOFF (1 << 0) | ||
| 849 | #define SPLL_TURNOFF (1 << 1) | ||
| 850 | #define PPLL_TURNOFF (1 << 2) | ||
| 851 | #define P2PLL_TURNOFF (1 << 3) | ||
| 852 | #define TVPLL_TURNOFF (1 << 4) | ||
| 853 | #define MOBILE_SU (1 << 16) | ||
| 854 | #define SU_SCLK_USE_BCLK (1 << 17) | ||
| 855 | #define SCLK_CNTL2 0x1e | ||
| 856 | #define REDUCED_SPEED_SCLK_MODE (1 << 16) | ||
| 857 | #define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17) | ||
| 858 | #define MCLK_MISC 0x1f | ||
| 859 | #define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18) | ||
| 860 | #define SCLK_MORE_CNTL 0x35 | ||
| 861 | #define REDUCED_SPEED_SCLK_EN (1 << 16) | ||
| 862 | #define IO_CG_VOLTAGE_DROP (1 << 17) | ||
| 863 | #define VOLTAGE_DELAY_SEL(x) ((x) << 20) | ||
| 864 | #define VOLTAGE_DROP_SYNC (1 << 19) | ||
| 865 | |||
| 866 | /* mmreg */ | ||
| 867 | #define DISP_PWR_MAN 0xd08 | ||
| 868 | #define DISP_D3_GRPH_RST (1 << 18) | ||
| 869 | #define DISP_D3_SUBPIC_RST (1 << 19) | ||
| 870 | #define DISP_D3_OV0_RST (1 << 20) | ||
| 871 | #define DISP_D1D2_GRPH_RST (1 << 21) | ||
| 872 | #define DISP_D1D2_SUBPIC_RST (1 << 22) | ||
| 873 | #define DISP_D1D2_OV0_RST (1 << 23) | ||
| 874 | #define DISP_DVO_ENABLE_RST (1 << 24) | ||
| 875 | #define TV_ENABLE_RST (1 << 25) | ||
| 876 | #define AUTO_PWRUP_EN (1 << 26) | ||
| 841 | 877 | ||
| 842 | #endif | 878 | #endif |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 6d9569e002f7..b2f9efe2897c 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -1344,7 +1344,6 @@ int r300_suspend(struct radeon_device *rdev) | |||
| 1344 | 1344 | ||
| 1345 | void r300_fini(struct radeon_device *rdev) | 1345 | void r300_fini(struct radeon_device *rdev) |
| 1346 | { | 1346 | { |
| 1347 | radeon_pm_fini(rdev); | ||
| 1348 | r100_cp_fini(rdev); | 1347 | r100_cp_fini(rdev); |
| 1349 | r100_wb_fini(rdev); | 1348 | r100_wb_fini(rdev); |
| 1350 | r100_ib_fini(rdev); | 1349 | r100_ib_fini(rdev); |
| @@ -1400,8 +1399,6 @@ int r300_init(struct radeon_device *rdev) | |||
| 1400 | r300_errata(rdev); | 1399 | r300_errata(rdev); |
| 1401 | /* Initialize clocks */ | 1400 | /* Initialize clocks */ |
| 1402 | radeon_get_clock_info(rdev->ddev); | 1401 | radeon_get_clock_info(rdev->ddev); |
| 1403 | /* Initialize power management */ | ||
| 1404 | radeon_pm_init(rdev); | ||
| 1405 | /* initialize AGP */ | 1402 | /* initialize AGP */ |
| 1406 | if (rdev->flags & RADEON_IS_AGP) { | 1403 | if (rdev->flags & RADEON_IS_AGP) { |
| 1407 | r = radeon_agp_init(rdev); | 1404 | r = radeon_agp_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index be092d243f84..4415a5ee5871 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
| @@ -36,6 +36,35 @@ | |||
| 36 | #include "r420d.h" | 36 | #include "r420d.h" |
| 37 | #include "r420_reg_safe.h" | 37 | #include "r420_reg_safe.h" |
| 38 | 38 | ||
| 39 | void r420_pm_init_profile(struct radeon_device *rdev) | ||
| 40 | { | ||
| 41 | /* default */ | ||
| 42 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 43 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 44 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | ||
| 45 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | ||
| 46 | /* low sh */ | ||
| 47 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; | ||
| 48 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; | ||
| 49 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
| 50 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | ||
| 51 | /* high sh */ | ||
| 52 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; | ||
| 53 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 54 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | ||
| 55 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; | ||
| 56 | /* low mh */ | ||
| 57 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; | ||
| 58 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 59 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
| 60 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | ||
| 61 | /* high mh */ | ||
| 62 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; | ||
| 63 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 64 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | ||
| 65 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; | ||
| 66 | } | ||
| 67 | |||
| 39 | static void r420_set_reg_safe(struct radeon_device *rdev) | 68 | static void r420_set_reg_safe(struct radeon_device *rdev) |
| 40 | { | 69 | { |
| 41 | rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; | 70 | rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; |
| @@ -274,7 +303,6 @@ int r420_suspend(struct radeon_device *rdev) | |||
| 274 | 303 | ||
| 275 | void r420_fini(struct radeon_device *rdev) | 304 | void r420_fini(struct radeon_device *rdev) |
| 276 | { | 305 | { |
| 277 | radeon_pm_fini(rdev); | ||
| 278 | r100_cp_fini(rdev); | 306 | r100_cp_fini(rdev); |
| 279 | r100_wb_fini(rdev); | 307 | r100_wb_fini(rdev); |
| 280 | r100_ib_fini(rdev); | 308 | r100_ib_fini(rdev); |
| @@ -334,8 +362,6 @@ int r420_init(struct radeon_device *rdev) | |||
| 334 | 362 | ||
| 335 | /* Initialize clocks */ | 363 | /* Initialize clocks */ |
| 336 | radeon_get_clock_info(rdev->ddev); | 364 | radeon_get_clock_info(rdev->ddev); |
| 337 | /* Initialize power management */ | ||
| 338 | radeon_pm_init(rdev); | ||
| 339 | /* initialize AGP */ | 365 | /* initialize AGP */ |
| 340 | if (rdev->flags & RADEON_IS_AGP) { | 366 | if (rdev->flags & RADEON_IS_AGP) { |
| 341 | r = radeon_agp_init(rdev); | 367 | r = radeon_agp_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 0cf2ad2a5585..93c9a2bbccf8 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h | |||
| @@ -347,9 +347,11 @@ | |||
| 347 | 347 | ||
| 348 | #define AVIVO_D1CRTC_CONTROL 0x6080 | 348 | #define AVIVO_D1CRTC_CONTROL 0x6080 |
| 349 | # define AVIVO_CRTC_EN (1 << 0) | 349 | # define AVIVO_CRTC_EN (1 << 0) |
| 350 | # define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) | ||
| 350 | #define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 | 351 | #define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 |
| 351 | #define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 | 352 | #define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 |
| 352 | #define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c | 353 | #define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c |
| 354 | #define AVIVO_D1CRTC_STATUS_POSITION 0x60a0 | ||
| 353 | #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 | 355 | #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 |
| 354 | #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 | 356 | #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 |
| 355 | 357 | ||
| @@ -488,6 +490,7 @@ | |||
| 488 | #define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 | 490 | #define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 |
| 489 | #define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 | 491 | #define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 |
| 490 | #define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c | 492 | #define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c |
| 493 | #define AVIVO_D2CRTC_STATUS_POSITION 0x68a0 | ||
| 491 | #define AVIVO_D2CRTC_FRAME_COUNT 0x68a4 | 494 | #define AVIVO_D2CRTC_FRAME_COUNT 0x68a4 |
| 492 | #define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 | 495 | #define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 |
| 493 | 496 | ||
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 870111e26bd1..34330df28483 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
| @@ -261,8 +261,6 @@ int r520_init(struct radeon_device *rdev) | |||
| 261 | } | 261 | } |
| 262 | /* Initialize clocks */ | 262 | /* Initialize clocks */ |
| 263 | radeon_get_clock_info(rdev->ddev); | 263 | radeon_get_clock_info(rdev->ddev); |
| 264 | /* Initialize power management */ | ||
| 265 | radeon_pm_init(rdev); | ||
| 266 | /* initialize AGP */ | 264 | /* initialize AGP */ |
| 267 | if (rdev->flags & RADEON_IS_AGP) { | 265 | if (rdev->flags & RADEON_IS_AGP) { |
| 268 | r = radeon_agp_init(rdev); | 266 | r = radeon_agp_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 2ec423c3f3f8..7ffc3892c652 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -92,6 +92,400 @@ void r600_gpu_init(struct radeon_device *rdev); | |||
| 92 | void r600_fini(struct radeon_device *rdev); | 92 | void r600_fini(struct radeon_device *rdev); |
| 93 | void r600_irq_disable(struct radeon_device *rdev); | 93 | void r600_irq_disable(struct radeon_device *rdev); |
| 94 | 94 | ||
| 95 | void r600_pm_get_dynpm_state(struct radeon_device *rdev) | ||
| 96 | { | ||
| 97 | int i; | ||
| 98 | |||
| 99 | rdev->pm.dynpm_can_upclock = true; | ||
| 100 | rdev->pm.dynpm_can_downclock = true; | ||
| 101 | |||
| 102 | /* power state array is low to high, default is first */ | ||
| 103 | if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) { | ||
| 104 | int min_power_state_index = 0; | ||
| 105 | |||
| 106 | if (rdev->pm.num_power_states > 2) | ||
| 107 | min_power_state_index = 1; | ||
| 108 | |||
| 109 | switch (rdev->pm.dynpm_planned_action) { | ||
| 110 | case DYNPM_ACTION_MINIMUM: | ||
| 111 | rdev->pm.requested_power_state_index = min_power_state_index; | ||
| 112 | rdev->pm.requested_clock_mode_index = 0; | ||
| 113 | rdev->pm.dynpm_can_downclock = false; | ||
| 114 | break; | ||
| 115 | case DYNPM_ACTION_DOWNCLOCK: | ||
| 116 | if (rdev->pm.current_power_state_index == min_power_state_index) { | ||
| 117 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; | ||
| 118 | rdev->pm.dynpm_can_downclock = false; | ||
| 119 | } else { | ||
| 120 | if (rdev->pm.active_crtc_count > 1) { | ||
| 121 | for (i = 0; i < rdev->pm.num_power_states; i++) { | ||
| 122 | if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) | ||
| 123 | continue; | ||
| 124 | else if (i >= rdev->pm.current_power_state_index) { | ||
| 125 | rdev->pm.requested_power_state_index = | ||
| 126 | rdev->pm.current_power_state_index; | ||
| 127 | break; | ||
| 128 | } else { | ||
| 129 | rdev->pm.requested_power_state_index = i; | ||
| 130 | break; | ||
| 131 | } | ||
| 132 | } | ||
| 133 | } else | ||
| 134 | rdev->pm.requested_power_state_index = | ||
| 135 | rdev->pm.current_power_state_index - 1; | ||
| 136 | } | ||
| 137 | rdev->pm.requested_clock_mode_index = 0; | ||
| 138 | /* don't use the power state if crtcs are active and no display flag is set */ | ||
| 139 | if ((rdev->pm.active_crtc_count > 0) && | ||
| 140 | (rdev->pm.power_state[rdev->pm.requested_power_state_index]. | ||
| 141 | clock_info[rdev->pm.requested_clock_mode_index].flags & | ||
| 142 | RADEON_PM_MODE_NO_DISPLAY)) { | ||
| 143 | rdev->pm.requested_power_state_index++; | ||
| 144 | } | ||
| 145 | break; | ||
| 146 | case DYNPM_ACTION_UPCLOCK: | ||
| 147 | if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { | ||
| 148 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; | ||
| 149 | rdev->pm.dynpm_can_upclock = false; | ||
| 150 | } else { | ||
| 151 | if (rdev->pm.active_crtc_count > 1) { | ||
| 152 | for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { | ||
| 153 | if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) | ||
| 154 | continue; | ||
| 155 | else if (i <= rdev->pm.current_power_state_index) { | ||
| 156 | rdev->pm.requested_power_state_index = | ||
| 157 | rdev->pm.current_power_state_index; | ||
| 158 | break; | ||
| 159 | } else { | ||
| 160 | rdev->pm.requested_power_state_index = i; | ||
| 161 | break; | ||
| 162 | } | ||
| 163 | } | ||
| 164 | } else | ||
| 165 | rdev->pm.requested_power_state_index = | ||
| 166 | rdev->pm.current_power_state_index + 1; | ||
| 167 | } | ||
| 168 | rdev->pm.requested_clock_mode_index = 0; | ||
| 169 | break; | ||
| 170 | case DYNPM_ACTION_DEFAULT: | ||
| 171 | rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; | ||
| 172 | rdev->pm.requested_clock_mode_index = 0; | ||
| 173 | rdev->pm.dynpm_can_upclock = false; | ||
| 174 | break; | ||
| 175 | case DYNPM_ACTION_NONE: | ||
| 176 | default: | ||
| 177 | DRM_ERROR("Requested mode for not defined action\n"); | ||
| 178 | return; | ||
| 179 | } | ||
| 180 | } else { | ||
| 181 | /* XXX select a power state based on AC/DC, single/dualhead, etc. */ | ||
| 182 | /* for now just select the first power state and switch between clock modes */ | ||
| 183 | /* power state array is low to high, default is first (0) */ | ||
| 184 | if (rdev->pm.active_crtc_count > 1) { | ||
| 185 | rdev->pm.requested_power_state_index = -1; | ||
| 186 | /* start at 1 as we don't want the default mode */ | ||
| 187 | for (i = 1; i < rdev->pm.num_power_states; i++) { | ||
| 188 | if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) | ||
| 189 | continue; | ||
| 190 | else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) || | ||
| 191 | (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) { | ||
| 192 | rdev->pm.requested_power_state_index = i; | ||
| 193 | break; | ||
| 194 | } | ||
| 195 | } | ||
| 196 | /* if nothing selected, grab the default state. */ | ||
| 197 | if (rdev->pm.requested_power_state_index == -1) | ||
| 198 | rdev->pm.requested_power_state_index = 0; | ||
| 199 | } else | ||
| 200 | rdev->pm.requested_power_state_index = 1; | ||
| 201 | |||
| 202 | switch (rdev->pm.dynpm_planned_action) { | ||
| 203 | case DYNPM_ACTION_MINIMUM: | ||
| 204 | rdev->pm.requested_clock_mode_index = 0; | ||
| 205 | rdev->pm.dynpm_can_downclock = false; | ||
| 206 | break; | ||
| 207 | case DYNPM_ACTION_DOWNCLOCK: | ||
| 208 | if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { | ||
| 209 | if (rdev->pm.current_clock_mode_index == 0) { | ||
| 210 | rdev->pm.requested_clock_mode_index = 0; | ||
| 211 | rdev->pm.dynpm_can_downclock = false; | ||
| 212 | } else | ||
| 213 | rdev->pm.requested_clock_mode_index = | ||
| 214 | rdev->pm.current_clock_mode_index - 1; | ||
| 215 | } else { | ||
| 216 | rdev->pm.requested_clock_mode_index = 0; | ||
| 217 | rdev->pm.dynpm_can_downclock = false; | ||
| 218 | } | ||
| 219 | /* don't use the power state if crtcs are active and no display flag is set */ | ||
| 220 | if ((rdev->pm.active_crtc_count > 0) && | ||
| 221 | (rdev->pm.power_state[rdev->pm.requested_power_state_index]. | ||
| 222 | clock_info[rdev->pm.requested_clock_mode_index].flags & | ||
| 223 | RADEON_PM_MODE_NO_DISPLAY)) { | ||
| 224 | rdev->pm.requested_clock_mode_index++; | ||
| 225 | } | ||
| 226 | break; | ||
| 227 | case DYNPM_ACTION_UPCLOCK: | ||
| 228 | if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { | ||
| 229 | if (rdev->pm.current_clock_mode_index == | ||
| 230 | (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) { | ||
| 231 | rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index; | ||
| 232 | rdev->pm.dynpm_can_upclock = false; | ||
| 233 | } else | ||
| 234 | rdev->pm.requested_clock_mode_index = | ||
| 235 | rdev->pm.current_clock_mode_index + 1; | ||
| 236 | } else { | ||
| 237 | rdev->pm.requested_clock_mode_index = | ||
| 238 | rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1; | ||
| 239 | rdev->pm.dynpm_can_upclock = false; | ||
| 240 | } | ||
| 241 | break; | ||
| 242 | case DYNPM_ACTION_DEFAULT: | ||
| 243 | rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; | ||
| 244 | rdev->pm.requested_clock_mode_index = 0; | ||
| 245 | rdev->pm.dynpm_can_upclock = false; | ||
| 246 | break; | ||
| 247 | case DYNPM_ACTION_NONE: | ||
| 248 | default: | ||
| 249 | DRM_ERROR("Requested mode for not defined action\n"); | ||
| 250 | return; | ||
| 251 | } | ||
| 252 | } | ||
| 253 | |||
| 254 | DRM_DEBUG("Requested: e: %d m: %d p: %d\n", | ||
| 255 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. | ||
| 256 | clock_info[rdev->pm.requested_clock_mode_index].sclk, | ||
| 257 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. | ||
| 258 | clock_info[rdev->pm.requested_clock_mode_index].mclk, | ||
| 259 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. | ||
| 260 | pcie_lanes); | ||
| 261 | } | ||
| 262 | |||
| 263 | static int r600_pm_get_type_index(struct radeon_device *rdev, | ||
| 264 | enum radeon_pm_state_type ps_type, | ||
| 265 | int instance) | ||
| 266 | { | ||
| 267 | int i; | ||
| 268 | int found_instance = -1; | ||
| 269 | |||
| 270 | for (i = 0; i < rdev->pm.num_power_states; i++) { | ||
| 271 | if (rdev->pm.power_state[i].type == ps_type) { | ||
| 272 | found_instance++; | ||
| 273 | if (found_instance == instance) | ||
| 274 | return i; | ||
| 275 | } | ||
| 276 | } | ||
| 277 | /* return default if no match */ | ||
| 278 | return rdev->pm.default_power_state_index; | ||
| 279 | } | ||
| 280 | |||
| 281 | void rs780_pm_init_profile(struct radeon_device *rdev) | ||
| 282 | { | ||
| 283 | if (rdev->pm.num_power_states == 2) { | ||
| 284 | /* default */ | ||
| 285 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 286 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 287 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | ||
| 288 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | ||
| 289 | /* low sh */ | ||
| 290 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; | ||
| 291 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; | ||
| 292 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
| 293 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | ||
| 294 | /* high sh */ | ||
| 295 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; | ||
| 296 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; | ||
| 297 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | ||
| 298 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; | ||
| 299 | /* low mh */ | ||
| 300 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; | ||
| 301 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; | ||
| 302 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
| 303 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | ||
| 304 | /* high mh */ | ||
| 305 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; | ||
| 306 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1; | ||
| 307 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | ||
| 308 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; | ||
| 309 | } else if (rdev->pm.num_power_states == 3) { | ||
| 310 | /* default */ | ||
| 311 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 312 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 313 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | ||
| 314 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | ||
| 315 | /* low sh */ | ||
| 316 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; | ||
| 317 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; | ||
| 318 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
| 319 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | ||
| 320 | /* high sh */ | ||
| 321 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; | ||
| 322 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; | ||
| 323 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | ||
| 324 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; | ||
| 325 | /* low mh */ | ||
| 326 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1; | ||
| 327 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; | ||
| 328 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
| 329 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | ||
| 330 | /* high mh */ | ||
| 331 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; | ||
| 332 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; | ||
| 333 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | ||
| 334 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; | ||
| 335 | } else { | ||
| 336 | /* default */ | ||
| 337 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 338 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 339 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | ||
| 340 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | ||
| 341 | /* low sh */ | ||
| 342 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2; | ||
| 343 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; | ||
| 344 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
| 345 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | ||
| 346 | /* high sh */ | ||
| 347 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; | ||
| 348 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; | ||
| 349 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | ||
| 350 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; | ||
| 351 | /* low mh */ | ||
| 352 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; | ||
| 353 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; | ||
| 354 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
| 355 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | ||
| 356 | /* high mh */ | ||
| 357 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; | ||
| 358 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; | ||
| 359 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | ||
| 360 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; | ||
| 361 | } | ||
| 362 | } | ||
| 363 | |||
| 364 | void r600_pm_init_profile(struct radeon_device *rdev) | ||
| 365 | { | ||
| 366 | if (rdev->family == CHIP_R600) { | ||
| 367 | /* XXX */ | ||
| 368 | /* default */ | ||
| 369 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 370 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 371 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | ||
| 372 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | ||
| 373 | /* low sh */ | ||
| 374 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 375 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 376 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
| 377 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | ||
| 378 | /* high sh */ | ||
| 379 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 380 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 381 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | ||
| 382 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; | ||
| 383 | /* low mh */ | ||
| 384 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 385 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 386 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
| 387 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | ||
| 388 | /* high mh */ | ||
| 389 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 390 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 391 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | ||
| 392 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; | ||
| 393 | } else { | ||
| 394 | if (rdev->pm.num_power_states < 4) { | ||
| 395 | /* default */ | ||
| 396 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 397 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 398 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | ||
| 399 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; | ||
| 400 | /* low sh */ | ||
| 401 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; | ||
| 402 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; | ||
| 403 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
| 404 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1; | ||
| 405 | /* high sh */ | ||
| 406 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; | ||
| 407 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; | ||
| 408 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | ||
| 409 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; | ||
| 410 | /* low mh */ | ||
| 411 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; | ||
| 412 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; | ||
| 413 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
| 414 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1; | ||
| 415 | /* high mh */ | ||
| 416 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; | ||
| 417 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; | ||
| 418 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | ||
| 419 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; | ||
| 420 | } else { | ||
| 421 | /* default */ | ||
| 422 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 423 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 424 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | ||
| 425 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; | ||
| 426 | /* low sh */ | ||
| 427 | if (rdev->flags & RADEON_IS_MOBILITY) { | ||
| 428 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = | ||
| 429 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | ||
| 430 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = | ||
| 431 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | ||
| 432 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
| 433 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 2; | ||
| 434 | } else { | ||
| 435 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = | ||
| 436 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
| 437 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = | ||
| 438 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
| 439 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
| 440 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1; | ||
| 441 | } | ||
| 442 | /* high sh */ | ||
| 443 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = | ||
| 444 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
| 445 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = | ||
| 446 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
| 447 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | ||
| 448 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; | ||
| 449 | /* low mh */ | ||
| 450 | if (rdev->flags & RADEON_IS_MOBILITY) { | ||
| 451 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = | ||
| 452 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | ||
| 453 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = | ||
| 454 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | ||
| 455 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
| 456 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2; | ||
| 457 | } else { | ||
| 458 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = | ||
| 459 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
| 460 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = | ||
| 461 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
| 462 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
| 463 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1; | ||
| 464 | } | ||
| 465 | /* high mh */ | ||
| 466 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = | ||
| 467 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
| 468 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = | ||
| 469 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
| 470 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | ||
| 471 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; | ||
| 472 | } | ||
| 473 | } | ||
| 474 | } | ||
| 475 | |||
| 476 | void r600_pm_misc(struct radeon_device *rdev) | ||
| 477 | { | ||
| 478 | |||
| 479 | } | ||
| 480 | |||
| 481 | bool r600_gui_idle(struct radeon_device *rdev) | ||
| 482 | { | ||
| 483 | if (RREG32(GRBM_STATUS) & GUI_ACTIVE) | ||
| 484 | return false; | ||
| 485 | else | ||
| 486 | return true; | ||
| 487 | } | ||
| 488 | |||
| 95 | /* hpd for digital panel detect/disconnect */ | 489 | /* hpd for digital panel detect/disconnect */ |
| 96 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) | 490 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) |
| 97 | { | 491 | { |
| @@ -2069,8 +2463,6 @@ int r600_init(struct radeon_device *rdev) | |||
| 2069 | r = radeon_clocks_init(rdev); | 2463 | r = radeon_clocks_init(rdev); |
| 2070 | if (r) | 2464 | if (r) |
| 2071 | return r; | 2465 | return r; |
| 2072 | /* Initialize power management */ | ||
| 2073 | radeon_pm_init(rdev); | ||
| 2074 | /* Fence driver */ | 2466 | /* Fence driver */ |
| 2075 | r = radeon_fence_driver_init(rdev); | 2467 | r = radeon_fence_driver_init(rdev); |
| 2076 | if (r) | 2468 | if (r) |
| @@ -2135,7 +2527,6 @@ int r600_init(struct radeon_device *rdev) | |||
| 2135 | 2527 | ||
| 2136 | void r600_fini(struct radeon_device *rdev) | 2528 | void r600_fini(struct radeon_device *rdev) |
| 2137 | { | 2529 | { |
| 2138 | radeon_pm_fini(rdev); | ||
| 2139 | r600_audio_fini(rdev); | 2530 | r600_audio_fini(rdev); |
| 2140 | r600_blit_fini(rdev); | 2531 | r600_blit_fini(rdev); |
| 2141 | r600_cp_fini(rdev); | 2532 | r600_cp_fini(rdev); |
| @@ -2527,6 +2918,7 @@ int r600_irq_set(struct radeon_device *rdev) | |||
| 2527 | u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; | 2918 | u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; |
| 2528 | u32 mode_int = 0; | 2919 | u32 mode_int = 0; |
| 2529 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; | 2920 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; |
| 2921 | u32 grbm_int_cntl = 0; | ||
| 2530 | u32 hdmi1, hdmi2; | 2922 | u32 hdmi1, hdmi2; |
| 2531 | 2923 | ||
| 2532 | if (!rdev->irq.installed) { | 2924 | if (!rdev->irq.installed) { |
| @@ -2603,9 +2995,14 @@ int r600_irq_set(struct radeon_device *rdev) | |||
| 2603 | DRM_DEBUG("r600_irq_set: hdmi 2\n"); | 2995 | DRM_DEBUG("r600_irq_set: hdmi 2\n"); |
| 2604 | hdmi2 |= R600_HDMI_INT_EN; | 2996 | hdmi2 |= R600_HDMI_INT_EN; |
| 2605 | } | 2997 | } |
| 2998 | if (rdev->irq.gui_idle) { | ||
| 2999 | DRM_DEBUG("gui idle\n"); | ||
| 3000 | grbm_int_cntl |= GUI_IDLE_INT_ENABLE; | ||
| 3001 | } | ||
| 2606 | 3002 | ||
| 2607 | WREG32(CP_INT_CNTL, cp_int_cntl); | 3003 | WREG32(CP_INT_CNTL, cp_int_cntl); |
| 2608 | WREG32(DxMODE_INT_MASK, mode_int); | 3004 | WREG32(DxMODE_INT_MASK, mode_int); |
| 3005 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); | ||
| 2609 | WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); | 3006 | WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); |
| 2610 | if (ASIC_IS_DCE3(rdev)) { | 3007 | if (ASIC_IS_DCE3(rdev)) { |
| 2611 | WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2); | 3008 | WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2); |
| @@ -2921,6 +3318,11 @@ restart_ih: | |||
| 2921 | case 181: /* CP EOP event */ | 3318 | case 181: /* CP EOP event */ |
| 2922 | DRM_DEBUG("IH: CP EOP\n"); | 3319 | DRM_DEBUG("IH: CP EOP\n"); |
| 2923 | break; | 3320 | break; |
| 3321 | case 233: /* GUI IDLE */ | ||
| 3322 | DRM_DEBUG("IH: GUI idle\n"); | ||
| 3323 | rdev->pm.gui_idle = true; | ||
| 3324 | wake_up(&rdev->irq.idle_queue); | ||
| 3325 | break; | ||
| 2924 | default: | 3326 | default: |
| 2925 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | 3327 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
| 2926 | break; | 3328 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index ab29d972a167..5c9ce2beaca3 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -89,7 +89,6 @@ extern int radeon_testing; | |||
| 89 | extern int radeon_connector_table; | 89 | extern int radeon_connector_table; |
| 90 | extern int radeon_tv; | 90 | extern int radeon_tv; |
| 91 | extern int radeon_new_pll; | 91 | extern int radeon_new_pll; |
| 92 | extern int radeon_dynpm; | ||
| 93 | extern int radeon_audio; | 92 | extern int radeon_audio; |
| 94 | extern int radeon_disp_priority; | 93 | extern int radeon_disp_priority; |
| 95 | extern int radeon_hw_i2c; | 94 | extern int radeon_hw_i2c; |
| @@ -173,6 +172,8 @@ struct radeon_clock { | |||
| 173 | int radeon_pm_init(struct radeon_device *rdev); | 172 | int radeon_pm_init(struct radeon_device *rdev); |
| 174 | void radeon_pm_fini(struct radeon_device *rdev); | 173 | void radeon_pm_fini(struct radeon_device *rdev); |
| 175 | void radeon_pm_compute_clocks(struct radeon_device *rdev); | 174 | void radeon_pm_compute_clocks(struct radeon_device *rdev); |
| 175 | void radeon_pm_suspend(struct radeon_device *rdev); | ||
| 176 | void radeon_pm_resume(struct radeon_device *rdev); | ||
| 176 | void radeon_combios_get_power_modes(struct radeon_device *rdev); | 177 | void radeon_combios_get_power_modes(struct radeon_device *rdev); |
| 177 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 178 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
| 178 | 179 | ||
| @@ -376,6 +377,9 @@ struct radeon_irq { | |||
| 376 | wait_queue_head_t vblank_queue; | 377 | wait_queue_head_t vblank_queue; |
| 377 | /* FIXME: use defines for max hpd/dacs */ | 378 | /* FIXME: use defines for max hpd/dacs */ |
| 378 | bool hpd[6]; | 379 | bool hpd[6]; |
| 380 | bool gui_idle; | ||
| 381 | bool gui_idle_acked; | ||
| 382 | wait_queue_head_t idle_queue; | ||
| 379 | /* FIXME: use defines for max HDMI blocks */ | 383 | /* FIXME: use defines for max HDMI blocks */ |
| 380 | bool hdmi[2]; | 384 | bool hdmi[2]; |
| 381 | spinlock_t sw_lock; | 385 | spinlock_t sw_lock; |
| @@ -465,7 +469,9 @@ int radeon_ib_test(struct radeon_device *rdev); | |||
| 465 | extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); | 469 | extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); |
| 466 | /* Ring access between begin & end cannot sleep */ | 470 | /* Ring access between begin & end cannot sleep */ |
| 467 | void radeon_ring_free_size(struct radeon_device *rdev); | 471 | void radeon_ring_free_size(struct radeon_device *rdev); |
| 472 | int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw); | ||
| 468 | int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); | 473 | int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); |
| 474 | void radeon_ring_commit(struct radeon_device *rdev); | ||
| 469 | void radeon_ring_unlock_commit(struct radeon_device *rdev); | 475 | void radeon_ring_unlock_commit(struct radeon_device *rdev); |
| 470 | void radeon_ring_unlock_undo(struct radeon_device *rdev); | 476 | void radeon_ring_unlock_undo(struct radeon_device *rdev); |
| 471 | int radeon_ring_test(struct radeon_device *rdev); | 477 | int radeon_ring_test(struct radeon_device *rdev); |
| @@ -600,17 +606,24 @@ struct radeon_wb { | |||
| 600 | * Equation between gpu/memory clock and available bandwidth is hw dependent | 606 | * Equation between gpu/memory clock and available bandwidth is hw dependent |
| 601 | * (type of memory, bus size, efficiency, ...) | 607 | * (type of memory, bus size, efficiency, ...) |
| 602 | */ | 608 | */ |
| 603 | enum radeon_pm_state { | 609 | |
| 604 | PM_STATE_DISABLED, | 610 | enum radeon_pm_method { |
| 605 | PM_STATE_MINIMUM, | 611 | PM_METHOD_PROFILE, |
| 606 | PM_STATE_PAUSED, | 612 | PM_METHOD_DYNPM, |
| 607 | PM_STATE_ACTIVE | 613 | }; |
| 614 | |||
| 615 | enum radeon_dynpm_state { | ||
| 616 | DYNPM_STATE_DISABLED, | ||
| 617 | DYNPM_STATE_MINIMUM, | ||
| 618 | DYNPM_STATE_PAUSED, | ||
| 619 | DYNPM_STATE_ACTIVE | ||
| 608 | }; | 620 | }; |
| 609 | enum radeon_pm_action { | 621 | enum radeon_dynpm_action { |
| 610 | PM_ACTION_NONE, | 622 | DYNPM_ACTION_NONE, |
| 611 | PM_ACTION_MINIMUM, | 623 | DYNPM_ACTION_MINIMUM, |
| 612 | PM_ACTION_DOWNCLOCK, | 624 | DYNPM_ACTION_DOWNCLOCK, |
| 613 | PM_ACTION_UPCLOCK | 625 | DYNPM_ACTION_UPCLOCK, |
| 626 | DYNPM_ACTION_DEFAULT | ||
| 614 | }; | 627 | }; |
| 615 | 628 | ||
| 616 | enum radeon_voltage_type { | 629 | enum radeon_voltage_type { |
| @@ -628,11 +641,25 @@ enum radeon_pm_state_type { | |||
| 628 | POWER_STATE_TYPE_PERFORMANCE, | 641 | POWER_STATE_TYPE_PERFORMANCE, |
| 629 | }; | 642 | }; |
| 630 | 643 | ||
| 631 | enum radeon_pm_clock_mode_type { | 644 | enum radeon_pm_profile_type { |
| 632 | POWER_MODE_TYPE_DEFAULT, | 645 | PM_PROFILE_DEFAULT, |
| 633 | POWER_MODE_TYPE_LOW, | 646 | PM_PROFILE_AUTO, |
| 634 | POWER_MODE_TYPE_MID, | 647 | PM_PROFILE_LOW, |
| 635 | POWER_MODE_TYPE_HIGH, | 648 | PM_PROFILE_HIGH, |
| 649 | }; | ||
| 650 | |||
| 651 | #define PM_PROFILE_DEFAULT_IDX 0 | ||
| 652 | #define PM_PROFILE_LOW_SH_IDX 1 | ||
| 653 | #define PM_PROFILE_HIGH_SH_IDX 2 | ||
| 654 | #define PM_PROFILE_LOW_MH_IDX 3 | ||
| 655 | #define PM_PROFILE_HIGH_MH_IDX 4 | ||
| 656 | #define PM_PROFILE_MAX 5 | ||
| 657 | |||
| 658 | struct radeon_pm_profile { | ||
| 659 | int dpms_off_ps_idx; | ||
| 660 | int dpms_on_ps_idx; | ||
| 661 | int dpms_off_cm_idx; | ||
| 662 | int dpms_on_cm_idx; | ||
| 636 | }; | 663 | }; |
| 637 | 664 | ||
| 638 | struct radeon_voltage { | 665 | struct radeon_voltage { |
| @@ -649,12 +676,8 @@ struct radeon_voltage { | |||
| 649 | u32 voltage; | 676 | u32 voltage; |
| 650 | }; | 677 | }; |
| 651 | 678 | ||
| 652 | struct radeon_pm_non_clock_info { | 679 | /* clock mode flags */ |
| 653 | /* pcie lanes */ | 680 | #define RADEON_PM_MODE_NO_DISPLAY (1 << 0) |
| 654 | int pcie_lanes; | ||
| 655 | /* standardized non-clock flags */ | ||
| 656 | u32 flags; | ||
| 657 | }; | ||
| 658 | 681 | ||
| 659 | struct radeon_pm_clock_info { | 682 | struct radeon_pm_clock_info { |
| 660 | /* memory clock */ | 683 | /* memory clock */ |
| @@ -663,10 +686,13 @@ struct radeon_pm_clock_info { | |||
| 663 | u32 sclk; | 686 | u32 sclk; |
| 664 | /* voltage info */ | 687 | /* voltage info */ |
| 665 | struct radeon_voltage voltage; | 688 | struct radeon_voltage voltage; |
| 666 | /* standardized clock flags - not sure we'll need these */ | 689 | /* standardized clock flags */ |
| 667 | u32 flags; | 690 | u32 flags; |
| 668 | }; | 691 | }; |
| 669 | 692 | ||
| 693 | /* state flags */ | ||
| 694 | #define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0) | ||
| 695 | |||
| 670 | struct radeon_power_state { | 696 | struct radeon_power_state { |
| 671 | enum radeon_pm_state_type type; | 697 | enum radeon_pm_state_type type; |
| 672 | /* XXX: use a define for num clock modes */ | 698 | /* XXX: use a define for num clock modes */ |
| @@ -674,9 +700,11 @@ struct radeon_power_state { | |||
| 674 | /* number of valid clock modes in this power state */ | 700 | /* number of valid clock modes in this power state */ |
| 675 | int num_clock_modes; | 701 | int num_clock_modes; |
| 676 | struct radeon_pm_clock_info *default_clock_mode; | 702 | struct radeon_pm_clock_info *default_clock_mode; |
| 677 | /* non clock info about this state */ | 703 | /* standardized state flags */ |
| 678 | struct radeon_pm_non_clock_info non_clock_info; | 704 | u32 flags; |
| 679 | bool voltage_drop_active; | 705 | u32 misc; /* vbios specific flags */ |
| 706 | u32 misc2; /* vbios specific flags */ | ||
| 707 | int pcie_lanes; /* pcie lanes */ | ||
| 680 | }; | 708 | }; |
| 681 | 709 | ||
| 682 | /* | 710 | /* |
| @@ -686,14 +714,11 @@ struct radeon_power_state { | |||
| 686 | 714 | ||
| 687 | struct radeon_pm { | 715 | struct radeon_pm { |
| 688 | struct mutex mutex; | 716 | struct mutex mutex; |
| 689 | struct delayed_work idle_work; | 717 | u32 active_crtcs; |
| 690 | enum radeon_pm_state state; | 718 | int active_crtc_count; |
| 691 | enum radeon_pm_action planned_action; | ||
| 692 | unsigned long action_timeout; | ||
| 693 | bool downclocked; | ||
| 694 | int active_crtcs; | ||
| 695 | int req_vblank; | 719 | int req_vblank; |
| 696 | bool vblank_sync; | 720 | bool vblank_sync; |
| 721 | bool gui_idle; | ||
| 697 | fixed20_12 max_bandwidth; | 722 | fixed20_12 max_bandwidth; |
| 698 | fixed20_12 igp_sideport_mclk; | 723 | fixed20_12 igp_sideport_mclk; |
| 699 | fixed20_12 igp_system_mclk; | 724 | fixed20_12 igp_system_mclk; |
| @@ -710,12 +735,27 @@ struct radeon_pm { | |||
| 710 | struct radeon_power_state power_state[8]; | 735 | struct radeon_power_state power_state[8]; |
| 711 | /* number of valid power states */ | 736 | /* number of valid power states */ |
| 712 | int num_power_states; | 737 | int num_power_states; |
| 713 | struct radeon_power_state *current_power_state; | 738 | int current_power_state_index; |
| 714 | struct radeon_pm_clock_info *current_clock_mode; | 739 | int current_clock_mode_index; |
| 715 | struct radeon_power_state *requested_power_state; | 740 | int requested_power_state_index; |
| 716 | struct radeon_pm_clock_info *requested_clock_mode; | 741 | int requested_clock_mode_index; |
| 717 | struct radeon_power_state *default_power_state; | 742 | int default_power_state_index; |
| 743 | u32 current_sclk; | ||
| 744 | u32 current_mclk; | ||
| 718 | struct radeon_i2c_chan *i2c_bus; | 745 | struct radeon_i2c_chan *i2c_bus; |
| 746 | /* selected pm method */ | ||
| 747 | enum radeon_pm_method pm_method; | ||
| 748 | /* dynpm power management */ | ||
| 749 | struct delayed_work dynpm_idle_work; | ||
| 750 | enum radeon_dynpm_state dynpm_state; | ||
| 751 | enum radeon_dynpm_action dynpm_planned_action; | ||
| 752 | unsigned long dynpm_action_timeout; | ||
| 753 | bool dynpm_can_upclock; | ||
| 754 | bool dynpm_can_downclock; | ||
| 755 | /* profile-based power management */ | ||
| 756 | enum radeon_pm_profile_type profile; | ||
| 757 | int profile_index; | ||
| 758 | struct radeon_pm_profile profiles[PM_PROFILE_MAX]; | ||
| 719 | }; | 759 | }; |
| 720 | 760 | ||
| 721 | 761 | ||
| @@ -803,6 +843,13 @@ struct radeon_asic { | |||
| 803 | * through ring. | 843 | * through ring. |
| 804 | */ | 844 | */ |
| 805 | void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); | 845 | void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); |
| 846 | bool (*gui_idle)(struct radeon_device *rdev); | ||
| 847 | /* power management */ | ||
| 848 | void (*pm_misc)(struct radeon_device *rdev); | ||
| 849 | void (*pm_prepare)(struct radeon_device *rdev); | ||
| 850 | void (*pm_finish)(struct radeon_device *rdev); | ||
| 851 | void (*pm_init_profile)(struct radeon_device *rdev); | ||
| 852 | void (*pm_get_dynpm_state)(struct radeon_device *rdev); | ||
| 806 | }; | 853 | }; |
| 807 | 854 | ||
| 808 | /* | 855 | /* |
| @@ -1009,6 +1056,7 @@ struct radeon_device { | |||
| 1009 | struct work_struct hotplug_work; | 1056 | struct work_struct hotplug_work; |
| 1010 | int num_crtc; /* number of crtcs */ | 1057 | int num_crtc; /* number of crtcs */ |
| 1011 | struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ | 1058 | struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ |
| 1059 | struct mutex vram_mutex; | ||
| 1012 | 1060 | ||
| 1013 | /* audio stuff */ | 1061 | /* audio stuff */ |
| 1014 | struct timer_list audio_timer; | 1062 | struct timer_list audio_timer; |
| @@ -1019,6 +1067,7 @@ struct radeon_device { | |||
| 1019 | uint8_t audio_category_code; | 1067 | uint8_t audio_category_code; |
| 1020 | 1068 | ||
| 1021 | bool powered_down; | 1069 | bool powered_down; |
| 1070 | struct notifier_block acpi_nb; | ||
| 1022 | }; | 1071 | }; |
| 1023 | 1072 | ||
| 1024 | int radeon_device_init(struct radeon_device *rdev, | 1073 | int radeon_device_init(struct radeon_device *rdev, |
| @@ -1209,6 +1258,12 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
| 1209 | #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) | 1258 | #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) |
| 1210 | #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) | 1259 | #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) |
| 1211 | #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) | 1260 | #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) |
| 1261 | #define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev)) | ||
| 1262 | #define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev)) | ||
| 1263 | #define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev)) | ||
| 1264 | #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) | ||
| 1265 | #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) | ||
| 1266 | #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) | ||
| 1212 | 1267 | ||
| 1213 | /* Common functions */ | 1268 | /* Common functions */ |
| 1214 | /* AGP */ | 1269 | /* AGP */ |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index f835333c1b69..e57df08d4aeb 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -165,6 +165,12 @@ static struct radeon_asic r100_asic = { | |||
| 165 | .hpd_sense = &r100_hpd_sense, | 165 | .hpd_sense = &r100_hpd_sense, |
| 166 | .hpd_set_polarity = &r100_hpd_set_polarity, | 166 | .hpd_set_polarity = &r100_hpd_set_polarity, |
| 167 | .ioctl_wait_idle = NULL, | 167 | .ioctl_wait_idle = NULL, |
| 168 | .gui_idle = &r100_gui_idle, | ||
| 169 | .pm_misc = &r100_pm_misc, | ||
| 170 | .pm_prepare = &r100_pm_prepare, | ||
| 171 | .pm_finish = &r100_pm_finish, | ||
| 172 | .pm_init_profile = &r100_pm_init_profile, | ||
| 173 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | ||
| 168 | }; | 174 | }; |
| 169 | 175 | ||
| 170 | static struct radeon_asic r200_asic = { | 176 | static struct radeon_asic r200_asic = { |
| @@ -203,6 +209,12 @@ static struct radeon_asic r200_asic = { | |||
| 203 | .hpd_sense = &r100_hpd_sense, | 209 | .hpd_sense = &r100_hpd_sense, |
| 204 | .hpd_set_polarity = &r100_hpd_set_polarity, | 210 | .hpd_set_polarity = &r100_hpd_set_polarity, |
| 205 | .ioctl_wait_idle = NULL, | 211 | .ioctl_wait_idle = NULL, |
| 212 | .gui_idle = &r100_gui_idle, | ||
| 213 | .pm_misc = &r100_pm_misc, | ||
| 214 | .pm_prepare = &r100_pm_prepare, | ||
| 215 | .pm_finish = &r100_pm_finish, | ||
| 216 | .pm_init_profile = &r100_pm_init_profile, | ||
| 217 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | ||
| 206 | }; | 218 | }; |
| 207 | 219 | ||
| 208 | static struct radeon_asic r300_asic = { | 220 | static struct radeon_asic r300_asic = { |
| @@ -242,6 +254,12 @@ static struct radeon_asic r300_asic = { | |||
| 242 | .hpd_sense = &r100_hpd_sense, | 254 | .hpd_sense = &r100_hpd_sense, |
| 243 | .hpd_set_polarity = &r100_hpd_set_polarity, | 255 | .hpd_set_polarity = &r100_hpd_set_polarity, |
| 244 | .ioctl_wait_idle = NULL, | 256 | .ioctl_wait_idle = NULL, |
| 257 | .gui_idle = &r100_gui_idle, | ||
| 258 | .pm_misc = &r100_pm_misc, | ||
| 259 | .pm_prepare = &r100_pm_prepare, | ||
| 260 | .pm_finish = &r100_pm_finish, | ||
| 261 | .pm_init_profile = &r100_pm_init_profile, | ||
| 262 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | ||
| 245 | }; | 263 | }; |
| 246 | 264 | ||
| 247 | static struct radeon_asic r300_asic_pcie = { | 265 | static struct radeon_asic r300_asic_pcie = { |
| @@ -280,6 +298,12 @@ static struct radeon_asic r300_asic_pcie = { | |||
| 280 | .hpd_sense = &r100_hpd_sense, | 298 | .hpd_sense = &r100_hpd_sense, |
| 281 | .hpd_set_polarity = &r100_hpd_set_polarity, | 299 | .hpd_set_polarity = &r100_hpd_set_polarity, |
| 282 | .ioctl_wait_idle = NULL, | 300 | .ioctl_wait_idle = NULL, |
| 301 | .gui_idle = &r100_gui_idle, | ||
| 302 | .pm_misc = &r100_pm_misc, | ||
| 303 | .pm_prepare = &r100_pm_prepare, | ||
| 304 | .pm_finish = &r100_pm_finish, | ||
| 305 | .pm_init_profile = &r100_pm_init_profile, | ||
| 306 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | ||
| 283 | }; | 307 | }; |
| 284 | 308 | ||
| 285 | static struct radeon_asic r420_asic = { | 309 | static struct radeon_asic r420_asic = { |
| @@ -319,6 +343,12 @@ static struct radeon_asic r420_asic = { | |||
| 319 | .hpd_sense = &r100_hpd_sense, | 343 | .hpd_sense = &r100_hpd_sense, |
| 320 | .hpd_set_polarity = &r100_hpd_set_polarity, | 344 | .hpd_set_polarity = &r100_hpd_set_polarity, |
| 321 | .ioctl_wait_idle = NULL, | 345 | .ioctl_wait_idle = NULL, |
| 346 | .gui_idle = &r100_gui_idle, | ||
| 347 | .pm_misc = &r100_pm_misc, | ||
| 348 | .pm_prepare = &r100_pm_prepare, | ||
| 349 | .pm_finish = &r100_pm_finish, | ||
| 350 | .pm_init_profile = &r420_pm_init_profile, | ||
| 351 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | ||
| 322 | }; | 352 | }; |
| 323 | 353 | ||
| 324 | static struct radeon_asic rs400_asic = { | 354 | static struct radeon_asic rs400_asic = { |
| @@ -358,6 +388,12 @@ static struct radeon_asic rs400_asic = { | |||
| 358 | .hpd_sense = &r100_hpd_sense, | 388 | .hpd_sense = &r100_hpd_sense, |
| 359 | .hpd_set_polarity = &r100_hpd_set_polarity, | 389 | .hpd_set_polarity = &r100_hpd_set_polarity, |
| 360 | .ioctl_wait_idle = NULL, | 390 | .ioctl_wait_idle = NULL, |
| 391 | .gui_idle = &r100_gui_idle, | ||
| 392 | .pm_misc = &r100_pm_misc, | ||
| 393 | .pm_prepare = &r100_pm_prepare, | ||
| 394 | .pm_finish = &r100_pm_finish, | ||
| 395 | .pm_init_profile = &r100_pm_init_profile, | ||
| 396 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | ||
| 361 | }; | 397 | }; |
| 362 | 398 | ||
| 363 | static struct radeon_asic rs600_asic = { | 399 | static struct radeon_asic rs600_asic = { |
| @@ -397,6 +433,12 @@ static struct radeon_asic rs600_asic = { | |||
| 397 | .hpd_sense = &rs600_hpd_sense, | 433 | .hpd_sense = &rs600_hpd_sense, |
| 398 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 434 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
| 399 | .ioctl_wait_idle = NULL, | 435 | .ioctl_wait_idle = NULL, |
| 436 | .gui_idle = &r100_gui_idle, | ||
| 437 | .pm_misc = &rs600_pm_misc, | ||
| 438 | .pm_prepare = &rs600_pm_prepare, | ||
| 439 | .pm_finish = &rs600_pm_finish, | ||
| 440 | .pm_init_profile = &r420_pm_init_profile, | ||
| 441 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | ||
| 400 | }; | 442 | }; |
| 401 | 443 | ||
| 402 | static struct radeon_asic rs690_asic = { | 444 | static struct radeon_asic rs690_asic = { |
| @@ -436,6 +478,12 @@ static struct radeon_asic rs690_asic = { | |||
| 436 | .hpd_sense = &rs600_hpd_sense, | 478 | .hpd_sense = &rs600_hpd_sense, |
| 437 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 479 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
| 438 | .ioctl_wait_idle = NULL, | 480 | .ioctl_wait_idle = NULL, |
| 481 | .gui_idle = &r100_gui_idle, | ||
| 482 | .pm_misc = &rs600_pm_misc, | ||
| 483 | .pm_prepare = &rs600_pm_prepare, | ||
| 484 | .pm_finish = &rs600_pm_finish, | ||
| 485 | .pm_init_profile = &r420_pm_init_profile, | ||
| 486 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | ||
| 439 | }; | 487 | }; |
| 440 | 488 | ||
| 441 | static struct radeon_asic rv515_asic = { | 489 | static struct radeon_asic rv515_asic = { |
| @@ -475,6 +523,12 @@ static struct radeon_asic rv515_asic = { | |||
| 475 | .hpd_sense = &rs600_hpd_sense, | 523 | .hpd_sense = &rs600_hpd_sense, |
| 476 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 524 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
| 477 | .ioctl_wait_idle = NULL, | 525 | .ioctl_wait_idle = NULL, |
| 526 | .gui_idle = &r100_gui_idle, | ||
| 527 | .pm_misc = &rs600_pm_misc, | ||
| 528 | .pm_prepare = &rs600_pm_prepare, | ||
| 529 | .pm_finish = &rs600_pm_finish, | ||
| 530 | .pm_init_profile = &r420_pm_init_profile, | ||
| 531 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | ||
| 478 | }; | 532 | }; |
| 479 | 533 | ||
| 480 | static struct radeon_asic r520_asic = { | 534 | static struct radeon_asic r520_asic = { |
| @@ -514,6 +568,12 @@ static struct radeon_asic r520_asic = { | |||
| 514 | .hpd_sense = &rs600_hpd_sense, | 568 | .hpd_sense = &rs600_hpd_sense, |
| 515 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 569 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
| 516 | .ioctl_wait_idle = NULL, | 570 | .ioctl_wait_idle = NULL, |
| 571 | .gui_idle = &r100_gui_idle, | ||
| 572 | .pm_misc = &rs600_pm_misc, | ||
| 573 | .pm_prepare = &rs600_pm_prepare, | ||
| 574 | .pm_finish = &rs600_pm_finish, | ||
| 575 | .pm_init_profile = &r420_pm_init_profile, | ||
| 576 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | ||
| 517 | }; | 577 | }; |
| 518 | 578 | ||
| 519 | static struct radeon_asic r600_asic = { | 579 | static struct radeon_asic r600_asic = { |
| @@ -552,6 +612,12 @@ static struct radeon_asic r600_asic = { | |||
| 552 | .hpd_sense = &r600_hpd_sense, | 612 | .hpd_sense = &r600_hpd_sense, |
| 553 | .hpd_set_polarity = &r600_hpd_set_polarity, | 613 | .hpd_set_polarity = &r600_hpd_set_polarity, |
| 554 | .ioctl_wait_idle = r600_ioctl_wait_idle, | 614 | .ioctl_wait_idle = r600_ioctl_wait_idle, |
| 615 | .gui_idle = &r600_gui_idle, | ||
| 616 | .pm_misc = &r600_pm_misc, | ||
| 617 | .pm_prepare = &rs600_pm_prepare, | ||
| 618 | .pm_finish = &rs600_pm_finish, | ||
| 619 | .pm_init_profile = &r600_pm_init_profile, | ||
| 620 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | ||
| 555 | }; | 621 | }; |
| 556 | 622 | ||
| 557 | static struct radeon_asic rs780_asic = { | 623 | static struct radeon_asic rs780_asic = { |
| @@ -590,6 +656,12 @@ static struct radeon_asic rs780_asic = { | |||
| 590 | .hpd_sense = &r600_hpd_sense, | 656 | .hpd_sense = &r600_hpd_sense, |
| 591 | .hpd_set_polarity = &r600_hpd_set_polarity, | 657 | .hpd_set_polarity = &r600_hpd_set_polarity, |
| 592 | .ioctl_wait_idle = r600_ioctl_wait_idle, | 658 | .ioctl_wait_idle = r600_ioctl_wait_idle, |
| 659 | .gui_idle = &r600_gui_idle, | ||
| 660 | .pm_misc = &r600_pm_misc, | ||
| 661 | .pm_prepare = &rs600_pm_prepare, | ||
| 662 | .pm_finish = &rs600_pm_finish, | ||
| 663 | .pm_init_profile = &rs780_pm_init_profile, | ||
| 664 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | ||
| 593 | }; | 665 | }; |
| 594 | 666 | ||
| 595 | static struct radeon_asic rv770_asic = { | 667 | static struct radeon_asic rv770_asic = { |
| @@ -628,6 +700,12 @@ static struct radeon_asic rv770_asic = { | |||
| 628 | .hpd_sense = &r600_hpd_sense, | 700 | .hpd_sense = &r600_hpd_sense, |
| 629 | .hpd_set_polarity = &r600_hpd_set_polarity, | 701 | .hpd_set_polarity = &r600_hpd_set_polarity, |
| 630 | .ioctl_wait_idle = r600_ioctl_wait_idle, | 702 | .ioctl_wait_idle = r600_ioctl_wait_idle, |
| 703 | .gui_idle = &r600_gui_idle, | ||
| 704 | .pm_misc = &rv770_pm_misc, | ||
| 705 | .pm_prepare = &rs600_pm_prepare, | ||
| 706 | .pm_finish = &rs600_pm_finish, | ||
| 707 | .pm_init_profile = &r600_pm_init_profile, | ||
| 708 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | ||
| 631 | }; | 709 | }; |
| 632 | 710 | ||
| 633 | static struct radeon_asic evergreen_asic = { | 711 | static struct radeon_asic evergreen_asic = { |
| @@ -664,6 +742,12 @@ static struct radeon_asic evergreen_asic = { | |||
| 664 | .hpd_fini = &evergreen_hpd_fini, | 742 | .hpd_fini = &evergreen_hpd_fini, |
| 665 | .hpd_sense = &evergreen_hpd_sense, | 743 | .hpd_sense = &evergreen_hpd_sense, |
| 666 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | 744 | .hpd_set_polarity = &evergreen_hpd_set_polarity, |
| 745 | .gui_idle = &r600_gui_idle, | ||
| 746 | .pm_misc = &evergreen_pm_misc, | ||
| 747 | .pm_prepare = &evergreen_pm_prepare, | ||
| 748 | .pm_finish = &evergreen_pm_finish, | ||
| 749 | .pm_init_profile = &r600_pm_init_profile, | ||
| 750 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | ||
| 667 | }; | 751 | }; |
| 668 | 752 | ||
| 669 | int radeon_asic_init(struct radeon_device *rdev) | 753 | int radeon_asic_init(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index ef2c7ba1bdc9..5c40a3dfaca2 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -126,6 +126,13 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, | |||
| 126 | void r100_enable_bm(struct radeon_device *rdev); | 126 | void r100_enable_bm(struct radeon_device *rdev); |
| 127 | void r100_set_common_regs(struct radeon_device *rdev); | 127 | void r100_set_common_regs(struct radeon_device *rdev); |
| 128 | void r100_bm_disable(struct radeon_device *rdev); | 128 | void r100_bm_disable(struct radeon_device *rdev); |
| 129 | extern bool r100_gui_idle(struct radeon_device *rdev); | ||
| 130 | extern void r100_pm_misc(struct radeon_device *rdev); | ||
| 131 | extern void r100_pm_prepare(struct radeon_device *rdev); | ||
| 132 | extern void r100_pm_finish(struct radeon_device *rdev); | ||
| 133 | extern void r100_pm_init_profile(struct radeon_device *rdev); | ||
| 134 | extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); | ||
| 135 | |||
| 129 | /* | 136 | /* |
| 130 | * r200,rv250,rs300,rv280 | 137 | * r200,rv250,rs300,rv280 |
| 131 | */ | 138 | */ |
| @@ -162,6 +169,7 @@ extern int r420_init(struct radeon_device *rdev); | |||
| 162 | extern void r420_fini(struct radeon_device *rdev); | 169 | extern void r420_fini(struct radeon_device *rdev); |
| 163 | extern int r420_suspend(struct radeon_device *rdev); | 170 | extern int r420_suspend(struct radeon_device *rdev); |
| 164 | extern int r420_resume(struct radeon_device *rdev); | 171 | extern int r420_resume(struct radeon_device *rdev); |
| 172 | extern void r420_pm_init_profile(struct radeon_device *rdev); | ||
| 165 | 173 | ||
| 166 | /* | 174 | /* |
| 167 | * rs400,rs480 | 175 | * rs400,rs480 |
| @@ -196,6 +204,9 @@ void rs600_hpd_fini(struct radeon_device *rdev); | |||
| 196 | bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 204 | bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
| 197 | void rs600_hpd_set_polarity(struct radeon_device *rdev, | 205 | void rs600_hpd_set_polarity(struct radeon_device *rdev, |
| 198 | enum radeon_hpd_id hpd); | 206 | enum radeon_hpd_id hpd); |
| 207 | extern void rs600_pm_misc(struct radeon_device *rdev); | ||
| 208 | extern void rs600_pm_prepare(struct radeon_device *rdev); | ||
| 209 | extern void rs600_pm_finish(struct radeon_device *rdev); | ||
| 199 | 210 | ||
| 200 | /* | 211 | /* |
| 201 | * rs690,rs740 | 212 | * rs690,rs740 |
| @@ -269,6 +280,11 @@ bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | |||
| 269 | void r600_hpd_set_polarity(struct radeon_device *rdev, | 280 | void r600_hpd_set_polarity(struct radeon_device *rdev, |
| 270 | enum radeon_hpd_id hpd); | 281 | enum radeon_hpd_id hpd); |
| 271 | extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); | 282 | extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); |
| 283 | extern bool r600_gui_idle(struct radeon_device *rdev); | ||
| 284 | extern void r600_pm_misc(struct radeon_device *rdev); | ||
| 285 | extern void r600_pm_init_profile(struct radeon_device *rdev); | ||
| 286 | extern void rs780_pm_init_profile(struct radeon_device *rdev); | ||
| 287 | extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); | ||
| 272 | 288 | ||
| 273 | /* | 289 | /* |
| 274 | * rv770,rv730,rv710,rv740 | 290 | * rv770,rv730,rv710,rv740 |
| @@ -277,6 +293,7 @@ int rv770_init(struct radeon_device *rdev); | |||
| 277 | void rv770_fini(struct radeon_device *rdev); | 293 | void rv770_fini(struct radeon_device *rdev); |
| 278 | int rv770_suspend(struct radeon_device *rdev); | 294 | int rv770_suspend(struct radeon_device *rdev); |
| 279 | int rv770_resume(struct radeon_device *rdev); | 295 | int rv770_resume(struct radeon_device *rdev); |
| 296 | extern void rv770_pm_misc(struct radeon_device *rdev); | ||
| 280 | 297 | ||
| 281 | /* | 298 | /* |
| 282 | * evergreen | 299 | * evergreen |
| @@ -297,5 +314,8 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev, | |||
| 297 | u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc); | 314 | u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc); |
| 298 | int evergreen_irq_set(struct radeon_device *rdev); | 315 | int evergreen_irq_set(struct radeon_device *rdev); |
| 299 | int evergreen_irq_process(struct radeon_device *rdev); | 316 | int evergreen_irq_process(struct radeon_device *rdev); |
| 317 | extern void evergreen_pm_misc(struct radeon_device *rdev); | ||
| 318 | extern void evergreen_pm_prepare(struct radeon_device *rdev); | ||
| 319 | extern void evergreen_pm_finish(struct radeon_device *rdev); | ||
| 300 | 320 | ||
| 301 | #endif | 321 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1d05debdd604..1c24dad0ac39 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -1174,7 +1174,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
| 1174 | lvds->native_mode.vtotal = lvds->native_mode.vdisplay + | 1174 | lvds->native_mode.vtotal = lvds->native_mode.vdisplay + |
| 1175 | le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); | 1175 | le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); |
| 1176 | lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + | 1176 | lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + |
| 1177 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); | 1177 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); |
| 1178 | lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + | 1178 | lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + |
| 1179 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); | 1179 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); |
| 1180 | lvds->panel_pwr_delay = | 1180 | lvds->panel_pwr_delay = |
| @@ -1442,29 +1442,29 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) | |||
| 1442 | 1442 | ||
| 1443 | static const char *thermal_controller_names[] = { | 1443 | static const char *thermal_controller_names[] = { |
| 1444 | "NONE", | 1444 | "NONE", |
| 1445 | "LM63", | 1445 | "lm63", |
| 1446 | "ADM1032", | 1446 | "adm1032", |
| 1447 | "ADM1030", | 1447 | "adm1030", |
| 1448 | "MUA6649", | 1448 | "max6649", |
| 1449 | "LM64", | 1449 | "lm64", |
| 1450 | "F75375", | 1450 | "f75375", |
| 1451 | "ASC7512", | 1451 | "asc7xxx", |
| 1452 | }; | 1452 | }; |
| 1453 | 1453 | ||
| 1454 | static const char *pp_lib_thermal_controller_names[] = { | 1454 | static const char *pp_lib_thermal_controller_names[] = { |
| 1455 | "NONE", | 1455 | "NONE", |
| 1456 | "LM63", | 1456 | "lm63", |
| 1457 | "ADM1032", | 1457 | "adm1032", |
| 1458 | "ADM1030", | 1458 | "adm1030", |
| 1459 | "MUA6649", | 1459 | "max6649", |
| 1460 | "LM64", | 1460 | "lm64", |
| 1461 | "F75375", | 1461 | "f75375", |
| 1462 | "RV6xx", | 1462 | "RV6xx", |
| 1463 | "RV770", | 1463 | "RV770", |
| 1464 | "ADT7473", | 1464 | "adt7473", |
| 1465 | "External GPIO", | 1465 | "External GPIO", |
| 1466 | "Evergreen", | 1466 | "Evergreen", |
| 1467 | "ADT7473 with internal", | 1467 | "adt7473 with internal", |
| 1468 | 1468 | ||
| 1469 | }; | 1469 | }; |
| 1470 | 1470 | ||
| @@ -1489,7 +1489,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1489 | int state_index = 0, mode_index = 0; | 1489 | int state_index = 0, mode_index = 0; |
| 1490 | struct radeon_i2c_bus_rec i2c_bus; | 1490 | struct radeon_i2c_bus_rec i2c_bus; |
| 1491 | 1491 | ||
| 1492 | rdev->pm.default_power_state = NULL; | 1492 | rdev->pm.default_power_state_index = -1; |
| 1493 | 1493 | ||
| 1494 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | 1494 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
| 1495 | &frev, &crev, &data_offset)) { | 1495 | &frev, &crev, &data_offset)) { |
| @@ -1502,10 +1502,19 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1502 | power_info->info.ucOverdriveControllerAddress >> 1); | 1502 | power_info->info.ucOverdriveControllerAddress >> 1); |
| 1503 | i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); | 1503 | i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); |
| 1504 | rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); | 1504 | rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); |
| 1505 | if (rdev->pm.i2c_bus) { | ||
| 1506 | struct i2c_board_info info = { }; | ||
| 1507 | const char *name = thermal_controller_names[power_info->info. | ||
| 1508 | ucOverdriveThermalController]; | ||
| 1509 | info.addr = power_info->info.ucOverdriveControllerAddress >> 1; | ||
| 1510 | strlcpy(info.type, name, sizeof(info.type)); | ||
| 1511 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | ||
| 1512 | } | ||
| 1505 | } | 1513 | } |
| 1506 | num_modes = power_info->info.ucNumOfPowerModeEntries; | 1514 | num_modes = power_info->info.ucNumOfPowerModeEntries; |
| 1507 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) | 1515 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) |
| 1508 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; | 1516 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; |
| 1517 | /* last mode is usually default, array is low to high */ | ||
| 1509 | for (i = 0; i < num_modes; i++) { | 1518 | for (i = 0; i < num_modes; i++) { |
| 1510 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 1519 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
| 1511 | switch (frev) { | 1520 | switch (frev) { |
| @@ -1519,13 +1528,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1519 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | 1528 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || |
| 1520 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | 1529 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) |
| 1521 | continue; | 1530 | continue; |
| 1522 | /* skip overclock modes for now */ | 1531 | rdev->pm.power_state[state_index].pcie_lanes = |
| 1523 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk > | ||
| 1524 | rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || | ||
| 1525 | (rdev->pm.power_state[state_index].clock_info[0].sclk > | ||
| 1526 | rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) | ||
| 1527 | continue; | ||
| 1528 | rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = | ||
| 1529 | power_info->info.asPowerPlayInfo[i].ucNumPciELanes; | 1532 | power_info->info.asPowerPlayInfo[i].ucNumPciELanes; |
| 1530 | misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); | 1533 | misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); |
| 1531 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { | 1534 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { |
| @@ -1546,6 +1549,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1546 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | 1549 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = |
| 1547 | power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; | 1550 | power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; |
| 1548 | } | 1551 | } |
| 1552 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 1553 | rdev->pm.power_state[state_index].misc = misc; | ||
| 1549 | /* order matters! */ | 1554 | /* order matters! */ |
| 1550 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) | 1555 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) |
| 1551 | rdev->pm.power_state[state_index].type = | 1556 | rdev->pm.power_state[state_index].type = |
| @@ -1559,15 +1564,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1559 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) | 1564 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) |
| 1560 | rdev->pm.power_state[state_index].type = | 1565 | rdev->pm.power_state[state_index].type = |
| 1561 | POWER_STATE_TYPE_BALANCED; | 1566 | POWER_STATE_TYPE_BALANCED; |
| 1562 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) | 1567 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { |
| 1563 | rdev->pm.power_state[state_index].type = | 1568 | rdev->pm.power_state[state_index].type = |
| 1564 | POWER_STATE_TYPE_PERFORMANCE; | 1569 | POWER_STATE_TYPE_PERFORMANCE; |
| 1570 | rdev->pm.power_state[state_index].flags &= | ||
| 1571 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 1572 | } | ||
| 1565 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { | 1573 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { |
| 1566 | rdev->pm.power_state[state_index].type = | 1574 | rdev->pm.power_state[state_index].type = |
| 1567 | POWER_STATE_TYPE_DEFAULT; | 1575 | POWER_STATE_TYPE_DEFAULT; |
| 1568 | rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; | 1576 | rdev->pm.default_power_state_index = state_index; |
| 1569 | rdev->pm.power_state[state_index].default_clock_mode = | 1577 | rdev->pm.power_state[state_index].default_clock_mode = |
| 1570 | &rdev->pm.power_state[state_index].clock_info[0]; | 1578 | &rdev->pm.power_state[state_index].clock_info[0]; |
| 1579 | rdev->pm.power_state[state_index].flags &= | ||
| 1580 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 1581 | } else if (state_index == 0) { | ||
| 1582 | rdev->pm.power_state[state_index].clock_info[0].flags |= | ||
| 1583 | RADEON_PM_MODE_NO_DISPLAY; | ||
| 1571 | } | 1584 | } |
| 1572 | state_index++; | 1585 | state_index++; |
| 1573 | break; | 1586 | break; |
| @@ -1581,13 +1594,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1581 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | 1594 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || |
| 1582 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | 1595 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) |
| 1583 | continue; | 1596 | continue; |
| 1584 | /* skip overclock modes for now */ | 1597 | rdev->pm.power_state[state_index].pcie_lanes = |
| 1585 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk > | ||
| 1586 | rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || | ||
| 1587 | (rdev->pm.power_state[state_index].clock_info[0].sclk > | ||
| 1588 | rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) | ||
| 1589 | continue; | ||
| 1590 | rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = | ||
| 1591 | power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; | 1598 | power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; |
| 1592 | misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); | 1599 | misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); |
| 1593 | misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); | 1600 | misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); |
| @@ -1609,6 +1616,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1609 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | 1616 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = |
| 1610 | power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; | 1617 | power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; |
| 1611 | } | 1618 | } |
| 1619 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 1620 | rdev->pm.power_state[state_index].misc = misc; | ||
| 1621 | rdev->pm.power_state[state_index].misc2 = misc2; | ||
| 1612 | /* order matters! */ | 1622 | /* order matters! */ |
| 1613 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) | 1623 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) |
| 1614 | rdev->pm.power_state[state_index].type = | 1624 | rdev->pm.power_state[state_index].type = |
| @@ -1622,18 +1632,29 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1622 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) | 1632 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) |
| 1623 | rdev->pm.power_state[state_index].type = | 1633 | rdev->pm.power_state[state_index].type = |
| 1624 | POWER_STATE_TYPE_BALANCED; | 1634 | POWER_STATE_TYPE_BALANCED; |
| 1625 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) | 1635 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { |
| 1626 | rdev->pm.power_state[state_index].type = | 1636 | rdev->pm.power_state[state_index].type = |
| 1627 | POWER_STATE_TYPE_PERFORMANCE; | 1637 | POWER_STATE_TYPE_PERFORMANCE; |
| 1638 | rdev->pm.power_state[state_index].flags &= | ||
| 1639 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 1640 | } | ||
| 1628 | if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) | 1641 | if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) |
| 1629 | rdev->pm.power_state[state_index].type = | 1642 | rdev->pm.power_state[state_index].type = |
| 1630 | POWER_STATE_TYPE_BALANCED; | 1643 | POWER_STATE_TYPE_BALANCED; |
| 1644 | if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT) | ||
| 1645 | rdev->pm.power_state[state_index].flags &= | ||
| 1646 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 1631 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { | 1647 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { |
| 1632 | rdev->pm.power_state[state_index].type = | 1648 | rdev->pm.power_state[state_index].type = |
| 1633 | POWER_STATE_TYPE_DEFAULT; | 1649 | POWER_STATE_TYPE_DEFAULT; |
| 1634 | rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; | 1650 | rdev->pm.default_power_state_index = state_index; |
| 1635 | rdev->pm.power_state[state_index].default_clock_mode = | 1651 | rdev->pm.power_state[state_index].default_clock_mode = |
| 1636 | &rdev->pm.power_state[state_index].clock_info[0]; | 1652 | &rdev->pm.power_state[state_index].clock_info[0]; |
| 1653 | rdev->pm.power_state[state_index].flags &= | ||
| 1654 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 1655 | } else if (state_index == 0) { | ||
| 1656 | rdev->pm.power_state[state_index].clock_info[0].flags |= | ||
| 1657 | RADEON_PM_MODE_NO_DISPLAY; | ||
| 1637 | } | 1658 | } |
| 1638 | state_index++; | 1659 | state_index++; |
| 1639 | break; | 1660 | break; |
| @@ -1647,13 +1668,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1647 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | 1668 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || |
| 1648 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | 1669 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) |
| 1649 | continue; | 1670 | continue; |
| 1650 | /* skip overclock modes for now */ | 1671 | rdev->pm.power_state[state_index].pcie_lanes = |
| 1651 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk > | ||
| 1652 | rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || | ||
| 1653 | (rdev->pm.power_state[state_index].clock_info[0].sclk > | ||
| 1654 | rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) | ||
| 1655 | continue; | ||
| 1656 | rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = | ||
| 1657 | power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; | 1672 | power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; |
| 1658 | misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); | 1673 | misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); |
| 1659 | misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); | 1674 | misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); |
| @@ -1681,6 +1696,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1681 | power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; | 1696 | power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; |
| 1682 | } | 1697 | } |
| 1683 | } | 1698 | } |
| 1699 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 1700 | rdev->pm.power_state[state_index].misc = misc; | ||
| 1701 | rdev->pm.power_state[state_index].misc2 = misc2; | ||
| 1684 | /* order matters! */ | 1702 | /* order matters! */ |
| 1685 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) | 1703 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) |
| 1686 | rdev->pm.power_state[state_index].type = | 1704 | rdev->pm.power_state[state_index].type = |
| @@ -1694,48 +1712,76 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1694 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) | 1712 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) |
| 1695 | rdev->pm.power_state[state_index].type = | 1713 | rdev->pm.power_state[state_index].type = |
| 1696 | POWER_STATE_TYPE_BALANCED; | 1714 | POWER_STATE_TYPE_BALANCED; |
| 1697 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) | 1715 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { |
| 1698 | rdev->pm.power_state[state_index].type = | 1716 | rdev->pm.power_state[state_index].type = |
| 1699 | POWER_STATE_TYPE_PERFORMANCE; | 1717 | POWER_STATE_TYPE_PERFORMANCE; |
| 1718 | rdev->pm.power_state[state_index].flags &= | ||
| 1719 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 1720 | } | ||
| 1700 | if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) | 1721 | if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) |
| 1701 | rdev->pm.power_state[state_index].type = | 1722 | rdev->pm.power_state[state_index].type = |
| 1702 | POWER_STATE_TYPE_BALANCED; | 1723 | POWER_STATE_TYPE_BALANCED; |
| 1703 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { | 1724 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { |
| 1704 | rdev->pm.power_state[state_index].type = | 1725 | rdev->pm.power_state[state_index].type = |
| 1705 | POWER_STATE_TYPE_DEFAULT; | 1726 | POWER_STATE_TYPE_DEFAULT; |
| 1706 | rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; | 1727 | rdev->pm.default_power_state_index = state_index; |
| 1707 | rdev->pm.power_state[state_index].default_clock_mode = | 1728 | rdev->pm.power_state[state_index].default_clock_mode = |
| 1708 | &rdev->pm.power_state[state_index].clock_info[0]; | 1729 | &rdev->pm.power_state[state_index].clock_info[0]; |
| 1730 | } else if (state_index == 0) { | ||
| 1731 | rdev->pm.power_state[state_index].clock_info[0].flags |= | ||
| 1732 | RADEON_PM_MODE_NO_DISPLAY; | ||
| 1709 | } | 1733 | } |
| 1710 | state_index++; | 1734 | state_index++; |
| 1711 | break; | 1735 | break; |
| 1712 | } | 1736 | } |
| 1713 | } | 1737 | } |
| 1738 | /* last mode is usually default */ | ||
| 1739 | if (rdev->pm.default_power_state_index == -1) { | ||
| 1740 | rdev->pm.power_state[state_index - 1].type = | ||
| 1741 | POWER_STATE_TYPE_DEFAULT; | ||
| 1742 | rdev->pm.default_power_state_index = state_index - 1; | ||
| 1743 | rdev->pm.power_state[state_index - 1].default_clock_mode = | ||
| 1744 | &rdev->pm.power_state[state_index - 1].clock_info[0]; | ||
| 1745 | rdev->pm.power_state[state_index].flags &= | ||
| 1746 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 1747 | rdev->pm.power_state[state_index].misc = 0; | ||
| 1748 | rdev->pm.power_state[state_index].misc2 = 0; | ||
| 1749 | } | ||
| 1714 | } else { | 1750 | } else { |
| 1715 | /* add the i2c bus for thermal/fan chip */ | 1751 | /* add the i2c bus for thermal/fan chip */ |
| 1716 | /* no support for internal controller yet */ | 1752 | /* no support for internal controller yet */ |
| 1717 | if (power_info->info_4.sThermalController.ucType > 0) { | 1753 | ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; |
| 1718 | if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || | 1754 | if (controller->ucType > 0) { |
| 1719 | (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770) || | 1755 | if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || |
| 1720 | (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) { | 1756 | (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || |
| 1757 | (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) { | ||
| 1721 | DRM_INFO("Internal thermal controller %s fan control\n", | 1758 | DRM_INFO("Internal thermal controller %s fan control\n", |
| 1722 | (power_info->info_4.sThermalController.ucFanParameters & | 1759 | (controller->ucFanParameters & |
| 1723 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | 1760 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); |
| 1724 | } else if ((power_info->info_4.sThermalController.ucType == | 1761 | } else if ((controller->ucType == |
| 1725 | ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || | 1762 | ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || |
| 1726 | (power_info->info_4.sThermalController.ucType == | 1763 | (controller->ucType == |
| 1727 | ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { | 1764 | ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { |
| 1728 | DRM_INFO("Special thermal controller config\n"); | 1765 | DRM_INFO("Special thermal controller config\n"); |
| 1729 | } else { | 1766 | } else { |
| 1730 | DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", | 1767 | DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", |
| 1731 | pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType], | 1768 | pp_lib_thermal_controller_names[controller->ucType], |
| 1732 | power_info->info_4.sThermalController.ucI2cAddress >> 1, | 1769 | controller->ucI2cAddress >> 1, |
| 1733 | (power_info->info_4.sThermalController.ucFanParameters & | 1770 | (controller->ucFanParameters & |
| 1734 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | 1771 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); |
| 1735 | i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine); | 1772 | i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); |
| 1736 | rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); | 1773 | rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); |
| 1774 | if (rdev->pm.i2c_bus) { | ||
| 1775 | struct i2c_board_info info = { }; | ||
| 1776 | const char *name = pp_lib_thermal_controller_names[controller->ucType]; | ||
| 1777 | info.addr = controller->ucI2cAddress >> 1; | ||
| 1778 | strlcpy(info.type, name, sizeof(info.type)); | ||
| 1779 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | ||
| 1780 | } | ||
| 1781 | |||
| 1737 | } | 1782 | } |
| 1738 | } | 1783 | } |
| 1784 | /* first mode is usually default, followed by low to high */ | ||
| 1739 | for (i = 0; i < power_info->info_4.ucNumStates; i++) { | 1785 | for (i = 0; i < power_info->info_4.ucNumStates; i++) { |
| 1740 | mode_index = 0; | 1786 | mode_index = 0; |
| 1741 | power_state = (struct _ATOM_PPLIB_STATE *) | 1787 | power_state = (struct _ATOM_PPLIB_STATE *) |
| @@ -1764,10 +1810,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1764 | /* skip invalid modes */ | 1810 | /* skip invalid modes */ |
| 1765 | if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) | 1811 | if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) |
| 1766 | continue; | 1812 | continue; |
| 1767 | /* skip overclock modes for now */ | ||
| 1768 | if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > | ||
| 1769 | rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN) | ||
| 1770 | continue; | ||
| 1771 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | 1813 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = |
| 1772 | VOLTAGE_SW; | 1814 | VOLTAGE_SW; |
| 1773 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 1815 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = |
| @@ -1791,12 +1833,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1791 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || | 1833 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || |
| 1792 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) | 1834 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) |
| 1793 | continue; | 1835 | continue; |
| 1794 | /* skip overclock modes for now */ | ||
| 1795 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk > | ||
| 1796 | rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || | ||
| 1797 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > | ||
| 1798 | rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) | ||
| 1799 | continue; | ||
| 1800 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | 1836 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = |
| 1801 | VOLTAGE_SW; | 1837 | VOLTAGE_SW; |
| 1802 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 1838 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = |
| @@ -1821,12 +1857,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1821 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || | 1857 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || |
| 1822 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) | 1858 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) |
| 1823 | continue; | 1859 | continue; |
| 1824 | /* skip overclock modes for now */ | ||
| 1825 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk > | ||
| 1826 | rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || | ||
| 1827 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > | ||
| 1828 | rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) | ||
| 1829 | continue; | ||
| 1830 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | 1860 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = |
| 1831 | VOLTAGE_SW; | 1861 | VOLTAGE_SW; |
| 1832 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 1862 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = |
| @@ -1838,7 +1868,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1838 | if (mode_index) { | 1868 | if (mode_index) { |
| 1839 | misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); | 1869 | misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); |
| 1840 | misc2 = le16_to_cpu(non_clock_info->usClassification); | 1870 | misc2 = le16_to_cpu(non_clock_info->usClassification); |
| 1841 | rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = | 1871 | rdev->pm.power_state[state_index].misc = misc; |
| 1872 | rdev->pm.power_state[state_index].misc2 = misc2; | ||
| 1873 | rdev->pm.power_state[state_index].pcie_lanes = | ||
| 1842 | ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> | 1874 | ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> |
| 1843 | ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; | 1875 | ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; |
| 1844 | switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { | 1876 | switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { |
| @@ -1855,22 +1887,36 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1855 | POWER_STATE_TYPE_PERFORMANCE; | 1887 | POWER_STATE_TYPE_PERFORMANCE; |
| 1856 | break; | 1888 | break; |
| 1857 | } | 1889 | } |
| 1890 | rdev->pm.power_state[state_index].flags = 0; | ||
| 1891 | if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) | ||
| 1892 | rdev->pm.power_state[state_index].flags |= | ||
| 1893 | RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 1858 | if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { | 1894 | if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { |
| 1859 | rdev->pm.power_state[state_index].type = | 1895 | rdev->pm.power_state[state_index].type = |
| 1860 | POWER_STATE_TYPE_DEFAULT; | 1896 | POWER_STATE_TYPE_DEFAULT; |
| 1861 | rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; | 1897 | rdev->pm.default_power_state_index = state_index; |
| 1862 | rdev->pm.power_state[state_index].default_clock_mode = | 1898 | rdev->pm.power_state[state_index].default_clock_mode = |
| 1863 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; | 1899 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; |
| 1864 | } | 1900 | } |
| 1865 | state_index++; | 1901 | state_index++; |
| 1866 | } | 1902 | } |
| 1867 | } | 1903 | } |
| 1904 | /* if multiple clock modes, mark the lowest as no display */ | ||
| 1905 | for (i = 0; i < state_index; i++) { | ||
| 1906 | if (rdev->pm.power_state[i].num_clock_modes > 1) | ||
| 1907 | rdev->pm.power_state[i].clock_info[0].flags |= | ||
| 1908 | RADEON_PM_MODE_NO_DISPLAY; | ||
| 1909 | } | ||
| 1910 | /* first mode is usually default */ | ||
| 1911 | if (rdev->pm.default_power_state_index == -1) { | ||
| 1912 | rdev->pm.power_state[0].type = | ||
| 1913 | POWER_STATE_TYPE_DEFAULT; | ||
| 1914 | rdev->pm.default_power_state_index = 0; | ||
| 1915 | rdev->pm.power_state[0].default_clock_mode = | ||
| 1916 | &rdev->pm.power_state[0].clock_info[0]; | ||
| 1917 | } | ||
| 1868 | } | 1918 | } |
| 1869 | } else { | 1919 | } else { |
| 1870 | /* XXX figure out some good default low power mode for cards w/out power tables */ | ||
| 1871 | } | ||
| 1872 | |||
| 1873 | if (rdev->pm.default_power_state == NULL) { | ||
| 1874 | /* add the default mode */ | 1920 | /* add the default mode */ |
| 1875 | rdev->pm.power_state[state_index].type = | 1921 | rdev->pm.power_state[state_index].type = |
| 1876 | POWER_STATE_TYPE_DEFAULT; | 1922 | POWER_STATE_TYPE_DEFAULT; |
| @@ -1880,18 +1926,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1880 | rdev->pm.power_state[state_index].default_clock_mode = | 1926 | rdev->pm.power_state[state_index].default_clock_mode = |
| 1881 | &rdev->pm.power_state[state_index].clock_info[0]; | 1927 | &rdev->pm.power_state[state_index].clock_info[0]; |
| 1882 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 1928 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
| 1883 | if (rdev->asic->get_pcie_lanes) | 1929 | rdev->pm.power_state[state_index].pcie_lanes = 16; |
| 1884 | rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev); | 1930 | rdev->pm.default_power_state_index = state_index; |
| 1885 | else | 1931 | rdev->pm.power_state[state_index].flags = 0; |
| 1886 | rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16; | ||
| 1887 | rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; | ||
| 1888 | state_index++; | 1932 | state_index++; |
| 1889 | } | 1933 | } |
| 1934 | |||
| 1890 | rdev->pm.num_power_states = state_index; | 1935 | rdev->pm.num_power_states = state_index; |
| 1891 | 1936 | ||
| 1892 | rdev->pm.current_power_state = rdev->pm.default_power_state; | 1937 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; |
| 1893 | rdev->pm.current_clock_mode = | 1938 | rdev->pm.current_clock_mode_index = 0; |
| 1894 | rdev->pm.default_power_state->default_clock_mode; | ||
| 1895 | } | 1939 | } |
| 1896 | 1940 | ||
| 1897 | void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) | 1941 | void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 8ad71f701316..fbba938f8048 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
| @@ -85,12 +85,11 @@ static bool radeon_read_bios(struct radeon_device *rdev) | |||
| 85 | pci_unmap_rom(rdev->pdev, bios); | 85 | pci_unmap_rom(rdev->pdev, bios); |
| 86 | return false; | 86 | return false; |
| 87 | } | 87 | } |
| 88 | rdev->bios = kmalloc(size, GFP_KERNEL); | 88 | rdev->bios = kmemdup(bios, size, GFP_KERNEL); |
| 89 | if (rdev->bios == NULL) { | 89 | if (rdev->bios == NULL) { |
| 90 | pci_unmap_rom(rdev->pdev, bios); | 90 | pci_unmap_rom(rdev->pdev, bios); |
| 91 | return false; | 91 | return false; |
| 92 | } | 92 | } |
| 93 | memcpy(rdev->bios, bios, size); | ||
| 94 | pci_unmap_rom(rdev->pdev, bios); | 93 | pci_unmap_rom(rdev->pdev, bios); |
| 95 | return true; | 94 | return true; |
| 96 | } | 95 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 0f1fd9254e30..93f18bbf744a 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -1113,18 +1113,20 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder | |||
| 1113 | break; | 1113 | break; |
| 1114 | 1114 | ||
| 1115 | if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && | 1115 | if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && |
| 1116 | (RBIOS16(tmp + 2) == | 1116 | (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { |
| 1117 | lvds->native_mode.vdisplay)) { | 1117 | lvds->native_mode.htotal = lvds->native_mode.hdisplay + |
| 1118 | lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8; | 1118 | (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; |
| 1119 | lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8; | 1119 | lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + |
| 1120 | lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) + | 1120 | (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; |
| 1121 | RBIOS16(tmp + 21)) * 8; | 1121 | lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + |
| 1122 | 1122 | (RBIOS8(tmp + 23) * 8); | |
| 1123 | lvds->native_mode.vtotal = RBIOS16(tmp + 24); | 1123 | |
| 1124 | lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff; | 1124 | lvds->native_mode.vtotal = lvds->native_mode.vdisplay + |
| 1125 | lvds->native_mode.vsync_end = | 1125 | (RBIOS16(tmp + 24) - RBIOS16(tmp + 26)); |
| 1126 | ((RBIOS16(tmp + 28) & 0xf800) >> 11) + | 1126 | lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + |
| 1127 | (RBIOS16(tmp + 28) & 0x7ff); | 1127 | ((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26)); |
| 1128 | lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + | ||
| 1129 | ((RBIOS16(tmp + 28) & 0xf800) >> 11); | ||
| 1128 | 1130 | ||
| 1129 | lvds->native_mode.clock = RBIOS16(tmp + 9) * 10; | 1131 | lvds->native_mode.clock = RBIOS16(tmp + 9) * 10; |
| 1130 | lvds->native_mode.flags = 0; | 1132 | lvds->native_mode.flags = 0; |
| @@ -2366,7 +2368,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
| 2366 | u8 rev, blocks, tmp; | 2368 | u8 rev, blocks, tmp; |
| 2367 | int state_index = 0; | 2369 | int state_index = 0; |
| 2368 | 2370 | ||
| 2369 | rdev->pm.default_power_state = NULL; | 2371 | rdev->pm.default_power_state_index = -1; |
| 2370 | 2372 | ||
| 2371 | if (rdev->flags & RADEON_IS_MOBILITY) { | 2373 | if (rdev->flags & RADEON_IS_MOBILITY) { |
| 2372 | offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); | 2374 | offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); |
| @@ -2380,17 +2382,13 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
| 2380 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | 2382 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || |
| 2381 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | 2383 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) |
| 2382 | goto default_mode; | 2384 | goto default_mode; |
| 2383 | /* skip overclock modes for now */ | ||
| 2384 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk > | ||
| 2385 | rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || | ||
| 2386 | (rdev->pm.power_state[state_index].clock_info[0].sclk > | ||
| 2387 | rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) | ||
| 2388 | goto default_mode; | ||
| 2389 | rdev->pm.power_state[state_index].type = | 2385 | rdev->pm.power_state[state_index].type = |
| 2390 | POWER_STATE_TYPE_BATTERY; | 2386 | POWER_STATE_TYPE_BATTERY; |
| 2391 | misc = RBIOS16(offset + 0x5 + 0x0); | 2387 | misc = RBIOS16(offset + 0x5 + 0x0); |
| 2392 | if (rev > 4) | 2388 | if (rev > 4) |
| 2393 | misc2 = RBIOS16(offset + 0x5 + 0xe); | 2389 | misc2 = RBIOS16(offset + 0x5 + 0xe); |
| 2390 | rdev->pm.power_state[state_index].misc = misc; | ||
| 2391 | rdev->pm.power_state[state_index].misc2 = misc2; | ||
| 2394 | if (misc & 0x4) { | 2392 | if (misc & 0x4) { |
| 2395 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; | 2393 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; |
| 2396 | if (misc & 0x8) | 2394 | if (misc & 0x8) |
| @@ -2437,8 +2435,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
| 2437 | } else | 2435 | } else |
| 2438 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 2436 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
| 2439 | if (rev > 6) | 2437 | if (rev > 6) |
| 2440 | rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = | 2438 | rdev->pm.power_state[state_index].pcie_lanes = |
| 2441 | RBIOS8(offset + 0x5 + 0x10); | 2439 | RBIOS8(offset + 0x5 + 0x10); |
| 2440 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
| 2442 | state_index++; | 2441 | state_index++; |
| 2443 | } else { | 2442 | } else { |
| 2444 | /* XXX figure out some good default low power mode for mobility cards w/out power tables */ | 2443 | /* XXX figure out some good default low power mode for mobility cards w/out power tables */ |
| @@ -2456,16 +2455,13 @@ default_mode: | |||
| 2456 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; | 2455 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; |
| 2457 | rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; | 2456 | rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; |
| 2458 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 2457 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
| 2459 | if (rdev->asic->get_pcie_lanes) | 2458 | rdev->pm.power_state[state_index].pcie_lanes = 16; |
| 2460 | rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev); | 2459 | rdev->pm.power_state[state_index].flags = 0; |
| 2461 | else | 2460 | rdev->pm.default_power_state_index = state_index; |
| 2462 | rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16; | ||
| 2463 | rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; | ||
| 2464 | rdev->pm.num_power_states = state_index + 1; | 2461 | rdev->pm.num_power_states = state_index + 1; |
| 2465 | 2462 | ||
| 2466 | rdev->pm.current_power_state = rdev->pm.default_power_state; | 2463 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; |
| 2467 | rdev->pm.current_clock_mode = | 2464 | rdev->pm.current_clock_mode_index = 0; |
| 2468 | rdev->pm.default_power_state->default_clock_mode; | ||
| 2469 | } | 2465 | } |
| 2470 | 2466 | ||
| 2471 | void radeon_external_tmds_setup(struct drm_encoder *encoder) | 2467 | void radeon_external_tmds_setup(struct drm_encoder *encoder) |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 40a24c941f20..0c7ccc6961a3 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -1085,6 +1085,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1085 | drm_connector_attach_property(&radeon_connector->base, | 1085 | drm_connector_attach_property(&radeon_connector->base, |
| 1086 | rdev->mode_info.load_detect_property, | 1086 | rdev->mode_info.load_detect_property, |
| 1087 | 1); | 1087 | 1); |
| 1088 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
| 1088 | break; | 1089 | break; |
| 1089 | case DRM_MODE_CONNECTOR_DVIA: | 1090 | case DRM_MODE_CONNECTOR_DVIA: |
| 1090 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1091 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
| @@ -1211,6 +1212,12 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1211 | break; | 1212 | break; |
| 1212 | } | 1213 | } |
| 1213 | 1214 | ||
| 1215 | if (hpd->hpd == RADEON_HPD_NONE) { | ||
| 1216 | if (i2c_bus->valid) | ||
| 1217 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
| 1218 | } else | ||
| 1219 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 1220 | |||
| 1214 | connector->display_info.subpixel_order = subpixel_order; | 1221 | connector->display_info.subpixel_order = subpixel_order; |
| 1215 | drm_sysfs_connector_add(connector); | 1222 | drm_sysfs_connector_add(connector); |
| 1216 | return; | 1223 | return; |
| @@ -1272,6 +1279,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1272 | drm_connector_attach_property(&radeon_connector->base, | 1279 | drm_connector_attach_property(&radeon_connector->base, |
| 1273 | rdev->mode_info.load_detect_property, | 1280 | rdev->mode_info.load_detect_property, |
| 1274 | 1); | 1281 | 1); |
| 1282 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
| 1275 | break; | 1283 | break; |
| 1276 | case DRM_MODE_CONNECTOR_DVIA: | 1284 | case DRM_MODE_CONNECTOR_DVIA: |
| 1277 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1285 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
| @@ -1340,6 +1348,11 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1340 | break; | 1348 | break; |
| 1341 | } | 1349 | } |
| 1342 | 1350 | ||
| 1351 | if (hpd->hpd == RADEON_HPD_NONE) { | ||
| 1352 | if (i2c_bus->valid) | ||
| 1353 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
| 1354 | } else | ||
| 1355 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 1343 | connector->display_info.subpixel_order = subpixel_order; | 1356 | connector->display_info.subpixel_order = subpixel_order; |
| 1344 | drm_sysfs_connector_add(connector); | 1357 | drm_sysfs_connector_add(connector); |
| 1345 | return; | 1358 | return; |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 26217ffe0355..a20b612ffe75 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -299,24 +299,24 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev) | |||
| 299 | sclk = radeon_get_engine_clock(rdev); | 299 | sclk = radeon_get_engine_clock(rdev); |
| 300 | mclk = rdev->clock.default_mclk; | 300 | mclk = rdev->clock.default_mclk; |
| 301 | 301 | ||
| 302 | a.full = rfixed_const(100); | 302 | a.full = dfixed_const(100); |
| 303 | rdev->pm.sclk.full = rfixed_const(sclk); | 303 | rdev->pm.sclk.full = dfixed_const(sclk); |
| 304 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | 304 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); |
| 305 | rdev->pm.mclk.full = rfixed_const(mclk); | 305 | rdev->pm.mclk.full = dfixed_const(mclk); |
| 306 | rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); | 306 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); |
| 307 | 307 | ||
| 308 | a.full = rfixed_const(16); | 308 | a.full = dfixed_const(16); |
| 309 | /* core_bandwidth = sclk(Mhz) * 16 */ | 309 | /* core_bandwidth = sclk(Mhz) * 16 */ |
| 310 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); | 310 | rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); |
| 311 | } else { | 311 | } else { |
| 312 | sclk = radeon_get_engine_clock(rdev); | 312 | sclk = radeon_get_engine_clock(rdev); |
| 313 | mclk = radeon_get_memory_clock(rdev); | 313 | mclk = radeon_get_memory_clock(rdev); |
| 314 | 314 | ||
| 315 | a.full = rfixed_const(100); | 315 | a.full = dfixed_const(100); |
| 316 | rdev->pm.sclk.full = rfixed_const(sclk); | 316 | rdev->pm.sclk.full = dfixed_const(sclk); |
| 317 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | 317 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); |
| 318 | rdev->pm.mclk.full = rfixed_const(mclk); | 318 | rdev->pm.mclk.full = dfixed_const(mclk); |
| 319 | rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); | 319 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); |
| 320 | } | 320 | } |
| 321 | } | 321 | } |
| 322 | 322 | ||
| @@ -599,9 +599,11 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 599 | spin_lock_init(&rdev->ih.lock); | 599 | spin_lock_init(&rdev->ih.lock); |
| 600 | mutex_init(&rdev->gem.mutex); | 600 | mutex_init(&rdev->gem.mutex); |
| 601 | mutex_init(&rdev->pm.mutex); | 601 | mutex_init(&rdev->pm.mutex); |
| 602 | mutex_init(&rdev->vram_mutex); | ||
| 602 | rwlock_init(&rdev->fence_drv.lock); | 603 | rwlock_init(&rdev->fence_drv.lock); |
| 603 | INIT_LIST_HEAD(&rdev->gem.objects); | 604 | INIT_LIST_HEAD(&rdev->gem.objects); |
| 604 | init_waitqueue_head(&rdev->irq.vblank_queue); | 605 | init_waitqueue_head(&rdev->irq.vblank_queue); |
| 606 | init_waitqueue_head(&rdev->irq.idle_queue); | ||
| 605 | 607 | ||
| 606 | /* setup workqueue */ | 608 | /* setup workqueue */ |
| 607 | rdev->wq = create_workqueue("radeon"); | 609 | rdev->wq = create_workqueue("radeon"); |
| @@ -746,6 +748,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 746 | 748 | ||
| 747 | radeon_save_bios_scratch_regs(rdev); | 749 | radeon_save_bios_scratch_regs(rdev); |
| 748 | 750 | ||
| 751 | radeon_pm_suspend(rdev); | ||
| 749 | radeon_suspend(rdev); | 752 | radeon_suspend(rdev); |
| 750 | radeon_hpd_fini(rdev); | 753 | radeon_hpd_fini(rdev); |
| 751 | /* evict remaining vram memory */ | 754 | /* evict remaining vram memory */ |
| @@ -781,6 +784,7 @@ int radeon_resume_kms(struct drm_device *dev) | |||
| 781 | /* resume AGP if in use */ | 784 | /* resume AGP if in use */ |
| 782 | radeon_agp_resume(rdev); | 785 | radeon_agp_resume(rdev); |
| 783 | radeon_resume(rdev); | 786 | radeon_resume(rdev); |
| 787 | radeon_pm_resume(rdev); | ||
| 784 | radeon_restore_bios_scratch_regs(rdev); | 788 | radeon_restore_bios_scratch_regs(rdev); |
| 785 | radeon_fbdev_set_suspend(rdev, 0); | 789 | radeon_fbdev_set_suspend(rdev, 0); |
| 786 | release_console_sem(); | 790 | release_console_sem(); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ce5163ed1fa6..da85cad1152b 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -633,37 +633,37 @@ calc_fb_div(struct radeon_pll *pll, | |||
| 633 | 633 | ||
| 634 | vco_freq = freq * post_div; | 634 | vco_freq = freq * post_div; |
| 635 | /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ | 635 | /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ |
| 636 | a.full = rfixed_const(pll->reference_freq); | 636 | a.full = dfixed_const(pll->reference_freq); |
| 637 | feedback_divider.full = rfixed_const(vco_freq); | 637 | feedback_divider.full = dfixed_const(vco_freq); |
| 638 | feedback_divider.full = rfixed_div(feedback_divider, a); | 638 | feedback_divider.full = dfixed_div(feedback_divider, a); |
| 639 | a.full = rfixed_const(ref_div); | 639 | a.full = dfixed_const(ref_div); |
| 640 | feedback_divider.full = rfixed_mul(feedback_divider, a); | 640 | feedback_divider.full = dfixed_mul(feedback_divider, a); |
| 641 | 641 | ||
| 642 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { | 642 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { |
| 643 | /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ | 643 | /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ |
| 644 | a.full = rfixed_const(10); | 644 | a.full = dfixed_const(10); |
| 645 | feedback_divider.full = rfixed_mul(feedback_divider, a); | 645 | feedback_divider.full = dfixed_mul(feedback_divider, a); |
| 646 | feedback_divider.full += rfixed_const_half(0); | 646 | feedback_divider.full += dfixed_const_half(0); |
| 647 | feedback_divider.full = rfixed_floor(feedback_divider); | 647 | feedback_divider.full = dfixed_floor(feedback_divider); |
| 648 | feedback_divider.full = rfixed_div(feedback_divider, a); | 648 | feedback_divider.full = dfixed_div(feedback_divider, a); |
| 649 | 649 | ||
| 650 | /* *fb_div = floor(feedback_divider); */ | 650 | /* *fb_div = floor(feedback_divider); */ |
| 651 | a.full = rfixed_floor(feedback_divider); | 651 | a.full = dfixed_floor(feedback_divider); |
| 652 | *fb_div = rfixed_trunc(a); | 652 | *fb_div = dfixed_trunc(a); |
| 653 | /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ | 653 | /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ |
| 654 | a.full = rfixed_const(10); | 654 | a.full = dfixed_const(10); |
| 655 | b.full = rfixed_mul(feedback_divider, a); | 655 | b.full = dfixed_mul(feedback_divider, a); |
| 656 | 656 | ||
| 657 | feedback_divider.full = rfixed_floor(feedback_divider); | 657 | feedback_divider.full = dfixed_floor(feedback_divider); |
| 658 | feedback_divider.full = rfixed_mul(feedback_divider, a); | 658 | feedback_divider.full = dfixed_mul(feedback_divider, a); |
| 659 | feedback_divider.full = b.full - feedback_divider.full; | 659 | feedback_divider.full = b.full - feedback_divider.full; |
| 660 | *fb_div_frac = rfixed_trunc(feedback_divider); | 660 | *fb_div_frac = dfixed_trunc(feedback_divider); |
| 661 | } else { | 661 | } else { |
| 662 | /* *fb_div = floor(feedback_divider + 0.5); */ | 662 | /* *fb_div = floor(feedback_divider + 0.5); */ |
| 663 | feedback_divider.full += rfixed_const_half(0); | 663 | feedback_divider.full += dfixed_const_half(0); |
| 664 | feedback_divider.full = rfixed_floor(feedback_divider); | 664 | feedback_divider.full = dfixed_floor(feedback_divider); |
| 665 | 665 | ||
| 666 | *fb_div = rfixed_trunc(feedback_divider); | 666 | *fb_div = dfixed_trunc(feedback_divider); |
| 667 | *fb_div_frac = 0; | 667 | *fb_div_frac = 0; |
| 668 | } | 668 | } |
| 669 | 669 | ||
| @@ -693,10 +693,10 @@ calc_fb_ref_div(struct radeon_pll *pll, | |||
| 693 | pll_out_max = pll->pll_out_max; | 693 | pll_out_max = pll->pll_out_max; |
| 694 | } | 694 | } |
| 695 | 695 | ||
| 696 | ffreq.full = rfixed_const(freq); | 696 | ffreq.full = dfixed_const(freq); |
| 697 | /* max_error = ffreq * 0.0025; */ | 697 | /* max_error = ffreq * 0.0025; */ |
| 698 | a.full = rfixed_const(400); | 698 | a.full = dfixed_const(400); |
| 699 | max_error.full = rfixed_div(ffreq, a); | 699 | max_error.full = dfixed_div(ffreq, a); |
| 700 | 700 | ||
| 701 | for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { | 701 | for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { |
| 702 | if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { | 702 | if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { |
| @@ -707,9 +707,9 @@ calc_fb_ref_div(struct radeon_pll *pll, | |||
| 707 | continue; | 707 | continue; |
| 708 | 708 | ||
| 709 | /* pll_out = vco / post_div; */ | 709 | /* pll_out = vco / post_div; */ |
| 710 | a.full = rfixed_const(post_div); | 710 | a.full = dfixed_const(post_div); |
| 711 | pll_out.full = rfixed_const(vco); | 711 | pll_out.full = dfixed_const(vco); |
| 712 | pll_out.full = rfixed_div(pll_out, a); | 712 | pll_out.full = dfixed_div(pll_out, a); |
| 713 | 713 | ||
| 714 | if (pll_out.full >= ffreq.full) { | 714 | if (pll_out.full >= ffreq.full) { |
| 715 | error.full = pll_out.full - ffreq.full; | 715 | error.full = pll_out.full - ffreq.full; |
| @@ -888,8 +888,15 @@ radeon_user_framebuffer_create(struct drm_device *dev, | |||
| 888 | return &radeon_fb->base; | 888 | return &radeon_fb->base; |
| 889 | } | 889 | } |
| 890 | 890 | ||
| 891 | static void radeon_output_poll_changed(struct drm_device *dev) | ||
| 892 | { | ||
| 893 | struct radeon_device *rdev = dev->dev_private; | ||
| 894 | radeon_fb_output_poll_changed(rdev); | ||
| 895 | } | ||
| 896 | |||
| 891 | static const struct drm_mode_config_funcs radeon_mode_funcs = { | 897 | static const struct drm_mode_config_funcs radeon_mode_funcs = { |
| 892 | .fb_create = radeon_user_framebuffer_create, | 898 | .fb_create = radeon_user_framebuffer_create, |
| 899 | .output_poll_changed = radeon_output_poll_changed | ||
| 893 | }; | 900 | }; |
| 894 | 901 | ||
| 895 | struct drm_prop_enum_list { | 902 | struct drm_prop_enum_list { |
| @@ -1030,7 +1037,12 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
| 1030 | /* initialize hpd */ | 1037 | /* initialize hpd */ |
| 1031 | radeon_hpd_init(rdev); | 1038 | radeon_hpd_init(rdev); |
| 1032 | 1039 | ||
| 1040 | /* Initialize power management */ | ||
| 1041 | radeon_pm_init(rdev); | ||
| 1042 | |||
| 1033 | radeon_fbdev_init(rdev); | 1043 | radeon_fbdev_init(rdev); |
| 1044 | drm_kms_helper_poll_init(rdev->ddev); | ||
| 1045 | |||
| 1034 | return 0; | 1046 | return 0; |
| 1035 | } | 1047 | } |
| 1036 | 1048 | ||
| @@ -1038,8 +1050,10 @@ void radeon_modeset_fini(struct radeon_device *rdev) | |||
| 1038 | { | 1050 | { |
| 1039 | radeon_fbdev_fini(rdev); | 1051 | radeon_fbdev_fini(rdev); |
| 1040 | kfree(rdev->mode_info.bios_hardcoded_edid); | 1052 | kfree(rdev->mode_info.bios_hardcoded_edid); |
| 1053 | radeon_pm_fini(rdev); | ||
| 1041 | 1054 | ||
| 1042 | if (rdev->mode_info.mode_config_initialized) { | 1055 | if (rdev->mode_info.mode_config_initialized) { |
| 1056 | drm_kms_helper_poll_fini(rdev->ddev); | ||
| 1043 | radeon_hpd_fini(rdev); | 1057 | radeon_hpd_fini(rdev); |
| 1044 | drm_mode_config_cleanup(rdev->ddev); | 1058 | drm_mode_config_cleanup(rdev->ddev); |
| 1045 | rdev->mode_info.mode_config_initialized = false; | 1059 | rdev->mode_info.mode_config_initialized = false; |
| @@ -1089,15 +1103,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
| 1089 | } | 1103 | } |
| 1090 | if (radeon_crtc->rmx_type != RMX_OFF) { | 1104 | if (radeon_crtc->rmx_type != RMX_OFF) { |
| 1091 | fixed20_12 a, b; | 1105 | fixed20_12 a, b; |
| 1092 | a.full = rfixed_const(crtc->mode.vdisplay); | 1106 | a.full = dfixed_const(crtc->mode.vdisplay); |
| 1093 | b.full = rfixed_const(radeon_crtc->native_mode.hdisplay); | 1107 | b.full = dfixed_const(radeon_crtc->native_mode.hdisplay); |
| 1094 | radeon_crtc->vsc.full = rfixed_div(a, b); | 1108 | radeon_crtc->vsc.full = dfixed_div(a, b); |
| 1095 | a.full = rfixed_const(crtc->mode.hdisplay); | 1109 | a.full = dfixed_const(crtc->mode.hdisplay); |
| 1096 | b.full = rfixed_const(radeon_crtc->native_mode.vdisplay); | 1110 | b.full = dfixed_const(radeon_crtc->native_mode.vdisplay); |
| 1097 | radeon_crtc->hsc.full = rfixed_div(a, b); | 1111 | radeon_crtc->hsc.full = dfixed_div(a, b); |
| 1098 | } else { | 1112 | } else { |
| 1099 | radeon_crtc->vsc.full = rfixed_const(1); | 1113 | radeon_crtc->vsc.full = dfixed_const(1); |
| 1100 | radeon_crtc->hsc.full = rfixed_const(1); | 1114 | radeon_crtc->hsc.full = dfixed_const(1); |
| 1101 | } | 1115 | } |
| 1102 | return true; | 1116 | return true; |
| 1103 | } | 1117 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 4b05563d99e1..4afba1eca2a7 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -44,9 +44,10 @@ | |||
| 44 | * - 2.1.0 - add square tiling interface | 44 | * - 2.1.0 - add square tiling interface |
| 45 | * - 2.2.0 - add r6xx/r7xx const buffer support | 45 | * - 2.2.0 - add r6xx/r7xx const buffer support |
| 46 | * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs | 46 | * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs |
| 47 | * - 2.4.0 - add crtc id query | ||
| 47 | */ | 48 | */ |
| 48 | #define KMS_DRIVER_MAJOR 2 | 49 | #define KMS_DRIVER_MAJOR 2 |
| 49 | #define KMS_DRIVER_MINOR 3 | 50 | #define KMS_DRIVER_MINOR 4 |
| 50 | #define KMS_DRIVER_PATCHLEVEL 0 | 51 | #define KMS_DRIVER_PATCHLEVEL 0 |
| 51 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 52 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
| 52 | int radeon_driver_unload_kms(struct drm_device *dev); | 53 | int radeon_driver_unload_kms(struct drm_device *dev); |
| @@ -91,7 +92,6 @@ int radeon_testing = 0; | |||
| 91 | int radeon_connector_table = 0; | 92 | int radeon_connector_table = 0; |
| 92 | int radeon_tv = 1; | 93 | int radeon_tv = 1; |
| 93 | int radeon_new_pll = -1; | 94 | int radeon_new_pll = -1; |
| 94 | int radeon_dynpm = -1; | ||
| 95 | int radeon_audio = 1; | 95 | int radeon_audio = 1; |
| 96 | int radeon_disp_priority = 0; | 96 | int radeon_disp_priority = 0; |
| 97 | int radeon_hw_i2c = 0; | 97 | int radeon_hw_i2c = 0; |
| @@ -132,9 +132,6 @@ module_param_named(tv, radeon_tv, int, 0444); | |||
| 132 | MODULE_PARM_DESC(new_pll, "Select new PLL code"); | 132 | MODULE_PARM_DESC(new_pll, "Select new PLL code"); |
| 133 | module_param_named(new_pll, radeon_new_pll, int, 0444); | 133 | module_param_named(new_pll, radeon_new_pll, int, 0444); |
| 134 | 134 | ||
| 135 | MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)"); | ||
| 136 | module_param_named(dynpm, radeon_dynpm, int, 0444); | ||
| 137 | |||
| 138 | MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); | 135 | MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); |
| 139 | module_param_named(audio, radeon_audio, int, 0444); | 136 | module_param_named(audio, radeon_audio, int, 0444); |
| 140 | 137 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index c5ddaf58563a..1ebb100015b7 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -309,9 +309,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
| 309 | struct drm_device *dev = encoder->dev; | 309 | struct drm_device *dev = encoder->dev; |
| 310 | struct radeon_device *rdev = dev->dev_private; | 310 | struct radeon_device *rdev = dev->dev_private; |
| 311 | 311 | ||
| 312 | /* adjust pm to upcoming mode change */ | ||
| 313 | radeon_pm_compute_clocks(rdev); | ||
| 314 | |||
| 315 | /* set the active encoder to connector routing */ | 312 | /* set the active encoder to connector routing */ |
| 316 | radeon_encoder_set_active_device(encoder); | 313 | radeon_encoder_set_active_device(encoder); |
| 317 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 314 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
| @@ -1111,8 +1108,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 1111 | } | 1108 | } |
| 1112 | radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); | 1109 | radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); |
| 1113 | 1110 | ||
| 1114 | /* adjust pm to dpms change */ | ||
| 1115 | radeon_pm_compute_clocks(rdev); | ||
| 1116 | } | 1111 | } |
| 1117 | 1112 | ||
| 1118 | union crtc_source_param { | 1113 | union crtc_source_param { |
| @@ -1546,10 +1541,49 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder) | |||
| 1546 | 1541 | ||
| 1547 | static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | 1542 | static void radeon_atom_encoder_disable(struct drm_encoder *encoder) |
| 1548 | { | 1543 | { |
| 1544 | struct drm_device *dev = encoder->dev; | ||
| 1545 | struct radeon_device *rdev = dev->dev_private; | ||
| 1549 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1546 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 1550 | struct radeon_encoder_atom_dig *dig; | 1547 | struct radeon_encoder_atom_dig *dig; |
| 1551 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1548 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
| 1552 | 1549 | ||
| 1550 | switch (radeon_encoder->encoder_id) { | ||
| 1551 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | ||
| 1552 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
| 1553 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | ||
| 1554 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
| 1555 | atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE); | ||
| 1556 | break; | ||
| 1557 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 1558 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 1559 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 1560 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
| 1561 | if (ASIC_IS_DCE4(rdev)) | ||
| 1562 | /* disable the transmitter */ | ||
| 1563 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
| 1564 | else { | ||
| 1565 | /* disable the encoder and transmitter */ | ||
| 1566 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
| 1567 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE); | ||
| 1568 | } | ||
| 1569 | break; | ||
| 1570 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | ||
| 1571 | atombios_ddia_setup(encoder, ATOM_DISABLE); | ||
| 1572 | break; | ||
| 1573 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | ||
| 1574 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
| 1575 | atombios_external_tmds_setup(encoder, ATOM_DISABLE); | ||
| 1576 | break; | ||
| 1577 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | ||
| 1578 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
| 1579 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | ||
| 1580 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
| 1581 | atombios_dac_setup(encoder, ATOM_DISABLE); | ||
| 1582 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | ||
| 1583 | atombios_tv_setup(encoder, ATOM_DISABLE); | ||
| 1584 | break; | ||
| 1585 | } | ||
| 1586 | |||
| 1553 | if (radeon_encoder_is_digital(encoder)) { | 1587 | if (radeon_encoder_is_digital(encoder)) { |
| 1554 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | 1588 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) |
| 1555 | r600_hdmi_disable(encoder); | 1589 | r600_hdmi_disable(encoder); |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index fcb5b52727b0..e192acfbf0cd 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
| @@ -236,8 +236,13 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev, | |||
| 236 | drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); | 236 | drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); |
| 237 | 237 | ||
| 238 | /* setup aperture base/size for vesafb takeover */ | 238 | /* setup aperture base/size for vesafb takeover */ |
| 239 | info->aperture_base = rdev->ddev->mode_config.fb_base; | 239 | info->apertures = alloc_apertures(1); |
| 240 | info->aperture_size = rdev->mc.real_vram_size; | 240 | if (!info->apertures) { |
| 241 | ret = -ENOMEM; | ||
| 242 | goto out_unref; | ||
| 243 | } | ||
| 244 | info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; | ||
| 245 | info->apertures->ranges[0].size = rdev->mc.real_vram_size; | ||
| 241 | 246 | ||
| 242 | info->fix.mmio_start = 0; | 247 | info->fix.mmio_start = 0; |
| 243 | info->fix.mmio_len = 0; | 248 | info->fix.mmio_len = 0; |
| @@ -311,16 +316,9 @@ int radeon_parse_options(char *options) | |||
| 311 | return 0; | 316 | return 0; |
| 312 | } | 317 | } |
| 313 | 318 | ||
| 314 | void radeonfb_hotplug(struct drm_device *dev, bool polled) | 319 | void radeon_fb_output_poll_changed(struct radeon_device *rdev) |
| 315 | { | ||
| 316 | struct radeon_device *rdev = dev->dev_private; | ||
| 317 | |||
| 318 | drm_helper_fb_hpd_irq_event(&rdev->mode_info.rfbdev->helper); | ||
| 319 | } | ||
| 320 | |||
| 321 | static void radeon_fb_output_status_changed(struct drm_fb_helper *fb_helper) | ||
| 322 | { | 320 | { |
| 323 | drm_helper_fb_hotplug_event(fb_helper, true); | 321 | drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); |
| 324 | } | 322 | } |
| 325 | 323 | ||
| 326 | static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) | 324 | static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) |
| @@ -359,7 +357,6 @@ static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { | |||
| 359 | .gamma_set = radeon_crtc_fb_gamma_set, | 357 | .gamma_set = radeon_crtc_fb_gamma_set, |
| 360 | .gamma_get = radeon_crtc_fb_gamma_get, | 358 | .gamma_get = radeon_crtc_fb_gamma_get, |
| 361 | .fb_probe = radeon_fb_find_or_create_single, | 359 | .fb_probe = radeon_fb_find_or_create_single, |
| 362 | .fb_output_status_changed = radeon_fb_output_status_changed, | ||
| 363 | }; | 360 | }; |
| 364 | 361 | ||
| 365 | int radeon_fbdev_init(struct radeon_device *rdev) | 362 | int radeon_fbdev_init(struct radeon_device *rdev) |
| @@ -381,11 +378,10 @@ int radeon_fbdev_init(struct radeon_device *rdev) | |||
| 381 | 378 | ||
| 382 | drm_fb_helper_init(rdev->ddev, &rfbdev->helper, | 379 | drm_fb_helper_init(rdev->ddev, &rfbdev->helper, |
| 383 | rdev->num_crtc, | 380 | rdev->num_crtc, |
| 384 | RADEONFB_CONN_LIMIT, true); | 381 | RADEONFB_CONN_LIMIT); |
| 385 | drm_fb_helper_single_add_all_connectors(&rfbdev->helper); | 382 | drm_fb_helper_single_add_all_connectors(&rfbdev->helper); |
| 386 | drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); | 383 | drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); |
| 387 | return 0; | 384 | return 0; |
| 388 | |||
| 389 | } | 385 | } |
| 390 | 386 | ||
| 391 | void radeon_fbdev_fini(struct radeon_device *rdev) | 387 | void radeon_fbdev_fini(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index a95907aa7eae..059bfa4098d7 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | * Jerome Glisse | 26 | * Jerome Glisse |
| 27 | */ | 27 | */ |
| 28 | #include "drmP.h" | 28 | #include "drmP.h" |
| 29 | #include "drm_crtc_helper.h" | ||
| 29 | #include "radeon_drm.h" | 30 | #include "radeon_drm.h" |
| 30 | #include "radeon_reg.h" | 31 | #include "radeon_reg.h" |
| 31 | #include "radeon.h" | 32 | #include "radeon.h" |
| @@ -55,9 +56,7 @@ static void radeon_hotplug_work_func(struct work_struct *work) | |||
| 55 | radeon_connector_hotplug(connector); | 56 | radeon_connector_hotplug(connector); |
| 56 | } | 57 | } |
| 57 | /* Just fire off a uevent and let userspace tell us what to do */ | 58 | /* Just fire off a uevent and let userspace tell us what to do */ |
| 58 | radeonfb_hotplug(dev, false); | 59 | drm_helper_hpd_irq_event(dev); |
| 59 | |||
| 60 | drm_sysfs_hotplug_event(dev); | ||
| 61 | } | 60 | } |
| 62 | 61 | ||
| 63 | void radeon_driver_irq_preinstall_kms(struct drm_device *dev) | 62 | void radeon_driver_irq_preinstall_kms(struct drm_device *dev) |
| @@ -69,6 +68,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) | |||
| 69 | 68 | ||
| 70 | /* Disable *all* interrupts */ | 69 | /* Disable *all* interrupts */ |
| 71 | rdev->irq.sw_int = false; | 70 | rdev->irq.sw_int = false; |
| 71 | rdev->irq.gui_idle = false; | ||
| 72 | for (i = 0; i < rdev->num_crtc; i++) | 72 | for (i = 0; i < rdev->num_crtc; i++) |
| 73 | rdev->irq.crtc_vblank_int[i] = false; | 73 | rdev->irq.crtc_vblank_int[i] = false; |
| 74 | for (i = 0; i < 6; i++) | 74 | for (i = 0; i < 6; i++) |
| @@ -98,6 +98,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
| 98 | } | 98 | } |
| 99 | /* Disable *all* interrupts */ | 99 | /* Disable *all* interrupts */ |
| 100 | rdev->irq.sw_int = false; | 100 | rdev->irq.sw_int = false; |
| 101 | rdev->irq.gui_idle = false; | ||
| 101 | for (i = 0; i < rdev->num_crtc; i++) | 102 | for (i = 0; i < rdev->num_crtc; i++) |
| 102 | rdev->irq.crtc_vblank_int[i] = false; | 103 | rdev->irq.crtc_vblank_int[i] = false; |
| 103 | for (i = 0; i < 6; i++) | 104 | for (i = 0; i < 6; i++) |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index c633319f98ed..04068352ccd2 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
| @@ -98,11 +98,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 98 | { | 98 | { |
| 99 | struct radeon_device *rdev = dev->dev_private; | 99 | struct radeon_device *rdev = dev->dev_private; |
| 100 | struct drm_radeon_info *info; | 100 | struct drm_radeon_info *info; |
| 101 | struct radeon_mode_info *minfo = &rdev->mode_info; | ||
| 101 | uint32_t *value_ptr; | 102 | uint32_t *value_ptr; |
| 102 | uint32_t value; | 103 | uint32_t value; |
| 104 | struct drm_crtc *crtc; | ||
| 105 | int i, found; | ||
| 103 | 106 | ||
| 104 | info = data; | 107 | info = data; |
| 105 | value_ptr = (uint32_t *)((unsigned long)info->value); | 108 | value_ptr = (uint32_t *)((unsigned long)info->value); |
| 109 | value = *value_ptr; | ||
| 106 | switch (info->request) { | 110 | switch (info->request) { |
| 107 | case RADEON_INFO_DEVICE_ID: | 111 | case RADEON_INFO_DEVICE_ID: |
| 108 | value = dev->pci_device; | 112 | value = dev->pci_device; |
| @@ -116,6 +120,20 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 116 | case RADEON_INFO_ACCEL_WORKING: | 120 | case RADEON_INFO_ACCEL_WORKING: |
| 117 | value = rdev->accel_working; | 121 | value = rdev->accel_working; |
| 118 | break; | 122 | break; |
| 123 | case RADEON_INFO_CRTC_FROM_ID: | ||
| 124 | for (i = 0, found = 0; i < rdev->num_crtc; i++) { | ||
| 125 | crtc = (struct drm_crtc *)minfo->crtcs[i]; | ||
| 126 | if (crtc && crtc->base.id == value) { | ||
| 127 | value = i; | ||
| 128 | found = 1; | ||
| 129 | break; | ||
| 130 | } | ||
| 131 | } | ||
| 132 | if (!found) { | ||
| 133 | DRM_DEBUG("unknown crtc id %d\n", value); | ||
| 134 | return -EINVAL; | ||
| 135 | } | ||
| 136 | break; | ||
| 119 | default: | 137 | default: |
| 120 | DRM_DEBUG("Invalid request %d\n", info->request); | 138 | DRM_DEBUG("Invalid request %d\n", info->request); |
| 121 | return -EINVAL; | 139 | return -EINVAL; |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 88865e38fe30..e1e5255396ac 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | #include <drm/drmP.h> | 26 | #include <drm/drmP.h> |
| 27 | #include <drm/drm_crtc_helper.h> | 27 | #include <drm/drm_crtc_helper.h> |
| 28 | #include <drm/radeon_drm.h> | 28 | #include <drm/radeon_drm.h> |
| 29 | #include "radeon_fixed.h" | 29 | #include <drm/drm_fixed.h> |
| 30 | #include "radeon.h" | 30 | #include "radeon.h" |
| 31 | #include "atom.h" | 31 | #include "atom.h" |
| 32 | 32 | ||
| @@ -314,6 +314,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 314 | 314 | ||
| 315 | switch (mode) { | 315 | switch (mode) { |
| 316 | case DRM_MODE_DPMS_ON: | 316 | case DRM_MODE_DPMS_ON: |
| 317 | radeon_crtc->enabled = true; | ||
| 318 | /* adjust pm to dpms changes BEFORE enabling crtcs */ | ||
| 319 | radeon_pm_compute_clocks(rdev); | ||
| 317 | if (radeon_crtc->crtc_id) | 320 | if (radeon_crtc->crtc_id) |
| 318 | WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask)); | 321 | WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask)); |
| 319 | else { | 322 | else { |
| @@ -335,6 +338,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 335 | RADEON_CRTC_DISP_REQ_EN_B)); | 338 | RADEON_CRTC_DISP_REQ_EN_B)); |
| 336 | WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask); | 339 | WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask); |
| 337 | } | 340 | } |
| 341 | radeon_crtc->enabled = false; | ||
| 342 | /* adjust pm to dpms changes AFTER disabling crtcs */ | ||
| 343 | radeon_pm_compute_clocks(rdev); | ||
| 338 | break; | 344 | break; |
| 339 | } | 345 | } |
| 340 | } | 346 | } |
| @@ -966,6 +972,12 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, | |||
| 966 | struct drm_display_mode *mode, | 972 | struct drm_display_mode *mode, |
| 967 | struct drm_display_mode *adjusted_mode) | 973 | struct drm_display_mode *adjusted_mode) |
| 968 | { | 974 | { |
| 975 | struct drm_device *dev = crtc->dev; | ||
| 976 | struct radeon_device *rdev = dev->dev_private; | ||
| 977 | |||
| 978 | /* adjust pm to upcoming mode change */ | ||
| 979 | radeon_pm_compute_clocks(rdev); | ||
| 980 | |||
| 969 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | 981 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) |
| 970 | return false; | 982 | return false; |
| 971 | return true; | 983 | return true; |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 0274abe17ad9..5a13b3eeef19 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -116,8 +116,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
| 116 | else | 116 | else |
| 117 | radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); | 117 | radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); |
| 118 | 118 | ||
| 119 | /* adjust pm to dpms change */ | ||
| 120 | radeon_pm_compute_clocks(rdev); | ||
| 121 | } | 119 | } |
| 122 | 120 | ||
| 123 | static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) | 121 | static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) |
| @@ -217,11 +215,6 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, | |||
| 217 | struct drm_display_mode *adjusted_mode) | 215 | struct drm_display_mode *adjusted_mode) |
| 218 | { | 216 | { |
| 219 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 217 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 220 | struct drm_device *dev = encoder->dev; | ||
| 221 | struct radeon_device *rdev = dev->dev_private; | ||
| 222 | |||
| 223 | /* adjust pm to upcoming mode change */ | ||
| 224 | radeon_pm_compute_clocks(rdev); | ||
| 225 | 218 | ||
| 226 | /* set the active encoder to connector routing */ | 219 | /* set the active encoder to connector routing */ |
| 227 | radeon_encoder_set_active_device(encoder); | 220 | radeon_encoder_set_active_device(encoder); |
| @@ -286,8 +279,6 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode | |||
| 286 | else | 279 | else |
| 287 | radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); | 280 | radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); |
| 288 | 281 | ||
| 289 | /* adjust pm to dpms change */ | ||
| 290 | radeon_pm_compute_clocks(rdev); | ||
| 291 | } | 282 | } |
| 292 | 283 | ||
| 293 | static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) | 284 | static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) |
| @@ -474,8 +465,6 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) | |||
| 474 | else | 465 | else |
| 475 | radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); | 466 | radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); |
| 476 | 467 | ||
| 477 | /* adjust pm to dpms change */ | ||
| 478 | radeon_pm_compute_clocks(rdev); | ||
| 479 | } | 468 | } |
| 480 | 469 | ||
| 481 | static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) | 470 | static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) |
| @@ -642,8 +631,6 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) | |||
| 642 | else | 631 | else |
| 643 | radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); | 632 | radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); |
| 644 | 633 | ||
| 645 | /* adjust pm to dpms change */ | ||
| 646 | radeon_pm_compute_clocks(rdev); | ||
| 647 | } | 634 | } |
| 648 | 635 | ||
| 649 | static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) | 636 | static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) |
| @@ -852,8 +839,6 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) | |||
| 852 | else | 839 | else |
| 853 | radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); | 840 | radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); |
| 854 | 841 | ||
| 855 | /* adjust pm to dpms change */ | ||
| 856 | radeon_pm_compute_clocks(rdev); | ||
| 857 | } | 842 | } |
| 858 | 843 | ||
| 859 | static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) | 844 | static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index a2bc31465e4f..71e1f0e7438d 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -34,10 +34,10 @@ | |||
| 34 | #include <drm_mode.h> | 34 | #include <drm_mode.h> |
| 35 | #include <drm_edid.h> | 35 | #include <drm_edid.h> |
| 36 | #include <drm_dp_helper.h> | 36 | #include <drm_dp_helper.h> |
| 37 | #include <drm_fixed.h> | ||
| 37 | #include <linux/i2c.h> | 38 | #include <linux/i2c.h> |
| 38 | #include <linux/i2c-id.h> | 39 | #include <linux/i2c-id.h> |
| 39 | #include <linux/i2c-algo-bit.h> | 40 | #include <linux/i2c-algo-bit.h> |
| 40 | #include "radeon_fixed.h" | ||
| 41 | 41 | ||
| 42 | struct radeon_bo; | 42 | struct radeon_bo; |
| 43 | struct radeon_device; | 43 | struct radeon_device; |
| @@ -588,5 +588,6 @@ void radeon_fbdev_fini(struct radeon_device *rdev); | |||
| 588 | void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); | 588 | void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); |
| 589 | int radeon_fbdev_total_size(struct radeon_device *rdev); | 589 | int radeon_fbdev_total_size(struct radeon_device *rdev); |
| 590 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); | 590 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); |
| 591 | void radeonfb_hotplug(struct drm_device *dev, bool polled); | 591 | |
| 592 | void radeon_fb_output_poll_changed(struct radeon_device *rdev); | ||
| 592 | #endif | 593 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 6a8617bac142..a8d18bcae7db 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
| @@ -112,9 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | |||
| 112 | 112 | ||
| 113 | radeon_ttm_placement_from_domain(bo, domain); | 113 | radeon_ttm_placement_from_domain(bo, domain); |
| 114 | /* Kernel allocation are uninterruptible */ | 114 | /* Kernel allocation are uninterruptible */ |
| 115 | mutex_lock(&rdev->vram_mutex); | ||
| 115 | r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, | 116 | r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, |
| 116 | &bo->placement, 0, 0, !kernel, NULL, size, | 117 | &bo->placement, 0, 0, !kernel, NULL, size, |
| 117 | &radeon_ttm_bo_destroy); | 118 | &radeon_ttm_bo_destroy); |
| 119 | mutex_unlock(&rdev->vram_mutex); | ||
| 118 | if (unlikely(r != 0)) { | 120 | if (unlikely(r != 0)) { |
| 119 | if (r != -ERESTARTSYS) | 121 | if (r != -ERESTARTSYS) |
| 120 | dev_err(rdev->dev, | 122 | dev_err(rdev->dev, |
| @@ -166,11 +168,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo) | |||
| 166 | void radeon_bo_unref(struct radeon_bo **bo) | 168 | void radeon_bo_unref(struct radeon_bo **bo) |
| 167 | { | 169 | { |
| 168 | struct ttm_buffer_object *tbo; | 170 | struct ttm_buffer_object *tbo; |
| 171 | struct radeon_device *rdev; | ||
| 169 | 172 | ||
| 170 | if ((*bo) == NULL) | 173 | if ((*bo) == NULL) |
| 171 | return; | 174 | return; |
| 175 | rdev = (*bo)->rdev; | ||
| 172 | tbo = &((*bo)->tbo); | 176 | tbo = &((*bo)->tbo); |
| 177 | mutex_lock(&rdev->vram_mutex); | ||
| 173 | ttm_bo_unref(&tbo); | 178 | ttm_bo_unref(&tbo); |
| 179 | mutex_unlock(&rdev->vram_mutex); | ||
| 174 | if (tbo == NULL) | 180 | if (tbo == NULL) |
| 175 | *bo = NULL; | 181 | *bo = NULL; |
| 176 | } | 182 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index a4b57493aa78..a8d162c6f829 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -23,164 +23,122 @@ | |||
| 23 | #include "drmP.h" | 23 | #include "drmP.h" |
| 24 | #include "radeon.h" | 24 | #include "radeon.h" |
| 25 | #include "avivod.h" | 25 | #include "avivod.h" |
| 26 | #ifdef CONFIG_ACPI | ||
| 27 | #include <linux/acpi.h> | ||
| 28 | #endif | ||
| 29 | #include <linux/power_supply.h> | ||
| 26 | 30 | ||
| 27 | #define RADEON_IDLE_LOOP_MS 100 | 31 | #define RADEON_IDLE_LOOP_MS 100 |
| 28 | #define RADEON_RECLOCK_DELAY_MS 200 | 32 | #define RADEON_RECLOCK_DELAY_MS 200 |
| 29 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 | 33 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 |
| 34 | #define RADEON_WAIT_IDLE_TIMEOUT 200 | ||
| 30 | 35 | ||
| 36 | static void radeon_dynpm_idle_work_handler(struct work_struct *work); | ||
| 37 | static int radeon_debugfs_pm_init(struct radeon_device *rdev); | ||
| 38 | static bool radeon_pm_in_vbl(struct radeon_device *rdev); | ||
| 31 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); | 39 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); |
| 32 | static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); | 40 | static void radeon_pm_update_profile(struct radeon_device *rdev); |
| 33 | static void radeon_pm_set_clocks(struct radeon_device *rdev); | 41 | static void radeon_pm_set_clocks(struct radeon_device *rdev); |
| 34 | static void radeon_pm_idle_work_handler(struct work_struct *work); | ||
| 35 | static int radeon_debugfs_pm_init(struct radeon_device *rdev); | ||
| 36 | |||
| 37 | static const char *pm_state_names[4] = { | ||
| 38 | "PM_STATE_DISABLED", | ||
| 39 | "PM_STATE_MINIMUM", | ||
| 40 | "PM_STATE_PAUSED", | ||
| 41 | "PM_STATE_ACTIVE" | ||
| 42 | }; | ||
| 43 | 42 | ||
| 44 | static const char *pm_state_types[5] = { | 43 | #define ACPI_AC_CLASS "ac_adapter" |
| 45 | "Default", | ||
| 46 | "Powersave", | ||
| 47 | "Battery", | ||
| 48 | "Balanced", | ||
| 49 | "Performance", | ||
| 50 | }; | ||
| 51 | 44 | ||
| 52 | static void radeon_print_power_mode_info(struct radeon_device *rdev) | 45 | #ifdef CONFIG_ACPI |
| 46 | static int radeon_acpi_event(struct notifier_block *nb, | ||
| 47 | unsigned long val, | ||
| 48 | void *data) | ||
| 53 | { | 49 | { |
| 54 | int i, j; | 50 | struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); |
| 55 | bool is_default; | 51 | struct acpi_bus_event *entry = (struct acpi_bus_event *)data; |
| 56 | 52 | ||
| 57 | DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states); | 53 | if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { |
| 58 | for (i = 0; i < rdev->pm.num_power_states; i++) { | 54 | if (power_supply_is_system_supplied() > 0) |
| 59 | if (rdev->pm.default_power_state == &rdev->pm.power_state[i]) | 55 | DRM_DEBUG("pm: AC\n"); |
| 60 | is_default = true; | ||
| 61 | else | 56 | else |
| 62 | is_default = false; | 57 | DRM_DEBUG("pm: DC\n"); |
| 63 | DRM_INFO("State %d %s %s\n", i, | 58 | |
| 64 | pm_state_types[rdev->pm.power_state[i].type], | 59 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
| 65 | is_default ? "(default)" : ""); | 60 | if (rdev->pm.profile == PM_PROFILE_AUTO) { |
| 66 | if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) | 61 | mutex_lock(&rdev->pm.mutex); |
| 67 | DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes); | 62 | radeon_pm_update_profile(rdev); |
| 68 | DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes); | 63 | radeon_pm_set_clocks(rdev); |
| 69 | for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) { | 64 | mutex_unlock(&rdev->pm.mutex); |
| 70 | if (rdev->flags & RADEON_IS_IGP) | 65 | } |
| 71 | DRM_INFO("\t\t%d engine: %d\n", | ||
| 72 | j, | ||
| 73 | rdev->pm.power_state[i].clock_info[j].sclk * 10); | ||
| 74 | else | ||
| 75 | DRM_INFO("\t\t%d engine/memory: %d/%d\n", | ||
| 76 | j, | ||
| 77 | rdev->pm.power_state[i].clock_info[j].sclk * 10, | ||
| 78 | rdev->pm.power_state[i].clock_info[j].mclk * 10); | ||
| 79 | } | 66 | } |
| 80 | } | 67 | } |
| 68 | |||
| 69 | return NOTIFY_OK; | ||
| 81 | } | 70 | } |
| 71 | #endif | ||
| 82 | 72 | ||
| 83 | static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev, | 73 | static void radeon_pm_update_profile(struct radeon_device *rdev) |
| 84 | enum radeon_pm_state_type type) | ||
| 85 | { | 74 | { |
| 86 | int i, j; | 75 | switch (rdev->pm.profile) { |
| 87 | enum radeon_pm_state_type wanted_types[2]; | 76 | case PM_PROFILE_DEFAULT: |
| 88 | int wanted_count; | 77 | rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX; |
| 89 | |||
| 90 | switch (type) { | ||
| 91 | case POWER_STATE_TYPE_DEFAULT: | ||
| 92 | default: | ||
| 93 | return rdev->pm.default_power_state; | ||
| 94 | case POWER_STATE_TYPE_POWERSAVE: | ||
| 95 | if (rdev->flags & RADEON_IS_MOBILITY) { | ||
| 96 | wanted_types[0] = POWER_STATE_TYPE_POWERSAVE; | ||
| 97 | wanted_types[1] = POWER_STATE_TYPE_BATTERY; | ||
| 98 | wanted_count = 2; | ||
| 99 | } else { | ||
| 100 | wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE; | ||
| 101 | wanted_count = 1; | ||
| 102 | } | ||
| 103 | break; | 78 | break; |
| 104 | case POWER_STATE_TYPE_BATTERY: | 79 | case PM_PROFILE_AUTO: |
| 105 | if (rdev->flags & RADEON_IS_MOBILITY) { | 80 | if (power_supply_is_system_supplied() > 0) { |
| 106 | wanted_types[0] = POWER_STATE_TYPE_BATTERY; | 81 | if (rdev->pm.active_crtc_count > 1) |
| 107 | wanted_types[1] = POWER_STATE_TYPE_POWERSAVE; | 82 | rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; |
| 108 | wanted_count = 2; | 83 | else |
| 84 | rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; | ||
| 109 | } else { | 85 | } else { |
| 110 | wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE; | 86 | if (rdev->pm.active_crtc_count > 1) |
| 111 | wanted_count = 1; | 87 | rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; |
| 88 | else | ||
| 89 | rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; | ||
| 112 | } | 90 | } |
| 113 | break; | 91 | break; |
| 114 | case POWER_STATE_TYPE_BALANCED: | 92 | case PM_PROFILE_LOW: |
| 115 | case POWER_STATE_TYPE_PERFORMANCE: | 93 | if (rdev->pm.active_crtc_count > 1) |
| 116 | wanted_types[0] = type; | 94 | rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; |
| 117 | wanted_count = 1; | 95 | else |
| 96 | rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; | ||
| 118 | break; | 97 | break; |
| 119 | } | 98 | case PM_PROFILE_HIGH: |
| 120 | 99 | if (rdev->pm.active_crtc_count > 1) | |
| 121 | for (i = 0; i < wanted_count; i++) { | 100 | rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; |
| 122 | for (j = 0; j < rdev->pm.num_power_states; j++) { | ||
| 123 | if (rdev->pm.power_state[j].type == wanted_types[i]) | ||
| 124 | return &rdev->pm.power_state[j]; | ||
| 125 | } | ||
| 126 | } | ||
| 127 | |||
| 128 | return rdev->pm.default_power_state; | ||
| 129 | } | ||
| 130 | |||
| 131 | static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev, | ||
| 132 | struct radeon_power_state *power_state, | ||
| 133 | enum radeon_pm_clock_mode_type type) | ||
| 134 | { | ||
| 135 | switch (type) { | ||
| 136 | case POWER_MODE_TYPE_DEFAULT: | ||
| 137 | default: | ||
| 138 | return power_state->default_clock_mode; | ||
| 139 | case POWER_MODE_TYPE_LOW: | ||
| 140 | return &power_state->clock_info[0]; | ||
| 141 | case POWER_MODE_TYPE_MID: | ||
| 142 | if (power_state->num_clock_modes > 2) | ||
| 143 | return &power_state->clock_info[1]; | ||
| 144 | else | 101 | else |
| 145 | return &power_state->clock_info[0]; | 102 | rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; |
| 146 | break; | 103 | break; |
| 147 | case POWER_MODE_TYPE_HIGH: | ||
| 148 | return &power_state->clock_info[power_state->num_clock_modes - 1]; | ||
| 149 | } | 104 | } |
| 150 | 105 | ||
| 106 | if (rdev->pm.active_crtc_count == 0) { | ||
| 107 | rdev->pm.requested_power_state_index = | ||
| 108 | rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx; | ||
| 109 | rdev->pm.requested_clock_mode_index = | ||
| 110 | rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx; | ||
| 111 | } else { | ||
| 112 | rdev->pm.requested_power_state_index = | ||
| 113 | rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx; | ||
| 114 | rdev->pm.requested_clock_mode_index = | ||
| 115 | rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx; | ||
| 116 | } | ||
| 151 | } | 117 | } |
| 152 | 118 | ||
| 153 | static void radeon_get_power_state(struct radeon_device *rdev, | 119 | static void radeon_unmap_vram_bos(struct radeon_device *rdev) |
| 154 | enum radeon_pm_action action) | ||
| 155 | { | 120 | { |
| 156 | switch (action) { | 121 | struct radeon_bo *bo, *n; |
| 157 | case PM_ACTION_MINIMUM: | 122 | |
| 158 | rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY); | 123 | if (list_empty(&rdev->gem.objects)) |
| 159 | rdev->pm.requested_clock_mode = | ||
| 160 | radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW); | ||
| 161 | break; | ||
| 162 | case PM_ACTION_DOWNCLOCK: | ||
| 163 | rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE); | ||
| 164 | rdev->pm.requested_clock_mode = | ||
| 165 | radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID); | ||
| 166 | break; | ||
| 167 | case PM_ACTION_UPCLOCK: | ||
| 168 | rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT); | ||
| 169 | rdev->pm.requested_clock_mode = | ||
| 170 | radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH); | ||
| 171 | break; | ||
| 172 | case PM_ACTION_NONE: | ||
| 173 | default: | ||
| 174 | DRM_ERROR("Requested mode for not defined action\n"); | ||
| 175 | return; | 124 | return; |
| 125 | |||
| 126 | list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { | ||
| 127 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) | ||
| 128 | ttm_bo_unmap_virtual(&bo->tbo); | ||
| 176 | } | 129 | } |
| 177 | DRM_INFO("Requested: e: %d m: %d p: %d\n", | 130 | |
| 178 | rdev->pm.requested_clock_mode->sclk, | 131 | if (rdev->gart.table.vram.robj) |
| 179 | rdev->pm.requested_clock_mode->mclk, | 132 | ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo); |
| 180 | rdev->pm.requested_power_state->non_clock_info.pcie_lanes); | 133 | |
| 134 | if (rdev->stollen_vga_memory) | ||
| 135 | ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo); | ||
| 136 | |||
| 137 | if (rdev->r600_blit.shader_obj) | ||
| 138 | ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo); | ||
| 181 | } | 139 | } |
| 182 | 140 | ||
| 183 | static inline void radeon_sync_with_vblank(struct radeon_device *rdev) | 141 | static void radeon_sync_with_vblank(struct radeon_device *rdev) |
| 184 | { | 142 | { |
| 185 | if (rdev->pm.active_crtcs) { | 143 | if (rdev->pm.active_crtcs) { |
| 186 | rdev->pm.vblank_sync = false; | 144 | rdev->pm.vblank_sync = false; |
| @@ -192,73 +150,332 @@ static inline void radeon_sync_with_vblank(struct radeon_device *rdev) | |||
| 192 | 150 | ||
| 193 | static void radeon_set_power_state(struct radeon_device *rdev) | 151 | static void radeon_set_power_state(struct radeon_device *rdev) |
| 194 | { | 152 | { |
| 195 | /* if *_clock_mode are the same, *_power_state are as well */ | 153 | u32 sclk, mclk; |
| 196 | if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode) | 154 | |
| 155 | if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && | ||
| 156 | (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) | ||
| 197 | return; | 157 | return; |
| 198 | 158 | ||
| 199 | DRM_INFO("Setting: e: %d m: %d p: %d\n", | 159 | if (radeon_gui_idle(rdev)) { |
| 200 | rdev->pm.requested_clock_mode->sclk, | 160 | sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. |
| 201 | rdev->pm.requested_clock_mode->mclk, | 161 | clock_info[rdev->pm.requested_clock_mode_index].sclk; |
| 202 | rdev->pm.requested_power_state->non_clock_info.pcie_lanes); | 162 | if (sclk > rdev->clock.default_sclk) |
| 203 | 163 | sclk = rdev->clock.default_sclk; | |
| 204 | /* set pcie lanes */ | 164 | |
| 205 | /* TODO */ | 165 | mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. |
| 206 | 166 | clock_info[rdev->pm.requested_clock_mode_index].mclk; | |
| 207 | /* set voltage */ | 167 | if (mclk > rdev->clock.default_mclk) |
| 208 | /* TODO */ | 168 | mclk = rdev->clock.default_mclk; |
| 209 | 169 | ||
| 210 | /* set engine clock */ | 170 | /* voltage, pcie lanes, etc.*/ |
| 211 | radeon_sync_with_vblank(rdev); | 171 | radeon_pm_misc(rdev); |
| 212 | radeon_pm_debug_check_in_vbl(rdev, false); | 172 | |
| 213 | radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); | 173 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
| 214 | radeon_pm_debug_check_in_vbl(rdev, true); | 174 | radeon_sync_with_vblank(rdev); |
| 215 | 175 | ||
| 216 | #if 0 | 176 | if (!radeon_pm_in_vbl(rdev)) |
| 217 | /* set memory clock */ | 177 | return; |
| 218 | if (rdev->asic->set_memory_clock) { | 178 | |
| 219 | radeon_sync_with_vblank(rdev); | 179 | radeon_pm_prepare(rdev); |
| 220 | radeon_pm_debug_check_in_vbl(rdev, false); | 180 | /* set engine clock */ |
| 221 | radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk); | 181 | if (sclk != rdev->pm.current_sclk) { |
| 222 | radeon_pm_debug_check_in_vbl(rdev, true); | 182 | radeon_pm_debug_check_in_vbl(rdev, false); |
| 183 | radeon_set_engine_clock(rdev, sclk); | ||
| 184 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
| 185 | rdev->pm.current_sclk = sclk; | ||
| 186 | DRM_DEBUG("Setting: e: %d\n", sclk); | ||
| 187 | } | ||
| 188 | |||
| 189 | /* set memory clock */ | ||
| 190 | if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { | ||
| 191 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
| 192 | radeon_set_memory_clock(rdev, mclk); | ||
| 193 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
| 194 | rdev->pm.current_mclk = mclk; | ||
| 195 | DRM_DEBUG("Setting: m: %d\n", mclk); | ||
| 196 | } | ||
| 197 | radeon_pm_finish(rdev); | ||
| 198 | } else { | ||
| 199 | /* set engine clock */ | ||
| 200 | if (sclk != rdev->pm.current_sclk) { | ||
| 201 | radeon_sync_with_vblank(rdev); | ||
| 202 | radeon_pm_prepare(rdev); | ||
| 203 | radeon_set_engine_clock(rdev, sclk); | ||
| 204 | radeon_pm_finish(rdev); | ||
| 205 | rdev->pm.current_sclk = sclk; | ||
| 206 | DRM_DEBUG("Setting: e: %d\n", sclk); | ||
| 207 | } | ||
| 208 | /* set memory clock */ | ||
| 209 | if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { | ||
| 210 | radeon_sync_with_vblank(rdev); | ||
| 211 | radeon_pm_prepare(rdev); | ||
| 212 | radeon_set_memory_clock(rdev, mclk); | ||
| 213 | radeon_pm_finish(rdev); | ||
| 214 | rdev->pm.current_mclk = mclk; | ||
| 215 | DRM_DEBUG("Setting: m: %d\n", mclk); | ||
| 216 | } | ||
| 217 | } | ||
| 218 | |||
| 219 | rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; | ||
| 220 | rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; | ||
| 221 | } else | ||
| 222 | DRM_DEBUG("pm: GUI not idle!!!\n"); | ||
| 223 | } | ||
| 224 | |||
| 225 | static void radeon_pm_set_clocks(struct radeon_device *rdev) | ||
| 226 | { | ||
| 227 | int i; | ||
| 228 | |||
| 229 | mutex_lock(&rdev->ddev->struct_mutex); | ||
| 230 | mutex_lock(&rdev->vram_mutex); | ||
| 231 | mutex_lock(&rdev->cp.mutex); | ||
| 232 | |||
| 233 | /* gui idle int has issues on older chips it seems */ | ||
| 234 | if (rdev->family >= CHIP_R600) { | ||
| 235 | if (rdev->irq.installed) { | ||
| 236 | /* wait for GPU idle */ | ||
| 237 | rdev->pm.gui_idle = false; | ||
| 238 | rdev->irq.gui_idle = true; | ||
| 239 | radeon_irq_set(rdev); | ||
| 240 | wait_event_interruptible_timeout( | ||
| 241 | rdev->irq.idle_queue, rdev->pm.gui_idle, | ||
| 242 | msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); | ||
| 243 | rdev->irq.gui_idle = false; | ||
| 244 | radeon_irq_set(rdev); | ||
| 245 | } | ||
| 246 | } else { | ||
| 247 | if (rdev->cp.ready) { | ||
| 248 | struct radeon_fence *fence; | ||
| 249 | radeon_ring_alloc(rdev, 64); | ||
| 250 | radeon_fence_create(rdev, &fence); | ||
| 251 | radeon_fence_emit(rdev, fence); | ||
| 252 | radeon_ring_commit(rdev); | ||
| 253 | radeon_fence_wait(fence, false); | ||
| 254 | radeon_fence_unref(&fence); | ||
| 255 | } | ||
| 223 | } | 256 | } |
| 224 | #endif | 257 | radeon_unmap_vram_bos(rdev); |
| 258 | |||
| 259 | if (rdev->irq.installed) { | ||
| 260 | for (i = 0; i < rdev->num_crtc; i++) { | ||
| 261 | if (rdev->pm.active_crtcs & (1 << i)) { | ||
| 262 | rdev->pm.req_vblank |= (1 << i); | ||
| 263 | drm_vblank_get(rdev->ddev, i); | ||
| 264 | } | ||
| 265 | } | ||
| 266 | } | ||
| 267 | |||
| 268 | radeon_set_power_state(rdev); | ||
| 269 | |||
| 270 | if (rdev->irq.installed) { | ||
| 271 | for (i = 0; i < rdev->num_crtc; i++) { | ||
| 272 | if (rdev->pm.req_vblank & (1 << i)) { | ||
| 273 | rdev->pm.req_vblank &= ~(1 << i); | ||
| 274 | drm_vblank_put(rdev->ddev, i); | ||
| 275 | } | ||
| 276 | } | ||
| 277 | } | ||
| 278 | |||
| 279 | /* update display watermarks based on new power state */ | ||
| 280 | radeon_update_bandwidth_info(rdev); | ||
| 281 | if (rdev->pm.active_crtc_count) | ||
| 282 | radeon_bandwidth_update(rdev); | ||
| 283 | |||
| 284 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | ||
| 285 | |||
| 286 | mutex_unlock(&rdev->cp.mutex); | ||
| 287 | mutex_unlock(&rdev->vram_mutex); | ||
| 288 | mutex_unlock(&rdev->ddev->struct_mutex); | ||
| 289 | } | ||
| 290 | |||
| 291 | static ssize_t radeon_get_pm_profile(struct device *dev, | ||
| 292 | struct device_attribute *attr, | ||
| 293 | char *buf) | ||
| 294 | { | ||
| 295 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | ||
| 296 | struct radeon_device *rdev = ddev->dev_private; | ||
| 297 | int cp = rdev->pm.profile; | ||
| 298 | |||
| 299 | return snprintf(buf, PAGE_SIZE, "%s\n", | ||
| 300 | (cp == PM_PROFILE_AUTO) ? "auto" : | ||
| 301 | (cp == PM_PROFILE_LOW) ? "low" : | ||
| 302 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); | ||
| 303 | } | ||
| 304 | |||
| 305 | static ssize_t radeon_set_pm_profile(struct device *dev, | ||
| 306 | struct device_attribute *attr, | ||
| 307 | const char *buf, | ||
| 308 | size_t count) | ||
| 309 | { | ||
| 310 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | ||
| 311 | struct radeon_device *rdev = ddev->dev_private; | ||
| 312 | |||
| 313 | mutex_lock(&rdev->pm.mutex); | ||
| 314 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | ||
| 315 | if (strncmp("default", buf, strlen("default")) == 0) | ||
| 316 | rdev->pm.profile = PM_PROFILE_DEFAULT; | ||
| 317 | else if (strncmp("auto", buf, strlen("auto")) == 0) | ||
| 318 | rdev->pm.profile = PM_PROFILE_AUTO; | ||
| 319 | else if (strncmp("low", buf, strlen("low")) == 0) | ||
| 320 | rdev->pm.profile = PM_PROFILE_LOW; | ||
| 321 | else if (strncmp("high", buf, strlen("high")) == 0) | ||
| 322 | rdev->pm.profile = PM_PROFILE_HIGH; | ||
| 323 | else { | ||
| 324 | DRM_ERROR("invalid power profile!\n"); | ||
| 325 | goto fail; | ||
| 326 | } | ||
| 327 | radeon_pm_update_profile(rdev); | ||
| 328 | radeon_pm_set_clocks(rdev); | ||
| 329 | } | ||
| 330 | fail: | ||
| 331 | mutex_unlock(&rdev->pm.mutex); | ||
| 332 | |||
| 333 | return count; | ||
| 334 | } | ||
| 335 | |||
| 336 | static ssize_t radeon_get_pm_method(struct device *dev, | ||
| 337 | struct device_attribute *attr, | ||
| 338 | char *buf) | ||
| 339 | { | ||
| 340 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | ||
| 341 | struct radeon_device *rdev = ddev->dev_private; | ||
| 342 | int pm = rdev->pm.pm_method; | ||
| 343 | |||
| 344 | return snprintf(buf, PAGE_SIZE, "%s\n", | ||
| 345 | (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile"); | ||
| 346 | } | ||
| 347 | |||
| 348 | static ssize_t radeon_set_pm_method(struct device *dev, | ||
| 349 | struct device_attribute *attr, | ||
| 350 | const char *buf, | ||
| 351 | size_t count) | ||
| 352 | { | ||
| 353 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | ||
| 354 | struct radeon_device *rdev = ddev->dev_private; | ||
| 355 | |||
| 356 | |||
| 357 | if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { | ||
| 358 | mutex_lock(&rdev->pm.mutex); | ||
| 359 | rdev->pm.pm_method = PM_METHOD_DYNPM; | ||
| 360 | rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; | ||
| 361 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | ||
| 362 | mutex_unlock(&rdev->pm.mutex); | ||
| 363 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { | ||
| 364 | mutex_lock(&rdev->pm.mutex); | ||
| 365 | rdev->pm.pm_method = PM_METHOD_PROFILE; | ||
| 366 | /* disable dynpm */ | ||
| 367 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | ||
| 368 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | ||
| 369 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
| 370 | mutex_unlock(&rdev->pm.mutex); | ||
| 371 | } else { | ||
| 372 | DRM_ERROR("invalid power method!\n"); | ||
| 373 | goto fail; | ||
| 374 | } | ||
| 375 | radeon_pm_compute_clocks(rdev); | ||
| 376 | fail: | ||
| 377 | return count; | ||
| 378 | } | ||
| 379 | |||
| 380 | static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); | ||
| 381 | static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); | ||
| 225 | 382 | ||
| 226 | rdev->pm.current_power_state = rdev->pm.requested_power_state; | 383 | void radeon_pm_suspend(struct radeon_device *rdev) |
| 227 | rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; | 384 | { |
| 385 | mutex_lock(&rdev->pm.mutex); | ||
| 386 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
| 387 | rdev->pm.current_power_state_index = -1; | ||
| 388 | rdev->pm.current_clock_mode_index = -1; | ||
| 389 | rdev->pm.current_sclk = 0; | ||
| 390 | rdev->pm.current_mclk = 0; | ||
| 391 | mutex_unlock(&rdev->pm.mutex); | ||
| 392 | } | ||
| 393 | |||
| 394 | void radeon_pm_resume(struct radeon_device *rdev) | ||
| 395 | { | ||
| 396 | radeon_pm_compute_clocks(rdev); | ||
| 228 | } | 397 | } |
| 229 | 398 | ||
| 230 | int radeon_pm_init(struct radeon_device *rdev) | 399 | int radeon_pm_init(struct radeon_device *rdev) |
| 231 | { | 400 | { |
| 232 | rdev->pm.state = PM_STATE_DISABLED; | 401 | int ret; |
| 233 | rdev->pm.planned_action = PM_ACTION_NONE; | 402 | /* default to profile method */ |
| 234 | rdev->pm.downclocked = false; | 403 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
| 404 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | ||
| 405 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | ||
| 406 | rdev->pm.dynpm_can_upclock = true; | ||
| 407 | rdev->pm.dynpm_can_downclock = true; | ||
| 408 | rdev->pm.current_sclk = 0; | ||
| 409 | rdev->pm.current_mclk = 0; | ||
| 235 | 410 | ||
| 236 | if (rdev->bios) { | 411 | if (rdev->bios) { |
| 237 | if (rdev->is_atom_bios) | 412 | if (rdev->is_atom_bios) |
| 238 | radeon_atombios_get_power_modes(rdev); | 413 | radeon_atombios_get_power_modes(rdev); |
| 239 | else | 414 | else |
| 240 | radeon_combios_get_power_modes(rdev); | 415 | radeon_combios_get_power_modes(rdev); |
| 241 | radeon_print_power_mode_info(rdev); | 416 | radeon_pm_init_profile(rdev); |
| 417 | rdev->pm.current_power_state_index = -1; | ||
| 418 | rdev->pm.current_clock_mode_index = -1; | ||
| 242 | } | 419 | } |
| 243 | 420 | ||
| 244 | if (radeon_debugfs_pm_init(rdev)) { | 421 | if (rdev->pm.num_power_states > 1) { |
| 245 | DRM_ERROR("Failed to register debugfs file for PM!\n"); | 422 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
| 246 | } | 423 | mutex_lock(&rdev->pm.mutex); |
| 424 | rdev->pm.profile = PM_PROFILE_DEFAULT; | ||
| 425 | radeon_pm_update_profile(rdev); | ||
| 426 | radeon_pm_set_clocks(rdev); | ||
| 427 | mutex_unlock(&rdev->pm.mutex); | ||
| 428 | } | ||
| 429 | |||
| 430 | /* where's the best place to put these? */ | ||
| 431 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); | ||
| 432 | if (ret) | ||
| 433 | DRM_ERROR("failed to create device file for power profile\n"); | ||
| 434 | ret = device_create_file(rdev->dev, &dev_attr_power_method); | ||
| 435 | if (ret) | ||
| 436 | DRM_ERROR("failed to create device file for power method\n"); | ||
| 437 | |||
| 438 | #ifdef CONFIG_ACPI | ||
| 439 | rdev->acpi_nb.notifier_call = radeon_acpi_event; | ||
| 440 | register_acpi_notifier(&rdev->acpi_nb); | ||
| 441 | #endif | ||
| 442 | INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); | ||
| 247 | 443 | ||
| 248 | INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler); | 444 | if (radeon_debugfs_pm_init(rdev)) { |
| 445 | DRM_ERROR("Failed to register debugfs file for PM!\n"); | ||
| 446 | } | ||
| 249 | 447 | ||
| 250 | if (radeon_dynpm != -1 && radeon_dynpm) { | 448 | DRM_INFO("radeon: power management initialized\n"); |
| 251 | rdev->pm.state = PM_STATE_PAUSED; | ||
| 252 | DRM_INFO("radeon: dynamic power management enabled\n"); | ||
| 253 | } | 449 | } |
| 254 | 450 | ||
| 255 | DRM_INFO("radeon: power management initialized\n"); | ||
| 256 | |||
| 257 | return 0; | 451 | return 0; |
| 258 | } | 452 | } |
| 259 | 453 | ||
| 260 | void radeon_pm_fini(struct radeon_device *rdev) | 454 | void radeon_pm_fini(struct radeon_device *rdev) |
| 261 | { | 455 | { |
| 456 | if (rdev->pm.num_power_states > 1) { | ||
| 457 | mutex_lock(&rdev->pm.mutex); | ||
| 458 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | ||
| 459 | rdev->pm.profile = PM_PROFILE_DEFAULT; | ||
| 460 | radeon_pm_update_profile(rdev); | ||
| 461 | radeon_pm_set_clocks(rdev); | ||
| 462 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | ||
| 463 | /* cancel work */ | ||
| 464 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); | ||
| 465 | /* reset default clocks */ | ||
| 466 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | ||
| 467 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | ||
| 468 | radeon_pm_set_clocks(rdev); | ||
| 469 | } | ||
| 470 | mutex_unlock(&rdev->pm.mutex); | ||
| 471 | |||
| 472 | device_remove_file(rdev->dev, &dev_attr_power_profile); | ||
| 473 | device_remove_file(rdev->dev, &dev_attr_power_method); | ||
| 474 | #ifdef CONFIG_ACPI | ||
| 475 | unregister_acpi_notifier(&rdev->acpi_nb); | ||
| 476 | #endif | ||
| 477 | } | ||
| 478 | |||
| 262 | if (rdev->pm.i2c_bus) | 479 | if (rdev->pm.i2c_bus) |
| 263 | radeon_i2c_destroy(rdev->pm.i2c_bus); | 480 | radeon_i2c_destroy(rdev->pm.i2c_bus); |
| 264 | } | 481 | } |
| @@ -266,146 +483,167 @@ void radeon_pm_fini(struct radeon_device *rdev) | |||
| 266 | void radeon_pm_compute_clocks(struct radeon_device *rdev) | 483 | void radeon_pm_compute_clocks(struct radeon_device *rdev) |
| 267 | { | 484 | { |
| 268 | struct drm_device *ddev = rdev->ddev; | 485 | struct drm_device *ddev = rdev->ddev; |
| 269 | struct drm_connector *connector; | 486 | struct drm_crtc *crtc; |
| 270 | struct radeon_crtc *radeon_crtc; | 487 | struct radeon_crtc *radeon_crtc; |
| 271 | int count = 0; | ||
| 272 | 488 | ||
| 273 | if (rdev->pm.state == PM_STATE_DISABLED) | 489 | if (rdev->pm.num_power_states < 2) |
| 274 | return; | 490 | return; |
| 275 | 491 | ||
| 276 | mutex_lock(&rdev->pm.mutex); | 492 | mutex_lock(&rdev->pm.mutex); |
| 277 | 493 | ||
| 278 | rdev->pm.active_crtcs = 0; | 494 | rdev->pm.active_crtcs = 0; |
| 279 | list_for_each_entry(connector, | 495 | rdev->pm.active_crtc_count = 0; |
| 280 | &ddev->mode_config.connector_list, head) { | 496 | list_for_each_entry(crtc, |
| 281 | if (connector->encoder && | 497 | &ddev->mode_config.crtc_list, head) { |
| 282 | connector->encoder->crtc && | 498 | radeon_crtc = to_radeon_crtc(crtc); |
| 283 | connector->dpms != DRM_MODE_DPMS_OFF) { | 499 | if (radeon_crtc->enabled) { |
| 284 | radeon_crtc = to_radeon_crtc(connector->encoder->crtc); | ||
| 285 | rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); | 500 | rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); |
| 286 | ++count; | 501 | rdev->pm.active_crtc_count++; |
| 287 | } | 502 | } |
| 288 | } | 503 | } |
| 289 | 504 | ||
| 290 | if (count > 1) { | 505 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
| 291 | if (rdev->pm.state == PM_STATE_ACTIVE) { | 506 | radeon_pm_update_profile(rdev); |
| 292 | cancel_delayed_work(&rdev->pm.idle_work); | 507 | radeon_pm_set_clocks(rdev); |
| 293 | 508 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | |
| 294 | rdev->pm.state = PM_STATE_PAUSED; | 509 | if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) { |
| 295 | rdev->pm.planned_action = PM_ACTION_UPCLOCK; | 510 | if (rdev->pm.active_crtc_count > 1) { |
| 296 | if (rdev->pm.downclocked) | 511 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { |
| 297 | radeon_pm_set_clocks(rdev); | 512 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); |
| 298 | 513 | ||
| 299 | DRM_DEBUG("radeon: dynamic power management deactivated\n"); | 514 | rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; |
| 300 | } | 515 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
| 301 | } else if (count == 1) { | 516 | radeon_pm_get_dynpm_state(rdev); |
| 302 | /* TODO: Increase clocks if needed for current mode */ | 517 | radeon_pm_set_clocks(rdev); |
| 303 | 518 | ||
| 304 | if (rdev->pm.state == PM_STATE_MINIMUM) { | 519 | DRM_DEBUG("radeon: dynamic power management deactivated\n"); |
| 305 | rdev->pm.state = PM_STATE_ACTIVE; | 520 | } |
| 306 | rdev->pm.planned_action = PM_ACTION_UPCLOCK; | 521 | } else if (rdev->pm.active_crtc_count == 1) { |
| 307 | radeon_pm_set_clocks(rdev); | 522 | /* TODO: Increase clocks if needed for current mode */ |
| 308 | 523 | ||
| 309 | queue_delayed_work(rdev->wq, &rdev->pm.idle_work, | 524 | if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) { |
| 310 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 525 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
| 311 | } | 526 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; |
| 312 | else if (rdev->pm.state == PM_STATE_PAUSED) { | 527 | radeon_pm_get_dynpm_state(rdev); |
| 313 | rdev->pm.state = PM_STATE_ACTIVE; | 528 | radeon_pm_set_clocks(rdev); |
| 314 | queue_delayed_work(rdev->wq, &rdev->pm.idle_work, | 529 | |
| 315 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 530 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, |
| 316 | DRM_DEBUG("radeon: dynamic power management activated\n"); | 531 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
| 317 | } | 532 | } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { |
| 318 | } | 533 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
| 319 | else { /* count == 0 */ | 534 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, |
| 320 | if (rdev->pm.state != PM_STATE_MINIMUM) { | 535 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
| 321 | cancel_delayed_work(&rdev->pm.idle_work); | 536 | DRM_DEBUG("radeon: dynamic power management activated\n"); |
| 322 | 537 | } | |
| 323 | rdev->pm.state = PM_STATE_MINIMUM; | 538 | } else { /* count == 0 */ |
| 324 | rdev->pm.planned_action = PM_ACTION_MINIMUM; | 539 | if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { |
| 325 | radeon_pm_set_clocks(rdev); | 540 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); |
| 541 | |||
| 542 | rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM; | ||
| 543 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM; | ||
| 544 | radeon_pm_get_dynpm_state(rdev); | ||
| 545 | radeon_pm_set_clocks(rdev); | ||
| 546 | } | ||
| 547 | } | ||
| 326 | } | 548 | } |
| 327 | } | 549 | } |
| 328 | 550 | ||
| 329 | mutex_unlock(&rdev->pm.mutex); | 551 | mutex_unlock(&rdev->pm.mutex); |
| 330 | } | 552 | } |
| 331 | 553 | ||
| 332 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) | 554 | static bool radeon_pm_in_vbl(struct radeon_device *rdev) |
| 333 | { | 555 | { |
| 334 | u32 stat_crtc1 = 0, stat_crtc2 = 0; | 556 | u32 stat_crtc = 0, vbl = 0, position = 0; |
| 335 | bool in_vbl = true; | 557 | bool in_vbl = true; |
| 336 | 558 | ||
| 337 | if (ASIC_IS_AVIVO(rdev)) { | 559 | if (ASIC_IS_DCE4(rdev)) { |
| 560 | if (rdev->pm.active_crtcs & (1 << 0)) { | ||
| 561 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
| 562 | EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; | ||
| 563 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
| 564 | EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; | ||
| 565 | } | ||
| 566 | if (rdev->pm.active_crtcs & (1 << 1)) { | ||
| 567 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
| 568 | EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; | ||
| 569 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
| 570 | EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; | ||
| 571 | } | ||
| 572 | if (rdev->pm.active_crtcs & (1 << 2)) { | ||
| 573 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
| 574 | EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; | ||
| 575 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
| 576 | EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; | ||
| 577 | } | ||
| 578 | if (rdev->pm.active_crtcs & (1 << 3)) { | ||
| 579 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
| 580 | EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; | ||
| 581 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
| 582 | EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; | ||
| 583 | } | ||
| 584 | if (rdev->pm.active_crtcs & (1 << 4)) { | ||
| 585 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
| 586 | EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; | ||
| 587 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
| 588 | EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; | ||
| 589 | } | ||
| 590 | if (rdev->pm.active_crtcs & (1 << 5)) { | ||
| 591 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
| 592 | EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; | ||
| 593 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
| 594 | EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; | ||
| 595 | } | ||
| 596 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
| 597 | if (rdev->pm.active_crtcs & (1 << 0)) { | ||
| 598 | vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff; | ||
| 599 | position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff; | ||
| 600 | } | ||
| 601 | if (rdev->pm.active_crtcs & (1 << 1)) { | ||
| 602 | vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff; | ||
| 603 | position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff; | ||
| 604 | } | ||
| 605 | if (position < vbl && position > 1) | ||
| 606 | in_vbl = false; | ||
| 607 | } else { | ||
| 338 | if (rdev->pm.active_crtcs & (1 << 0)) { | 608 | if (rdev->pm.active_crtcs & (1 << 0)) { |
| 339 | stat_crtc1 = RREG32(D1CRTC_STATUS); | 609 | stat_crtc = RREG32(RADEON_CRTC_STATUS); |
| 340 | if (!(stat_crtc1 & 1)) | 610 | if (!(stat_crtc & 1)) |
| 341 | in_vbl = false; | 611 | in_vbl = false; |
| 342 | } | 612 | } |
| 343 | if (rdev->pm.active_crtcs & (1 << 1)) { | 613 | if (rdev->pm.active_crtcs & (1 << 1)) { |
| 344 | stat_crtc2 = RREG32(D2CRTC_STATUS); | 614 | stat_crtc = RREG32(RADEON_CRTC2_STATUS); |
| 345 | if (!(stat_crtc2 & 1)) | 615 | if (!(stat_crtc & 1)) |
| 346 | in_vbl = false; | 616 | in_vbl = false; |
| 347 | } | 617 | } |
| 348 | } | 618 | } |
| 349 | if (in_vbl == false) | ||
| 350 | DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1, | ||
| 351 | stat_crtc2, finish ? "exit" : "entry"); | ||
| 352 | return in_vbl; | ||
| 353 | } | ||
| 354 | static void radeon_pm_set_clocks_locked(struct radeon_device *rdev) | ||
| 355 | { | ||
| 356 | /*radeon_fence_wait_last(rdev);*/ | ||
| 357 | switch (rdev->pm.planned_action) { | ||
| 358 | case PM_ACTION_UPCLOCK: | ||
| 359 | rdev->pm.downclocked = false; | ||
| 360 | break; | ||
| 361 | case PM_ACTION_DOWNCLOCK: | ||
| 362 | rdev->pm.downclocked = true; | ||
| 363 | break; | ||
| 364 | case PM_ACTION_MINIMUM: | ||
| 365 | break; | ||
| 366 | case PM_ACTION_NONE: | ||
| 367 | DRM_ERROR("%s: PM_ACTION_NONE\n", __func__); | ||
| 368 | break; | ||
| 369 | } | ||
| 370 | 619 | ||
| 371 | radeon_set_power_state(rdev); | 620 | if (position < vbl && position > 1) |
| 372 | rdev->pm.planned_action = PM_ACTION_NONE; | 621 | in_vbl = false; |
| 622 | |||
| 623 | return in_vbl; | ||
| 373 | } | 624 | } |
| 374 | 625 | ||
| 375 | static void radeon_pm_set_clocks(struct radeon_device *rdev) | 626 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) |
| 376 | { | 627 | { |
| 377 | radeon_get_power_state(rdev, rdev->pm.planned_action); | 628 | u32 stat_crtc = 0; |
| 378 | mutex_lock(&rdev->cp.mutex); | 629 | bool in_vbl = radeon_pm_in_vbl(rdev); |
| 379 | 630 | ||
| 380 | if (rdev->pm.active_crtcs & (1 << 0)) { | 631 | if (in_vbl == false) |
| 381 | rdev->pm.req_vblank |= (1 << 0); | 632 | DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc, |
| 382 | drm_vblank_get(rdev->ddev, 0); | 633 | finish ? "exit" : "entry"); |
| 383 | } | 634 | return in_vbl; |
| 384 | if (rdev->pm.active_crtcs & (1 << 1)) { | ||
| 385 | rdev->pm.req_vblank |= (1 << 1); | ||
| 386 | drm_vblank_get(rdev->ddev, 1); | ||
| 387 | } | ||
| 388 | radeon_pm_set_clocks_locked(rdev); | ||
| 389 | if (rdev->pm.req_vblank & (1 << 0)) { | ||
| 390 | rdev->pm.req_vblank &= ~(1 << 0); | ||
| 391 | drm_vblank_put(rdev->ddev, 0); | ||
| 392 | } | ||
| 393 | if (rdev->pm.req_vblank & (1 << 1)) { | ||
| 394 | rdev->pm.req_vblank &= ~(1 << 1); | ||
| 395 | drm_vblank_put(rdev->ddev, 1); | ||
| 396 | } | ||
| 397 | |||
| 398 | mutex_unlock(&rdev->cp.mutex); | ||
| 399 | } | 635 | } |
| 400 | 636 | ||
| 401 | static void radeon_pm_idle_work_handler(struct work_struct *work) | 637 | static void radeon_dynpm_idle_work_handler(struct work_struct *work) |
| 402 | { | 638 | { |
| 403 | struct radeon_device *rdev; | 639 | struct radeon_device *rdev; |
| 640 | int resched; | ||
| 404 | rdev = container_of(work, struct radeon_device, | 641 | rdev = container_of(work, struct radeon_device, |
| 405 | pm.idle_work.work); | 642 | pm.dynpm_idle_work.work); |
| 406 | 643 | ||
| 644 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | ||
| 407 | mutex_lock(&rdev->pm.mutex); | 645 | mutex_lock(&rdev->pm.mutex); |
| 408 | if (rdev->pm.state == PM_STATE_ACTIVE) { | 646 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { |
| 409 | unsigned long irq_flags; | 647 | unsigned long irq_flags; |
| 410 | int not_processed = 0; | 648 | int not_processed = 0; |
| 411 | 649 | ||
| @@ -421,35 +659,40 @@ static void radeon_pm_idle_work_handler(struct work_struct *work) | |||
| 421 | read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); | 659 | read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
| 422 | 660 | ||
| 423 | if (not_processed >= 3) { /* should upclock */ | 661 | if (not_processed >= 3) { /* should upclock */ |
| 424 | if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) { | 662 | if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { |
| 425 | rdev->pm.planned_action = PM_ACTION_NONE; | 663 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
| 426 | } else if (rdev->pm.planned_action == PM_ACTION_NONE && | 664 | } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && |
| 427 | rdev->pm.downclocked) { | 665 | rdev->pm.dynpm_can_upclock) { |
| 428 | rdev->pm.planned_action = | 666 | rdev->pm.dynpm_planned_action = |
| 429 | PM_ACTION_UPCLOCK; | 667 | DYNPM_ACTION_UPCLOCK; |
| 430 | rdev->pm.action_timeout = jiffies + | 668 | rdev->pm.dynpm_action_timeout = jiffies + |
| 431 | msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); | 669 | msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); |
| 432 | } | 670 | } |
| 433 | } else if (not_processed == 0) { /* should downclock */ | 671 | } else if (not_processed == 0) { /* should downclock */ |
| 434 | if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) { | 672 | if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { |
| 435 | rdev->pm.planned_action = PM_ACTION_NONE; | 673 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
| 436 | } else if (rdev->pm.planned_action == PM_ACTION_NONE && | 674 | } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && |
| 437 | !rdev->pm.downclocked) { | 675 | rdev->pm.dynpm_can_downclock) { |
| 438 | rdev->pm.planned_action = | 676 | rdev->pm.dynpm_planned_action = |
| 439 | PM_ACTION_DOWNCLOCK; | 677 | DYNPM_ACTION_DOWNCLOCK; |
| 440 | rdev->pm.action_timeout = jiffies + | 678 | rdev->pm.dynpm_action_timeout = jiffies + |
| 441 | msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); | 679 | msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); |
| 442 | } | 680 | } |
| 443 | } | 681 | } |
| 444 | 682 | ||
| 445 | if (rdev->pm.planned_action != PM_ACTION_NONE && | 683 | /* Note, radeon_pm_set_clocks is called with static_switch set |
| 446 | jiffies > rdev->pm.action_timeout) { | 684 | * to false since we want to wait for vbl to avoid flicker. |
| 685 | */ | ||
| 686 | if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && | ||
| 687 | jiffies > rdev->pm.dynpm_action_timeout) { | ||
| 688 | radeon_pm_get_dynpm_state(rdev); | ||
| 447 | radeon_pm_set_clocks(rdev); | 689 | radeon_pm_set_clocks(rdev); |
| 448 | } | 690 | } |
| 449 | } | 691 | } |
| 450 | mutex_unlock(&rdev->pm.mutex); | 692 | mutex_unlock(&rdev->pm.mutex); |
| 693 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | ||
| 451 | 694 | ||
| 452 | queue_delayed_work(rdev->wq, &rdev->pm.idle_work, | 695 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, |
| 453 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 696 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
| 454 | } | 697 | } |
| 455 | 698 | ||
| @@ -464,7 +707,6 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) | |||
| 464 | struct drm_device *dev = node->minor->dev; | 707 | struct drm_device *dev = node->minor->dev; |
| 465 | struct radeon_device *rdev = dev->dev_private; | 708 | struct radeon_device *rdev = dev->dev_private; |
| 466 | 709 | ||
| 467 | seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]); | ||
| 468 | seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); | 710 | seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); |
| 469 | seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); | 711 | seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); |
| 470 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); | 712 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); |
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index eabbc9cf30a7..c332f46340d5 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
| @@ -553,7 +553,6 @@ | |||
| 553 | # define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16) | 553 | # define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16) |
| 554 | #define RADEON_CRTC2_CRNT_FRAME 0x0314 | 554 | #define RADEON_CRTC2_CRNT_FRAME 0x0314 |
| 555 | #define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318 | 555 | #define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318 |
| 556 | #define RADEON_CRTC2_STATUS 0x03fc | ||
| 557 | #define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310 | 556 | #define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310 |
| 558 | #define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */ | 557 | #define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */ |
| 559 | #define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */ | 558 | #define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */ |
| @@ -995,6 +994,7 @@ | |||
| 995 | # define RADEON_FP_DETECT_MASK (1 << 4) | 994 | # define RADEON_FP_DETECT_MASK (1 << 4) |
| 996 | # define RADEON_CRTC2_VBLANK_MASK (1 << 9) | 995 | # define RADEON_CRTC2_VBLANK_MASK (1 << 9) |
| 997 | # define RADEON_FP2_DETECT_MASK (1 << 10) | 996 | # define RADEON_FP2_DETECT_MASK (1 << 10) |
| 997 | # define RADEON_GUI_IDLE_MASK (1 << 19) | ||
| 998 | # define RADEON_SW_INT_ENABLE (1 << 25) | 998 | # define RADEON_SW_INT_ENABLE (1 << 25) |
| 999 | #define RADEON_GEN_INT_STATUS 0x0044 | 999 | #define RADEON_GEN_INT_STATUS 0x0044 |
| 1000 | # define AVIVO_DISPLAY_INT_STATUS (1 << 0) | 1000 | # define AVIVO_DISPLAY_INT_STATUS (1 << 0) |
| @@ -1006,6 +1006,8 @@ | |||
| 1006 | # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) | 1006 | # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) |
| 1007 | # define RADEON_FP2_DETECT_STAT (1 << 10) | 1007 | # define RADEON_FP2_DETECT_STAT (1 << 10) |
| 1008 | # define RADEON_FP2_DETECT_STAT_ACK (1 << 10) | 1008 | # define RADEON_FP2_DETECT_STAT_ACK (1 << 10) |
| 1009 | # define RADEON_GUI_IDLE_STAT (1 << 19) | ||
| 1010 | # define RADEON_GUI_IDLE_STAT_ACK (1 << 19) | ||
| 1009 | # define RADEON_SW_INT_FIRE (1 << 26) | 1011 | # define RADEON_SW_INT_FIRE (1 << 26) |
| 1010 | # define RADEON_SW_INT_TEST (1 << 25) | 1012 | # define RADEON_SW_INT_TEST (1 << 25) |
| 1011 | # define RADEON_SW_INT_TEST_ACK (1 << 25) | 1013 | # define RADEON_SW_INT_TEST_ACK (1 << 25) |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index f6e1e8d4d986..261e98a276db 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
| @@ -219,24 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
| 219 | void radeon_ib_pool_fini(struct radeon_device *rdev) | 219 | void radeon_ib_pool_fini(struct radeon_device *rdev) |
| 220 | { | 220 | { |
| 221 | int r; | 221 | int r; |
| 222 | struct radeon_bo *robj; | ||
| 222 | 223 | ||
| 223 | if (!rdev->ib_pool.ready) { | 224 | if (!rdev->ib_pool.ready) { |
| 224 | return; | 225 | return; |
| 225 | } | 226 | } |
| 226 | mutex_lock(&rdev->ib_pool.mutex); | 227 | mutex_lock(&rdev->ib_pool.mutex); |
| 227 | radeon_ib_bogus_cleanup(rdev); | 228 | radeon_ib_bogus_cleanup(rdev); |
| 229 | robj = rdev->ib_pool.robj; | ||
| 230 | rdev->ib_pool.robj = NULL; | ||
| 231 | mutex_unlock(&rdev->ib_pool.mutex); | ||
| 228 | 232 | ||
| 229 | if (rdev->ib_pool.robj) { | 233 | if (robj) { |
| 230 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); | 234 | r = radeon_bo_reserve(robj, false); |
| 231 | if (likely(r == 0)) { | 235 | if (likely(r == 0)) { |
| 232 | radeon_bo_kunmap(rdev->ib_pool.robj); | 236 | radeon_bo_kunmap(robj); |
| 233 | radeon_bo_unpin(rdev->ib_pool.robj); | 237 | radeon_bo_unpin(robj); |
| 234 | radeon_bo_unreserve(rdev->ib_pool.robj); | 238 | radeon_bo_unreserve(robj); |
| 235 | } | 239 | } |
| 236 | radeon_bo_unref(&rdev->ib_pool.robj); | 240 | radeon_bo_unref(&robj); |
| 237 | rdev->ib_pool.robj = NULL; | ||
| 238 | } | 241 | } |
| 239 | mutex_unlock(&rdev->ib_pool.mutex); | ||
| 240 | } | 242 | } |
| 241 | 243 | ||
| 242 | 244 | ||
| @@ -258,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev) | |||
| 258 | } | 260 | } |
| 259 | } | 261 | } |
| 260 | 262 | ||
| 261 | int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw) | 263 | int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw) |
| 262 | { | 264 | { |
| 263 | int r; | 265 | int r; |
| 264 | 266 | ||
| 265 | /* Align requested size with padding so unlock_commit can | 267 | /* Align requested size with padding so unlock_commit can |
| 266 | * pad safely */ | 268 | * pad safely */ |
| 267 | ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask; | 269 | ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask; |
| 268 | mutex_lock(&rdev->cp.mutex); | ||
| 269 | while (ndw > (rdev->cp.ring_free_dw - 1)) { | 270 | while (ndw > (rdev->cp.ring_free_dw - 1)) { |
| 270 | radeon_ring_free_size(rdev); | 271 | radeon_ring_free_size(rdev); |
| 271 | if (ndw < rdev->cp.ring_free_dw) { | 272 | if (ndw < rdev->cp.ring_free_dw) { |
| 272 | break; | 273 | break; |
| 273 | } | 274 | } |
| 274 | r = radeon_fence_wait_next(rdev); | 275 | r = radeon_fence_wait_next(rdev); |
| 275 | if (r) { | 276 | if (r) |
| 276 | mutex_unlock(&rdev->cp.mutex); | ||
| 277 | return r; | 277 | return r; |
| 278 | } | ||
| 279 | } | 278 | } |
| 280 | rdev->cp.count_dw = ndw; | 279 | rdev->cp.count_dw = ndw; |
| 281 | rdev->cp.wptr_old = rdev->cp.wptr; | 280 | rdev->cp.wptr_old = rdev->cp.wptr; |
| 282 | return 0; | 281 | return 0; |
| 283 | } | 282 | } |
| 284 | 283 | ||
| 285 | void radeon_ring_unlock_commit(struct radeon_device *rdev) | 284 | int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw) |
| 285 | { | ||
| 286 | int r; | ||
| 287 | |||
| 288 | mutex_lock(&rdev->cp.mutex); | ||
| 289 | r = radeon_ring_alloc(rdev, ndw); | ||
| 290 | if (r) { | ||
| 291 | mutex_unlock(&rdev->cp.mutex); | ||
| 292 | return r; | ||
| 293 | } | ||
| 294 | return 0; | ||
| 295 | } | ||
| 296 | |||
| 297 | void radeon_ring_commit(struct radeon_device *rdev) | ||
| 286 | { | 298 | { |
| 287 | unsigned count_dw_pad; | 299 | unsigned count_dw_pad; |
| 288 | unsigned i; | 300 | unsigned i; |
| @@ -295,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev) | |||
| 295 | } | 307 | } |
| 296 | DRM_MEMORYBARRIER(); | 308 | DRM_MEMORYBARRIER(); |
| 297 | radeon_cp_commit(rdev); | 309 | radeon_cp_commit(rdev); |
| 310 | } | ||
| 311 | |||
| 312 | void radeon_ring_unlock_commit(struct radeon_device *rdev) | ||
| 313 | { | ||
| 314 | radeon_ring_commit(rdev); | ||
| 298 | mutex_unlock(&rdev->cp.mutex); | 315 | mutex_unlock(&rdev->cp.mutex); |
| 299 | } | 316 | } |
| 300 | 317 | ||
| @@ -344,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
| 344 | void radeon_ring_fini(struct radeon_device *rdev) | 361 | void radeon_ring_fini(struct radeon_device *rdev) |
| 345 | { | 362 | { |
| 346 | int r; | 363 | int r; |
| 364 | struct radeon_bo *ring_obj; | ||
| 347 | 365 | ||
| 348 | mutex_lock(&rdev->cp.mutex); | 366 | mutex_lock(&rdev->cp.mutex); |
| 349 | if (rdev->cp.ring_obj) { | 367 | ring_obj = rdev->cp.ring_obj; |
| 350 | r = radeon_bo_reserve(rdev->cp.ring_obj, false); | 368 | rdev->cp.ring = NULL; |
| 369 | rdev->cp.ring_obj = NULL; | ||
| 370 | mutex_unlock(&rdev->cp.mutex); | ||
| 371 | |||
| 372 | if (ring_obj) { | ||
| 373 | r = radeon_bo_reserve(ring_obj, false); | ||
| 351 | if (likely(r == 0)) { | 374 | if (likely(r == 0)) { |
| 352 | radeon_bo_kunmap(rdev->cp.ring_obj); | 375 | radeon_bo_kunmap(ring_obj); |
| 353 | radeon_bo_unpin(rdev->cp.ring_obj); | 376 | radeon_bo_unpin(ring_obj); |
| 354 | radeon_bo_unreserve(rdev->cp.ring_obj); | 377 | radeon_bo_unreserve(ring_obj); |
| 355 | } | 378 | } |
| 356 | radeon_bo_unref(&rdev->cp.ring_obj); | 379 | radeon_bo_unref(&ring_obj); |
| 357 | rdev->cp.ring = NULL; | ||
| 358 | rdev->cp.ring_obj = NULL; | ||
| 359 | } | 380 | } |
| 360 | mutex_unlock(&rdev->cp.mutex); | ||
| 361 | } | 381 | } |
| 362 | 382 | ||
| 363 | 383 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index af98f45954b3..3aa3a65800ab 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
| @@ -607,13 +607,17 @@ static const struct vm_operations_struct *ttm_vm_ops = NULL; | |||
| 607 | static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 607 | static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
| 608 | { | 608 | { |
| 609 | struct ttm_buffer_object *bo; | 609 | struct ttm_buffer_object *bo; |
| 610 | struct radeon_device *rdev; | ||
| 610 | int r; | 611 | int r; |
| 611 | 612 | ||
| 612 | bo = (struct ttm_buffer_object *)vma->vm_private_data; | 613 | bo = (struct ttm_buffer_object *)vma->vm_private_data; |
| 613 | if (bo == NULL) { | 614 | if (bo == NULL) { |
| 614 | return VM_FAULT_NOPAGE; | 615 | return VM_FAULT_NOPAGE; |
| 615 | } | 616 | } |
| 617 | rdev = radeon_get_rdev(bo->bdev); | ||
| 618 | mutex_lock(&rdev->vram_mutex); | ||
| 616 | r = ttm_vm_ops->fault(vma, vmf); | 619 | r = ttm_vm_ops->fault(vma, vmf); |
| 620 | mutex_unlock(&rdev->vram_mutex); | ||
| 617 | return r; | 621 | return r; |
| 618 | } | 622 | } |
| 619 | 623 | ||
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index dc76fe76eb25..9e4240b3bf0b 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
| @@ -456,7 +456,6 @@ int rs400_suspend(struct radeon_device *rdev) | |||
| 456 | 456 | ||
| 457 | void rs400_fini(struct radeon_device *rdev) | 457 | void rs400_fini(struct radeon_device *rdev) |
| 458 | { | 458 | { |
| 459 | radeon_pm_fini(rdev); | ||
| 460 | r100_cp_fini(rdev); | 459 | r100_cp_fini(rdev); |
| 461 | r100_wb_fini(rdev); | 460 | r100_wb_fini(rdev); |
| 462 | r100_ib_fini(rdev); | 461 | r100_ib_fini(rdev); |
| @@ -507,8 +506,6 @@ int rs400_init(struct radeon_device *rdev) | |||
| 507 | 506 | ||
| 508 | /* Initialize clocks */ | 507 | /* Initialize clocks */ |
| 509 | radeon_get_clock_info(rdev->ddev); | 508 | radeon_get_clock_info(rdev->ddev); |
| 510 | /* Initialize power management */ | ||
| 511 | radeon_pm_init(rdev); | ||
| 512 | /* initialize memory controller */ | 509 | /* initialize memory controller */ |
| 513 | rs400_mc_init(rdev); | 510 | rs400_mc_init(rdev); |
| 514 | /* Fence driver */ | 511 | /* Fence driver */ |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 5e3f21861f45..79887cac5b54 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -46,6 +46,135 @@ | |||
| 46 | void rs600_gpu_init(struct radeon_device *rdev); | 46 | void rs600_gpu_init(struct radeon_device *rdev); |
| 47 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | 47 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
| 48 | 48 | ||
| 49 | void rs600_pm_misc(struct radeon_device *rdev) | ||
| 50 | { | ||
| 51 | int requested_index = rdev->pm.requested_power_state_index; | ||
| 52 | struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; | ||
| 53 | struct radeon_voltage *voltage = &ps->clock_info[0].voltage; | ||
| 54 | u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl; | ||
| 55 | u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl; | ||
| 56 | |||
| 57 | if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { | ||
| 58 | if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { | ||
| 59 | tmp = RREG32(voltage->gpio.reg); | ||
| 60 | if (voltage->active_high) | ||
| 61 | tmp |= voltage->gpio.mask; | ||
| 62 | else | ||
| 63 | tmp &= ~(voltage->gpio.mask); | ||
| 64 | WREG32(voltage->gpio.reg, tmp); | ||
| 65 | if (voltage->delay) | ||
| 66 | udelay(voltage->delay); | ||
| 67 | } else { | ||
| 68 | tmp = RREG32(voltage->gpio.reg); | ||
| 69 | if (voltage->active_high) | ||
| 70 | tmp &= ~voltage->gpio.mask; | ||
| 71 | else | ||
| 72 | tmp |= voltage->gpio.mask; | ||
| 73 | WREG32(voltage->gpio.reg, tmp); | ||
| 74 | if (voltage->delay) | ||
| 75 | udelay(voltage->delay); | ||
| 76 | } | ||
| 77 | } | ||
| 78 | |||
| 79 | dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); | ||
| 80 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); | ||
| 81 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf); | ||
| 82 | if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { | ||
| 83 | if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) { | ||
| 84 | dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2); | ||
| 85 | dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2); | ||
| 86 | } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) { | ||
| 87 | dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4); | ||
| 88 | dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4); | ||
| 89 | } | ||
| 90 | } else { | ||
| 91 | dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1); | ||
| 92 | dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1); | ||
| 93 | } | ||
| 94 | WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length); | ||
| 95 | |||
| 96 | dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL); | ||
| 97 | if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { | ||
| 98 | dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP; | ||
| 99 | if (voltage->delay) { | ||
| 100 | dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC; | ||
| 101 | dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay); | ||
| 102 | } else | ||
| 103 | dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC; | ||
| 104 | } else | ||
| 105 | dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP; | ||
| 106 | WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl); | ||
| 107 | |||
| 108 | hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL); | ||
| 109 | if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) | ||
| 110 | hdp_dyn_cntl &= ~HDP_FORCEON; | ||
| 111 | else | ||
| 112 | hdp_dyn_cntl |= HDP_FORCEON; | ||
| 113 | WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl); | ||
| 114 | #if 0 | ||
| 115 | /* mc_host_dyn seems to cause hangs from time to time */ | ||
| 116 | mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL); | ||
| 117 | if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN) | ||
| 118 | mc_host_dyn_cntl &= ~MC_HOST_FORCEON; | ||
| 119 | else | ||
| 120 | mc_host_dyn_cntl |= MC_HOST_FORCEON; | ||
| 121 | WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl); | ||
| 122 | #endif | ||
| 123 | dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL); | ||
| 124 | if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN) | ||
| 125 | dyn_backbias_cntl |= IO_CG_BACKBIAS_EN; | ||
| 126 | else | ||
| 127 | dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN; | ||
| 128 | WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl); | ||
| 129 | |||
| 130 | /* set pcie lanes */ | ||
| 131 | if ((rdev->flags & RADEON_IS_PCIE) && | ||
| 132 | !(rdev->flags & RADEON_IS_IGP) && | ||
| 133 | rdev->asic->set_pcie_lanes && | ||
| 134 | (ps->pcie_lanes != | ||
| 135 | rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { | ||
| 136 | radeon_set_pcie_lanes(rdev, | ||
| 137 | ps->pcie_lanes); | ||
| 138 | DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes); | ||
| 139 | } | ||
| 140 | } | ||
| 141 | |||
| 142 | void rs600_pm_prepare(struct radeon_device *rdev) | ||
| 143 | { | ||
| 144 | struct drm_device *ddev = rdev->ddev; | ||
| 145 | struct drm_crtc *crtc; | ||
| 146 | struct radeon_crtc *radeon_crtc; | ||
| 147 | u32 tmp; | ||
| 148 | |||
| 149 | /* disable any active CRTCs */ | ||
| 150 | list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { | ||
| 151 | radeon_crtc = to_radeon_crtc(crtc); | ||
| 152 | if (radeon_crtc->enabled) { | ||
| 153 | tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset); | ||
| 154 | tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; | ||
| 155 | WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); | ||
| 156 | } | ||
| 157 | } | ||
| 158 | } | ||
| 159 | |||
| 160 | void rs600_pm_finish(struct radeon_device *rdev) | ||
| 161 | { | ||
| 162 | struct drm_device *ddev = rdev->ddev; | ||
| 163 | struct drm_crtc *crtc; | ||
| 164 | struct radeon_crtc *radeon_crtc; | ||
| 165 | u32 tmp; | ||
| 166 | |||
| 167 | /* enable any active CRTCs */ | ||
| 168 | list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { | ||
| 169 | radeon_crtc = to_radeon_crtc(crtc); | ||
| 170 | if (radeon_crtc->enabled) { | ||
| 171 | tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset); | ||
| 172 | tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; | ||
| 173 | WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); | ||
| 174 | } | ||
| 175 | } | ||
| 176 | } | ||
| 177 | |||
| 49 | /* hpd for digital panel detect/disconnect */ | 178 | /* hpd for digital panel detect/disconnect */ |
| 50 | bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) | 179 | bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) |
| 51 | { | 180 | { |
| @@ -382,6 +511,9 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
| 382 | if (rdev->irq.sw_int) { | 511 | if (rdev->irq.sw_int) { |
| 383 | tmp |= S_000040_SW_INT_EN(1); | 512 | tmp |= S_000040_SW_INT_EN(1); |
| 384 | } | 513 | } |
| 514 | if (rdev->irq.gui_idle) { | ||
| 515 | tmp |= S_000040_GUI_IDLE(1); | ||
| 516 | } | ||
| 385 | if (rdev->irq.crtc_vblank_int[0]) { | 517 | if (rdev->irq.crtc_vblank_int[0]) { |
| 386 | mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); | 518 | mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); |
| 387 | } | 519 | } |
| @@ -404,9 +536,15 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
| 404 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) | 536 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) |
| 405 | { | 537 | { |
| 406 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); | 538 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); |
| 407 | uint32_t irq_mask = ~C_000044_SW_INT; | 539 | uint32_t irq_mask = S_000044_SW_INT(1); |
| 408 | u32 tmp; | 540 | u32 tmp; |
| 409 | 541 | ||
| 542 | /* the interrupt works, but the status bit is permanently asserted */ | ||
| 543 | if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { | ||
| 544 | if (!rdev->irq.gui_idle_acked) | ||
| 545 | irq_mask |= S_000044_GUI_IDLE_STAT(1); | ||
| 546 | } | ||
| 547 | |||
| 410 | if (G_000044_DISPLAY_INT_STAT(irqs)) { | 548 | if (G_000044_DISPLAY_INT_STAT(irqs)) { |
| 411 | *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); | 549 | *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); |
| 412 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { | 550 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { |
| @@ -454,6 +592,9 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
| 454 | uint32_t r500_disp_int; | 592 | uint32_t r500_disp_int; |
| 455 | bool queue_hotplug = false; | 593 | bool queue_hotplug = false; |
| 456 | 594 | ||
| 595 | /* reset gui idle ack. the status bit is broken */ | ||
| 596 | rdev->irq.gui_idle_acked = false; | ||
| 597 | |||
| 457 | status = rs600_irq_ack(rdev, &r500_disp_int); | 598 | status = rs600_irq_ack(rdev, &r500_disp_int); |
| 458 | if (!status && !r500_disp_int) { | 599 | if (!status && !r500_disp_int) { |
| 459 | return IRQ_NONE; | 600 | return IRQ_NONE; |
| @@ -462,6 +603,12 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
| 462 | /* SW interrupt */ | 603 | /* SW interrupt */ |
| 463 | if (G_000044_SW_INT(status)) | 604 | if (G_000044_SW_INT(status)) |
| 464 | radeon_fence_process(rdev); | 605 | radeon_fence_process(rdev); |
| 606 | /* GUI idle */ | ||
| 607 | if (G_000040_GUI_IDLE(status)) { | ||
| 608 | rdev->irq.gui_idle_acked = true; | ||
| 609 | rdev->pm.gui_idle = true; | ||
| 610 | wake_up(&rdev->irq.idle_queue); | ||
| 611 | } | ||
| 465 | /* Vertical blank interrupts */ | 612 | /* Vertical blank interrupts */ |
| 466 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { | 613 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { |
| 467 | drm_handle_vblank(rdev->ddev, 0); | 614 | drm_handle_vblank(rdev->ddev, 0); |
| @@ -483,6 +630,8 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
| 483 | } | 630 | } |
| 484 | status = rs600_irq_ack(rdev, &r500_disp_int); | 631 | status = rs600_irq_ack(rdev, &r500_disp_int); |
| 485 | } | 632 | } |
| 633 | /* reset gui idle ack. the status bit is broken */ | ||
| 634 | rdev->irq.gui_idle_acked = false; | ||
| 486 | if (queue_hotplug) | 635 | if (queue_hotplug) |
| 487 | queue_work(rdev->wq, &rdev->hotplug_work); | 636 | queue_work(rdev->wq, &rdev->hotplug_work); |
| 488 | if (rdev->msi_enabled) { | 637 | if (rdev->msi_enabled) { |
| @@ -697,7 +846,6 @@ int rs600_suspend(struct radeon_device *rdev) | |||
| 697 | 846 | ||
| 698 | void rs600_fini(struct radeon_device *rdev) | 847 | void rs600_fini(struct radeon_device *rdev) |
| 699 | { | 848 | { |
| 700 | radeon_pm_fini(rdev); | ||
| 701 | r100_cp_fini(rdev); | 849 | r100_cp_fini(rdev); |
| 702 | r100_wb_fini(rdev); | 850 | r100_wb_fini(rdev); |
| 703 | r100_ib_fini(rdev); | 851 | r100_ib_fini(rdev); |
| @@ -747,8 +895,6 @@ int rs600_init(struct radeon_device *rdev) | |||
| 747 | 895 | ||
| 748 | /* Initialize clocks */ | 896 | /* Initialize clocks */ |
| 749 | radeon_get_clock_info(rdev->ddev); | 897 | radeon_get_clock_info(rdev->ddev); |
| 750 | /* Initialize power management */ | ||
| 751 | radeon_pm_init(rdev); | ||
| 752 | /* initialize memory controller */ | 898 | /* initialize memory controller */ |
| 753 | rs600_mc_init(rdev); | 899 | rs600_mc_init(rdev); |
| 754 | rs600_debugfs(rdev); | 900 | rs600_debugfs(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h index 08c4bebd3011..a27c13ac47c3 100644 --- a/drivers/gpu/drm/radeon/rs600d.h +++ b/drivers/gpu/drm/radeon/rs600d.h | |||
| @@ -634,4 +634,38 @@ | |||
| 634 | #define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) | 634 | #define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) |
| 635 | #define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF | 635 | #define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF |
| 636 | 636 | ||
| 637 | /* PLL regs */ | ||
| 638 | #define GENERAL_PWRMGT 0x8 | ||
| 639 | #define GLOBAL_PWRMGT_EN (1 << 0) | ||
| 640 | #define MOBILE_SU (1 << 2) | ||
| 641 | #define DYN_PWRMGT_SCLK_LENGTH 0xc | ||
| 642 | #define NORMAL_POWER_SCLK_HILEN(x) ((x) << 0) | ||
| 643 | #define NORMAL_POWER_SCLK_LOLEN(x) ((x) << 4) | ||
| 644 | #define REDUCED_POWER_SCLK_HILEN(x) ((x) << 8) | ||
| 645 | #define REDUCED_POWER_SCLK_LOLEN(x) ((x) << 12) | ||
| 646 | #define POWER_D1_SCLK_HILEN(x) ((x) << 16) | ||
| 647 | #define POWER_D1_SCLK_LOLEN(x) ((x) << 20) | ||
| 648 | #define STATIC_SCREEN_HILEN(x) ((x) << 24) | ||
| 649 | #define STATIC_SCREEN_LOLEN(x) ((x) << 28) | ||
| 650 | #define DYN_SCLK_VOL_CNTL 0xe | ||
| 651 | #define IO_CG_VOLTAGE_DROP (1 << 0) | ||
| 652 | #define VOLTAGE_DROP_SYNC (1 << 2) | ||
| 653 | #define VOLTAGE_DELAY_SEL(x) ((x) << 3) | ||
| 654 | #define HDP_DYN_CNTL 0x10 | ||
| 655 | #define HDP_FORCEON (1 << 0) | ||
| 656 | #define MC_HOST_DYN_CNTL 0x1e | ||
| 657 | #define MC_HOST_FORCEON (1 << 0) | ||
| 658 | #define DYN_BACKBIAS_CNTL 0x29 | ||
| 659 | #define IO_CG_BACKBIAS_EN (1 << 0) | ||
| 660 | |||
| 661 | /* mmreg */ | ||
| 662 | #define DOUT_POWER_MANAGEMENT_CNTL 0x7ee0 | ||
| 663 | #define PWRDN_WAIT_BUSY_OFF (1 << 0) | ||
| 664 | #define PWRDN_WAIT_PWRSEQ_OFF (1 << 4) | ||
| 665 | #define PWRDN_WAIT_PPLL_OFF (1 << 8) | ||
| 666 | #define PWRUP_WAIT_PPLL_ON (1 << 12) | ||
| 667 | #define PWRUP_WAIT_MEM_INIT_DONE (1 << 16) | ||
| 668 | #define PM_ASSERT_RESET (1 << 20) | ||
| 669 | #define PM_PWRDN_PPLL (1 << 24) | ||
| 670 | |||
| 637 | #endif | 671 | #endif |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 56a0aec84af2..bcc33195ebc2 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -76,59 +76,59 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
| 76 | /* Get various system informations from bios */ | 76 | /* Get various system informations from bios */ |
| 77 | switch (crev) { | 77 | switch (crev) { |
| 78 | case 1: | 78 | case 1: |
| 79 | tmp.full = rfixed_const(100); | 79 | tmp.full = dfixed_const(100); |
| 80 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock); | 80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); |
| 81 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
| 82 | rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); | 82 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); |
| 83 | rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock)); | 83 | rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); |
| 84 | rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth); | 84 | rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); |
| 85 | break; | 85 | break; |
| 86 | case 2: | 86 | case 2: |
| 87 | tmp.full = rfixed_const(100); | 87 | tmp.full = dfixed_const(100); |
| 88 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock); | 88 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); |
| 89 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 89 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
| 90 | rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock); | 90 | rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); |
| 91 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | 91 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
| 92 | rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq); | 92 | rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); |
| 93 | rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); | 93 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
| 94 | rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); | 94 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); |
| 95 | break; | 95 | break; |
| 96 | default: | 96 | default: |
| 97 | tmp.full = rfixed_const(100); | 97 | tmp.full = dfixed_const(100); |
| 98 | /* We assume the slower possible clock ie worst case */ | 98 | /* We assume the slower possible clock ie worst case */ |
| 99 | /* DDR 333Mhz */ | 99 | /* DDR 333Mhz */ |
| 100 | rdev->pm.igp_sideport_mclk.full = rfixed_const(333); | 100 | rdev->pm.igp_sideport_mclk.full = dfixed_const(333); |
| 101 | /* FIXME: system clock ? */ | 101 | /* FIXME: system clock ? */ |
| 102 | rdev->pm.igp_system_mclk.full = rfixed_const(100); | 102 | rdev->pm.igp_system_mclk.full = dfixed_const(100); |
| 103 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | 103 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
| 104 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); | 104 | rdev->pm.igp_ht_link_clk.full = dfixed_const(200); |
| 105 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); | 105 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
| 106 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 106 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
| 107 | break; | 107 | break; |
| 108 | } | 108 | } |
| 109 | } else { | 109 | } else { |
| 110 | tmp.full = rfixed_const(100); | 110 | tmp.full = dfixed_const(100); |
| 111 | /* We assume the slower possible clock ie worst case */ | 111 | /* We assume the slower possible clock ie worst case */ |
| 112 | /* DDR 333Mhz */ | 112 | /* DDR 333Mhz */ |
| 113 | rdev->pm.igp_sideport_mclk.full = rfixed_const(333); | 113 | rdev->pm.igp_sideport_mclk.full = dfixed_const(333); |
| 114 | /* FIXME: system clock ? */ | 114 | /* FIXME: system clock ? */ |
| 115 | rdev->pm.igp_system_mclk.full = rfixed_const(100); | 115 | rdev->pm.igp_system_mclk.full = dfixed_const(100); |
| 116 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | 116 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
| 117 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); | 117 | rdev->pm.igp_ht_link_clk.full = dfixed_const(200); |
| 118 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); | 118 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
| 119 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 119 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
| 120 | } | 120 | } |
| 121 | /* Compute various bandwidth */ | 121 | /* Compute various bandwidth */ |
| 122 | /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ | 122 | /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ |
| 123 | tmp.full = rfixed_const(4); | 123 | tmp.full = dfixed_const(4); |
| 124 | rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); | 124 | rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp); |
| 125 | /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 | 125 | /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 |
| 126 | * = ht_clk * ht_width / 5 | 126 | * = ht_clk * ht_width / 5 |
| 127 | */ | 127 | */ |
| 128 | tmp.full = rfixed_const(5); | 128 | tmp.full = dfixed_const(5); |
| 129 | rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, | 129 | rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk, |
| 130 | rdev->pm.igp_ht_link_width); | 130 | rdev->pm.igp_ht_link_width); |
| 131 | rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); | 131 | rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp); |
| 132 | if (tmp.full < rdev->pm.max_bandwidth.full) { | 132 | if (tmp.full < rdev->pm.max_bandwidth.full) { |
| 133 | /* HT link is a limiting factor */ | 133 | /* HT link is a limiting factor */ |
| 134 | rdev->pm.max_bandwidth.full = tmp.full; | 134 | rdev->pm.max_bandwidth.full = tmp.full; |
| @@ -136,10 +136,10 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
| 136 | /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 | 136 | /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 |
| 137 | * = (sideport_clk * 14) / 10 | 137 | * = (sideport_clk * 14) / 10 |
| 138 | */ | 138 | */ |
| 139 | tmp.full = rfixed_const(14); | 139 | tmp.full = dfixed_const(14); |
| 140 | rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); | 140 | rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp); |
| 141 | tmp.full = rfixed_const(10); | 141 | tmp.full = dfixed_const(10); |
| 142 | rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); | 142 | rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp); |
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | void rs690_mc_init(struct radeon_device *rdev) | 145 | void rs690_mc_init(struct radeon_device *rdev) |
| @@ -239,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 239 | return; | 239 | return; |
| 240 | } | 240 | } |
| 241 | 241 | ||
| 242 | if (crtc->vsc.full > rfixed_const(2)) | 242 | if (crtc->vsc.full > dfixed_const(2)) |
| 243 | wm->num_line_pair.full = rfixed_const(2); | 243 | wm->num_line_pair.full = dfixed_const(2); |
| 244 | else | 244 | else |
| 245 | wm->num_line_pair.full = rfixed_const(1); | 245 | wm->num_line_pair.full = dfixed_const(1); |
| 246 | 246 | ||
| 247 | b.full = rfixed_const(mode->crtc_hdisplay); | 247 | b.full = dfixed_const(mode->crtc_hdisplay); |
| 248 | c.full = rfixed_const(256); | 248 | c.full = dfixed_const(256); |
| 249 | a.full = rfixed_div(b, c); | 249 | a.full = dfixed_div(b, c); |
| 250 | request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); | 250 | request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); |
| 251 | request_fifo_depth.full = rfixed_ceil(request_fifo_depth); | 251 | request_fifo_depth.full = dfixed_ceil(request_fifo_depth); |
| 252 | if (a.full < rfixed_const(4)) { | 252 | if (a.full < dfixed_const(4)) { |
| 253 | wm->lb_request_fifo_depth = 4; | 253 | wm->lb_request_fifo_depth = 4; |
| 254 | } else { | 254 | } else { |
| 255 | wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); | 255 | wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth); |
| 256 | } | 256 | } |
| 257 | 257 | ||
| 258 | /* Determine consumption rate | 258 | /* Determine consumption rate |
| @@ -261,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 261 | * vsc = vertical scaling ratio, defined as source/destination | 261 | * vsc = vertical scaling ratio, defined as source/destination |
| 262 | * hsc = horizontal scaling ration, defined as source/destination | 262 | * hsc = horizontal scaling ration, defined as source/destination |
| 263 | */ | 263 | */ |
| 264 | a.full = rfixed_const(mode->clock); | 264 | a.full = dfixed_const(mode->clock); |
| 265 | b.full = rfixed_const(1000); | 265 | b.full = dfixed_const(1000); |
| 266 | a.full = rfixed_div(a, b); | 266 | a.full = dfixed_div(a, b); |
| 267 | pclk.full = rfixed_div(b, a); | 267 | pclk.full = dfixed_div(b, a); |
| 268 | if (crtc->rmx_type != RMX_OFF) { | 268 | if (crtc->rmx_type != RMX_OFF) { |
| 269 | b.full = rfixed_const(2); | 269 | b.full = dfixed_const(2); |
| 270 | if (crtc->vsc.full > b.full) | 270 | if (crtc->vsc.full > b.full) |
| 271 | b.full = crtc->vsc.full; | 271 | b.full = crtc->vsc.full; |
| 272 | b.full = rfixed_mul(b, crtc->hsc); | 272 | b.full = dfixed_mul(b, crtc->hsc); |
| 273 | c.full = rfixed_const(2); | 273 | c.full = dfixed_const(2); |
| 274 | b.full = rfixed_div(b, c); | 274 | b.full = dfixed_div(b, c); |
| 275 | consumption_time.full = rfixed_div(pclk, b); | 275 | consumption_time.full = dfixed_div(pclk, b); |
| 276 | } else { | 276 | } else { |
| 277 | consumption_time.full = pclk.full; | 277 | consumption_time.full = pclk.full; |
| 278 | } | 278 | } |
| 279 | a.full = rfixed_const(1); | 279 | a.full = dfixed_const(1); |
| 280 | wm->consumption_rate.full = rfixed_div(a, consumption_time); | 280 | wm->consumption_rate.full = dfixed_div(a, consumption_time); |
| 281 | 281 | ||
| 282 | 282 | ||
| 283 | /* Determine line time | 283 | /* Determine line time |
| @@ -285,18 +285,18 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 285 | * LineTime = total number of horizontal pixels | 285 | * LineTime = total number of horizontal pixels |
| 286 | * pclk = pixel clock period(ns) | 286 | * pclk = pixel clock period(ns) |
| 287 | */ | 287 | */ |
| 288 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | 288 | a.full = dfixed_const(crtc->base.mode.crtc_htotal); |
| 289 | line_time.full = rfixed_mul(a, pclk); | 289 | line_time.full = dfixed_mul(a, pclk); |
| 290 | 290 | ||
| 291 | /* Determine active time | 291 | /* Determine active time |
| 292 | * ActiveTime = time of active region of display within one line, | 292 | * ActiveTime = time of active region of display within one line, |
| 293 | * hactive = total number of horizontal active pixels | 293 | * hactive = total number of horizontal active pixels |
| 294 | * htotal = total number of horizontal pixels | 294 | * htotal = total number of horizontal pixels |
| 295 | */ | 295 | */ |
| 296 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | 296 | a.full = dfixed_const(crtc->base.mode.crtc_htotal); |
| 297 | b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | 297 | b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); |
| 298 | wm->active_time.full = rfixed_mul(line_time, b); | 298 | wm->active_time.full = dfixed_mul(line_time, b); |
| 299 | wm->active_time.full = rfixed_div(wm->active_time, a); | 299 | wm->active_time.full = dfixed_div(wm->active_time, a); |
| 300 | 300 | ||
| 301 | /* Maximun bandwidth is the minimun bandwidth of all component */ | 301 | /* Maximun bandwidth is the minimun bandwidth of all component */ |
| 302 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; | 302 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; |
| @@ -304,8 +304,8 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 304 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && | 304 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && |
| 305 | rdev->pm.sideport_bandwidth.full) | 305 | rdev->pm.sideport_bandwidth.full) |
| 306 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; | 306 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; |
| 307 | read_delay_latency.full = rfixed_const(370 * 800 * 1000); | 307 | read_delay_latency.full = dfixed_const(370 * 800 * 1000); |
| 308 | read_delay_latency.full = rfixed_div(read_delay_latency, | 308 | read_delay_latency.full = dfixed_div(read_delay_latency, |
| 309 | rdev->pm.igp_sideport_mclk); | 309 | rdev->pm.igp_sideport_mclk); |
| 310 | } else { | 310 | } else { |
| 311 | if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && | 311 | if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && |
| @@ -314,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 314 | if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && | 314 | if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && |
| 315 | rdev->pm.ht_bandwidth.full) | 315 | rdev->pm.ht_bandwidth.full) |
| 316 | rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; | 316 | rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; |
| 317 | read_delay_latency.full = rfixed_const(5000); | 317 | read_delay_latency.full = dfixed_const(5000); |
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ | 320 | /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ |
| 321 | a.full = rfixed_const(16); | 321 | a.full = dfixed_const(16); |
| 322 | rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a); | 322 | rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a); |
| 323 | a.full = rfixed_const(1000); | 323 | a.full = dfixed_const(1000); |
| 324 | rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk); | 324 | rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk); |
| 325 | /* Determine chunk time | 325 | /* Determine chunk time |
| 326 | * ChunkTime = the time it takes the DCP to send one chunk of data | 326 | * ChunkTime = the time it takes the DCP to send one chunk of data |
| 327 | * to the LB which consists of pipeline delay and inter chunk gap | 327 | * to the LB which consists of pipeline delay and inter chunk gap |
| 328 | * sclk = system clock(ns) | 328 | * sclk = system clock(ns) |
| 329 | */ | 329 | */ |
| 330 | a.full = rfixed_const(256 * 13); | 330 | a.full = dfixed_const(256 * 13); |
| 331 | chunk_time.full = rfixed_mul(rdev->pm.sclk, a); | 331 | chunk_time.full = dfixed_mul(rdev->pm.sclk, a); |
| 332 | a.full = rfixed_const(10); | 332 | a.full = dfixed_const(10); |
| 333 | chunk_time.full = rfixed_div(chunk_time, a); | 333 | chunk_time.full = dfixed_div(chunk_time, a); |
| 334 | 334 | ||
| 335 | /* Determine the worst case latency | 335 | /* Determine the worst case latency |
| 336 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) | 336 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) |
| @@ -340,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 340 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB | 340 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB |
| 341 | * which consists of pipeline delay and inter chunk gap | 341 | * which consists of pipeline delay and inter chunk gap |
| 342 | */ | 342 | */ |
| 343 | if (rfixed_trunc(wm->num_line_pair) > 1) { | 343 | if (dfixed_trunc(wm->num_line_pair) > 1) { |
| 344 | a.full = rfixed_const(3); | 344 | a.full = dfixed_const(3); |
| 345 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | 345 | wm->worst_case_latency.full = dfixed_mul(a, chunk_time); |
| 346 | wm->worst_case_latency.full += read_delay_latency.full; | 346 | wm->worst_case_latency.full += read_delay_latency.full; |
| 347 | } else { | 347 | } else { |
| 348 | a.full = rfixed_const(2); | 348 | a.full = dfixed_const(2); |
| 349 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | 349 | wm->worst_case_latency.full = dfixed_mul(a, chunk_time); |
| 350 | wm->worst_case_latency.full += read_delay_latency.full; | 350 | wm->worst_case_latency.full += read_delay_latency.full; |
| 351 | } | 351 | } |
| 352 | 352 | ||
| @@ -360,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 360 | * of data to the LB which consists of | 360 | * of data to the LB which consists of |
| 361 | * pipeline delay and inter chunk gap | 361 | * pipeline delay and inter chunk gap |
| 362 | */ | 362 | */ |
| 363 | if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { | 363 | if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { |
| 364 | tolerable_latency.full = line_time.full; | 364 | tolerable_latency.full = line_time.full; |
| 365 | } else { | 365 | } else { |
| 366 | tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); | 366 | tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2); |
| 367 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; | 367 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; |
| 368 | tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); | 368 | tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); |
| 369 | tolerable_latency.full = line_time.full - tolerable_latency.full; | 369 | tolerable_latency.full = line_time.full - tolerable_latency.full; |
| 370 | } | 370 | } |
| 371 | /* We assume worst case 32bits (4 bytes) */ | 371 | /* We assume worst case 32bits (4 bytes) */ |
| 372 | wm->dbpp.full = rfixed_const(4 * 8); | 372 | wm->dbpp.full = dfixed_const(4 * 8); |
| 373 | 373 | ||
| 374 | /* Determine the maximum priority mark | 374 | /* Determine the maximum priority mark |
| 375 | * width = viewport width in pixels | 375 | * width = viewport width in pixels |
| 376 | */ | 376 | */ |
| 377 | a.full = rfixed_const(16); | 377 | a.full = dfixed_const(16); |
| 378 | wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | 378 | wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); |
| 379 | wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); | 379 | wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); |
| 380 | wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); | 380 | wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); |
| 381 | 381 | ||
| 382 | /* Determine estimated width */ | 382 | /* Determine estimated width */ |
| 383 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; | 383 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; |
| 384 | estimated_width.full = rfixed_div(estimated_width, consumption_time); | 384 | estimated_width.full = dfixed_div(estimated_width, consumption_time); |
| 385 | if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { | 385 | if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { |
| 386 | wm->priority_mark.full = rfixed_const(10); | 386 | wm->priority_mark.full = dfixed_const(10); |
| 387 | } else { | 387 | } else { |
| 388 | a.full = rfixed_const(16); | 388 | a.full = dfixed_const(16); |
| 389 | wm->priority_mark.full = rfixed_div(estimated_width, a); | 389 | wm->priority_mark.full = dfixed_div(estimated_width, a); |
| 390 | wm->priority_mark.full = rfixed_ceil(wm->priority_mark); | 390 | wm->priority_mark.full = dfixed_ceil(wm->priority_mark); |
| 391 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; | 391 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; |
| 392 | } | 392 | } |
| 393 | } | 393 | } |
| @@ -439,58 +439,58 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
| 439 | WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); | 439 | WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); |
| 440 | 440 | ||
| 441 | if (mode0 && mode1) { | 441 | if (mode0 && mode1) { |
| 442 | if (rfixed_trunc(wm0.dbpp) > 64) | 442 | if (dfixed_trunc(wm0.dbpp) > 64) |
| 443 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | 443 | a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); |
| 444 | else | 444 | else |
| 445 | a.full = wm0.num_line_pair.full; | 445 | a.full = wm0.num_line_pair.full; |
| 446 | if (rfixed_trunc(wm1.dbpp) > 64) | 446 | if (dfixed_trunc(wm1.dbpp) > 64) |
| 447 | b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | 447 | b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); |
| 448 | else | 448 | else |
| 449 | b.full = wm1.num_line_pair.full; | 449 | b.full = wm1.num_line_pair.full; |
| 450 | a.full += b.full; | 450 | a.full += b.full; |
| 451 | fill_rate.full = rfixed_div(wm0.sclk, a); | 451 | fill_rate.full = dfixed_div(wm0.sclk, a); |
| 452 | if (wm0.consumption_rate.full > fill_rate.full) { | 452 | if (wm0.consumption_rate.full > fill_rate.full) { |
| 453 | b.full = wm0.consumption_rate.full - fill_rate.full; | 453 | b.full = wm0.consumption_rate.full - fill_rate.full; |
| 454 | b.full = rfixed_mul(b, wm0.active_time); | 454 | b.full = dfixed_mul(b, wm0.active_time); |
| 455 | a.full = rfixed_mul(wm0.worst_case_latency, | 455 | a.full = dfixed_mul(wm0.worst_case_latency, |
| 456 | wm0.consumption_rate); | 456 | wm0.consumption_rate); |
| 457 | a.full = a.full + b.full; | 457 | a.full = a.full + b.full; |
| 458 | b.full = rfixed_const(16 * 1000); | 458 | b.full = dfixed_const(16 * 1000); |
| 459 | priority_mark02.full = rfixed_div(a, b); | 459 | priority_mark02.full = dfixed_div(a, b); |
| 460 | } else { | 460 | } else { |
| 461 | a.full = rfixed_mul(wm0.worst_case_latency, | 461 | a.full = dfixed_mul(wm0.worst_case_latency, |
| 462 | wm0.consumption_rate); | 462 | wm0.consumption_rate); |
| 463 | b.full = rfixed_const(16 * 1000); | 463 | b.full = dfixed_const(16 * 1000); |
| 464 | priority_mark02.full = rfixed_div(a, b); | 464 | priority_mark02.full = dfixed_div(a, b); |
| 465 | } | 465 | } |
| 466 | if (wm1.consumption_rate.full > fill_rate.full) { | 466 | if (wm1.consumption_rate.full > fill_rate.full) { |
| 467 | b.full = wm1.consumption_rate.full - fill_rate.full; | 467 | b.full = wm1.consumption_rate.full - fill_rate.full; |
| 468 | b.full = rfixed_mul(b, wm1.active_time); | 468 | b.full = dfixed_mul(b, wm1.active_time); |
| 469 | a.full = rfixed_mul(wm1.worst_case_latency, | 469 | a.full = dfixed_mul(wm1.worst_case_latency, |
| 470 | wm1.consumption_rate); | 470 | wm1.consumption_rate); |
| 471 | a.full = a.full + b.full; | 471 | a.full = a.full + b.full; |
| 472 | b.full = rfixed_const(16 * 1000); | 472 | b.full = dfixed_const(16 * 1000); |
| 473 | priority_mark12.full = rfixed_div(a, b); | 473 | priority_mark12.full = dfixed_div(a, b); |
| 474 | } else { | 474 | } else { |
| 475 | a.full = rfixed_mul(wm1.worst_case_latency, | 475 | a.full = dfixed_mul(wm1.worst_case_latency, |
| 476 | wm1.consumption_rate); | 476 | wm1.consumption_rate); |
| 477 | b.full = rfixed_const(16 * 1000); | 477 | b.full = dfixed_const(16 * 1000); |
| 478 | priority_mark12.full = rfixed_div(a, b); | 478 | priority_mark12.full = dfixed_div(a, b); |
| 479 | } | 479 | } |
| 480 | if (wm0.priority_mark.full > priority_mark02.full) | 480 | if (wm0.priority_mark.full > priority_mark02.full) |
| 481 | priority_mark02.full = wm0.priority_mark.full; | 481 | priority_mark02.full = wm0.priority_mark.full; |
| 482 | if (rfixed_trunc(priority_mark02) < 0) | 482 | if (dfixed_trunc(priority_mark02) < 0) |
| 483 | priority_mark02.full = 0; | 483 | priority_mark02.full = 0; |
| 484 | if (wm0.priority_mark_max.full > priority_mark02.full) | 484 | if (wm0.priority_mark_max.full > priority_mark02.full) |
| 485 | priority_mark02.full = wm0.priority_mark_max.full; | 485 | priority_mark02.full = wm0.priority_mark_max.full; |
| 486 | if (wm1.priority_mark.full > priority_mark12.full) | 486 | if (wm1.priority_mark.full > priority_mark12.full) |
| 487 | priority_mark12.full = wm1.priority_mark.full; | 487 | priority_mark12.full = wm1.priority_mark.full; |
| 488 | if (rfixed_trunc(priority_mark12) < 0) | 488 | if (dfixed_trunc(priority_mark12) < 0) |
| 489 | priority_mark12.full = 0; | 489 | priority_mark12.full = 0; |
| 490 | if (wm1.priority_mark_max.full > priority_mark12.full) | 490 | if (wm1.priority_mark_max.full > priority_mark12.full) |
| 491 | priority_mark12.full = wm1.priority_mark_max.full; | 491 | priority_mark12.full = wm1.priority_mark_max.full; |
| 492 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); | 492 | d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
| 493 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | 493 | d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
| 494 | if (rdev->disp_priority == 2) { | 494 | if (rdev->disp_priority == 2) { |
| 495 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); | 495 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); |
| 496 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); | 496 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); |
| @@ -500,32 +500,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
| 500 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); | 500 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); |
| 501 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); | 501 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); |
| 502 | } else if (mode0) { | 502 | } else if (mode0) { |
| 503 | if (rfixed_trunc(wm0.dbpp) > 64) | 503 | if (dfixed_trunc(wm0.dbpp) > 64) |
| 504 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | 504 | a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); |
| 505 | else | 505 | else |
| 506 | a.full = wm0.num_line_pair.full; | 506 | a.full = wm0.num_line_pair.full; |
| 507 | fill_rate.full = rfixed_div(wm0.sclk, a); | 507 | fill_rate.full = dfixed_div(wm0.sclk, a); |
| 508 | if (wm0.consumption_rate.full > fill_rate.full) { | 508 | if (wm0.consumption_rate.full > fill_rate.full) { |
| 509 | b.full = wm0.consumption_rate.full - fill_rate.full; | 509 | b.full = wm0.consumption_rate.full - fill_rate.full; |
| 510 | b.full = rfixed_mul(b, wm0.active_time); | 510 | b.full = dfixed_mul(b, wm0.active_time); |
| 511 | a.full = rfixed_mul(wm0.worst_case_latency, | 511 | a.full = dfixed_mul(wm0.worst_case_latency, |
| 512 | wm0.consumption_rate); | 512 | wm0.consumption_rate); |
| 513 | a.full = a.full + b.full; | 513 | a.full = a.full + b.full; |
| 514 | b.full = rfixed_const(16 * 1000); | 514 | b.full = dfixed_const(16 * 1000); |
| 515 | priority_mark02.full = rfixed_div(a, b); | 515 | priority_mark02.full = dfixed_div(a, b); |
| 516 | } else { | 516 | } else { |
| 517 | a.full = rfixed_mul(wm0.worst_case_latency, | 517 | a.full = dfixed_mul(wm0.worst_case_latency, |
| 518 | wm0.consumption_rate); | 518 | wm0.consumption_rate); |
| 519 | b.full = rfixed_const(16 * 1000); | 519 | b.full = dfixed_const(16 * 1000); |
| 520 | priority_mark02.full = rfixed_div(a, b); | 520 | priority_mark02.full = dfixed_div(a, b); |
| 521 | } | 521 | } |
| 522 | if (wm0.priority_mark.full > priority_mark02.full) | 522 | if (wm0.priority_mark.full > priority_mark02.full) |
| 523 | priority_mark02.full = wm0.priority_mark.full; | 523 | priority_mark02.full = wm0.priority_mark.full; |
| 524 | if (rfixed_trunc(priority_mark02) < 0) | 524 | if (dfixed_trunc(priority_mark02) < 0) |
| 525 | priority_mark02.full = 0; | 525 | priority_mark02.full = 0; |
| 526 | if (wm0.priority_mark_max.full > priority_mark02.full) | 526 | if (wm0.priority_mark_max.full > priority_mark02.full) |
| 527 | priority_mark02.full = wm0.priority_mark_max.full; | 527 | priority_mark02.full = wm0.priority_mark_max.full; |
| 528 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); | 528 | d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
| 529 | if (rdev->disp_priority == 2) | 529 | if (rdev->disp_priority == 2) |
| 530 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); | 530 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); |
| 531 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | 531 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); |
| @@ -535,32 +535,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
| 535 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, | 535 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, |
| 536 | S_006D4C_D2MODE_PRIORITY_B_OFF(1)); | 536 | S_006D4C_D2MODE_PRIORITY_B_OFF(1)); |
| 537 | } else { | 537 | } else { |
| 538 | if (rfixed_trunc(wm1.dbpp) > 64) | 538 | if (dfixed_trunc(wm1.dbpp) > 64) |
| 539 | a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | 539 | a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); |
| 540 | else | 540 | else |
| 541 | a.full = wm1.num_line_pair.full; | 541 | a.full = wm1.num_line_pair.full; |
| 542 | fill_rate.full = rfixed_div(wm1.sclk, a); | 542 | fill_rate.full = dfixed_div(wm1.sclk, a); |
| 543 | if (wm1.consumption_rate.full > fill_rate.full) { | 543 | if (wm1.consumption_rate.full > fill_rate.full) { |
| 544 | b.full = wm1.consumption_rate.full - fill_rate.full; | 544 | b.full = wm1.consumption_rate.full - fill_rate.full; |
| 545 | b.full = rfixed_mul(b, wm1.active_time); | 545 | b.full = dfixed_mul(b, wm1.active_time); |
| 546 | a.full = rfixed_mul(wm1.worst_case_latency, | 546 | a.full = dfixed_mul(wm1.worst_case_latency, |
| 547 | wm1.consumption_rate); | 547 | wm1.consumption_rate); |
| 548 | a.full = a.full + b.full; | 548 | a.full = a.full + b.full; |
| 549 | b.full = rfixed_const(16 * 1000); | 549 | b.full = dfixed_const(16 * 1000); |
| 550 | priority_mark12.full = rfixed_div(a, b); | 550 | priority_mark12.full = dfixed_div(a, b); |
| 551 | } else { | 551 | } else { |
| 552 | a.full = rfixed_mul(wm1.worst_case_latency, | 552 | a.full = dfixed_mul(wm1.worst_case_latency, |
| 553 | wm1.consumption_rate); | 553 | wm1.consumption_rate); |
| 554 | b.full = rfixed_const(16 * 1000); | 554 | b.full = dfixed_const(16 * 1000); |
| 555 | priority_mark12.full = rfixed_div(a, b); | 555 | priority_mark12.full = dfixed_div(a, b); |
| 556 | } | 556 | } |
| 557 | if (wm1.priority_mark.full > priority_mark12.full) | 557 | if (wm1.priority_mark.full > priority_mark12.full) |
| 558 | priority_mark12.full = wm1.priority_mark.full; | 558 | priority_mark12.full = wm1.priority_mark.full; |
| 559 | if (rfixed_trunc(priority_mark12) < 0) | 559 | if (dfixed_trunc(priority_mark12) < 0) |
| 560 | priority_mark12.full = 0; | 560 | priority_mark12.full = 0; |
| 561 | if (wm1.priority_mark_max.full > priority_mark12.full) | 561 | if (wm1.priority_mark_max.full > priority_mark12.full) |
| 562 | priority_mark12.full = wm1.priority_mark_max.full; | 562 | priority_mark12.full = wm1.priority_mark_max.full; |
| 563 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | 563 | d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
| 564 | if (rdev->disp_priority == 2) | 564 | if (rdev->disp_priority == 2) |
| 565 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); | 565 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); |
| 566 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, | 566 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, |
| @@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev) | |||
| 676 | 676 | ||
| 677 | void rs690_fini(struct radeon_device *rdev) | 677 | void rs690_fini(struct radeon_device *rdev) |
| 678 | { | 678 | { |
| 679 | radeon_pm_fini(rdev); | ||
| 680 | r100_cp_fini(rdev); | 679 | r100_cp_fini(rdev); |
| 681 | r100_wb_fini(rdev); | 680 | r100_wb_fini(rdev); |
| 682 | r100_ib_fini(rdev); | 681 | r100_ib_fini(rdev); |
| @@ -727,8 +726,6 @@ int rs690_init(struct radeon_device *rdev) | |||
| 727 | 726 | ||
| 728 | /* Initialize clocks */ | 727 | /* Initialize clocks */ |
| 729 | radeon_get_clock_info(rdev->ddev); | 728 | radeon_get_clock_info(rdev->ddev); |
| 730 | /* Initialize power management */ | ||
| 731 | radeon_pm_init(rdev); | ||
| 732 | /* initialize memory controller */ | 729 | /* initialize memory controller */ |
| 733 | rs690_mc_init(rdev); | 730 | rs690_mc_init(rdev); |
| 734 | rv515_debugfs(rdev); | 731 | rv515_debugfs(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index c513473d72ae..7d9a7b0a180a 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
| @@ -445,7 +445,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev) | |||
| 445 | 445 | ||
| 446 | void rv515_fini(struct radeon_device *rdev) | 446 | void rv515_fini(struct radeon_device *rdev) |
| 447 | { | 447 | { |
| 448 | radeon_pm_fini(rdev); | ||
| 449 | r100_cp_fini(rdev); | 448 | r100_cp_fini(rdev); |
| 450 | r100_wb_fini(rdev); | 449 | r100_wb_fini(rdev); |
| 451 | r100_ib_fini(rdev); | 450 | r100_ib_fini(rdev); |
| @@ -494,8 +493,6 @@ int rv515_init(struct radeon_device *rdev) | |||
| 494 | return -EINVAL; | 493 | return -EINVAL; |
| 495 | /* Initialize clocks */ | 494 | /* Initialize clocks */ |
| 496 | radeon_get_clock_info(rdev->ddev); | 495 | radeon_get_clock_info(rdev->ddev); |
| 497 | /* Initialize power management */ | ||
| 498 | radeon_pm_init(rdev); | ||
| 499 | /* initialize AGP */ | 496 | /* initialize AGP */ |
| 500 | if (rdev->flags & RADEON_IS_AGP) { | 497 | if (rdev->flags & RADEON_IS_AGP) { |
| 501 | r = radeon_agp_init(rdev); | 498 | r = radeon_agp_init(rdev); |
| @@ -795,20 +792,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 795 | return; | 792 | return; |
| 796 | } | 793 | } |
| 797 | 794 | ||
| 798 | if (crtc->vsc.full > rfixed_const(2)) | 795 | if (crtc->vsc.full > dfixed_const(2)) |
| 799 | wm->num_line_pair.full = rfixed_const(2); | 796 | wm->num_line_pair.full = dfixed_const(2); |
| 800 | else | 797 | else |
| 801 | wm->num_line_pair.full = rfixed_const(1); | 798 | wm->num_line_pair.full = dfixed_const(1); |
| 802 | 799 | ||
| 803 | b.full = rfixed_const(mode->crtc_hdisplay); | 800 | b.full = dfixed_const(mode->crtc_hdisplay); |
| 804 | c.full = rfixed_const(256); | 801 | c.full = dfixed_const(256); |
| 805 | a.full = rfixed_div(b, c); | 802 | a.full = dfixed_div(b, c); |
| 806 | request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); | 803 | request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); |
| 807 | request_fifo_depth.full = rfixed_ceil(request_fifo_depth); | 804 | request_fifo_depth.full = dfixed_ceil(request_fifo_depth); |
| 808 | if (a.full < rfixed_const(4)) { | 805 | if (a.full < dfixed_const(4)) { |
| 809 | wm->lb_request_fifo_depth = 4; | 806 | wm->lb_request_fifo_depth = 4; |
| 810 | } else { | 807 | } else { |
| 811 | wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); | 808 | wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth); |
| 812 | } | 809 | } |
| 813 | 810 | ||
| 814 | /* Determine consumption rate | 811 | /* Determine consumption rate |
| @@ -817,23 +814,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 817 | * vsc = vertical scaling ratio, defined as source/destination | 814 | * vsc = vertical scaling ratio, defined as source/destination |
| 818 | * hsc = horizontal scaling ration, defined as source/destination | 815 | * hsc = horizontal scaling ration, defined as source/destination |
| 819 | */ | 816 | */ |
| 820 | a.full = rfixed_const(mode->clock); | 817 | a.full = dfixed_const(mode->clock); |
| 821 | b.full = rfixed_const(1000); | 818 | b.full = dfixed_const(1000); |
| 822 | a.full = rfixed_div(a, b); | 819 | a.full = dfixed_div(a, b); |
| 823 | pclk.full = rfixed_div(b, a); | 820 | pclk.full = dfixed_div(b, a); |
| 824 | if (crtc->rmx_type != RMX_OFF) { | 821 | if (crtc->rmx_type != RMX_OFF) { |
| 825 | b.full = rfixed_const(2); | 822 | b.full = dfixed_const(2); |
| 826 | if (crtc->vsc.full > b.full) | 823 | if (crtc->vsc.full > b.full) |
| 827 | b.full = crtc->vsc.full; | 824 | b.full = crtc->vsc.full; |
| 828 | b.full = rfixed_mul(b, crtc->hsc); | 825 | b.full = dfixed_mul(b, crtc->hsc); |
| 829 | c.full = rfixed_const(2); | 826 | c.full = dfixed_const(2); |
| 830 | b.full = rfixed_div(b, c); | 827 | b.full = dfixed_div(b, c); |
| 831 | consumption_time.full = rfixed_div(pclk, b); | 828 | consumption_time.full = dfixed_div(pclk, b); |
| 832 | } else { | 829 | } else { |
| 833 | consumption_time.full = pclk.full; | 830 | consumption_time.full = pclk.full; |
| 834 | } | 831 | } |
| 835 | a.full = rfixed_const(1); | 832 | a.full = dfixed_const(1); |
| 836 | wm->consumption_rate.full = rfixed_div(a, consumption_time); | 833 | wm->consumption_rate.full = dfixed_div(a, consumption_time); |
| 837 | 834 | ||
| 838 | 835 | ||
| 839 | /* Determine line time | 836 | /* Determine line time |
| @@ -841,27 +838,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 841 | * LineTime = total number of horizontal pixels | 838 | * LineTime = total number of horizontal pixels |
| 842 | * pclk = pixel clock period(ns) | 839 | * pclk = pixel clock period(ns) |
| 843 | */ | 840 | */ |
| 844 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | 841 | a.full = dfixed_const(crtc->base.mode.crtc_htotal); |
| 845 | line_time.full = rfixed_mul(a, pclk); | 842 | line_time.full = dfixed_mul(a, pclk); |
| 846 | 843 | ||
| 847 | /* Determine active time | 844 | /* Determine active time |
| 848 | * ActiveTime = time of active region of display within one line, | 845 | * ActiveTime = time of active region of display within one line, |
| 849 | * hactive = total number of horizontal active pixels | 846 | * hactive = total number of horizontal active pixels |
| 850 | * htotal = total number of horizontal pixels | 847 | * htotal = total number of horizontal pixels |
| 851 | */ | 848 | */ |
| 852 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | 849 | a.full = dfixed_const(crtc->base.mode.crtc_htotal); |
| 853 | b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | 850 | b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); |
| 854 | wm->active_time.full = rfixed_mul(line_time, b); | 851 | wm->active_time.full = dfixed_mul(line_time, b); |
| 855 | wm->active_time.full = rfixed_div(wm->active_time, a); | 852 | wm->active_time.full = dfixed_div(wm->active_time, a); |
| 856 | 853 | ||
| 857 | /* Determine chunk time | 854 | /* Determine chunk time |
| 858 | * ChunkTime = the time it takes the DCP to send one chunk of data | 855 | * ChunkTime = the time it takes the DCP to send one chunk of data |
| 859 | * to the LB which consists of pipeline delay and inter chunk gap | 856 | * to the LB which consists of pipeline delay and inter chunk gap |
| 860 | * sclk = system clock(Mhz) | 857 | * sclk = system clock(Mhz) |
| 861 | */ | 858 | */ |
| 862 | a.full = rfixed_const(600 * 1000); | 859 | a.full = dfixed_const(600 * 1000); |
| 863 | chunk_time.full = rfixed_div(a, rdev->pm.sclk); | 860 | chunk_time.full = dfixed_div(a, rdev->pm.sclk); |
| 864 | read_delay_latency.full = rfixed_const(1000); | 861 | read_delay_latency.full = dfixed_const(1000); |
| 865 | 862 | ||
| 866 | /* Determine the worst case latency | 863 | /* Determine the worst case latency |
| 867 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) | 864 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) |
| @@ -871,9 +868,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 871 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB | 868 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB |
| 872 | * which consists of pipeline delay and inter chunk gap | 869 | * which consists of pipeline delay and inter chunk gap |
| 873 | */ | 870 | */ |
| 874 | if (rfixed_trunc(wm->num_line_pair) > 1) { | 871 | if (dfixed_trunc(wm->num_line_pair) > 1) { |
| 875 | a.full = rfixed_const(3); | 872 | a.full = dfixed_const(3); |
| 876 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | 873 | wm->worst_case_latency.full = dfixed_mul(a, chunk_time); |
| 877 | wm->worst_case_latency.full += read_delay_latency.full; | 874 | wm->worst_case_latency.full += read_delay_latency.full; |
| 878 | } else { | 875 | } else { |
| 879 | wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; | 876 | wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; |
| @@ -889,34 +886,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 889 | * of data to the LB which consists of | 886 | * of data to the LB which consists of |
| 890 | * pipeline delay and inter chunk gap | 887 | * pipeline delay and inter chunk gap |
| 891 | */ | 888 | */ |
| 892 | if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { | 889 | if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { |
| 893 | tolerable_latency.full = line_time.full; | 890 | tolerable_latency.full = line_time.full; |
| 894 | } else { | 891 | } else { |
| 895 | tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); | 892 | tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2); |
| 896 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; | 893 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; |
| 897 | tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); | 894 | tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); |
| 898 | tolerable_latency.full = line_time.full - tolerable_latency.full; | 895 | tolerable_latency.full = line_time.full - tolerable_latency.full; |
| 899 | } | 896 | } |
| 900 | /* We assume worst case 32bits (4 bytes) */ | 897 | /* We assume worst case 32bits (4 bytes) */ |
| 901 | wm->dbpp.full = rfixed_const(2 * 16); | 898 | wm->dbpp.full = dfixed_const(2 * 16); |
| 902 | 899 | ||
| 903 | /* Determine the maximum priority mark | 900 | /* Determine the maximum priority mark |
| 904 | * width = viewport width in pixels | 901 | * width = viewport width in pixels |
| 905 | */ | 902 | */ |
| 906 | a.full = rfixed_const(16); | 903 | a.full = dfixed_const(16); |
| 907 | wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | 904 | wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); |
| 908 | wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); | 905 | wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); |
| 909 | wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); | 906 | wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); |
| 910 | 907 | ||
| 911 | /* Determine estimated width */ | 908 | /* Determine estimated width */ |
| 912 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; | 909 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; |
| 913 | estimated_width.full = rfixed_div(estimated_width, consumption_time); | 910 | estimated_width.full = dfixed_div(estimated_width, consumption_time); |
| 914 | if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { | 911 | if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { |
| 915 | wm->priority_mark.full = wm->priority_mark_max.full; | 912 | wm->priority_mark.full = wm->priority_mark_max.full; |
| 916 | } else { | 913 | } else { |
| 917 | a.full = rfixed_const(16); | 914 | a.full = dfixed_const(16); |
| 918 | wm->priority_mark.full = rfixed_div(estimated_width, a); | 915 | wm->priority_mark.full = dfixed_div(estimated_width, a); |
| 919 | wm->priority_mark.full = rfixed_ceil(wm->priority_mark); | 916 | wm->priority_mark.full = dfixed_ceil(wm->priority_mark); |
| 920 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; | 917 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; |
| 921 | } | 918 | } |
| 922 | } | 919 | } |
| @@ -945,58 +942,58 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
| 945 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); | 942 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); |
| 946 | 943 | ||
| 947 | if (mode0 && mode1) { | 944 | if (mode0 && mode1) { |
| 948 | if (rfixed_trunc(wm0.dbpp) > 64) | 945 | if (dfixed_trunc(wm0.dbpp) > 64) |
| 949 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | 946 | a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); |
| 950 | else | 947 | else |
| 951 | a.full = wm0.num_line_pair.full; | 948 | a.full = wm0.num_line_pair.full; |
| 952 | if (rfixed_trunc(wm1.dbpp) > 64) | 949 | if (dfixed_trunc(wm1.dbpp) > 64) |
| 953 | b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); | 950 | b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); |
| 954 | else | 951 | else |
| 955 | b.full = wm1.num_line_pair.full; | 952 | b.full = wm1.num_line_pair.full; |
| 956 | a.full += b.full; | 953 | a.full += b.full; |
| 957 | fill_rate.full = rfixed_div(wm0.sclk, a); | 954 | fill_rate.full = dfixed_div(wm0.sclk, a); |
| 958 | if (wm0.consumption_rate.full > fill_rate.full) { | 955 | if (wm0.consumption_rate.full > fill_rate.full) { |
| 959 | b.full = wm0.consumption_rate.full - fill_rate.full; | 956 | b.full = wm0.consumption_rate.full - fill_rate.full; |
| 960 | b.full = rfixed_mul(b, wm0.active_time); | 957 | b.full = dfixed_mul(b, wm0.active_time); |
| 961 | a.full = rfixed_const(16); | 958 | a.full = dfixed_const(16); |
| 962 | b.full = rfixed_div(b, a); | 959 | b.full = dfixed_div(b, a); |
| 963 | a.full = rfixed_mul(wm0.worst_case_latency, | 960 | a.full = dfixed_mul(wm0.worst_case_latency, |
| 964 | wm0.consumption_rate); | 961 | wm0.consumption_rate); |
| 965 | priority_mark02.full = a.full + b.full; | 962 | priority_mark02.full = a.full + b.full; |
| 966 | } else { | 963 | } else { |
| 967 | a.full = rfixed_mul(wm0.worst_case_latency, | 964 | a.full = dfixed_mul(wm0.worst_case_latency, |
| 968 | wm0.consumption_rate); | 965 | wm0.consumption_rate); |
| 969 | b.full = rfixed_const(16 * 1000); | 966 | b.full = dfixed_const(16 * 1000); |
| 970 | priority_mark02.full = rfixed_div(a, b); | 967 | priority_mark02.full = dfixed_div(a, b); |
| 971 | } | 968 | } |
| 972 | if (wm1.consumption_rate.full > fill_rate.full) { | 969 | if (wm1.consumption_rate.full > fill_rate.full) { |
| 973 | b.full = wm1.consumption_rate.full - fill_rate.full; | 970 | b.full = wm1.consumption_rate.full - fill_rate.full; |
| 974 | b.full = rfixed_mul(b, wm1.active_time); | 971 | b.full = dfixed_mul(b, wm1.active_time); |
| 975 | a.full = rfixed_const(16); | 972 | a.full = dfixed_const(16); |
| 976 | b.full = rfixed_div(b, a); | 973 | b.full = dfixed_div(b, a); |
| 977 | a.full = rfixed_mul(wm1.worst_case_latency, | 974 | a.full = dfixed_mul(wm1.worst_case_latency, |
| 978 | wm1.consumption_rate); | 975 | wm1.consumption_rate); |
| 979 | priority_mark12.full = a.full + b.full; | 976 | priority_mark12.full = a.full + b.full; |
| 980 | } else { | 977 | } else { |
| 981 | a.full = rfixed_mul(wm1.worst_case_latency, | 978 | a.full = dfixed_mul(wm1.worst_case_latency, |
| 982 | wm1.consumption_rate); | 979 | wm1.consumption_rate); |
| 983 | b.full = rfixed_const(16 * 1000); | 980 | b.full = dfixed_const(16 * 1000); |
| 984 | priority_mark12.full = rfixed_div(a, b); | 981 | priority_mark12.full = dfixed_div(a, b); |
| 985 | } | 982 | } |
| 986 | if (wm0.priority_mark.full > priority_mark02.full) | 983 | if (wm0.priority_mark.full > priority_mark02.full) |
| 987 | priority_mark02.full = wm0.priority_mark.full; | 984 | priority_mark02.full = wm0.priority_mark.full; |
| 988 | if (rfixed_trunc(priority_mark02) < 0) | 985 | if (dfixed_trunc(priority_mark02) < 0) |
| 989 | priority_mark02.full = 0; | 986 | priority_mark02.full = 0; |
| 990 | if (wm0.priority_mark_max.full > priority_mark02.full) | 987 | if (wm0.priority_mark_max.full > priority_mark02.full) |
| 991 | priority_mark02.full = wm0.priority_mark_max.full; | 988 | priority_mark02.full = wm0.priority_mark_max.full; |
| 992 | if (wm1.priority_mark.full > priority_mark12.full) | 989 | if (wm1.priority_mark.full > priority_mark12.full) |
| 993 | priority_mark12.full = wm1.priority_mark.full; | 990 | priority_mark12.full = wm1.priority_mark.full; |
| 994 | if (rfixed_trunc(priority_mark12) < 0) | 991 | if (dfixed_trunc(priority_mark12) < 0) |
| 995 | priority_mark12.full = 0; | 992 | priority_mark12.full = 0; |
| 996 | if (wm1.priority_mark_max.full > priority_mark12.full) | 993 | if (wm1.priority_mark_max.full > priority_mark12.full) |
| 997 | priority_mark12.full = wm1.priority_mark_max.full; | 994 | priority_mark12.full = wm1.priority_mark_max.full; |
| 998 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); | 995 | d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
| 999 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | 996 | d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
| 1000 | if (rdev->disp_priority == 2) { | 997 | if (rdev->disp_priority == 2) { |
| 1001 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | 998 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
| 1002 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | 999 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
| @@ -1006,32 +1003,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
| 1006 | WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); | 1003 | WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); |
| 1007 | WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); | 1004 | WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); |
| 1008 | } else if (mode0) { | 1005 | } else if (mode0) { |
| 1009 | if (rfixed_trunc(wm0.dbpp) > 64) | 1006 | if (dfixed_trunc(wm0.dbpp) > 64) |
| 1010 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | 1007 | a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); |
| 1011 | else | 1008 | else |
| 1012 | a.full = wm0.num_line_pair.full; | 1009 | a.full = wm0.num_line_pair.full; |
| 1013 | fill_rate.full = rfixed_div(wm0.sclk, a); | 1010 | fill_rate.full = dfixed_div(wm0.sclk, a); |
| 1014 | if (wm0.consumption_rate.full > fill_rate.full) { | 1011 | if (wm0.consumption_rate.full > fill_rate.full) { |
| 1015 | b.full = wm0.consumption_rate.full - fill_rate.full; | 1012 | b.full = wm0.consumption_rate.full - fill_rate.full; |
| 1016 | b.full = rfixed_mul(b, wm0.active_time); | 1013 | b.full = dfixed_mul(b, wm0.active_time); |
| 1017 | a.full = rfixed_const(16); | 1014 | a.full = dfixed_const(16); |
| 1018 | b.full = rfixed_div(b, a); | 1015 | b.full = dfixed_div(b, a); |
| 1019 | a.full = rfixed_mul(wm0.worst_case_latency, | 1016 | a.full = dfixed_mul(wm0.worst_case_latency, |
| 1020 | wm0.consumption_rate); | 1017 | wm0.consumption_rate); |
| 1021 | priority_mark02.full = a.full + b.full; | 1018 | priority_mark02.full = a.full + b.full; |
| 1022 | } else { | 1019 | } else { |
| 1023 | a.full = rfixed_mul(wm0.worst_case_latency, | 1020 | a.full = dfixed_mul(wm0.worst_case_latency, |
| 1024 | wm0.consumption_rate); | 1021 | wm0.consumption_rate); |
| 1025 | b.full = rfixed_const(16); | 1022 | b.full = dfixed_const(16); |
| 1026 | priority_mark02.full = rfixed_div(a, b); | 1023 | priority_mark02.full = dfixed_div(a, b); |
| 1027 | } | 1024 | } |
| 1028 | if (wm0.priority_mark.full > priority_mark02.full) | 1025 | if (wm0.priority_mark.full > priority_mark02.full) |
| 1029 | priority_mark02.full = wm0.priority_mark.full; | 1026 | priority_mark02.full = wm0.priority_mark.full; |
| 1030 | if (rfixed_trunc(priority_mark02) < 0) | 1027 | if (dfixed_trunc(priority_mark02) < 0) |
| 1031 | priority_mark02.full = 0; | 1028 | priority_mark02.full = 0; |
| 1032 | if (wm0.priority_mark_max.full > priority_mark02.full) | 1029 | if (wm0.priority_mark_max.full > priority_mark02.full) |
| 1033 | priority_mark02.full = wm0.priority_mark_max.full; | 1030 | priority_mark02.full = wm0.priority_mark_max.full; |
| 1034 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); | 1031 | d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
| 1035 | if (rdev->disp_priority == 2) | 1032 | if (rdev->disp_priority == 2) |
| 1036 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | 1033 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
| 1037 | WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | 1034 | WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); |
| @@ -1039,32 +1036,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
| 1039 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 1036 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
| 1040 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 1037 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); |
| 1041 | } else { | 1038 | } else { |
| 1042 | if (rfixed_trunc(wm1.dbpp) > 64) | 1039 | if (dfixed_trunc(wm1.dbpp) > 64) |
| 1043 | a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); | 1040 | a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); |
| 1044 | else | 1041 | else |
| 1045 | a.full = wm1.num_line_pair.full; | 1042 | a.full = wm1.num_line_pair.full; |
| 1046 | fill_rate.full = rfixed_div(wm1.sclk, a); | 1043 | fill_rate.full = dfixed_div(wm1.sclk, a); |
| 1047 | if (wm1.consumption_rate.full > fill_rate.full) { | 1044 | if (wm1.consumption_rate.full > fill_rate.full) { |
| 1048 | b.full = wm1.consumption_rate.full - fill_rate.full; | 1045 | b.full = wm1.consumption_rate.full - fill_rate.full; |
| 1049 | b.full = rfixed_mul(b, wm1.active_time); | 1046 | b.full = dfixed_mul(b, wm1.active_time); |
| 1050 | a.full = rfixed_const(16); | 1047 | a.full = dfixed_const(16); |
| 1051 | b.full = rfixed_div(b, a); | 1048 | b.full = dfixed_div(b, a); |
| 1052 | a.full = rfixed_mul(wm1.worst_case_latency, | 1049 | a.full = dfixed_mul(wm1.worst_case_latency, |
| 1053 | wm1.consumption_rate); | 1050 | wm1.consumption_rate); |
| 1054 | priority_mark12.full = a.full + b.full; | 1051 | priority_mark12.full = a.full + b.full; |
| 1055 | } else { | 1052 | } else { |
| 1056 | a.full = rfixed_mul(wm1.worst_case_latency, | 1053 | a.full = dfixed_mul(wm1.worst_case_latency, |
| 1057 | wm1.consumption_rate); | 1054 | wm1.consumption_rate); |
| 1058 | b.full = rfixed_const(16 * 1000); | 1055 | b.full = dfixed_const(16 * 1000); |
| 1059 | priority_mark12.full = rfixed_div(a, b); | 1056 | priority_mark12.full = dfixed_div(a, b); |
| 1060 | } | 1057 | } |
| 1061 | if (wm1.priority_mark.full > priority_mark12.full) | 1058 | if (wm1.priority_mark.full > priority_mark12.full) |
| 1062 | priority_mark12.full = wm1.priority_mark.full; | 1059 | priority_mark12.full = wm1.priority_mark.full; |
| 1063 | if (rfixed_trunc(priority_mark12) < 0) | 1060 | if (dfixed_trunc(priority_mark12) < 0) |
| 1064 | priority_mark12.full = 0; | 1061 | priority_mark12.full = 0; |
| 1065 | if (wm1.priority_mark_max.full > priority_mark12.full) | 1062 | if (wm1.priority_mark_max.full > priority_mark12.full) |
| 1066 | priority_mark12.full = wm1.priority_mark_max.full; | 1063 | priority_mark12.full = wm1.priority_mark_max.full; |
| 1067 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | 1064 | d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
| 1068 | if (rdev->disp_priority == 2) | 1065 | if (rdev->disp_priority == 2) |
| 1069 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | 1066 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
| 1070 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 1067 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index a74683e18612..253f24aec031 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -42,6 +42,10 @@ | |||
| 42 | static void rv770_gpu_init(struct radeon_device *rdev); | 42 | static void rv770_gpu_init(struct radeon_device *rdev); |
| 43 | void rv770_fini(struct radeon_device *rdev); | 43 | void rv770_fini(struct radeon_device *rdev); |
| 44 | 44 | ||
| 45 | void rv770_pm_misc(struct radeon_device *rdev) | ||
| 46 | { | ||
| 47 | |||
| 48 | } | ||
| 45 | 49 | ||
| 46 | /* | 50 | /* |
| 47 | * GART | 51 | * GART |
| @@ -1087,8 +1091,6 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1087 | r = radeon_clocks_init(rdev); | 1091 | r = radeon_clocks_init(rdev); |
| 1088 | if (r) | 1092 | if (r) |
| 1089 | return r; | 1093 | return r; |
| 1090 | /* Initialize power management */ | ||
| 1091 | radeon_pm_init(rdev); | ||
| 1092 | /* Fence driver */ | 1094 | /* Fence driver */ |
| 1093 | r = radeon_fence_driver_init(rdev); | 1095 | r = radeon_fence_driver_init(rdev); |
| 1094 | if (r) | 1096 | if (r) |
| @@ -1157,7 +1159,6 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1157 | 1159 | ||
| 1158 | void rv770_fini(struct radeon_device *rdev) | 1160 | void rv770_fini(struct radeon_device *rdev) |
| 1159 | { | 1161 | { |
| 1160 | radeon_pm_fini(rdev); | ||
| 1161 | r600_blit_fini(rdev); | 1162 | r600_blit_fini(rdev); |
| 1162 | r700_cp_fini(rdev); | 1163 | r700_cp_fini(rdev); |
| 1163 | r600_wb_fini(rdev); | 1164 | r600_wb_fini(rdev); |
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index bff6fc2524c8..2d0c9ca484c5 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c | |||
| @@ -539,11 +539,10 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 539 | { | 539 | { |
| 540 | drm_savage_private_t *dev_priv; | 540 | drm_savage_private_t *dev_priv; |
| 541 | 541 | ||
| 542 | dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL); | 542 | dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL); |
| 543 | if (dev_priv == NULL) | 543 | if (dev_priv == NULL) |
| 544 | return -ENOMEM; | 544 | return -ENOMEM; |
| 545 | 545 | ||
| 546 | memset(dev_priv, 0, sizeof(drm_savage_private_t)); | ||
| 547 | dev->dev_private = (void *)dev_priv; | 546 | dev->dev_private = (void *)dev_priv; |
| 548 | 547 | ||
| 549 | dev_priv->chipset = (enum savage_family)chipset; | 548 | dev_priv->chipset = (enum savage_family)chipset; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3b5b094b1397..4c2299299ab2 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
| @@ -604,6 +604,20 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo) | |||
| 604 | } | 604 | } |
| 605 | EXPORT_SYMBOL(ttm_bo_unref); | 605 | EXPORT_SYMBOL(ttm_bo_unref); |
| 606 | 606 | ||
| 607 | int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) | ||
| 608 | { | ||
| 609 | return cancel_delayed_work_sync(&bdev->wq); | ||
| 610 | } | ||
| 611 | EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue); | ||
| 612 | |||
| 613 | void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) | ||
| 614 | { | ||
| 615 | if (resched) | ||
| 616 | schedule_delayed_work(&bdev->wq, | ||
| 617 | ((HZ / 100) < 1) ? 1 : HZ / 100); | ||
| 618 | } | ||
| 619 | EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); | ||
| 620 | |||
| 607 | static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, | 621 | static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, |
| 608 | bool no_wait_reserve, bool no_wait_gpu) | 622 | bool no_wait_reserve, bool no_wait_gpu) |
| 609 | { | 623 | { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 80125ffc4e28..7421aaad8d09 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
| @@ -559,8 +559,13 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
| 559 | info->pixmap.scan_align = 1; | 559 | info->pixmap.scan_align = 1; |
| 560 | #endif | 560 | #endif |
| 561 | 561 | ||
| 562 | info->aperture_base = vmw_priv->vram_start; | 562 | info->apertures = alloc_apertures(1); |
| 563 | info->aperture_size = vmw_priv->vram_size; | 563 | if (!info->apertures) { |
| 564 | ret = -ENOMEM; | ||
| 565 | goto err_aper; | ||
| 566 | } | ||
| 567 | info->apertures->ranges[0].base = vmw_priv->vram_start; | ||
| 568 | info->apertures->ranges[0].size = vmw_priv->vram_size; | ||
| 564 | 569 | ||
| 565 | /* | 570 | /* |
| 566 | * Dirty & Deferred IO | 571 | * Dirty & Deferred IO |
| @@ -580,6 +585,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
| 580 | 585 | ||
| 581 | err_defio: | 586 | err_defio: |
| 582 | fb_deferred_io_cleanup(info); | 587 | fb_deferred_io_cleanup(info); |
| 588 | err_aper: | ||
| 583 | ttm_bo_kunmap(&par->map); | 589 | ttm_bo_kunmap(&par->map); |
| 584 | err_unref: | 590 | err_unref: |
| 585 | ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo); | 591 | ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo); |
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig index 61ab4daf0bbb..8d0e31a22027 100644 --- a/drivers/gpu/vga/Kconfig +++ b/drivers/gpu/vga/Kconfig | |||
| @@ -18,12 +18,12 @@ config VGA_ARB_MAX_GPUS | |||
| 18 | multiple GPUS. The overhead for each GPU is very small. | 18 | multiple GPUS. The overhead for each GPU is very small. |
| 19 | 19 | ||
| 20 | config VGA_SWITCHEROO | 20 | config VGA_SWITCHEROO |
| 21 | bool "Laptop Hybrid Grapics - GPU switching support" | 21 | bool "Laptop Hybrid Graphics - GPU switching support" |
| 22 | depends on X86 | 22 | depends on X86 |
| 23 | depends on ACPI | 23 | depends on ACPI |
| 24 | help | 24 | help |
| 25 | Many laptops released in 2008/9/10 have two gpus with a multiplxer | 25 | Many laptops released in 2008/9/10 have two GPUs with a multiplexer |
| 26 | to switch between them. This adds support for dynamic switching when | 26 | to switch between them. This adds support for dynamic switching when |
| 27 | X isn't running and delayed switching until the next logoff. This | 27 | X isn't running and delayed switching until the next logoff. This |
| 28 | features is called hybrid graphics, ATI PowerXpress, and Nvidia | 28 | feature is called hybrid graphics, ATI PowerXpress, and Nvidia |
| 29 | HybridPower. | 29 | HybridPower. |
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c index ecf405562f5c..4a56f46af40a 100644 --- a/drivers/video/efifb.c +++ b/drivers/video/efifb.c | |||
| @@ -168,7 +168,7 @@ static void efifb_destroy(struct fb_info *info) | |||
| 168 | { | 168 | { |
| 169 | if (info->screen_base) | 169 | if (info->screen_base) |
| 170 | iounmap(info->screen_base); | 170 | iounmap(info->screen_base); |
| 171 | release_mem_region(info->aperture_base, info->aperture_size); | 171 | release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); |
| 172 | framebuffer_release(info); | 172 | framebuffer_release(info); |
| 173 | } | 173 | } |
| 174 | 174 | ||
| @@ -292,8 +292,13 @@ static int __devinit efifb_probe(struct platform_device *dev) | |||
| 292 | info->pseudo_palette = info->par; | 292 | info->pseudo_palette = info->par; |
| 293 | info->par = NULL; | 293 | info->par = NULL; |
| 294 | 294 | ||
| 295 | info->aperture_base = efifb_fix.smem_start; | 295 | info->apertures = alloc_apertures(1); |
| 296 | info->aperture_size = size_remap; | 296 | if (!info->apertures) { |
| 297 | err = -ENOMEM; | ||
| 298 | goto err_release_fb; | ||
| 299 | } | ||
| 300 | info->apertures->ranges[0].base = efifb_fix.smem_start; | ||
| 301 | info->apertures->ranges[0].size = size_remap; | ||
| 297 | 302 | ||
| 298 | info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); | 303 | info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); |
| 299 | if (!info->screen_base) { | 304 | if (!info->screen_base) { |
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index a15b44e9c003..e08b7b5cb326 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c | |||
| @@ -1468,16 +1468,67 @@ static int fb_check_foreignness(struct fb_info *fi) | |||
| 1468 | return 0; | 1468 | return 0; |
| 1469 | } | 1469 | } |
| 1470 | 1470 | ||
| 1471 | static bool fb_do_apertures_overlap(struct fb_info *gen, struct fb_info *hw) | 1471 | static bool apertures_overlap(struct aperture *gen, struct aperture *hw) |
| 1472 | { | 1472 | { |
| 1473 | /* is the generic aperture base the same as the HW one */ | 1473 | /* is the generic aperture base the same as the HW one */ |
| 1474 | if (gen->aperture_base == hw->aperture_base) | 1474 | if (gen->base == hw->base) |
| 1475 | return true; | 1475 | return true; |
| 1476 | /* is the generic aperture base inside the hw base->hw base+size */ | 1476 | /* is the generic aperture base inside the hw base->hw base+size */ |
| 1477 | if (gen->aperture_base > hw->aperture_base && gen->aperture_base <= hw->aperture_base + hw->aperture_size) | 1477 | if (gen->base > hw->base && gen->base <= hw->base + hw->size) |
| 1478 | return true; | 1478 | return true; |
| 1479 | return false; | 1479 | return false; |
| 1480 | } | 1480 | } |
| 1481 | |||
| 1482 | static bool fb_do_apertures_overlap(struct apertures_struct *gena, | ||
| 1483 | struct apertures_struct *hwa) | ||
| 1484 | { | ||
| 1485 | int i, j; | ||
| 1486 | if (!hwa || !gena) | ||
| 1487 | return false; | ||
| 1488 | |||
| 1489 | for (i = 0; i < hwa->count; ++i) { | ||
| 1490 | struct aperture *h = &hwa->ranges[i]; | ||
| 1491 | for (j = 0; j < gena->count; ++j) { | ||
| 1492 | struct aperture *g = &gena->ranges[j]; | ||
| 1493 | printk(KERN_DEBUG "checking generic (%llx %llx) vs hw (%llx %llx)\n", | ||
| 1494 | g->base, g->size, h->base, h->size); | ||
| 1495 | if (apertures_overlap(g, h)) | ||
| 1496 | return true; | ||
| 1497 | } | ||
| 1498 | } | ||
| 1499 | |||
| 1500 | return false; | ||
| 1501 | } | ||
| 1502 | |||
| 1503 | #define VGA_FB_PHYS 0xA0000 | ||
| 1504 | void remove_conflicting_framebuffers(struct apertures_struct *a, | ||
| 1505 | const char *name, bool primary) | ||
| 1506 | { | ||
| 1507 | int i; | ||
| 1508 | |||
| 1509 | /* check all firmware fbs and kick off if the base addr overlaps */ | ||
| 1510 | for (i = 0 ; i < FB_MAX; i++) { | ||
| 1511 | struct apertures_struct *gen_aper; | ||
| 1512 | if (!registered_fb[i]) | ||
| 1513 | continue; | ||
| 1514 | |||
| 1515 | if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE)) | ||
| 1516 | continue; | ||
| 1517 | |||
| 1518 | gen_aper = registered_fb[i]->apertures; | ||
| 1519 | if (fb_do_apertures_overlap(gen_aper, a) || | ||
| 1520 | (primary && gen_aper && gen_aper->count && | ||
| 1521 | gen_aper->ranges[0].base == VGA_FB_PHYS)) { | ||
| 1522 | |||
| 1523 | printk(KERN_ERR "fb: conflicting fb hw usage " | ||
| 1524 | "%s vs %s - removing generic driver\n", | ||
| 1525 | name, registered_fb[i]->fix.id); | ||
| 1526 | unregister_framebuffer(registered_fb[i]); | ||
| 1527 | } | ||
| 1528 | } | ||
| 1529 | } | ||
| 1530 | EXPORT_SYMBOL(remove_conflicting_framebuffers); | ||
| 1531 | |||
| 1481 | /** | 1532 | /** |
| 1482 | * register_framebuffer - registers a frame buffer device | 1533 | * register_framebuffer - registers a frame buffer device |
| 1483 | * @fb_info: frame buffer info structure | 1534 | * @fb_info: frame buffer info structure |
| @@ -1501,21 +1552,8 @@ register_framebuffer(struct fb_info *fb_info) | |||
| 1501 | if (fb_check_foreignness(fb_info)) | 1552 | if (fb_check_foreignness(fb_info)) |
| 1502 | return -ENOSYS; | 1553 | return -ENOSYS; |
| 1503 | 1554 | ||
| 1504 | /* check all firmware fbs and kick off if the base addr overlaps */ | 1555 | remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, |
| 1505 | for (i = 0 ; i < FB_MAX; i++) { | 1556 | fb_is_primary_device(fb_info)); |
| 1506 | if (!registered_fb[i]) | ||
| 1507 | continue; | ||
| 1508 | |||
| 1509 | if (registered_fb[i]->flags & FBINFO_MISC_FIRMWARE) { | ||
| 1510 | if (fb_do_apertures_overlap(registered_fb[i], fb_info)) { | ||
| 1511 | printk(KERN_ERR "fb: conflicting fb hw usage " | ||
| 1512 | "%s vs %s - removing generic driver\n", | ||
| 1513 | fb_info->fix.id, | ||
| 1514 | registered_fb[i]->fix.id); | ||
| 1515 | unregister_framebuffer(registered_fb[i]); | ||
| 1516 | } | ||
| 1517 | } | ||
| 1518 | } | ||
| 1519 | 1557 | ||
| 1520 | num_registered_fb++; | 1558 | num_registered_fb++; |
| 1521 | for (i = 0 ; i < FB_MAX; i++) | 1559 | for (i = 0 ; i < FB_MAX; i++) |
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c index 81aa3129c17d..0a08f1341227 100644 --- a/drivers/video/fbsysfs.c +++ b/drivers/video/fbsysfs.c | |||
| @@ -80,6 +80,7 @@ EXPORT_SYMBOL(framebuffer_alloc); | |||
| 80 | */ | 80 | */ |
| 81 | void framebuffer_release(struct fb_info *info) | 81 | void framebuffer_release(struct fb_info *info) |
| 82 | { | 82 | { |
| 83 | kfree(info->apertures); | ||
| 83 | kfree(info); | 84 | kfree(info); |
| 84 | } | 85 | } |
| 85 | EXPORT_SYMBOL(framebuffer_release); | 86 | EXPORT_SYMBOL(framebuffer_release); |
diff --git a/drivers/video/offb.c b/drivers/video/offb.c index 61f8b8f919b0..46dda7d8aaee 100644 --- a/drivers/video/offb.c +++ b/drivers/video/offb.c | |||
| @@ -285,7 +285,7 @@ static void offb_destroy(struct fb_info *info) | |||
| 285 | { | 285 | { |
| 286 | if (info->screen_base) | 286 | if (info->screen_base) |
| 287 | iounmap(info->screen_base); | 287 | iounmap(info->screen_base); |
| 288 | release_mem_region(info->aperture_base, info->aperture_size); | 288 | release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); |
| 289 | framebuffer_release(info); | 289 | framebuffer_release(info); |
| 290 | } | 290 | } |
| 291 | 291 | ||
| @@ -491,8 +491,11 @@ static void __init offb_init_fb(const char *name, const char *full_name, | |||
| 491 | var->vmode = FB_VMODE_NONINTERLACED; | 491 | var->vmode = FB_VMODE_NONINTERLACED; |
| 492 | 492 | ||
| 493 | /* set offb aperture size for generic probing */ | 493 | /* set offb aperture size for generic probing */ |
| 494 | info->aperture_base = address; | 494 | info->apertures = alloc_apertures(1); |
| 495 | info->aperture_size = fix->smem_len; | 495 | if (!info->apertures) |
| 496 | goto out_aper; | ||
| 497 | info->apertures->ranges[0].base = address; | ||
| 498 | info->apertures->ranges[0].size = fix->smem_len; | ||
| 496 | 499 | ||
| 497 | info->fbops = &offb_ops; | 500 | info->fbops = &offb_ops; |
| 498 | info->screen_base = ioremap(address, fix->smem_len); | 501 | info->screen_base = ioremap(address, fix->smem_len); |
| @@ -501,17 +504,20 @@ static void __init offb_init_fb(const char *name, const char *full_name, | |||
| 501 | 504 | ||
| 502 | fb_alloc_cmap(&info->cmap, 256, 0); | 505 | fb_alloc_cmap(&info->cmap, 256, 0); |
| 503 | 506 | ||
| 504 | if (register_framebuffer(info) < 0) { | 507 | if (register_framebuffer(info) < 0) |
| 505 | iounmap(par->cmap_adr); | 508 | goto out_err; |
| 506 | par->cmap_adr = NULL; | ||
| 507 | iounmap(info->screen_base); | ||
| 508 | framebuffer_release(info); | ||
| 509 | release_mem_region(res_start, res_size); | ||
| 510 | return; | ||
| 511 | } | ||
| 512 | 509 | ||
| 513 | printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n", | 510 | printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n", |
| 514 | info->node, full_name); | 511 | info->node, full_name); |
| 512 | return; | ||
| 513 | |||
| 514 | out_err: | ||
| 515 | iounmap(info->screen_base); | ||
| 516 | out_aper: | ||
| 517 | iounmap(par->cmap_adr); | ||
| 518 | par->cmap_adr = NULL; | ||
| 519 | framebuffer_release(info); | ||
| 520 | release_mem_region(res_start, res_size); | ||
| 515 | } | 521 | } |
| 516 | 522 | ||
| 517 | 523 | ||
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c index 0cadf7aee27e..090aa1a9be6e 100644 --- a/drivers/video/vesafb.c +++ b/drivers/video/vesafb.c | |||
| @@ -177,7 +177,7 @@ static void vesafb_destroy(struct fb_info *info) | |||
| 177 | { | 177 | { |
| 178 | if (info->screen_base) | 178 | if (info->screen_base) |
| 179 | iounmap(info->screen_base); | 179 | iounmap(info->screen_base); |
| 180 | release_mem_region(info->aperture_base, info->aperture_size); | 180 | release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); |
| 181 | framebuffer_release(info); | 181 | framebuffer_release(info); |
| 182 | } | 182 | } |
| 183 | 183 | ||
| @@ -295,8 +295,13 @@ static int __init vesafb_probe(struct platform_device *dev) | |||
| 295 | info->par = NULL; | 295 | info->par = NULL; |
| 296 | 296 | ||
| 297 | /* set vesafb aperture size for generic probing */ | 297 | /* set vesafb aperture size for generic probing */ |
| 298 | info->aperture_base = screen_info.lfb_base; | 298 | info->apertures = alloc_apertures(1); |
| 299 | info->aperture_size = size_total; | 299 | if (!info->apertures) { |
| 300 | err = -ENOMEM; | ||
| 301 | goto err; | ||
| 302 | } | ||
| 303 | info->apertures->ranges[0].base = screen_info.lfb_base; | ||
| 304 | info->apertures->ranges[0].size = size_total; | ||
| 300 | 305 | ||
| 301 | info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len); | 306 | info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len); |
| 302 | if (!info->screen_base) { | 307 | if (!info->screen_base) { |
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c index bf638a47a5b3..149c47ac7e93 100644 --- a/drivers/video/vga16fb.c +++ b/drivers/video/vga16fb.c | |||
| @@ -1263,10 +1263,19 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image | |||
| 1263 | vga_imageblit_color(info, image); | 1263 | vga_imageblit_color(info, image); |
| 1264 | } | 1264 | } |
| 1265 | 1265 | ||
| 1266 | static void vga16fb_destroy(struct fb_info *info) | ||
| 1267 | { | ||
| 1268 | iounmap(info->screen_base); | ||
| 1269 | fb_dealloc_cmap(&info->cmap); | ||
| 1270 | /* XXX unshare VGA regions */ | ||
| 1271 | framebuffer_release(info); | ||
| 1272 | } | ||
| 1273 | |||
| 1266 | static struct fb_ops vga16fb_ops = { | 1274 | static struct fb_ops vga16fb_ops = { |
| 1267 | .owner = THIS_MODULE, | 1275 | .owner = THIS_MODULE, |
| 1268 | .fb_open = vga16fb_open, | 1276 | .fb_open = vga16fb_open, |
| 1269 | .fb_release = vga16fb_release, | 1277 | .fb_release = vga16fb_release, |
| 1278 | .fb_destroy = vga16fb_destroy, | ||
| 1270 | .fb_check_var = vga16fb_check_var, | 1279 | .fb_check_var = vga16fb_check_var, |
| 1271 | .fb_set_par = vga16fb_set_par, | 1280 | .fb_set_par = vga16fb_set_par, |
| 1272 | .fb_setcolreg = vga16fb_setcolreg, | 1281 | .fb_setcolreg = vga16fb_setcolreg, |
| @@ -1306,6 +1315,11 @@ static int __devinit vga16fb_probe(struct platform_device *dev) | |||
| 1306 | ret = -ENOMEM; | 1315 | ret = -ENOMEM; |
| 1307 | goto err_fb_alloc; | 1316 | goto err_fb_alloc; |
| 1308 | } | 1317 | } |
| 1318 | info->apertures = alloc_apertures(1); | ||
| 1319 | if (!info->apertures) { | ||
| 1320 | ret = -ENOMEM; | ||
| 1321 | goto err_ioremap; | ||
| 1322 | } | ||
| 1309 | 1323 | ||
| 1310 | /* XXX share VGA_FB_PHYS and I/O region with vgacon and others */ | 1324 | /* XXX share VGA_FB_PHYS and I/O region with vgacon and others */ |
| 1311 | info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS, 0); | 1325 | info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS, 0); |
| @@ -1335,7 +1349,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev) | |||
| 1335 | info->fix = vga16fb_fix; | 1349 | info->fix = vga16fb_fix; |
| 1336 | /* supports rectangles with widths of multiples of 8 */ | 1350 | /* supports rectangles with widths of multiples of 8 */ |
| 1337 | info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31; | 1351 | info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31; |
| 1338 | info->flags = FBINFO_FLAG_DEFAULT | | 1352 | info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE | |
| 1339 | FBINFO_HWACCEL_YPAN; | 1353 | FBINFO_HWACCEL_YPAN; |
| 1340 | 1354 | ||
| 1341 | i = (info->var.bits_per_pixel == 8) ? 256 : 16; | 1355 | i = (info->var.bits_per_pixel == 8) ? 256 : 16; |
| @@ -1354,6 +1368,9 @@ static int __devinit vga16fb_probe(struct platform_device *dev) | |||
| 1354 | 1368 | ||
| 1355 | vga16fb_update_fix(info); | 1369 | vga16fb_update_fix(info); |
| 1356 | 1370 | ||
| 1371 | info->apertures->ranges[0].base = VGA_FB_PHYS; | ||
| 1372 | info->apertures->ranges[0].size = VGA_FB_PHYS_LEN; | ||
| 1373 | |||
| 1357 | if (register_framebuffer(info) < 0) { | 1374 | if (register_framebuffer(info) < 0) { |
| 1358 | printk(KERN_ERR "vga16fb: unable to register framebuffer\n"); | 1375 | printk(KERN_ERR "vga16fb: unable to register framebuffer\n"); |
| 1359 | ret = -EINVAL; | 1376 | ret = -EINVAL; |
| @@ -1380,13 +1397,8 @@ static int vga16fb_remove(struct platform_device *dev) | |||
| 1380 | { | 1397 | { |
| 1381 | struct fb_info *info = platform_get_drvdata(dev); | 1398 | struct fb_info *info = platform_get_drvdata(dev); |
| 1382 | 1399 | ||
| 1383 | if (info) { | 1400 | if (info) |
| 1384 | unregister_framebuffer(info); | 1401 | unregister_framebuffer(info); |
| 1385 | iounmap(info->screen_base); | ||
| 1386 | fb_dealloc_cmap(&info->cmap); | ||
| 1387 | /* XXX unshare VGA regions */ | ||
| 1388 | framebuffer_release(info); | ||
| 1389 | } | ||
| 1390 | 1402 | ||
| 1391 | return 0; | 1403 | return 0; |
| 1392 | } | 1404 | } |
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index c560364663a5..93a1a31b9c2d 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <linux/idr.h> | 31 | #include <linux/idr.h> |
| 32 | 32 | ||
| 33 | #include <linux/fb.h> | 33 | #include <linux/fb.h> |
| 34 | #include <linux/slow-work.h> | ||
| 34 | 35 | ||
| 35 | struct drm_device; | 36 | struct drm_device; |
| 36 | struct drm_mode_set; | 37 | struct drm_mode_set; |
| @@ -460,6 +461,15 @@ enum drm_connector_force { | |||
| 460 | DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ | 461 | DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ |
| 461 | }; | 462 | }; |
| 462 | 463 | ||
| 464 | /* should we poll this connector for connects and disconnects */ | ||
| 465 | /* hot plug detectable */ | ||
| 466 | #define DRM_CONNECTOR_POLL_HPD (1 << 0) | ||
| 467 | /* poll for connections */ | ||
| 468 | #define DRM_CONNECTOR_POLL_CONNECT (1 << 1) | ||
| 469 | /* can cleanly poll for disconnections without flickering the screen */ | ||
| 470 | /* DACs should rarely do this without a lot of testing */ | ||
| 471 | #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) | ||
| 472 | |||
| 463 | /** | 473 | /** |
| 464 | * drm_connector - central DRM connector control structure | 474 | * drm_connector - central DRM connector control structure |
| 465 | * @crtc: CRTC this connector is currently connected to, NULL if none | 475 | * @crtc: CRTC this connector is currently connected to, NULL if none |
| @@ -504,6 +514,8 @@ struct drm_connector { | |||
| 504 | u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; | 514 | u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; |
| 505 | uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; | 515 | uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; |
| 506 | 516 | ||
| 517 | uint8_t polled; /* DRM_CONNECTOR_POLL_* */ | ||
| 518 | |||
| 507 | /* requested DPMS state */ | 519 | /* requested DPMS state */ |
| 508 | int dpms; | 520 | int dpms; |
| 509 | 521 | ||
| @@ -543,6 +555,7 @@ struct drm_mode_set { | |||
| 543 | */ | 555 | */ |
| 544 | struct drm_mode_config_funcs { | 556 | struct drm_mode_config_funcs { |
| 545 | struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd); | 557 | struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd); |
| 558 | void (*output_poll_changed)(struct drm_device *dev); | ||
| 546 | }; | 559 | }; |
| 547 | 560 | ||
| 548 | struct drm_mode_group { | 561 | struct drm_mode_group { |
| @@ -580,6 +593,10 @@ struct drm_mode_config { | |||
| 580 | struct drm_mode_config_funcs *funcs; | 593 | struct drm_mode_config_funcs *funcs; |
| 581 | resource_size_t fb_base; | 594 | resource_size_t fb_base; |
| 582 | 595 | ||
| 596 | /* output poll support */ | ||
| 597 | bool poll_enabled; | ||
| 598 | struct delayed_slow_work output_poll_slow_work; | ||
| 599 | |||
| 583 | /* pointers to standard properties */ | 600 | /* pointers to standard properties */ |
| 584 | struct list_head property_blob_list; | 601 | struct list_head property_blob_list; |
| 585 | struct drm_property *edid_property; | 602 | struct drm_property *edid_property; |
| @@ -787,4 +804,6 @@ extern int drm_add_modes_noedid(struct drm_connector *connector, | |||
| 787 | int hdisplay, int vdisplay); | 804 | int hdisplay, int vdisplay); |
| 788 | 805 | ||
| 789 | extern bool drm_edid_is_valid(struct edid *edid); | 806 | extern bool drm_edid_is_valid(struct edid *edid); |
| 807 | struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, | ||
| 808 | int hsize, int vsize, int fresh); | ||
| 790 | #endif /* __DRM_CRTC_H__ */ | 809 | #endif /* __DRM_CRTC_H__ */ |
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index b1fa0f8cfa60..dc5873c21e45 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h | |||
| @@ -127,4 +127,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector, | |||
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | extern int drm_helper_resume_force_mode(struct drm_device *dev); | 129 | extern int drm_helper_resume_force_mode(struct drm_device *dev); |
| 130 | extern void drm_kms_helper_poll_init(struct drm_device *dev); | ||
| 131 | extern void drm_kms_helper_poll_fini(struct drm_device *dev); | ||
| 132 | extern void drm_helper_hpd_irq_event(struct drm_device *dev); | ||
| 130 | #endif | 133 | #endif |
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index d33c3e038606..39e2cc5c7e66 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h | |||
| @@ -120,7 +120,7 @@ struct detailed_non_pixel { | |||
| 120 | struct detailed_data_string str; | 120 | struct detailed_data_string str; |
| 121 | struct detailed_data_monitor_range range; | 121 | struct detailed_data_monitor_range range; |
| 122 | struct detailed_data_wpindex color; | 122 | struct detailed_data_wpindex color; |
| 123 | struct std_timing timings[5]; | 123 | struct std_timing timings[6]; |
| 124 | struct cvt_timing cvt[4]; | 124 | struct cvt_timing cvt[4]; |
| 125 | } data; | 125 | } data; |
| 126 | } __attribute__((packed)); | 126 | } __attribute__((packed)); |
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 9b55a94feada..f0a6afc47e76 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h | |||
| @@ -30,8 +30,6 @@ | |||
| 30 | #ifndef DRM_FB_HELPER_H | 30 | #ifndef DRM_FB_HELPER_H |
| 31 | #define DRM_FB_HELPER_H | 31 | #define DRM_FB_HELPER_H |
| 32 | 32 | ||
| 33 | #include <linux/slow-work.h> | ||
| 34 | |||
| 35 | struct drm_fb_helper; | 33 | struct drm_fb_helper; |
| 36 | 34 | ||
| 37 | struct drm_fb_helper_crtc { | 35 | struct drm_fb_helper_crtc { |
| @@ -71,9 +69,6 @@ struct drm_fb_helper_funcs { | |||
| 71 | 69 | ||
| 72 | int (*fb_probe)(struct drm_fb_helper *helper, | 70 | int (*fb_probe)(struct drm_fb_helper *helper, |
| 73 | struct drm_fb_helper_surface_size *sizes); | 71 | struct drm_fb_helper_surface_size *sizes); |
| 74 | |||
| 75 | void (*fb_output_status_changed)(struct drm_fb_helper *helper); | ||
| 76 | |||
| 77 | }; | 72 | }; |
| 78 | 73 | ||
| 79 | struct drm_fb_helper_connector { | 74 | struct drm_fb_helper_connector { |
| @@ -95,8 +90,6 @@ struct drm_fb_helper { | |||
| 95 | u32 pseudo_palette[17]; | 90 | u32 pseudo_palette[17]; |
| 96 | struct list_head kernel_fb_list; | 91 | struct list_head kernel_fb_list; |
| 97 | 92 | ||
| 98 | struct delayed_slow_work output_status_change_slow_work; | ||
| 99 | bool poll_enabled; | ||
| 100 | /* we got a hotplug but fbdev wasn't running the console | 93 | /* we got a hotplug but fbdev wasn't running the console |
| 101 | delay until next set_par */ | 94 | delay until next set_par */ |
| 102 | bool delayed_hotplug; | 95 | bool delayed_hotplug; |
| @@ -107,7 +100,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper, | |||
| 107 | 100 | ||
| 108 | int drm_fb_helper_init(struct drm_device *dev, | 101 | int drm_fb_helper_init(struct drm_device *dev, |
| 109 | struct drm_fb_helper *helper, int crtc_count, | 102 | struct drm_fb_helper *helper, int crtc_count, |
| 110 | int max_conn, bool polled); | 103 | int max_conn); |
| 111 | void drm_fb_helper_fini(struct drm_fb_helper *helper); | 104 | void drm_fb_helper_fini(struct drm_fb_helper *helper); |
| 112 | int drm_fb_helper_blank(int blank, struct fb_info *info); | 105 | int drm_fb_helper_blank(int blank, struct fb_info *info); |
| 113 | int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, | 106 | int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, |
| @@ -130,10 +123,8 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, | |||
| 130 | 123 | ||
| 131 | int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); | 124 | int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); |
| 132 | 125 | ||
| 133 | bool drm_helper_fb_hotplug_event(struct drm_fb_helper *fb_helper, | 126 | bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); |
| 134 | bool polled); | ||
| 135 | bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); | 127 | bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); |
| 136 | int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); | 128 | int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); |
| 137 | 129 | ||
| 138 | void drm_helper_fb_hpd_irq_event(struct drm_fb_helper *fb_helper); | ||
| 139 | #endif | 130 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/include/drm/drm_fixed.h index 3d4d84e078ac..4a08a664ff1f 100644 --- a/drivers/gpu/drm/radeon/radeon_fixed.h +++ b/include/drm/drm_fixed.h | |||
| @@ -21,41 +21,41 @@ | |||
| 21 | * | 21 | * |
| 22 | * Authors: Dave Airlie | 22 | * Authors: Dave Airlie |
| 23 | */ | 23 | */ |
| 24 | #ifndef RADEON_FIXED_H | 24 | #ifndef DRM_FIXED_H |
| 25 | #define RADEON_FIXED_H | 25 | #define DRM_FIXED_H |
| 26 | 26 | ||
| 27 | typedef union rfixed { | 27 | typedef union dfixed { |
| 28 | u32 full; | 28 | u32 full; |
| 29 | } fixed20_12; | 29 | } fixed20_12; |
| 30 | 30 | ||
| 31 | 31 | ||
| 32 | #define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ | 32 | #define dfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ |
| 33 | #define rfixed_const_half(A) (u32)(((A) << 12) + 2048) | 33 | #define dfixed_const_half(A) (u32)(((A) << 12) + 2048) |
| 34 | #define rfixed_const_666(A) (u32)(((A) << 12) + 2731) | 34 | #define dfixed_const_666(A) (u32)(((A) << 12) + 2731) |
| 35 | #define rfixed_const_8(A) (u32)(((A) << 12) + 3277) | 35 | #define dfixed_const_8(A) (u32)(((A) << 12) + 3277) |
| 36 | #define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) | 36 | #define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) |
| 37 | #define fixed_init(A) { .full = rfixed_const((A)) } | 37 | #define dfixed_init(A) { .full = dfixed_const((A)) } |
| 38 | #define fixed_init_half(A) { .full = rfixed_const_half((A)) } | 38 | #define dfixed_init_half(A) { .full = dfixed_const_half((A)) } |
| 39 | #define rfixed_trunc(A) ((A).full >> 12) | 39 | #define dfixed_trunc(A) ((A).full >> 12) |
| 40 | 40 | ||
| 41 | static inline u32 rfixed_floor(fixed20_12 A) | 41 | static inline u32 dfixed_floor(fixed20_12 A) |
| 42 | { | 42 | { |
| 43 | u32 non_frac = rfixed_trunc(A); | 43 | u32 non_frac = dfixed_trunc(A); |
| 44 | 44 | ||
| 45 | return rfixed_const(non_frac); | 45 | return dfixed_const(non_frac); |
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | static inline u32 rfixed_ceil(fixed20_12 A) | 48 | static inline u32 dfixed_ceil(fixed20_12 A) |
| 49 | { | 49 | { |
| 50 | u32 non_frac = rfixed_trunc(A); | 50 | u32 non_frac = dfixed_trunc(A); |
| 51 | 51 | ||
| 52 | if (A.full > rfixed_const(non_frac)) | 52 | if (A.full > dfixed_const(non_frac)) |
| 53 | return rfixed_const(non_frac + 1); | 53 | return dfixed_const(non_frac + 1); |
| 54 | else | 54 | else |
| 55 | return rfixed_const(non_frac); | 55 | return dfixed_const(non_frac); |
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B) | 58 | static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) |
| 59 | { | 59 | { |
| 60 | u64 tmp = ((u64)A.full << 13); | 60 | u64 tmp = ((u64)A.full << 13); |
| 61 | 61 | ||
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 81e614bf2dc3..3ff9fc071dfe 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h | |||
| @@ -902,6 +902,7 @@ struct drm_radeon_cs { | |||
| 902 | #define RADEON_INFO_NUM_GB_PIPES 0x01 | 902 | #define RADEON_INFO_NUM_GB_PIPES 0x01 |
| 903 | #define RADEON_INFO_NUM_Z_PIPES 0x02 | 903 | #define RADEON_INFO_NUM_Z_PIPES 0x02 |
| 904 | #define RADEON_INFO_ACCEL_WORKING 0x03 | 904 | #define RADEON_INFO_ACCEL_WORKING 0x03 |
| 905 | #define RADEON_INFO_CRTC_FROM_ID 0x04 | ||
| 905 | 906 | ||
| 906 | struct drm_radeon_info { | 907 | struct drm_radeon_info { |
| 907 | uint32_t request; | 908 | uint32_t request; |
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 3e273e0b9417..267a86c74e2e 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
| @@ -362,6 +362,23 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo, | |||
| 362 | extern void ttm_bo_unref(struct ttm_buffer_object **bo); | 362 | extern void ttm_bo_unref(struct ttm_buffer_object **bo); |
| 363 | 363 | ||
| 364 | /** | 364 | /** |
| 365 | * ttm_bo_lock_delayed_workqueue | ||
| 366 | * | ||
| 367 | * Prevent the delayed workqueue from running. | ||
| 368 | * Returns | ||
| 369 | * True if the workqueue was queued at the time | ||
| 370 | */ | ||
| 371 | extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); | ||
| 372 | |||
| 373 | /** | ||
| 374 | * ttm_bo_unlock_delayed_workqueue | ||
| 375 | * | ||
| 376 | * Allows the delayed workqueue to run. | ||
| 377 | */ | ||
| 378 | extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, | ||
| 379 | int resched); | ||
| 380 | |||
| 381 | /** | ||
| 365 | * ttm_bo_synccpu_write_grab | 382 | * ttm_bo_synccpu_write_grab |
| 366 | * | 383 | * |
| 367 | * @bo: The buffer object: | 384 | * @bo: The buffer object: |
diff --git a/include/linux/fb.h b/include/linux/fb.h index c10163b4c40e..1296af45169d 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
| @@ -403,6 +403,7 @@ struct fb_cursor { | |||
| 403 | #include <linux/notifier.h> | 403 | #include <linux/notifier.h> |
| 404 | #include <linux/list.h> | 404 | #include <linux/list.h> |
| 405 | #include <linux/backlight.h> | 405 | #include <linux/backlight.h> |
| 406 | #include <linux/slab.h> | ||
| 406 | #include <asm/io.h> | 407 | #include <asm/io.h> |
| 407 | 408 | ||
| 408 | struct vm_area_struct; | 409 | struct vm_area_struct; |
| @@ -862,10 +863,22 @@ struct fb_info { | |||
| 862 | /* we need the PCI or similiar aperture base/size not | 863 | /* we need the PCI or similiar aperture base/size not |
| 863 | smem_start/size as smem_start may just be an object | 864 | smem_start/size as smem_start may just be an object |
| 864 | allocated inside the aperture so may not actually overlap */ | 865 | allocated inside the aperture so may not actually overlap */ |
| 865 | resource_size_t aperture_base; | 866 | struct apertures_struct { |
| 866 | resource_size_t aperture_size; | 867 | unsigned int count; |
| 868 | struct aperture { | ||
| 869 | resource_size_t base; | ||
| 870 | resource_size_t size; | ||
| 871 | } ranges[0]; | ||
| 872 | } *apertures; | ||
| 867 | }; | 873 | }; |
| 868 | 874 | ||
| 875 | static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { | ||
| 876 | struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct) | ||
| 877 | + max_num * sizeof(struct aperture), GFP_KERNEL); | ||
| 878 | a->count = max_num; | ||
| 879 | return a; | ||
| 880 | } | ||
| 881 | |||
| 869 | #ifdef MODULE | 882 | #ifdef MODULE |
| 870 | #define FBINFO_DEFAULT FBINFO_MODULE | 883 | #define FBINFO_DEFAULT FBINFO_MODULE |
| 871 | #else | 884 | #else |
| @@ -958,6 +971,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, | |||
| 958 | /* drivers/video/fbmem.c */ | 971 | /* drivers/video/fbmem.c */ |
| 959 | extern int register_framebuffer(struct fb_info *fb_info); | 972 | extern int register_framebuffer(struct fb_info *fb_info); |
| 960 | extern int unregister_framebuffer(struct fb_info *fb_info); | 973 | extern int unregister_framebuffer(struct fb_info *fb_info); |
| 974 | extern void remove_conflicting_framebuffers(struct apertures_struct *a, | ||
| 975 | const char *name, bool primary); | ||
| 961 | extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); | 976 | extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); |
| 962 | extern int fb_show_logo(struct fb_info *fb_info, int rotate); | 977 | extern int fb_show_logo(struct fb_info *fb_info, int rotate); |
| 963 | extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); | 978 | extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); |
