| field     | value                                          | date                      |
|-----------|------------------------------------------------|---------------------------|
| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2010-11-19 13:27:57 -0500 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-11-19 13:27:57 -0500 |
| commit    | 764bc5691765470b486ec70916935c771d7f5bb1       |                           |
| tree      | d23c7e2d66524cb9e7b18550d69cde6bcc3f651f       |                           |
| parent    | 589136bfa784a4558b397f017ca2f06f0ca9080e       |                           |
| parent    | 164bcb94bc821fcbac752e809b4ac7c6f15d13b5       |                           |
Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (40 commits)
drm/radeon/kms: i2c s/sprintf/snprintf/g for safety
drm/radeon/kms: fix i2c pad masks on rs4xx
drm/ttm: Fix up a theoretical deadlock
drm/radeon/kms: fix tiling info on evergreen
drm/radeon/kms: fix alignment when allocating buffers
drm/vmwgfx: Fix up an error path during bo creation
drm/radeon/kms: register an i2c adapter name for the dp aux bus
drm/radeon/kms/atom: add proper external encoders support
drm/radeon/kms/atom: cleanup and unify DVO handling
drm/radeon/kms: properly power up/down the eDP panel as needed (v4)
drm/radeon/kms/atom: set sane defaults in atombios_get_encoder_mode()
drm/radeon/kms: turn the backlight off explicitly for dpms
drm/radeon/kms: fix typo in r600 cs checker
drm: radeon: fix error value sign
drm/radeon/kms: fix and unify tiled buffer alignment checking for r6xx/7xx
nouveau: Acknowledge HPD irq in handler, not bottom half
drm/nouveau: Fix a few confusions between "chipset" and "card_type".
drm/nouveau: don't expose backlight control when available through ACPI
drm/nouveau/pm: improve memtiming mappings
drm/nouveau: Make PCIE GART size depend on the available RAMIN space.
...
55 files changed, 1021 insertions, 424 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 406228f4a2a0..b14c81110575 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -31,6 +31,7 @@
  */
 
 #include <linux/backlight.h>
+#include <linux/acpi.h>
 
 #include "drmP.h"
 #include "nouveau_drv.h"
@@ -136,6 +137,14 @@ int nouveau_backlight_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
+#ifdef CONFIG_ACPI
+	if (acpi_video_backlight_support()) {
+		NV_INFO(dev, "ACPI backlight interface available, "
+			"not registering our own\n");
+		return 0;
+	}
+#endif
+
 	switch (dev_priv->card_type) {
 	case NV_40:
 		return nouveau_nv40_backlight_init(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 5f21030a293b..b2293576f278 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6829,7 +6829,7 @@ nouveau_bios_posted(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	unsigned htotal;
 
-	if (dev_priv->chipset >= NV_50) {
+	if (dev_priv->card_type >= NV_50) {
 		if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
 		    NVReadVgaCrtc(dev, 0, 0x1a) == 0)
 			return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 80353e2b8409..c41e1c200ef5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -143,8 +143,10 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	nvbo->no_vm = no_vm;
 	nvbo->tile_mode = tile_mode;
 	nvbo->tile_flags = tile_flags;
+	nvbo->bo.bdev = &dev_priv->ttm.bdev;
 
-	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
+	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
+			       &align, &size);
 	align >>= PAGE_SHIFT;
 
 	nouveau_bo_placement_set(nvbo, flags, 0);
@@ -176,6 +178,31 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
 		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
 }
 
+static void
+set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+
+	if (dev_priv->card_type == NV_10 &&
+	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+		/*
+		 * Make sure that the color and depth buffers are handled
+		 * by independent memory controller units. Up to a 9x
+		 * speed up when alpha-blending and depth-test are enabled
+		 * at the same time.
+		 */
+		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
+
+		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+			nvbo->placement.fpfn = vram_pages / 2;
+			nvbo->placement.lpfn = ~0;
+		} else {
+			nvbo->placement.fpfn = 0;
+			nvbo->placement.lpfn = vram_pages / 2;
+		}
+	}
+}
+
 void
 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 {
@@ -190,6 +217,8 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 	pl->busy_placement = nvbo->busy_placements;
 	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
 			   type | busy, flags);
+
+	set_placement_range(nvbo, type);
 }
 
 int
@@ -525,7 +554,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 			stride = 16 * 4;
 			height = amount / stride;
 
-			if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+			if (new_mem->mem_type == TTM_PL_VRAM &&
+			    nouveau_bo_tile_layout(nvbo)) {
 				ret = RING_SPACE(chan, 8);
 				if (ret)
 					return ret;
@@ -546,7 +576,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 				BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
 				OUT_RING  (chan, 1);
 			}
-			if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+			if (old_mem->mem_type == TTM_PL_VRAM &&
+			    nouveau_bo_tile_layout(nvbo)) {
 				ret = RING_SPACE(chan, 8);
 				if (ret)
 					return ret;
@@ -753,7 +784,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 	if (dev_priv->card_type == NV_50) {
 		ret = nv50_mem_vm_bind_linear(dev,
 					      offset + dev_priv->vm_vram_base,
-					      new_mem->size, nvbo->tile_flags,
+					      new_mem->size,
+					      nouveau_bo_tile_layout(nvbo),
 					      offset);
 		if (ret)
 			return ret;
@@ -894,7 +926,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	 * nothing to do here.
 	 */
 	if (bo->mem.mem_type != TTM_PL_VRAM) {
-		if (dev_priv->card_type < NV_50 || !nvbo->tile_flags)
+		if (dev_priv->card_type < NV_50 ||
+		    !nouveau_bo_tile_layout(nvbo))
 			return 0;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 0871495096fa..52c356e9a3d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -281,7 +281,7 @@ detect_analog:
 	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
 	if (!nv_encoder && !nouveau_tv_disable)
 		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
-	if (nv_encoder) {
+	if (nv_encoder && force) {
 		struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
 		struct drm_encoder_helper_funcs *helper =
 						encoder->helper_private;
@@ -641,11 +641,28 @@ nouveau_connector_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
+static unsigned
+get_tmds_link_bandwidth(struct drm_connector *connector)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+	struct dcb_entry *dcb = nv_connector->detected_encoder->dcb;
+
+	if (dcb->location != DCB_LOC_ON_CHIP ||
+	    dev_priv->chipset >= 0x46)
+		return 165000;
+	else if (dev_priv->chipset >= 0x40)
+		return 155000;
+	else if (dev_priv->chipset >= 0x18)
+		return 135000;
+	else
+		return 112000;
+}
+
 static int
 nouveau_connector_mode_valid(struct drm_connector *connector,
 			     struct drm_display_mode *mode)
 {
-	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
 	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -663,11 +680,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
 		max_clock = 400000;
 		break;
 	case OUTPUT_TMDS:
-		if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
-		    !nv_encoder->dcb->duallink_possible)
-			max_clock = 165000;
-		else
-			max_clock = 330000;
+		max_clock = get_tmds_link_bandwidth(connector);
+		if (nouveau_duallink && nv_encoder->dcb->duallink_possible)
+			max_clock *= 2;
 		break;
 	case OUTPUT_ANALOG:
 		max_clock = nv_encoder->dcb->crtconf.maxfreq;
@@ -709,44 +724,6 @@ nouveau_connector_best_encoder(struct drm_connector *connector)
 	return NULL;
 }
 
-void
-nouveau_connector_set_polling(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	bool spare_crtc = false;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		spare_crtc |= !crtc->enabled;
-
-	connector->polled = 0;
-
-	switch (connector->connector_type) {
-	case DRM_MODE_CONNECTOR_VGA:
-	case DRM_MODE_CONNECTOR_TV:
-		if (dev_priv->card_type >= NV_50 ||
-		    (nv_gf4_disp_arch(dev) && spare_crtc))
-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-		break;
-
-	case DRM_MODE_CONNECTOR_DVII:
-	case DRM_MODE_CONNECTOR_DVID:
-	case DRM_MODE_CONNECTOR_HDMIA:
-	case DRM_MODE_CONNECTOR_DisplayPort:
-	case DRM_MODE_CONNECTOR_eDP:
-		if (dev_priv->card_type >= NV_50)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
-		else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
-			 spare_crtc)
-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-		break;
-
-	default:
-		break;
-	}
-}
-
 static const struct drm_connector_helper_funcs
 nouveau_connector_helper_funcs = {
 	.get_modes = nouveau_connector_get_modes,
@@ -872,6 +849,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 				dev->mode_config.scaling_mode_property,
 				nv_connector->scaling_mode);
 		}
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 		/* fall-through */
 	case DCB_CONNECTOR_TV_0:
 	case DCB_CONNECTOR_TV_1:
@@ -888,11 +866,16 @@ nouveau_connector_create(struct drm_device *dev, int index)
 				dev->mode_config.dithering_mode_property,
 				nv_connector->use_dithering ?
 				DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+
+		if (dcb->type != DCB_CONNECTOR_LVDS) {
+			if (dev_priv->card_type >= NV_50)
+				connector->polled = DRM_CONNECTOR_POLL_HPD;
+			else
+				connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+		}
 		break;
 	}
 
-	nouveau_connector_set_polling(connector);
-
 	drm_sysfs_connector_add(connector);
 	dcb->drm = connector;
 	return dcb->drm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index c21ed6b16f88..711b1e9203af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -52,9 +52,6 @@ static inline struct nouveau_connector *nouveau_connector(
 struct drm_connector *
 nouveau_connector_create(struct drm_device *, int index);
 
-void
-nouveau_connector_set_polling(struct drm_connector *);
-
 int
 nouveau_connector_bpp(struct drm_connector *);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3a07e580d27a..1c7db64c03bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -100,6 +100,9 @@ struct nouveau_bo {
 	int pin_refcnt;
 };
 
+#define nouveau_bo_tile_layout(nvbo) \
+	((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
+
 static inline struct nouveau_bo *
 nouveau_bo(struct ttm_buffer_object *bo)
 {
@@ -304,6 +307,7 @@ struct nouveau_fifo_engine {
 	void (*destroy_context)(struct nouveau_channel *);
 	int  (*load_context)(struct nouveau_channel *);
 	int  (*unload_context)(struct drm_device *);
+	void (*tlb_flush)(struct drm_device *dev);
 };
 
 struct nouveau_pgraph_object_method {
@@ -336,6 +340,7 @@ struct nouveau_pgraph_engine {
 	void (*destroy_context)(struct nouveau_channel *);
 	int  (*load_context)(struct nouveau_channel *);
 	int  (*unload_context)(struct drm_device *);
+	void (*tlb_flush)(struct drm_device *dev);
 
 	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
 				  uint32_t size, uint32_t pitch);
@@ -485,13 +490,13 @@ enum nv04_fp_display_regs {
 };
 
 struct nv04_crtc_reg {
-	unsigned char MiscOutReg;     /* */
+	unsigned char MiscOutReg;
 	uint8_t CRTC[0xa0];
 	uint8_t CR58[0x10];
 	uint8_t Sequencer[5];
 	uint8_t Graphics[9];
 	uint8_t Attribute[21];
-	unsigned char DAC[768];       /* Internal Colorlookuptable */
+	unsigned char DAC[768];
 
 	/* PCRTC regs */
 	uint32_t fb_start;
@@ -539,43 +544,9 @@ struct nv04_output_reg {
 };
 
 struct nv04_mode_state {
-	uint32_t bpp;
-	uint32_t width;
-	uint32_t height;
-	uint32_t interlace;
-	uint32_t repaint0;
-	uint32_t repaint1;
-	uint32_t screen;
-	uint32_t scale;
-	uint32_t dither;
-	uint32_t extra;
-	uint32_t fifo;
-	uint32_t pixel;
-	uint32_t horiz;
-	int arbitration0;
-	int arbitration1;
-	uint32_t pll;
-	uint32_t pllB;
-	uint32_t vpll;
-	uint32_t vpll2;
-	uint32_t vpllB;
-	uint32_t vpll2B;
+	struct nv04_crtc_reg crtc_reg[2];
 	uint32_t pllsel;
 	uint32_t sel_clk;
-	uint32_t general;
-	uint32_t crtcOwner;
-	uint32_t head;
-	uint32_t head2;
-	uint32_t cursorConfig;
-	uint32_t cursor0;
-	uint32_t cursor1;
-	uint32_t cursor2;
-	uint32_t timingH;
-	uint32_t timingV;
-	uint32_t displayV;
-	uint32_t crtcSync;
-
-	struct nv04_crtc_reg crtc_reg[2];
 };
 
 enum nouveau_card_type {
@@ -613,6 +584,12 @@ struct drm_nouveau_private {
 	struct work_struct irq_work;
 	struct work_struct hpd_work;
 
+	struct {
+		spinlock_t lock;
+		uint32_t hpd0_bits;
+		uint32_t hpd1_bits;
+	} hpd_state;
+
 	struct list_head vbl_waiting;
 
 	struct {
@@ -1045,6 +1022,7 @@ extern int nv50_fifo_create_context(struct nouveau_channel *);
 extern void nv50_fifo_destroy_context(struct nouveau_channel *);
 extern int  nv50_fifo_load_context(struct nouveau_channel *);
 extern int  nv50_fifo_unload_context(struct drm_device *);
+extern void nv50_fifo_tlb_flush(struct drm_device *dev);
 
 /* nvc0_fifo.c */
 extern int  nvc0_fifo_init(struct drm_device *);
@@ -1122,6 +1100,8 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
 extern int  nv50_graph_unload_context(struct drm_device *);
 extern void nv50_graph_context_switch(struct drm_device *);
 extern int  nv50_grctx_init(struct nouveau_grctx *);
+extern void nv50_graph_tlb_flush(struct drm_device *dev);
+extern void nv86_graph_tlb_flush(struct drm_device *dev);
 
 /* nvc0_graph.c */
 extern int  nvc0_graph_init(struct drm_device *);
@@ -1239,7 +1219,6 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
-extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
 
 /* nouveau_fence.c */
 struct nouveau_fence;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 441b12420bb1..ab1bbfbf266e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -249,6 +249,7 @@ alloc_semaphore(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_semaphore *sema;
+	int ret;
 
 	if (!USE_SEMA(dev))
 		return NULL;
@@ -257,10 +258,14 @@ alloc_semaphore(struct drm_device *dev)
 	if (!sema)
 		goto fail;
 
+	ret = drm_mm_pre_get(&dev_priv->fence.heap);
+	if (ret)
+		goto fail;
+
 	spin_lock(&dev_priv->fence.lock);
 	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
 	if (sema->mem)
-		sema->mem = drm_mm_get_block(sema->mem, 4, 0);
+		sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
 	spin_unlock(&dev_priv->fence.lock);
 
 	if (!sema->mem)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5c4c929d7f74..9a1fdcf400c2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -107,23 +107,29 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
 }
 
 static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) {
-	switch (tile_flags) {
-	case 0x0000:
-	case 0x1800:
-	case 0x2800:
-	case 0x4800:
-	case 0x7000:
-	case 0x7400:
-	case 0x7a00:
-	case 0xe000:
-		break;
-	default:
-		NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
-		return false;
+nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50) {
+		switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
+		case 0x0000:
+		case 0x1800:
+		case 0x2800:
+		case 0x4800:
+		case 0x7000:
+		case 0x7400:
+		case 0x7a00:
+		case 0xe000:
+			return true;
+		}
+	} else {
+		if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+			return true;
 	}
 
-	return true;
+	NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
+	return false;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index bed669a54a2d..b9672a05c411 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -519,11 +519,11 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
 
 	struct pll_lims pll_lim;
 	struct nouveau_pll_vals pv;
-	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
+	enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0;
 
-	if (get_pll_limits(dev, pllreg, &pll_lim))
+	if (get_pll_limits(dev, pll, &pll_lim))
 		return;
-	nouveau_hw_get_pllvals(dev, pllreg, &pv);
+	nouveau_hw_get_pllvals(dev, pll, &pv);
 
 	if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
 	    pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
@@ -536,7 +536,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
 	pv.M1 = pll_lim.vco1.max_m;
 	pv.N1 = pll_lim.vco1.min_n;
 	pv.log2P = pll_lim.max_usable_log2p;
-	nouveau_hw_setpll(dev, pllreg, &pv);
+	nouveau_hw_setpll(dev, pll_lim.reg, &pv);
 }
 
 /*
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
index 869130f83602..2989090b9434 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.h
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -416,6 +416,25 @@ nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
 }
 
 static inline void
+nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
+
+	if (dev_priv->card_type == NV_04) {
+		/*
+		 * Hilarious, the 24th bit doesn't want to stick to
+		 * PCRTC_START...
+		 */
+		int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX);
+
+		NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX,
+			       (cre_heb & ~0x40) | ((offset >> 18) & 0x40));
+	}
+}
+
+static inline void
 nv_show_cursor(struct drm_device *dev, int head, bool show)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index fdd7e3de79c8..cb389d014326 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -256,7 +256,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
 	if (index >= DCB_MAX_NUM_I2C_ENTRIES)
 		return NULL;
 
-	if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
+	if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
 		uint32_t reg = 0xe500, val;
 
 		if (i2c->port_type == 6) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 6fd51a51c608..7bfd9e6c9d67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -42,6 +42,13 @@
 #include "nouveau_connector.h"
 #include "nv50_display.h"
 
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+static int nouveau_ratelimit(void)
+{
+	return __ratelimit(&nouveau_ratelimit_state);
+}
+
 void
 nouveau_irq_preinstall(struct drm_device *dev)
 {
@@ -53,6 +60,7 @@ nouveau_irq_preinstall(struct drm_device *dev)
 	if (dev_priv->card_type >= NV_50) {
 		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
 		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
+		spin_lock_init(&dev_priv->hpd_state.lock);
 		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
 	}
 }
@@ -202,8 +210,8 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
 		}
 
 		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
-			u32 get = nv_rd32(dev, 0x003244);
-			u32 put = nv_rd32(dev, 0x003240);
+			u32 dma_get = nv_rd32(dev, 0x003244);
+			u32 dma_put = nv_rd32(dev, 0x003240);
 			u32 push = nv_rd32(dev, 0x003220);
 			u32 state = nv_rd32(dev, 0x003228);
 
@@ -213,16 +221,18 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
 				u32 ib_get = nv_rd32(dev, 0x003334);
 				u32 ib_put = nv_rd32(dev, 0x003330);
 
-				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
-					"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
-					"State 0x%08x Push 0x%08x\n",
-					chid, ho_get, get, ho_put, put, ib_get, ib_put,
-					state, push);
+				if (nouveau_ratelimit())
+					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+						"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+						"State 0x%08x Push 0x%08x\n",
+						chid, ho_get, dma_get, ho_put,
+						dma_put, ib_get, ib_put, state,
+						push);
 
 				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
 				nv_wr32(dev, 0x003364, 0x00000000);
-				if (get != put || ho_get != ho_put) {
-					nv_wr32(dev, 0x003244, put);
+				if (dma_get != dma_put || ho_get != ho_put) {
+					nv_wr32(dev, 0x003244, dma_put);
 					nv_wr32(dev, 0x003328, ho_put);
 				} else
 				if (ib_get != ib_put) {
@@ -231,10 +241,10 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
 			} else {
 				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
 					"Put 0x%08x State 0x%08x Push 0x%08x\n",
-					chid, get, put, state, push);
+					chid, dma_get, dma_put, state, push);
 
-				if (get != put)
-					nv_wr32(dev, 0x003244, put);
+				if (dma_get != dma_put)
+					nv_wr32(dev, 0x003244, dma_put);
 			}
 
 			nv_wr32(dev, 0x003228, 0x00000000);
@@ -266,8 +276,9 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
 		}
 
 		if (status) {
-			NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
-				status, chid);
+			if (nouveau_ratelimit())
+				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+					status, chid);
 			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
 			status = 0;
 		}
@@ -544,13 +555,6 @@ nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
 		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
 }
 
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-static int nouveau_ratelimit(void)
-{
-	return __ratelimit(&nouveau_ratelimit_state);
-}
-
 
 static inline void
 nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a163c7c612e7..fe4a30dc4b42 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -33,9 +33,9 @@
 #include "drmP.h"
 #include "drm.h"
 #include "drm_sarea.h"
-#include "nouveau_drv.h"
 
-#define MIN(a,b) a < b ? a : b
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
 
 /*
  * NV10-NV40 tiling helpers
@@ -175,11 +175,10 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 			}
 		}
 	}
-	dev_priv->engine.instmem.flush(dev);
 
-	nv50_vm_flush(dev, 5);
-	nv50_vm_flush(dev, 0);
-	nv50_vm_flush(dev, 4);
+	dev_priv->engine.instmem.flush(dev);
+	dev_priv->engine.fifo.tlb_flush(dev);
+	dev_priv->engine.graph.tlb_flush(dev);
 	nv50_vm_flush(dev, 6);
 	return 0;
 }
@@ -209,11 +208,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 			pte++;
 		}
 	}
-	dev_priv->engine.instmem.flush(dev);
 
-	nv50_vm_flush(dev, 5);
-	nv50_vm_flush(dev, 0);
-	nv50_vm_flush(dev, 4);
+	dev_priv->engine.instmem.flush(dev);
+	dev_priv->engine.fifo.tlb_flush(dev);
+	dev_priv->engine.graph.tlb_flush(dev);
 	nv50_vm_flush(dev, 6);
 }
 
@@ -653,6 +651,7 @@ nouveau_mem_gart_init(struct drm_device *dev)
 void
 nouveau_mem_timing_init(struct drm_device *dev)
 {
+	/* cards < NVC0 only */
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 	struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
@@ -719,14 +718,14 @@ nouveau_mem_timing_init(struct drm_device *dev)
 		tUNK_19 = 1;
 		tUNK_20 = 0;
 		tUNK_21 = 0;
-		switch (MIN(recordlen,21)) {
-		case 21:
+		switch (min(recordlen, 22)) {
+		case 22:
 			tUNK_21 = entry[21];
-		case 20:
+		case 21:
 			tUNK_20 = entry[20];
-		case 19:
+		case 20:
 			tUNK_19 = entry[19];
-		case 18:
+		case 19:
 			tUNK_18 = entry[18];
 		default:
 			tUNK_0 = entry[0];
@@ -756,24 +755,30 @@ nouveau_mem_timing_init(struct drm_device *dev)
 		timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
 		if(recordlen > 19) {
 			timing->reg_100228 += (tUNK_19 - 1) << 24;
-		} else {
+		}/* I cannot back-up this else-statement right now
+			 else {
 			timing->reg_100228 += tUNK_12 << 24;
-		}
+		}*/
 
 		/* XXX: reg_10022c */
+		timing->reg_10022c = tUNK_2 - 1;
 
 		timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
 				      tUNK_13 << 8 | tUNK_13);
 
 		/* XXX: +6? */
 		timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
-		if(tUNK_10 > tUNK_11) {
-			timing->reg_100234 += tUNK_10 << 16;
-		} else {
-			timing->reg_100234 += tUNK_11 << 16;
+		timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
+
+		/* XXX; reg_100238, reg_10023c
+		 * reg: 0x00??????
+		 * reg_10023c:
+		 *	0 for pre-NV50 cards
+		 *	0x????0202 for NV50+ cards (empirical evidence) */
+		if(dev_priv->card_type >= NV_50) {
+			timing->reg_10023c = 0x202;
 		}
 
-		/* XXX; reg_100238, reg_10023c */
 		NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
 			 timing->reg_100220, timing->reg_100224,
 			 timing->reg_100228, timing->reg_10022c);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 896cf8634144..dd572adca02a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -129,7 +129,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 		if (ramin == NULL) {
 			spin_unlock(&dev_priv->ramin_lock);
 			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return ret;
+			return -ENOMEM;
 		}
 
 		ramin = drm_mm_get_block_atomic(ramin, size, align);
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 1c99c55d6d46..9f7b158f5825 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -284,6 +284,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
 	}
 }
 
+#ifdef CONFIG_HWMON
 static ssize_t
 nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
 {
@@ -395,10 +396,12 @@ static struct attribute *hwmon_attributes[] = {
 static const struct attribute_group hwmon_attrgroup = {
 	.attrs = hwmon_attributes,
 };
+#endif
 
 static int
 nouveau_hwmon_init(struct drm_device *dev)
 {
+#ifdef CONFIG_HWMON
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 	struct device *hwmon_dev;
@@ -425,13 +428,14 @@ nouveau_hwmon_init(struct drm_device *dev)
 	}
 
 	pm->hwmon = hwmon_dev;
-
+#endif
 	return 0;
 }
 
 static void
 nouveau_hwmon_fini(struct drm_device *dev)
 {
+#ifdef CONFIG_HWMON
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 
@@ -439,6 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
 		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
 		hwmon_device_unregister(pm->hwmon);
 	}
+#endif
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 7f16697cc96c..2d8580927ca4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -153,26 +153,42 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
 	return -ENOMEM;
 }
 
+static struct nouveau_ramht_entry *
+nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
+{
+	struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
+	struct nouveau_ramht_entry *entry;
+	unsigned long flags;
+
+	if (!ramht)
+		return NULL;
+
+	spin_lock_irqsave(&ramht->lock, flags);
+	list_for_each_entry(entry, &ramht->entries, head) {
+		if (entry->channel == chan &&
+		    (!handle || entry->handle == handle)) {
+			list_del(&entry->head);
+			spin_unlock_irqrestore(&ramht->lock, flags);
+
+			return entry;
+		}
+	}
+	spin_unlock_irqrestore(&ramht->lock, flags);
+
+	return NULL;
+}
+
 static void
-nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
+nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
-	struct nouveau_ramht_entry *entry, *tmp;
+	unsigned long flags;
 	u32 co, ho;
 
-	list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) {
-		if (entry->channel != chan || entry->handle != handle)
-			continue;
-
-		nouveau_gpuobj_ref(NULL, &entry->gpuobj);
-		list_del(&entry->head);
-		kfree(entry);
-		break;
-	}
-
+	spin_lock_irqsave(&chan->ramht->lock, flags);
 	co = ho = nouveau_ramht_hash_handle(chan, handle);
 	do {
 		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
@@ -184,7 +200,7 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
 			nv_wo32(ramht, co + 0, 0x00000000);
 			nv_wo32(ramht, co + 4, 0x00000000);
 			instmem->flush(dev);
-			return;
+			goto out;
 		}
 
 		co += 8;
@@ -194,17 +210,22 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
 
 	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
 		 chan->id, handle);
+out:
+	spin_unlock_irqrestore(&chan->ramht->lock, flags);
 }
 
 void
 nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
 {
-	struct nouveau_ramht *ramht = chan->ramht;
-	unsigned long flags;
+	struct nouveau_ramht_entry *entry;
 
-	spin_lock_irqsave(&ramht->lock, flags);
-	nouveau_ramht_remove_locked(chan, handle);
-	spin_unlock_irqrestore(&ramht->lock, flags);
+	entry = nouveau_ramht_remove_entry(chan, handle);
+	if (!entry)
+		return;
+
+	nouveau_ramht_remove_hash(chan, entry->handle);
+	nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+	kfree(entry);
 }
 
 struct nouveau_gpuobj *
@@ -265,23 +286,19 @@ void
 nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
 		  struct nouveau_channel *chan)
 {
-	struct nouveau_ramht_entry *entry, *tmp;
+	struct nouveau_ramht_entry *entry;
 	struct nouveau_ramht *ramht;
-	unsigned long flags;
 
 	if (ref)
 		kref_get(&ref->refcount);
 
 	ramht = *ptr;
 	if (ramht) {
-		spin_lock_irqsave(&ramht->lock, flags);
-		list_for_each_entry_safe(entry, tmp, &ramht->entries, head) {
-			if (entry->channel != chan)
-				continue;
-
-			nouveau_ramht_remove_locked(chan, entry->handle);
+		while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
+			nouveau_ramht_remove_hash(chan, entry->handle);
+			nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+			kfree(entry);
 		}
-		spin_unlock_irqrestore(&ramht->lock, flags);
 
 		kref_put(&ramht->refcount, nouveau_ramht_del);
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 288bacac7e5a..d4ac97007038 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -120,8 +120,8 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 	dev_priv->engine.instmem.flush(nvbe->dev);
 
 	if (dev_priv->card_type == NV_50) {
-		nv50_vm_flush(dev, 5); /* PGRAPH */
-		nv50_vm_flush(dev, 0); /* PFIFO */
+		dev_priv->engine.fifo.tlb_flush(dev);
+		dev_priv->engine.graph.tlb_flush(dev);
 	}
 
 	nvbe->bound = true;
@@ -162,8 +162,8 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 	dev_priv->engine.instmem.flush(nvbe->dev);
 
 	if (dev_priv->card_type == NV_50) {
-		nv50_vm_flush(dev, 5);
-		nv50_vm_flush(dev, 0);
+		dev_priv->engine.fifo.tlb_flush(dev);
+		dev_priv->engine.graph.tlb_flush(dev);
 	}
 
 	nvbe->bound = false;
@@ -224,7 +224,11 @@ nouveau_sgdma_init(struct drm_device *dev)
 	int i, ret;
 
 	if (dev_priv->card_type < NV_50) {
-		aper_size = (64 * 1024 * 1024);
+		if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
+			aper_size = 64 * 1024 * 1024;
+		else
+			aper_size = 512 * 1024 * 1024;
+
 		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
 		obj_size += 8; /* ctxdma header */
 	} else {
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index ed7757f14083..049f755567e5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
| @@ -354,6 +354,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 354 | engine->graph.destroy_context = nv50_graph_destroy_context; | 354 | engine->graph.destroy_context = nv50_graph_destroy_context; |
| 355 | engine->graph.load_context = nv50_graph_load_context; | 355 | engine->graph.load_context = nv50_graph_load_context; |
| 356 | engine->graph.unload_context = nv50_graph_unload_context; | 356 | engine->graph.unload_context = nv50_graph_unload_context; |
| 357 | if (dev_priv->chipset != 0x86) | ||
| 358 | engine->graph.tlb_flush = nv50_graph_tlb_flush; | ||
| 359 | else { | ||
| 360 | /* from what i can see nvidia do this on every | ||
| 361 | * pre-NVA3 board except NVAC, but, we've only | ||
| 362 | * ever seen problems on NV86 | ||
| 363 | */ | ||
| 364 | engine->graph.tlb_flush = nv86_graph_tlb_flush; | ||
| 365 | } | ||
| 357 | engine->fifo.channels = 128; | 366 | engine->fifo.channels = 128; |
| 358 | engine->fifo.init = nv50_fifo_init; | 367 | engine->fifo.init = nv50_fifo_init; |
| 359 | engine->fifo.takedown = nv50_fifo_takedown; | 368 | engine->fifo.takedown = nv50_fifo_takedown; |
| @@ -365,6 +374,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 365 | engine->fifo.destroy_context = nv50_fifo_destroy_context; | 374 | engine->fifo.destroy_context = nv50_fifo_destroy_context; |
| 366 | engine->fifo.load_context = nv50_fifo_load_context; | 375 | engine->fifo.load_context = nv50_fifo_load_context; |
| 367 | engine->fifo.unload_context = nv50_fifo_unload_context; | 376 | engine->fifo.unload_context = nv50_fifo_unload_context; |
| 377 | engine->fifo.tlb_flush = nv50_fifo_tlb_flush; | ||
| 368 | engine->display.early_init = nv50_display_early_init; | 378 | engine->display.early_init = nv50_display_early_init; |
| 369 | engine->display.late_takedown = nv50_display_late_takedown; | 379 | engine->display.late_takedown = nv50_display_late_takedown; |
| 370 | engine->display.create = nv50_display_create; | 380 | engine->display.create = nv50_display_create; |
| @@ -1041,6 +1051,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
| 1041 | case NOUVEAU_GETPARAM_PTIMER_TIME: | 1051 | case NOUVEAU_GETPARAM_PTIMER_TIME: |
| 1042 | getparam->value = dev_priv->engine.timer.read(dev); | 1052 | getparam->value = dev_priv->engine.timer.read(dev); |
| 1043 | break; | 1053 | break; |
| 1054 | case NOUVEAU_GETPARAM_HAS_BO_USAGE: | ||
| 1055 | getparam->value = 1; | ||
| 1056 | break; | ||
| 1044 | case NOUVEAU_GETPARAM_GRAPH_UNITS: | 1057 | case NOUVEAU_GETPARAM_GRAPH_UNITS: |
| 1045 | /* NV40 and NV50 versions are quite different, but register | 1058 | /* NV40 and NV50 versions are quite different, but register |
| 1046 | * address is the same. User is supposed to know the card | 1059 | * address is the same. User is supposed to know the card |
| @@ -1051,7 +1064,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
| 1051 | } | 1064 | } |
| 1052 | /* FALLTHRU */ | 1065 | /* FALLTHRU */ |
| 1053 | default: | 1066 | default: |
| 1054 | NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); | 1067 | NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); |
| 1055 | return -EINVAL; | 1068 | return -EINVAL; |
| 1056 | } | 1069 | } |
| 1057 | 1070 | ||
| @@ -1066,7 +1079,7 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data, | |||
| 1066 | 1079 | ||
| 1067 | switch (setparam->param) { | 1080 | switch (setparam->param) { |
| 1068 | default: | 1081 | default: |
| 1069 | NV_ERROR(dev, "unknown parameter %lld\n", setparam->param); | 1082 | NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param); |
| 1070 | return -EINVAL; | 1083 | return -EINVAL; |
| 1071 | } | 1084 | } |
| 1072 | 1085 | ||
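The nouveau_state.c hunks hang chipset-specific TLB-flush hooks off the engine table: every NV50-family part gets nv50_graph_tlb_flush(), except NV86, which gets the heavier nv86_graph_tlb_flush() workaround. A minimal userspace model of that function-pointer selection, assuming only the hook field added above; the demo_* names are stand-ins, not driver symbols.

    /* Userspace model of the per-chipset hook selection added above.
     * Everything prefixed demo_ is hypothetical and only stands in for
     * the real driver code. */
    #include <stdint.h>
    #include <stdio.h>

    struct demo_graph_engine {
        void (*tlb_flush)(void *dev);       /* hook added by the patch */
    };

    static void demo_nv50_graph_tlb_flush(void *dev) { puts("nv50 flush"); }
    static void demo_nv86_graph_tlb_flush(void *dev) { puts("nv86 flush (workaround)"); }

    static void demo_init_engine_ptrs(struct demo_graph_engine *graph, uint32_t chipset)
    {
        /* NV86 gets the heavier flush that waits for PGRAPH to go idle first */
        if (chipset != 0x86)
            graph->tlb_flush = demo_nv50_graph_tlb_flush;
        else
            graph->tlb_flush = demo_nv86_graph_tlb_flush;
    }

    int main(void)
    {
        struct demo_graph_engine graph;

        demo_init_engine_ptrs(&graph, 0x86);
        graph.tlb_flush(NULL);              /* callers just go through the hook */
        return 0;
    }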
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 16bbbf1eff63..7ecc4adc1e45 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c | |||
| @@ -191,7 +191,7 @@ nv40_temp_get(struct drm_device *dev) | |||
| 191 | int offset = sensor->offset_mult / sensor->offset_div; | 191 | int offset = sensor->offset_mult / sensor->offset_div; |
| 192 | int core_temp; | 192 | int core_temp; |
| 193 | 193 | ||
| 194 | if (dev_priv->chipset >= 0x50) { | 194 | if (dev_priv->card_type >= NV_50) { |
| 195 | core_temp = nv_rd32(dev, 0x20008); | 195 | core_temp = nv_rd32(dev, 0x20008); |
| 196 | } else { | 196 | } else { |
| 197 | core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; | 197 | core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; |
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index c71abc2a34d5..40e180741629 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c | |||
| @@ -158,7 +158,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 158 | { | 158 | { |
| 159 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 159 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 160 | struct drm_device *dev = crtc->dev; | 160 | struct drm_device *dev = crtc->dev; |
| 161 | struct drm_connector *connector; | ||
| 162 | unsigned char seq1 = 0, crtc17 = 0; | 161 | unsigned char seq1 = 0, crtc17 = 0; |
| 163 | unsigned char crtc1A; | 162 | unsigned char crtc1A; |
| 164 | 163 | ||
| @@ -213,10 +212,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 213 | NVVgaSeqReset(dev, nv_crtc->index, false); | 212 | NVVgaSeqReset(dev, nv_crtc->index, false); |
| 214 | 213 | ||
| 215 | NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); | 214 | NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); |
| 216 | |||
| 217 | /* Update connector polling modes */ | ||
| 218 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
| 219 | nouveau_connector_set_polling(connector); | ||
| 220 | } | 215 | } |
| 221 | 216 | ||
| 222 | static bool | 217 | static bool |
| @@ -831,7 +826,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
| 831 | /* Update the framebuffer location. */ | 826 | /* Update the framebuffer location. */ |
| 832 | regp->fb_start = nv_crtc->fb.offset & ~3; | 827 | regp->fb_start = nv_crtc->fb.offset & ~3; |
| 833 | regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8); | 828 | regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8); |
| 834 | NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start); | 829 | nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start); |
| 835 | 830 | ||
| 836 | /* Update the arbitration parameters. */ | 831 | /* Update the arbitration parameters. */ |
| 837 | nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel, | 832 | nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel, |
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index c936403b26e2..ef23550407b5 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c | |||
| @@ -185,14 +185,15 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder, | |||
| 185 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 185 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
| 186 | struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); | 186 | struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); |
| 187 | 187 | ||
| 188 | /* For internal panels and gpu scaling on DVI we need the native mode */ | 188 | if (!nv_connector->native_mode || |
| 189 | if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { | 189 | nv_connector->scaling_mode == DRM_MODE_SCALE_NONE || |
| 190 | if (!nv_connector->native_mode) | 190 | mode->hdisplay > nv_connector->native_mode->hdisplay || |
| 191 | return false; | 191 | mode->vdisplay > nv_connector->native_mode->vdisplay) { |
| 192 | nv_encoder->mode = *adjusted_mode; | ||
| 193 | |||
| 194 | } else { | ||
| 192 | nv_encoder->mode = *nv_connector->native_mode; | 195 | nv_encoder->mode = *nv_connector->native_mode; |
| 193 | adjusted_mode->clock = nv_connector->native_mode->clock; | 196 | adjusted_mode->clock = nv_connector->native_mode->clock; |
| 194 | } else { | ||
| 195 | nv_encoder->mode = *adjusted_mode; | ||
| 196 | } | 197 | } |
| 197 | 198 | ||
| 198 | return true; | 199 | return true; |
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c index 6a6eb697d38e..eb1c70dd82ed 100644 --- a/drivers/gpu/drm/nouveau/nv04_pm.c +++ b/drivers/gpu/drm/nouveau/nv04_pm.c | |||
| @@ -76,6 +76,15 @@ nv04_pm_clock_set(struct drm_device *dev, void *pre_state) | |||
| 76 | reg += 4; | 76 | reg += 4; |
| 77 | 77 | ||
| 78 | nouveau_hw_setpll(dev, reg, &state->calc); | 78 | nouveau_hw_setpll(dev, reg, &state->calc); |
| 79 | |||
| 80 | if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) { | ||
| 81 | if (dev_priv->card_type == NV_20) | ||
| 82 | nv_mask(dev, 0x1002c4, 0, 1 << 20); | ||
| 83 | |||
| 84 | /* Reset the DLLs */ | ||
| 85 | nv_mask(dev, 0x1002c0, 0, 1 << 8); | ||
| 86 | } | ||
| 87 | |||
| 79 | kfree(state); | 88 | kfree(state); |
| 80 | } | 89 | } |
| 81 | 90 | ||
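The new nv04_pm_clock_set() tail pulses DLL-reset bits through nv_mask() after the memory PLL is reprogrammed. The helper itself is not part of this hunk; the model below assumes the usual read-modify-write convention (old value with the mask bits cleared, new bits ORed in), which is why a mask of 0 simply sets bits.

    #include <stdint.h>
    #include <stdio.h>

    /* Model of an nv_mask()-style helper: read, clear the bits in 'mask',
     * OR in 'val', write back, return the old value.  With mask == 0, as
     * in the DLL-reset calls above, this just sets the bits in 'val'. */
    static uint32_t demo_nv_mask(uint32_t *reg, uint32_t mask, uint32_t val)
    {
        uint32_t old = *reg;

        *reg = (old & ~mask) | val;
        return old;
    }

    int main(void)
    {
        uint32_t pfb_1002c0 = 0x00000042;   /* stand-in for the real register */

        demo_nv_mask(&pfb_1002c0, 0, 1 << 8);       /* set the DLL-reset bit */
        printf("0x1002c0 = 0x%08x\n", pfb_1002c0);  /* prints 0x00000142 */
        return 0;
    }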
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c index 2cdc2bfe7179..de81151648f8 100644 --- a/drivers/gpu/drm/nouveau/nv50_calc.c +++ b/drivers/gpu/drm/nouveau/nv50_calc.c | |||
| @@ -51,24 +51,28 @@ nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk, | |||
| 51 | int *N, int *fN, int *M, int *P) | 51 | int *N, int *fN, int *M, int *P) |
| 52 | { | 52 | { |
| 53 | fixed20_12 fb_div, a, b; | 53 | fixed20_12 fb_div, a, b; |
| 54 | u32 refclk = pll->refclk / 10; | ||
| 55 | u32 max_vco_freq = pll->vco1.maxfreq / 10; | ||
| 56 | u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10; | ||
| 57 | clk /= 10; | ||
| 54 | 58 | ||
| 55 | *P = pll->vco1.maxfreq / clk; | 59 | *P = max_vco_freq / clk; |
| 56 | if (*P > pll->max_p) | 60 | if (*P > pll->max_p) |
| 57 | *P = pll->max_p; | 61 | *P = pll->max_p; |
| 58 | if (*P < pll->min_p) | 62 | if (*P < pll->min_p) |
| 59 | *P = pll->min_p; | 63 | *P = pll->min_p; |
| 60 | 64 | ||
| 61 | /* *M = ceil(refclk / pll->vco.max_inputfreq); */ | 65 | /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */ |
| 62 | a.full = dfixed_const(pll->refclk); | 66 | a.full = dfixed_const(refclk + max_vco_inputfreq); |
| 63 | b.full = dfixed_const(pll->vco1.max_inputfreq); | 67 | b.full = dfixed_const(max_vco_inputfreq); |
| 64 | a.full = dfixed_div(a, b); | 68 | a.full = dfixed_div(a, b); |
| 65 | a.full = dfixed_ceil(a); | 69 | a.full = dfixed_floor(a); |
| 66 | *M = dfixed_trunc(a); | 70 | *M = dfixed_trunc(a); |
| 67 | 71 | ||
| 68 | /* fb_div = (vco * *M) / refclk; */ | 72 | /* fb_div = (vco * *M) / refclk; */ |
| 69 | fb_div.full = dfixed_const(clk * *P); | 73 | fb_div.full = dfixed_const(clk * *P); |
| 70 | fb_div.full = dfixed_mul(fb_div, a); | 74 | fb_div.full = dfixed_mul(fb_div, a); |
| 71 | a.full = dfixed_const(pll->refclk); | 75 | a.full = dfixed_const(refclk); |
| 72 | fb_div.full = dfixed_div(fb_div, a); | 76 | fb_div.full = dfixed_div(fb_div, a); |
| 73 | 77 | ||
| 74 | /* *N = floor(fb_div); */ | 78 | /* *N = floor(fb_div); */ |
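nv50_calc_pll2() now works in 10 kHz units and computes M as floor((refclk + max_vco_inputfreq) / max_vco_inputfreq) instead of ceil(refclk / max_vco_inputfreq), which picks one extra divider step whenever refclk is an exact multiple of the maximum VCO input frequency. A plain-integer sketch of the P/M/N derivation; the fractional divider is ignored and the sample clocks are purely illustrative.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;

    /* Integer-only model of the reworked nv50_calc_pll2() P/M/N derivation. */
    static void demo_calc_pll2(u32 refclk_khz, u32 clk_khz,
                               u32 max_vco_khz, u32 max_vco_input_khz,
                               u32 max_p, u32 min_p)
    {
        /* the patch divides everything by 10 up front (units of 10 kHz) */
        u32 refclk = refclk_khz / 10;
        u32 clk = clk_khz / 10;
        u32 max_vco = max_vco_khz / 10;
        u32 max_vco_input = max_vco_input_khz / 10;
        u32 P, M, N;

        P = max_vco / clk;
        if (P > max_p) P = max_p;
        if (P < min_p) P = min_p;

        /* M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq) */
        M = (refclk + max_vco_input) / max_vco_input;

        /* N = floor((clk * P * M) / refclk) */
        N = (clk * P * M) / refclk;

        printf("clk=%u kHz -> P=%u M=%u N=%u\n", clk_khz, P, M, N);
    }

    int main(void)
    {
        /* 27 MHz reference, 135 MHz target, 1.35 GHz max VCO, 3 MHz max input;
         * prints: clk=135000 kHz -> P=10 M=10 N=500 */
        demo_calc_pll2(27000, 135000, 1350000, 3000, 10, 1);
        return 0;
    }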
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 16380d52cd88..56476d0c6de8 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
| @@ -546,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
| 546 | } | 546 | } |
| 547 | 547 | ||
| 548 | nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; | 548 | nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; |
| 549 | nv_crtc->fb.tile_flags = fb->nvbo->tile_flags; | 549 | nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); |
| 550 | nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; | 550 | nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; |
| 551 | if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { | 551 | if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { |
| 552 | ret = RING_SPACE(evo, 2); | 552 | ret = RING_SPACE(evo, 2); |
| @@ -578,7 +578,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
| 578 | fb->nvbo->tile_mode); | 578 | fb->nvbo->tile_mode); |
| 579 | } | 579 | } |
| 580 | if (dev_priv->chipset == 0x50) | 580 | if (dev_priv->chipset == 0x50) |
| 581 | OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format); | 581 | OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format); |
| 582 | else | 582 | else |
| 583 | OUT_RING(evo, format); | 583 | OUT_RING(evo, format); |
| 584 | 584 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 55c9663ef2bf..f624c611ddea 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -1032,11 +1032,18 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) | |||
| 1032 | struct drm_connector *connector; | 1032 | struct drm_connector *connector; |
| 1033 | const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; | 1033 | const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; |
| 1034 | uint32_t unplug_mask, plug_mask, change_mask; | 1034 | uint32_t unplug_mask, plug_mask, change_mask; |
| 1035 | uint32_t hpd0, hpd1 = 0; | 1035 | uint32_t hpd0, hpd1; |
| 1036 | 1036 | ||
| 1037 | hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); | 1037 | spin_lock_irq(&dev_priv->hpd_state.lock); |
| 1038 | hpd0 = dev_priv->hpd_state.hpd0_bits; | ||
| 1039 | dev_priv->hpd_state.hpd0_bits = 0; | ||
| 1040 | hpd1 = dev_priv->hpd_state.hpd1_bits; | ||
| 1041 | dev_priv->hpd_state.hpd1_bits = 0; | ||
| 1042 | spin_unlock_irq(&dev_priv->hpd_state.lock); | ||
| 1043 | |||
| 1044 | hpd0 &= nv_rd32(dev, 0xe050); | ||
| 1038 | if (dev_priv->chipset >= 0x90) | 1045 | if (dev_priv->chipset >= 0x90) |
| 1039 | hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); | 1046 | hpd1 &= nv_rd32(dev, 0xe070); |
| 1040 | 1047 | ||
| 1041 | plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); | 1048 | plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); |
| 1042 | unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); | 1049 | unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); |
| @@ -1078,10 +1085,6 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) | |||
| 1078 | helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); | 1085 | helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); |
| 1079 | } | 1086 | } |
| 1080 | 1087 | ||
| 1081 | nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); | ||
| 1082 | if (dev_priv->chipset >= 0x90) | ||
| 1083 | nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); | ||
| 1084 | |||
| 1085 | drm_helper_hpd_irq_event(dev); | 1088 | drm_helper_hpd_irq_event(dev); |
| 1086 | } | 1089 | } |
| 1087 | 1090 | ||
| @@ -1092,8 +1095,22 @@ nv50_display_irq_handler(struct drm_device *dev) | |||
| 1092 | uint32_t delayed = 0; | 1095 | uint32_t delayed = 0; |
| 1093 | 1096 | ||
| 1094 | if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { | 1097 | if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { |
| 1095 | if (!work_pending(&dev_priv->hpd_work)) | 1098 | uint32_t hpd0_bits, hpd1_bits = 0; |
| 1096 | queue_work(dev_priv->wq, &dev_priv->hpd_work); | 1099 | |
| 1100 | hpd0_bits = nv_rd32(dev, 0xe054); | ||
| 1101 | nv_wr32(dev, 0xe054, hpd0_bits); | ||
| 1102 | |||
| 1103 | if (dev_priv->chipset >= 0x90) { | ||
| 1104 | hpd1_bits = nv_rd32(dev, 0xe074); | ||
| 1105 | nv_wr32(dev, 0xe074, hpd1_bits); | ||
| 1106 | } | ||
| 1107 | |||
| 1108 | spin_lock(&dev_priv->hpd_state.lock); | ||
| 1109 | dev_priv->hpd_state.hpd0_bits |= hpd0_bits; | ||
| 1110 | dev_priv->hpd_state.hpd1_bits |= hpd1_bits; | ||
| 1111 | spin_unlock(&dev_priv->hpd_state.lock); | ||
| 1112 | |||
| 1113 | queue_work(dev_priv->wq, &dev_priv->hpd_work); | ||
| 1097 | } | 1114 | } |
| 1098 | 1115 | ||
| 1099 | while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { | 1116 | while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { |
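The hotplug rework acknowledges the HPD registers in the IRQ handler and latches the pending bits into dev_priv->hpd_state under a spinlock, while the bottom half consumes and clears them in one locked step, so events are neither lost nor processed twice. A compact userspace model of that handoff, with a pthread mutex standing in for the spinlock and all names being illustrative:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of the IRQ -> work-queue handoff introduced above: the "IRQ"
     * side ORs freshly acknowledged bits into shared state, the "bottom
     * half" takes everything accumulated so far and resets it to zero,
     * so no edge is lost even if several interrupts fire before the
     * work item runs. */
    struct demo_hpd_state {
        pthread_mutex_t lock;
        uint32_t hpd0_bits;
        uint32_t hpd1_bits;
    };

    static void demo_irq_handler(struct demo_hpd_state *st, uint32_t hpd0, uint32_t hpd1)
    {
        pthread_mutex_lock(&st->lock);
        st->hpd0_bits |= hpd0;         /* latch, do not overwrite */
        st->hpd1_bits |= hpd1;
        pthread_mutex_unlock(&st->lock);
    }

    static void demo_hotplug_bh(struct demo_hpd_state *st)
    {
        uint32_t hpd0, hpd1;

        pthread_mutex_lock(&st->lock);
        hpd0 = st->hpd0_bits;  st->hpd0_bits = 0;   /* consume atomically */
        hpd1 = st->hpd1_bits;  st->hpd1_bits = 0;
        pthread_mutex_unlock(&st->lock);

        printf("processing hpd0=0x%08x hpd1=0x%08x\n", hpd0, hpd1);
    }

    int main(void)
    {
        struct demo_hpd_state st = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        demo_irq_handler(&st, 0x00010001, 0);   /* two interrupts arrive before ... */
        demo_irq_handler(&st, 0x00000002, 0);
        demo_hotplug_bh(&st);                   /* ... the work item runs once */
        return 0;
    }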
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index a46a961102f3..1da65bd60c10 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
| @@ -464,3 +464,8 @@ nv50_fifo_unload_context(struct drm_device *dev) | |||
| 464 | return 0; | 464 | return 0; |
| 465 | } | 465 | } |
| 466 | 466 | ||
| 467 | void | ||
| 468 | nv50_fifo_tlb_flush(struct drm_device *dev) | ||
| 469 | { | ||
| 470 | nv50_vm_flush(dev, 5); | ||
| 471 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index cbf5ae2f67d4..8b669d0af610 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
| @@ -402,3 +402,55 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = { | |||
| 402 | { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ | 402 | { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ |
| 403 | {} | 403 | {} |
| 404 | }; | 404 | }; |
| 405 | |||
| 406 | void | ||
| 407 | nv50_graph_tlb_flush(struct drm_device *dev) | ||
| 408 | { | ||
| 409 | nv50_vm_flush(dev, 0); | ||
| 410 | } | ||
| 411 | |||
| 412 | void | ||
| 413 | nv86_graph_tlb_flush(struct drm_device *dev) | ||
| 414 | { | ||
| 415 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 416 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | ||
| 417 | bool idle, timeout = false; | ||
| 418 | unsigned long flags; | ||
| 419 | u64 start; | ||
| 420 | u32 tmp; | ||
| 421 | |||
| 422 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
| 423 | nv_mask(dev, 0x400500, 0x00000001, 0x00000000); | ||
| 424 | |||
| 425 | start = ptimer->read(dev); | ||
| 426 | do { | ||
| 427 | idle = true; | ||
| 428 | |||
| 429 | for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) { | ||
| 430 | if ((tmp & 7) == 1) | ||
| 431 | idle = false; | ||
| 432 | } | ||
| 433 | |||
| 434 | for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) { | ||
| 435 | if ((tmp & 7) == 1) | ||
| 436 | idle = false; | ||
| 437 | } | ||
| 438 | |||
| 439 | for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) { | ||
| 440 | if ((tmp & 7) == 1) | ||
| 441 | idle = false; | ||
| 442 | } | ||
| 443 | } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000)); | ||
| 444 | |||
| 445 | if (timeout) { | ||
| 446 | NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: " | ||
| 447 | "0x%08x 0x%08x 0x%08x 0x%08x\n", | ||
| 448 | nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380), | ||
| 449 | nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); | ||
| 450 | } | ||
| 451 | |||
| 452 | nv50_vm_flush(dev, 0); | ||
| 453 | |||
| 454 | nv_mask(dev, 0x400500, 0x00000001, 0x00000001); | ||
| 455 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
| 456 | } | ||
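nv86_graph_tlb_flush() spins until three PGRAPH status registers stop reporting any busy unit, then gives up after roughly two seconds of PTIMER time. Each register packs unit states into 3-bit fields and a field value of 1 means busy, as the inner loops show. That scan can be factored out and exercised on its own; the helper name below is mine.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the inner loops of nv86_graph_tlb_flush(): each status word
     * packs unit states into 3-bit fields, and a field value of 1 means
     * that unit is still busy.  Returns true only when nothing is busy. */
    static bool demo_pgraph_status_idle(uint32_t status)
    {
        for (; status; status >>= 3) {
            if ((status & 7) == 1)
                return false;
        }
        return true;
    }

    int main(void)
    {
        printf("0x00000000 idle: %d\n", demo_pgraph_status_idle(0x00000000)); /* 1 */
        printf("0x00000009 idle: %d\n", demo_pgraph_status_idle(0x00000009)); /* 0: low field is 1 */
        printf("0x00000012 idle: %d\n", demo_pgraph_status_idle(0x00000012)); /* 1: fields are 2 and 2 */
        return 0;
    }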
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index a53fc974332b..b773229b7647 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
| @@ -402,7 +402,6 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
| 402 | } | 402 | } |
| 403 | dev_priv->engine.instmem.flush(dev); | 403 | dev_priv->engine.instmem.flush(dev); |
| 404 | 404 | ||
| 405 | nv50_vm_flush(dev, 4); | ||
| 406 | nv50_vm_flush(dev, 6); | 405 | nv50_vm_flush(dev, 6); |
| 407 | 406 | ||
| 408 | gpuobj->im_bound = 1; | 407 | gpuobj->im_bound = 1; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 488c36c8f5e6..4dc5b4714c5a 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -1650,7 +1650,36 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
| 1650 | } | 1650 | } |
| 1651 | } | 1651 | } |
| 1652 | 1652 | ||
| 1653 | rdev->config.evergreen.tile_config = gb_addr_config; | 1653 | /* setup tiling info dword. gb_addr_config is not adequate since it does |
| 1654 | * not have bank info, so create a custom tiling dword. | ||
| 1655 | * bits 3:0 num_pipes | ||
| 1656 | * bits 7:4 num_banks | ||
| 1657 | * bits 11:8 group_size | ||
| 1658 | * bits 15:12 row_size | ||
| 1659 | */ | ||
| 1660 | rdev->config.evergreen.tile_config = 0; | ||
| 1661 | switch (rdev->config.evergreen.max_tile_pipes) { | ||
| 1662 | case 1: | ||
| 1663 | default: | ||
| 1664 | rdev->config.evergreen.tile_config |= (0 << 0); | ||
| 1665 | break; | ||
| 1666 | case 2: | ||
| 1667 | rdev->config.evergreen.tile_config |= (1 << 0); | ||
| 1668 | break; | ||
| 1669 | case 4: | ||
| 1670 | rdev->config.evergreen.tile_config |= (2 << 0); | ||
| 1671 | break; | ||
| 1672 | case 8: | ||
| 1673 | rdev->config.evergreen.tile_config |= (3 << 0); | ||
| 1674 | break; | ||
| 1675 | } | ||
| 1676 | rdev->config.evergreen.tile_config |= | ||
| 1677 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; | ||
| 1678 | rdev->config.evergreen.tile_config |= | ||
| 1679 | ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; | ||
| 1680 | rdev->config.evergreen.tile_config |= | ||
| 1681 | ((gb_addr_config & 0x30000000) >> 28) << 12; | ||
| 1682 | |||
| 1654 | WREG32(GB_BACKEND_MAP, gb_backend_map); | 1683 | WREG32(GB_BACKEND_MAP, gb_backend_map); |
| 1655 | WREG32(GB_ADDR_CONFIG, gb_addr_config); | 1684 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
| 1656 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 1685 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
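The evergreen change builds a custom tiling dword for userspace: bits 3:0 carry log2 of the pipe count, bits 7:4 the bank field from MC_ARB_RAMCFG, bits 11:8 the group-size field, and bits 15:12 the row-size bits of GB_ADDR_CONFIG. A small decoder sketch; only the pipe count is expanded, the other fields are left raw since their exact encodings come from the hardware registers rather than this hunk.

    #include <stdint.h>
    #include <stdio.h>

    /* Unpacks the tile_config dword assembled above.  The switch in the
     * patch maps 1/2/4/8 pipes to field values 0..3, so the pipe count
     * is 1 << field; the remaining fields are printed raw. */
    static void demo_decode_tile_config(uint32_t tile_config)
    {
        uint32_t pipes_field = (tile_config >> 0) & 0xf;
        uint32_t banks_field = (tile_config >> 4) & 0xf;
        uint32_t group_field = (tile_config >> 8) & 0xf;
        uint32_t row_field   = (tile_config >> 12) & 0xf;

        printf("num_pipes   = %u\n", 1u << pipes_field);
        printf("bank field  = %u\n", banks_field);
        printf("group field = %u\n", group_field);
        printf("row field   = %u\n", row_field);
    }

    int main(void)
    {
        demo_decode_tile_config(0x00001021);   /* illustrative value */
        return 0;
    }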
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index ac3b6dde23db..e0e590110dd4 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
| @@ -459,7 +459,7 @@ int evergreen_blit_init(struct radeon_device *rdev) | |||
| 459 | obj_size += evergreen_ps_size * 4; | 459 | obj_size += evergreen_ps_size * 4; |
| 460 | obj_size = ALIGN(obj_size, 256); | 460 | obj_size = ALIGN(obj_size, 256); |
| 461 | 461 | ||
| 462 | r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, | 462 | r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
| 463 | &rdev->r600_blit.shader_obj); | 463 | &rdev->r600_blit.shader_obj); |
| 464 | if (r) { | 464 | if (r) { |
| 465 | DRM_ERROR("evergreen failed to allocate shader\n"); | 465 | DRM_ERROR("evergreen failed to allocate shader\n"); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 0f806cc7dc75..a3552594ccc4 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -2718,7 +2718,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev) | |||
| 2718 | /* Allocate ring buffer */ | 2718 | /* Allocate ring buffer */ |
| 2719 | if (rdev->ih.ring_obj == NULL) { | 2719 | if (rdev->ih.ring_obj == NULL) { |
| 2720 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, | 2720 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, |
| 2721 | true, | 2721 | PAGE_SIZE, true, |
| 2722 | RADEON_GEM_DOMAIN_GTT, | 2722 | RADEON_GEM_DOMAIN_GTT, |
| 2723 | &rdev->ih.ring_obj); | 2723 | &rdev->ih.ring_obj); |
| 2724 | if (r) { | 2724 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 8362974ef41a..86e5aa07f0db 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
| @@ -501,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev) | |||
| 501 | obj_size += r6xx_ps_size * 4; | 501 | obj_size += r6xx_ps_size * 4; |
| 502 | obj_size = ALIGN(obj_size, 256); | 502 | obj_size = ALIGN(obj_size, 256); |
| 503 | 503 | ||
| 504 | r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, | 504 | r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
| 505 | &rdev->r600_blit.shader_obj); | 505 | &rdev->r600_blit.shader_obj); |
| 506 | if (r) { | 506 | if (r) { |
| 507 | DRM_ERROR("r600 failed to allocate shader\n"); | 507 | DRM_ERROR("r600 failed to allocate shader\n"); |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 37cc2aa9f923..9bebac1ec006 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
| @@ -50,6 +50,7 @@ struct r600_cs_track { | |||
| 50 | u32 nsamples; | 50 | u32 nsamples; |
| 51 | u32 cb_color_base_last[8]; | 51 | u32 cb_color_base_last[8]; |
| 52 | struct radeon_bo *cb_color_bo[8]; | 52 | struct radeon_bo *cb_color_bo[8]; |
| 53 | u64 cb_color_bo_mc[8]; | ||
| 53 | u32 cb_color_bo_offset[8]; | 54 | u32 cb_color_bo_offset[8]; |
| 54 | struct radeon_bo *cb_color_frag_bo[8]; | 55 | struct radeon_bo *cb_color_frag_bo[8]; |
| 55 | struct radeon_bo *cb_color_tile_bo[8]; | 56 | struct radeon_bo *cb_color_tile_bo[8]; |
| @@ -67,6 +68,7 @@ struct r600_cs_track { | |||
| 67 | u32 db_depth_size; | 68 | u32 db_depth_size; |
| 68 | u32 db_offset; | 69 | u32 db_offset; |
| 69 | struct radeon_bo *db_bo; | 70 | struct radeon_bo *db_bo; |
| 71 | u64 db_bo_mc; | ||
| 70 | }; | 72 | }; |
| 71 | 73 | ||
| 72 | static inline int r600_bpe_from_format(u32 *bpe, u32 format) | 74 | static inline int r600_bpe_from_format(u32 *bpe, u32 format) |
| @@ -140,6 +142,68 @@ static inline int r600_bpe_from_format(u32 *bpe, u32 format) | |||
| 140 | return 0; | 142 | return 0; |
| 141 | } | 143 | } |
| 142 | 144 | ||
| 145 | struct array_mode_checker { | ||
| 146 | int array_mode; | ||
| 147 | u32 group_size; | ||
| 148 | u32 nbanks; | ||
| 149 | u32 npipes; | ||
| 150 | u32 nsamples; | ||
| 151 | u32 bpe; | ||
| 152 | }; | ||
| 153 | |||
| 154 | /* returns alignment in pixels for pitch/height/depth and bytes for base */ | ||
| 155 | static inline int r600_get_array_mode_alignment(struct array_mode_checker *values, | ||
| 156 | u32 *pitch_align, | ||
| 157 | u32 *height_align, | ||
| 158 | u32 *depth_align, | ||
| 159 | u64 *base_align) | ||
| 160 | { | ||
| 161 | u32 tile_width = 8; | ||
| 162 | u32 tile_height = 8; | ||
| 163 | u32 macro_tile_width = values->nbanks; | ||
| 164 | u32 macro_tile_height = values->npipes; | ||
| 165 | u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples; | ||
| 166 | u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; | ||
| 167 | |||
| 168 | switch (values->array_mode) { | ||
| 169 | case ARRAY_LINEAR_GENERAL: | ||
| 170 | /* technically tile_width/_height for pitch/height */ | ||
| 171 | *pitch_align = 1; /* tile_width */ | ||
| 172 | *height_align = 1; /* tile_height */ | ||
| 173 | *depth_align = 1; | ||
| 174 | *base_align = 1; | ||
| 175 | break; | ||
| 176 | case ARRAY_LINEAR_ALIGNED: | ||
| 177 | *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe)); | ||
| 178 | *height_align = tile_height; | ||
| 179 | *depth_align = 1; | ||
| 180 | *base_align = values->group_size; | ||
| 181 | break; | ||
| 182 | case ARRAY_1D_TILED_THIN1: | ||
| 183 | *pitch_align = max((u32)tile_width, | ||
| 184 | (u32)(values->group_size / | ||
| 185 | (tile_height * values->bpe * values->nsamples))); | ||
| 186 | *height_align = tile_height; | ||
| 187 | *depth_align = 1; | ||
| 188 | *base_align = values->group_size; | ||
| 189 | break; | ||
| 190 | case ARRAY_2D_TILED_THIN1: | ||
| 191 | *pitch_align = max((u32)macro_tile_width, | ||
| 192 | (u32)(((values->group_size / tile_height) / | ||
| 193 | (values->bpe * values->nsamples)) * | ||
| 194 | values->nbanks)) * tile_width; | ||
| 195 | *height_align = macro_tile_height * tile_height; | ||
| 196 | *depth_align = 1; | ||
| 197 | *base_align = max(macro_tile_bytes, | ||
| 198 | (*pitch_align) * values->bpe * (*height_align) * values->nsamples); | ||
| 199 | break; | ||
| 200 | default: | ||
| 201 | return -EINVAL; | ||
| 202 | } | ||
| 203 | |||
| 204 | return 0; | ||
| 205 | } | ||
| 206 | |||
| 143 | static void r600_cs_track_init(struct r600_cs_track *track) | 207 | static void r600_cs_track_init(struct r600_cs_track *track) |
| 144 | { | 208 | { |
| 145 | int i; | 209 | int i; |
| @@ -153,10 +217,12 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
| 153 | track->cb_color_info[i] = 0; | 217 | track->cb_color_info[i] = 0; |
| 154 | track->cb_color_bo[i] = NULL; | 218 | track->cb_color_bo[i] = NULL; |
| 155 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; | 219 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; |
| 220 | track->cb_color_bo_mc[i] = 0xFFFFFFFF; | ||
| 156 | } | 221 | } |
| 157 | track->cb_target_mask = 0xFFFFFFFF; | 222 | track->cb_target_mask = 0xFFFFFFFF; |
| 158 | track->cb_shader_mask = 0xFFFFFFFF; | 223 | track->cb_shader_mask = 0xFFFFFFFF; |
| 159 | track->db_bo = NULL; | 224 | track->db_bo = NULL; |
| 225 | track->db_bo_mc = 0xFFFFFFFF; | ||
| 160 | /* assume the biggest format and that htile is enabled */ | 226 | /* assume the biggest format and that htile is enabled */ |
| 161 | track->db_depth_info = 7 | (1 << 25); | 227 | track->db_depth_info = 7 | (1 << 25); |
| 162 | track->db_depth_view = 0xFFFFC000; | 228 | track->db_depth_view = 0xFFFFC000; |
| @@ -168,7 +234,10 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
| 168 | static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | 234 | static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) |
| 169 | { | 235 | { |
| 170 | struct r600_cs_track *track = p->track; | 236 | struct r600_cs_track *track = p->track; |
| 171 | u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align; | 237 | u32 bpe = 0, slice_tile_max, size, tmp; |
| 238 | u32 height, height_align, pitch, pitch_align, depth_align; | ||
| 239 | u64 base_offset, base_align; | ||
| 240 | struct array_mode_checker array_check; | ||
| 172 | volatile u32 *ib = p->ib->ptr; | 241 | volatile u32 *ib = p->ib->ptr; |
| 173 | unsigned array_mode; | 242 | unsigned array_mode; |
| 174 | 243 | ||
| @@ -183,60 +252,40 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
| 183 | i, track->cb_color_info[i]); | 252 | i, track->cb_color_info[i]); |
| 184 | return -EINVAL; | 253 | return -EINVAL; |
| 185 | } | 254 | } |
| 186 | /* pitch is the number of 8x8 tiles per row */ | 255 | /* pitch in pixels */ |
| 187 | pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1; | 256 | pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8; |
| 188 | slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; | 257 | slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; |
| 189 | slice_tile_max *= 64; | 258 | slice_tile_max *= 64; |
| 190 | height = slice_tile_max / (pitch * 8); | 259 | height = slice_tile_max / pitch; |
| 191 | if (height > 8192) | 260 | if (height > 8192) |
| 192 | height = 8192; | 261 | height = 8192; |
| 193 | array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); | 262 | array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); |
| 263 | |||
| 264 | base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i]; | ||
| 265 | array_check.array_mode = array_mode; | ||
| 266 | array_check.group_size = track->group_size; | ||
| 267 | array_check.nbanks = track->nbanks; | ||
| 268 | array_check.npipes = track->npipes; | ||
| 269 | array_check.nsamples = track->nsamples; | ||
| 270 | array_check.bpe = bpe; | ||
| 271 | if (r600_get_array_mode_alignment(&array_check, | ||
| 272 | &pitch_align, &height_align, &depth_align, &base_align)) { | ||
| 273 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, | ||
| 274 | G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, | ||
| 275 | track->cb_color_info[i]); | ||
| 276 | return -EINVAL; | ||
| 277 | } | ||
| 194 | switch (array_mode) { | 278 | switch (array_mode) { |
| 195 | case V_0280A0_ARRAY_LINEAR_GENERAL: | 279 | case V_0280A0_ARRAY_LINEAR_GENERAL: |
| 196 | /* technically height & 0x7 */ | ||
| 197 | break; | 280 | break; |
| 198 | case V_0280A0_ARRAY_LINEAR_ALIGNED: | 281 | case V_0280A0_ARRAY_LINEAR_ALIGNED: |
| 199 | pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; | ||
| 200 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
| 201 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | ||
| 202 | __func__, __LINE__, pitch); | ||
| 203 | return -EINVAL; | ||
| 204 | } | ||
| 205 | if (!IS_ALIGNED(height, 8)) { | ||
| 206 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | ||
| 207 | __func__, __LINE__, height); | ||
| 208 | return -EINVAL; | ||
| 209 | } | ||
| 210 | break; | 282 | break; |
| 211 | case V_0280A0_ARRAY_1D_TILED_THIN1: | 283 | case V_0280A0_ARRAY_1D_TILED_THIN1: |
| 212 | pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8; | ||
| 213 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
| 214 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | ||
| 215 | __func__, __LINE__, pitch); | ||
| 216 | return -EINVAL; | ||
| 217 | } | ||
| 218 | /* avoid breaking userspace */ | 284 | /* avoid breaking userspace */ |
| 219 | if (height > 7) | 285 | if (height > 7) |
| 220 | height &= ~0x7; | 286 | height &= ~0x7; |
| 221 | if (!IS_ALIGNED(height, 8)) { | ||
| 222 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | ||
| 223 | __func__, __LINE__, height); | ||
| 224 | return -EINVAL; | ||
| 225 | } | ||
| 226 | break; | 287 | break; |
| 227 | case V_0280A0_ARRAY_2D_TILED_THIN1: | 288 | case V_0280A0_ARRAY_2D_TILED_THIN1: |
| 228 | pitch_align = max((u32)track->nbanks, | ||
| 229 | (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8; | ||
| 230 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
| 231 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | ||
| 232 | __func__, __LINE__, pitch); | ||
| 233 | return -EINVAL; | ||
| 234 | } | ||
| 235 | if (!IS_ALIGNED((height / 8), track->npipes)) { | ||
| 236 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | ||
| 237 | __func__, __LINE__, height); | ||
| 238 | return -EINVAL; | ||
| 239 | } | ||
| 240 | break; | 289 | break; |
| 241 | default: | 290 | default: |
| 242 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, | 291 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, |
| @@ -244,13 +293,29 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
| 244 | track->cb_color_info[i]); | 293 | track->cb_color_info[i]); |
| 245 | return -EINVAL; | 294 | return -EINVAL; |
| 246 | } | 295 | } |
| 296 | |||
| 297 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
| 298 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | ||
| 299 | __func__, __LINE__, pitch); | ||
| 300 | return -EINVAL; | ||
| 301 | } | ||
| 302 | if (!IS_ALIGNED(height, height_align)) { | ||
| 303 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | ||
| 304 | __func__, __LINE__, height); | ||
| 305 | return -EINVAL; | ||
| 306 | } | ||
| 307 | if (!IS_ALIGNED(base_offset, base_align)) { | ||
| 308 | dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); | ||
| 309 | return -EINVAL; | ||
| 310 | } | ||
| 311 | |||
| 247 | /* check offset */ | 312 | /* check offset */ |
| 248 | tmp = height * pitch * 8 * bpe; | 313 | tmp = height * pitch * bpe; |
| 249 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { | 314 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { |
| 250 | if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { | 315 | if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { |
| 251 | /* the initial DDX does bad things with the CB size occasionally */ | 316 | /* the initial DDX does bad things with the CB size occasionally */ |
| 252 | /* it rounds up height too far for slice tile max but the BO is smaller */ | 317 | /* it rounds up height too far for slice tile max but the BO is smaller */ |
| 253 | tmp = (height - 7) * 8 * bpe; | 318 | tmp = (height - 7) * pitch * bpe; |
| 254 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { | 319 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { |
| 255 | dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); | 320 | dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); |
| 256 | return -EINVAL; | 321 | return -EINVAL; |
| @@ -260,15 +325,11 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
| 260 | return -EINVAL; | 325 | return -EINVAL; |
| 261 | } | 326 | } |
| 262 | } | 327 | } |
| 263 | if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) { | ||
| 264 | dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]); | ||
| 265 | return -EINVAL; | ||
| 266 | } | ||
| 267 | /* limit max tile */ | 328 | /* limit max tile */ |
| 268 | tmp = (height * pitch * 8) >> 6; | 329 | tmp = (height * pitch) >> 6; |
| 269 | if (tmp < slice_tile_max) | 330 | if (tmp < slice_tile_max) |
| 270 | slice_tile_max = tmp; | 331 | slice_tile_max = tmp; |
| 271 | tmp = S_028060_PITCH_TILE_MAX(pitch - 1) | | 332 | tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | |
| 272 | S_028060_SLICE_TILE_MAX(slice_tile_max - 1); | 333 | S_028060_SLICE_TILE_MAX(slice_tile_max - 1); |
| 273 | ib[track->cb_color_size_idx[i]] = tmp; | 334 | ib[track->cb_color_size_idx[i]] = tmp; |
| 274 | return 0; | 335 | return 0; |
| @@ -310,7 +371,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
| 310 | /* Check depth buffer */ | 371 | /* Check depth buffer */ |
| 311 | if (G_028800_STENCIL_ENABLE(track->db_depth_control) || | 372 | if (G_028800_STENCIL_ENABLE(track->db_depth_control) || |
| 312 | G_028800_Z_ENABLE(track->db_depth_control)) { | 373 | G_028800_Z_ENABLE(track->db_depth_control)) { |
| 313 | u32 nviews, bpe, ntiles, pitch, pitch_align, height, size, slice_tile_max; | 374 | u32 nviews, bpe, ntiles, size, slice_tile_max; |
| 375 | u32 height, height_align, pitch, pitch_align, depth_align; | ||
| 376 | u64 base_offset, base_align; | ||
| 377 | struct array_mode_checker array_check; | ||
| 378 | int array_mode; | ||
| 379 | |||
| 314 | if (track->db_bo == NULL) { | 380 | if (track->db_bo == NULL) { |
| 315 | dev_warn(p->dev, "z/stencil with no depth buffer\n"); | 381 | dev_warn(p->dev, "z/stencil with no depth buffer\n"); |
| 316 | return -EINVAL; | 382 | return -EINVAL; |
| @@ -353,41 +419,34 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
| 353 | ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); | 419 | ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); |
| 354 | } else { | 420 | } else { |
| 355 | size = radeon_bo_size(track->db_bo); | 421 | size = radeon_bo_size(track->db_bo); |
| 356 | pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1; | 422 | /* pitch in pixels */ |
| 423 | pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; | ||
| 357 | slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | 424 | slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; |
| 358 | slice_tile_max *= 64; | 425 | slice_tile_max *= 64; |
| 359 | height = slice_tile_max / (pitch * 8); | 426 | height = slice_tile_max / pitch; |
| 360 | if (height > 8192) | 427 | if (height > 8192) |
| 361 | height = 8192; | 428 | height = 8192; |
| 362 | switch (G_028010_ARRAY_MODE(track->db_depth_info)) { | 429 | base_offset = track->db_bo_mc + track->db_offset; |
| 430 | array_mode = G_028010_ARRAY_MODE(track->db_depth_info); | ||
| 431 | array_check.array_mode = array_mode; | ||
| 432 | array_check.group_size = track->group_size; | ||
| 433 | array_check.nbanks = track->nbanks; | ||
| 434 | array_check.npipes = track->npipes; | ||
| 435 | array_check.nsamples = track->nsamples; | ||
| 436 | array_check.bpe = bpe; | ||
| 437 | if (r600_get_array_mode_alignment(&array_check, | ||
| 438 | &pitch_align, &height_align, &depth_align, &base_align)) { | ||
| 439 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
| 440 | G_028010_ARRAY_MODE(track->db_depth_info), | ||
| 441 | track->db_depth_info); | ||
| 442 | return -EINVAL; | ||
| 443 | } | ||
| 444 | switch (array_mode) { | ||
| 363 | case V_028010_ARRAY_1D_TILED_THIN1: | 445 | case V_028010_ARRAY_1D_TILED_THIN1: |
| 364 | pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8); | ||
| 365 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
| 366 | dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", | ||
| 367 | __func__, __LINE__, pitch); | ||
| 368 | return -EINVAL; | ||
| 369 | } | ||
| 370 | /* don't break userspace */ | 446 | /* don't break userspace */ |
| 371 | height &= ~0x7; | 447 | height &= ~0x7; |
| 372 | if (!IS_ALIGNED(height, 8)) { | ||
| 373 | dev_warn(p->dev, "%s:%d db height (%d) invalid\n", | ||
| 374 | __func__, __LINE__, height); | ||
| 375 | return -EINVAL; | ||
| 376 | } | ||
| 377 | break; | 448 | break; |
| 378 | case V_028010_ARRAY_2D_TILED_THIN1: | 449 | case V_028010_ARRAY_2D_TILED_THIN1: |
| 379 | pitch_align = max((u32)track->nbanks, | ||
| 380 | (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; | ||
| 381 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
| 382 | dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", | ||
| 383 | __func__, __LINE__, pitch); | ||
| 384 | return -EINVAL; | ||
| 385 | } | ||
| 386 | if (!IS_ALIGNED((height / 8), track->npipes)) { | ||
| 387 | dev_warn(p->dev, "%s:%d db height (%d) invalid\n", | ||
| 388 | __func__, __LINE__, height); | ||
| 389 | return -EINVAL; | ||
| 390 | } | ||
| 391 | break; | 450 | break; |
| 392 | default: | 451 | default: |
| 393 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | 452 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, |
| @@ -395,15 +454,27 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
| 395 | track->db_depth_info); | 454 | track->db_depth_info); |
| 396 | return -EINVAL; | 455 | return -EINVAL; |
| 397 | } | 456 | } |
| 398 | if (!IS_ALIGNED(track->db_offset, track->group_size)) { | 457 | |
| 399 | dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset); | 458 | if (!IS_ALIGNED(pitch, pitch_align)) { |
| 459 | dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", | ||
| 460 | __func__, __LINE__, pitch); | ||
| 461 | return -EINVAL; | ||
| 462 | } | ||
| 463 | if (!IS_ALIGNED(height, height_align)) { | ||
| 464 | dev_warn(p->dev, "%s:%d db height (%d) invalid\n", | ||
| 465 | __func__, __LINE__, height); | ||
| 400 | return -EINVAL; | 466 | return -EINVAL; |
| 401 | } | 467 | } |
| 468 | if (!IS_ALIGNED(base_offset, base_align)) { | ||
| 469 | dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); | ||
| 470 | return -EINVAL; | ||
| 471 | } | ||
| 472 | |||
| 402 | ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | 473 | ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; |
| 403 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; | 474 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; |
| 404 | tmp = ntiles * bpe * 64 * nviews; | 475 | tmp = ntiles * bpe * 64 * nviews; |
| 405 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { | 476 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { |
| 406 | dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n", | 477 | dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n", |
| 407 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, | 478 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, |
| 408 | radeon_bo_size(track->db_bo)); | 479 | radeon_bo_size(track->db_bo)); |
| 409 | return -EINVAL; | 480 | return -EINVAL; |
| @@ -954,6 +1025,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
| 954 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1025 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
| 955 | track->cb_color_base_last[tmp] = ib[idx]; | 1026 | track->cb_color_base_last[tmp] = ib[idx]; |
| 956 | track->cb_color_bo[tmp] = reloc->robj; | 1027 | track->cb_color_bo[tmp] = reloc->robj; |
| 1028 | track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; | ||
| 957 | break; | 1029 | break; |
| 958 | case DB_DEPTH_BASE: | 1030 | case DB_DEPTH_BASE: |
| 959 | r = r600_cs_packet_next_reloc(p, &reloc); | 1031 | r = r600_cs_packet_next_reloc(p, &reloc); |
| @@ -965,6 +1037,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
| 965 | track->db_offset = radeon_get_ib_value(p, idx) << 8; | 1037 | track->db_offset = radeon_get_ib_value(p, idx) << 8; |
| 966 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1038 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
| 967 | track->db_bo = reloc->robj; | 1039 | track->db_bo = reloc->robj; |
| 1040 | track->db_bo_mc = reloc->lobj.gpu_offset; | ||
| 968 | break; | 1041 | break; |
| 969 | case DB_HTILE_DATA_BASE: | 1042 | case DB_HTILE_DATA_BASE: |
| 970 | case SQ_PGM_START_FS: | 1043 | case SQ_PGM_START_FS: |
| @@ -1086,16 +1159,25 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels | |||
| 1086 | static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | 1159 | static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, |
| 1087 | struct radeon_bo *texture, | 1160 | struct radeon_bo *texture, |
| 1088 | struct radeon_bo *mipmap, | 1161 | struct radeon_bo *mipmap, |
| 1162 | u64 base_offset, | ||
| 1163 | u64 mip_offset, | ||
| 1089 | u32 tiling_flags) | 1164 | u32 tiling_flags) |
| 1090 | { | 1165 | { |
| 1091 | struct r600_cs_track *track = p->track; | 1166 | struct r600_cs_track *track = p->track; |
| 1092 | u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; | 1167 | u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; |
| 1093 | u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align; | 1168 | u32 word0, word1, l0_size, mipmap_size; |
| 1169 | u32 height_align, pitch, pitch_align, depth_align; | ||
| 1170 | u64 base_align; | ||
| 1171 | struct array_mode_checker array_check; | ||
| 1094 | 1172 | ||
| 1095 | /* on legacy kernel we don't perform advanced check */ | 1173 | /* on legacy kernel we don't perform advanced check */ |
| 1096 | if (p->rdev == NULL) | 1174 | if (p->rdev == NULL) |
| 1097 | return 0; | 1175 | return 0; |
| 1098 | 1176 | ||
| 1177 | /* convert to bytes */ | ||
| 1178 | base_offset <<= 8; | ||
| 1179 | mip_offset <<= 8; | ||
| 1180 | |||
| 1099 | word0 = radeon_get_ib_value(p, idx + 0); | 1181 | word0 = radeon_get_ib_value(p, idx + 0); |
| 1100 | if (tiling_flags & RADEON_TILING_MACRO) | 1182 | if (tiling_flags & RADEON_TILING_MACRO) |
| 1101 | word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); | 1183 | word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); |
| @@ -1128,46 +1210,38 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i | |||
| 1128 | return -EINVAL; | 1210 | return -EINVAL; |
| 1129 | } | 1211 | } |
| 1130 | 1212 | ||
| 1131 | pitch = G_038000_PITCH(word0) + 1; | 1213 | /* pitch in texels */ |
| 1132 | switch (G_038000_TILE_MODE(word0)) { | 1214 | pitch = (G_038000_PITCH(word0) + 1) * 8; |
| 1133 | case V_038000_ARRAY_LINEAR_GENERAL: | 1215 | array_check.array_mode = G_038000_TILE_MODE(word0); |
| 1134 | pitch_align = 1; | 1216 | array_check.group_size = track->group_size; |
| 1135 | /* XXX check height align */ | 1217 | array_check.nbanks = track->nbanks; |
| 1136 | break; | 1218 | array_check.npipes = track->npipes; |
| 1137 | case V_038000_ARRAY_LINEAR_ALIGNED: | 1219 | array_check.nsamples = 1; |
| 1138 | pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; | 1220 | array_check.bpe = bpe; |
| 1139 | if (!IS_ALIGNED(pitch, pitch_align)) { | 1221 | if (r600_get_array_mode_alignment(&array_check, |
| 1140 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", | 1222 | &pitch_align, &height_align, &depth_align, &base_align)) { |
| 1141 | __func__, __LINE__, pitch); | 1223 | dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", |
| 1142 | return -EINVAL; | 1224 | __func__, __LINE__, G_038000_TILE_MODE(word0)); |
| 1143 | } | 1225 | return -EINVAL; |
| 1144 | /* XXX check height align */ | 1226 | } |
| 1145 | break; | 1227 | |
| 1146 | case V_038000_ARRAY_1D_TILED_THIN1: | 1228 | /* XXX check height as well... */ |
| 1147 | pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8; | 1229 | |
| 1148 | if (!IS_ALIGNED(pitch, pitch_align)) { | 1230 | if (!IS_ALIGNED(pitch, pitch_align)) { |
| 1149 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", | 1231 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", |
| 1150 | __func__, __LINE__, pitch); | 1232 | __func__, __LINE__, pitch); |
| 1151 | return -EINVAL; | 1233 | return -EINVAL; |
| 1152 | } | 1234 | } |
| 1153 | /* XXX check height align */ | 1235 | if (!IS_ALIGNED(base_offset, base_align)) { |
| 1154 | break; | 1236 | dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n", |
| 1155 | case V_038000_ARRAY_2D_TILED_THIN1: | 1237 | __func__, __LINE__, base_offset); |
| 1156 | pitch_align = max((u32)track->nbanks, | 1238 | return -EINVAL; |
| 1157 | (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; | 1239 | } |
| 1158 | if (!IS_ALIGNED(pitch, pitch_align)) { | 1240 | if (!IS_ALIGNED(mip_offset, base_align)) { |
| 1159 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", | 1241 | dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n", |
| 1160 | __func__, __LINE__, pitch); | 1242 | __func__, __LINE__, mip_offset); |
| 1161 | return -EINVAL; | ||
| 1162 | } | ||
| 1163 | /* XXX check height align */ | ||
| 1164 | break; | ||
| 1165 | default: | ||
| 1166 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
| 1167 | G_038000_TILE_MODE(word0), word0); | ||
| 1168 | return -EINVAL; | 1243 | return -EINVAL; |
| 1169 | } | 1244 | } |
| 1170 | /* XXX check offset align */ | ||
| 1171 | 1245 | ||
| 1172 | word0 = radeon_get_ib_value(p, idx + 4); | 1246 | word0 = radeon_get_ib_value(p, idx + 4); |
| 1173 | word1 = radeon_get_ib_value(p, idx + 5); | 1247 | word1 = radeon_get_ib_value(p, idx + 5); |
| @@ -1402,7 +1476,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
| 1402 | mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1476 | mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
| 1403 | mipmap = reloc->robj; | 1477 | mipmap = reloc->robj; |
| 1404 | r = r600_check_texture_resource(p, idx+(i*7)+1, | 1478 | r = r600_check_texture_resource(p, idx+(i*7)+1, |
| 1405 | texture, mipmap, reloc->lobj.tiling_flags); | 1479 | texture, mipmap, |
| 1480 | base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), | ||
| 1481 | mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), | ||
| 1482 | reloc->lobj.tiling_flags); | ||
| 1406 | if (r) | 1483 | if (r) |
| 1407 | return r; | 1484 | return r; |
| 1408 | ib[idx+1+(i*7)+2] += base_offset; | 1485 | ib[idx+1+(i*7)+2] += base_offset; |
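The new r600_get_array_mode_alignment() gives the color-buffer, depth-buffer and texture validators one shared source of pitch/height/base alignment rules. A worked 2D-tiled example with made-up but plausible parameters (256-byte groups, 4 banks, 2 pipes, 32 bpp, 1 sample), reusing the function's formulas verbatim:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        /* sample configuration: 256-byte groups, 4 banks, 2 pipes, 32bpp, 1 sample */
        u32 group_size = 256, nbanks = 4, npipes = 2, nsamples = 1, bpe = 4;

        /* constants and formulas copied from the ARRAY_2D_TILED_THIN1 case */
        u32 tile_width = 8, tile_height = 8;
        u32 macro_tile_width = nbanks;
        u32 macro_tile_height = npipes;
        u32 tile_bytes = tile_width * tile_height * bpe * nsamples;               /* 256   */
        u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; /* 2048  */

        u32 pitch_align = MAX(macro_tile_width,
                              ((group_size / tile_height) / (bpe * nsamples)) * nbanks)
                          * tile_width;                                           /* 256 pixels */
        u32 height_align = macro_tile_height * tile_height;                       /* 16 lines   */
        u64 base_align = MAX((u64)macro_tile_bytes,
                             (u64)pitch_align * bpe * height_align * nsamples);   /* 16384 bytes */

        printf("pitch_align=%u height_align=%u base_align=%llu\n",
               pitch_align, height_align, (unsigned long long)base_align);
        return 0;
    }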
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 966a793e225b..bff4dc4f410f 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
| @@ -51,6 +51,12 @@ | |||
| 51 | #define PTE_READABLE (1 << 5) | 51 | #define PTE_READABLE (1 << 5) |
| 52 | #define PTE_WRITEABLE (1 << 6) | 52 | #define PTE_WRITEABLE (1 << 6) |
| 53 | 53 | ||
| 54 | /* tiling bits */ | ||
| 55 | #define ARRAY_LINEAR_GENERAL 0x00000000 | ||
| 56 | #define ARRAY_LINEAR_ALIGNED 0x00000001 | ||
| 57 | #define ARRAY_1D_TILED_THIN1 0x00000002 | ||
| 58 | #define ARRAY_2D_TILED_THIN1 0x00000004 | ||
| 59 | |||
| 54 | /* Registers */ | 60 | /* Registers */ |
| 55 | #define ARB_POP 0x2418 | 61 | #define ARB_POP 0x2418 |
| 56 | #define ENABLE_TC128 (1 << 30) | 62 | #define ENABLE_TC128 (1 << 30) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 73f600d39ad4..3a7095743d44 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -1262,6 +1262,10 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); | |||
| 1262 | (rdev->family == CHIP_RS400) || \ | 1262 | (rdev->family == CHIP_RS400) || \ |
| 1263 | (rdev->family == CHIP_RS480)) | 1263 | (rdev->family == CHIP_RS480)) |
| 1264 | #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) | 1264 | #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) |
| 1265 | #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ | ||
| 1266 | (rdev->family == CHIP_RS690) || \ | ||
| 1267 | (rdev->family == CHIP_RS740) || \ | ||
| 1268 | (rdev->family >= CHIP_R600)) | ||
| 1265 | #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) | 1269 | #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) |
| 1266 | #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) | 1270 | #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) |
| 1267 | #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) | 1271 | #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) |
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 7932dc4d6b90..c558685cc637 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
| @@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
| 41 | 41 | ||
| 42 | size = bsize; | 42 | size = bsize; |
| 43 | n = 1024; | 43 | n = 1024; |
| 44 | r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj); | 44 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj); |
| 45 | if (r) { | 45 | if (r) { |
| 46 | goto out_cleanup; | 46 | goto out_cleanup; |
| 47 | } | 47 | } |
| @@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
| 53 | if (r) { | 53 | if (r) { |
| 54 | goto out_cleanup; | 54 | goto out_cleanup; |
| 55 | } | 55 | } |
| 56 | r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj); | 56 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj); |
| 57 | if (r) { | 57 | if (r) { |
| 58 | goto out_cleanup; | 58 | goto out_cleanup; |
| 59 | } | 59 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 7b7ea269549c..3bddea5b5295 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -571,6 +571,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
| 571 | } | 571 | } |
| 572 | 572 | ||
| 573 | if (clk_mask && data_mask) { | 573 | if (clk_mask && data_mask) { |
| 574 | /* system specific masks */ | ||
| 574 | i2c.mask_clk_mask = clk_mask; | 575 | i2c.mask_clk_mask = clk_mask; |
| 575 | i2c.mask_data_mask = data_mask; | 576 | i2c.mask_data_mask = data_mask; |
| 576 | i2c.a_clk_mask = clk_mask; | 577 | i2c.a_clk_mask = clk_mask; |
| @@ -579,7 +580,19 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
| 579 | i2c.en_data_mask = data_mask; | 580 | i2c.en_data_mask = data_mask; |
| 580 | i2c.y_clk_mask = clk_mask; | 581 | i2c.y_clk_mask = clk_mask; |
| 581 | i2c.y_data_mask = data_mask; | 582 | i2c.y_data_mask = data_mask; |
| 583 | } else if ((ddc_line == RADEON_GPIOPAD_MASK) || | ||
| 584 | (ddc_line == RADEON_MDGPIO_MASK)) { | ||
| 585 | /* default gpiopad masks */ | ||
| 586 | i2c.mask_clk_mask = (0x20 << 8); | ||
| 587 | i2c.mask_data_mask = 0x80; | ||
| 588 | i2c.a_clk_mask = (0x20 << 8); | ||
| 589 | i2c.a_data_mask = 0x80; | ||
| 590 | i2c.en_clk_mask = (0x20 << 8); | ||
| 591 | i2c.en_data_mask = 0x80; | ||
| 592 | i2c.y_clk_mask = (0x20 << 8); | ||
| 593 | i2c.y_data_mask = 0x80; | ||
| 582 | } else { | 594 | } else { |
| 595 | /* default masks for ddc pads */ | ||
| 583 | i2c.mask_clk_mask = RADEON_GPIO_EN_1; | 596 | i2c.mask_clk_mask = RADEON_GPIO_EN_1; |
| 584 | i2c.mask_data_mask = RADEON_GPIO_EN_0; | 597 | i2c.mask_data_mask = RADEON_GPIO_EN_0; |
| 585 | i2c.a_clk_mask = RADEON_GPIO_A_1; | 598 | i2c.a_clk_mask = RADEON_GPIO_A_1; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index fe6c74780f18..3bef9f6d66fd 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -1008,9 +1008,21 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector) | |||
| 1008 | static int radeon_dp_get_modes(struct drm_connector *connector) | 1008 | static int radeon_dp_get_modes(struct drm_connector *connector) |
| 1009 | { | 1009 | { |
| 1010 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 1010 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
| 1011 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; | ||
| 1011 | int ret; | 1012 | int ret; |
| 1012 | 1013 | ||
| 1014 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
| 1015 | if (!radeon_dig_connector->edp_on) | ||
| 1016 | atombios_set_edp_panel_power(connector, | ||
| 1017 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
| 1018 | } | ||
| 1013 | ret = radeon_ddc_get_modes(radeon_connector); | 1019 | ret = radeon_ddc_get_modes(radeon_connector); |
| 1020 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
| 1021 | if (!radeon_dig_connector->edp_on) | ||
| 1022 | atombios_set_edp_panel_power(connector, | ||
| 1023 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
| 1024 | } | ||
| 1025 | |||
| 1014 | return ret; | 1026 | return ret; |
| 1015 | } | 1027 | } |
| 1016 | 1028 | ||
| @@ -1029,8 +1041,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
| 1029 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | 1041 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { |
| 1030 | /* eDP is always DP */ | 1042 | /* eDP is always DP */ |
| 1031 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; | 1043 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; |
| 1044 | if (!radeon_dig_connector->edp_on) | ||
| 1045 | atombios_set_edp_panel_power(connector, | ||
| 1046 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
| 1032 | if (radeon_dp_getdpcd(radeon_connector)) | 1047 | if (radeon_dp_getdpcd(radeon_connector)) |
| 1033 | ret = connector_status_connected; | 1048 | ret = connector_status_connected; |
| 1049 | if (!radeon_dig_connector->edp_on) | ||
| 1050 | atombios_set_edp_panel_power(connector, | ||
| 1051 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
| 1034 | } else { | 1052 | } else { |
| 1035 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); | 1053 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); |
| 1036 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { | 1054 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { |
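Both the eDP mode-probe and detect paths now bracket their DDC/DPCD access with panel power: switch the panel on if it was off, probe, then switch it back off. The shape of that bracketing, modelled in userspace with demo_* stubs standing in for atombios_set_edp_panel_power() and radeon_dp_getdpcd():

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace model of the bracketing added above: the panel is powered
     * up only for the duration of the probe, and only if it was not
     * already on. */
    static void demo_set_edp_panel_power(bool on)
    {
        printf("eDP panel power %s\n", on ? "ON" : "OFF");
    }

    static bool demo_dp_getdpcd(void)
    {
        printf("reading DPCD over the aux channel\n");
        return true;
    }

    static bool demo_edp_detect(bool edp_already_on)
    {
        bool connected;

        if (!edp_already_on)
            demo_set_edp_panel_power(true);

        connected = demo_dp_getdpcd();

        if (!edp_already_on)
            demo_set_edp_panel_power(false);

        return connected;
    }

    int main(void)
    {
        demo_edp_detect(false);   /* panel gets powered up and back down */
        return 0;
    }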
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 8adfedfe547f..d8ac1849180d 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -180,7 +180,7 @@ int radeon_wb_init(struct radeon_device *rdev) | |||
| 180 | int r; | 180 | int r; |
| 181 | 181 | ||
| 182 | if (rdev->wb.wb_obj == NULL) { | 182 | if (rdev->wb.wb_obj == NULL) { |
| 183 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, | 183 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, |
| 184 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); | 184 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); |
| 185 | if (r) { | 185 | if (r) { |
| 186 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); | 186 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index f678257c42e6..041943df966b 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -176,6 +176,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) | |||
| 176 | return false; | 176 | return false; |
| 177 | } | 177 | } |
| 178 | } | 178 | } |
| 179 | |||
| 179 | void | 180 | void |
| 180 | radeon_link_encoder_connector(struct drm_device *dev) | 181 | radeon_link_encoder_connector(struct drm_device *dev) |
| 181 | { | 182 | { |
| @@ -228,6 +229,27 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) | |||
| 228 | return NULL; | 229 | return NULL; |
| 229 | } | 230 | } |
| 230 | 231 | ||
| 232 | struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder) | ||
| 233 | { | ||
| 234 | struct drm_device *dev = encoder->dev; | ||
| 235 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
| 236 | struct drm_encoder *other_encoder; | ||
| 237 | struct radeon_encoder *other_radeon_encoder; | ||
| 238 | |||
| 239 | if (radeon_encoder->is_ext_encoder) | ||
| 240 | return NULL; | ||
| 241 | |||
| 242 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { | ||
| 243 | if (other_encoder == encoder) | ||
| 244 | continue; | ||
| 245 | other_radeon_encoder = to_radeon_encoder(other_encoder); | ||
| 246 | if (other_radeon_encoder->is_ext_encoder && | ||
| 247 | (radeon_encoder->devices & other_radeon_encoder->devices)) | ||
| 248 | return other_encoder; | ||
| 249 | } | ||
| 250 | return NULL; | ||
| 251 | } | ||
| 252 | |||
| 231 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, | 253 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, |
| 232 | struct drm_display_mode *adjusted_mode) | 254 | struct drm_display_mode *adjusted_mode) |
| 233 | { | 255 | { |
| @@ -426,52 +448,49 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) | |||
| 426 | 448 | ||
| 427 | } | 449 | } |
| 428 | 450 | ||
| 429 | void | 451 | union dvo_encoder_control { |
| 430 | atombios_external_tmds_setup(struct drm_encoder *encoder, int action) | 452 | ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; |
| 431 | { | 453 | DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; |
| 432 | struct drm_device *dev = encoder->dev; | 454 | DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; |
| 433 | struct radeon_device *rdev = dev->dev_private; | 455 | }; |
| 434 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
| 435 | ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args; | ||
| 436 | int index = 0; | ||
| 437 | |||
| 438 | memset(&args, 0, sizeof(args)); | ||
| 439 | |||
| 440 | index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); | ||
| 441 | |||
| 442 | args.sXTmdsEncoder.ucEnable = action; | ||
| 443 | |||
| 444 | if (radeon_encoder->pixel_clock > 165000) | ||
| 445 | args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL; | ||
| 446 | |||
| 447 | /*if (pScrn->rgbBits == 8)*/ | ||
| 448 | args.sXTmdsEncoder.ucMisc |= (1 << 1); | ||
| 449 | |||
| 450 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 451 | |||
| 452 | } | ||
| 453 | 456 | ||
| 454 | static void | 457 | void |
| 455 | atombios_ddia_setup(struct drm_encoder *encoder, int action) | 458 | atombios_dvo_setup(struct drm_encoder *encoder, int action) |
| 456 | { | 459 | { |
| 457 | struct drm_device *dev = encoder->dev; | 460 | struct drm_device *dev = encoder->dev; |
| 458 | struct radeon_device *rdev = dev->dev_private; | 461 | struct radeon_device *rdev = dev->dev_private; |
| 459 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 462 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 460 | DVO_ENCODER_CONTROL_PS_ALLOCATION args; | 463 | union dvo_encoder_control args; |
| 461 | int index = 0; | 464 | int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); |
| 462 | 465 | ||
| 463 | memset(&args, 0, sizeof(args)); | 466 | memset(&args, 0, sizeof(args)); |
| 464 | 467 | ||
| 465 | index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); | 468 | if (ASIC_IS_DCE3(rdev)) { |
| 469 | /* DCE3+ */ | ||
| 470 | args.dvo_v3.ucAction = action; | ||
| 471 | args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
| 472 | args.dvo_v3.ucDVOConfig = 0; /* XXX */ | ||
| 473 | } else if (ASIC_IS_DCE2(rdev)) { | ||
| 474 | /* DCE2 (pre-DCE3 R6xx, RS600/690/740) */ | ||
| 475 | args.dvo.sDVOEncoder.ucAction = action; | ||
| 476 | args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
| 477 | /* DFP1, CRT1, TV1 depending on the type of port */ | ||
| 478 | args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; | ||
| 479 | |||
| 480 | if (radeon_encoder->pixel_clock > 165000) | ||
| 481 | args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; | ||
| 482 | } else { | ||
| 483 | /* R4xx, R5xx */ | ||
| 484 | args.ext_tmds.sXTmdsEncoder.ucEnable = action; | ||
| 466 | 485 | ||
| 467 | args.sDVOEncoder.ucAction = action; | 486 | if (radeon_encoder->pixel_clock > 165000) |
| 468 | args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 487 | args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
| 469 | 488 | ||
| 470 | if (radeon_encoder->pixel_clock > 165000) | 489 | /*if (pScrn->rgbBits == 8)*/ |
| 471 | args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL; | 490 | args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; |
| 491 | } | ||
| 472 | 492 | ||
| 473 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 493 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
| 474 | |||
| 475 | } | 494 | } |
| 476 | 495 | ||
| 477 | union lvds_encoder_control { | 496 | union lvds_encoder_control { |
| @@ -532,14 +551,14 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
| 532 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) | 551 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) |
| 533 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 552 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
| 534 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) | 553 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) |
| 535 | args.v1.ucMisc |= (1 << 1); | 554 | args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; |
| 536 | } else { | 555 | } else { |
| 537 | if (dig->linkb) | 556 | if (dig->linkb) |
| 538 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; | 557 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; |
| 539 | if (radeon_encoder->pixel_clock > 165000) | 558 | if (radeon_encoder->pixel_clock > 165000) |
| 540 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 559 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
| 541 | /*if (pScrn->rgbBits == 8) */ | 560 | /*if (pScrn->rgbBits == 8) */ |
| 542 | args.v1.ucMisc |= (1 << 1); | 561 | args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; |
| 543 | } | 562 | } |
| 544 | break; | 563 | break; |
| 545 | case 2: | 564 | case 2: |
| @@ -595,6 +614,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
| 595 | int | 614 | int |
| 596 | atombios_get_encoder_mode(struct drm_encoder *encoder) | 615 | atombios_get_encoder_mode(struct drm_encoder *encoder) |
| 597 | { | 616 | { |
| 617 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
| 598 | struct drm_device *dev = encoder->dev; | 618 | struct drm_device *dev = encoder->dev; |
| 599 | struct radeon_device *rdev = dev->dev_private; | 619 | struct radeon_device *rdev = dev->dev_private; |
| 600 | struct drm_connector *connector; | 620 | struct drm_connector *connector; |
| @@ -602,9 +622,20 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
| 602 | struct radeon_connector_atom_dig *dig_connector; | 622 | struct radeon_connector_atom_dig *dig_connector; |
| 603 | 623 | ||
| 604 | connector = radeon_get_connector_for_encoder(encoder); | 624 | connector = radeon_get_connector_for_encoder(encoder); |
| 605 | if (!connector) | 625 | if (!connector) { |
| 606 | return 0; | 626 | switch (radeon_encoder->encoder_id) { |
| 607 | 627 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | |
| 628 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 629 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 630 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
| 631 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
| 632 | return ATOM_ENCODER_MODE_DVI; | ||
| 633 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
| 634 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
| 635 | default: | ||
| 636 | return ATOM_ENCODER_MODE_CRT; | ||
| 637 | } | ||
| 638 | } | ||
| 608 | radeon_connector = to_radeon_connector(connector); | 639 | radeon_connector = to_radeon_connector(connector); |
| 609 | 640 | ||
| 610 | switch (connector->connector_type) { | 641 | switch (connector->connector_type) { |
| @@ -834,6 +865,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
| 834 | memset(&args, 0, sizeof(args)); | 865 | memset(&args, 0, sizeof(args)); |
| 835 | 866 | ||
| 836 | switch (radeon_encoder->encoder_id) { | 867 | switch (radeon_encoder->encoder_id) { |
| 868 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
| 869 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | ||
| 870 | break; | ||
| 837 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 871 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
| 838 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 872 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
| 839 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 873 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
| @@ -978,6 +1012,105 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
| 978 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1012 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
| 979 | } | 1013 | } |
| 980 | 1014 | ||
| 1015 | void | ||
| 1016 | atombios_set_edp_panel_power(struct drm_connector *connector, int action) | ||
| 1017 | { | ||
| 1018 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
| 1019 | struct drm_device *dev = radeon_connector->base.dev; | ||
| 1020 | struct radeon_device *rdev = dev->dev_private; | ||
| 1021 | union dig_transmitter_control args; | ||
| 1022 | int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); | ||
| 1023 | uint8_t frev, crev; | ||
| 1024 | |||
| 1025 | if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) | ||
| 1026 | return; | ||
| 1027 | |||
| 1028 | if (!ASIC_IS_DCE4(rdev)) | ||
| 1029 | return; | ||
| 1030 | |||
| 1031 | if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && | ||
| 1032 | (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) | ||
| 1033 | return; | ||
| 1034 | |||
| 1035 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
| 1036 | return; | ||
| 1037 | |||
| 1038 | memset(&args, 0, sizeof(args)); | ||
| 1039 | |||
| 1040 | args.v1.ucAction = action; | ||
| 1041 | |||
| 1042 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1043 | } | ||
| 1044 | |||
| 1045 | union external_encoder_control { | ||
| 1046 | EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; | ||
| 1047 | }; | ||
| 1048 | |||
| 1049 | static void | ||
| 1050 | atombios_external_encoder_setup(struct drm_encoder *encoder, | ||
| 1051 | struct drm_encoder *ext_encoder, | ||
| 1052 | int action) | ||
| 1053 | { | ||
| 1054 | struct drm_device *dev = encoder->dev; | ||
| 1055 | struct radeon_device *rdev = dev->dev_private; | ||
| 1056 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
| 1057 | union external_encoder_control args; | ||
| 1058 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
| 1059 | int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); | ||
| 1060 | u8 frev, crev; | ||
| 1061 | int dp_clock = 0; | ||
| 1062 | int dp_lane_count = 0; | ||
| 1063 | int connector_object_id = 0; | ||
| 1064 | |||
| 1065 | if (connector) { | ||
| 1066 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
| 1067 | struct radeon_connector_atom_dig *dig_connector = | ||
| 1068 | radeon_connector->con_priv; | ||
| 1069 | |||
| 1070 | dp_clock = dig_connector->dp_clock; | ||
| 1071 | dp_lane_count = dig_connector->dp_lane_count; | ||
| 1072 | connector_object_id = | ||
| 1073 | (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
| 1074 | } | ||
| 1075 | |||
| 1076 | memset(&args, 0, sizeof(args)); | ||
| 1077 | |||
| 1078 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
| 1079 | return; | ||
| 1080 | |||
| 1081 | switch (frev) { | ||
| 1082 | case 1: | ||
| 1083 | /* no params on frev 1 */ | ||
| 1084 | break; | ||
| 1085 | case 2: | ||
| 1086 | switch (crev) { | ||
| 1087 | case 1: | ||
| 1088 | case 2: | ||
| 1089 | args.v1.sDigEncoder.ucAction = action; | ||
| 1090 | args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
| 1091 | args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); | ||
| 1092 | |||
| 1093 | if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) { | ||
| 1094 | if (dp_clock == 270000) | ||
| 1095 | args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
| 1096 | args.v1.sDigEncoder.ucLaneNum = dp_lane_count; | ||
| 1097 | } else if (radeon_encoder->pixel_clock > 165000) | ||
| 1098 | args.v1.sDigEncoder.ucLaneNum = 8; | ||
| 1099 | else | ||
| 1100 | args.v1.sDigEncoder.ucLaneNum = 4; | ||
| 1101 | break; | ||
| 1102 | default: | ||
| 1103 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | ||
| 1104 | return; | ||
| 1105 | } | ||
| 1106 | break; | ||
| 1107 | default: | ||
| 1108 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | ||
| 1109 | return; | ||
| 1110 | } | ||
| 1111 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1112 | } | ||
| 1113 | |||
| 981 | static void | 1114 | static void |
| 982 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) | 1115 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) |
| 983 | { | 1116 | { |
| @@ -1021,6 +1154,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 1021 | struct drm_device *dev = encoder->dev; | 1154 | struct drm_device *dev = encoder->dev; |
| 1022 | struct radeon_device *rdev = dev->dev_private; | 1155 | struct radeon_device *rdev = dev->dev_private; |
| 1023 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1156 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 1157 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | ||
| 1024 | DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; | 1158 | DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; |
| 1025 | int index = 0; | 1159 | int index = 0; |
| 1026 | bool is_dig = false; | 1160 | bool is_dig = false; |
| @@ -1043,9 +1177,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 1043 | break; | 1177 | break; |
| 1044 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 1178 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
| 1045 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 1179 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
| 1046 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
| 1047 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | 1180 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); |
| 1048 | break; | 1181 | break; |
| 1182 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
| 1183 | if (ASIC_IS_DCE3(rdev)) | ||
| 1184 | is_dig = true; | ||
| 1185 | else | ||
| 1186 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | ||
| 1187 | break; | ||
| 1049 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1188 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
| 1050 | index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); | 1189 | index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); |
| 1051 | break; | 1190 | break; |
| @@ -1082,34 +1221,85 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 1082 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1221 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
| 1083 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1222 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
| 1084 | 1223 | ||
| 1224 | if (connector && | ||
| 1225 | (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { | ||
| 1226 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
| 1227 | struct radeon_connector_atom_dig *radeon_dig_connector = | ||
| 1228 | radeon_connector->con_priv; | ||
| 1229 | atombios_set_edp_panel_power(connector, | ||
| 1230 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
| 1231 | radeon_dig_connector->edp_on = true; | ||
| 1232 | } | ||
| 1085 | dp_link_train(encoder, connector); | 1233 | dp_link_train(encoder, connector); |
| 1086 | if (ASIC_IS_DCE4(rdev)) | 1234 | if (ASIC_IS_DCE4(rdev)) |
| 1087 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); | 1235 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); |
| 1088 | } | 1236 | } |
| 1237 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
| 1238 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); | ||
| 1089 | break; | 1239 | break; |
| 1090 | case DRM_MODE_DPMS_STANDBY: | 1240 | case DRM_MODE_DPMS_STANDBY: |
| 1091 | case DRM_MODE_DPMS_SUSPEND: | 1241 | case DRM_MODE_DPMS_SUSPEND: |
| 1092 | case DRM_MODE_DPMS_OFF: | 1242 | case DRM_MODE_DPMS_OFF: |
| 1093 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); | 1243 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); |
| 1094 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1244 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
| 1245 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
| 1246 | |||
| 1095 | if (ASIC_IS_DCE4(rdev)) | 1247 | if (ASIC_IS_DCE4(rdev)) |
| 1096 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); | 1248 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); |
| 1249 | if (connector && | ||
| 1250 | (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { | ||
| 1251 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
| 1252 | struct radeon_connector_atom_dig *radeon_dig_connector = | ||
| 1253 | radeon_connector->con_priv; | ||
| 1254 | atombios_set_edp_panel_power(connector, | ||
| 1255 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
| 1256 | radeon_dig_connector->edp_on = false; | ||
| 1257 | } | ||
| 1097 | } | 1258 | } |
| 1259 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
| 1260 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); | ||
| 1098 | break; | 1261 | break; |
| 1099 | } | 1262 | } |
| 1100 | } else { | 1263 | } else { |
| 1101 | switch (mode) { | 1264 | switch (mode) { |
| 1102 | case DRM_MODE_DPMS_ON: | 1265 | case DRM_MODE_DPMS_ON: |
| 1103 | args.ucAction = ATOM_ENABLE; | 1266 | args.ucAction = ATOM_ENABLE; |
| 1267 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1268 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
| 1269 | args.ucAction = ATOM_LCD_BLON; | ||
| 1270 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1271 | } | ||
| 1104 | break; | 1272 | break; |
| 1105 | case DRM_MODE_DPMS_STANDBY: | 1273 | case DRM_MODE_DPMS_STANDBY: |
| 1106 | case DRM_MODE_DPMS_SUSPEND: | 1274 | case DRM_MODE_DPMS_SUSPEND: |
| 1107 | case DRM_MODE_DPMS_OFF: | 1275 | case DRM_MODE_DPMS_OFF: |
| 1108 | args.ucAction = ATOM_DISABLE; | 1276 | args.ucAction = ATOM_DISABLE; |
| 1277 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1278 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
| 1279 | args.ucAction = ATOM_LCD_BLOFF; | ||
| 1280 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1281 | } | ||
| 1109 | break; | 1282 | break; |
| 1110 | } | 1283 | } |
| 1111 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1112 | } | 1284 | } |
| 1285 | |||
| 1286 | if (ext_encoder) { | ||
| 1287 | int action; | ||
| 1288 | |||
| 1289 | switch (mode) { | ||
| 1290 | case DRM_MODE_DPMS_ON: | ||
| 1291 | default: | ||
| 1292 | action = ATOM_ENABLE; | ||
| 1293 | break; | ||
| 1294 | case DRM_MODE_DPMS_STANDBY: | ||
| 1295 | case DRM_MODE_DPMS_SUSPEND: | ||
| 1296 | case DRM_MODE_DPMS_OFF: | ||
| 1297 | action = ATOM_DISABLE; | ||
| 1298 | break; | ||
| 1299 | } | ||
| 1300 | atombios_external_encoder_setup(encoder, ext_encoder, action); | ||
| 1301 | } | ||
| 1302 | |||
| 1113 | radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); | 1303 | radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); |
| 1114 | 1304 | ||
| 1115 | } | 1305 | } |
| @@ -1242,7 +1432,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
| 1242 | break; | 1432 | break; |
| 1243 | default: | 1433 | default: |
| 1244 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | 1434 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); |
| 1245 | break; | 1435 | return; |
| 1246 | } | 1436 | } |
| 1247 | 1437 | ||
| 1248 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1438 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
| @@ -1357,6 +1547,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
| 1357 | struct drm_device *dev = encoder->dev; | 1547 | struct drm_device *dev = encoder->dev; |
| 1358 | struct radeon_device *rdev = dev->dev_private; | 1548 | struct radeon_device *rdev = dev->dev_private; |
| 1359 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1549 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 1550 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | ||
| 1360 | 1551 | ||
| 1361 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1552 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
| 1362 | 1553 | ||
| @@ -1400,11 +1591,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
| 1400 | } | 1591 | } |
| 1401 | break; | 1592 | break; |
| 1402 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 1593 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
| 1403 | atombios_ddia_setup(encoder, ATOM_ENABLE); | ||
| 1404 | break; | ||
| 1405 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 1594 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
| 1406 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | 1595 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
| 1407 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); | 1596 | atombios_dvo_setup(encoder, ATOM_ENABLE); |
| 1408 | break; | 1597 | break; |
| 1409 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | 1598 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
| 1410 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 1599 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
| @@ -1419,6 +1608,11 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
| 1419 | } | 1608 | } |
| 1420 | break; | 1609 | break; |
| 1421 | } | 1610 | } |
| 1611 | |||
| 1612 | if (ext_encoder) { | ||
| 1613 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); | ||
| 1614 | } | ||
| 1615 | |||
| 1422 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 1616 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
| 1423 | 1617 | ||
| 1424 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { | 1618 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { |
| @@ -1595,11 +1789,9 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
| 1595 | } | 1789 | } |
| 1596 | break; | 1790 | break; |
| 1597 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 1791 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
| 1598 | atombios_ddia_setup(encoder, ATOM_DISABLE); | ||
| 1599 | break; | ||
| 1600 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 1792 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
| 1601 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | 1793 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
| 1602 | atombios_external_tmds_setup(encoder, ATOM_DISABLE); | 1794 | atombios_dvo_setup(encoder, ATOM_DISABLE); |
| 1603 | break; | 1795 | break; |
| 1604 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | 1796 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
| 1605 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 1797 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
| @@ -1621,6 +1813,53 @@ disable_done: | |||
| 1621 | radeon_encoder->active_device = 0; | 1813 | radeon_encoder->active_device = 0; |
| 1622 | } | 1814 | } |
| 1623 | 1815 | ||
| 1816 | /* these are handled by the primary encoders */ | ||
| 1817 | static void radeon_atom_ext_prepare(struct drm_encoder *encoder) | ||
| 1818 | { | ||
| 1819 | |||
| 1820 | } | ||
| 1821 | |||
| 1822 | static void radeon_atom_ext_commit(struct drm_encoder *encoder) | ||
| 1823 | { | ||
| 1824 | |||
| 1825 | } | ||
| 1826 | |||
| 1827 | static void | ||
| 1828 | radeon_atom_ext_mode_set(struct drm_encoder *encoder, | ||
| 1829 | struct drm_display_mode *mode, | ||
| 1830 | struct drm_display_mode *adjusted_mode) | ||
| 1831 | { | ||
| 1832 | |||
| 1833 | } | ||
| 1834 | |||
| 1835 | static void radeon_atom_ext_disable(struct drm_encoder *encoder) | ||
| 1836 | { | ||
| 1837 | |||
| 1838 | } | ||
| 1839 | |||
| 1840 | static void | ||
| 1841 | radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode) | ||
| 1842 | { | ||
| 1843 | |||
| 1844 | } | ||
| 1845 | |||
| 1846 | static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder, | ||
| 1847 | struct drm_display_mode *mode, | ||
| 1848 | struct drm_display_mode *adjusted_mode) | ||
| 1849 | { | ||
| 1850 | return true; | ||
| 1851 | } | ||
| 1852 | |||
| 1853 | static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = { | ||
| 1854 | .dpms = radeon_atom_ext_dpms, | ||
| 1855 | .mode_fixup = radeon_atom_ext_mode_fixup, | ||
| 1856 | .prepare = radeon_atom_ext_prepare, | ||
| 1857 | .mode_set = radeon_atom_ext_mode_set, | ||
| 1858 | .commit = radeon_atom_ext_commit, | ||
| 1859 | .disable = radeon_atom_ext_disable, | ||
| 1860 | /* no detect for TMDS/LVDS yet */ | ||
| 1861 | }; | ||
| 1862 | |||
| 1624 | static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { | 1863 | static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { |
| 1625 | .dpms = radeon_atom_encoder_dpms, | 1864 | .dpms = radeon_atom_encoder_dpms, |
| 1626 | .mode_fixup = radeon_atom_mode_fixup, | 1865 | .mode_fixup = radeon_atom_mode_fixup, |
| @@ -1730,6 +1969,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t | |||
| 1730 | radeon_encoder->devices = supported_device; | 1969 | radeon_encoder->devices = supported_device; |
| 1731 | radeon_encoder->rmx_type = RMX_OFF; | 1970 | radeon_encoder->rmx_type = RMX_OFF; |
| 1732 | radeon_encoder->underscan_type = UNDERSCAN_OFF; | 1971 | radeon_encoder->underscan_type = UNDERSCAN_OFF; |
| 1972 | radeon_encoder->is_ext_encoder = false; | ||
| 1733 | 1973 | ||
| 1734 | switch (radeon_encoder->encoder_id) { | 1974 | switch (radeon_encoder->encoder_id) { |
| 1735 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1975 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
| @@ -1771,6 +2011,9 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t | |||
| 1771 | radeon_encoder->rmx_type = RMX_FULL; | 2011 | radeon_encoder->rmx_type = RMX_FULL; |
| 1772 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); | 2012 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); |
| 1773 | radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); | 2013 | radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); |
| 2014 | } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { | ||
| 2015 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); | ||
| 2016 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); | ||
| 1774 | } else { | 2017 | } else { |
| 1775 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); | 2018 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); |
| 1776 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); | 2019 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); |
| @@ -1779,5 +2022,22 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t | |||
| 1779 | } | 2022 | } |
| 1780 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); | 2023 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); |
| 1781 | break; | 2024 | break; |
| 2025 | case ENCODER_OBJECT_ID_SI170B: | ||
| 2026 | case ENCODER_OBJECT_ID_CH7303: | ||
| 2027 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: | ||
| 2028 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: | ||
| 2029 | case ENCODER_OBJECT_ID_TITFP513: | ||
| 2030 | case ENCODER_OBJECT_ID_VT1623: | ||
| 2031 | case ENCODER_OBJECT_ID_HDMI_SI1930: | ||
| 2032 | /* these are handled by the primary encoders */ | ||
| 2033 | radeon_encoder->is_ext_encoder = true; | ||
| 2034 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
| 2035 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); | ||
| 2036 | else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) | ||
| 2037 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); | ||
| 2038 | else | ||
| 2039 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); | ||
| 2040 | drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); | ||
| 2041 | break; | ||
| 1782 | } | 2042 | } |
| 1783 | } | 2043 | } |
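
The external-encoder plumbing added above works in two steps: radeon_atom_get_external_encoder() finds the encoder flagged is_ext_encoder that shares a device bit with the primary one, and the dpms/mode-set paths then drive it through the ExternalEncoderControl table. A sketch of the dpms-to-action mapping pulled out into a standalone helper; the helper name is an assumption, the functions it calls are the ones introduced by this patch:

    static void radeon_atom_ext_encoder_dpms(struct drm_encoder *encoder, int mode)
    {
    	struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
    	int action;

    	if (!ext_encoder)
    		return;	/* no external TMDS/LVDS/DAC chip shares this device */

    	switch (mode) {
    	case DRM_MODE_DPMS_ON:
    	default:
    		action = ATOM_ENABLE;
    		break;
    	case DRM_MODE_DPMS_STANDBY:
    	case DRM_MODE_DPMS_SUSPEND:
    	case DRM_MODE_DPMS_OFF:
    		action = ATOM_DISABLE;
    		break;
    	}
    	atombios_external_encoder_setup(encoder, ext_encoder, action);
    }

radeon_atom_encoder_dpms() above open-codes the same mapping after it has handled the internal encoder.
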
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index e65b90317fab..65016117d95f 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
| @@ -79,8 +79,8 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) | |||
| 79 | 79 | ||
| 80 | if (rdev->gart.table.vram.robj == NULL) { | 80 | if (rdev->gart.table.vram.robj == NULL) { |
| 81 | r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, | 81 | r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, |
| 82 | true, RADEON_GEM_DOMAIN_VRAM, | 82 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
| 83 | &rdev->gart.table.vram.robj); | 83 | &rdev->gart.table.vram.robj); |
| 84 | if (r) { | 84 | if (r) { |
| 85 | return r; | 85 | return r; |
| 86 | } | 86 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d1e595d91723..df95eb83dac6 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
| @@ -67,7 +67,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
| 67 | if (alignment < PAGE_SIZE) { | 67 | if (alignment < PAGE_SIZE) { |
| 68 | alignment = PAGE_SIZE; | 68 | alignment = PAGE_SIZE; |
| 69 | } | 69 | } |
| 70 | r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); | 70 | r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj); |
| 71 | if (r) { | 71 | if (r) { |
| 72 | if (r != -ERESTARTSYS) | 72 | if (r != -ERESTARTSYS) |
| 73 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", | 73 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 0cfbba02c4d0..ded2a45bc95c 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
| @@ -896,7 +896,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
| 896 | ((rdev->family <= CHIP_RS480) || | 896 | ((rdev->family <= CHIP_RS480) || |
| 897 | ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { | 897 | ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { |
| 898 | /* set the radeon hw i2c adapter */ | 898 | /* set the radeon hw i2c adapter */ |
| 899 | sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name); | 899 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), |
| 900 | "Radeon i2c hw bus %s", name); | ||
| 900 | i2c->adapter.algo = &radeon_i2c_algo; | 901 | i2c->adapter.algo = &radeon_i2c_algo; |
| 901 | ret = i2c_add_adapter(&i2c->adapter); | 902 | ret = i2c_add_adapter(&i2c->adapter); |
| 902 | if (ret) { | 903 | if (ret) { |
| @@ -905,7 +906,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
| 905 | } | 906 | } |
| 906 | } else { | 907 | } else { |
| 907 | /* set the radeon bit adapter */ | 908 | /* set the radeon bit adapter */ |
| 908 | sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name); | 909 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), |
| 910 | "Radeon i2c bit bus %s", name); | ||
| 909 | i2c->adapter.algo_data = &i2c->algo.bit; | 911 | i2c->adapter.algo_data = &i2c->algo.bit; |
| 910 | i2c->algo.bit.pre_xfer = pre_xfer; | 912 | i2c->algo.bit.pre_xfer = pre_xfer; |
| 911 | i2c->algo.bit.post_xfer = post_xfer; | 913 | i2c->algo.bit.post_xfer = post_xfer; |
| @@ -946,6 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, | |||
| 946 | i2c->rec = *rec; | 948 | i2c->rec = *rec; |
| 947 | i2c->adapter.owner = THIS_MODULE; | 949 | i2c->adapter.owner = THIS_MODULE; |
| 948 | i2c->dev = dev; | 950 | i2c->dev = dev; |
| 951 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), | ||
| 952 | "Radeon aux bus %s", name); | ||
| 949 | i2c_set_adapdata(&i2c->adapter, i2c); | 953 | i2c_set_adapdata(&i2c->adapter, i2c); |
| 950 | i2c->adapter.algo_data = &i2c->algo.dp; | 954 | i2c->adapter.algo_data = &i2c->algo.dp; |
| 951 | i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; | 955 | i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; |
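
i2c_adapter.name is a fixed-size array, so the switch from sprintf() to snprintf() above bounds the copy and guarantees NUL termination even when the bus name is long. A self-contained illustration of the idiom (the helper name and buffer length are illustrative):

    #include <stdio.h>

    static void set_adapter_name(char buf[], size_t buflen, const char *name)
    {
    	/* truncates instead of overflowing, and always NUL-terminates */
    	snprintf(buf, buflen, "Radeon i2c hw bus %s", name);
    }
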
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 2f349a300195..465746bd51b7 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c | |||
| @@ -76,7 +76,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc) | |||
| 76 | default: | 76 | default: |
| 77 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", | 77 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", |
| 78 | crtc); | 78 | crtc); |
| 79 | return EINVAL; | 79 | return -EINVAL; |
| 80 | } | 80 | } |
| 81 | } else { | 81 | } else { |
| 82 | switch (crtc) { | 82 | switch (crtc) { |
| @@ -89,7 +89,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc) | |||
| 89 | default: | 89 | default: |
| 90 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", | 90 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", |
| 91 | crtc); | 91 | crtc); |
| 92 | return EINVAL; | 92 | return -EINVAL; |
| 93 | } | 93 | } |
| 94 | } | 94 | } |
| 95 | 95 | ||
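
The sign fix above matters because kernel code reports failure as a negative errno; a bare EINVAL is positive and is not recognized as an error by callers that test for ret < 0. An illustrative caller (hypothetical, not from the patch):

    int ret = radeon_enable_vblank(dev, crtc);
    if (ret < 0)
    	return ret;	/* -EINVAL propagates as an error; a positive EINVAL would slip through */
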
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 0b8397000f4c..59f834ba283d 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -670,7 +670,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
| 670 | 670 | ||
| 671 | if (rdev->is_atom_bios) { | 671 | if (rdev->is_atom_bios) { |
| 672 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 672 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
| 673 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); | 673 | atombios_dvo_setup(encoder, ATOM_ENABLE); |
| 674 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); | 674 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
| 675 | } else { | 675 | } else { |
| 676 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); | 676 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 680f57644e86..e301c6f9e059 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -375,6 +375,7 @@ struct radeon_encoder { | |||
| 375 | int hdmi_config_offset; | 375 | int hdmi_config_offset; |
| 376 | int hdmi_audio_workaround; | 376 | int hdmi_audio_workaround; |
| 377 | int hdmi_buffer_status; | 377 | int hdmi_buffer_status; |
| 378 | bool is_ext_encoder; | ||
| 378 | }; | 379 | }; |
| 379 | 380 | ||
| 380 | struct radeon_connector_atom_dig { | 381 | struct radeon_connector_atom_dig { |
| @@ -385,6 +386,7 @@ struct radeon_connector_atom_dig { | |||
| 385 | u8 dp_sink_type; | 386 | u8 dp_sink_type; |
| 386 | int dp_clock; | 387 | int dp_clock; |
| 387 | int dp_lane_count; | 388 | int dp_lane_count; |
| 389 | bool edp_on; | ||
| 388 | }; | 390 | }; |
| 389 | 391 | ||
| 390 | struct radeon_gpio_rec { | 392 | struct radeon_gpio_rec { |
| @@ -523,9 +525,10 @@ struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev | |||
| 523 | struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); | 525 | struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); |
| 524 | struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); | 526 | struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); |
| 525 | struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); | 527 | struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); |
| 526 | extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); | 528 | extern void atombios_dvo_setup(struct drm_encoder *encoder, int action); |
| 527 | extern void atombios_digital_setup(struct drm_encoder *encoder, int action); | 529 | extern void atombios_digital_setup(struct drm_encoder *encoder, int action); |
| 528 | extern int atombios_get_encoder_mode(struct drm_encoder *encoder); | 530 | extern int atombios_get_encoder_mode(struct drm_encoder *encoder); |
| 531 | extern void atombios_set_edp_panel_power(struct drm_connector *connector, int action); | ||
| 529 | extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); | 532 | extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); |
| 530 | 533 | ||
| 531 | extern void radeon_crtc_load_lut(struct drm_crtc *crtc); | 534 | extern void radeon_crtc_load_lut(struct drm_crtc *crtc); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 8eb183466015..1d067743fee0 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
| @@ -86,11 +86,12 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | 88 | int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, |
| 89 | unsigned long size, bool kernel, u32 domain, | 89 | unsigned long size, int byte_align, bool kernel, u32 domain, |
| 90 | struct radeon_bo **bo_ptr) | 90 | struct radeon_bo **bo_ptr) |
| 91 | { | 91 | { |
| 92 | struct radeon_bo *bo; | 92 | struct radeon_bo *bo; |
| 93 | enum ttm_bo_type type; | 93 | enum ttm_bo_type type; |
| 94 | int page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; | ||
| 94 | int r; | 95 | int r; |
| 95 | 96 | ||
| 96 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { | 97 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { |
| @@ -115,7 +116,7 @@ retry: | |||
| 115 | /* Kernel allocations are uninterruptible */ | 116 | /* Kernel allocations are uninterruptible */ |
| 116 | mutex_lock(&rdev->vram_mutex); | 117 | mutex_lock(&rdev->vram_mutex); |
| 117 | r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, | 118 | r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, |
| 118 | &bo->placement, 0, 0, !kernel, NULL, size, | 119 | &bo->placement, page_align, 0, !kernel, NULL, size, |
| 119 | &radeon_ttm_bo_destroy); | 120 | &radeon_ttm_bo_destroy); |
| 120 | mutex_unlock(&rdev->vram_mutex); | 121 | mutex_unlock(&rdev->vram_mutex); |
| 121 | if (unlikely(r != 0)) { | 122 | if (unlikely(r != 0)) { |
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 3481bc7f6f58..d143702b244a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
| @@ -137,9 +137,10 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, | |||
| 137 | } | 137 | } |
| 138 | 138 | ||
| 139 | extern int radeon_bo_create(struct radeon_device *rdev, | 139 | extern int radeon_bo_create(struct radeon_device *rdev, |
| 140 | struct drm_gem_object *gobj, unsigned long size, | 140 | struct drm_gem_object *gobj, unsigned long size, |
| 141 | bool kernel, u32 domain, | 141 | int byte_align, |
| 142 | struct radeon_bo **bo_ptr); | 142 | bool kernel, u32 domain, |
| 143 | struct radeon_bo **bo_ptr); | ||
| 143 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); | 144 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); |
| 144 | extern void radeon_bo_kunmap(struct radeon_bo *bo); | 145 | extern void radeon_bo_kunmap(struct radeon_bo *bo); |
| 145 | extern void radeon_bo_unref(struct radeon_bo **bo); | 146 | extern void radeon_bo_unref(struct radeon_bo **bo); |
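
With the new byte_align parameter, radeon_bo_create() rounds the requested alignment up to whole pages (roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT) and hands it to ttm_bo_init() as page_align; the in-kernel callers updated in this series simply pass PAGE_SIZE, while the GEM path forwards the alignment requested by user space. A usage sketch under the new prototype (the buffer purpose and the fb_size variable are illustrative):

    struct radeon_bo *rbo;
    int r;

    /* request a 256 KiB aligned buffer in VRAM, e.g. for scanout */
    r = radeon_bo_create(rdev, NULL, fb_size, 256 * 1024, true,
    			 RADEON_GEM_DOMAIN_VRAM, &rbo);
    if (r)
    	return r;
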
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 6ea798ce8218..06e79822a2bf 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
| @@ -176,8 +176,8 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
| 176 | INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); | 176 | INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); |
| 177 | /* Allocate 1M object buffer */ | 177 | /* Allocate 1M object buffer */ |
| 178 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, | 178 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, |
| 179 | true, RADEON_GEM_DOMAIN_GTT, | 179 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, |
| 180 | &rdev->ib_pool.robj); | 180 | &rdev->ib_pool.robj); |
| 181 | if (r) { | 181 | if (r) { |
| 182 | DRM_ERROR("radeon: failed to ib pool (%d).\n", r); | 182 | DRM_ERROR("radeon: failed to ib pool (%d).\n", r); |
| 183 | return r; | 183 | return r; |
| @@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
| 332 | rdev->cp.ring_size = ring_size; | 332 | rdev->cp.ring_size = ring_size; |
| 333 | /* Allocate ring buffer */ | 333 | /* Allocate ring buffer */ |
| 334 | if (rdev->cp.ring_obj == NULL) { | 334 | if (rdev->cp.ring_obj == NULL) { |
| 335 | r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true, | 335 | r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true, |
| 336 | RADEON_GEM_DOMAIN_GTT, | 336 | RADEON_GEM_DOMAIN_GTT, |
| 337 | &rdev->cp.ring_obj); | 337 | &rdev->cp.ring_obj); |
| 338 | if (r) { | 338 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 313c96bc09da..5b44f652145c 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
| @@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
| 52 | goto out_cleanup; | 52 | goto out_cleanup; |
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, | 55 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
| 56 | &vram_obj); | 56 | &vram_obj); |
| 57 | if (r) { | 57 | if (r) { |
| 58 | DRM_ERROR("Failed to create VRAM object\n"); | 58 | DRM_ERROR("Failed to create VRAM object\n"); |
| @@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
| 71 | void **gtt_start, **gtt_end; | 71 | void **gtt_start, **gtt_end; |
| 72 | void **vram_start, **vram_end; | 72 | void **vram_start, **vram_end; |
| 73 | 73 | ||
| 74 | r = radeon_bo_create(rdev, NULL, size, true, | 74 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, |
| 75 | RADEON_GEM_DOMAIN_GTT, gtt_obj + i); | 75 | RADEON_GEM_DOMAIN_GTT, gtt_obj + i); |
| 76 | if (r) { | 76 | if (r) { |
| 77 | DRM_ERROR("Failed to create GTT object %d\n", i); | 77 | DRM_ERROR("Failed to create GTT object %d\n", i); |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 01c2c736a1da..1272e4b6a1d4 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
| @@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
| 529 | DRM_ERROR("Failed initializing VRAM heap.\n"); | 529 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
| 530 | return r; | 530 | return r; |
| 531 | } | 531 | } |
| 532 | r = radeon_bo_create(rdev, NULL, 256 * 1024, true, | 532 | r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true, |
| 533 | RADEON_GEM_DOMAIN_VRAM, | 533 | RADEON_GEM_DOMAIN_VRAM, |
| 534 | &rdev->stollen_vga_memory); | 534 | &rdev->stollen_vga_memory); |
| 535 | if (r) { | 535 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 245374e2b778..4dfead8cee33 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -915,8 +915,8 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev) | |||
| 915 | 915 | ||
| 916 | if (rdev->vram_scratch.robj == NULL) { | 916 | if (rdev->vram_scratch.robj == NULL) { |
| 917 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, | 917 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, |
| 918 | true, RADEON_GEM_DOMAIN_VRAM, | 918 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
| 919 | &rdev->vram_scratch.robj); | 919 | &rdev->vram_scratch.robj); |
| 920 | if (r) { | 920 | if (r) { |
| 921 | return r; | 921 | return r; |
| 922 | } | 922 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3ca77dc03915..148a322d8f5d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
| @@ -224,6 +224,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, | |||
| 224 | int ret; | 224 | int ret; |
| 225 | 225 | ||
| 226 | while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { | 226 | while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { |
| 227 | /** | ||
| 228 | * Deadlock avoidance for multi-bo reserving. | ||
| 229 | */ | ||
| 227 | if (use_sequence && bo->seq_valid && | 230 | if (use_sequence && bo->seq_valid && |
| 228 | (sequence - bo->val_seq < (1 << 31))) { | 231 | (sequence - bo->val_seq < (1 << 31))) { |
| 229 | return -EAGAIN; | 232 | return -EAGAIN; |
| @@ -241,6 +244,14 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, | |||
| 241 | } | 244 | } |
| 242 | 245 | ||
| 243 | if (use_sequence) { | 246 | if (use_sequence) { |
| 247 | /** | ||
| 248 | * Wake up waiters that may need to recheck for deadlock, | ||
| 249 | * if we decreased the sequence number. | ||
| 250 | */ | ||
| 251 | if (unlikely((bo->val_seq - sequence < (1 << 31)) | ||
| 252 | || !bo->seq_valid)) | ||
| 253 | wake_up_all(&bo->event_queue); | ||
| 254 | |||
| 244 | bo->val_seq = sequence; | 255 | bo->val_seq = sequence; |
| 245 | bo->seq_valid = true; | 256 | bo->seq_valid = true; |
| 246 | } else { | 257 | } else { |
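
The test (sequence - bo->val_seq < (1 << 31)) above is the usual wrap-safe ordering check for 32-bit sequence counters, and the added wake_up_all() makes sleepers re-run that check whenever a reservation moves the buffer's sequence backwards. A standalone demonstration of the comparison idiom (the function name is mine, not TTM's):

    #include <stdbool.h>
    #include <stdint.h>

    /* true when a is at or after b, tolerating the 0xffffffff -> 0 rollover,
     * as long as the two counters stay within 2^31 of each other */
    static bool seq_at_or_after(uint32_t a, uint32_t b)
    {
    	return (uint32_t)(a - b) < (1u << 31);
    }

For example, seq_at_or_after(2, 0xfffffffeu) is true because the subtraction wraps to 4, which is how a freshly wrapped sequence still ranks ahead of an old one.
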
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 36e129f0023f..5408b1b7996f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -862,7 +862,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
| 862 | &vmw_vram_sys_placement, true, | 862 | &vmw_vram_sys_placement, true, |
| 863 | &vmw_user_dmabuf_destroy); | 863 | &vmw_user_dmabuf_destroy); |
| 864 | if (unlikely(ret != 0)) | 864 | if (unlikely(ret != 0)) |
| 865 | return ret; | 865 | goto out_no_dmabuf; |
| 866 | 866 | ||
| 867 | tmp = ttm_bo_reference(&vmw_user_bo->dma.base); | 867 | tmp = ttm_bo_reference(&vmw_user_bo->dma.base); |
| 868 | ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, | 868 | ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, |
| @@ -870,19 +870,21 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
| 870 | false, | 870 | false, |
| 871 | ttm_buffer_type, | 871 | ttm_buffer_type, |
| 872 | &vmw_user_dmabuf_release, NULL); | 872 | &vmw_user_dmabuf_release, NULL); |
| 873 | if (unlikely(ret != 0)) { | 873 | if (unlikely(ret != 0)) |
| 874 | ttm_bo_unref(&tmp); | 874 | goto out_no_base_object; |
| 875 | } else { | 875 | else { |
| 876 | rep->handle = vmw_user_bo->base.hash.key; | 876 | rep->handle = vmw_user_bo->base.hash.key; |
| 877 | rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; | 877 | rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; |
| 878 | rep->cur_gmr_id = vmw_user_bo->base.hash.key; | 878 | rep->cur_gmr_id = vmw_user_bo->base.hash.key; |
| 879 | rep->cur_gmr_offset = 0; | 879 | rep->cur_gmr_offset = 0; |
| 880 | } | 880 | } |
| 881 | ttm_bo_unref(&tmp); | ||
| 882 | 881 | ||
| 882 | out_no_base_object: | ||
| 883 | ttm_bo_unref(&tmp); | ||
| 884 | out_no_dmabuf: | ||
| 883 | ttm_read_unlock(&vmaster->lock); | 885 | ttm_read_unlock(&vmaster->lock); |
| 884 | 886 | ||
| 885 | return 0; | 887 | return ret; |
| 886 | } | 888 | } |
| 887 | 889 | ||
| 888 | int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | 890 | int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, |
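
The vmwgfx change converts the ioctl's error handling to the usual unwind shape: every failure after ttm_read_lock() now drops the lock and returns the real error instead of 0, and the extra buffer reference is dropped on success and failure alike. A minimal sketch of the pattern with placeholder steps (take_lock, create_buffer, register_buffer and friends are stand-ins, not the vmwgfx API):

    static int create_ioctl(void)
    {
    	int ret;

    	ret = take_lock();		/* placeholder */
    	if (ret)
    		return ret;

    	ret = create_buffer();		/* placeholder */
    	if (ret)
    		goto out_unlock;

    	take_extra_ref();		/* placeholder */
    	ret = register_buffer();	/* placeholder */
    	if (!ret)
    		fill_reply();		/* placeholder */

    	drop_extra_ref();		/* placeholder; runs on success and failure */
    out_unlock:
    	release_lock();			/* placeholder */
    	return ret;			/* 0 on success, negative errno otherwise */
    }

The real function keeps a second label for the reference drop, but the control flow is the same: one exit path, the lock always released, and the genuine status code returned.
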
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index 01a714119506..bc5590b1a1ac 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h | |||
| @@ -80,6 +80,7 @@ struct drm_nouveau_gpuobj_free { | |||
| 80 | #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 | 80 | #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 |
| 81 | #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 | 81 | #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 |
| 82 | #define NOUVEAU_GETPARAM_PTIMER_TIME 14 | 82 | #define NOUVEAU_GETPARAM_PTIMER_TIME 14 |
| 83 | #define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 | ||
| 83 | struct drm_nouveau_getparam { | 84 | struct drm_nouveau_getparam { |
| 84 | uint64_t param; | 85 | uint64_t param; |
| 85 | uint64_t value; | 86 | uint64_t value; |
| @@ -95,6 +96,12 @@ struct drm_nouveau_setparam { | |||
| 95 | #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) | 96 | #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) |
| 96 | #define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3) | 97 | #define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3) |
| 97 | 98 | ||
| 99 | #define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00 | ||
| 100 | #define NOUVEAU_GEM_TILE_16BPP 0x00000001 | ||
| 101 | #define NOUVEAU_GEM_TILE_32BPP 0x00000002 | ||
| 102 | #define NOUVEAU_GEM_TILE_ZETA 0x00000004 | ||
| 103 | #define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008 | ||
| 104 | |||
| 98 | struct drm_nouveau_gem_info { | 105 | struct drm_nouveau_gem_info { |
| 99 | uint32_t handle; | 106 | uint32_t handle; |
| 100 | uint32_t domain; | 107 | uint32_t domain; |
