76 files changed, 2904 insertions, 1676 deletions
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index f2aaf39be398..51103aa469f8 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
104 | if (connector->status == connector_status_disconnected) { | 104 | if (connector->status == connector_status_disconnected) { |
105 | DRM_DEBUG_KMS("%s is disconnected\n", | 105 | DRM_DEBUG_KMS("%s is disconnected\n", |
106 | drm_get_connector_name(connector)); | 106 | drm_get_connector_name(connector)); |
107 | drm_mode_connector_update_edid_property(connector, NULL); | ||
107 | goto prune; | 108 | goto prune; |
108 | } | 109 | } |
109 | 110 | ||
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f97e7c42ac8e..7e608f4a0df9 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -707,15 +707,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
707 | mode->vsync_end = mode->vsync_start + vsync_pulse_width; | 707 | mode->vsync_end = mode->vsync_start + vsync_pulse_width; |
708 | mode->vtotal = mode->vdisplay + vblank; | 708 | mode->vtotal = mode->vdisplay + vblank; |
709 | 709 | ||
710 | /* perform the basic check for the detailed timing */ | ||
711 | if (mode->hsync_end > mode->htotal || | ||
712 | mode->vsync_end > mode->vtotal) { | ||
713 | drm_mode_destroy(dev, mode); | ||
714 | DRM_DEBUG_KMS("Incorrect detailed timing. " | ||
715 | "Sync is beyond the blank.\n"); | ||
716 | return NULL; | ||
717 | } | ||
718 | |||
719 | /* Some EDIDs have bogus h/vtotal values */ | 710 | /* Some EDIDs have bogus h/vtotal values */ |
720 | if (mode->hsync_end > mode->htotal) | 711 | if (mode->hsync_end > mode->htotal) |
721 | mode->htotal = mode->hsync_end + 1; | 712 | mode->htotal = mode->hsync_end + 1; |
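Note: the hunk above drops the early rejection of detailed timings whose sync pulse ends beyond the total, leaving only the fix-up path that clamps bogus totals. A minimal standalone sketch of that surviving behaviour (the helper name is invented; the field names come from struct drm_display_mode):

    /* Sketch only: clamp bogus totals rather than discarding the mode,
     * mirroring the fix-up kept in drm_mode_detailed(). */
    static void fixup_bogus_totals(struct drm_display_mode *mode)
    {
            if (mode->hsync_end > mode->htotal)
                    mode->htotal = mode->hsync_end + 1;
            if (mode->vsync_end > mode->vtotal)
                    mode->vtotal = mode->vsync_end + 1;
    }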
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 50549703584f..99487237111d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -283,6 +283,8 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { | |||
283 | .help_msg = "force-fb(V)", | 283 | .help_msg = "force-fb(V)", |
284 | .action_msg = "Restore framebuffer console", | 284 | .action_msg = "Restore framebuffer console", |
285 | }; | 285 | }; |
286 | #else | ||
287 | static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; | ||
286 | #endif | 288 | #endif |
287 | 289 | ||
288 | static void drm_fb_helper_on(struct fb_info *info) | 290 | static void drm_fb_helper_on(struct fb_info *info) |
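Note: the new #else branch pairs with a sysrq config guard earlier in this file (the guard name is inferred from context, not visible in the hunk); the empty stub keeps later references to the op compiling when sysrq support is configured out. The full pattern, sketched:

    #ifdef CONFIG_MAGIC_SYSRQ
    static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
            /* .handler plus the fields shown in the hunk above */
            .help_msg = "force-fb(V)",
            .action_msg = "Restore framebuffer console",
    };
    #else
    /* Empty stub so the symbol still exists when sysrq is disabled. */
    static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
    #endif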
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 08d14df3bb42..4804872f8b19 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp) | |||
140 | spin_unlock(&dev->count_lock); | 140 | spin_unlock(&dev->count_lock); |
141 | } | 141 | } |
142 | out: | 142 | out: |
143 | mutex_lock(&dev->struct_mutex); | 143 | if (!retcode) { |
144 | if (minor->type == DRM_MINOR_LEGACY) { | 144 | mutex_lock(&dev->struct_mutex); |
145 | BUG_ON((dev->dev_mapping != NULL) && | 145 | if (minor->type == DRM_MINOR_LEGACY) { |
146 | (dev->dev_mapping != inode->i_mapping)); | 146 | if (dev->dev_mapping == NULL) |
147 | if (dev->dev_mapping == NULL) | 147 | dev->dev_mapping = inode->i_mapping; |
148 | dev->dev_mapping = inode->i_mapping; | 148 | else if (dev->dev_mapping != inode->i_mapping) |
149 | retcode = -ENODEV; | ||
150 | } | ||
151 | mutex_unlock(&dev->struct_mutex); | ||
149 | } | 152 | } |
150 | mutex_unlock(&dev->struct_mutex); | ||
151 | 153 | ||
152 | return retcode; | 154 | return retcode; |
153 | } | 155 | } |
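Note: with the side-by-side columns stripped away, the rewritten error path in drm_open() reads as below. A mapping mismatch on a legacy minor now fails the open with -ENODEV instead of hitting a BUG_ON(), and the lock is only taken when the earlier steps succeeded:

    out:
            if (!retcode) {
                    mutex_lock(&dev->struct_mutex);
                    if (minor->type == DRM_MINOR_LEGACY) {
                            if (dev->dev_mapping == NULL)
                                    dev->dev_mapping = inode->i_mapping;
                            else if (dev->dev_mapping != inode->i_mapping)
                                    retcode = -ENODEV;
                    }
                    mutex_unlock(&dev->struct_mutex);
            }

            return retcode;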
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 32db806f3b5a..7f0d807a0d0d 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -12,7 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ | |||
12 | nouveau_dp.o nouveau_grctx.o \ | 12 | nouveau_dp.o nouveau_grctx.o \ |
13 | nv04_timer.o \ | 13 | nv04_timer.o \ |
14 | nv04_mc.o nv40_mc.o nv50_mc.o \ | 14 | nv04_mc.o nv40_mc.o nv50_mc.o \ |
15 | nv04_fb.o nv10_fb.o nv40_fb.o \ | 15 | nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \ |
16 | nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ | 16 | nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ |
17 | nv04_graph.o nv10_graph.o nv20_graph.o \ | 17 | nv04_graph.o nv10_graph.o nv20_graph.o \ |
18 | nv40_graph.o nv50_graph.o \ | 18 | nv40_graph.o nv50_graph.o \ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 75bceee76044..b5a9336a2e88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -5211,6 +5211,21 @@ divine_connector_type(struct nvbios *bios, int index) | |||
5211 | } | 5211 | } |
5212 | 5212 | ||
5213 | static void | 5213 | static void |
5214 | apply_dcb_connector_quirks(struct nvbios *bios, int idx) | ||
5215 | { | ||
5216 | struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx]; | ||
5217 | struct drm_device *dev = bios->dev; | ||
5218 | |||
5219 | /* Gigabyte NX85T */ | ||
5220 | if ((dev->pdev->device == 0x0421) && | ||
5221 | (dev->pdev->subsystem_vendor == 0x1458) && | ||
5222 | (dev->pdev->subsystem_device == 0x344c)) { | ||
5223 | if (cte->type == DCB_CONNECTOR_HDMI_1) | ||
5224 | cte->type = DCB_CONNECTOR_DVI_I; | ||
5225 | } | ||
5226 | } | ||
5227 | |||
5228 | static void | ||
5214 | parse_dcb_connector_table(struct nvbios *bios) | 5229 | parse_dcb_connector_table(struct nvbios *bios) |
5215 | { | 5230 | { |
5216 | struct drm_device *dev = bios->dev; | 5231 | struct drm_device *dev = bios->dev; |
@@ -5238,13 +5253,14 @@ parse_dcb_connector_table(struct nvbios *bios) | |||
5238 | entry = conntab + conntab[1]; | 5253 | entry = conntab + conntab[1]; |
5239 | cte = &ct->entry[0]; | 5254 | cte = &ct->entry[0]; |
5240 | for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) { | 5255 | for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) { |
5256 | cte->index = i; | ||
5241 | if (conntab[3] == 2) | 5257 | if (conntab[3] == 2) |
5242 | cte->entry = ROM16(entry[0]); | 5258 | cte->entry = ROM16(entry[0]); |
5243 | else | 5259 | else |
5244 | cte->entry = ROM32(entry[0]); | 5260 | cte->entry = ROM32(entry[0]); |
5245 | 5261 | ||
5246 | cte->type = (cte->entry & 0x000000ff) >> 0; | 5262 | cte->type = (cte->entry & 0x000000ff) >> 0; |
5247 | cte->index = (cte->entry & 0x00000f00) >> 8; | 5263 | cte->index2 = (cte->entry & 0x00000f00) >> 8; |
5248 | switch (cte->entry & 0x00033000) { | 5264 | switch (cte->entry & 0x00033000) { |
5249 | case 0x00001000: | 5265 | case 0x00001000: |
5250 | cte->gpio_tag = 0x07; | 5266 | cte->gpio_tag = 0x07; |
@@ -5266,6 +5282,8 @@ parse_dcb_connector_table(struct nvbios *bios) | |||
5266 | if (cte->type == 0xff) | 5282 | if (cte->type == 0xff) |
5267 | continue; | 5283 | continue; |
5268 | 5284 | ||
5285 | apply_dcb_connector_quirks(bios, i); | ||
5286 | |||
5269 | NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", | 5287 | NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", |
5270 | i, cte->entry, cte->type, cte->index, cte->gpio_tag); | 5288 | i, cte->entry, cte->type, cte->index, cte->gpio_tag); |
5271 | 5289 | ||
@@ -5287,10 +5305,16 @@ parse_dcb_connector_table(struct nvbios *bios) | |||
5287 | break; | 5305 | break; |
5288 | default: | 5306 | default: |
5289 | cte->type = divine_connector_type(bios, cte->index); | 5307 | cte->type = divine_connector_type(bios, cte->index); |
5290 | NV_WARN(dev, "unknown type, using 0x%02x", cte->type); | 5308 | NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type); |
5291 | break; | 5309 | break; |
5292 | } | 5310 | } |
5293 | 5311 | ||
5312 | if (nouveau_override_conntype) { | ||
5313 | int type = divine_connector_type(bios, cte->index); | ||
5314 | if (type != cte->type) | ||
5315 | NV_WARN(dev, " -> type 0x%02x\n", cte->type); | ||
5316 | } | ||
5317 | |||
5294 | } | 5318 | } |
5295 | } | 5319 | } |
5296 | 5320 | ||
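Note: apply_dcb_connector_quirks() above matches one specific board by PCI device and subsystem IDs. If more boards ever need the same treatment, the check generalizes naturally to a small table; a hypothetical sketch (struct and helper names invented for illustration, the single entry reproduces the Gigabyte NX85T case from the hunk):

    struct dcb_conn_quirk {
            u16 device, subvendor, subdevice;
            u8 from_type, to_type;
    };

    static const struct dcb_conn_quirk dcb_conn_quirks[] = {
            /* Gigabyte NX85T: DCB claims HDMI, board is wired as DVI-I */
            { 0x0421, 0x1458, 0x344c, DCB_CONNECTOR_HDMI_1, DCB_CONNECTOR_DVI_I },
    };

    static void apply_quirks(struct drm_device *dev,
                             struct dcb_connector_table_entry *cte)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(dcb_conn_quirks); i++) {
                    const struct dcb_conn_quirk *q = &dcb_conn_quirks[i];

                    if (dev->pdev->device == q->device &&
                        dev->pdev->subsystem_vendor == q->subvendor &&
                        dev->pdev->subsystem_device == q->subdevice &&
                        cte->type == q->from_type)
                            cte->type = q->to_type;
            }
    }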
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 9f688aa9a655..4f88e6924d27 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -72,9 +72,10 @@ enum dcb_connector_type { | |||
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct dcb_connector_table_entry { | 74 | struct dcb_connector_table_entry { |
75 | uint8_t index; | ||
75 | uint32_t entry; | 76 | uint32_t entry; |
76 | enum dcb_connector_type type; | 77 | enum dcb_connector_type type; |
77 | uint8_t index; | 78 | uint8_t index2; |
78 | uint8_t gpio_tag; | 79 | uint8_t gpio_tag; |
79 | }; | 80 | }; |
80 | 81 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 028719fddf76..026612471c92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -439,8 +439,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) | |||
439 | 439 | ||
440 | switch (bo->mem.mem_type) { | 440 | switch (bo->mem.mem_type) { |
441 | case TTM_PL_VRAM: | 441 | case TTM_PL_VRAM: |
442 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT | | 442 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT); |
443 | TTM_PL_FLAG_SYSTEM); | ||
444 | break; | 443 | break; |
445 | default: | 444 | default: |
446 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); | 445 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 24327f468c4b..14afe1e47e57 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -302,7 +302,7 @@ nouveau_connector_detect(struct drm_connector *connector) | |||
302 | 302 | ||
303 | detect_analog: | 303 | detect_analog: |
304 | nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); | 304 | nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); |
305 | if (!nv_encoder) | 305 | if (!nv_encoder && !nouveau_tv_disable) |
306 | nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); | 306 | nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); |
307 | if (nv_encoder) { | 307 | if (nv_encoder) { |
308 | struct drm_encoder *encoder = to_drm_encoder(nv_encoder); | 308 | struct drm_encoder *encoder = to_drm_encoder(nv_encoder); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index c8482a108a78..65c441a1999f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -190,6 +190,11 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, | |||
190 | nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); | 190 | nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); |
191 | 191 | ||
192 | chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; | 192 | chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; |
193 | |||
194 | DRM_MEMORYBARRIER(); | ||
195 | /* Flush writes. */ | ||
196 | nouveau_bo_rd32(pb, 0); | ||
197 | |||
193 | nvchan_wr32(chan, 0x8c, chan->dma.ib_put); | 198 | nvchan_wr32(chan, 0x8c, chan->dma.ib_put); |
194 | chan->dma.ib_free--; | 199 | chan->dma.ib_free--; |
195 | } | 200 | } |
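Note: the added DRM_MEMORYBARRIER() and dummy nouveau_bo_rd32() follow the usual "flush posted writes before kicking the hardware" pattern: the indirect-buffer entry written just above must be globally visible before the put-pointer update in nvchan_wr32() tells the GPU to fetch it. The same ordering, sketched generically (all names below are illustrative, not nouveau API):

    struct ring {
            u64 *ib;                 /* indirect buffer, mapped write-combined */
            unsigned int put;
            void __iomem *doorbell;
    };

    static void push_and_kick(struct ring *r, u64 entry)
    {
            r->ib[r->put] = entry;        /* 1. write the indirect-buffer slot    */
            wmb();                        /* 2. order the write                   */
            (void)READ_ONCE(r->ib[0]);    /* 3. read back to flush posted writes  */
            writel(r->put, r->doorbell);  /* 4. only now notify the hardware      */
    }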
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 30cc09e8a709..1de974acbc65 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -83,6 +83,14 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); | |||
83 | int nouveau_nofbaccel = 0; | 83 | int nouveau_nofbaccel = 0; |
84 | module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); | 84 | module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); |
85 | 85 | ||
86 | MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type"); | ||
87 | int nouveau_override_conntype = 0; | ||
88 | module_param_named(override_conntype, nouveau_override_conntype, int, 0400); | ||
89 | |||
90 | MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n"); | ||
91 | int nouveau_tv_disable = 0; | ||
92 | module_param_named(tv_disable, nouveau_tv_disable, int, 0400); | ||
93 | |||
86 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" | 94 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" |
87 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" | 95 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" |
88 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" | 96 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" |
@@ -154,9 +162,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
154 | if (pm_state.event == PM_EVENT_PRETHAW) | 162 | if (pm_state.event == PM_EVENT_PRETHAW) |
155 | return 0; | 163 | return 0; |
156 | 164 | ||
165 | NV_INFO(dev, "Disabling fbcon acceleration...\n"); | ||
157 | fbdev_flags = dev_priv->fbdev_info->flags; | 166 | fbdev_flags = dev_priv->fbdev_info->flags; |
158 | dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; | 167 | dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; |
159 | 168 | ||
169 | NV_INFO(dev, "Unpinning framebuffer(s)...\n"); | ||
160 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 170 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
161 | struct nouveau_framebuffer *nouveau_fb; | 171 | struct nouveau_framebuffer *nouveau_fb; |
162 | 172 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 4b9aaf2a8d0f..d8b559011777 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -681,6 +681,7 @@ extern int nouveau_uscript_tmds; | |||
681 | extern int nouveau_vram_pushbuf; | 681 | extern int nouveau_vram_pushbuf; |
682 | extern int nouveau_vram_notify; | 682 | extern int nouveau_vram_notify; |
683 | extern int nouveau_fbpercrtc; | 683 | extern int nouveau_fbpercrtc; |
684 | extern int nouveau_tv_disable; | ||
684 | extern char *nouveau_tv_norm; | 685 | extern char *nouveau_tv_norm; |
685 | extern int nouveau_reg_debug; | 686 | extern int nouveau_reg_debug; |
686 | extern char *nouveau_vbios; | 687 | extern char *nouveau_vbios; |
@@ -688,6 +689,7 @@ extern int nouveau_ctxfw; | |||
688 | extern int nouveau_ignorelid; | 689 | extern int nouveau_ignorelid; |
689 | extern int nouveau_nofbaccel; | 690 | extern int nouveau_nofbaccel; |
690 | extern int nouveau_noaccel; | 691 | extern int nouveau_noaccel; |
692 | extern int nouveau_override_conntype; | ||
691 | 693 | ||
692 | extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); | 694 | extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); |
693 | extern int nouveau_pci_resume(struct pci_dev *pdev); | 695 | extern int nouveau_pci_resume(struct pci_dev *pdev); |
@@ -926,6 +928,10 @@ extern void nv40_fb_takedown(struct drm_device *); | |||
926 | extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, | 928 | extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, |
927 | uint32_t, uint32_t); | 929 | uint32_t, uint32_t); |
928 | 930 | ||
931 | /* nv50_fb.c */ | ||
932 | extern int nv50_fb_init(struct drm_device *); | ||
933 | extern void nv50_fb_takedown(struct drm_device *); | ||
934 | |||
929 | /* nv04_fifo.c */ | 935 | /* nv04_fifo.c */ |
930 | extern int nv04_fifo_init(struct drm_device *); | 936 | extern int nv04_fifo_init(struct drm_device *); |
931 | extern void nv04_fifo_disable(struct drm_device *); | 937 | extern void nv04_fifo_disable(struct drm_device *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 95220ddebb45..2bd59a92fee5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -311,6 +311,31 @@ nouveau_print_bitfield_names_(uint32_t value, | |||
311 | #define nouveau_print_bitfield_names(val, namelist) \ | 311 | #define nouveau_print_bitfield_names(val, namelist) \ |
312 | nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) | 312 | nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) |
313 | 313 | ||
314 | struct nouveau_enum_names { | ||
315 | uint32_t value; | ||
316 | const char *name; | ||
317 | }; | ||
318 | |||
319 | static void | ||
320 | nouveau_print_enum_names_(uint32_t value, | ||
321 | const struct nouveau_enum_names *namelist, | ||
322 | const int namelist_len) | ||
323 | { | ||
324 | /* | ||
325 | * Caller must have already printed the KERN_* log level for us. | ||
326 | * Also the caller is responsible for adding the newline. | ||
327 | */ | ||
328 | int i; | ||
329 | for (i = 0; i < namelist_len; ++i) { | ||
330 | if (value == namelist[i].value) { | ||
331 | printk("%s", namelist[i].name); | ||
332 | return; | ||
333 | } | ||
334 | } | ||
335 | printk("unknown value 0x%08x", value); | ||
336 | } | ||
337 | #define nouveau_print_enum_names(val, namelist) \ | ||
338 | nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist)) | ||
314 | 339 | ||
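Note: nouveau_print_enum_names() complements the existing bitfield printer for registers that hold a single error code rather than a mask. It is used further down in this patch, for example:

    NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d MP %d: ", tpid, i);
    nouveau_print_enum_names(status, nv50_mp_exec_error_names);
    printk(" at %06x warp %d, opcode %08x %08x\n",
           pc & 0xffffff, pc >> 24, oplow, ophigh);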
315 | static int | 340 | static int |
316 | nouveau_graph_chid_from_grctx(struct drm_device *dev) | 341 | nouveau_graph_chid_from_grctx(struct drm_device *dev) |
@@ -427,14 +452,16 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, | |||
427 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 452 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
428 | uint32_t nsource = trap->nsource, nstatus = trap->nstatus; | 453 | uint32_t nsource = trap->nsource, nstatus = trap->nstatus; |
429 | 454 | ||
430 | NV_INFO(dev, "%s - nSource:", id); | 455 | if (dev_priv->card_type < NV_50) { |
431 | nouveau_print_bitfield_names(nsource, nsource_names); | 456 | NV_INFO(dev, "%s - nSource:", id); |
432 | printk(", nStatus:"); | 457 | nouveau_print_bitfield_names(nsource, nsource_names); |
433 | if (dev_priv->card_type < NV_10) | 458 | printk(", nStatus:"); |
434 | nouveau_print_bitfield_names(nstatus, nstatus_names); | 459 | if (dev_priv->card_type < NV_10) |
435 | else | 460 | nouveau_print_bitfield_names(nstatus, nstatus_names); |
436 | nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); | 461 | else |
437 | printk("\n"); | 462 | nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); |
463 | printk("\n"); | ||
464 | } | ||
438 | 465 | ||
439 | NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " | 466 | NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " |
440 | "Data 0x%08x:0x%08x\n", | 467 | "Data 0x%08x:0x%08x\n", |
@@ -578,27 +605,502 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) | |||
578 | } | 605 | } |
579 | 606 | ||
580 | static void | 607 | static void |
608 | nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name) | ||
609 | { | ||
610 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
611 | uint32_t trap[6]; | ||
612 | int i, ch; | ||
613 | uint32_t idx = nv_rd32(dev, 0x100c90); | ||
614 | if (idx & 0x80000000) { | ||
615 | idx &= 0xffffff; | ||
616 | if (display) { | ||
617 | for (i = 0; i < 6; i++) { | ||
618 | nv_wr32(dev, 0x100c90, idx | i << 24); | ||
619 | trap[i] = nv_rd32(dev, 0x100c94); | ||
620 | } | ||
621 | for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { | ||
622 | struct nouveau_channel *chan = dev_priv->fifos[ch]; | ||
623 | |||
624 | if (!chan || !chan->ramin) | ||
625 | continue; | ||
626 | |||
627 | if (trap[1] == chan->ramin->instance >> 12) | ||
628 | break; | ||
629 | } | ||
630 | NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n", | ||
631 | name, (trap[5]&0x100?"read":"write"), | ||
632 | trap[5]&0xff, trap[4]&0xffff, | ||
633 | trap[3]&0xffff, trap[0], trap[2], ch); | ||
634 | } | ||
635 | nv_wr32(dev, 0x100c90, idx | 0x80000000); | ||
636 | } else if (display) { | ||
637 | NV_INFO(dev, "%s - no VM fault?\n", name); | ||
638 | } | ||
639 | } | ||
640 | |||
641 | static struct nouveau_enum_names nv50_mp_exec_error_names[] = | ||
642 | { | ||
643 | { 3, "STACK_UNDERFLOW" }, | ||
644 | { 4, "QUADON_ACTIVE" }, | ||
645 | { 8, "TIMEOUT" }, | ||
646 | { 0x10, "INVALID_OPCODE" }, | ||
647 | { 0x40, "BREAKPOINT" }, | ||
648 | }; | ||
649 | |||
650 | static void | ||
651 | nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) | ||
652 | { | ||
653 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
654 | uint32_t units = nv_rd32(dev, 0x1540); | ||
655 | uint32_t addr, mp10, status, pc, oplow, ophigh; | ||
656 | int i; | ||
657 | int mps = 0; | ||
658 | for (i = 0; i < 4; i++) { | ||
659 | if (!(units & 1 << (i+24))) | ||
660 | continue; | ||
661 | if (dev_priv->chipset < 0xa0) | ||
662 | addr = 0x408200 + (tpid << 12) + (i << 7); | ||
663 | else | ||
664 | addr = 0x408100 + (tpid << 11) + (i << 7); | ||
665 | mp10 = nv_rd32(dev, addr + 0x10); | ||
666 | status = nv_rd32(dev, addr + 0x14); | ||
667 | if (!status) | ||
668 | continue; | ||
669 | if (display) { | ||
670 | nv_rd32(dev, addr + 0x20); | ||
671 | pc = nv_rd32(dev, addr + 0x24); | ||
672 | oplow = nv_rd32(dev, addr + 0x70); | ||
673 | ophigh= nv_rd32(dev, addr + 0x74); | ||
674 | NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " | ||
675 | "TP %d MP %d: ", tpid, i); | ||
676 | nouveau_print_enum_names(status, | ||
677 | nv50_mp_exec_error_names); | ||
678 | printk(" at %06x warp %d, opcode %08x %08x\n", | ||
679 | pc&0xffffff, pc >> 24, | ||
680 | oplow, ophigh); | ||
681 | } | ||
682 | nv_wr32(dev, addr + 0x10, mp10); | ||
683 | nv_wr32(dev, addr + 0x14, 0); | ||
684 | mps++; | ||
685 | } | ||
686 | if (!mps && display) | ||
687 | NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " | ||
688 | "No MPs claiming errors?\n", tpid); | ||
689 | } | ||
690 | |||
691 | static void | ||
692 | nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, | ||
693 | uint32_t ustatus_new, int display, const char *name) | ||
694 | { | ||
695 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
696 | int tps = 0; | ||
697 | uint32_t units = nv_rd32(dev, 0x1540); | ||
698 | int i, r; | ||
699 | uint32_t ustatus_addr, ustatus; | ||
700 | for (i = 0; i < 16; i++) { | ||
701 | if (!(units & (1 << i))) | ||
702 | continue; | ||
703 | if (dev_priv->chipset < 0xa0) | ||
704 | ustatus_addr = ustatus_old + (i << 12); | ||
705 | else | ||
706 | ustatus_addr = ustatus_new + (i << 11); | ||
707 | ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; | ||
708 | if (!ustatus) | ||
709 | continue; | ||
710 | tps++; | ||
711 | switch (type) { | ||
712 | case 6: /* texture error... unknown for now */ | ||
713 | nv50_pfb_vm_trap(dev, display, name); | ||
714 | if (display) { | ||
715 | NV_ERROR(dev, "magic set %d:\n", i); | ||
716 | for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) | ||
717 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
718 | nv_rd32(dev, r)); | ||
719 | } | ||
720 | break; | ||
721 | case 7: /* MP error */ | ||
722 | if (ustatus & 0x00010000) { | ||
723 | nv50_pgraph_mp_trap(dev, i, display); | ||
724 | ustatus &= ~0x00010000; | ||
725 | } | ||
726 | break; | ||
727 | case 8: /* TPDMA error */ | ||
728 | { | ||
729 | uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); | ||
730 | uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); | ||
731 | uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); | ||
732 | uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); | ||
733 | uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); | ||
734 | uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); | ||
735 | uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); | ||
736 | nv50_pfb_vm_trap(dev, display, name); | ||
737 | /* 2d engine destination */ | ||
738 | if (ustatus & 0x00000010) { | ||
739 | if (display) { | ||
740 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", | ||
741 | i, e14, e10); | ||
742 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
743 | i, e0c, e18, e1c, e20, e24); | ||
744 | } | ||
745 | ustatus &= ~0x00000010; | ||
746 | } | ||
747 | /* Render target */ | ||
748 | if (ustatus & 0x00000040) { | ||
749 | if (display) { | ||
750 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", | ||
751 | i, e14, e10); | ||
752 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
753 | i, e0c, e18, e1c, e20, e24); | ||
754 | } | ||
755 | ustatus &= ~0x00000040; | ||
756 | } | ||
757 | /* CUDA memory: l[], g[] or stack. */ | ||
758 | if (ustatus & 0x00000080) { | ||
759 | if (display) { | ||
760 | if (e18 & 0x80000000) { | ||
761 | /* g[] read fault? */ | ||
762 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", | ||
763 | i, e14, e10 | ((e18 >> 24) & 0x1f)); | ||
764 | e18 &= ~0x1f000000; | ||
765 | } else if (e18 & 0xc) { | ||
766 | /* g[] write fault? */ | ||
767 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", | ||
768 | i, e14, e10 | ((e18 >> 7) & 0x1f)); | ||
769 | e18 &= ~0x00000f80; | ||
770 | } else { | ||
771 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", | ||
772 | i, e14, e10); | ||
773 | } | ||
774 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
775 | i, e0c, e18, e1c, e20, e24); | ||
776 | } | ||
777 | ustatus &= ~0x00000080; | ||
778 | } | ||
779 | } | ||
780 | break; | ||
781 | } | ||
782 | if (ustatus) { | ||
783 | if (display) | ||
784 | NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); | ||
785 | } | ||
786 | nv_wr32(dev, ustatus_addr, 0xc0000000); | ||
787 | } | ||
788 | |||
789 | if (!tps && display) | ||
790 | NV_INFO(dev, "%s - No TPs claiming errors?\n", name); | ||
791 | } | ||
792 | |||
793 | static void | ||
794 | nv50_pgraph_trap_handler(struct drm_device *dev) | ||
795 | { | ||
796 | struct nouveau_pgraph_trap trap; | ||
797 | uint32_t status = nv_rd32(dev, 0x400108); | ||
798 | uint32_t ustatus; | ||
799 | int display = nouveau_ratelimit(); | ||
800 | |||
801 | |||
802 | if (!status && display) { | ||
803 | nouveau_graph_trap_info(dev, &trap); | ||
804 | nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap); | ||
805 | NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n"); | ||
806 | } | ||
807 | |||
808 | /* DISPATCH: Relays commands to other units and handles NOTIFY, | ||
809 | * COND, QUERY. If you get a trap from it, the command is still stuck | ||
810 | * in DISPATCH and you need to do something about it. */ | ||
811 | if (status & 0x001) { | ||
812 | ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; | ||
813 | if (!ustatus && display) { | ||
814 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); | ||
815 | } | ||
816 | |||
817 | /* Known to be triggered by screwed up NOTIFY and COND... */ | ||
818 | if (ustatus & 0x00000001) { | ||
819 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); | ||
820 | nv_wr32(dev, 0x400500, 0); | ||
821 | if (nv_rd32(dev, 0x400808) & 0x80000000) { | ||
822 | if (display) { | ||
823 | if (nouveau_graph_trapped_channel(dev, &trap.channel)) | ||
824 | trap.channel = -1; | ||
825 | trap.class = nv_rd32(dev, 0x400814); | ||
826 | trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc; | ||
827 | trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7; | ||
828 | trap.data = nv_rd32(dev, 0x40080c); | ||
829 | trap.data2 = nv_rd32(dev, 0x400810); | ||
830 | nouveau_graph_dump_trap_info(dev, | ||
831 | "PGRAPH_TRAP_DISPATCH_FAULT", &trap); | ||
832 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808)); | ||
833 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848)); | ||
834 | } | ||
835 | nv_wr32(dev, 0x400808, 0); | ||
836 | } else if (display) { | ||
837 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n"); | ||
838 | } | ||
839 | nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); | ||
840 | nv_wr32(dev, 0x400848, 0); | ||
841 | ustatus &= ~0x00000001; | ||
842 | } | ||
843 | if (ustatus & 0x00000002) { | ||
844 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); | ||
845 | nv_wr32(dev, 0x400500, 0); | ||
846 | if (nv_rd32(dev, 0x40084c) & 0x80000000) { | ||
847 | if (display) { | ||
848 | if (nouveau_graph_trapped_channel(dev, &trap.channel)) | ||
849 | trap.channel = -1; | ||
850 | trap.class = nv_rd32(dev, 0x400814); | ||
851 | trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc; | ||
852 | trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7; | ||
853 | trap.data = nv_rd32(dev, 0x40085c); | ||
854 | trap.data2 = 0; | ||
855 | nouveau_graph_dump_trap_info(dev, | ||
856 | "PGRAPH_TRAP_DISPATCH_QUERY", &trap); | ||
857 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c)); | ||
858 | } | ||
859 | nv_wr32(dev, 0x40084c, 0); | ||
860 | } else if (display) { | ||
861 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n"); | ||
862 | } | ||
863 | ustatus &= ~0x00000002; | ||
864 | } | ||
865 | if (ustatus && display) | ||
866 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus); | ||
867 | nv_wr32(dev, 0x400804, 0xc0000000); | ||
868 | nv_wr32(dev, 0x400108, 0x001); | ||
869 | status &= ~0x001; | ||
870 | } | ||
871 | |||
872 | /* TRAPs other than dispatch use the "normal" trap regs. */ | ||
873 | if (status && display) { | ||
874 | nouveau_graph_trap_info(dev, &trap); | ||
875 | nouveau_graph_dump_trap_info(dev, | ||
876 | "PGRAPH_TRAP", &trap); | ||
877 | } | ||
878 | |||
879 | /* M2MF: Memory to memory copy engine. */ | ||
880 | if (status & 0x002) { | ||
881 | ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; | ||
882 | if (!ustatus && display) { | ||
883 | NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n"); | ||
884 | } | ||
885 | if (ustatus & 0x00000001) { | ||
886 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); | ||
887 | ustatus &= ~0x00000001; | ||
888 | } | ||
889 | if (ustatus & 0x00000002) { | ||
890 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); | ||
891 | ustatus &= ~0x00000002; | ||
892 | } | ||
893 | if (ustatus & 0x00000004) { | ||
894 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); | ||
895 | ustatus &= ~0x00000004; | ||
896 | } | ||
897 | NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n", | ||
898 | nv_rd32(dev, 0x406804), | ||
899 | nv_rd32(dev, 0x406808), | ||
900 | nv_rd32(dev, 0x40680c), | ||
901 | nv_rd32(dev, 0x406810)); | ||
902 | if (ustatus && display) | ||
903 | NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus); | ||
904 | /* No sane way found yet -- just reset the bugger. */ | ||
905 | nv_wr32(dev, 0x400040, 2); | ||
906 | nv_wr32(dev, 0x400040, 0); | ||
907 | nv_wr32(dev, 0x406800, 0xc0000000); | ||
908 | nv_wr32(dev, 0x400108, 0x002); | ||
909 | status &= ~0x002; | ||
910 | } | ||
911 | |||
912 | /* VFETCH: Fetches data from vertex buffers. */ | ||
913 | if (status & 0x004) { | ||
914 | ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; | ||
915 | if (!ustatus && display) { | ||
916 | NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n"); | ||
917 | } | ||
918 | if (ustatus & 0x00000001) { | ||
919 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); | ||
920 | NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n", | ||
921 | nv_rd32(dev, 0x400c00), | ||
922 | nv_rd32(dev, 0x400c08), | ||
923 | nv_rd32(dev, 0x400c0c), | ||
924 | nv_rd32(dev, 0x400c10)); | ||
925 | ustatus &= ~0x00000001; | ||
926 | } | ||
927 | if (ustatus && display) | ||
928 | NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus); | ||
929 | nv_wr32(dev, 0x400c04, 0xc0000000); | ||
930 | nv_wr32(dev, 0x400108, 0x004); | ||
931 | status &= ~0x004; | ||
932 | } | ||
933 | |||
934 | /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ | ||
935 | if (status & 0x008) { | ||
936 | ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; | ||
937 | if (!ustatus && display) { | ||
938 | NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n"); | ||
939 | } | ||
940 | if (ustatus & 0x00000001) { | ||
941 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); | ||
942 | NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n", | ||
943 | nv_rd32(dev, 0x401804), | ||
944 | nv_rd32(dev, 0x401808), | ||
945 | nv_rd32(dev, 0x40180c), | ||
946 | nv_rd32(dev, 0x401810)); | ||
947 | ustatus &= ~0x00000001; | ||
948 | } | ||
949 | if (ustatus && display) | ||
950 | NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus); | ||
951 | /* No sane way found yet -- just reset the bugger. */ | ||
952 | nv_wr32(dev, 0x400040, 0x80); | ||
953 | nv_wr32(dev, 0x400040, 0); | ||
954 | nv_wr32(dev, 0x401800, 0xc0000000); | ||
955 | nv_wr32(dev, 0x400108, 0x008); | ||
956 | status &= ~0x008; | ||
957 | } | ||
958 | |||
959 | /* CCACHE: Handles code and c[] caches and fills them. */ | ||
960 | if (status & 0x010) { | ||
961 | ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; | ||
962 | if (!ustatus && display) { | ||
963 | NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n"); | ||
964 | } | ||
965 | if (ustatus & 0x00000001) { | ||
966 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); | ||
967 | NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n", | ||
968 | nv_rd32(dev, 0x405800), | ||
969 | nv_rd32(dev, 0x405804), | ||
970 | nv_rd32(dev, 0x405808), | ||
971 | nv_rd32(dev, 0x40580c), | ||
972 | nv_rd32(dev, 0x405810), | ||
973 | nv_rd32(dev, 0x405814), | ||
974 | nv_rd32(dev, 0x40581c)); | ||
975 | ustatus &= ~0x00000001; | ||
976 | } | ||
977 | if (ustatus && display) | ||
978 | NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus); | ||
979 | nv_wr32(dev, 0x405018, 0xc0000000); | ||
980 | nv_wr32(dev, 0x400108, 0x010); | ||
981 | status &= ~0x010; | ||
982 | } | ||
983 | |||
984 | /* Unknown, not seen yet... 0x402000 is the only trap status reg | ||
985 | * remaining, so try to handle it anyway. Perhaps related to that | ||
986 | * unknown DMA slot on tesla? */ | ||
987 | if (status & 0x20) { | ||
988 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); | ||
989 | ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; | ||
990 | if (display) | ||
991 | NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus); | ||
992 | nv_wr32(dev, 0x402000, 0xc0000000); | ||
993 | /* no status modification on purpose */ | ||
994 | } | ||
995 | |||
996 | /* TEXTURE: CUDA texturing units */ | ||
997 | if (status & 0x040) { | ||
998 | nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display, | ||
999 | "PGRAPH_TRAP_TEXTURE"); | ||
1000 | nv_wr32(dev, 0x400108, 0x040); | ||
1001 | status &= ~0x040; | ||
1002 | } | ||
1003 | |||
1004 | /* MP: CUDA execution engines. */ | ||
1005 | if (status & 0x080) { | ||
1006 | nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display, | ||
1007 | "PGRAPH_TRAP_MP"); | ||
1008 | nv_wr32(dev, 0x400108, 0x080); | ||
1009 | status &= ~0x080; | ||
1010 | } | ||
1011 | |||
1012 | /* TPDMA: Handles TP-initiated uncached memory accesses: | ||
1013 | * l[], g[], stack, 2d surfaces, render targets. */ | ||
1014 | if (status & 0x100) { | ||
1015 | nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display, | ||
1016 | "PGRAPH_TRAP_TPDMA"); | ||
1017 | nv_wr32(dev, 0x400108, 0x100); | ||
1018 | status &= ~0x100; | ||
1019 | } | ||
1020 | |||
1021 | if (status) { | ||
1022 | if (display) | ||
1023 | NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n", | ||
1024 | status); | ||
1025 | nv_wr32(dev, 0x400108, status); | ||
1026 | } | ||
1027 | } | ||
1028 | |||
1029 | /* There must be a *lot* of these. Will take some time to gather them up. */ | ||
1030 | static struct nouveau_enum_names nv50_data_error_names[] = | ||
1031 | { | ||
1032 | { 4, "INVALID_VALUE" }, | ||
1033 | { 5, "INVALID_ENUM" }, | ||
1034 | { 8, "INVALID_OBJECT" }, | ||
1035 | { 0xc, "INVALID_BITFIELD" }, | ||
1036 | { 0x28, "MP_NO_REG_SPACE" }, | ||
1037 | { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, | ||
1038 | }; | ||
1039 | |||
1040 | static void | ||
581 | nv50_pgraph_irq_handler(struct drm_device *dev) | 1041 | nv50_pgraph_irq_handler(struct drm_device *dev) |
582 | { | 1042 | { |
1043 | struct nouveau_pgraph_trap trap; | ||
1044 | int unhandled = 0; | ||
583 | uint32_t status; | 1045 | uint32_t status; |
584 | 1046 | ||
585 | while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { | 1047 | while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { |
586 | uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); | 1048 | /* NOTIFY: You've set a NOTIFY on a command and it's done. */ |
587 | |||
588 | if (status & 0x00000001) { | 1049 | if (status & 0x00000001) { |
589 | nouveau_pgraph_intr_notify(dev, nsource); | 1050 | nouveau_graph_trap_info(dev, &trap); |
1051 | if (nouveau_ratelimit()) | ||
1052 | nouveau_graph_dump_trap_info(dev, | ||
1053 | "PGRAPH_NOTIFY", &trap); | ||
590 | status &= ~0x00000001; | 1054 | status &= ~0x00000001; |
591 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); | 1055 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); |
592 | } | 1056 | } |
593 | 1057 | ||
594 | if (status & 0x00000010) { | 1058 | /* COMPUTE_QUERY: Purpose and exact cause unknown, happens |
595 | nouveau_pgraph_intr_error(dev, nsource | | 1059 | * when you write 0x200 to 0x50c0 method 0x31c. */ |
596 | NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); | 1060 | if (status & 0x00000002) { |
1061 | nouveau_graph_trap_info(dev, &trap); | ||
1062 | if (nouveau_ratelimit()) | ||
1063 | nouveau_graph_dump_trap_info(dev, | ||
1064 | "PGRAPH_COMPUTE_QUERY", &trap); | ||
1065 | status &= ~0x00000002; | ||
1066 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002); | ||
1067 | } | ||
597 | 1068 | ||
1069 | /* Unknown, never seen: 0x4 */ | ||
1070 | |||
1071 | /* ILLEGAL_MTHD: You used a wrong method for this class. */ | ||
1072 | if (status & 0x00000010) { | ||
1073 | nouveau_graph_trap_info(dev, &trap); | ||
1074 | if (nouveau_pgraph_intr_swmthd(dev, &trap)) | ||
1075 | unhandled = 1; | ||
1076 | if (unhandled && nouveau_ratelimit()) | ||
1077 | nouveau_graph_dump_trap_info(dev, | ||
1078 | "PGRAPH_ILLEGAL_MTHD", &trap); | ||
598 | status &= ~0x00000010; | 1079 | status &= ~0x00000010; |
599 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); | 1080 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); |
600 | } | 1081 | } |
601 | 1082 | ||
1083 | /* ILLEGAL_CLASS: You used a wrong class. */ | ||
1084 | if (status & 0x00000020) { | ||
1085 | nouveau_graph_trap_info(dev, &trap); | ||
1086 | if (nouveau_ratelimit()) | ||
1087 | nouveau_graph_dump_trap_info(dev, | ||
1088 | "PGRAPH_ILLEGAL_CLASS", &trap); | ||
1089 | status &= ~0x00000020; | ||
1090 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020); | ||
1091 | } | ||
1092 | |||
1093 | /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */ | ||
1094 | if (status & 0x00000040) { | ||
1095 | nouveau_graph_trap_info(dev, &trap); | ||
1096 | if (nouveau_ratelimit()) | ||
1097 | nouveau_graph_dump_trap_info(dev, | ||
1098 | "PGRAPH_DOUBLE_NOTIFY", &trap); | ||
1099 | status &= ~0x00000040; | ||
1100 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040); | ||
1101 | } | ||
1102 | |||
1103 | /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */ | ||
602 | if (status & 0x00001000) { | 1104 | if (status & 0x00001000) { |
603 | nv_wr32(dev, 0x400500, 0x00000000); | 1105 | nv_wr32(dev, 0x400500, 0x00000000); |
604 | nv_wr32(dev, NV03_PGRAPH_INTR, | 1106 | nv_wr32(dev, NV03_PGRAPH_INTR, |
@@ -613,49 +1115,59 @@ nv50_pgraph_irq_handler(struct drm_device *dev) | |||
613 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; | 1115 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; |
614 | } | 1116 | } |
615 | 1117 | ||
616 | if (status & 0x00100000) { | 1118 | /* BUFFER_NOTIFY: Your m2mf transfer finished */ |
617 | nouveau_pgraph_intr_error(dev, nsource | | 1119 | if (status & 0x00010000) { |
618 | NV03_PGRAPH_NSOURCE_DATA_ERROR); | 1120 | nouveau_graph_trap_info(dev, &trap); |
1121 | if (nouveau_ratelimit()) | ||
1122 | nouveau_graph_dump_trap_info(dev, | ||
1123 | "PGRAPH_BUFFER_NOTIFY", &trap); | ||
1124 | status &= ~0x00010000; | ||
1125 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000); | ||
1126 | } | ||
619 | 1127 | ||
1128 | /* DATA_ERROR: Invalid value for this method, or invalid | ||
1129 | * state in current PGRAPH context for this operation */ | ||
1130 | if (status & 0x00100000) { | ||
1131 | nouveau_graph_trap_info(dev, &trap); | ||
1132 | if (nouveau_ratelimit()) { | ||
1133 | nouveau_graph_dump_trap_info(dev, | ||
1134 | "PGRAPH_DATA_ERROR", &trap); | ||
1135 | NV_INFO (dev, "PGRAPH_DATA_ERROR - "); | ||
1136 | nouveau_print_enum_names(nv_rd32(dev, 0x400110), | ||
1137 | nv50_data_error_names); | ||
1138 | printk("\n"); | ||
1139 | } | ||
620 | status &= ~0x00100000; | 1140 | status &= ~0x00100000; |
621 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); | 1141 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); |
622 | } | 1142 | } |
623 | 1143 | ||
1144 | /* TRAP: Something bad happened in the middle of command | ||
1145 | * execution. Has a billion types, subtypes, and even | ||
1146 | * subsubtypes. */ | ||
624 | if (status & 0x00200000) { | 1147 | if (status & 0x00200000) { |
625 | int r; | 1148 | nv50_pgraph_trap_handler(dev); |
626 | |||
627 | nouveau_pgraph_intr_error(dev, nsource | | ||
628 | NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); | ||
629 | |||
630 | NV_ERROR(dev, "magic set 1:\n"); | ||
631 | for (r = 0x408900; r <= 0x408910; r += 4) | ||
632 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
633 | nv_rd32(dev, r)); | ||
634 | nv_wr32(dev, 0x408900, | ||
635 | nv_rd32(dev, 0x408904) | 0xc0000000); | ||
636 | for (r = 0x408e08; r <= 0x408e24; r += 4) | ||
637 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
638 | nv_rd32(dev, r)); | ||
639 | nv_wr32(dev, 0x408e08, | ||
640 | nv_rd32(dev, 0x408e08) | 0xc0000000); | ||
641 | |||
642 | NV_ERROR(dev, "magic set 2:\n"); | ||
643 | for (r = 0x409900; r <= 0x409910; r += 4) | ||
644 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
645 | nv_rd32(dev, r)); | ||
646 | nv_wr32(dev, 0x409900, | ||
647 | nv_rd32(dev, 0x409904) | 0xc0000000); | ||
648 | for (r = 0x409e08; r <= 0x409e24; r += 4) | ||
649 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
650 | nv_rd32(dev, r)); | ||
651 | nv_wr32(dev, 0x409e08, | ||
652 | nv_rd32(dev, 0x409e08) | 0xc0000000); | ||
653 | |||
654 | status &= ~0x00200000; | 1149 | status &= ~0x00200000; |
655 | nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); | ||
656 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); | 1150 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); |
657 | } | 1151 | } |
658 | 1152 | ||
1153 | /* Unknown, never seen: 0x00400000 */ | ||
1154 | |||
1155 | /* SINGLE_STEP: Happens on every method if you turned on | ||
1156 | * single stepping in 40008c */ | ||
1157 | if (status & 0x01000000) { | ||
1158 | nouveau_graph_trap_info(dev, &trap); | ||
1159 | if (nouveau_ratelimit()) | ||
1160 | nouveau_graph_dump_trap_info(dev, | ||
1161 | "PGRAPH_SINGLE_STEP", &trap); | ||
1162 | status &= ~0x01000000; | ||
1163 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000); | ||
1164 | } | ||
1165 | |||
1166 | /* 0x02000000 happens when you pause a ctxprog... | ||
1167 | * but the only way this can happen that I know is by | ||
1168 | * poking the relevant MMIO register, and we don't | ||
1169 | * do that. */ | ||
1170 | |||
659 | if (status) { | 1171 | if (status) { |
660 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", | 1172 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", |
661 | status); | 1173 | status); |
@@ -672,7 +1184,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev) | |||
672 | } | 1184 | } |
673 | 1185 | ||
674 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); | 1186 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); |
675 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | 1187 | if (nv_rd32(dev, 0x400824) & (1 << 31)) |
1188 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | ||
676 | } | 1189 | } |
677 | 1190 | ||
678 | static void | 1191 | static void |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index eb8f084d5f53..58b46807de23 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -35,7 +35,6 @@ | |||
35 | #include "nouveau_drm.h" | 35 | #include "nouveau_drm.h" |
36 | #include "nv50_display.h" | 36 | #include "nv50_display.h" |
37 | 37 | ||
38 | static int nouveau_stub_init(struct drm_device *dev) { return 0; } | ||
39 | static void nouveau_stub_takedown(struct drm_device *dev) {} | 38 | static void nouveau_stub_takedown(struct drm_device *dev) {} |
40 | 39 | ||
41 | static int nouveau_init_engine_ptrs(struct drm_device *dev) | 40 | static int nouveau_init_engine_ptrs(struct drm_device *dev) |
@@ -277,8 +276,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
277 | engine->timer.init = nv04_timer_init; | 276 | engine->timer.init = nv04_timer_init; |
278 | engine->timer.read = nv04_timer_read; | 277 | engine->timer.read = nv04_timer_read; |
279 | engine->timer.takedown = nv04_timer_takedown; | 278 | engine->timer.takedown = nv04_timer_takedown; |
280 | engine->fb.init = nouveau_stub_init; | 279 | engine->fb.init = nv50_fb_init; |
281 | engine->fb.takedown = nouveau_stub_takedown; | 280 | engine->fb.takedown = nv50_fb_takedown; |
282 | engine->graph.grclass = nv50_graph_grclass; | 281 | engine->graph.grclass = nv50_graph_grclass; |
283 | engine->graph.init = nv50_graph_init; | 282 | engine->graph.init = nv50_graph_init; |
284 | engine->graph.takedown = nv50_graph_takedown; | 283 | engine->graph.takedown = nv50_graph_takedown; |
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index a1d1ebb073d9..eba687f1099e 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -230,9 +230,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
230 | struct drm_framebuffer *fb = crtc->fb; | 230 | struct drm_framebuffer *fb = crtc->fb; |
231 | 231 | ||
232 | /* Calculate our timings */ | 232 | /* Calculate our timings */ |
233 | int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; | 233 | int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; |
234 | int horizStart = (mode->crtc_hsync_start >> 3) - 1; | 234 | int horizStart = (mode->crtc_hsync_start >> 3) + 1; |
235 | int horizEnd = (mode->crtc_hsync_end >> 3) - 1; | 235 | int horizEnd = (mode->crtc_hsync_end >> 3) + 1; |
236 | int horizTotal = (mode->crtc_htotal >> 3) - 5; | 236 | int horizTotal = (mode->crtc_htotal >> 3) - 5; |
237 | int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1; | 237 | int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1; |
238 | int horizBlankEnd = (mode->crtc_htotal >> 3) - 1; | 238 | int horizBlankEnd = (mode->crtc_htotal >> 3) - 1; |
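Note: the sign change only affects the sync-start/end terms; horizDisplay and horizTotal are untouched. Worked example using the standard 1024x768@60 timings (hsync_start = 1048, hsync_end = 1184; the figures are illustrative, not taken from the patch):

    old: horizStart = (1048 >> 3) - 1 = 130,  horizEnd = (1184 >> 3) - 1 = 147
    new: horizStart = (1048 >> 3) + 1 = 132,  horizEnd = (1184 >> 3) + 1 = 149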
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 3da90c2c4e63..813b25cec726 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -118,8 +118,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
118 | return; | 118 | return; |
119 | } | 119 | } |
120 | 120 | ||
121 | width = ALIGN(image->width, 32); | 121 | width = ALIGN(image->width, 8); |
122 | dsize = (width * image->height) >> 5; | 122 | dsize = ALIGN(width * image->height, 32) >> 5; |
123 | 123 | ||
124 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 124 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
125 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 125 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
@@ -136,8 +136,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
136 | ((image->dx + image->width) & 0xffff)); | 136 | ((image->dx + image->width) & 0xffff)); |
137 | OUT_RING(chan, bg); | 137 | OUT_RING(chan, bg); |
138 | OUT_RING(chan, fg); | 138 | OUT_RING(chan, fg); |
139 | OUT_RING(chan, (image->height << 16) | image->width); | ||
140 | OUT_RING(chan, (image->height << 16) | width); | 139 | OUT_RING(chan, (image->height << 16) | width); |
140 | OUT_RING(chan, (image->height << 16) | image->width); | ||
141 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); | 141 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); |
142 | 142 | ||
143 | while (dsize) { | 143 | while (dsize) { |
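Note: tightening the alignment from 32 to 8 pixels, and rounding the size to 32 bits only after multiplying by the height, shrinks the per-blit data upload; the swapped OUT_RING pair then sends the padded width word before the real image size, matching the order the patched code uses. Worked example for an illustrative 10x16 monochrome glyph (numbers invented for illustration):

    old: width = ALIGN(10, 32) = 32;  dsize = (32 * 16) >> 5          = 16 words
    new: width = ALIGN(10, 8)  = 16;  dsize = ALIGN(16 * 16, 32) >> 5 =  8 words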
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 61a89f2dc553..fac6c88a2b1f 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -522,8 +522,8 @@ int nv50_display_create(struct drm_device *dev) | |||
522 | } | 522 | } |
523 | 523 | ||
524 | for (i = 0 ; i < dcb->connector.entries; i++) { | 524 | for (i = 0 ; i < dcb->connector.entries; i++) { |
525 | if (i != 0 && dcb->connector.entry[i].index == | 525 | if (i != 0 && dcb->connector.entry[i].index2 == |
526 | dcb->connector.entry[i - 1].index) | 526 | dcb->connector.entry[i - 1].index2) |
527 | continue; | 527 | continue; |
528 | nouveau_connector_create(dev, &dcb->connector.entry[i]); | 528 | nouveau_connector_create(dev, &dcb->connector.entry[i]); |
529 | } | 529 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
new file mode 100644
index 000000000000..a95e6941ba88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -0,0 +1,32 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_drm.h" | ||
5 | |||
6 | int | ||
7 | nv50_fb_init(struct drm_device *dev) | ||
8 | { | ||
9 | /* This is needed to get meaningful information from 100c90 | ||
10 | * on traps. No idea what these values mean exactly. */ | ||
11 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
12 | |||
13 | switch (dev_priv->chipset) { | ||
14 | case 0x50: | ||
15 | nv_wr32(dev, 0x100c90, 0x0707ff); | ||
16 | break; | ||
17 | case 0xa5: | ||
18 | case 0xa8: | ||
19 | nv_wr32(dev, 0x100c90, 0x0d0fff); | ||
20 | break; | ||
21 | default: | ||
22 | nv_wr32(dev, 0x100c90, 0x1d07ff); | ||
23 | break; | ||
24 | } | ||
25 | |||
26 | return 0; | ||
27 | } | ||
28 | |||
29 | void | ||
30 | nv50_fb_takedown(struct drm_device *dev) | ||
31 | { | ||
32 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 993c7126fbde..25a3cd8794f9 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -233,7 +233,7 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
233 | BEGIN_RING(chan, NvSub2D, 0x0808, 3); | 233 | BEGIN_RING(chan, NvSub2D, 0x0808, 3); |
234 | OUT_RING(chan, 0); | 234 | OUT_RING(chan, 0); |
235 | OUT_RING(chan, 0); | 235 | OUT_RING(chan, 0); |
236 | OUT_RING(chan, 0); | 236 | OUT_RING(chan, 1); |
237 | BEGIN_RING(chan, NvSub2D, 0x081c, 1); | 237 | BEGIN_RING(chan, NvSub2D, 0x081c, 1); |
238 | OUT_RING(chan, 1); | 238 | OUT_RING(chan, 1); |
239 | BEGIN_RING(chan, NvSub2D, 0x0840, 4); | 239 | BEGIN_RING(chan, NvSub2D, 0x0840, 4); |
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 857a09671a39..c62b33a02f88 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -56,6 +56,10 @@ nv50_graph_init_intr(struct drm_device *dev) | |||
56 | static void | 56 | static void |
57 | nv50_graph_init_regs__nv(struct drm_device *dev) | 57 | nv50_graph_init_regs__nv(struct drm_device *dev) |
58 | { | 58 | { |
59 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
60 | uint32_t units = nv_rd32(dev, 0x1540); | ||
61 | int i; | ||
62 | |||
59 | NV_DEBUG(dev, "\n"); | 63 | NV_DEBUG(dev, "\n"); |
60 | 64 | ||
61 | nv_wr32(dev, 0x400804, 0xc0000000); | 65 | nv_wr32(dev, 0x400804, 0xc0000000); |
@@ -65,6 +69,20 @@ nv50_graph_init_regs__nv(struct drm_device *dev) | |||
65 | nv_wr32(dev, 0x405018, 0xc0000000); | 69 | nv_wr32(dev, 0x405018, 0xc0000000); |
66 | nv_wr32(dev, 0x402000, 0xc0000000); | 70 | nv_wr32(dev, 0x402000, 0xc0000000); |
67 | 71 | ||
72 | for (i = 0; i < 16; i++) { | ||
73 | if (units & 1 << i) { | ||
74 | if (dev_priv->chipset < 0xa0) { | ||
75 | nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000); | ||
76 | nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000); | ||
77 | nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000); | ||
78 | } else { | ||
79 | nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000); | ||
80 | nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000); | ||
81 | nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000); | ||
82 | } | ||
83 | } | ||
84 | } | ||
85 | |||
68 | nv_wr32(dev, 0x400108, 0xffffffff); | 86 | nv_wr32(dev, 0x400108, 0xffffffff); |
69 | 87 | ||
70 | nv_wr32(dev, 0x400824, 0x00004000); | 88 | nv_wr32(dev, 0x400824, 0x00004000); |
@@ -229,10 +247,6 @@ nv50_graph_create_context(struct nouveau_channel *chan) | |||
229 | nouveau_grctx_vals_load(dev, ctx); | 247 | nouveau_grctx_vals_load(dev, ctx); |
230 | } | 248 | } |
231 | nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); | 249 | nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); |
232 | if ((dev_priv->chipset & 0xf0) == 0xa0) | ||
233 | nv_wo32(dev, ctx, 0x00004/4, 0x00000000); | ||
234 | else | ||
235 | nv_wo32(dev, ctx, 0x0011c/4, 0x00000000); | ||
236 | dev_priv->engine.instmem.finish_access(dev); | 250 | dev_priv->engine.instmem.finish_access(dev); |
237 | 251 | ||
238 | return 0; | 252 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index d105fcd42ca0..546b31949a30 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -64,6 +64,9 @@ | |||
64 | #define CP_FLAG_ALWAYS ((2 * 32) + 13) | 64 | #define CP_FLAG_ALWAYS ((2 * 32) + 13) |
65 | #define CP_FLAG_ALWAYS_FALSE 0 | 65 | #define CP_FLAG_ALWAYS_FALSE 0 |
66 | #define CP_FLAG_ALWAYS_TRUE 1 | 66 | #define CP_FLAG_ALWAYS_TRUE 1 |
67 | #define CP_FLAG_INTR ((2 * 32) + 15) | ||
68 | #define CP_FLAG_INTR_NOT_PENDING 0 | ||
69 | #define CP_FLAG_INTR_PENDING 1 | ||
67 | 70 | ||
68 | #define CP_CTX 0x00100000 | 71 | #define CP_CTX 0x00100000 |
69 | #define CP_CTX_COUNT 0x000f0000 | 72 | #define CP_CTX_COUNT 0x000f0000 |
@@ -214,6 +217,8 @@ nv50_grctx_init(struct nouveau_grctx *ctx) | |||
214 | cp_name(ctx, cp_setup_save); | 217 | cp_name(ctx, cp_setup_save); |
215 | cp_set (ctx, UNK1D, SET); | 218 | cp_set (ctx, UNK1D, SET); |
216 | cp_wait(ctx, STATUS, BUSY); | 219 | cp_wait(ctx, STATUS, BUSY); |
220 | cp_wait(ctx, INTR, PENDING); | ||
221 | cp_bra (ctx, STATUS, BUSY, cp_setup_save); | ||
217 | cp_set (ctx, UNK01, SET); | 222 | cp_set (ctx, UNK01, SET); |
218 | cp_set (ctx, SWAP_DIRECTION, SAVE); | 223 | cp_set (ctx, SWAP_DIRECTION, SAVE); |
219 | 224 | ||
@@ -269,7 +274,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
269 | int offset, base; | 274 | int offset, base; |
270 | uint32_t units = nv_rd32 (ctx->dev, 0x1540); | 275 | uint32_t units = nv_rd32 (ctx->dev, 0x1540); |
271 | 276 | ||
272 | /* 0800 */ | 277 | /* 0800: DISPATCH */ |
273 | cp_ctx(ctx, 0x400808, 7); | 278 | cp_ctx(ctx, 0x400808, 7); |
274 | gr_def(ctx, 0x400814, 0x00000030); | 279 | gr_def(ctx, 0x400814, 0x00000030); |
275 | cp_ctx(ctx, 0x400834, 0x32); | 280 | cp_ctx(ctx, 0x400834, 0x32); |
@@ -300,7 +305,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
300 | gr_def(ctx, 0x400b20, 0x0001629d); | 305 | gr_def(ctx, 0x400b20, 0x0001629d); |
301 | } | 306 | } |
302 | 307 | ||
303 | /* 0C00 */ | 308 | /* 0C00: VFETCH */ |
304 | cp_ctx(ctx, 0x400c08, 0x2); | 309 | cp_ctx(ctx, 0x400c08, 0x2); |
305 | gr_def(ctx, 0x400c08, 0x0000fe0c); | 310 | gr_def(ctx, 0x400c08, 0x0000fe0c); |
306 | 311 | ||
@@ -326,7 +331,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
326 | cp_ctx(ctx, 0x401540, 0x5); | 331 | cp_ctx(ctx, 0x401540, 0x5); |
327 | gr_def(ctx, 0x401550, 0x00001018); | 332 | gr_def(ctx, 0x401550, 0x00001018); |
328 | 333 | ||
329 | /* 1800 */ | 334 | /* 1800: STREAMOUT */ |
330 | cp_ctx(ctx, 0x401814, 0x1); | 335 | cp_ctx(ctx, 0x401814, 0x1); |
331 | gr_def(ctx, 0x401814, 0x000000ff); | 336 | gr_def(ctx, 0x401814, 0x000000ff); |
332 | if (dev_priv->chipset == 0x50) { | 337 | if (dev_priv->chipset == 0x50) { |
@@ -641,7 +646,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
641 | if (dev_priv->chipset == 0x50) | 646 | if (dev_priv->chipset == 0x50) |
642 | cp_ctx(ctx, 0x4063e0, 0x1); | 647 | cp_ctx(ctx, 0x4063e0, 0x1); |
643 | 648 | ||
644 | /* 6800 */ | 649 | /* 6800: M2MF */ |
645 | if (dev_priv->chipset < 0x90) { | 650 | if (dev_priv->chipset < 0x90) { |
646 | cp_ctx(ctx, 0x406814, 0x2b); | 651 | cp_ctx(ctx, 0x406814, 0x2b); |
647 | gr_def(ctx, 0x406818, 0x00000f80); | 652 | gr_def(ctx, 0x406818, 0x00000f80); |
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index ed38262d9985..3c91312dea9a 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -50,7 +50,7 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h | |||
50 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ | 50 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ |
51 | radeon_irq.o r300_cmdbuf.o r600_cp.o | 51 | radeon_irq.o r300_cmdbuf.o r600_cp.o |
52 | # add KMS driver | 52 | # add KMS driver |
53 | radeon-y += radeon_device.o radeon_kms.o \ | 53 | radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ |
54 | radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \ | 54 | radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \ |
55 | atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \ | 55 | atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \ |
56 | radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \ | 56 | radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \ |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index d75788feac6c..247f8ee7e940 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -52,15 +52,17 @@ | |||
52 | 52 | ||
53 | typedef struct { | 53 | typedef struct { |
54 | struct atom_context *ctx; | 54 | struct atom_context *ctx; |
55 | |||
56 | uint32_t *ps, *ws; | 55 | uint32_t *ps, *ws; |
57 | int ps_shift; | 56 | int ps_shift; |
58 | uint16_t start; | 57 | uint16_t start; |
58 | unsigned last_jump; | ||
59 | unsigned long last_jump_jiffies; | ||
60 | bool abort; | ||
59 | } atom_exec_context; | 61 | } atom_exec_context; |
60 | 62 | ||
61 | int atom_debug = 0; | 63 | int atom_debug = 0; |
62 | static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); | 64 | static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); |
63 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); | 65 | int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); |
64 | 66 | ||
65 | static uint32_t atom_arg_mask[8] = | 67 | static uint32_t atom_arg_mask[8] = |
66 | { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, | 68 | { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, |
@@ -604,12 +606,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) | |||
604 | static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) | 606 | static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) |
605 | { | 607 | { |
606 | int idx = U8((*ptr)++); | 608 | int idx = U8((*ptr)++); |
609 | int r = 0; | ||
610 | |||
607 | if (idx < ATOM_TABLE_NAMES_CNT) | 611 | if (idx < ATOM_TABLE_NAMES_CNT) |
608 | SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); | 612 | SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); |
609 | else | 613 | else |
610 | SDEBUG(" table: %d\n", idx); | 614 | SDEBUG(" table: %d\n", idx); |
611 | if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) | 615 | if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) |
612 | atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); | 616 | r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); |
617 | if (r) { | ||
618 | ctx->abort = true; | ||
619 | } | ||
613 | } | 620 | } |
614 | 621 | ||
615 | static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) | 622 | static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) |
@@ -673,6 +680,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) | |||
673 | static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) | 680 | static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) |
674 | { | 681 | { |
675 | int execute = 0, target = U16(*ptr); | 682 | int execute = 0, target = U16(*ptr); |
683 | unsigned long cjiffies; | ||
684 | |||
676 | (*ptr) += 2; | 685 | (*ptr) += 2; |
677 | switch (arg) { | 686 | switch (arg) { |
678 | case ATOM_COND_ABOVE: | 687 | case ATOM_COND_ABOVE: |
@@ -700,8 +709,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) | |||
700 | if (arg != ATOM_COND_ALWAYS) | 709 | if (arg != ATOM_COND_ALWAYS) |
701 | SDEBUG(" taken: %s\n", execute ? "yes" : "no"); | 710 | SDEBUG(" taken: %s\n", execute ? "yes" : "no"); |
702 | SDEBUG(" target: 0x%04X\n", target); | 711 | SDEBUG(" target: 0x%04X\n", target); |
703 | if (execute) | 712 | if (execute) { |
713 | if (ctx->last_jump == (ctx->start + target)) { | ||
714 | cjiffies = jiffies; | ||
715 | if (time_after(cjiffies, ctx->last_jump_jiffies)) { | ||
716 | cjiffies -= ctx->last_jump_jiffies; | ||
717 | if ((jiffies_to_msecs(cjiffies) > 1000)) { | ||
718 | DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n"); | ||
719 | ctx->abort = true; | ||
720 | } | ||
721 | } else { | ||
722 | /* jiffies wrapped around; just wait a little longer */ | ||
723 | ctx->last_jump_jiffies = jiffies; | ||
724 | } | ||
725 | } else { | ||
726 | ctx->last_jump = ctx->start + target; | ||
727 | ctx->last_jump_jiffies = jiffies; | ||
728 | } | ||
704 | *ptr = ctx->start + target; | 729 | *ptr = ctx->start + target; |
730 | } | ||
705 | } | 731 | } |
706 | 732 | ||
707 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) | 733 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) |
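The jump handler above is the heart of this change: some AtomBIOS scripts poll a status bit that never flips and spin on the same conditional jump forever, hanging the driver with the lock held. The new code remembers the last jump destination and when it was first taken; if the interpreter keeps branching to that same destination for more than about a second it sets ctx->abort instead of looping. Below is a minimal userspace sketch of the same bookkeeping, using CLOCK_MONOTONIC in place of jiffies (so the wrap-around special case is unnecessary); the names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct exec_ctx {
    unsigned last_jump;            /* last branch target seen */
    struct timespec last_jump_ts;  /* when we first landed on it */
    bool abort;
};

static long ms_since(const struct timespec *then, const struct timespec *now)
{
    return (now->tv_sec - then->tv_sec) * 1000 +
           (now->tv_nsec - then->tv_nsec) / 1000000;
}

/* called every time a conditional jump is actually taken */
static void note_jump(struct exec_ctx *ctx, unsigned target)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    if (ctx->last_jump == target) {
        /* still bouncing to the same place: give up after ~1 second */
        if (ms_since(&ctx->last_jump_ts, &now) > 1000)
            ctx->abort = true;
    } else {
        /* new destination: restart the watchdog */
        ctx->last_jump = target;
        ctx->last_jump_ts = now;
    }
}

int main(void)
{
    struct exec_ctx ctx = { .last_jump = 0, .abort = false };
    unsigned pc = 0x100;

    clock_gettime(CLOCK_MONOTONIC, &ctx.last_jump_ts);
    while (!ctx.abort)
        note_jump(&ctx, pc);    /* a script stuck jumping back to 0x100 */
    printf("script aborted after looping for too long\n");
    return 0;
}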
@@ -1104,7 +1130,7 @@ static struct { | |||
1104 | atom_op_shr, ATOM_ARG_MC}, { | 1130 | atom_op_shr, ATOM_ARG_MC}, { |
1105 | atom_op_debug, 0},}; | 1131 | atom_op_debug, 0},}; |
1106 | 1132 | ||
1107 | static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) | 1133 | static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) |
1108 | { | 1134 | { |
1109 | int base = CU16(ctx->cmd_table + 4 + 2 * index); | 1135 | int base = CU16(ctx->cmd_table + 4 + 2 * index); |
1110 | int len, ws, ps, ptr; | 1136 | int len, ws, ps, ptr; |
@@ -1112,7 +1138,7 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1112 | atom_exec_context ectx; | 1138 | atom_exec_context ectx; |
1113 | 1139 | ||
1114 | if (!base) | 1140 | if (!base) |
1115 | return; | 1141 | return -EINVAL; |
1116 | 1142 | ||
1117 | len = CU16(base + ATOM_CT_SIZE_PTR); | 1143 | len = CU16(base + ATOM_CT_SIZE_PTR); |
1118 | ws = CU8(base + ATOM_CT_WS_PTR); | 1144 | ws = CU8(base + ATOM_CT_WS_PTR); |
@@ -1125,6 +1151,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1125 | ectx.ps_shift = ps / 4; | 1151 | ectx.ps_shift = ps / 4; |
1126 | ectx.start = base; | 1152 | ectx.start = base; |
1127 | ectx.ps = params; | 1153 | ectx.ps = params; |
1154 | ectx.abort = false; | ||
1155 | ectx.last_jump = 0; | ||
1128 | if (ws) | 1156 | if (ws) |
1129 | ectx.ws = kzalloc(4 * ws, GFP_KERNEL); | 1157 | ectx.ws = kzalloc(4 * ws, GFP_KERNEL); |
1130 | else | 1158 | else |
@@ -1137,6 +1165,11 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1137 | SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); | 1165 | SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); |
1138 | else | 1166 | else |
1139 | SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); | 1167 | SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); |
1168 | if (ectx.abort) { | ||
1169 | DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", | ||
1170 | base, len, ws, ps, ptr - 1); | ||
1171 | return -EINVAL; | ||
1172 | } | ||
1140 | 1173 | ||
1141 | if (op < ATOM_OP_CNT && op > 0) | 1174 | if (op < ATOM_OP_CNT && op > 0) |
1142 | opcode_table[op].func(&ectx, &ptr, | 1175 | opcode_table[op].func(&ectx, &ptr, |
@@ -1152,10 +1185,13 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1152 | 1185 | ||
1153 | if (ws) | 1186 | if (ws) |
1154 | kfree(ectx.ws); | 1187 | kfree(ectx.ws); |
1188 | return 0; | ||
1155 | } | 1189 | } |
1156 | 1190 | ||
1157 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | 1191 | int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
1158 | { | 1192 | { |
1193 | int r; | ||
1194 | |||
1159 | mutex_lock(&ctx->mutex); | 1195 | mutex_lock(&ctx->mutex); |
1160 | /* reset reg block */ | 1196 | /* reset reg block */ |
1161 | ctx->reg_block = 0; | 1197 | ctx->reg_block = 0; |
@@ -1163,8 +1199,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | |||
1163 | ctx->fb_base = 0; | 1199 | ctx->fb_base = 0; |
1164 | /* reset io mode */ | 1200 | /* reset io mode */ |
1165 | ctx->io_mode = ATOM_IO_MM; | 1201 | ctx->io_mode = ATOM_IO_MM; |
1166 | atom_execute_table_locked(ctx, index, params); | 1202 | r = atom_execute_table_locked(ctx, index, params); |
1167 | mutex_unlock(&ctx->mutex); | 1203 | mutex_unlock(&ctx->mutex); |
1204 | return r; | ||
1168 | } | 1205 | } |
1169 | 1206 | ||
1170 | static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; | 1207 | static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; |
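With atom_execute_table_locked() returning an error code, a failure anywhere in a nested table (a missing table, or the loop watchdog firing) marks the calling context as aborted via atom_op_calltable(), the per-opcode abort check bails out of the outer table, and atom_execute_table() hands the result back to its caller. A rough userspace sketch of that propagation pattern follows; the recursive structure and names are illustrative only:

#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPTH 4

struct exec_ctx {
    bool abort;
    int depth;
};

static int execute_table(struct exec_ctx *ctx, int index);

/* CALL_TABLE-style opcode: run a nested table, remember any failure */
static void op_calltable(struct exec_ctx *ctx, int index)
{
    if (execute_table(ctx, index))
        ctx->abort = true;
}

static int execute_table(struct exec_ctx *ctx, int index)
{
    if (index < 0)
        return -1;                  /* table not present, like -EINVAL */

    /* pretend each table calls the next one down */
    if (ctx->depth++ < MAX_DEPTH)
        op_calltable(ctx, index - 1);

    /* checked once per executed opcode in the real interpreter */
    if (ctx->abort) {
        fprintf(stderr, "table %d aborted\n", index);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct exec_ctx ctx = { .abort = false, .depth = 0 };
    int r = execute_table(&ctx, 2);   /* top-level call, e.g. ASIC init */

    printf("top-level result: %d\n", r);
    return r ? 1 : 0;
}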
@@ -1248,9 +1285,7 @@ int atom_asic_init(struct atom_context *ctx) | |||
1248 | 1285 | ||
1249 | if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) | 1286 | if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) |
1250 | return 1; | 1287 | return 1; |
1251 | atom_execute_table(ctx, ATOM_CMD_INIT, ps); | 1288 | return atom_execute_table(ctx, ATOM_CMD_INIT, ps); |
1252 | |||
1253 | return 0; | ||
1254 | } | 1289 | } |
1255 | 1290 | ||
1256 | void atom_destroy(struct atom_context *ctx) | 1291 | void atom_destroy(struct atom_context *ctx) |
@@ -1260,12 +1295,16 @@ void atom_destroy(struct atom_context *ctx) | |||
1260 | kfree(ctx); | 1295 | kfree(ctx); |
1261 | } | 1296 | } |
1262 | 1297 | ||
1263 | void atom_parse_data_header(struct atom_context *ctx, int index, | 1298 | bool atom_parse_data_header(struct atom_context *ctx, int index, |
1264 | uint16_t * size, uint8_t * frev, uint8_t * crev, | 1299 | uint16_t * size, uint8_t * frev, uint8_t * crev, |
1265 | uint16_t * data_start) | 1300 | uint16_t * data_start) |
1266 | { | 1301 | { |
1267 | int offset = index * 2 + 4; | 1302 | int offset = index * 2 + 4; |
1268 | int idx = CU16(ctx->data_table + offset); | 1303 | int idx = CU16(ctx->data_table + offset); |
1304 | u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4); | ||
1305 | |||
1306 | if (!mdt[index]) | ||
1307 | return false; | ||
1269 | 1308 | ||
1270 | if (size) | 1309 | if (size) |
1271 | *size = CU16(idx); | 1310 | *size = CU16(idx); |
@@ -1274,38 +1313,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index, | |||
1274 | if (crev) | 1313 | if (crev) |
1275 | *crev = CU8(idx + 3); | 1314 | *crev = CU8(idx + 3); |
1276 | *data_start = idx; | 1315 | *data_start = idx; |
1277 | return; | 1316 | return true; |
1278 | } | 1317 | } |
1279 | 1318 | ||
1280 | void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, | 1319 | bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, |
1281 | uint8_t * crev) | 1320 | uint8_t * crev) |
1282 | { | 1321 | { |
1283 | int offset = index * 2 + 4; | 1322 | int offset = index * 2 + 4; |
1284 | int idx = CU16(ctx->cmd_table + offset); | 1323 | int idx = CU16(ctx->cmd_table + offset); |
1324 | u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4); | ||
1325 | |||
1326 | if (!mct[index]) | ||
1327 | return false; | ||
1285 | 1328 | ||
1286 | if (frev) | 1329 | if (frev) |
1287 | *frev = CU8(idx + 2); | 1330 | *frev = CU8(idx + 2); |
1288 | if (crev) | 1331 | if (crev) |
1289 | *crev = CU8(idx + 3); | 1332 | *crev = CU8(idx + 3); |
1290 | return; | 1333 | return true; |
1291 | } | 1334 | } |
1292 | 1335 | ||
1293 | int atom_allocate_fb_scratch(struct atom_context *ctx) | 1336 | int atom_allocate_fb_scratch(struct atom_context *ctx) |
1294 | { | 1337 | { |
1295 | int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); | 1338 | int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); |
1296 | uint16_t data_offset; | 1339 | uint16_t data_offset; |
1297 | int usage_bytes; | 1340 | int usage_bytes = 0; |
1298 | struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; | 1341 | struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; |
1299 | 1342 | ||
1300 | atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); | 1343 | if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { |
1344 | firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); | ||
1301 | 1345 | ||
1302 | firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); | 1346 | DRM_DEBUG("atom firmware requested %08x %dkb\n", |
1347 | firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, | ||
1348 | firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); | ||
1303 | 1349 | ||
1304 | DRM_DEBUG("atom firmware requested %08x %dkb\n", | 1350 | usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; |
1305 | firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, | 1351 | } |
1306 | firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); | ||
1307 | |||
1308 | usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; | ||
1309 | if (usage_bytes == 0) | 1352 | if (usage_bytes == 0) |
1310 | usage_bytes = 20 * 1024; | 1353 | usage_bytes = 20 * 1024; |
1311 | /* allocate some scratch memory */ | 1354 | /* allocate some scratch memory */ |
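atom_parse_data_header() and atom_parse_cmd_header() now return a bool so callers can distinguish a table that exists from one whose master-table entry is zero, meaning the BIOS simply does not provide it; atom_allocate_fb_scratch() above is converted to check the result before touching the data. The master table is an array of 16-bit offsets and a zero entry must not be dereferenced. A small standalone sketch of that check, with a simplified layout rather than the real BIOS image format:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* a fake "BIOS" master table of 16-bit offsets, some of them zero */
static const uint16_t master_table[] = { 0x0200, 0x0000, 0x0340 };
#define TABLE_COUNT (sizeof(master_table) / sizeof(master_table[0]))

static bool parse_header(int index, uint16_t *data_start)
{
    if (index < 0 || index >= (int)TABLE_COUNT)
        return false;
    if (!master_table[index])       /* zero offset: table not provided */
        return false;

    *data_start = master_table[index];
    return true;
}

int main(void)
{
    uint16_t offset;

    for (int i = 0; i < (int)TABLE_COUNT; i++) {
        if (parse_header(i, &offset))
            printf("table %d at offset 0x%04x\n", i, offset);
        else
            printf("table %d not present, skipping\n", i);
    }
    return 0;
}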
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index bc73781423a1..cd1b64ab5ca7 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h | |||
@@ -140,11 +140,13 @@ struct atom_context { | |||
140 | extern int atom_debug; | 140 | extern int atom_debug; |
141 | 141 | ||
142 | struct atom_context *atom_parse(struct card_info *, void *); | 142 | struct atom_context *atom_parse(struct card_info *, void *); |
143 | void atom_execute_table(struct atom_context *, int, uint32_t *); | 143 | int atom_execute_table(struct atom_context *, int, uint32_t *); |
144 | int atom_asic_init(struct atom_context *); | 144 | int atom_asic_init(struct atom_context *); |
145 | void atom_destroy(struct atom_context *); | 145 | void atom_destroy(struct atom_context *); |
146 | void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); | 146 | bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, |
147 | void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev); | 147 | uint8_t *frev, uint8_t *crev, uint16_t *data_start); |
148 | bool atom_parse_cmd_header(struct atom_context *ctx, int index, | ||
149 | uint8_t *frev, uint8_t *crev); | ||
148 | int atom_allocate_fb_scratch(struct atom_context *ctx); | 150 | int atom_allocate_fb_scratch(struct atom_context *ctx); |
149 | #include "atom-types.h" | 151 | #include "atom-types.h" |
150 | #include "atombios.h" | 152 | #include "atombios.h" |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index dd9fdf560611..fd4ef6d18849 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -353,12 +353,55 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, | |||
353 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 353 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
354 | } | 354 | } |
355 | 355 | ||
356 | static void atombios_disable_ss(struct drm_crtc *crtc) | ||
357 | { | ||
358 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
359 | struct drm_device *dev = crtc->dev; | ||
360 | struct radeon_device *rdev = dev->dev_private; | ||
361 | u32 ss_cntl; | ||
362 | |||
363 | if (ASIC_IS_DCE4(rdev)) { | ||
364 | switch (radeon_crtc->pll_id) { | ||
365 | case ATOM_PPLL1: | ||
366 | ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL); | ||
367 | ss_cntl &= ~EVERGREEN_PxPLL_SS_EN; | ||
368 | WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl); | ||
369 | break; | ||
370 | case ATOM_PPLL2: | ||
371 | ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL); | ||
372 | ss_cntl &= ~EVERGREEN_PxPLL_SS_EN; | ||
373 | WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl); | ||
374 | break; | ||
375 | case ATOM_DCPLL: | ||
376 | case ATOM_PPLL_INVALID: | ||
377 | return; | ||
378 | } | ||
379 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
380 | switch (radeon_crtc->pll_id) { | ||
381 | case ATOM_PPLL1: | ||
382 | ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL); | ||
383 | ss_cntl &= ~1; | ||
384 | WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl); | ||
385 | break; | ||
386 | case ATOM_PPLL2: | ||
387 | ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL); | ||
388 | ss_cntl &= ~1; | ||
389 | WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl); | ||
390 | break; | ||
391 | case ATOM_DCPLL: | ||
392 | case ATOM_PPLL_INVALID: | ||
393 | return; | ||
394 | } | ||
395 | } | ||
396 | } | ||
397 | |||
398 | |||
356 | union atom_enable_ss { | 399 | union atom_enable_ss { |
357 | ENABLE_LVDS_SS_PARAMETERS legacy; | 400 | ENABLE_LVDS_SS_PARAMETERS legacy; |
358 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; | 401 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; |
359 | }; | 402 | }; |
360 | 403 | ||
361 | static void atombios_set_ss(struct drm_crtc *crtc, int enable) | 404 | static void atombios_enable_ss(struct drm_crtc *crtc) |
362 | { | 405 | { |
363 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 406 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
364 | struct drm_device *dev = crtc->dev; | 407 | struct drm_device *dev = crtc->dev; |
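atombios_disable_ss() clears the spread-spectrum enable bit on whichever PLL the CRTC was assigned, using a read-modify-write so the rest of the SS control register is left untouched, and does nothing for the DCPLL or an unassigned PLL. Here is a toy version of that pattern against an in-memory register file; the register names and bit positions are placeholders, not the real AVIVO/Evergreen layout:

#include <stdint.h>
#include <stdio.h>

#define P1PLL_SS_CNTL 0x0
#define P2PLL_SS_CNTL 0x1
#define PLL_SS_EN     (1u << 0)     /* placeholder enable bit */

static uint32_t regs[2] = { 0x00000001u, 0x00000001u }; /* SS enabled on both */

static uint32_t rreg32(unsigned r)             { return regs[r]; }
static void     wreg32(unsigned r, uint32_t v) { regs[r] = v; }

enum pll_id { PLL1, PLL2, PLL_NONE };

static void disable_ss(enum pll_id pll)
{
    uint32_t ss_cntl;

    switch (pll) {
    case PLL1:
        ss_cntl = rreg32(P1PLL_SS_CNTL);
        wreg32(P1PLL_SS_CNTL, ss_cntl & ~PLL_SS_EN);
        break;
    case PLL2:
        ss_cntl = rreg32(P2PLL_SS_CNTL);
        wreg32(P2PLL_SS_CNTL, ss_cntl & ~PLL_SS_EN);
        break;
    default:
        return;                     /* DCPLL or unassigned: nothing to do */
    }
}

int main(void)
{
    disable_ss(PLL1);
    printf("P1PLL_SS_CNTL=0x%08x P2PLL_SS_CNTL=0x%08x\n",
           regs[P1PLL_SS_CNTL], regs[P2PLL_SS_CNTL]);
    return 0;
}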
@@ -387,9 +430,9 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) | |||
387 | step = dig->ss->step; | 430 | step = dig->ss->step; |
388 | delay = dig->ss->delay; | 431 | delay = dig->ss->delay; |
389 | range = dig->ss->range; | 432 | range = dig->ss->range; |
390 | } else if (enable) | 433 | } else |
391 | return; | 434 | return; |
392 | } else if (enable) | 435 | } else |
393 | return; | 436 | return; |
394 | break; | 437 | break; |
395 | } | 438 | } |
@@ -406,13 +449,13 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) | |||
406 | args.v1.ucSpreadSpectrumDelay = delay; | 449 | args.v1.ucSpreadSpectrumDelay = delay; |
407 | args.v1.ucSpreadSpectrumRange = range; | 450 | args.v1.ucSpreadSpectrumRange = range; |
408 | args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; | 451 | args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; |
409 | args.v1.ucEnable = enable; | 452 | args.v1.ucEnable = ATOM_ENABLE; |
410 | } else { | 453 | } else { |
411 | args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); | 454 | args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); |
412 | args.legacy.ucSpreadSpectrumType = type; | 455 | args.legacy.ucSpreadSpectrumType = type; |
413 | args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; | 456 | args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; |
414 | args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; | 457 | args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; |
415 | args.legacy.ucEnable = enable; | 458 | args.legacy.ucEnable = ATOM_ENABLE; |
416 | } | 459 | } |
417 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 460 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
418 | } | 461 | } |
@@ -478,11 +521,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
478 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 521 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
479 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 522 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
480 | adjusted_clock = mode->clock * 2; | 523 | adjusted_clock = mode->clock * 2; |
481 | /* LVDS PLL quirks */ | ||
482 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { | ||
483 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
484 | pll->algo = dig->pll_algo; | ||
485 | } | ||
486 | } else { | 524 | } else { |
487 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 525 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
488 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 526 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
@@ -503,8 +541,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
503 | int index; | 541 | int index; |
504 | 542 | ||
505 | index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); | 543 | index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); |
506 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, | 544 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, |
507 | &crev); | 545 | &crev)) |
546 | return adjusted_clock; | ||
508 | 547 | ||
509 | memset(&args, 0, sizeof(args)); | 548 | memset(&args, 0, sizeof(args)); |
510 | 549 | ||
@@ -542,11 +581,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
542 | } | 581 | } |
543 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 582 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
544 | /* may want to enable SS on DP/eDP eventually */ | 583 | /* may want to enable SS on DP/eDP eventually */ |
545 | args.v3.sInput.ucDispPllConfig |= | 584 | /*args.v3.sInput.ucDispPllConfig |= |
546 | DISPPLL_CONFIG_SS_ENABLE; | 585 | DISPPLL_CONFIG_SS_ENABLE;*/ |
547 | if (mode->clock > 165000) | 586 | if (encoder_mode == ATOM_ENCODER_MODE_DP) |
548 | args.v3.sInput.ucDispPllConfig |= | 587 | args.v3.sInput.ucDispPllConfig |= |
549 | DISPPLL_CONFIG_DUAL_LINK; | 588 | DISPPLL_CONFIG_COHERENT_MODE; |
589 | else { | ||
590 | if (mode->clock > 165000) | ||
591 | args.v3.sInput.ucDispPllConfig |= | ||
592 | DISPPLL_CONFIG_DUAL_LINK; | ||
593 | } | ||
550 | } | 594 | } |
551 | atom_execute_table(rdev->mode_info.atom_context, | 595 | atom_execute_table(rdev->mode_info.atom_context, |
552 | index, (uint32_t *)&args); | 596 | index, (uint32_t *)&args); |
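For the v3 AdjustDisplayPll inputs the DP handling changes: spread spectrum on DP/eDP is left commented out for now, coherent mode is requested whenever the encoder is in DP mode, and dual link is only requested for non-DP encoders with pixel clocks above 165 MHz. A short sketch of that flag selection; the flag values are illustrative, not the AtomBIOS-defined constants:

#include <stdint.h>
#include <stdio.h>

#define CFG_COHERENT_MODE (1u << 0)   /* placeholder bit values */
#define CFG_DUAL_LINK     (1u << 1)

enum encoder_mode { MODE_DP, MODE_DVI, MODE_HDMI };

/* clock_khz is the pixel clock in kHz, as in drm_display_mode.clock */
static uint32_t pll_config(enum encoder_mode mode, int clock_khz)
{
    uint32_t cfg = 0;

    if (mode == MODE_DP)
        cfg |= CFG_COHERENT_MODE;          /* DP: coherent mode */
    else if (clock_khz > 165000)
        cfg |= CFG_DUAL_LINK;              /* TMDS above 165 MHz */

    return cfg;
}

int main(void)
{
    printf("DP  @ 270000 kHz: 0x%x\n", pll_config(MODE_DP, 270000));
    printf("DVI @ 200000 kHz: 0x%x\n", pll_config(MODE_DVI, 200000));
    printf("DVI @ 108000 kHz: 0x%x\n", pll_config(MODE_DVI, 108000));
    return 0;
}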
@@ -592,8 +636,9 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) | |||
592 | memset(&args, 0, sizeof(args)); | 636 | memset(&args, 0, sizeof(args)); |
593 | 637 | ||
594 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); | 638 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); |
595 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, | 639 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, |
596 | &crev); | 640 | &crev)) |
641 | return; | ||
597 | 642 | ||
598 | switch (frev) { | 643 | switch (frev) { |
599 | case 1: | 644 | case 1: |
@@ -667,8 +712,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
667 | &ref_div, &post_div); | 712 | &ref_div, &post_div); |
668 | 713 | ||
669 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); | 714 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); |
670 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, | 715 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, |
671 | &crev); | 716 | &crev)) |
717 | return; | ||
672 | 718 | ||
673 | switch (frev) { | 719 | switch (frev) { |
674 | case 1: | 720 | case 1: |
@@ -1083,15 +1129,12 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
1083 | 1129 | ||
1084 | /* TODO color tiling */ | 1130 | /* TODO color tiling */ |
1085 | 1131 | ||
1086 | /* pick pll */ | 1132 | atombios_disable_ss(crtc); |
1087 | radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); | ||
1088 | |||
1089 | atombios_set_ss(crtc, 0); | ||
1090 | /* always set DCPLL */ | 1133 | /* always set DCPLL */ |
1091 | if (ASIC_IS_DCE4(rdev)) | 1134 | if (ASIC_IS_DCE4(rdev)) |
1092 | atombios_crtc_set_dcpll(crtc); | 1135 | atombios_crtc_set_dcpll(crtc); |
1093 | atombios_crtc_set_pll(crtc, adjusted_mode); | 1136 | atombios_crtc_set_pll(crtc, adjusted_mode); |
1094 | atombios_set_ss(crtc, 1); | 1137 | atombios_enable_ss(crtc); |
1095 | 1138 | ||
1096 | if (ASIC_IS_DCE4(rdev)) | 1139 | if (ASIC_IS_DCE4(rdev)) |
1097 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 1140 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
@@ -1120,6 +1163,11 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, | |||
1120 | 1163 | ||
1121 | static void atombios_crtc_prepare(struct drm_crtc *crtc) | 1164 | static void atombios_crtc_prepare(struct drm_crtc *crtc) |
1122 | { | 1165 | { |
1166 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
1167 | |||
1168 | /* pick pll */ | ||
1169 | radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); | ||
1170 | |||
1123 | atombios_lock_crtc(crtc, ATOM_ENABLE); | 1171 | atombios_lock_crtc(crtc, ATOM_ENABLE); |
1124 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | 1172 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); |
1125 | } | 1173 | } |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 8a133bda00a2..28b31c64f48d 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -745,14 +745,14 @@ void dp_link_train(struct drm_encoder *encoder, | |||
745 | >> DP_TRAIN_PRE_EMPHASIS_SHIFT); | 745 | >> DP_TRAIN_PRE_EMPHASIS_SHIFT); |
746 | 746 | ||
747 | /* disable the training pattern on the sink */ | 747 | /* disable the training pattern on the sink */ |
748 | dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); | ||
749 | |||
750 | /* disable the training pattern on the source */ | ||
748 | if (ASIC_IS_DCE4(rdev)) | 751 | if (ASIC_IS_DCE4(rdev)) |
749 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE); | 752 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE); |
750 | else | 753 | else |
751 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, | 754 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, |
752 | dig_connector->dp_clock, enc_id, 0); | 755 | dig_connector->dp_clock, enc_id, 0); |
753 | |||
754 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, | ||
755 | dig_connector->dp_clock, enc_id, 0); | ||
756 | } | 756 | } |
757 | 757 | ||
758 | int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | 758 | int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index bd2e7aa85c1d..647a0efdc353 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include "drmP.h" | 26 | #include "drmP.h" |
27 | #include "radeon.h" | 27 | #include "radeon.h" |
28 | #include "radeon_asic.h" | ||
28 | #include "radeon_drm.h" | 29 | #include "radeon_drm.h" |
29 | #include "rv770d.h" | 30 | #include "rv770d.h" |
30 | #include "atom.h" | 31 | #include "atom.h" |
@@ -436,7 +437,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
436 | 437 | ||
437 | int evergreen_mc_init(struct radeon_device *rdev) | 438 | int evergreen_mc_init(struct radeon_device *rdev) |
438 | { | 439 | { |
439 | fixed20_12 a; | ||
440 | u32 tmp; | 440 | u32 tmp; |
441 | int chansize, numchan; | 441 | int chansize, numchan; |
442 | 442 | ||
@@ -481,12 +481,8 @@ int evergreen_mc_init(struct radeon_device *rdev) | |||
481 | rdev->mc.real_vram_size = rdev->mc.aper_size; | 481 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
482 | } | 482 | } |
483 | r600_vram_gtt_location(rdev, &rdev->mc); | 483 | r600_vram_gtt_location(rdev, &rdev->mc); |
484 | /* FIXME: we should enforce default clock in case GPU is not in | 484 | radeon_update_bandwidth_info(rdev); |
485 | * default setup | 485 | |
486 | */ | ||
487 | a.full = rfixed_const(100); | ||
488 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
489 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
490 | return 0; | 486 | return 0; |
491 | } | 487 | } |
492 | 488 | ||
@@ -746,6 +742,7 @@ int evergreen_init(struct radeon_device *rdev) | |||
746 | 742 | ||
747 | void evergreen_fini(struct radeon_device *rdev) | 743 | void evergreen_fini(struct radeon_device *rdev) |
748 | { | 744 | { |
745 | radeon_pm_fini(rdev); | ||
749 | evergreen_suspend(rdev); | 746 | evergreen_suspend(rdev); |
750 | #if 0 | 747 | #if 0 |
751 | r600_blit_fini(rdev); | 748 | r600_blit_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 91eb762eb3f9..3ae51ada1abf 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "radeon_drm.h" | 31 | #include "radeon_drm.h" |
32 | #include "radeon_reg.h" | 32 | #include "radeon_reg.h" |
33 | #include "radeon.h" | 33 | #include "radeon.h" |
34 | #include "radeon_asic.h" | ||
34 | #include "r100d.h" | 35 | #include "r100d.h" |
35 | #include "rs100d.h" | 36 | #include "rs100d.h" |
36 | #include "rv200d.h" | 37 | #include "rv200d.h" |
@@ -235,9 +236,9 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
235 | 236 | ||
236 | void r100_pci_gart_fini(struct radeon_device *rdev) | 237 | void r100_pci_gart_fini(struct radeon_device *rdev) |
237 | { | 238 | { |
239 | radeon_gart_fini(rdev); | ||
238 | r100_pci_gart_disable(rdev); | 240 | r100_pci_gart_disable(rdev); |
239 | radeon_gart_table_ram_free(rdev); | 241 | radeon_gart_table_ram_free(rdev); |
240 | radeon_gart_fini(rdev); | ||
241 | } | 242 | } |
242 | 243 | ||
243 | int r100_irq_set(struct radeon_device *rdev) | 244 | int r100_irq_set(struct radeon_device *rdev) |
@@ -312,10 +313,12 @@ int r100_irq_process(struct radeon_device *rdev) | |||
312 | /* Vertical blank interrupts */ | 313 | /* Vertical blank interrupts */ |
313 | if (status & RADEON_CRTC_VBLANK_STAT) { | 314 | if (status & RADEON_CRTC_VBLANK_STAT) { |
314 | drm_handle_vblank(rdev->ddev, 0); | 315 | drm_handle_vblank(rdev->ddev, 0); |
316 | rdev->pm.vblank_sync = true; | ||
315 | wake_up(&rdev->irq.vblank_queue); | 317 | wake_up(&rdev->irq.vblank_queue); |
316 | } | 318 | } |
317 | if (status & RADEON_CRTC2_VBLANK_STAT) { | 319 | if (status & RADEON_CRTC2_VBLANK_STAT) { |
318 | drm_handle_vblank(rdev->ddev, 1); | 320 | drm_handle_vblank(rdev->ddev, 1); |
321 | rdev->pm.vblank_sync = true; | ||
319 | wake_up(&rdev->irq.vblank_queue); | 322 | wake_up(&rdev->irq.vblank_queue); |
320 | } | 323 | } |
321 | if (status & RADEON_FP_DETECT_STAT) { | 324 | if (status & RADEON_FP_DETECT_STAT) { |
@@ -741,6 +744,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
741 | udelay(10); | 744 | udelay(10); |
742 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); | 745 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); |
743 | rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); | 746 | rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); |
747 | /* protect against crazy HW on resume */ | ||
748 | rdev->cp.wptr &= rdev->cp.ptr_mask; | ||
744 | /* Set cp mode to bus mastering & enable cp*/ | 749 | /* Set cp mode to bus mastering & enable cp*/ |
745 | WREG32(RADEON_CP_CSQ_MODE, | 750 | WREG32(RADEON_CP_CSQ_MODE, |
746 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | | 751 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | |
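The "protect against crazy HW on resume" line masks the write pointer read back from the hardware so it is always a valid ring index: the ring size is a power of two, ptr_mask is that size minus one, and any stale or corrupt readback wraps into range instead of walking off the ring. A worked userspace example with a made-up ring size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* a 1024-dword ring: ptr_mask keeps indices in [0, 1023] */
    const uint32_t ring_size = 1024;
    const uint32_t ptr_mask  = ring_size - 1;

    /* pretend the hardware handed back a bogus pointer after resume */
    uint32_t wptr = 0xdeadbeef;

    wptr &= ptr_mask;   /* same idea as rdev->cp.wptr &= rdev->cp.ptr_mask */

    printf("masked wptr = %u (always < %u)\n", wptr, ring_size);
    return 0;
}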
@@ -1804,6 +1809,7 @@ void r100_set_common_regs(struct radeon_device *rdev) | |||
1804 | { | 1809 | { |
1805 | struct drm_device *dev = rdev->ddev; | 1810 | struct drm_device *dev = rdev->ddev; |
1806 | bool force_dac2 = false; | 1811 | bool force_dac2 = false; |
1812 | u32 tmp; | ||
1807 | 1813 | ||
1808 | /* set these so they don't interfere with anything */ | 1814 | /* set these so they don't interfere with anything */ |
1809 | WREG32(RADEON_OV0_SCALE_CNTL, 0); | 1815 | WREG32(RADEON_OV0_SCALE_CNTL, 0); |
@@ -1875,6 +1881,12 @@ void r100_set_common_regs(struct radeon_device *rdev) | |||
1875 | WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); | 1881 | WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); |
1876 | WREG32(RADEON_DAC_CNTL2, dac2_cntl); | 1882 | WREG32(RADEON_DAC_CNTL2, dac2_cntl); |
1877 | } | 1883 | } |
1884 | |||
1885 | /* switch PM block to ACPI mode */ | ||
1886 | tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); | ||
1887 | tmp &= ~RADEON_PM_MODE_SEL; | ||
1888 | WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); | ||
1889 | |||
1878 | } | 1890 | } |
1879 | 1891 | ||
1880 | /* | 1892 | /* |
@@ -2022,6 +2034,7 @@ void r100_mc_init(struct radeon_device *rdev) | |||
2022 | radeon_vram_location(rdev, &rdev->mc, base); | 2034 | radeon_vram_location(rdev, &rdev->mc, base); |
2023 | if (!(rdev->flags & RADEON_IS_AGP)) | 2035 | if (!(rdev->flags & RADEON_IS_AGP)) |
2024 | radeon_gtt_location(rdev, &rdev->mc); | 2036 | radeon_gtt_location(rdev, &rdev->mc); |
2037 | radeon_update_bandwidth_info(rdev); | ||
2025 | } | 2038 | } |
2026 | 2039 | ||
2027 | 2040 | ||
@@ -2385,6 +2398,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
2385 | uint32_t pixel_bytes1 = 0; | 2398 | uint32_t pixel_bytes1 = 0; |
2386 | uint32_t pixel_bytes2 = 0; | 2399 | uint32_t pixel_bytes2 = 0; |
2387 | 2400 | ||
2401 | radeon_update_display_priority(rdev); | ||
2402 | |||
2388 | if (rdev->mode_info.crtcs[0]->base.enabled) { | 2403 | if (rdev->mode_info.crtcs[0]->base.enabled) { |
2389 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; | 2404 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; |
2390 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; | 2405 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; |
@@ -2413,11 +2428,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
2413 | /* | 2428 | /* |
2414 | * determine is there is enough bw for current mode | 2429 | * determine is there is enough bw for current mode |
2415 | */ | 2430 | */ |
2416 | mclk_ff.full = rfixed_const(rdev->clock.default_mclk); | 2431 | sclk_ff = rdev->pm.sclk; |
2417 | temp_ff.full = rfixed_const(100); | 2432 | mclk_ff = rdev->pm.mclk; |
2418 | mclk_ff.full = rfixed_div(mclk_ff, temp_ff); | ||
2419 | sclk_ff.full = rfixed_const(rdev->clock.default_sclk); | ||
2420 | sclk_ff.full = rfixed_div(sclk_ff, temp_ff); | ||
2421 | 2433 | ||
2422 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | 2434 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); |
2423 | temp_ff.full = rfixed_const(temp); | 2435 | temp_ff.full = rfixed_const(temp); |
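The bandwidth code now takes sclk and mclk from rdev->pm, filled in once by radeon_update_bandwidth_info(), instead of rebuilding them from the BIOS default clocks on every call. The arithmetic being replaced is 20.12 fixed point: rfixed_const(x) stores x shifted left by 12 in .full, and dividing by a constant 100 converts a BIOS clock given in 10 kHz units into MHz while keeping the fractional part. A worked standalone example of that representation; the helper names mirror the kernel's but this is only a sketch:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;  /* 20 integer bits, 12 fraction bits */

static fixed20_12 rfixed_const(uint32_t v)
{
    fixed20_12 f = { .full = v << 12 };
    return f;
}

static fixed20_12 rfixed_div(fixed20_12 a, fixed20_12 b)
{
    fixed20_12 f = { .full = (uint32_t)(((uint64_t)a.full << 12) / b.full) };
    return f;
}

static double to_double(fixed20_12 f) { return f.full / 4096.0; }

int main(void)
{
    /* BIOS default engine clock, in 10 kHz units: 62500 == 625.00 MHz */
    fixed20_12 sclk = rfixed_const(62500);
    fixed20_12 a    = rfixed_const(100);

    sclk = rfixed_div(sclk, a);     /* convert to MHz, keeping fractions */

    printf("sclk = %.3f MHz (raw 0x%08x)\n", to_double(sclk), sclk.full);
    return 0;
}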
@@ -3440,6 +3452,7 @@ int r100_suspend(struct radeon_device *rdev) | |||
3440 | 3452 | ||
3441 | void r100_fini(struct radeon_device *rdev) | 3453 | void r100_fini(struct radeon_device *rdev) |
3442 | { | 3454 | { |
3455 | radeon_pm_fini(rdev); | ||
3443 | r100_cp_fini(rdev); | 3456 | r100_cp_fini(rdev); |
3444 | r100_wb_fini(rdev); | 3457 | r100_wb_fini(rdev); |
3445 | r100_ib_fini(rdev); | 3458 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 1146c9909c2c..85617c311212 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "radeon_drm.h" | 30 | #include "radeon_drm.h" |
31 | #include "radeon_reg.h" | 31 | #include "radeon_reg.h" |
32 | #include "radeon.h" | 32 | #include "radeon.h" |
33 | #include "radeon_asic.h" | ||
33 | 34 | ||
34 | #include "r100d.h" | 35 | #include "r100d.h" |
35 | #include "r200_reg_safe.h" | 36 | #include "r200_reg_safe.h" |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 4cef90cd74e5..1023eeb65872 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "radeon_reg.h" | 31 | #include "radeon_reg.h" |
32 | #include "radeon.h" | 32 | #include "radeon.h" |
33 | #include "radeon_asic.h" | ||
33 | #include "radeon_drm.h" | 34 | #include "radeon_drm.h" |
34 | #include "r100_track.h" | 35 | #include "r100_track.h" |
35 | #include "r300d.h" | 36 | #include "r300d.h" |
@@ -164,9 +165,9 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev) | |||
164 | 165 | ||
165 | void rv370_pcie_gart_fini(struct radeon_device *rdev) | 166 | void rv370_pcie_gart_fini(struct radeon_device *rdev) |
166 | { | 167 | { |
168 | radeon_gart_fini(rdev); | ||
167 | rv370_pcie_gart_disable(rdev); | 169 | rv370_pcie_gart_disable(rdev); |
168 | radeon_gart_table_vram_free(rdev); | 170 | radeon_gart_table_vram_free(rdev); |
169 | radeon_gart_fini(rdev); | ||
170 | } | 171 | } |
171 | 172 | ||
172 | void r300_fence_ring_emit(struct radeon_device *rdev, | 173 | void r300_fence_ring_emit(struct radeon_device *rdev, |
@@ -481,6 +482,7 @@ void r300_mc_init(struct radeon_device *rdev) | |||
481 | radeon_vram_location(rdev, &rdev->mc, base); | 482 | radeon_vram_location(rdev, &rdev->mc, base); |
482 | if (!(rdev->flags & RADEON_IS_AGP)) | 483 | if (!(rdev->flags & RADEON_IS_AGP)) |
483 | radeon_gtt_location(rdev, &rdev->mc); | 484 | radeon_gtt_location(rdev, &rdev->mc); |
485 | radeon_update_bandwidth_info(rdev); | ||
484 | } | 486 | } |
485 | 487 | ||
486 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) | 488 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) |
@@ -1334,6 +1336,7 @@ int r300_suspend(struct radeon_device *rdev) | |||
1334 | 1336 | ||
1335 | void r300_fini(struct radeon_device *rdev) | 1337 | void r300_fini(struct radeon_device *rdev) |
1336 | { | 1338 | { |
1339 | radeon_pm_fini(rdev); | ||
1337 | r100_cp_fini(rdev); | 1340 | r100_cp_fini(rdev); |
1338 | r100_wb_fini(rdev); | 1341 | r100_wb_fini(rdev); |
1339 | r100_ib_fini(rdev); | 1342 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index c7593b8f58ee..0b8603ca6974 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "radeon_reg.h" | 30 | #include "radeon_reg.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_asic.h" | ||
32 | #include "atom.h" | 33 | #include "atom.h" |
33 | #include "r100d.h" | 34 | #include "r100d.h" |
34 | #include "r420d.h" | 35 | #include "r420d.h" |
@@ -266,6 +267,7 @@ int r420_suspend(struct radeon_device *rdev) | |||
266 | 267 | ||
267 | void r420_fini(struct radeon_device *rdev) | 268 | void r420_fini(struct radeon_device *rdev) |
268 | { | 269 | { |
270 | radeon_pm_fini(rdev); | ||
269 | r100_cp_fini(rdev); | 271 | r100_cp_fini(rdev); |
270 | r100_wb_fini(rdev); | 272 | r100_wb_fini(rdev); |
271 | r100_ib_fini(rdev); | 273 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 2b8a5dd13516..3c44b8d39318 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -27,6 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "radeon_asic.h" | ||
30 | #include "atom.h" | 31 | #include "atom.h" |
31 | #include "r520d.h" | 32 | #include "r520d.h" |
32 | 33 | ||
@@ -121,19 +122,13 @@ static void r520_vram_get_type(struct radeon_device *rdev) | |||
121 | 122 | ||
122 | void r520_mc_init(struct radeon_device *rdev) | 123 | void r520_mc_init(struct radeon_device *rdev) |
123 | { | 124 | { |
124 | fixed20_12 a; | ||
125 | 125 | ||
126 | r520_vram_get_type(rdev); | 126 | r520_vram_get_type(rdev); |
127 | r100_vram_init_sizes(rdev); | 127 | r100_vram_init_sizes(rdev); |
128 | radeon_vram_location(rdev, &rdev->mc, 0); | 128 | radeon_vram_location(rdev, &rdev->mc, 0); |
129 | if (!(rdev->flags & RADEON_IS_AGP)) | 129 | if (!(rdev->flags & RADEON_IS_AGP)) |
130 | radeon_gtt_location(rdev, &rdev->mc); | 130 | radeon_gtt_location(rdev, &rdev->mc); |
131 | /* FIXME: we should enforce default clock in case GPU is not in | 131 | radeon_update_bandwidth_info(rdev); |
132 | * default setup | ||
133 | */ | ||
134 | a.full = rfixed_const(100); | ||
135 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
136 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
137 | } | 132 | } |
138 | 133 | ||
139 | void r520_mc_program(struct radeon_device *rdev) | 134 | void r520_mc_program(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c52290197292..5509354c7c89 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "drmP.h" | 31 | #include "drmP.h" |
32 | #include "radeon_drm.h" | 32 | #include "radeon_drm.h" |
33 | #include "radeon.h" | 33 | #include "radeon.h" |
34 | #include "radeon_asic.h" | ||
34 | #include "radeon_mode.h" | 35 | #include "radeon_mode.h" |
35 | #include "r600d.h" | 36 | #include "r600d.h" |
36 | #include "atom.h" | 37 | #include "atom.h" |
@@ -491,9 +492,9 @@ void r600_pcie_gart_disable(struct radeon_device *rdev) | |||
491 | 492 | ||
492 | void r600_pcie_gart_fini(struct radeon_device *rdev) | 493 | void r600_pcie_gart_fini(struct radeon_device *rdev) |
493 | { | 494 | { |
495 | radeon_gart_fini(rdev); | ||
494 | r600_pcie_gart_disable(rdev); | 496 | r600_pcie_gart_disable(rdev); |
495 | radeon_gart_table_vram_free(rdev); | 497 | radeon_gart_table_vram_free(rdev); |
496 | radeon_gart_fini(rdev); | ||
497 | } | 498 | } |
498 | 499 | ||
499 | void r600_agp_enable(struct radeon_device *rdev) | 500 | void r600_agp_enable(struct radeon_device *rdev) |
@@ -675,7 +676,6 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |||
675 | 676 | ||
676 | int r600_mc_init(struct radeon_device *rdev) | 677 | int r600_mc_init(struct radeon_device *rdev) |
677 | { | 678 | { |
678 | fixed20_12 a; | ||
679 | u32 tmp; | 679 | u32 tmp; |
680 | int chansize, numchan; | 680 | int chansize, numchan; |
681 | 681 | ||
@@ -719,14 +719,10 @@ int r600_mc_init(struct radeon_device *rdev) | |||
719 | rdev->mc.real_vram_size = rdev->mc.aper_size; | 719 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
720 | } | 720 | } |
721 | r600_vram_gtt_location(rdev, &rdev->mc); | 721 | r600_vram_gtt_location(rdev, &rdev->mc); |
722 | /* FIXME: we should enforce default clock in case GPU is not in | 722 | |
723 | * default setup | ||
724 | */ | ||
725 | a.full = rfixed_const(100); | ||
726 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
727 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
728 | if (rdev->flags & RADEON_IS_IGP) | 723 | if (rdev->flags & RADEON_IS_IGP) |
729 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 724 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
725 | radeon_update_bandwidth_info(rdev); | ||
730 | return 0; | 726 | return 0; |
731 | } | 727 | } |
732 | 728 | ||
@@ -1132,6 +1128,7 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
1132 | /* Setup pipes */ | 1128 | /* Setup pipes */ |
1133 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1129 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
1134 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 1130 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
1131 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | ||
1135 | 1132 | ||
1136 | tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); | 1133 | tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); |
1137 | WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); | 1134 | WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); |
@@ -2119,6 +2116,7 @@ int r600_init(struct radeon_device *rdev) | |||
2119 | 2116 | ||
2120 | void r600_fini(struct radeon_device *rdev) | 2117 | void r600_fini(struct radeon_device *rdev) |
2121 | { | 2118 | { |
2119 | radeon_pm_fini(rdev); | ||
2122 | r600_audio_fini(rdev); | 2120 | r600_audio_fini(rdev); |
2123 | r600_blit_fini(rdev); | 2121 | r600_blit_fini(rdev); |
2124 | r600_cp_fini(rdev); | 2122 | r600_cp_fini(rdev); |
@@ -2398,19 +2396,19 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) | |||
2398 | WREG32(DC_HPD4_INT_CONTROL, tmp); | 2396 | WREG32(DC_HPD4_INT_CONTROL, tmp); |
2399 | if (ASIC_IS_DCE32(rdev)) { | 2397 | if (ASIC_IS_DCE32(rdev)) { |
2400 | tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; | 2398 | tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; |
2401 | WREG32(DC_HPD5_INT_CONTROL, 0); | 2399 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
2402 | tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; | 2400 | tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; |
2403 | WREG32(DC_HPD6_INT_CONTROL, 0); | 2401 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
2404 | } | 2402 | } |
2405 | } else { | 2403 | } else { |
2406 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); | 2404 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); |
2407 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); | 2405 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); |
2408 | tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | 2406 | tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; |
2409 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0); | 2407 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); |
2410 | tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | 2408 | tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; |
2411 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0); | 2409 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); |
2412 | tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | 2410 | tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; |
2413 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0); | 2411 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); |
2414 | } | 2412 | } |
2415 | } | 2413 | } |
2416 | 2414 | ||
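The r600_disable_interrupt_state() fix keeps the hotplug polarity configuration while masking the interrupt: the register is read, everything except the polarity field is dropped, and that value is written back, rather than writing a literal 0 that would also wipe the configured polarity. A minimal sketch of the keep-one-field masking; the bit layout is invented for the example:

#include <stdint.h>
#include <stdio.h>

#define HPD_INT_POLARITY (1u << 8)   /* placeholder: field we must preserve */
#define HPD_INT_ENABLE   (1u << 0)   /* placeholder: what we want cleared */

int main(void)
{
    uint32_t reg = HPD_INT_POLARITY | HPD_INT_ENABLE;   /* current HW state */

    /* wrong: writing 0 would also drop the polarity setting */
    /* right: keep only the polarity bits, clearing everything else */
    uint32_t tmp = reg & HPD_INT_POLARITY;

    reg = tmp;   /* stands in for WREG32(DC_HPDx_INT_CONTROL, tmp) */
    printf("register after disable: 0x%08x (polarity preserved)\n", reg);
    return 0;
}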
@@ -2765,6 +2763,7 @@ restart_ih: | |||
2765 | case 0: /* D1 vblank */ | 2763 | case 0: /* D1 vblank */ |
2766 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | 2764 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { |
2767 | drm_handle_vblank(rdev->ddev, 0); | 2765 | drm_handle_vblank(rdev->ddev, 0); |
2766 | rdev->pm.vblank_sync = true; | ||
2768 | wake_up(&rdev->irq.vblank_queue); | 2767 | wake_up(&rdev->irq.vblank_queue); |
2769 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | 2768 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; |
2770 | DRM_DEBUG("IH: D1 vblank\n"); | 2769 | DRM_DEBUG("IH: D1 vblank\n"); |
@@ -2786,6 +2785,7 @@ restart_ih: | |||
2786 | case 0: /* D2 vblank */ | 2785 | case 0: /* D2 vblank */ |
2787 | if (disp_int & LB_D2_VBLANK_INTERRUPT) { | 2786 | if (disp_int & LB_D2_VBLANK_INTERRUPT) { |
2788 | drm_handle_vblank(rdev->ddev, 1); | 2787 | drm_handle_vblank(rdev->ddev, 1); |
2788 | rdev->pm.vblank_sync = true; | ||
2789 | wake_up(&rdev->irq.vblank_queue); | 2789 | wake_up(&rdev->irq.vblank_queue); |
2790 | disp_int &= ~LB_D2_VBLANK_INTERRUPT; | 2790 | disp_int &= ~LB_D2_VBLANK_INTERRUPT; |
2791 | DRM_DEBUG("IH: D2 vblank\n"); | 2791 | DRM_DEBUG("IH: D2 vblank\n"); |
@@ -2834,14 +2834,14 @@ restart_ih: | |||
2834 | break; | 2834 | break; |
2835 | case 10: | 2835 | case 10: |
2836 | if (disp_int_cont2 & DC_HPD5_INTERRUPT) { | 2836 | if (disp_int_cont2 & DC_HPD5_INTERRUPT) { |
2837 | disp_int_cont &= ~DC_HPD5_INTERRUPT; | 2837 | disp_int_cont2 &= ~DC_HPD5_INTERRUPT; |
2838 | queue_hotplug = true; | 2838 | queue_hotplug = true; |
2839 | DRM_DEBUG("IH: HPD5\n"); | 2839 | DRM_DEBUG("IH: HPD5\n"); |
2840 | } | 2840 | } |
2841 | break; | 2841 | break; |
2842 | case 12: | 2842 | case 12: |
2843 | if (disp_int_cont2 & DC_HPD6_INTERRUPT) { | 2843 | if (disp_int_cont2 & DC_HPD6_INTERRUPT) { |
2844 | disp_int_cont &= ~DC_HPD6_INTERRUPT; | 2844 | disp_int_cont2 &= ~DC_HPD6_INTERRUPT; |
2845 | queue_hotplug = true; | 2845 | queue_hotplug = true; |
2846 | DRM_DEBUG("IH: HPD6\n"); | 2846 | DRM_DEBUG("IH: HPD6\n"); |
2847 | } | 2847 | } |
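The HPD5/HPD6 change is a status-word mixup: the pending bit was tested in disp_int_cont2 but cleared in disp_int_cont, so the interrupt was never acknowledged in the word it actually lives in. A tiny sketch of the corrected ack pattern, with invented status words and bit names:

#include <stdint.h>
#include <stdio.h>

#define HPD5_INTERRUPT (1u << 5)
#define HPD6_INTERRUPT (1u << 6)

int main(void)
{
    uint32_t disp_int_cont  = 0;
    uint32_t disp_int_cont2 = HPD6_INTERRUPT;   /* HPD6 pending */

    if (disp_int_cont2 & HPD6_INTERRUPT) {
        /* ack in the same status word the bit was read from */
        disp_int_cont2 &= ~HPD6_INTERRUPT;
        printf("HPD6 handled\n");
    }

    printf("cont=0x%x cont2=0x%x\n", disp_int_cont, disp_int_cont2);
    return 0;
}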
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index db928016d034..dac7042b797e 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
@@ -182,41 +182,6 @@ int r600_audio_init(struct radeon_device *rdev) | |||
182 | } | 182 | } |
183 | 183 | ||
184 | /* | 184 | /* |
185 | * determine how the encoders and audio interface are wired together | ||
186 | */ | ||
187 | int r600_audio_tmds_index(struct drm_encoder *encoder) | ||
188 | { | ||
189 | struct drm_device *dev = encoder->dev; | ||
190 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
191 | struct drm_encoder *other; | ||
192 | |||
193 | switch (radeon_encoder->encoder_id) { | ||
194 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
195 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
196 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
197 | return 0; | ||
198 | |||
199 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
200 | /* special case check if an TMDS1 is present */ | ||
201 | list_for_each_entry(other, &dev->mode_config.encoder_list, head) { | ||
202 | if (to_radeon_encoder(other)->encoder_id == | ||
203 | ENCODER_OBJECT_ID_INTERNAL_TMDS1) | ||
204 | return 1; | ||
205 | } | ||
206 | return 0; | ||
207 | |||
208 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
209 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
210 | return 1; | ||
211 | |||
212 | default: | ||
213 | DRM_ERROR("Unsupported encoder type 0x%02X\n", | ||
214 | radeon_encoder->encoder_id); | ||
215 | return -1; | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * attach the audio codec to the clock source of the encoder | 185 | * attach the audio codec to the clock source of the encoder |
221 | */ | 186 | */ |
222 | void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | 187 | void r600_audio_set_clock(struct drm_encoder *encoder, int clock) |
@@ -224,6 +189,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | |||
224 | struct drm_device *dev = encoder->dev; | 189 | struct drm_device *dev = encoder->dev; |
225 | struct radeon_device *rdev = dev->dev_private; | 190 | struct radeon_device *rdev = dev->dev_private; |
226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 191 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
192 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
227 | int base_rate = 48000; | 193 | int base_rate = 48000; |
228 | 194 | ||
229 | switch (radeon_encoder->encoder_id) { | 195 | switch (radeon_encoder->encoder_id) { |
@@ -231,32 +197,34 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | |||
231 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | 197 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: |
232 | WREG32_P(R600_AUDIO_TIMING, 0, ~0x301); | 198 | WREG32_P(R600_AUDIO_TIMING, 0, ~0x301); |
233 | break; | 199 | break; |
234 | |||
235 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 200 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
236 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 201 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
237 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 202 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
238 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 203 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
239 | WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301); | 204 | WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301); |
240 | break; | 205 | break; |
241 | |||
242 | default: | 206 | default: |
243 | DRM_ERROR("Unsupported encoder type 0x%02X\n", | 207 | DRM_ERROR("Unsupported encoder type 0x%02X\n", |
244 | radeon_encoder->encoder_id); | 208 | radeon_encoder->encoder_id); |
245 | return; | 209 | return; |
246 | } | 210 | } |
247 | 211 | ||
248 | switch (r600_audio_tmds_index(encoder)) { | 212 | switch (dig->dig_encoder) { |
249 | case 0: | 213 | case 0: |
250 | WREG32(R600_AUDIO_PLL1_MUL, base_rate*50); | 214 | WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50); |
251 | WREG32(R600_AUDIO_PLL1_DIV, clock*100); | 215 | WREG32(R600_AUDIO_PLL1_DIV, clock * 100); |
252 | WREG32(R600_AUDIO_CLK_SRCSEL, 0); | 216 | WREG32(R600_AUDIO_CLK_SRCSEL, 0); |
253 | break; | 217 | break; |
254 | 218 | ||
255 | case 1: | 219 | case 1: |
256 | WREG32(R600_AUDIO_PLL2_MUL, base_rate*50); | 220 | WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50); |
257 | WREG32(R600_AUDIO_PLL2_DIV, clock*100); | 221 | WREG32(R600_AUDIO_PLL2_DIV, clock * 100); |
258 | WREG32(R600_AUDIO_CLK_SRCSEL, 1); | 222 | WREG32(R600_AUDIO_CLK_SRCSEL, 1); |
259 | break; | 223 | break; |
224 | default: | ||
225 | dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n", | ||
226 | radeon_encoder->encoder_id); | ||
227 | return; | ||
260 | } | 228 | } |
261 | } | 229 | } |
262 | 230 | ||
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c index a112c59f9d82..0271b53fa2dd 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.c +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c | |||
@@ -1,7 +1,42 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Alex Deucher <alexander.deucher@amd.com> | ||
25 | */ | ||
1 | 26 | ||
2 | #include <linux/types.h> | 27 | #include <linux/types.h> |
3 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
4 | 29 | ||
30 | /* | ||
31 | * R6xx+ cards need to use the 3D engine to blit data which requires | ||
32 | * quite a bit of hw state setup. Rather than pull the whole 3D driver | ||
33 | * (which normally generates the 3D state) into the DRM, we opt to use | ||
34 | statically generated state tables. The register state and shaders | ||
35 | * were hand generated to support blitting functionality. See the 3D | ||
36 | * driver or documentation for descriptions of the registers and | ||
37 | * shader instructions. | ||
38 | */ | ||
39 | |||
5 | const u32 r6xx_default_state[] = | 40 | const u32 r6xx_default_state[] = |
6 | { | 41 | { |
7 | 0xc0002400, | 42 | 0xc0002400, |
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 40416c068d9f..68e6f4349309 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c | |||
@@ -1548,10 +1548,13 @@ static void r700_gfx_init(struct drm_device *dev, | |||
1548 | 1548 | ||
1549 | RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1549 | RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
1550 | RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 1550 | RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
1551 | RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | ||
1551 | 1552 | ||
1552 | RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1553 | RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
1553 | RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); | 1554 | RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); |
1554 | RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); | 1555 | RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); |
1556 | RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0); | ||
1557 | RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0); | ||
1555 | 1558 | ||
1556 | num_qd_pipes = | 1559 | num_qd_pipes = |
1557 | R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8); | 1560 | R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8); |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index cd2c63bce501..c39c1bc13016 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -45,6 +45,7 @@ struct r600_cs_track { | |||
45 | u32 nbanks; | 45 | u32 nbanks; |
46 | u32 npipes; | 46 | u32 npipes; |
47 | /* value we track */ | 47 | /* value we track */ |
48 | u32 sq_config; | ||
48 | u32 nsamples; | 49 | u32 nsamples; |
49 | u32 cb_color_base_last[8]; | 50 | u32 cb_color_base_last[8]; |
50 | struct radeon_bo *cb_color_bo[8]; | 51 | struct radeon_bo *cb_color_bo[8]; |
@@ -141,6 +142,8 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
141 | { | 142 | { |
142 | int i; | 143 | int i; |
143 | 144 | ||
145 | /* assume DX9 mode */ | ||
146 | track->sq_config = DX9_CONSTS; | ||
144 | for (i = 0; i < 8; i++) { | 147 | for (i = 0; i < 8; i++) { |
145 | track->cb_color_base_last[i] = 0; | 148 | track->cb_color_base_last[i] = 0; |
146 | track->cb_color_size[i] = 0; | 149 | track->cb_color_size[i] = 0; |
@@ -715,6 +718,9 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
715 | tmp =radeon_get_ib_value(p, idx); | 718 | tmp =radeon_get_ib_value(p, idx); |
716 | ib[idx] = 0; | 719 | ib[idx] = 0; |
717 | break; | 720 | break; |
721 | case SQ_CONFIG: | ||
722 | track->sq_config = radeon_get_ib_value(p, idx); | ||
723 | break; | ||
718 | case R_028800_DB_DEPTH_CONTROL: | 724 | case R_028800_DB_DEPTH_CONTROL: |
719 | track->db_depth_control = radeon_get_ib_value(p, idx); | 725 | track->db_depth_control = radeon_get_ib_value(p, idx); |
720 | break; | 726 | break; |
@@ -869,6 +875,54 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
869 | case SQ_PGM_START_VS: | 875 | case SQ_PGM_START_VS: |
870 | case SQ_PGM_START_GS: | 876 | case SQ_PGM_START_GS: |
871 | case SQ_PGM_START_PS: | 877 | case SQ_PGM_START_PS: |
878 | case SQ_ALU_CONST_CACHE_GS_0: | ||
879 | case SQ_ALU_CONST_CACHE_GS_1: | ||
880 | case SQ_ALU_CONST_CACHE_GS_2: | ||
881 | case SQ_ALU_CONST_CACHE_GS_3: | ||
882 | case SQ_ALU_CONST_CACHE_GS_4: | ||
883 | case SQ_ALU_CONST_CACHE_GS_5: | ||
884 | case SQ_ALU_CONST_CACHE_GS_6: | ||
885 | case SQ_ALU_CONST_CACHE_GS_7: | ||
886 | case SQ_ALU_CONST_CACHE_GS_8: | ||
887 | case SQ_ALU_CONST_CACHE_GS_9: | ||
888 | case SQ_ALU_CONST_CACHE_GS_10: | ||
889 | case SQ_ALU_CONST_CACHE_GS_11: | ||
890 | case SQ_ALU_CONST_CACHE_GS_12: | ||
891 | case SQ_ALU_CONST_CACHE_GS_13: | ||
892 | case SQ_ALU_CONST_CACHE_GS_14: | ||
893 | case SQ_ALU_CONST_CACHE_GS_15: | ||
894 | case SQ_ALU_CONST_CACHE_PS_0: | ||
895 | case SQ_ALU_CONST_CACHE_PS_1: | ||
896 | case SQ_ALU_CONST_CACHE_PS_2: | ||
897 | case SQ_ALU_CONST_CACHE_PS_3: | ||
898 | case SQ_ALU_CONST_CACHE_PS_4: | ||
899 | case SQ_ALU_CONST_CACHE_PS_5: | ||
900 | case SQ_ALU_CONST_CACHE_PS_6: | ||
901 | case SQ_ALU_CONST_CACHE_PS_7: | ||
902 | case SQ_ALU_CONST_CACHE_PS_8: | ||
903 | case SQ_ALU_CONST_CACHE_PS_9: | ||
904 | case SQ_ALU_CONST_CACHE_PS_10: | ||
905 | case SQ_ALU_CONST_CACHE_PS_11: | ||
906 | case SQ_ALU_CONST_CACHE_PS_12: | ||
907 | case SQ_ALU_CONST_CACHE_PS_13: | ||
908 | case SQ_ALU_CONST_CACHE_PS_14: | ||
909 | case SQ_ALU_CONST_CACHE_PS_15: | ||
910 | case SQ_ALU_CONST_CACHE_VS_0: | ||
911 | case SQ_ALU_CONST_CACHE_VS_1: | ||
912 | case SQ_ALU_CONST_CACHE_VS_2: | ||
913 | case SQ_ALU_CONST_CACHE_VS_3: | ||
914 | case SQ_ALU_CONST_CACHE_VS_4: | ||
915 | case SQ_ALU_CONST_CACHE_VS_5: | ||
916 | case SQ_ALU_CONST_CACHE_VS_6: | ||
917 | case SQ_ALU_CONST_CACHE_VS_7: | ||
918 | case SQ_ALU_CONST_CACHE_VS_8: | ||
919 | case SQ_ALU_CONST_CACHE_VS_9: | ||
920 | case SQ_ALU_CONST_CACHE_VS_10: | ||
921 | case SQ_ALU_CONST_CACHE_VS_11: | ||
922 | case SQ_ALU_CONST_CACHE_VS_12: | ||
923 | case SQ_ALU_CONST_CACHE_VS_13: | ||
924 | case SQ_ALU_CONST_CACHE_VS_14: | ||
925 | case SQ_ALU_CONST_CACHE_VS_15: | ||
872 | r = r600_cs_packet_next_reloc(p, &reloc); | 926 | r = r600_cs_packet_next_reloc(p, &reloc); |
873 | if (r) { | 927 | if (r) { |
874 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | 928 | dev_warn(p->dev, "bad SET_CONTEXT_REG " |
@@ -1226,13 +1280,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1226 | } | 1280 | } |
1227 | break; | 1281 | break; |
1228 | case PACKET3_SET_ALU_CONST: | 1282 | case PACKET3_SET_ALU_CONST: |
1229 | start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; | 1283 | if (track->sq_config & DX9_CONSTS) { |
1230 | end_reg = 4 * pkt->count + start_reg - 4; | 1284 | start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; |
1231 | if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || | 1285 | end_reg = 4 * pkt->count + start_reg - 4; |
1232 | (start_reg >= PACKET3_SET_ALU_CONST_END) || | 1286 | if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || |
1233 | (end_reg >= PACKET3_SET_ALU_CONST_END)) { | 1287 | (start_reg >= PACKET3_SET_ALU_CONST_END) || |
1234 | DRM_ERROR("bad SET_ALU_CONST\n"); | 1288 | (end_reg >= PACKET3_SET_ALU_CONST_END)) { |
1235 | return -EINVAL; | 1289 | DRM_ERROR("bad SET_ALU_CONST\n"); |
1290 | return -EINVAL; | ||
1291 | } | ||
1236 | } | 1292 | } |
1237 | break; | 1293 | break; |
1238 | case PACKET3_SET_BOOL_CONST: | 1294 | case PACKET3_SET_BOOL_CONST: |
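The SQ_CONFIG tracking above exists because SET_ALU_CONST only carries register-mapped constants when the DX9_CONSTS bit is set; in DX10-style operation the ALU constants come from the SQ_ALU_CONST_CACHE_* buffers instead, which is why those registers now receive relocations and the packet range check is skipped. A minimal sketch of that decision, assuming DX9_CONSTS is a single flag bit (the bit position below is illustrative, not taken from the header):

#include <stdbool.h>
#include <stdint.h>

#define DX9_CONSTS (1u << 2)    /* assumed bit position, for illustration only */

/* SET_ALU_CONST payloads only need their register range validated when
 * the command stream uses DX9-style (register file) constants. */
static bool alu_const_range_check_needed(uint32_t sq_config)
{
        return (sq_config & DX9_CONSTS) != 0;
}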
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index fcc949df0e5d..029fa1406d1d 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -42,13 +42,13 @@ enum r600_hdmi_color_format { | |||
42 | */ | 42 | */ |
43 | enum r600_hdmi_iec_status_bits { | 43 | enum r600_hdmi_iec_status_bits { |
44 | AUDIO_STATUS_DIG_ENABLE = 0x01, | 44 | AUDIO_STATUS_DIG_ENABLE = 0x01, |
45 | AUDIO_STATUS_V = 0x02, | 45 | AUDIO_STATUS_V = 0x02, |
46 | AUDIO_STATUS_VCFG = 0x04, | 46 | AUDIO_STATUS_VCFG = 0x04, |
47 | AUDIO_STATUS_EMPHASIS = 0x08, | 47 | AUDIO_STATUS_EMPHASIS = 0x08, |
48 | AUDIO_STATUS_COPYRIGHT = 0x10, | 48 | AUDIO_STATUS_COPYRIGHT = 0x10, |
49 | AUDIO_STATUS_NONAUDIO = 0x20, | 49 | AUDIO_STATUS_NONAUDIO = 0x20, |
50 | AUDIO_STATUS_PROFESSIONAL = 0x40, | 50 | AUDIO_STATUS_PROFESSIONAL = 0x40, |
51 | AUDIO_STATUS_LEVEL = 0x80 | 51 | AUDIO_STATUS_LEVEL = 0x80 |
52 | }; | 52 | }; |
53 | 53 | ||
54 | struct { | 54 | struct { |
@@ -85,7 +85,7 @@ struct { | |||
85 | static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq) | 85 | static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq) |
86 | { | 86 | { |
87 | if (*CTS == 0) | 87 | if (*CTS == 0) |
88 | *CTS = clock*N/(128*freq)*1000; | 88 | *CTS = clock * N / (128 * freq) * 1000; |
89 | DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", | 89 | DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", |
90 | N, *CTS, freq); | 90 | N, *CTS, freq); |
91 | } | 91 | } |
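For reference, the relation behind r600_hdmi_calc_CTS() is the HDMI Audio Clock Regeneration equation 128 * f_audio = f_TMDS * N / CTS, i.e. CTS = f_TMDS * N / (128 * f_audio); the pixel clock arrives in kHz, hence the trailing * 1000. A worked example, assuming the common 48 kHz audio / 74.25 MHz pixel-clock case with N = 6144: CTS = 74,250,000 * 6144 / (128 * 48,000) = 74,250.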
@@ -131,11 +131,11 @@ static void r600_hdmi_infoframe_checksum(uint8_t packetType, | |||
131 | uint8_t length, | 131 | uint8_t length, |
132 | uint8_t *frame) | 132 | uint8_t *frame) |
133 | { | 133 | { |
134 | int i; | 134 | int i; |
135 | frame[0] = packetType + versionNumber + length; | 135 | frame[0] = packetType + versionNumber + length; |
136 | for (i = 1; i <= length; i++) | 136 | for (i = 1; i <= length; i++) |
137 | frame[0] += frame[i]; | 137 | frame[0] += frame[i]; |
138 | frame[0] = 0x100 - frame[0]; | 138 | frame[0] = 0x100 - frame[0]; |
139 | } | 139 | } |
140 | 140 | ||
141 | /* | 141 | /* |
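r600_hdmi_infoframe_checksum() implements the usual CEA-861 InfoFrame rule: the checksum byte is chosen so that the header bytes plus every payload byte plus the checksum itself sum to zero modulo 256. A standalone sketch of the same computation (the payload is passed separately here, whereas the driver stores it at frame[1..length]):

#include <stdint.h>

/* CEA-861 InfoFrame checksum: header + payload + checksum == 0 (mod 256). */
static uint8_t infoframe_checksum(uint8_t type, uint8_t version,
                                  uint8_t length, const uint8_t *payload)
{
        uint8_t sum = type + version + length;
        uint8_t i;

        for (i = 0; i < length; i++)
                sum += payload[i];

        return (uint8_t)(0x100 - sum);
}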
@@ -417,90 +417,141 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, | |||
417 | WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000); | 417 | WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000); |
418 | } | 418 | } |
419 | 419 | ||
420 | /* | 420 | static int r600_hdmi_find_free_block(struct drm_device *dev) |
421 | * enable/disable the HDMI engine | 421 | { |
422 | */ | 422 | struct radeon_device *rdev = dev->dev_private; |
423 | void r600_hdmi_enable(struct drm_encoder *encoder, int enable) | 423 | struct drm_encoder *encoder; |
424 | struct radeon_encoder *radeon_encoder; | ||
425 | bool free_blocks[3] = { true, true, true }; | ||
426 | |||
427 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
428 | radeon_encoder = to_radeon_encoder(encoder); | ||
429 | switch (radeon_encoder->hdmi_offset) { | ||
430 | case R600_HDMI_BLOCK1: | ||
431 | free_blocks[0] = false; | ||
432 | break; | ||
433 | case R600_HDMI_BLOCK2: | ||
434 | free_blocks[1] = false; | ||
435 | break; | ||
436 | case R600_HDMI_BLOCK3: | ||
437 | free_blocks[2] = false; | ||
438 | break; | ||
439 | } | ||
440 | } | ||
441 | |||
442 | if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) { | ||
443 | return free_blocks[0] ? R600_HDMI_BLOCK1 : 0; | ||
444 | } else if (rdev->family >= CHIP_R600) { | ||
445 | if (free_blocks[0]) | ||
446 | return R600_HDMI_BLOCK1; | ||
447 | else if (free_blocks[1]) | ||
448 | return R600_HDMI_BLOCK2; | ||
449 | } | ||
450 | return 0; | ||
451 | } | ||
452 | |||
453 | static void r600_hdmi_assign_block(struct drm_encoder *encoder) | ||
424 | { | 454 | { |
425 | struct drm_device *dev = encoder->dev; | 455 | struct drm_device *dev = encoder->dev; |
426 | struct radeon_device *rdev = dev->dev_private; | 456 | struct radeon_device *rdev = dev->dev_private; |
427 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 457 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
428 | uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; | 458 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
429 | 459 | ||
430 | if (!offset) | 460 | if (!dig) { |
461 | dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n"); | ||
431 | return; | 462 | return; |
463 | } | ||
432 | 464 | ||
433 | DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset); | 465 | if (ASIC_IS_DCE4(rdev)) { |
434 | 466 | /* TODO */ | |
435 | /* some version of atombios ignore the enable HDMI flag | 467 | } else if (ASIC_IS_DCE3(rdev)) { |
436 | * so enabling/disabling HDMI was moved here for TMDS1+2 */ | 468 | radeon_encoder->hdmi_offset = dig->dig_encoder ? |
437 | switch (radeon_encoder->encoder_id) { | 469 | R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1; |
438 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | 470 | if (ASIC_IS_DCE32(rdev)) |
439 | WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4); | 471 | radeon_encoder->hdmi_config_offset = dig->dig_encoder ? |
440 | WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0); | 472 | R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1; |
441 | break; | 473 | } else if (rdev->family >= CHIP_R600) { |
442 | 474 | radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev); | |
443 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
444 | WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4); | ||
445 | WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0); | ||
446 | break; | ||
447 | |||
448 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
449 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
450 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
451 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
452 | /* This part is doubtfull in my opinion */ | ||
453 | WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0); | ||
454 | break; | ||
455 | |||
456 | default: | ||
457 | DRM_ERROR("unknown HDMI output type\n"); | ||
458 | break; | ||
459 | } | 475 | } |
460 | } | 476 | } |
461 | 477 | ||
462 | /* | 478 | /* |
463 | * determin at which register offset the HDMI encoder is | 479 | * enable the HDMI engine |
464 | */ | 480 | */ |
465 | void r600_hdmi_init(struct drm_encoder *encoder) | 481 | void r600_hdmi_enable(struct drm_encoder *encoder) |
466 | { | 482 | { |
483 | struct drm_device *dev = encoder->dev; | ||
484 | struct radeon_device *rdev = dev->dev_private; | ||
467 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 485 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
468 | 486 | ||
469 | switch (radeon_encoder->encoder_id) { | 487 | if (!radeon_encoder->hdmi_offset) { |
470 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | 488 | r600_hdmi_assign_block(encoder); |
471 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 489 | if (!radeon_encoder->hdmi_offset) { |
472 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 490 | dev_warn(rdev->dev, "Could not find HDMI block for " |
473 | radeon_encoder->hdmi_offset = R600_HDMI_TMDS1; | 491 | "0x%x encoder\n", radeon_encoder->encoder_id); |
474 | break; | 492 | return; |
475 | 493 | } | |
476 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | 494 | } |
477 | switch (r600_audio_tmds_index(encoder)) { | 495 | |
478 | case 0: | 496 | if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { |
479 | radeon_encoder->hdmi_offset = R600_HDMI_TMDS1; | 497 | WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1); |
498 | } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { | ||
499 | int offset = radeon_encoder->hdmi_offset; | ||
500 | switch (radeon_encoder->encoder_id) { | ||
501 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
502 | WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4); | ||
503 | WREG32(offset + R600_HDMI_ENABLE, 0x101); | ||
480 | break; | 504 | break; |
481 | case 1: | 505 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: |
482 | radeon_encoder->hdmi_offset = R600_HDMI_TMDS2; | 506 | WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4); |
507 | WREG32(offset + R600_HDMI_ENABLE, 0x105); | ||
483 | break; | 508 | break; |
484 | default: | 509 | default: |
485 | radeon_encoder->hdmi_offset = 0; | 510 | dev_err(rdev->dev, "Unknown HDMI output type\n"); |
486 | break; | 511 | break; |
487 | } | 512 | } |
488 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 513 | } |
489 | radeon_encoder->hdmi_offset = R600_HDMI_TMDS2; | ||
490 | break; | ||
491 | 514 | ||
492 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 515 | DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n", |
493 | radeon_encoder->hdmi_offset = R600_HDMI_DIG; | 516 | radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); |
494 | break; | 517 | } |
495 | 518 | ||
496 | default: | 519 | /* |
497 | radeon_encoder->hdmi_offset = 0; | 520 | * disable the HDMI engine |
498 | break; | 521 | */ |
522 | void r600_hdmi_disable(struct drm_encoder *encoder) | ||
523 | { | ||
524 | struct drm_device *dev = encoder->dev; | ||
525 | struct radeon_device *rdev = dev->dev_private; | ||
526 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
527 | |||
528 | if (!radeon_encoder->hdmi_offset) { | ||
529 | dev_err(rdev->dev, "Disabling HDMI that is not enabled\n"); | ||
530 | return; | ||
499 | } | 531 | } |
500 | 532 | ||
501 | DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n", | 533 | DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n", |
502 | radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); | 534 | radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); |
535 | |||
536 | if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { | ||
537 | WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1); | ||
538 | } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { | ||
539 | int offset = radeon_encoder->hdmi_offset; | ||
540 | switch (radeon_encoder->encoder_id) { | ||
541 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
542 | WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4); | ||
543 | WREG32(offset + R600_HDMI_ENABLE, 0); | ||
544 | break; | ||
545 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
546 | WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4); | ||
547 | WREG32(offset + R600_HDMI_ENABLE, 0); | ||
548 | break; | ||
549 | default: | ||
550 | dev_err(rdev->dev, "Unknown HDMI output type\n"); | ||
551 | break; | ||
552 | } | ||
553 | } | ||
503 | 554 | ||
504 | /* TODO: make this configureable */ | 555 | radeon_encoder->hdmi_offset = 0; |
505 | radeon_encoder->hdmi_audio_workaround = 0; | 556 | radeon_encoder->hdmi_config_offset = 0; |
506 | } | 557 | } |
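The new r600_hdmi_find_free_block()/r600_hdmi_assign_block() pair replaces the old fixed encoder-to-TMDS mapping: on pre-DCE3 parts the driver now walks the encoder list, marks the HDMI blocks that are already claimed, and hands out the first free one, while DCE3/DCE32 derive the block (and, on DCE32, the config offset) from the DIG encoder. Stripped of the radeon types, the allocation is a plain first-free-slot scan; the helper below is an illustrative sketch, not driver code:

#include <stdbool.h>
#include <stddef.h>

/* Return the index of the first unused slot, or -1 if every slot is taken.
 * In the driver, "used" corresponds to HDMI block offsets already claimed
 * by other encoders. */
static int find_free_slot(const bool *used, size_t nslots)
{
        size_t i;

        for (i = 0; i < nslots; i++)
                if (!used[i])
                        return (int)i;
        return -1;
}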
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h index d0e28ffdeda9..7b1d22370f6e 100644 --- a/drivers/gpu/drm/radeon/r600_reg.h +++ b/drivers/gpu/drm/radeon/r600_reg.h | |||
@@ -152,9 +152,9 @@ | |||
152 | #define R600_AUDIO_STATUS_BITS 0x73d8 | 152 | #define R600_AUDIO_STATUS_BITS 0x73d8 |
153 | 153 | ||
154 | /* HDMI base register addresses */ | 154 | /* HDMI base register addresses */ |
155 | #define R600_HDMI_TMDS1 0x7400 | 155 | #define R600_HDMI_BLOCK1 0x7400 |
156 | #define R600_HDMI_TMDS2 0x7700 | 156 | #define R600_HDMI_BLOCK2 0x7700 |
157 | #define R600_HDMI_DIG 0x7800 | 157 | #define R600_HDMI_BLOCK3 0x7800 |
158 | 158 | ||
159 | /* HDMI registers */ | 159 | /* HDMI registers */ |
160 | #define R600_HDMI_ENABLE 0x00 | 160 | #define R600_HDMI_ENABLE 0x00 |
@@ -185,4 +185,8 @@ | |||
185 | #define R600_HDMI_AUDIO_DEBUG_2 0xe8 | 185 | #define R600_HDMI_AUDIO_DEBUG_2 0xe8 |
186 | #define R600_HDMI_AUDIO_DEBUG_3 0xec | 186 | #define R600_HDMI_AUDIO_DEBUG_3 0xec |
187 | 187 | ||
188 | /* HDMI additional config base register addresses */ | ||
189 | #define R600_HDMI_CONFIG1 0x7600 | ||
190 | #define R600_HDMI_CONFIG2 0x7a00 | ||
191 | |||
188 | #endif | 192 | #endif |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 5b2e4d442823..59c1f8793e60 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -77,6 +77,55 @@ | |||
77 | #define CB_COLOR0_FRAG 0x280e0 | 77 | #define CB_COLOR0_FRAG 0x280e0 |
78 | #define CB_COLOR0_MASK 0x28100 | 78 | #define CB_COLOR0_MASK 0x28100 |
79 | 79 | ||
80 | #define SQ_ALU_CONST_CACHE_PS_0 0x28940 | ||
81 | #define SQ_ALU_CONST_CACHE_PS_1 0x28944 | ||
82 | #define SQ_ALU_CONST_CACHE_PS_2 0x28948 | ||
83 | #define SQ_ALU_CONST_CACHE_PS_3 0x2894c | ||
84 | #define SQ_ALU_CONST_CACHE_PS_4 0x28950 | ||
85 | #define SQ_ALU_CONST_CACHE_PS_5 0x28954 | ||
86 | #define SQ_ALU_CONST_CACHE_PS_6 0x28958 | ||
87 | #define SQ_ALU_CONST_CACHE_PS_7 0x2895c | ||
88 | #define SQ_ALU_CONST_CACHE_PS_8 0x28960 | ||
89 | #define SQ_ALU_CONST_CACHE_PS_9 0x28964 | ||
90 | #define SQ_ALU_CONST_CACHE_PS_10 0x28968 | ||
91 | #define SQ_ALU_CONST_CACHE_PS_11 0x2896c | ||
92 | #define SQ_ALU_CONST_CACHE_PS_12 0x28970 | ||
93 | #define SQ_ALU_CONST_CACHE_PS_13 0x28974 | ||
94 | #define SQ_ALU_CONST_CACHE_PS_14 0x28978 | ||
95 | #define SQ_ALU_CONST_CACHE_PS_15 0x2897c | ||
96 | #define SQ_ALU_CONST_CACHE_VS_0 0x28980 | ||
97 | #define SQ_ALU_CONST_CACHE_VS_1 0x28984 | ||
98 | #define SQ_ALU_CONST_CACHE_VS_2 0x28988 | ||
99 | #define SQ_ALU_CONST_CACHE_VS_3 0x2898c | ||
100 | #define SQ_ALU_CONST_CACHE_VS_4 0x28990 | ||
101 | #define SQ_ALU_CONST_CACHE_VS_5 0x28994 | ||
102 | #define SQ_ALU_CONST_CACHE_VS_6 0x28998 | ||
103 | #define SQ_ALU_CONST_CACHE_VS_7 0x2899c | ||
104 | #define SQ_ALU_CONST_CACHE_VS_8 0x289a0 | ||
105 | #define SQ_ALU_CONST_CACHE_VS_9 0x289a4 | ||
106 | #define SQ_ALU_CONST_CACHE_VS_10 0x289a8 | ||
107 | #define SQ_ALU_CONST_CACHE_VS_11 0x289ac | ||
108 | #define SQ_ALU_CONST_CACHE_VS_12 0x289b0 | ||
109 | #define SQ_ALU_CONST_CACHE_VS_13 0x289b4 | ||
110 | #define SQ_ALU_CONST_CACHE_VS_14 0x289b8 | ||
111 | #define SQ_ALU_CONST_CACHE_VS_15 0x289bc | ||
112 | #define SQ_ALU_CONST_CACHE_GS_0 0x289c0 | ||
113 | #define SQ_ALU_CONST_CACHE_GS_1 0x289c4 | ||
114 | #define SQ_ALU_CONST_CACHE_GS_2 0x289c8 | ||
115 | #define SQ_ALU_CONST_CACHE_GS_3 0x289cc | ||
116 | #define SQ_ALU_CONST_CACHE_GS_4 0x289d0 | ||
117 | #define SQ_ALU_CONST_CACHE_GS_5 0x289d4 | ||
118 | #define SQ_ALU_CONST_CACHE_GS_6 0x289d8 | ||
119 | #define SQ_ALU_CONST_CACHE_GS_7 0x289dc | ||
120 | #define SQ_ALU_CONST_CACHE_GS_8 0x289e0 | ||
121 | #define SQ_ALU_CONST_CACHE_GS_9 0x289e4 | ||
122 | #define SQ_ALU_CONST_CACHE_GS_10 0x289e8 | ||
123 | #define SQ_ALU_CONST_CACHE_GS_11 0x289ec | ||
124 | #define SQ_ALU_CONST_CACHE_GS_12 0x289f0 | ||
125 | #define SQ_ALU_CONST_CACHE_GS_13 0x289f4 | ||
126 | #define SQ_ALU_CONST_CACHE_GS_14 0x289f8 | ||
127 | #define SQ_ALU_CONST_CACHE_GS_15 0x289fc | ||
128 | |||
80 | #define CONFIG_MEMSIZE 0x5428 | 129 | #define CONFIG_MEMSIZE 0x5428 |
81 | #define CONFIG_CNTL 0x5424 | 130 | #define CONFIG_CNTL 0x5424 |
82 | #define CP_STAT 0x8680 | 131 | #define CP_STAT 0x8680 |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 829e26e8a4bb..034218c3dbbb 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -91,6 +91,8 @@ extern int radeon_tv; | |||
91 | extern int radeon_new_pll; | 91 | extern int radeon_new_pll; |
92 | extern int radeon_dynpm; | 92 | extern int radeon_dynpm; |
93 | extern int radeon_audio; | 93 | extern int radeon_audio; |
94 | extern int radeon_disp_priority; | ||
95 | extern int radeon_hw_i2c; | ||
94 | 96 | ||
95 | /* | 97 | /* |
96 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 98 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
@@ -168,6 +170,7 @@ struct radeon_clock { | |||
168 | * Power management | 170 | * Power management |
169 | */ | 171 | */ |
170 | int radeon_pm_init(struct radeon_device *rdev); | 172 | int radeon_pm_init(struct radeon_device *rdev); |
173 | void radeon_pm_fini(struct radeon_device *rdev); | ||
171 | void radeon_pm_compute_clocks(struct radeon_device *rdev); | 174 | void radeon_pm_compute_clocks(struct radeon_device *rdev); |
172 | void radeon_combios_get_power_modes(struct radeon_device *rdev); | 175 | void radeon_combios_get_power_modes(struct radeon_device *rdev); |
173 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 176 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
@@ -687,6 +690,7 @@ struct radeon_pm { | |||
687 | bool downclocked; | 690 | bool downclocked; |
688 | int active_crtcs; | 691 | int active_crtcs; |
689 | int req_vblank; | 692 | int req_vblank; |
693 | bool vblank_sync; | ||
690 | fixed20_12 max_bandwidth; | 694 | fixed20_12 max_bandwidth; |
691 | fixed20_12 igp_sideport_mclk; | 695 | fixed20_12 igp_sideport_mclk; |
692 | fixed20_12 igp_system_mclk; | 696 | fixed20_12 igp_system_mclk; |
@@ -697,6 +701,7 @@ struct radeon_pm { | |||
697 | fixed20_12 ht_bandwidth; | 701 | fixed20_12 ht_bandwidth; |
698 | fixed20_12 core_bandwidth; | 702 | fixed20_12 core_bandwidth; |
699 | fixed20_12 sclk; | 703 | fixed20_12 sclk; |
704 | fixed20_12 mclk; | ||
700 | fixed20_12 needed_bandwidth; | 705 | fixed20_12 needed_bandwidth; |
701 | /* XXX: use a define for num power modes */ | 706 | /* XXX: use a define for num power modes */ |
702 | struct radeon_power_state power_state[8]; | 707 | struct radeon_power_state power_state[8]; |
@@ -707,6 +712,7 @@ struct radeon_pm { | |||
707 | struct radeon_power_state *requested_power_state; | 712 | struct radeon_power_state *requested_power_state; |
708 | struct radeon_pm_clock_info *requested_clock_mode; | 713 | struct radeon_pm_clock_info *requested_clock_mode; |
709 | struct radeon_power_state *default_power_state; | 714 | struct radeon_power_state *default_power_state; |
715 | struct radeon_i2c_chan *i2c_bus; | ||
710 | }; | 716 | }; |
711 | 717 | ||
712 | 718 | ||
@@ -729,8 +735,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev, | |||
729 | struct drm_info_list *files, | 735 | struct drm_info_list *files, |
730 | unsigned nfiles); | 736 | unsigned nfiles); |
731 | int radeon_debugfs_fence_init(struct radeon_device *rdev); | 737 | int radeon_debugfs_fence_init(struct radeon_device *rdev); |
732 | int r100_debugfs_rbbm_init(struct radeon_device *rdev); | ||
733 | int r100_debugfs_cp_init(struct radeon_device *rdev); | ||
734 | 738 | ||
735 | 739 | ||
736 | /* | 740 | /* |
@@ -782,7 +786,7 @@ struct radeon_asic { | |||
782 | int (*set_surface_reg)(struct radeon_device *rdev, int reg, | 786 | int (*set_surface_reg)(struct radeon_device *rdev, int reg, |
783 | uint32_t tiling_flags, uint32_t pitch, | 787 | uint32_t tiling_flags, uint32_t pitch, |
784 | uint32_t offset, uint32_t obj_size); | 788 | uint32_t offset, uint32_t obj_size); |
785 | int (*clear_surface_reg)(struct radeon_device *rdev, int reg); | 789 | void (*clear_surface_reg)(struct radeon_device *rdev, int reg); |
786 | void (*bandwidth_update)(struct radeon_device *rdev); | 790 | void (*bandwidth_update)(struct radeon_device *rdev); |
787 | void (*hpd_init)(struct radeon_device *rdev); | 791 | void (*hpd_init)(struct radeon_device *rdev); |
788 | void (*hpd_fini)(struct radeon_device *rdev); | 792 | void (*hpd_fini)(struct radeon_device *rdev); |
@@ -862,6 +866,12 @@ union radeon_asic_config { | |||
862 | struct rv770_asic rv770; | 866 | struct rv770_asic rv770; |
863 | }; | 867 | }; |
864 | 868 | ||
869 | /* | ||
870 | * asic initialization from radeon_asic.c | ||
871 | */ | ||
872 | void radeon_agp_disable(struct radeon_device *rdev); | ||
873 | int radeon_asic_init(struct radeon_device *rdev); | ||
874 | |||
865 | 875 | ||
866 | /* | 876 | /* |
867 | * IOCTL. | 877 | * IOCTL. |
@@ -1172,6 +1182,8 @@ extern void radeon_gart_restore(struct radeon_device *rdev); | |||
1172 | extern int radeon_modeset_init(struct radeon_device *rdev); | 1182 | extern int radeon_modeset_init(struct radeon_device *rdev); |
1173 | extern void radeon_modeset_fini(struct radeon_device *rdev); | 1183 | extern void radeon_modeset_fini(struct radeon_device *rdev); |
1174 | extern bool radeon_card_posted(struct radeon_device *rdev); | 1184 | extern bool radeon_card_posted(struct radeon_device *rdev); |
1185 | extern void radeon_update_bandwidth_info(struct radeon_device *rdev); | ||
1186 | extern void radeon_update_display_priority(struct radeon_device *rdev); | ||
1175 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); | 1187 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); |
1176 | extern int radeon_clocks_init(struct radeon_device *rdev); | 1188 | extern int radeon_clocks_init(struct radeon_device *rdev); |
1177 | extern void radeon_clocks_fini(struct radeon_device *rdev); | 1189 | extern void radeon_clocks_fini(struct radeon_device *rdev); |
@@ -1188,51 +1200,6 @@ extern int radeon_resume_kms(struct drm_device *dev); | |||
1188 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); | 1200 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); |
1189 | 1201 | ||
1190 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ | 1202 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ |
1191 | struct r100_mc_save { | ||
1192 | u32 GENMO_WT; | ||
1193 | u32 CRTC_EXT_CNTL; | ||
1194 | u32 CRTC_GEN_CNTL; | ||
1195 | u32 CRTC2_GEN_CNTL; | ||
1196 | u32 CUR_OFFSET; | ||
1197 | u32 CUR2_OFFSET; | ||
1198 | }; | ||
1199 | extern void r100_cp_disable(struct radeon_device *rdev); | ||
1200 | extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); | ||
1201 | extern void r100_cp_fini(struct radeon_device *rdev); | ||
1202 | extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev); | ||
1203 | extern int r100_pci_gart_init(struct radeon_device *rdev); | ||
1204 | extern void r100_pci_gart_fini(struct radeon_device *rdev); | ||
1205 | extern int r100_pci_gart_enable(struct radeon_device *rdev); | ||
1206 | extern void r100_pci_gart_disable(struct radeon_device *rdev); | ||
1207 | extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | ||
1208 | extern int r100_debugfs_mc_info_init(struct radeon_device *rdev); | ||
1209 | extern int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
1210 | extern void r100_ib_fini(struct radeon_device *rdev); | ||
1211 | extern int r100_ib_init(struct radeon_device *rdev); | ||
1212 | extern void r100_irq_disable(struct radeon_device *rdev); | ||
1213 | extern int r100_irq_set(struct radeon_device *rdev); | ||
1214 | extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); | ||
1215 | extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); | ||
1216 | extern void r100_vram_init_sizes(struct radeon_device *rdev); | ||
1217 | extern void r100_wb_disable(struct radeon_device *rdev); | ||
1218 | extern void r100_wb_fini(struct radeon_device *rdev); | ||
1219 | extern int r100_wb_init(struct radeon_device *rdev); | ||
1220 | extern void r100_hdp_reset(struct radeon_device *rdev); | ||
1221 | extern int r100_rb2d_reset(struct radeon_device *rdev); | ||
1222 | extern int r100_cp_reset(struct radeon_device *rdev); | ||
1223 | extern void r100_vga_render_disable(struct radeon_device *rdev); | ||
1224 | extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | ||
1225 | struct radeon_cs_packet *pkt, | ||
1226 | struct radeon_bo *robj); | ||
1227 | extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, | ||
1228 | struct radeon_cs_packet *pkt, | ||
1229 | const unsigned *auth, unsigned n, | ||
1230 | radeon_packet0_check_t check); | ||
1231 | extern int r100_cs_packet_parse(struct radeon_cs_parser *p, | ||
1232 | struct radeon_cs_packet *pkt, | ||
1233 | unsigned idx); | ||
1234 | extern void r100_enable_bm(struct radeon_device *rdev); | ||
1235 | extern void r100_set_common_regs(struct radeon_device *rdev); | ||
1236 | 1203 | ||
1237 | /* rv200,rv250,rv280 */ | 1204 | /* rv200,rv250,rv280 */ |
1238 | extern void r200_set_safe_registers(struct radeon_device *rdev); | 1205 | extern void r200_set_safe_registers(struct radeon_device *rdev); |
@@ -1322,7 +1289,8 @@ extern int r600_audio_tmds_index(struct drm_encoder *encoder); | |||
1322 | extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); | 1289 | extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); |
1323 | extern void r600_audio_fini(struct radeon_device *rdev); | 1290 | extern void r600_audio_fini(struct radeon_device *rdev); |
1324 | extern void r600_hdmi_init(struct drm_encoder *encoder); | 1291 | extern void r600_hdmi_init(struct drm_encoder *encoder); |
1325 | extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable); | 1292 | extern void r600_hdmi_enable(struct drm_encoder *encoder); |
1293 | extern void r600_hdmi_disable(struct drm_encoder *encoder); | ||
1326 | extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); | 1294 | extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); |
1327 | extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); | 1295 | extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); |
1328 | extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, | 1296 | extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c new file mode 100644 index 000000000000..a4b4bc9fa322 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -0,0 +1,772 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | |||
29 | #include <linux/console.h> | ||
30 | #include <drm/drmP.h> | ||
31 | #include <drm/drm_crtc_helper.h> | ||
32 | #include <drm/radeon_drm.h> | ||
33 | #include <linux/vgaarb.h> | ||
34 | #include <linux/vga_switcheroo.h> | ||
35 | #include "radeon_reg.h" | ||
36 | #include "radeon.h" | ||
37 | #include "radeon_asic.h" | ||
38 | #include "atom.h" | ||
39 | |||
40 | /* | ||
41 | * Register accessor functions. | ||
42 | */ | ||
43 | static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg) | ||
44 | { | ||
45 | DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); | ||
46 | BUG_ON(1); | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | ||
51 | { | ||
52 | DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", | ||
53 | reg, v); | ||
54 | BUG_ON(1); | ||
55 | } | ||
56 | |||
57 | static void radeon_register_accessor_init(struct radeon_device *rdev) | ||
58 | { | ||
59 | rdev->mc_rreg = &radeon_invalid_rreg; | ||
60 | rdev->mc_wreg = &radeon_invalid_wreg; | ||
61 | rdev->pll_rreg = &radeon_invalid_rreg; | ||
62 | rdev->pll_wreg = &radeon_invalid_wreg; | ||
63 | rdev->pciep_rreg = &radeon_invalid_rreg; | ||
64 | rdev->pciep_wreg = &radeon_invalid_wreg; | ||
65 | |||
66 | /* Don't change order as we are overriding accessors. */ | ||
67 | if (rdev->family < CHIP_RV515) { | ||
68 | rdev->pcie_reg_mask = 0xff; | ||
69 | } else { | ||
70 | rdev->pcie_reg_mask = 0x7ff; | ||
71 | } | ||
72 | /* FIXME: not sure here */ | ||
73 | if (rdev->family <= CHIP_R580) { | ||
74 | rdev->pll_rreg = &r100_pll_rreg; | ||
75 | rdev->pll_wreg = &r100_pll_wreg; | ||
76 | } | ||
77 | if (rdev->family >= CHIP_R420) { | ||
78 | rdev->mc_rreg = &r420_mc_rreg; | ||
79 | rdev->mc_wreg = &r420_mc_wreg; | ||
80 | } | ||
81 | if (rdev->family >= CHIP_RV515) { | ||
82 | rdev->mc_rreg = &rv515_mc_rreg; | ||
83 | rdev->mc_wreg = &rv515_mc_wreg; | ||
84 | } | ||
85 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { | ||
86 | rdev->mc_rreg = &rs400_mc_rreg; | ||
87 | rdev->mc_wreg = &rs400_mc_wreg; | ||
88 | } | ||
89 | if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { | ||
90 | rdev->mc_rreg = &rs690_mc_rreg; | ||
91 | rdev->mc_wreg = &rs690_mc_wreg; | ||
92 | } | ||
93 | if (rdev->family == CHIP_RS600) { | ||
94 | rdev->mc_rreg = &rs600_mc_rreg; | ||
95 | rdev->mc_wreg = &rs600_mc_wreg; | ||
96 | } | ||
97 | if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { | ||
98 | rdev->pciep_rreg = &r600_pciep_rreg; | ||
99 | rdev->pciep_wreg = &r600_pciep_wreg; | ||
100 | } | ||
101 | } | ||
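radeon_register_accessor_init() follows a "safe default, then override" pattern: every accessor starts out pointing at an error stub (radeon_invalid_rreg/wreg), and the later per-family checks install the real implementation, so an unsupported chip faults loudly instead of silently touching the wrong registers. A toy illustration of the same idea, with made-up names and a hypothetical family cut-off:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint32_t (*rreg_t)(uint32_t reg);

/* Error stub: any path that forgot to install an accessor fails loudly. */
static uint32_t invalid_rreg(uint32_t reg)
{
        fprintf(stderr, "no accessor registered for reg 0x%04x\n", reg);
        abort();
}

/* Placeholder for a real, family-specific MMIO read. */
static uint32_t family_a_rreg(uint32_t reg)
{
        return reg;     /* stand-in only */
}

struct toy_device {
        int family;
        rreg_t mc_rreg;
};

static void toy_accessor_init(struct toy_device *dev)
{
        dev->mc_rreg = invalid_rreg;    /* safe default first */
        if (dev->family >= 42)          /* hypothetical cut-off */
                dev->mc_rreg = family_a_rreg;
}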
102 | |||
103 | |||
104 | /* helper to disable agp */ | ||
105 | void radeon_agp_disable(struct radeon_device *rdev) | ||
106 | { | ||
107 | rdev->flags &= ~RADEON_IS_AGP; | ||
108 | if (rdev->family >= CHIP_R600) { | ||
109 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
110 | rdev->flags |= RADEON_IS_PCIE; | ||
111 | } else if (rdev->family >= CHIP_RV515 || | ||
112 | rdev->family == CHIP_RV380 || | ||
113 | rdev->family == CHIP_RV410 || | ||
114 | rdev->family == CHIP_R423) { | ||
115 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
116 | rdev->flags |= RADEON_IS_PCIE; | ||
117 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | ||
118 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | ||
119 | } else { | ||
120 | DRM_INFO("Forcing AGP to PCI mode\n"); | ||
121 | rdev->flags |= RADEON_IS_PCI; | ||
122 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | ||
123 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | ||
124 | } | ||
125 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * ASIC | ||
130 | */ | ||
131 | static struct radeon_asic r100_asic = { | ||
132 | .init = &r100_init, | ||
133 | .fini = &r100_fini, | ||
134 | .suspend = &r100_suspend, | ||
135 | .resume = &r100_resume, | ||
136 | .vga_set_state = &r100_vga_set_state, | ||
137 | .gpu_reset = &r100_gpu_reset, | ||
138 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | ||
139 | .gart_set_page = &r100_pci_gart_set_page, | ||
140 | .cp_commit = &r100_cp_commit, | ||
141 | .ring_start = &r100_ring_start, | ||
142 | .ring_test = &r100_ring_test, | ||
143 | .ring_ib_execute = &r100_ring_ib_execute, | ||
144 | .irq_set = &r100_irq_set, | ||
145 | .irq_process = &r100_irq_process, | ||
146 | .get_vblank_counter = &r100_get_vblank_counter, | ||
147 | .fence_ring_emit = &r100_fence_ring_emit, | ||
148 | .cs_parse = &r100_cs_parse, | ||
149 | .copy_blit = &r100_copy_blit, | ||
150 | .copy_dma = NULL, | ||
151 | .copy = &r100_copy_blit, | ||
152 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
153 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
154 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
155 | .set_memory_clock = NULL, | ||
156 | .get_pcie_lanes = NULL, | ||
157 | .set_pcie_lanes = NULL, | ||
158 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
159 | .set_surface_reg = r100_set_surface_reg, | ||
160 | .clear_surface_reg = r100_clear_surface_reg, | ||
161 | .bandwidth_update = &r100_bandwidth_update, | ||
162 | .hpd_init = &r100_hpd_init, | ||
163 | .hpd_fini = &r100_hpd_fini, | ||
164 | .hpd_sense = &r100_hpd_sense, | ||
165 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
166 | .ioctl_wait_idle = NULL, | ||
167 | }; | ||
168 | |||
169 | static struct radeon_asic r200_asic = { | ||
170 | .init = &r100_init, | ||
171 | .fini = &r100_fini, | ||
172 | .suspend = &r100_suspend, | ||
173 | .resume = &r100_resume, | ||
174 | .vga_set_state = &r100_vga_set_state, | ||
175 | .gpu_reset = &r100_gpu_reset, | ||
176 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | ||
177 | .gart_set_page = &r100_pci_gart_set_page, | ||
178 | .cp_commit = &r100_cp_commit, | ||
179 | .ring_start = &r100_ring_start, | ||
180 | .ring_test = &r100_ring_test, | ||
181 | .ring_ib_execute = &r100_ring_ib_execute, | ||
182 | .irq_set = &r100_irq_set, | ||
183 | .irq_process = &r100_irq_process, | ||
184 | .get_vblank_counter = &r100_get_vblank_counter, | ||
185 | .fence_ring_emit = &r100_fence_ring_emit, | ||
186 | .cs_parse = &r100_cs_parse, | ||
187 | .copy_blit = &r100_copy_blit, | ||
188 | .copy_dma = &r200_copy_dma, | ||
189 | .copy = &r100_copy_blit, | ||
190 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
191 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
192 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
193 | .set_memory_clock = NULL, | ||
194 | .set_pcie_lanes = NULL, | ||
195 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
196 | .set_surface_reg = r100_set_surface_reg, | ||
197 | .clear_surface_reg = r100_clear_surface_reg, | ||
198 | .bandwidth_update = &r100_bandwidth_update, | ||
199 | .hpd_init = &r100_hpd_init, | ||
200 | .hpd_fini = &r100_hpd_fini, | ||
201 | .hpd_sense = &r100_hpd_sense, | ||
202 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
203 | .ioctl_wait_idle = NULL, | ||
204 | }; | ||
205 | |||
206 | static struct radeon_asic r300_asic = { | ||
207 | .init = &r300_init, | ||
208 | .fini = &r300_fini, | ||
209 | .suspend = &r300_suspend, | ||
210 | .resume = &r300_resume, | ||
211 | .vga_set_state = &r100_vga_set_state, | ||
212 | .gpu_reset = &r300_gpu_reset, | ||
213 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | ||
214 | .gart_set_page = &r100_pci_gart_set_page, | ||
215 | .cp_commit = &r100_cp_commit, | ||
216 | .ring_start = &r300_ring_start, | ||
217 | .ring_test = &r100_ring_test, | ||
218 | .ring_ib_execute = &r100_ring_ib_execute, | ||
219 | .irq_set = &r100_irq_set, | ||
220 | .irq_process = &r100_irq_process, | ||
221 | .get_vblank_counter = &r100_get_vblank_counter, | ||
222 | .fence_ring_emit = &r300_fence_ring_emit, | ||
223 | .cs_parse = &r300_cs_parse, | ||
224 | .copy_blit = &r100_copy_blit, | ||
225 | .copy_dma = &r200_copy_dma, | ||
226 | .copy = &r100_copy_blit, | ||
227 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
228 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
229 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
230 | .set_memory_clock = NULL, | ||
231 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
232 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
233 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
234 | .set_surface_reg = r100_set_surface_reg, | ||
235 | .clear_surface_reg = r100_clear_surface_reg, | ||
236 | .bandwidth_update = &r100_bandwidth_update, | ||
237 | .hpd_init = &r100_hpd_init, | ||
238 | .hpd_fini = &r100_hpd_fini, | ||
239 | .hpd_sense = &r100_hpd_sense, | ||
240 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
241 | .ioctl_wait_idle = NULL, | ||
242 | }; | ||
243 | |||
244 | static struct radeon_asic r300_asic_pcie = { | ||
245 | .init = &r300_init, | ||
246 | .fini = &r300_fini, | ||
247 | .suspend = &r300_suspend, | ||
248 | .resume = &r300_resume, | ||
249 | .vga_set_state = &r100_vga_set_state, | ||
250 | .gpu_reset = &r300_gpu_reset, | ||
251 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
252 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
253 | .cp_commit = &r100_cp_commit, | ||
254 | .ring_start = &r300_ring_start, | ||
255 | .ring_test = &r100_ring_test, | ||
256 | .ring_ib_execute = &r100_ring_ib_execute, | ||
257 | .irq_set = &r100_irq_set, | ||
258 | .irq_process = &r100_irq_process, | ||
259 | .get_vblank_counter = &r100_get_vblank_counter, | ||
260 | .fence_ring_emit = &r300_fence_ring_emit, | ||
261 | .cs_parse = &r300_cs_parse, | ||
262 | .copy_blit = &r100_copy_blit, | ||
263 | .copy_dma = &r200_copy_dma, | ||
264 | .copy = &r100_copy_blit, | ||
265 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
266 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
267 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
268 | .set_memory_clock = NULL, | ||
269 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
270 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
271 | .set_surface_reg = r100_set_surface_reg, | ||
272 | .clear_surface_reg = r100_clear_surface_reg, | ||
273 | .bandwidth_update = &r100_bandwidth_update, | ||
274 | .hpd_init = &r100_hpd_init, | ||
275 | .hpd_fini = &r100_hpd_fini, | ||
276 | .hpd_sense = &r100_hpd_sense, | ||
277 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
278 | .ioctl_wait_idle = NULL, | ||
279 | }; | ||
280 | |||
281 | static struct radeon_asic r420_asic = { | ||
282 | .init = &r420_init, | ||
283 | .fini = &r420_fini, | ||
284 | .suspend = &r420_suspend, | ||
285 | .resume = &r420_resume, | ||
286 | .vga_set_state = &r100_vga_set_state, | ||
287 | .gpu_reset = &r300_gpu_reset, | ||
288 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
289 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
290 | .cp_commit = &r100_cp_commit, | ||
291 | .ring_start = &r300_ring_start, | ||
292 | .ring_test = &r100_ring_test, | ||
293 | .ring_ib_execute = &r100_ring_ib_execute, | ||
294 | .irq_set = &r100_irq_set, | ||
295 | .irq_process = &r100_irq_process, | ||
296 | .get_vblank_counter = &r100_get_vblank_counter, | ||
297 | .fence_ring_emit = &r300_fence_ring_emit, | ||
298 | .cs_parse = &r300_cs_parse, | ||
299 | .copy_blit = &r100_copy_blit, | ||
300 | .copy_dma = &r200_copy_dma, | ||
301 | .copy = &r100_copy_blit, | ||
302 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
303 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
304 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
305 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
306 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
307 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
308 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
309 | .set_surface_reg = r100_set_surface_reg, | ||
310 | .clear_surface_reg = r100_clear_surface_reg, | ||
311 | .bandwidth_update = &r100_bandwidth_update, | ||
312 | .hpd_init = &r100_hpd_init, | ||
313 | .hpd_fini = &r100_hpd_fini, | ||
314 | .hpd_sense = &r100_hpd_sense, | ||
315 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
316 | .ioctl_wait_idle = NULL, | ||
317 | }; | ||
318 | |||
319 | static struct radeon_asic rs400_asic = { | ||
320 | .init = &rs400_init, | ||
321 | .fini = &rs400_fini, | ||
322 | .suspend = &rs400_suspend, | ||
323 | .resume = &rs400_resume, | ||
324 | .vga_set_state = &r100_vga_set_state, | ||
325 | .gpu_reset = &r300_gpu_reset, | ||
326 | .gart_tlb_flush = &rs400_gart_tlb_flush, | ||
327 | .gart_set_page = &rs400_gart_set_page, | ||
328 | .cp_commit = &r100_cp_commit, | ||
329 | .ring_start = &r300_ring_start, | ||
330 | .ring_test = &r100_ring_test, | ||
331 | .ring_ib_execute = &r100_ring_ib_execute, | ||
332 | .irq_set = &r100_irq_set, | ||
333 | .irq_process = &r100_irq_process, | ||
334 | .get_vblank_counter = &r100_get_vblank_counter, | ||
335 | .fence_ring_emit = &r300_fence_ring_emit, | ||
336 | .cs_parse = &r300_cs_parse, | ||
337 | .copy_blit = &r100_copy_blit, | ||
338 | .copy_dma = &r200_copy_dma, | ||
339 | .copy = &r100_copy_blit, | ||
340 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
341 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
342 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
343 | .set_memory_clock = NULL, | ||
344 | .get_pcie_lanes = NULL, | ||
345 | .set_pcie_lanes = NULL, | ||
346 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
347 | .set_surface_reg = r100_set_surface_reg, | ||
348 | .clear_surface_reg = r100_clear_surface_reg, | ||
349 | .bandwidth_update = &r100_bandwidth_update, | ||
350 | .hpd_init = &r100_hpd_init, | ||
351 | .hpd_fini = &r100_hpd_fini, | ||
352 | .hpd_sense = &r100_hpd_sense, | ||
353 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
354 | .ioctl_wait_idle = NULL, | ||
355 | }; | ||
356 | |||
357 | static struct radeon_asic rs600_asic = { | ||
358 | .init = &rs600_init, | ||
359 | .fini = &rs600_fini, | ||
360 | .suspend = &rs600_suspend, | ||
361 | .resume = &rs600_resume, | ||
362 | .vga_set_state = &r100_vga_set_state, | ||
363 | .gpu_reset = &r300_gpu_reset, | ||
364 | .gart_tlb_flush = &rs600_gart_tlb_flush, | ||
365 | .gart_set_page = &rs600_gart_set_page, | ||
366 | .cp_commit = &r100_cp_commit, | ||
367 | .ring_start = &r300_ring_start, | ||
368 | .ring_test = &r100_ring_test, | ||
369 | .ring_ib_execute = &r100_ring_ib_execute, | ||
370 | .irq_set = &rs600_irq_set, | ||
371 | .irq_process = &rs600_irq_process, | ||
372 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
373 | .fence_ring_emit = &r300_fence_ring_emit, | ||
374 | .cs_parse = &r300_cs_parse, | ||
375 | .copy_blit = &r100_copy_blit, | ||
376 | .copy_dma = &r200_copy_dma, | ||
377 | .copy = &r100_copy_blit, | ||
378 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
379 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
380 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
381 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
382 | .get_pcie_lanes = NULL, | ||
383 | .set_pcie_lanes = NULL, | ||
384 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
385 | .set_surface_reg = r100_set_surface_reg, | ||
386 | .clear_surface_reg = r100_clear_surface_reg, | ||
387 | .bandwidth_update = &rs600_bandwidth_update, | ||
388 | .hpd_init = &rs600_hpd_init, | ||
389 | .hpd_fini = &rs600_hpd_fini, | ||
390 | .hpd_sense = &rs600_hpd_sense, | ||
391 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
392 | .ioctl_wait_idle = NULL, | ||
393 | }; | ||
394 | |||
395 | static struct radeon_asic rs690_asic = { | ||
396 | .init = &rs690_init, | ||
397 | .fini = &rs690_fini, | ||
398 | .suspend = &rs690_suspend, | ||
399 | .resume = &rs690_resume, | ||
400 | .vga_set_state = &r100_vga_set_state, | ||
401 | .gpu_reset = &r300_gpu_reset, | ||
402 | .gart_tlb_flush = &rs400_gart_tlb_flush, | ||
403 | .gart_set_page = &rs400_gart_set_page, | ||
404 | .cp_commit = &r100_cp_commit, | ||
405 | .ring_start = &r300_ring_start, | ||
406 | .ring_test = &r100_ring_test, | ||
407 | .ring_ib_execute = &r100_ring_ib_execute, | ||
408 | .irq_set = &rs600_irq_set, | ||
409 | .irq_process = &rs600_irq_process, | ||
410 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
411 | .fence_ring_emit = &r300_fence_ring_emit, | ||
412 | .cs_parse = &r300_cs_parse, | ||
413 | .copy_blit = &r100_copy_blit, | ||
414 | .copy_dma = &r200_copy_dma, | ||
415 | .copy = &r200_copy_dma, | ||
416 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
417 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
418 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
419 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
420 | .get_pcie_lanes = NULL, | ||
421 | .set_pcie_lanes = NULL, | ||
422 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
423 | .set_surface_reg = r100_set_surface_reg, | ||
424 | .clear_surface_reg = r100_clear_surface_reg, | ||
425 | .bandwidth_update = &rs690_bandwidth_update, | ||
426 | .hpd_init = &rs600_hpd_init, | ||
427 | .hpd_fini = &rs600_hpd_fini, | ||
428 | .hpd_sense = &rs600_hpd_sense, | ||
429 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
430 | .ioctl_wait_idle = NULL, | ||
431 | }; | ||
432 | |||
433 | static struct radeon_asic rv515_asic = { | ||
434 | .init = &rv515_init, | ||
435 | .fini = &rv515_fini, | ||
436 | .suspend = &rv515_suspend, | ||
437 | .resume = &rv515_resume, | ||
438 | .vga_set_state = &r100_vga_set_state, | ||
439 | .gpu_reset = &rv515_gpu_reset, | ||
440 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
441 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
442 | .cp_commit = &r100_cp_commit, | ||
443 | .ring_start = &rv515_ring_start, | ||
444 | .ring_test = &r100_ring_test, | ||
445 | .ring_ib_execute = &r100_ring_ib_execute, | ||
446 | .irq_set = &rs600_irq_set, | ||
447 | .irq_process = &rs600_irq_process, | ||
448 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
449 | .fence_ring_emit = &r300_fence_ring_emit, | ||
450 | .cs_parse = &r300_cs_parse, | ||
451 | .copy_blit = &r100_copy_blit, | ||
452 | .copy_dma = &r200_copy_dma, | ||
453 | .copy = &r100_copy_blit, | ||
454 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
455 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
456 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
457 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
458 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
459 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
460 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
461 | .set_surface_reg = r100_set_surface_reg, | ||
462 | .clear_surface_reg = r100_clear_surface_reg, | ||
463 | .bandwidth_update = &rv515_bandwidth_update, | ||
464 | .hpd_init = &rs600_hpd_init, | ||
465 | .hpd_fini = &rs600_hpd_fini, | ||
466 | .hpd_sense = &rs600_hpd_sense, | ||
467 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
468 | .ioctl_wait_idle = NULL, | ||
469 | }; | ||
470 | |||
471 | static struct radeon_asic r520_asic = { | ||
472 | .init = &r520_init, | ||
473 | .fini = &rv515_fini, | ||
474 | .suspend = &rv515_suspend, | ||
475 | .resume = &r520_resume, | ||
476 | .vga_set_state = &r100_vga_set_state, | ||
477 | .gpu_reset = &rv515_gpu_reset, | ||
478 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
479 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
480 | .cp_commit = &r100_cp_commit, | ||
481 | .ring_start = &rv515_ring_start, | ||
482 | .ring_test = &r100_ring_test, | ||
483 | .ring_ib_execute = &r100_ring_ib_execute, | ||
484 | .irq_set = &rs600_irq_set, | ||
485 | .irq_process = &rs600_irq_process, | ||
486 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
487 | .fence_ring_emit = &r300_fence_ring_emit, | ||
488 | .cs_parse = &r300_cs_parse, | ||
489 | .copy_blit = &r100_copy_blit, | ||
490 | .copy_dma = &r200_copy_dma, | ||
491 | .copy = &r100_copy_blit, | ||
492 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
493 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
494 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
495 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
496 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
497 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
498 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
499 | .set_surface_reg = r100_set_surface_reg, | ||
500 | .clear_surface_reg = r100_clear_surface_reg, | ||
501 | .bandwidth_update = &rv515_bandwidth_update, | ||
502 | .hpd_init = &rs600_hpd_init, | ||
503 | .hpd_fini = &rs600_hpd_fini, | ||
504 | .hpd_sense = &rs600_hpd_sense, | ||
505 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
506 | .ioctl_wait_idle = NULL, | ||
507 | }; | ||
508 | |||
509 | static struct radeon_asic r600_asic = { | ||
510 | .init = &r600_init, | ||
511 | .fini = &r600_fini, | ||
512 | .suspend = &r600_suspend, | ||
513 | .resume = &r600_resume, | ||
514 | .cp_commit = &r600_cp_commit, | ||
515 | .vga_set_state = &r600_vga_set_state, | ||
516 | .gpu_reset = &r600_gpu_reset, | ||
517 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
518 | .gart_set_page = &rs600_gart_set_page, | ||
519 | .ring_test = &r600_ring_test, | ||
520 | .ring_ib_execute = &r600_ring_ib_execute, | ||
521 | .irq_set = &r600_irq_set, | ||
522 | .irq_process = &r600_irq_process, | ||
523 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
524 | .fence_ring_emit = &r600_fence_ring_emit, | ||
525 | .cs_parse = &r600_cs_parse, | ||
526 | .copy_blit = &r600_copy_blit, | ||
527 | .copy_dma = &r600_copy_blit, | ||
528 | .copy = &r600_copy_blit, | ||
529 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
530 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
531 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
532 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
533 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
534 | .set_pcie_lanes = NULL, | ||
535 | .set_clock_gating = NULL, | ||
536 | .set_surface_reg = r600_set_surface_reg, | ||
537 | .clear_surface_reg = r600_clear_surface_reg, | ||
538 | .bandwidth_update = &rv515_bandwidth_update, | ||
539 | .hpd_init = &r600_hpd_init, | ||
540 | .hpd_fini = &r600_hpd_fini, | ||
541 | .hpd_sense = &r600_hpd_sense, | ||
542 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
543 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
544 | }; | ||
545 | |||
546 | static struct radeon_asic rs780_asic = { | ||
547 | .init = &r600_init, | ||
548 | .fini = &r600_fini, | ||
549 | .suspend = &r600_suspend, | ||
550 | .resume = &r600_resume, | ||
551 | .cp_commit = &r600_cp_commit, | ||
552 | .vga_set_state = &r600_vga_set_state, | ||
553 | .gpu_reset = &r600_gpu_reset, | ||
554 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
555 | .gart_set_page = &rs600_gart_set_page, | ||
556 | .ring_test = &r600_ring_test, | ||
557 | .ring_ib_execute = &r600_ring_ib_execute, | ||
558 | .irq_set = &r600_irq_set, | ||
559 | .irq_process = &r600_irq_process, | ||
560 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
561 | .fence_ring_emit = &r600_fence_ring_emit, | ||
562 | .cs_parse = &r600_cs_parse, | ||
563 | .copy_blit = &r600_copy_blit, | ||
564 | .copy_dma = &r600_copy_blit, | ||
565 | .copy = &r600_copy_blit, | ||
566 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
567 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
568 | .get_memory_clock = NULL, | ||
569 | .set_memory_clock = NULL, | ||
570 | .get_pcie_lanes = NULL, | ||
571 | .set_pcie_lanes = NULL, | ||
572 | .set_clock_gating = NULL, | ||
573 | .set_surface_reg = r600_set_surface_reg, | ||
574 | .clear_surface_reg = r600_clear_surface_reg, | ||
575 | .bandwidth_update = &rs690_bandwidth_update, | ||
576 | .hpd_init = &r600_hpd_init, | ||
577 | .hpd_fini = &r600_hpd_fini, | ||
578 | .hpd_sense = &r600_hpd_sense, | ||
579 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
580 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
581 | }; | ||
582 | |||
583 | static struct radeon_asic rv770_asic = { | ||
584 | .init = &rv770_init, | ||
585 | .fini = &rv770_fini, | ||
586 | .suspend = &rv770_suspend, | ||
587 | .resume = &rv770_resume, | ||
588 | .cp_commit = &r600_cp_commit, | ||
589 | .gpu_reset = &rv770_gpu_reset, | ||
590 | .vga_set_state = &r600_vga_set_state, | ||
591 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
592 | .gart_set_page = &rs600_gart_set_page, | ||
593 | .ring_test = &r600_ring_test, | ||
594 | .ring_ib_execute = &r600_ring_ib_execute, | ||
595 | .irq_set = &r600_irq_set, | ||
596 | .irq_process = &r600_irq_process, | ||
597 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
598 | .fence_ring_emit = &r600_fence_ring_emit, | ||
599 | .cs_parse = &r600_cs_parse, | ||
600 | .copy_blit = &r600_copy_blit, | ||
601 | .copy_dma = &r600_copy_blit, | ||
602 | .copy = &r600_copy_blit, | ||
603 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
604 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
605 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
606 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
607 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
608 | .set_pcie_lanes = NULL, | ||
609 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
610 | .set_surface_reg = r600_set_surface_reg, | ||
611 | .clear_surface_reg = r600_clear_surface_reg, | ||
612 | .bandwidth_update = &rv515_bandwidth_update, | ||
613 | .hpd_init = &r600_hpd_init, | ||
614 | .hpd_fini = &r600_hpd_fini, | ||
615 | .hpd_sense = &r600_hpd_sense, | ||
616 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
617 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
618 | }; | ||
619 | |||
620 | static struct radeon_asic evergreen_asic = { | ||
621 | .init = &evergreen_init, | ||
622 | .fini = &evergreen_fini, | ||
623 | .suspend = &evergreen_suspend, | ||
624 | .resume = &evergreen_resume, | ||
625 | .cp_commit = NULL, | ||
626 | .gpu_reset = &evergreen_gpu_reset, | ||
627 | .vga_set_state = &r600_vga_set_state, | ||
628 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
629 | .gart_set_page = &rs600_gart_set_page, | ||
630 | .ring_test = NULL, | ||
631 | .ring_ib_execute = NULL, | ||
632 | .irq_set = NULL, | ||
633 | .irq_process = NULL, | ||
634 | .get_vblank_counter = NULL, | ||
635 | .fence_ring_emit = NULL, | ||
636 | .cs_parse = NULL, | ||
637 | .copy_blit = NULL, | ||
638 | .copy_dma = NULL, | ||
639 | .copy = NULL, | ||
640 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
641 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
642 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
643 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
644 | .set_pcie_lanes = NULL, | ||
645 | .set_clock_gating = NULL, | ||
646 | .set_surface_reg = r600_set_surface_reg, | ||
647 | .clear_surface_reg = r600_clear_surface_reg, | ||
648 | .bandwidth_update = &evergreen_bandwidth_update, | ||
649 | .hpd_init = &evergreen_hpd_init, | ||
650 | .hpd_fini = &evergreen_hpd_fini, | ||
651 | .hpd_sense = &evergreen_hpd_sense, | ||
652 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | ||
653 | }; | ||
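Several evergreen_asic entries are still NULL at this point (cp_commit, ring test, IRQs, CS parsing, the copy hooks), so modesetting comes up but acceleration is not wired in yet. Any generic wrapper therefore has to treat a NULL hook as "not supported"; a minimal sketch of that guard, with made-up names rather than the driver's real macros:

        #include <errno.h>
        #include <stdio.h>

        struct asic {
                int (*ring_test)(void);         /* may legitimately be NULL */
        };

        static int call_ring_test(const struct asic *a)
        {
                if (!a->ring_test)
                        return -ENOSYS;         /* hook not implemented for this chip yet */
                return a->ring_test();
        }

        int main(void)
        {
                struct asic evergreen_like = { .ring_test = NULL };

                printf("ring_test -> %d\n", call_ring_test(&evergreen_like));
                return 0;
        }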
654 | |||
655 | int radeon_asic_init(struct radeon_device *rdev) | ||
656 | { | ||
657 | radeon_register_accessor_init(rdev); | ||
658 | switch (rdev->family) { | ||
659 | case CHIP_R100: | ||
660 | case CHIP_RV100: | ||
661 | case CHIP_RS100: | ||
662 | case CHIP_RV200: | ||
663 | case CHIP_RS200: | ||
664 | rdev->asic = &r100_asic; | ||
665 | break; | ||
666 | case CHIP_R200: | ||
667 | case CHIP_RV250: | ||
668 | case CHIP_RS300: | ||
669 | case CHIP_RV280: | ||
670 | rdev->asic = &r200_asic; | ||
671 | break; | ||
672 | case CHIP_R300: | ||
673 | case CHIP_R350: | ||
674 | case CHIP_RV350: | ||
675 | case CHIP_RV380: | ||
676 | if (rdev->flags & RADEON_IS_PCIE) | ||
677 | rdev->asic = &r300_asic_pcie; | ||
678 | else | ||
679 | rdev->asic = &r300_asic; | ||
680 | break; | ||
681 | case CHIP_R420: | ||
682 | case CHIP_R423: | ||
683 | case CHIP_RV410: | ||
684 | rdev->asic = &r420_asic; | ||
685 | break; | ||
686 | case CHIP_RS400: | ||
687 | case CHIP_RS480: | ||
688 | rdev->asic = &rs400_asic; | ||
689 | break; | ||
690 | case CHIP_RS600: | ||
691 | rdev->asic = &rs600_asic; | ||
692 | break; | ||
693 | case CHIP_RS690: | ||
694 | case CHIP_RS740: | ||
695 | rdev->asic = &rs690_asic; | ||
696 | break; | ||
697 | case CHIP_RV515: | ||
698 | rdev->asic = &rv515_asic; | ||
699 | break; | ||
700 | case CHIP_R520: | ||
701 | case CHIP_RV530: | ||
702 | case CHIP_RV560: | ||
703 | case CHIP_RV570: | ||
704 | case CHIP_R580: | ||
705 | rdev->asic = &r520_asic; | ||
706 | break; | ||
707 | case CHIP_R600: | ||
708 | case CHIP_RV610: | ||
709 | case CHIP_RV630: | ||
710 | case CHIP_RV620: | ||
711 | case CHIP_RV635: | ||
712 | case CHIP_RV670: | ||
713 | rdev->asic = &r600_asic; | ||
714 | break; | ||
715 | case CHIP_RS780: | ||
716 | case CHIP_RS880: | ||
717 | rdev->asic = &rs780_asic; | ||
718 | break; | ||
719 | case CHIP_RV770: | ||
720 | case CHIP_RV730: | ||
721 | case CHIP_RV710: | ||
722 | case CHIP_RV740: | ||
723 | rdev->asic = &rv770_asic; | ||
724 | break; | ||
725 | case CHIP_CEDAR: | ||
726 | case CHIP_REDWOOD: | ||
727 | case CHIP_JUNIPER: | ||
728 | case CHIP_CYPRESS: | ||
729 | case CHIP_HEMLOCK: | ||
730 | rdev->asic = &evergreen_asic; | ||
731 | break; | ||
732 | default: | ||
733 | /* FIXME: not supported yet */ | ||
734 | return -EINVAL; | ||
735 | } | ||
736 | |||
737 | if (rdev->flags & RADEON_IS_IGP) { | ||
738 | rdev->asic->get_memory_clock = NULL; | ||
739 | rdev->asic->set_memory_clock = NULL; | ||
740 | } | ||
741 | |||
742 | /* set the number of crtcs */ | ||
743 | if (rdev->flags & RADEON_SINGLE_CRTC) | ||
744 | rdev->num_crtc = 1; | ||
745 | else { | ||
746 | if (ASIC_IS_DCE4(rdev)) | ||
747 | rdev->num_crtc = 6; | ||
748 | else | ||
749 | rdev->num_crtc = 2; | ||
750 | } | ||
751 | |||
752 | return 0; | ||
753 | } | ||
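radeon_asic_init() is a plain table lookup: the chip family selects one of the static tables above, IGP parts then get their memory-clock hooks cleared no matter which table was chosen, and the CRTC count ends up as 1 for single-CRTC parts, 6 on DCE4 (evergreen) and 2 everywhere else. A self-contained sketch of the same selection pattern, with illustrative names only:

        #include <stdio.h>

        enum family { FAM_R100, FAM_R600, FAM_CEDAR };

        struct asic_ops { const char *name; };

        static const struct asic_ops r100_ops      = { "r100" };
        static const struct asic_ops r600_ops      = { "r600" };
        static const struct asic_ops evergreen_ops = { "evergreen" };

        struct device {
                enum family family;
                int is_dce4;                    /* evergreen display block */
                int single_crtc;
                const struct asic_ops *ops;
                int num_crtc;
        };

        static int asic_init(struct device *dev)
        {
                switch (dev->family) {
                case FAM_R100:  dev->ops = &r100_ops;  break;
                case FAM_R600:  dev->ops = &r600_ops;  break;
                case FAM_CEDAR: dev->ops = &evergreen_ops; dev->is_dce4 = 1; break;
                default:        return -1;      /* unknown family: refuse to bind */
                }

                if (dev->single_crtc)
                        dev->num_crtc = 1;
                else
                        dev->num_crtc = dev->is_dce4 ? 6 : 2;
                return 0;
        }

        int main(void)
        {
                struct device dev = { .family = FAM_CEDAR };

                if (asic_init(&dev) == 0)
                        printf("%s: %d crtcs\n", dev.ops->name, dev.num_crtc);
                return 0;
        }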
754 | |||
755 | /* | ||
756 | * Wrapper around modesetting bits. Move to radeon_clocks.c? | ||
757 | */ | ||
758 | int radeon_clocks_init(struct radeon_device *rdev) | ||
759 | { | ||
760 | int r; | ||
761 | |||
762 | r = radeon_static_clocks_init(rdev->ddev); | ||
763 | if (r) { | ||
764 | return r; | ||
765 | } | ||
766 | DRM_INFO("Clocks initialized !\n"); | ||
767 | return 0; | ||
768 | } | ||
769 | |||
770 | void radeon_clocks_fini(struct radeon_device *rdev) | ||
771 | { | ||
772 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index d3a157b2bcb7..a0b8280663d1 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -45,10 +45,18 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); | |||
45 | /* | 45 | /* |
46 | * r100,rv100,rs100,rv200,rs200 | 46 | * r100,rv100,rs100,rv200,rs200 |
47 | */ | 47 | */ |
48 | extern int r100_init(struct radeon_device *rdev); | 48 | struct r100_mc_save { |
49 | extern void r100_fini(struct radeon_device *rdev); | 49 | u32 GENMO_WT; |
50 | extern int r100_suspend(struct radeon_device *rdev); | 50 | u32 CRTC_EXT_CNTL; |
51 | extern int r100_resume(struct radeon_device *rdev); | 51 | u32 CRTC_GEN_CNTL; |
52 | u32 CRTC2_GEN_CNTL; | ||
53 | u32 CUR_OFFSET; | ||
54 | u32 CUR2_OFFSET; | ||
55 | }; | ||
56 | int r100_init(struct radeon_device *rdev); | ||
57 | void r100_fini(struct radeon_device *rdev); | ||
58 | int r100_suspend(struct radeon_device *rdev); | ||
59 | int r100_resume(struct radeon_device *rdev); | ||
52 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); | 60 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); |
53 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 61 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
54 | void r100_vga_set_state(struct radeon_device *rdev, bool state); | 62 | void r100_vga_set_state(struct radeon_device *rdev, bool state); |
@@ -73,7 +81,7 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
73 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, | 81 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, |
74 | uint32_t tiling_flags, uint32_t pitch, | 82 | uint32_t tiling_flags, uint32_t pitch, |
75 | uint32_t offset, uint32_t obj_size); | 83 | uint32_t offset, uint32_t obj_size); |
76 | int r100_clear_surface_reg(struct radeon_device *rdev, int reg); | 84 | void r100_clear_surface_reg(struct radeon_device *rdev, int reg); |
77 | void r100_bandwidth_update(struct radeon_device *rdev); | 85 | void r100_bandwidth_update(struct radeon_device *rdev); |
78 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 86 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
79 | int r100_ring_test(struct radeon_device *rdev); | 87 | int r100_ring_test(struct radeon_device *rdev); |
@@ -82,44 +90,42 @@ void r100_hpd_fini(struct radeon_device *rdev); | |||
82 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 90 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
83 | void r100_hpd_set_polarity(struct radeon_device *rdev, | 91 | void r100_hpd_set_polarity(struct radeon_device *rdev, |
84 | enum radeon_hpd_id hpd); | 92 | enum radeon_hpd_id hpd); |
85 | 93 | int r100_debugfs_rbbm_init(struct radeon_device *rdev); | |
86 | static struct radeon_asic r100_asic = { | 94 | int r100_debugfs_cp_init(struct radeon_device *rdev); |
87 | .init = &r100_init, | 95 | void r100_cp_disable(struct radeon_device *rdev); |
88 | .fini = &r100_fini, | 96 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); |
89 | .suspend = &r100_suspend, | 97 | void r100_cp_fini(struct radeon_device *rdev); |
90 | .resume = &r100_resume, | 98 | int r100_pci_gart_init(struct radeon_device *rdev); |
91 | .vga_set_state = &r100_vga_set_state, | 99 | void r100_pci_gart_fini(struct radeon_device *rdev); |
92 | .gpu_reset = &r100_gpu_reset, | 100 | int r100_pci_gart_enable(struct radeon_device *rdev); |
93 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | 101 | void r100_pci_gart_disable(struct radeon_device *rdev); |
94 | .gart_set_page = &r100_pci_gart_set_page, | 102 | int r100_debugfs_mc_info_init(struct radeon_device *rdev); |
95 | .cp_commit = &r100_cp_commit, | 103 | int r100_gui_wait_for_idle(struct radeon_device *rdev); |
96 | .ring_start = &r100_ring_start, | 104 | void r100_ib_fini(struct radeon_device *rdev); |
97 | .ring_test = &r100_ring_test, | 105 | int r100_ib_init(struct radeon_device *rdev); |
98 | .ring_ib_execute = &r100_ring_ib_execute, | 106 | void r100_irq_disable(struct radeon_device *rdev); |
99 | .irq_set = &r100_irq_set, | 107 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); |
100 | .irq_process = &r100_irq_process, | 108 | void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); |
101 | .get_vblank_counter = &r100_get_vblank_counter, | 109 | void r100_vram_init_sizes(struct radeon_device *rdev); |
102 | .fence_ring_emit = &r100_fence_ring_emit, | 110 | void r100_wb_disable(struct radeon_device *rdev); |
103 | .cs_parse = &r100_cs_parse, | 111 | void r100_wb_fini(struct radeon_device *rdev); |
104 | .copy_blit = &r100_copy_blit, | 112 | int r100_wb_init(struct radeon_device *rdev); |
105 | .copy_dma = NULL, | 113 | void r100_hdp_reset(struct radeon_device *rdev); |
106 | .copy = &r100_copy_blit, | 114 | int r100_rb2d_reset(struct radeon_device *rdev); |
107 | .get_engine_clock = &radeon_legacy_get_engine_clock, | 115 | int r100_cp_reset(struct radeon_device *rdev); |
108 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 116 | void r100_vga_render_disable(struct radeon_device *rdev); |
109 | .get_memory_clock = &radeon_legacy_get_memory_clock, | 117 | int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, |
110 | .set_memory_clock = NULL, | 118 | struct radeon_cs_packet *pkt, |
111 | .get_pcie_lanes = NULL, | 119 | struct radeon_bo *robj); |
112 | .set_pcie_lanes = NULL, | 120 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, |
113 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 121 | struct radeon_cs_packet *pkt, |
114 | .set_surface_reg = r100_set_surface_reg, | 122 | const unsigned *auth, unsigned n, |
115 | .clear_surface_reg = r100_clear_surface_reg, | 123 | radeon_packet0_check_t check); |
116 | .bandwidth_update = &r100_bandwidth_update, | 124 | int r100_cs_packet_parse(struct radeon_cs_parser *p, |
117 | .hpd_init = &r100_hpd_init, | 125 | struct radeon_cs_packet *pkt, |
118 | .hpd_fini = &r100_hpd_fini, | 126 | unsigned idx); |
119 | .hpd_sense = &r100_hpd_sense, | 127 | void r100_enable_bm(struct radeon_device *rdev); |
120 | .hpd_set_polarity = &r100_hpd_set_polarity, | 128 | void r100_set_common_regs(struct radeon_device *rdev); |
121 | .ioctl_wait_idle = NULL, | ||
122 | }; | ||
123 | 129 | ||
124 | /* | 130 | /* |
125 | * r200,rv250,rs300,rv280 | 131 | * r200,rv250,rs300,rv280 |
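struct r100_mc_save together with r100_mc_stop()/r100_mc_resume(), now exported from this header instead of staying private to r100.c, is a straightforward save/quiesce/restore bracket around memory-controller reprogramming: the display registers named in the struct are captured, the hardware is idled, and the exact values are written back afterwards. The shape of that pattern as an isolated, runnable sketch (stand-in variables instead of real MMIO):

        #include <stdio.h>

        /* Stand-ins for the handful of registers the real code saves. */
        static unsigned int CRTC_GEN_CNTL = 0x03000200;
        static unsigned int CRTC_EXT_CNTL = 0x00008048;

        struct mc_save {
                unsigned int crtc_gen_cntl;
                unsigned int crtc_ext_cntl;
        };

        static void mc_stop(struct mc_save *save)
        {
                save->crtc_gen_cntl = CRTC_GEN_CNTL;    /* capture current state */
                save->crtc_ext_cntl = CRTC_EXT_CNTL;
                CRTC_GEN_CNTL = 0;                      /* blank while the MC is reprogrammed */
                CRTC_EXT_CNTL = 0;
        }

        static void mc_resume(const struct mc_save *save)
        {
                CRTC_GEN_CNTL = save->crtc_gen_cntl;    /* put everything back verbatim */
                CRTC_EXT_CNTL = save->crtc_ext_cntl;
        }

        int main(void)
        {
                struct mc_save save;

                mc_stop(&save);
                /* ... reprogram the memory controller here ... */
                mc_resume(&save);
                printf("CRTC_GEN_CNTL restored to %#x\n", CRTC_GEN_CNTL);
                return 0;
        }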
@@ -129,43 +135,6 @@ extern int r200_copy_dma(struct radeon_device *rdev, | |||
129 | uint64_t dst_offset, | 135 | uint64_t dst_offset, |
130 | unsigned num_pages, | 136 | unsigned num_pages, |
131 | struct radeon_fence *fence); | 137 | struct radeon_fence *fence); |
132 | static struct radeon_asic r200_asic = { | ||
133 | .init = &r100_init, | ||
134 | .fini = &r100_fini, | ||
135 | .suspend = &r100_suspend, | ||
136 | .resume = &r100_resume, | ||
137 | .vga_set_state = &r100_vga_set_state, | ||
138 | .gpu_reset = &r100_gpu_reset, | ||
139 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | ||
140 | .gart_set_page = &r100_pci_gart_set_page, | ||
141 | .cp_commit = &r100_cp_commit, | ||
142 | .ring_start = &r100_ring_start, | ||
143 | .ring_test = &r100_ring_test, | ||
144 | .ring_ib_execute = &r100_ring_ib_execute, | ||
145 | .irq_set = &r100_irq_set, | ||
146 | .irq_process = &r100_irq_process, | ||
147 | .get_vblank_counter = &r100_get_vblank_counter, | ||
148 | .fence_ring_emit = &r100_fence_ring_emit, | ||
149 | .cs_parse = &r100_cs_parse, | ||
150 | .copy_blit = &r100_copy_blit, | ||
151 | .copy_dma = &r200_copy_dma, | ||
152 | .copy = &r100_copy_blit, | ||
153 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
154 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
155 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
156 | .set_memory_clock = NULL, | ||
157 | .set_pcie_lanes = NULL, | ||
158 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
159 | .set_surface_reg = r100_set_surface_reg, | ||
160 | .clear_surface_reg = r100_clear_surface_reg, | ||
161 | .bandwidth_update = &r100_bandwidth_update, | ||
162 | .hpd_init = &r100_hpd_init, | ||
163 | .hpd_fini = &r100_hpd_fini, | ||
164 | .hpd_sense = &r100_hpd_sense, | ||
165 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
166 | .ioctl_wait_idle = NULL, | ||
167 | }; | ||
168 | |||
169 | 138 | ||
170 | /* | 139 | /* |
171 | * r300,r350,rv350,rv380 | 140 | * r300,r350,rv350,rv380 |
@@ -186,82 +155,6 @@ extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v | |||
186 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); | 155 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); |
187 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); | 156 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); |
188 | 157 | ||
189 | static struct radeon_asic r300_asic = { | ||
190 | .init = &r300_init, | ||
191 | .fini = &r300_fini, | ||
192 | .suspend = &r300_suspend, | ||
193 | .resume = &r300_resume, | ||
194 | .vga_set_state = &r100_vga_set_state, | ||
195 | .gpu_reset = &r300_gpu_reset, | ||
196 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | ||
197 | .gart_set_page = &r100_pci_gart_set_page, | ||
198 | .cp_commit = &r100_cp_commit, | ||
199 | .ring_start = &r300_ring_start, | ||
200 | .ring_test = &r100_ring_test, | ||
201 | .ring_ib_execute = &r100_ring_ib_execute, | ||
202 | .irq_set = &r100_irq_set, | ||
203 | .irq_process = &r100_irq_process, | ||
204 | .get_vblank_counter = &r100_get_vblank_counter, | ||
205 | .fence_ring_emit = &r300_fence_ring_emit, | ||
206 | .cs_parse = &r300_cs_parse, | ||
207 | .copy_blit = &r100_copy_blit, | ||
208 | .copy_dma = &r200_copy_dma, | ||
209 | .copy = &r100_copy_blit, | ||
210 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
211 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
212 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
213 | .set_memory_clock = NULL, | ||
214 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
215 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
216 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
217 | .set_surface_reg = r100_set_surface_reg, | ||
218 | .clear_surface_reg = r100_clear_surface_reg, | ||
219 | .bandwidth_update = &r100_bandwidth_update, | ||
220 | .hpd_init = &r100_hpd_init, | ||
221 | .hpd_fini = &r100_hpd_fini, | ||
222 | .hpd_sense = &r100_hpd_sense, | ||
223 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
224 | .ioctl_wait_idle = NULL, | ||
225 | }; | ||
226 | |||
227 | |||
228 | static struct radeon_asic r300_asic_pcie = { | ||
229 | .init = &r300_init, | ||
230 | .fini = &r300_fini, | ||
231 | .suspend = &r300_suspend, | ||
232 | .resume = &r300_resume, | ||
233 | .vga_set_state = &r100_vga_set_state, | ||
234 | .gpu_reset = &r300_gpu_reset, | ||
235 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
236 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
237 | .cp_commit = &r100_cp_commit, | ||
238 | .ring_start = &r300_ring_start, | ||
239 | .ring_test = &r100_ring_test, | ||
240 | .ring_ib_execute = &r100_ring_ib_execute, | ||
241 | .irq_set = &r100_irq_set, | ||
242 | .irq_process = &r100_irq_process, | ||
243 | .get_vblank_counter = &r100_get_vblank_counter, | ||
244 | .fence_ring_emit = &r300_fence_ring_emit, | ||
245 | .cs_parse = &r300_cs_parse, | ||
246 | .copy_blit = &r100_copy_blit, | ||
247 | .copy_dma = &r200_copy_dma, | ||
248 | .copy = &r100_copy_blit, | ||
249 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
250 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
251 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
252 | .set_memory_clock = NULL, | ||
253 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
254 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
255 | .set_surface_reg = r100_set_surface_reg, | ||
256 | .clear_surface_reg = r100_clear_surface_reg, | ||
257 | .bandwidth_update = &r100_bandwidth_update, | ||
258 | .hpd_init = &r100_hpd_init, | ||
259 | .hpd_fini = &r100_hpd_fini, | ||
260 | .hpd_sense = &r100_hpd_sense, | ||
261 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
262 | .ioctl_wait_idle = NULL, | ||
263 | }; | ||
264 | |||
265 | /* | 158 | /* |
266 | * r420,r423,rv410 | 159 | * r420,r423,rv410 |
267 | */ | 160 | */ |
@@ -269,44 +162,6 @@ extern int r420_init(struct radeon_device *rdev); | |||
269 | extern void r420_fini(struct radeon_device *rdev); | 162 | extern void r420_fini(struct radeon_device *rdev); |
270 | extern int r420_suspend(struct radeon_device *rdev); | 163 | extern int r420_suspend(struct radeon_device *rdev); |
271 | extern int r420_resume(struct radeon_device *rdev); | 164 | extern int r420_resume(struct radeon_device *rdev); |
272 | static struct radeon_asic r420_asic = { | ||
273 | .init = &r420_init, | ||
274 | .fini = &r420_fini, | ||
275 | .suspend = &r420_suspend, | ||
276 | .resume = &r420_resume, | ||
277 | .vga_set_state = &r100_vga_set_state, | ||
278 | .gpu_reset = &r300_gpu_reset, | ||
279 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
280 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
281 | .cp_commit = &r100_cp_commit, | ||
282 | .ring_start = &r300_ring_start, | ||
283 | .ring_test = &r100_ring_test, | ||
284 | .ring_ib_execute = &r100_ring_ib_execute, | ||
285 | .irq_set = &r100_irq_set, | ||
286 | .irq_process = &r100_irq_process, | ||
287 | .get_vblank_counter = &r100_get_vblank_counter, | ||
288 | .fence_ring_emit = &r300_fence_ring_emit, | ||
289 | .cs_parse = &r300_cs_parse, | ||
290 | .copy_blit = &r100_copy_blit, | ||
291 | .copy_dma = &r200_copy_dma, | ||
292 | .copy = &r100_copy_blit, | ||
293 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
294 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
295 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
296 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
297 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
298 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
299 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
300 | .set_surface_reg = r100_set_surface_reg, | ||
301 | .clear_surface_reg = r100_clear_surface_reg, | ||
302 | .bandwidth_update = &r100_bandwidth_update, | ||
303 | .hpd_init = &r100_hpd_init, | ||
304 | .hpd_fini = &r100_hpd_fini, | ||
305 | .hpd_sense = &r100_hpd_sense, | ||
306 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
307 | .ioctl_wait_idle = NULL, | ||
308 | }; | ||
309 | |||
310 | 165 | ||
311 | /* | 166 | /* |
312 | * rs400,rs480 | 167 | * rs400,rs480 |
@@ -319,44 +174,6 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev); | |||
319 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 174 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
320 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 175 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
321 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 176 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
322 | static struct radeon_asic rs400_asic = { | ||
323 | .init = &rs400_init, | ||
324 | .fini = &rs400_fini, | ||
325 | .suspend = &rs400_suspend, | ||
326 | .resume = &rs400_resume, | ||
327 | .vga_set_state = &r100_vga_set_state, | ||
328 | .gpu_reset = &r300_gpu_reset, | ||
329 | .gart_tlb_flush = &rs400_gart_tlb_flush, | ||
330 | .gart_set_page = &rs400_gart_set_page, | ||
331 | .cp_commit = &r100_cp_commit, | ||
332 | .ring_start = &r300_ring_start, | ||
333 | .ring_test = &r100_ring_test, | ||
334 | .ring_ib_execute = &r100_ring_ib_execute, | ||
335 | .irq_set = &r100_irq_set, | ||
336 | .irq_process = &r100_irq_process, | ||
337 | .get_vblank_counter = &r100_get_vblank_counter, | ||
338 | .fence_ring_emit = &r300_fence_ring_emit, | ||
339 | .cs_parse = &r300_cs_parse, | ||
340 | .copy_blit = &r100_copy_blit, | ||
341 | .copy_dma = &r200_copy_dma, | ||
342 | .copy = &r100_copy_blit, | ||
343 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
344 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
345 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
346 | .set_memory_clock = NULL, | ||
347 | .get_pcie_lanes = NULL, | ||
348 | .set_pcie_lanes = NULL, | ||
349 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
350 | .set_surface_reg = r100_set_surface_reg, | ||
351 | .clear_surface_reg = r100_clear_surface_reg, | ||
352 | .bandwidth_update = &r100_bandwidth_update, | ||
353 | .hpd_init = &r100_hpd_init, | ||
354 | .hpd_fini = &r100_hpd_fini, | ||
355 | .hpd_sense = &r100_hpd_sense, | ||
356 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
357 | .ioctl_wait_idle = NULL, | ||
358 | }; | ||
359 | |||
360 | 177 | ||
361 | /* | 178 | /* |
362 | * rs600. | 179 | * rs600. |
@@ -379,45 +196,6 @@ bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | |||
379 | void rs600_hpd_set_polarity(struct radeon_device *rdev, | 196 | void rs600_hpd_set_polarity(struct radeon_device *rdev, |
380 | enum radeon_hpd_id hpd); | 197 | enum radeon_hpd_id hpd); |
381 | 198 | ||
382 | static struct radeon_asic rs600_asic = { | ||
383 | .init = &rs600_init, | ||
384 | .fini = &rs600_fini, | ||
385 | .suspend = &rs600_suspend, | ||
386 | .resume = &rs600_resume, | ||
387 | .vga_set_state = &r100_vga_set_state, | ||
388 | .gpu_reset = &r300_gpu_reset, | ||
389 | .gart_tlb_flush = &rs600_gart_tlb_flush, | ||
390 | .gart_set_page = &rs600_gart_set_page, | ||
391 | .cp_commit = &r100_cp_commit, | ||
392 | .ring_start = &r300_ring_start, | ||
393 | .ring_test = &r100_ring_test, | ||
394 | .ring_ib_execute = &r100_ring_ib_execute, | ||
395 | .irq_set = &rs600_irq_set, | ||
396 | .irq_process = &rs600_irq_process, | ||
397 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
398 | .fence_ring_emit = &r300_fence_ring_emit, | ||
399 | .cs_parse = &r300_cs_parse, | ||
400 | .copy_blit = &r100_copy_blit, | ||
401 | .copy_dma = &r200_copy_dma, | ||
402 | .copy = &r100_copy_blit, | ||
403 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
404 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
405 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
406 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
407 | .get_pcie_lanes = NULL, | ||
408 | .set_pcie_lanes = NULL, | ||
409 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
410 | .set_surface_reg = r100_set_surface_reg, | ||
411 | .clear_surface_reg = r100_clear_surface_reg, | ||
412 | .bandwidth_update = &rs600_bandwidth_update, | ||
413 | .hpd_init = &rs600_hpd_init, | ||
414 | .hpd_fini = &rs600_hpd_fini, | ||
415 | .hpd_sense = &rs600_hpd_sense, | ||
416 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
417 | .ioctl_wait_idle = NULL, | ||
418 | }; | ||
419 | |||
420 | |||
421 | /* | 199 | /* |
422 | * rs690,rs740 | 200 | * rs690,rs740 |
423 | */ | 201 | */ |
@@ -428,44 +206,6 @@ int rs690_suspend(struct radeon_device *rdev); | |||
428 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 206 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
429 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 207 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
430 | void rs690_bandwidth_update(struct radeon_device *rdev); | 208 | void rs690_bandwidth_update(struct radeon_device *rdev); |
431 | static struct radeon_asic rs690_asic = { | ||
432 | .init = &rs690_init, | ||
433 | .fini = &rs690_fini, | ||
434 | .suspend = &rs690_suspend, | ||
435 | .resume = &rs690_resume, | ||
436 | .vga_set_state = &r100_vga_set_state, | ||
437 | .gpu_reset = &r300_gpu_reset, | ||
438 | .gart_tlb_flush = &rs400_gart_tlb_flush, | ||
439 | .gart_set_page = &rs400_gart_set_page, | ||
440 | .cp_commit = &r100_cp_commit, | ||
441 | .ring_start = &r300_ring_start, | ||
442 | .ring_test = &r100_ring_test, | ||
443 | .ring_ib_execute = &r100_ring_ib_execute, | ||
444 | .irq_set = &rs600_irq_set, | ||
445 | .irq_process = &rs600_irq_process, | ||
446 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
447 | .fence_ring_emit = &r300_fence_ring_emit, | ||
448 | .cs_parse = &r300_cs_parse, | ||
449 | .copy_blit = &r100_copy_blit, | ||
450 | .copy_dma = &r200_copy_dma, | ||
451 | .copy = &r200_copy_dma, | ||
452 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
453 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
454 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
455 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
456 | .get_pcie_lanes = NULL, | ||
457 | .set_pcie_lanes = NULL, | ||
458 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
459 | .set_surface_reg = r100_set_surface_reg, | ||
460 | .clear_surface_reg = r100_clear_surface_reg, | ||
461 | .bandwidth_update = &rs690_bandwidth_update, | ||
462 | .hpd_init = &rs600_hpd_init, | ||
463 | .hpd_fini = &rs600_hpd_fini, | ||
464 | .hpd_sense = &rs600_hpd_sense, | ||
465 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
466 | .ioctl_wait_idle = NULL, | ||
467 | }; | ||
468 | |||
469 | 209 | ||
470 | /* | 210 | /* |
471 | * rv515 | 211 | * rv515 |
@@ -481,87 +221,12 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | |||
481 | void rv515_bandwidth_update(struct radeon_device *rdev); | 221 | void rv515_bandwidth_update(struct radeon_device *rdev); |
482 | int rv515_resume(struct radeon_device *rdev); | 222 | int rv515_resume(struct radeon_device *rdev); |
483 | int rv515_suspend(struct radeon_device *rdev); | 223 | int rv515_suspend(struct radeon_device *rdev); |
484 | static struct radeon_asic rv515_asic = { | ||
485 | .init = &rv515_init, | ||
486 | .fini = &rv515_fini, | ||
487 | .suspend = &rv515_suspend, | ||
488 | .resume = &rv515_resume, | ||
489 | .vga_set_state = &r100_vga_set_state, | ||
490 | .gpu_reset = &rv515_gpu_reset, | ||
491 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
492 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
493 | .cp_commit = &r100_cp_commit, | ||
494 | .ring_start = &rv515_ring_start, | ||
495 | .ring_test = &r100_ring_test, | ||
496 | .ring_ib_execute = &r100_ring_ib_execute, | ||
497 | .irq_set = &rs600_irq_set, | ||
498 | .irq_process = &rs600_irq_process, | ||
499 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
500 | .fence_ring_emit = &r300_fence_ring_emit, | ||
501 | .cs_parse = &r300_cs_parse, | ||
502 | .copy_blit = &r100_copy_blit, | ||
503 | .copy_dma = &r200_copy_dma, | ||
504 | .copy = &r100_copy_blit, | ||
505 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
506 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
507 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
508 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
509 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
510 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
511 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
512 | .set_surface_reg = r100_set_surface_reg, | ||
513 | .clear_surface_reg = r100_clear_surface_reg, | ||
514 | .bandwidth_update = &rv515_bandwidth_update, | ||
515 | .hpd_init = &rs600_hpd_init, | ||
516 | .hpd_fini = &rs600_hpd_fini, | ||
517 | .hpd_sense = &rs600_hpd_sense, | ||
518 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
519 | .ioctl_wait_idle = NULL, | ||
520 | }; | ||
521 | |||
522 | 224 | ||
523 | /* | 225 | /* |
524 | * r520,rv530,rv560,rv570,r580 | 226 | * r520,rv530,rv560,rv570,r580 |
525 | */ | 227 | */ |
526 | int r520_init(struct radeon_device *rdev); | 228 | int r520_init(struct radeon_device *rdev); |
527 | int r520_resume(struct radeon_device *rdev); | 229 | int r520_resume(struct radeon_device *rdev); |
528 | static struct radeon_asic r520_asic = { | ||
529 | .init = &r520_init, | ||
530 | .fini = &rv515_fini, | ||
531 | .suspend = &rv515_suspend, | ||
532 | .resume = &r520_resume, | ||
533 | .vga_set_state = &r100_vga_set_state, | ||
534 | .gpu_reset = &rv515_gpu_reset, | ||
535 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
536 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
537 | .cp_commit = &r100_cp_commit, | ||
538 | .ring_start = &rv515_ring_start, | ||
539 | .ring_test = &r100_ring_test, | ||
540 | .ring_ib_execute = &r100_ring_ib_execute, | ||
541 | .irq_set = &rs600_irq_set, | ||
542 | .irq_process = &rs600_irq_process, | ||
543 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
544 | .fence_ring_emit = &r300_fence_ring_emit, | ||
545 | .cs_parse = &r300_cs_parse, | ||
546 | .copy_blit = &r100_copy_blit, | ||
547 | .copy_dma = &r200_copy_dma, | ||
548 | .copy = &r100_copy_blit, | ||
549 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
550 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
551 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
552 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
553 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
554 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
555 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
556 | .set_surface_reg = r100_set_surface_reg, | ||
557 | .clear_surface_reg = r100_clear_surface_reg, | ||
558 | .bandwidth_update = &rv515_bandwidth_update, | ||
559 | .hpd_init = &rs600_hpd_init, | ||
560 | .hpd_fini = &rs600_hpd_fini, | ||
561 | .hpd_sense = &rs600_hpd_sense, | ||
562 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
563 | .ioctl_wait_idle = NULL, | ||
564 | }; | ||
565 | 230 | ||
566 | /* | 231 | /* |
567 | * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880 | 232 | * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880 |
@@ -591,7 +256,7 @@ int r600_gpu_reset(struct radeon_device *rdev); | |||
591 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, | 256 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
592 | uint32_t tiling_flags, uint32_t pitch, | 257 | uint32_t tiling_flags, uint32_t pitch, |
593 | uint32_t offset, uint32_t obj_size); | 258 | uint32_t offset, uint32_t obj_size); |
594 | int r600_clear_surface_reg(struct radeon_device *rdev, int reg); | 259 | void r600_clear_surface_reg(struct radeon_device *rdev, int reg); |
595 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 260 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
596 | int r600_ring_test(struct radeon_device *rdev); | 261 | int r600_ring_test(struct radeon_device *rdev); |
597 | int r600_copy_blit(struct radeon_device *rdev, | 262 | int r600_copy_blit(struct radeon_device *rdev, |
@@ -604,43 +269,6 @@ void r600_hpd_set_polarity(struct radeon_device *rdev, | |||
604 | enum radeon_hpd_id hpd); | 269 | enum radeon_hpd_id hpd); |
605 | extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); | 270 | extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); |
606 | 271 | ||
607 | static struct radeon_asic r600_asic = { | ||
608 | .init = &r600_init, | ||
609 | .fini = &r600_fini, | ||
610 | .suspend = &r600_suspend, | ||
611 | .resume = &r600_resume, | ||
612 | .cp_commit = &r600_cp_commit, | ||
613 | .vga_set_state = &r600_vga_set_state, | ||
614 | .gpu_reset = &r600_gpu_reset, | ||
615 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
616 | .gart_set_page = &rs600_gart_set_page, | ||
617 | .ring_test = &r600_ring_test, | ||
618 | .ring_ib_execute = &r600_ring_ib_execute, | ||
619 | .irq_set = &r600_irq_set, | ||
620 | .irq_process = &r600_irq_process, | ||
621 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
622 | .fence_ring_emit = &r600_fence_ring_emit, | ||
623 | .cs_parse = &r600_cs_parse, | ||
624 | .copy_blit = &r600_copy_blit, | ||
625 | .copy_dma = &r600_copy_blit, | ||
626 | .copy = &r600_copy_blit, | ||
627 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
628 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
629 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
630 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
631 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
632 | .set_pcie_lanes = NULL, | ||
633 | .set_clock_gating = NULL, | ||
634 | .set_surface_reg = r600_set_surface_reg, | ||
635 | .clear_surface_reg = r600_clear_surface_reg, | ||
636 | .bandwidth_update = &rv515_bandwidth_update, | ||
637 | .hpd_init = &r600_hpd_init, | ||
638 | .hpd_fini = &r600_hpd_fini, | ||
639 | .hpd_sense = &r600_hpd_sense, | ||
640 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
641 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
642 | }; | ||
643 | |||
644 | /* | 272 | /* |
645 | * rv770,rv730,rv710,rv740 | 273 | * rv770,rv730,rv710,rv740 |
646 | */ | 274 | */ |
@@ -650,43 +278,6 @@ int rv770_suspend(struct radeon_device *rdev); | |||
650 | int rv770_resume(struct radeon_device *rdev); | 278 | int rv770_resume(struct radeon_device *rdev); |
651 | int rv770_gpu_reset(struct radeon_device *rdev); | 279 | int rv770_gpu_reset(struct radeon_device *rdev); |
652 | 280 | ||
653 | static struct radeon_asic rv770_asic = { | ||
654 | .init = &rv770_init, | ||
655 | .fini = &rv770_fini, | ||
656 | .suspend = &rv770_suspend, | ||
657 | .resume = &rv770_resume, | ||
658 | .cp_commit = &r600_cp_commit, | ||
659 | .gpu_reset = &rv770_gpu_reset, | ||
660 | .vga_set_state = &r600_vga_set_state, | ||
661 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
662 | .gart_set_page = &rs600_gart_set_page, | ||
663 | .ring_test = &r600_ring_test, | ||
664 | .ring_ib_execute = &r600_ring_ib_execute, | ||
665 | .irq_set = &r600_irq_set, | ||
666 | .irq_process = &r600_irq_process, | ||
667 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
668 | .fence_ring_emit = &r600_fence_ring_emit, | ||
669 | .cs_parse = &r600_cs_parse, | ||
670 | .copy_blit = &r600_copy_blit, | ||
671 | .copy_dma = &r600_copy_blit, | ||
672 | .copy = &r600_copy_blit, | ||
673 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
674 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
675 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
676 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
677 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
678 | .set_pcie_lanes = NULL, | ||
679 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
680 | .set_surface_reg = r600_set_surface_reg, | ||
681 | .clear_surface_reg = r600_clear_surface_reg, | ||
682 | .bandwidth_update = &rv515_bandwidth_update, | ||
683 | .hpd_init = &r600_hpd_init, | ||
684 | .hpd_fini = &r600_hpd_fini, | ||
685 | .hpd_sense = &r600_hpd_sense, | ||
686 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
687 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
688 | }; | ||
689 | |||
690 | /* | 281 | /* |
691 | * evergreen | 282 | * evergreen |
692 | */ | 283 | */ |
@@ -701,40 +292,4 @@ void evergreen_hpd_fini(struct radeon_device *rdev); | |||
701 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 292 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
702 | void evergreen_hpd_set_polarity(struct radeon_device *rdev, | 293 | void evergreen_hpd_set_polarity(struct radeon_device *rdev, |
703 | enum radeon_hpd_id hpd); | 294 | enum radeon_hpd_id hpd); |
704 | |||
705 | static struct radeon_asic evergreen_asic = { | ||
706 | .init = &evergreen_init, | ||
707 | .fini = &evergreen_fini, | ||
708 | .suspend = &evergreen_suspend, | ||
709 | .resume = &evergreen_resume, | ||
710 | .cp_commit = NULL, | ||
711 | .gpu_reset = &evergreen_gpu_reset, | ||
712 | .vga_set_state = &r600_vga_set_state, | ||
713 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
714 | .gart_set_page = &rs600_gart_set_page, | ||
715 | .ring_test = NULL, | ||
716 | .ring_ib_execute = NULL, | ||
717 | .irq_set = NULL, | ||
718 | .irq_process = NULL, | ||
719 | .get_vblank_counter = NULL, | ||
720 | .fence_ring_emit = NULL, | ||
721 | .cs_parse = NULL, | ||
722 | .copy_blit = NULL, | ||
723 | .copy_dma = NULL, | ||
724 | .copy = NULL, | ||
725 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
726 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
727 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
728 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
729 | .set_pcie_lanes = NULL, | ||
730 | .set_clock_gating = NULL, | ||
731 | .set_surface_reg = r600_set_surface_reg, | ||
732 | .clear_surface_reg = r600_clear_surface_reg, | ||
733 | .bandwidth_update = &evergreen_bandwidth_update, | ||
734 | .hpd_init = &evergreen_hpd_init, | ||
735 | .hpd_fini = &evergreen_hpd_fini, | ||
736 | .hpd_sense = &evergreen_hpd_sense, | ||
737 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | ||
738 | }; | ||
739 | |||
740 | #endif | 295 | #endif |
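After this block radeon_asic.h defines no objects at all: every static struct radeon_asic table that used to live here is gone in favour of the copies now defined in radeon_asic.c (the additions at the top of this patch), leaving only per-family prototypes plus the new struct r100_mc_save. One practical effect of not defining static objects in a shared header is that each includer no longer carries its own internal-linkage copy of the tables; whether that or plain readability motivated the move is not stated in the patch. Trimmed sketch of the resulting split, using declarations taken from the hunks above:

        /* radeon_asic.h -- prototypes only, no object definitions */
        int r600_init(struct radeon_device *rdev);
        int r600_ring_test(struct radeon_device *rdev);
        void r600_clear_surface_reg(struct radeon_device *rdev, int reg);

        /* radeon_asic.c -- the single definition of each dispatch table */
        static struct radeon_asic r600_asic = {
                .init      = &r600_init,
                .ring_test = &r600_ring_test,
                /* ... remaining hooks as in the hunk at the top of this patch ... */
        };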
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 93783b15c81d..1fff95505cf5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -75,46 +75,45 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
75 | memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); | 75 | memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); |
76 | i2c.valid = false; | 76 | i2c.valid = false; |
77 | 77 | ||
78 | atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); | 78 | if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { |
79 | 79 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); | |
80 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); | 80 | |
81 | 81 | for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { | |
82 | 82 | gpio = &i2c_info->asGPIO_Info[i]; | |
83 | for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { | 83 | |
84 | gpio = &i2c_info->asGPIO_Info[i]; | 84 | if (gpio->sucI2cId.ucAccess == id) { |
85 | 85 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | |
86 | if (gpio->sucI2cId.ucAccess == id) { | 86 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
87 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 87 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; |
88 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 88 | i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; |
89 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; | 89 | i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; |
90 | i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; | 90 | i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; |
91 | i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; | 91 | i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; |
92 | i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; | 92 | i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; |
93 | i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; | 93 | i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); |
94 | i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; | 94 | i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); |
95 | i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); | 95 | i2c.en_clk_mask = (1 << gpio->ucClkEnShift); |
96 | i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); | 96 | i2c.en_data_mask = (1 << gpio->ucDataEnShift); |
97 | i2c.en_clk_mask = (1 << gpio->ucClkEnShift); | 97 | i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); |
98 | i2c.en_data_mask = (1 << gpio->ucDataEnShift); | 98 | i2c.y_data_mask = (1 << gpio->ucDataY_Shift); |
99 | i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); | 99 | i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); |
100 | i2c.y_data_mask = (1 << gpio->ucDataY_Shift); | 100 | i2c.a_data_mask = (1 << gpio->ucDataA_Shift); |
101 | i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); | 101 | |
102 | i2c.a_data_mask = (1 << gpio->ucDataA_Shift); | 102 | if (gpio->sucI2cId.sbfAccess.bfHW_Capable) |
103 | 103 | i2c.hw_capable = true; | |
104 | if (gpio->sucI2cId.sbfAccess.bfHW_Capable) | 104 | else |
105 | i2c.hw_capable = true; | 105 | i2c.hw_capable = false; |
106 | else | 106 | |
107 | i2c.hw_capable = false; | 107 | if (gpio->sucI2cId.ucAccess == 0xa0) |
108 | 108 | i2c.mm_i2c = true; | |
109 | if (gpio->sucI2cId.ucAccess == 0xa0) | 109 | else |
110 | i2c.mm_i2c = true; | 110 | i2c.mm_i2c = false; |
111 | else | 111 | |
112 | i2c.mm_i2c = false; | 112 | i2c.i2c_id = gpio->sucI2cId.ucAccess; |
113 | 113 | ||
114 | i2c.i2c_id = gpio->sucI2cId.ucAccess; | 114 | i2c.valid = true; |
115 | 115 | break; | |
116 | i2c.valid = true; | 116 | } |
117 | break; | ||
118 | } | 117 | } |
119 | } | 118 | } |
120 | 119 | ||
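The radeon_atombios.c changes all follow one pattern: atom_parse_data_header() is now treated as fallible, and the table at bios + data_offset is only dereferenced inside an if (atom_parse_data_header(...)) block instead of unconditionally. A standalone model of that guarded-parse shape (toy parser, not the real ATOM layout):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Toy stand-in: "parse" succeeds only if a table is present and
         * reports where it starts, mirroring the bool the driver now checks. */
        static bool parse_data_header(const uint8_t *bios, size_t len, uint16_t *data_offset)
        {
                if (len < 4 || bios[0] != 0xAA)   /* pretend 0xAA marks a valid table */
                        return false;
                *data_offset = 2;
                return true;
        }

        int main(void)
        {
                const uint8_t bios[] = { 0xAA, 0x00, 0x12, 0x34 };
                uint16_t off;

                if (parse_data_header(bios, sizeof(bios), &off)) {
                        /* only touch bios + off once the header parse succeeded */
                        printf("table byte: %#x\n", bios[off]);
                } else {
                        puts("no table, bail out");
                }
                return 0;
        }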
@@ -135,20 +134,21 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd | |||
135 | memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); | 134 | memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); |
136 | gpio.valid = false; | 135 | gpio.valid = false; |
137 | 136 | ||
138 | atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset); | 137 | if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { |
138 | gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); | ||
139 | 139 | ||
140 | gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); | 140 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / |
141 | sizeof(ATOM_GPIO_PIN_ASSIGNMENT); | ||
141 | 142 | ||
142 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); | 143 | for (i = 0; i < num_indices; i++) { |
143 | 144 | pin = &gpio_info->asGPIO_Pin[i]; | |
144 | for (i = 0; i < num_indices; i++) { | 145 | if (id == pin->ucGPIO_ID) { |
145 | pin = &gpio_info->asGPIO_Pin[i]; | 146 | gpio.id = pin->ucGPIO_ID; |
146 | if (id == pin->ucGPIO_ID) { | 147 | gpio.reg = pin->usGpioPin_AIndex * 4; |
147 | gpio.id = pin->ucGPIO_ID; | 148 | gpio.mask = (1 << pin->ucGpioPinBitShift); |
148 | gpio.reg = pin->usGpioPin_AIndex * 4; | 149 | gpio.valid = true; |
149 | gpio.mask = (1 << pin->ucGpioPinBitShift); | 150 | break; |
150 | gpio.valid = true; | 151 | } |
151 | break; | ||
152 | } | 152 | } |
153 | } | 153 | } |
154 | 154 | ||
@@ -264,6 +264,8 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
264 | if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) || | 264 | if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) || |
265 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | 265 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) |
266 | return false; | 266 | return false; |
267 | if (supported_device == ATOM_DEVICE_CRT2_SUPPORT) | ||
268 | *line_mux = 0x90; | ||
267 | } | 269 | } |
268 | 270 | ||
269 | /* ASUS HD 3600 XT board lists the DVI port as HDMI */ | 271 | /* ASUS HD 3600 XT board lists the DVI port as HDMI */ |
@@ -395,9 +397,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
395 | struct radeon_gpio_rec gpio; | 397 | struct radeon_gpio_rec gpio; |
396 | struct radeon_hpd hpd; | 398 | struct radeon_hpd hpd; |
397 | 399 | ||
398 | atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); | 400 | if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) |
399 | |||
400 | if (data_offset == 0) | ||
401 | return false; | 401 | return false; |
402 | 402 | ||
403 | if (crev < 2) | 403 | if (crev < 2) |
@@ -449,37 +449,43 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
449 | GetIndexIntoMasterTable(DATA, | 449 | GetIndexIntoMasterTable(DATA, |
450 | IntegratedSystemInfo); | 450 | IntegratedSystemInfo); |
451 | 451 | ||
452 | atom_parse_data_header(ctx, index, &size, &frev, | 452 | if (atom_parse_data_header(ctx, index, &size, &frev, |
453 | &crev, &igp_offset); | 453 | &crev, &igp_offset)) { |
454 | 454 | ||
455 | if (crev >= 2) { | 455 | if (crev >= 2) { |
456 | igp_obj = | 456 | igp_obj = |
457 | (ATOM_INTEGRATED_SYSTEM_INFO_V2 | 457 | (ATOM_INTEGRATED_SYSTEM_INFO_V2 |
458 | *) (ctx->bios + igp_offset); | 458 | *) (ctx->bios + igp_offset); |
459 | 459 | ||
460 | if (igp_obj) { | 460 | if (igp_obj) { |
461 | uint32_t slot_config, ct; | 461 | uint32_t slot_config, ct; |
462 | 462 | ||
463 | if (con_obj_num == 1) | 463 | if (con_obj_num == 1) |
464 | slot_config = | 464 | slot_config = |
465 | igp_obj-> | 465 | igp_obj-> |
466 | ulDDISlot1Config; | 466 | ulDDISlot1Config; |
467 | else | 467 | else |
468 | slot_config = | 468 | slot_config = |
469 | igp_obj-> | 469 | igp_obj-> |
470 | ulDDISlot2Config; | 470 | ulDDISlot2Config; |
471 | 471 | ||
472 | ct = (slot_config >> 16) & 0xff; | 472 | ct = (slot_config >> 16) & 0xff; |
473 | connector_type = | 473 | connector_type = |
474 | object_connector_convert | 474 | object_connector_convert |
475 | [ct]; | 475 | [ct]; |
476 | connector_object_id = ct; | 476 | connector_object_id = ct; |
477 | igp_lane_info = | 477 | igp_lane_info = |
478 | slot_config & 0xffff; | 478 | slot_config & 0xffff; |
479 | } else | ||
480 | continue; | ||
479 | } else | 481 | } else |
480 | continue; | 482 | continue; |
481 | } else | 483 | } else { |
482 | continue; | 484 | igp_lane_info = 0; |
485 | connector_type = | ||
486 | object_connector_convert[con_obj_id]; | ||
487 | connector_object_id = con_obj_id; | ||
488 | } | ||
483 | } else { | 489 | } else { |
484 | igp_lane_info = 0; | 490 | igp_lane_info = 0; |
485 | connector_type = | 491 | connector_type = |
@@ -627,20 +633,23 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev, | |||
627 | uint8_t frev, crev; | 633 | uint8_t frev, crev; |
628 | ATOM_XTMDS_INFO *xtmds; | 634 | ATOM_XTMDS_INFO *xtmds; |
629 | 635 | ||
630 | atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); | 636 | if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) { |
631 | xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset); | 637 | xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset); |
632 | 638 | ||
633 | if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) { | 639 | if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) { |
634 | if (connector_type == DRM_MODE_CONNECTOR_DVII) | 640 | if (connector_type == DRM_MODE_CONNECTOR_DVII) |
635 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; | 641 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; |
636 | else | 642 | else |
637 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; | 643 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; |
638 | } else { | 644 | } else { |
639 | if (connector_type == DRM_MODE_CONNECTOR_DVII) | 645 | if (connector_type == DRM_MODE_CONNECTOR_DVII) |
640 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; | 646 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; |
641 | else | 647 | else |
642 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; | 648 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; |
643 | } | 649 | } |
650 | } else | ||
651 | return supported_devices_connector_object_id_convert | ||
652 | [connector_type]; | ||
644 | } else { | 653 | } else { |
645 | return supported_devices_connector_object_id_convert | 654 | return supported_devices_connector_object_id_convert |
646 | [connector_type]; | 655 | [connector_type]; |
@@ -672,7 +681,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
672 | int i, j, max_device; | 681 | int i, j, max_device; |
673 | struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; | 682 | struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; |
674 | 683 | ||
675 | atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); | 684 | if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) |
685 | return false; | ||
676 | 686 | ||
677 | supported_devices = | 687 | supported_devices = |
678 | (union atom_supported_devices *)(ctx->bios + data_offset); | 688 | (union atom_supported_devices *)(ctx->bios + data_offset); |
@@ -865,14 +875,11 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
865 | struct radeon_pll *mpll = &rdev->clock.mpll; | 875 | struct radeon_pll *mpll = &rdev->clock.mpll; |
866 | uint16_t data_offset; | 876 | uint16_t data_offset; |
867 | 877 | ||
868 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | 878 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
869 | &crev, &data_offset); | 879 | &frev, &crev, &data_offset)) { |
870 | 880 | firmware_info = | |
871 | firmware_info = | 881 | (union firmware_info *)(mode_info->atom_context->bios + |
872 | (union firmware_info *)(mode_info->atom_context->bios + | 882 | data_offset); |
873 | data_offset); | ||
874 | |||
875 | if (firmware_info) { | ||
876 | /* pixel clocks */ | 883 | /* pixel clocks */ |
877 | p1pll->reference_freq = | 884 | p1pll->reference_freq = |
878 | le16_to_cpu(firmware_info->info.usReferenceClock); | 885 | le16_to_cpu(firmware_info->info.usReferenceClock); |
@@ -887,6 +894,20 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
887 | p1pll->pll_out_max = | 894 | p1pll->pll_out_max = |
888 | le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); | 895 | le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); |
889 | 896 | ||
897 | if (crev >= 4) { | ||
898 | p1pll->lcd_pll_out_min = | ||
899 | le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; | ||
900 | if (p1pll->lcd_pll_out_min == 0) | ||
901 | p1pll->lcd_pll_out_min = p1pll->pll_out_min; | ||
902 | p1pll->lcd_pll_out_max = | ||
903 | le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; | ||
904 | if (p1pll->lcd_pll_out_max == 0) | ||
905 | p1pll->lcd_pll_out_max = p1pll->pll_out_max; | ||
906 | } else { | ||
907 | p1pll->lcd_pll_out_min = p1pll->pll_out_min; | ||
908 | p1pll->lcd_pll_out_max = p1pll->pll_out_max; | ||
909 | } | ||
910 | |||
890 | if (p1pll->pll_out_min == 0) { | 911 | if (p1pll->pll_out_min == 0) { |
891 | if (ASIC_IS_AVIVO(rdev)) | 912 | if (ASIC_IS_AVIVO(rdev)) |
892 | p1pll->pll_out_min = 64800; | 913 | p1pll->pll_out_min = 64800; |
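The new crev >= 4 block reads the LCD-specific pixel-clock PLL limits (each 16-bit BIOS value scaled by 100), and when the BIOS reports 0 for either limit it falls back to the generic pll_out_min/pll_out_max read just above, so lcd_pll_out_min/max are always populated. The fallback reduces to:

        #include <stdint.h>
        #include <stdio.h>

        /* use the LCD-specific limit when the BIOS provides one, else the generic one */
        static uint32_t lcd_limit_or(uint32_t lcd_limit, uint32_t generic_limit)
        {
                return lcd_limit ? lcd_limit : generic_limit;
        }

        int main(void)
        {
                uint32_t pll_out_min = 64800, pll_out_max = 1200000;

                /* e.g. a crev >= 4 table that only fills in the minimum */
                printf("lcd min %u, lcd max %u\n",
                       (unsigned)lcd_limit_or(35000, pll_out_min),
                       (unsigned)lcd_limit_or(0, pll_out_max));
                return 0;
        }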
@@ -992,13 +1013,10 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) | |||
992 | u8 frev, crev; | 1013 | u8 frev, crev; |
993 | u16 data_offset; | 1014 | u16 data_offset; |
994 | 1015 | ||
995 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | 1016 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
996 | &crev, &data_offset); | 1017 | &frev, &crev, &data_offset)) { |
997 | 1018 | igp_info = (union igp_info *)(mode_info->atom_context->bios + | |
998 | igp_info = (union igp_info *)(mode_info->atom_context->bios + | ||
999 | data_offset); | 1019 | data_offset); |
1000 | |||
1001 | if (igp_info) { | ||
1002 | switch (crev) { | 1020 | switch (crev) { |
1003 | case 1: | 1021 | case 1: |
1004 | if (igp_info->info.ucMemoryType & 0xf0) | 1022 | if (igp_info->info.ucMemoryType & 0xf0) |
@@ -1029,14 +1047,12 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, | |||
1029 | uint16_t maxfreq; | 1047 | uint16_t maxfreq; |
1030 | int i; | 1048 | int i; |
1031 | 1049 | ||
1032 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | 1050 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1033 | &crev, &data_offset); | 1051 | &frev, &crev, &data_offset)) { |
1034 | 1052 | tmds_info = | |
1035 | tmds_info = | 1053 | (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios + |
1036 | (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios + | 1054 | data_offset); |
1037 | data_offset); | ||
1038 | 1055 | ||
1039 | if (tmds_info) { | ||
1040 | maxfreq = le16_to_cpu(tmds_info->usMaxFrequency); | 1056 | maxfreq = le16_to_cpu(tmds_info->usMaxFrequency); |
1041 | for (i = 0; i < 4; i++) { | 1057 | for (i = 0; i < 4; i++) { |
1042 | tmds->tmds_pll[i].freq = | 1058 | tmds->tmds_pll[i].freq = |
@@ -1085,13 +1101,11 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct | |||
1085 | if (id > ATOM_MAX_SS_ENTRY) | 1101 | if (id > ATOM_MAX_SS_ENTRY) |
1086 | return NULL; | 1102 | return NULL; |
1087 | 1103 | ||
1088 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | 1104 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1089 | &crev, &data_offset); | 1105 | &frev, &crev, &data_offset)) { |
1106 | ss_info = | ||
1107 | (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); | ||
1090 | 1108 | ||
1091 | ss_info = | ||
1092 | (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); | ||
1093 | |||
1094 | if (ss_info) { | ||
1095 | ss = | 1109 | ss = |
1096 | kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); | 1110 | kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); |
1097 | 1111 | ||
@@ -1114,30 +1128,6 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct | |||
1114 | return ss; | 1128 | return ss; |
1115 | } | 1129 | } |
1116 | 1130 | ||
1117 | static void radeon_atom_apply_lvds_quirks(struct drm_device *dev, | ||
1118 | struct radeon_encoder_atom_dig *lvds) | ||
1119 | { | ||
1120 | |||
1121 | /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */ | ||
1122 | if ((dev->pdev->device == 0x95c4) && | ||
1123 | (dev->pdev->subsystem_vendor == 0x1179) && | ||
1124 | (dev->pdev->subsystem_device == 0xff50)) { | ||
1125 | if ((lvds->native_mode.hdisplay == 1280) && | ||
1126 | (lvds->native_mode.vdisplay == 800)) | ||
1127 | lvds->pll_algo = PLL_ALGO_LEGACY; | ||
1128 | } | ||
1129 | |||
1130 | /* Dell Studio 15 laptop panel doesn't like new pll divider algo */ | ||
1131 | if ((dev->pdev->device == 0x95c4) && | ||
1132 | (dev->pdev->subsystem_vendor == 0x1028) && | ||
1133 | (dev->pdev->subsystem_device == 0x029f)) { | ||
1134 | if ((lvds->native_mode.hdisplay == 1280) && | ||
1135 | (lvds->native_mode.vdisplay == 800)) | ||
1136 | lvds->pll_algo = PLL_ALGO_LEGACY; | ||
1137 | } | ||
1138 | |||
1139 | } | ||
1140 | |||
1141 | union lvds_info { | 1131 | union lvds_info { |
1142 | struct _ATOM_LVDS_INFO info; | 1132 | struct _ATOM_LVDS_INFO info; |
1143 | struct _ATOM_LVDS_INFO_V12 info_12; | 1133 | struct _ATOM_LVDS_INFO_V12 info_12; |
@@ -1156,13 +1146,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1156 | uint8_t frev, crev; | 1146 | uint8_t frev, crev; |
1157 | struct radeon_encoder_atom_dig *lvds = NULL; | 1147 | struct radeon_encoder_atom_dig *lvds = NULL; |
1158 | 1148 | ||
1159 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | 1149 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1160 | &crev, &data_offset); | 1150 | &frev, &crev, &data_offset)) { |
1161 | 1151 | lvds_info = | |
1162 | lvds_info = | 1152 | (union lvds_info *)(mode_info->atom_context->bios + data_offset); |
1163 | (union lvds_info *)(mode_info->atom_context->bios + data_offset); | ||
1164 | |||
1165 | if (lvds_info) { | ||
1166 | lvds = | 1153 | lvds = |
1167 | kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); | 1154 | kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); |
1168 | 1155 | ||
@@ -1220,9 +1207,6 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1220 | lvds->pll_algo = PLL_ALGO_LEGACY; | 1207 | lvds->pll_algo = PLL_ALGO_LEGACY; |
1221 | } | 1208 | } |
1222 | 1209 | ||
1223 | /* LVDS quirks */ | ||
1224 | radeon_atom_apply_lvds_quirks(dev, lvds); | ||
1225 | |||
1226 | encoder->native_mode = lvds->native_mode; | 1210 | encoder->native_mode = lvds->native_mode; |
1227 | } | 1211 | } |
1228 | return lvds; | 1212 | return lvds; |
@@ -1241,11 +1225,11 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder) | |||
1241 | uint8_t bg, dac; | 1225 | uint8_t bg, dac; |
1242 | struct radeon_encoder_primary_dac *p_dac = NULL; | 1226 | struct radeon_encoder_primary_dac *p_dac = NULL; |
1243 | 1227 | ||
1244 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); | 1228 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1245 | 1229 | &frev, &crev, &data_offset)) { | |
1246 | dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset); | 1230 | dac_info = (struct _COMPASSIONATE_DATA *) |
1231 | (mode_info->atom_context->bios + data_offset); | ||
1247 | 1232 | ||
1248 | if (dac_info) { | ||
1249 | p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL); | 1233 | p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL); |
1250 | 1234 | ||
1251 | if (!p_dac) | 1235 | if (!p_dac) |
@@ -1270,7 +1254,9 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | |||
1270 | u8 frev, crev; | 1254 | u8 frev, crev; |
1271 | u16 data_offset, misc; | 1255 | u16 data_offset, misc; |
1272 | 1256 | ||
1273 | atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset); | 1257 | if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL, |
1258 | &frev, &crev, &data_offset)) | ||
1259 | return false; | ||
1274 | 1260 | ||
1275 | switch (crev) { | 1261 | switch (crev) { |
1276 | case 1: | 1262 | case 1: |
@@ -1362,47 +1348,50 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev) | |||
1362 | struct _ATOM_ANALOG_TV_INFO *tv_info; | 1348 | struct _ATOM_ANALOG_TV_INFO *tv_info; |
1363 | enum radeon_tv_std tv_std = TV_STD_NTSC; | 1349 | enum radeon_tv_std tv_std = TV_STD_NTSC; |
1364 | 1350 | ||
1365 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); | 1351 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1352 | &frev, &crev, &data_offset)) { | ||
1366 | 1353 | ||
1367 | tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); | 1354 | tv_info = (struct _ATOM_ANALOG_TV_INFO *) |
1355 | (mode_info->atom_context->bios + data_offset); | ||
1368 | 1356 | ||
1369 | switch (tv_info->ucTV_BootUpDefaultStandard) { | 1357 | switch (tv_info->ucTV_BootUpDefaultStandard) { |
1370 | case ATOM_TV_NTSC: | 1358 | case ATOM_TV_NTSC: |
1371 | tv_std = TV_STD_NTSC; | 1359 | tv_std = TV_STD_NTSC; |
1372 | DRM_INFO("Default TV standard: NTSC\n"); | 1360 | DRM_INFO("Default TV standard: NTSC\n"); |
1373 | break; | 1361 | break; |
1374 | case ATOM_TV_NTSCJ: | 1362 | case ATOM_TV_NTSCJ: |
1375 | tv_std = TV_STD_NTSC_J; | 1363 | tv_std = TV_STD_NTSC_J; |
1376 | DRM_INFO("Default TV standard: NTSC-J\n"); | 1364 | DRM_INFO("Default TV standard: NTSC-J\n"); |
1377 | break; | 1365 | break; |
1378 | case ATOM_TV_PAL: | 1366 | case ATOM_TV_PAL: |
1379 | tv_std = TV_STD_PAL; | 1367 | tv_std = TV_STD_PAL; |
1380 | DRM_INFO("Default TV standard: PAL\n"); | 1368 | DRM_INFO("Default TV standard: PAL\n"); |
1381 | break; | 1369 | break; |
1382 | case ATOM_TV_PALM: | 1370 | case ATOM_TV_PALM: |
1383 | tv_std = TV_STD_PAL_M; | 1371 | tv_std = TV_STD_PAL_M; |
1384 | DRM_INFO("Default TV standard: PAL-M\n"); | 1372 | DRM_INFO("Default TV standard: PAL-M\n"); |
1385 | break; | 1373 | break; |
1386 | case ATOM_TV_PALN: | 1374 | case ATOM_TV_PALN: |
1387 | tv_std = TV_STD_PAL_N; | 1375 | tv_std = TV_STD_PAL_N; |
1388 | DRM_INFO("Default TV standard: PAL-N\n"); | 1376 | DRM_INFO("Default TV standard: PAL-N\n"); |
1389 | break; | 1377 | break; |
1390 | case ATOM_TV_PALCN: | 1378 | case ATOM_TV_PALCN: |
1391 | tv_std = TV_STD_PAL_CN; | 1379 | tv_std = TV_STD_PAL_CN; |
1392 | DRM_INFO("Default TV standard: PAL-CN\n"); | 1380 | DRM_INFO("Default TV standard: PAL-CN\n"); |
1393 | break; | 1381 | break; |
1394 | case ATOM_TV_PAL60: | 1382 | case ATOM_TV_PAL60: |
1395 | tv_std = TV_STD_PAL_60; | 1383 | tv_std = TV_STD_PAL_60; |
1396 | DRM_INFO("Default TV standard: PAL-60\n"); | 1384 | DRM_INFO("Default TV standard: PAL-60\n"); |
1397 | break; | 1385 | break; |
1398 | case ATOM_TV_SECAM: | 1386 | case ATOM_TV_SECAM: |
1399 | tv_std = TV_STD_SECAM; | 1387 | tv_std = TV_STD_SECAM; |
1400 | DRM_INFO("Default TV standard: SECAM\n"); | 1388 | DRM_INFO("Default TV standard: SECAM\n"); |
1401 | break; | 1389 | break; |
1402 | default: | 1390 | default: |
1403 | tv_std = TV_STD_NTSC; | 1391 | tv_std = TV_STD_NTSC; |
1404 | DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); | 1392 | DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); |
1405 | break; | 1393 | break; |
1394 | } | ||
1406 | } | 1395 | } |
1407 | return tv_std; | 1396 | return tv_std; |
1408 | } | 1397 | } |
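The hunks above all apply the same change: the return value of atom_parse_data_header() is now checked before the data_offset it fills in is used to index into the BIOS image, so a missing table no longer leads to a bogus pointer. A minimal userspace-style sketch of that guard follows; the context struct, the helper name parse_data_header(), and the two-byte table layout are simplified assumptions for illustration, not the driver's real types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's atom context and table header. */
struct fake_atom_context {
	const uint8_t *bios;
	uint32_t bios_len;
};

/* Hypothetical guard: succeed only when the table exists and its payload
 * offset is sane; mirrors the "check before use" idiom in the hunks above. */
static bool parse_data_header(const struct fake_atom_context *ctx,
			      uint32_t table_off, uint16_t *data_offset)
{
	if (table_off + 2 > ctx->bios_len)
		return false;
	/* ATOM tables store offsets little-endian */
	*data_offset = ctx->bios[table_off] |
		       ((uint16_t)ctx->bios[table_off + 1] << 8);
	return *data_offset != 0 && *data_offset < ctx->bios_len;
}

int main(void)
{
	uint8_t bios[32] = { 0 };
	struct fake_atom_context ctx = { .bios = bios, .bios_len = sizeof(bios) };
	uint16_t data_offset;

	bios[4] = 16; /* pretend the header at offset 4 points to payload at 16 */

	if (parse_data_header(&ctx, 4, &data_offset))
		printf("table payload at offset %u\n", data_offset);
	else
		printf("table missing, skip it\n");
	return 0;
}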
@@ -1420,11 +1409,12 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) | |||
1420 | uint8_t bg, dac; | 1409 | uint8_t bg, dac; |
1421 | struct radeon_encoder_tv_dac *tv_dac = NULL; | 1410 | struct radeon_encoder_tv_dac *tv_dac = NULL; |
1422 | 1411 | ||
1423 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); | 1412 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1413 | &frev, &crev, &data_offset)) { | ||
1424 | 1414 | ||
1425 | dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset); | 1415 | dac_info = (struct _COMPASSIONATE_DATA *) |
1416 | (mode_info->atom_context->bios + data_offset); | ||
1426 | 1417 | ||
1427 | if (dac_info) { | ||
1428 | tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); | 1418 | tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); |
1429 | 1419 | ||
1430 | if (!tv_dac) | 1420 | if (!tv_dac) |
@@ -1447,6 +1437,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) | |||
1447 | return tv_dac; | 1437 | return tv_dac; |
1448 | } | 1438 | } |
1449 | 1439 | ||
1440 | static const char *thermal_controller_names[] = { | ||
1441 | "NONE", | ||
1442 | "LM63", | ||
1443 | "ADM1032", | ||
1444 | "ADM1030", | ||
1445 | "MUA6649", | ||
1446 | "LM64", | ||
1447 | "F75375", | ||
1448 | "ASC7512", | ||
1449 | }; | ||
1450 | |||
1451 | static const char *pp_lib_thermal_controller_names[] = { | ||
1452 | "NONE", | ||
1453 | "LM63", | ||
1454 | "ADM1032", | ||
1455 | "ADM1030", | ||
1456 | "MUA6649", | ||
1457 | "LM64", | ||
1458 | "F75375", | ||
1459 | "RV6xx", | ||
1460 | "RV770", | ||
1461 | "ADT7473", | ||
1462 | }; | ||
1463 | |||
1450 | union power_info { | 1464 | union power_info { |
1451 | struct _ATOM_POWERPLAY_INFO info; | 1465 | struct _ATOM_POWERPLAY_INFO info; |
1452 | struct _ATOM_POWERPLAY_INFO_V2 info_2; | 1466 | struct _ATOM_POWERPLAY_INFO_V2 info_2; |
@@ -1466,15 +1480,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
1466 | struct _ATOM_PPLIB_STATE *power_state; | 1480 | struct _ATOM_PPLIB_STATE *power_state; |
1467 | int num_modes = 0, i, j; | 1481 | int num_modes = 0, i, j; |
1468 | int state_index = 0, mode_index = 0; | 1482 | int state_index = 0, mode_index = 0; |
1469 | 1483 | struct radeon_i2c_bus_rec i2c_bus; | |
1470 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); | ||
1471 | |||
1472 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
1473 | 1484 | ||
1474 | rdev->pm.default_power_state = NULL; | 1485 | rdev->pm.default_power_state = NULL; |
1475 | 1486 | ||
1476 | if (power_info) { | 1487 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1488 | &frev, &crev, &data_offset)) { | ||
1489 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
1477 | if (frev < 4) { | 1490 | if (frev < 4) { |
1491 | /* add the i2c bus for thermal/fan chip */ | ||
1492 | if (power_info->info.ucOverdriveThermalController > 0) { | ||
1493 | DRM_INFO("Possible %s thermal controller at 0x%02x\n", | ||
1494 | thermal_controller_names[power_info->info.ucOverdriveThermalController], | ||
1495 | power_info->info.ucOverdriveControllerAddress >> 1); | ||
1496 | i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); | ||
1497 | rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); | ||
1498 | } | ||
1478 | num_modes = power_info->info.ucNumOfPowerModeEntries; | 1499 | num_modes = power_info->info.ucNumOfPowerModeEntries; |
1479 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) | 1500 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) |
1480 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; | 1501 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; |
@@ -1684,6 +1705,24 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
1684 | } | 1705 | } |
1685 | } | 1706 | } |
1686 | } else if (frev == 4) { | 1707 | } else if (frev == 4) { |
1708 | /* add the i2c bus for thermal/fan chip */ | ||
1709 | /* no support for internal controller yet */ | ||
1710 | if (power_info->info_4.sThermalController.ucType > 0) { | ||
1711 | if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || | ||
1712 | (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) { | ||
1713 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
1714 | (power_info->info_4.sThermalController.ucFanParameters & | ||
1715 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
1716 | } else { | ||
1717 | DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", | ||
1718 | pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType], | ||
1719 | power_info->info_4.sThermalController.ucI2cAddress >> 1, | ||
1720 | (power_info->info_4.sThermalController.ucFanParameters & | ||
1721 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
1722 | i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine); | ||
1723 | rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); | ||
1724 | } | ||
1725 | } | ||
1687 | for (i = 0; i < power_info->info_4.ucNumStates; i++) { | 1726 | for (i = 0; i < power_info->info_4.ucNumStates; i++) { |
1688 | mode_index = 0; | 1727 | mode_index = 0; |
1689 | power_state = (struct _ATOM_PPLIB_STATE *) | 1728 | power_state = (struct _ATOM_PPLIB_STATE *) |
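The new thermal_controller_names[] table above is indexed directly by the BIOS-supplied ucOverdriveThermalController byte before printing the "Possible %s thermal controller" message. A small standalone sketch of that lookup follows; the explicit bounds check and "UNKNOWN" fallback are illustrative additions, not something the hunk itself performs.

#include <stddef.h>
#include <stdio.h>

static const char *thermal_controller_names[] = {
	"NONE", "LM63", "ADM1032", "ADM1030",
	"MUA6649", "LM64", "F75375", "ASC7512",
};

/* Map the raw controller byte from the power table to a printable name,
 * falling back when the BIOS reports an out-of-range value. */
static const char *controller_name(unsigned int type)
{
	size_t n = sizeof(thermal_controller_names) /
		   sizeof(thermal_controller_names[0]);
	return (type < n) ? thermal_controller_names[type] : "UNKNOWN";
}

int main(void)
{
	unsigned int type = 2;        /* e.g. ADM1032 */
	unsigned int i2c_addr = 0x98; /* 8-bit address as stored in the table */

	/* the >> 1 mirrors the driver's shift to a 7-bit i2c address */
	printf("Possible %s thermal controller at 0x%02x\n",
	       controller_name(type), i2c_addr >> 1);
	return 0;
}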
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index e9ea38ece375..2becdeda68a3 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -531,10 +531,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
531 | case CHIP_RS300: | 531 | case CHIP_RS300: |
532 | switch (ddc_line) { | 532 | switch (ddc_line) { |
533 | case RADEON_GPIO_DVI_DDC: | 533 | case RADEON_GPIO_DVI_DDC: |
534 | /* in theory this should be hw capable, | 534 | i2c.hw_capable = true; |
535 | * but it doesn't seem to work | ||
536 | */ | ||
537 | i2c.hw_capable = false; | ||
538 | break; | 535 | break; |
539 | default: | 536 | default: |
540 | i2c.hw_capable = false; | 537 | i2c.hw_capable = false; |
@@ -633,6 +630,8 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) | |||
633 | p1pll->reference_div = RBIOS16(pll_info + 0x10); | 630 | p1pll->reference_div = RBIOS16(pll_info + 0x10); |
634 | p1pll->pll_out_min = RBIOS32(pll_info + 0x12); | 631 | p1pll->pll_out_min = RBIOS32(pll_info + 0x12); |
635 | p1pll->pll_out_max = RBIOS32(pll_info + 0x16); | 632 | p1pll->pll_out_max = RBIOS32(pll_info + 0x16); |
633 | p1pll->lcd_pll_out_min = p1pll->pll_out_min; | ||
634 | p1pll->lcd_pll_out_max = p1pll->pll_out_max; | ||
636 | 635 | ||
637 | if (rev > 9) { | 636 | if (rev > 9) { |
638 | p1pll->pll_in_min = RBIOS32(pll_info + 0x36); | 637 | p1pll->pll_in_min = RBIOS32(pll_info + 0x36); |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index ee0083f982d8..60d59816b94f 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -940,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector) | |||
940 | if (radeon_connector->edid) | 940 | if (radeon_connector->edid) |
941 | kfree(radeon_connector->edid); | 941 | kfree(radeon_connector->edid); |
942 | if (radeon_dig_connector->dp_i2c_bus) | 942 | if (radeon_dig_connector->dp_i2c_bus) |
943 | radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus); | 943 | radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus); |
944 | kfree(radeon_connector->con_priv); | 944 | kfree(radeon_connector->con_priv); |
945 | drm_sysfs_connector_remove(connector); | 945 | drm_sysfs_connector_remove(connector); |
946 | drm_connector_cleanup(connector); | 946 | drm_connector_cleanup(connector); |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 70ba02ed7723..f9b0fe002c0a 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -193,9 +193,11 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
193 | radeon_bo_list_fence(&parser->validated, parser->ib->fence); | 193 | radeon_bo_list_fence(&parser->validated, parser->ib->fence); |
194 | } | 194 | } |
195 | radeon_bo_list_unreserve(&parser->validated); | 195 | radeon_bo_list_unreserve(&parser->validated); |
196 | for (i = 0; i < parser->nrelocs; i++) { | 196 | if (parser->relocs != NULL) { |
197 | if (parser->relocs[i].gobj) | 197 | for (i = 0; i < parser->nrelocs; i++) { |
198 | drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); | 198 | if (parser->relocs[i].gobj) |
199 | drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); | ||
200 | } | ||
199 | } | 201 | } |
200 | kfree(parser->track); | 202 | kfree(parser->track); |
201 | kfree(parser->relocs); | 203 | kfree(parser->relocs); |
@@ -243,7 +245,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
243 | } | 245 | } |
244 | r = radeon_cs_parser_relocs(&parser); | 246 | r = radeon_cs_parser_relocs(&parser); |
245 | if (r) { | 247 | if (r) { |
246 | DRM_ERROR("Failed to parse relocation !\n"); | 248 | if (r != -ERESTARTSYS) |
249 | DRM_ERROR("Failed to parse relocation %d!\n", r); | ||
247 | radeon_cs_parser_fini(&parser, r); | 250 | radeon_cs_parser_fini(&parser, r); |
248 | mutex_unlock(&rdev->cs_mutex); | 251 | mutex_unlock(&rdev->cs_mutex); |
249 | return r; | 252 | return r; |
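The radeon_cs.c hunk above stops logging when relocation parsing fails with -ERESTARTSYS: that code means the ioctl was interrupted by a signal and will be transparently restarted, so it is not a genuine failure worth spamming the log. A hedged sketch of the same idiom, using only the error-value logic (the helper name is made up, and ERESTARTSYS is defined locally because it is kernel-internal and not exported to userspace):

#include <errno.h>
#include <stdio.h>

#ifndef ERESTARTSYS
#define ERESTARTSYS 512 /* kernel-internal restart code, for the sketch only */
#endif

/* Log only genuine failures; an interrupted-and-restarted call is silent. */
static void report_parse_result(int r)
{
	if (r == 0)
		return;
	if (r != -ERESTARTSYS)
		fprintf(stderr, "Failed to parse relocation %d!\n", r);
}

int main(void)
{
	report_parse_result(-EINVAL);      /* logged */
	report_parse_result(-ERESTARTSYS); /* suppressed */
	return 0;
}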
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e28e4ed5f720..60ec47b71642 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/vga_switcheroo.h> | 33 | #include <linux/vga_switcheroo.h> |
34 | #include "radeon_reg.h" | 34 | #include "radeon_reg.h" |
35 | #include "radeon.h" | 35 | #include "radeon.h" |
36 | #include "radeon_asic.h" | ||
37 | #include "atom.h" | 36 | #include "atom.h" |
38 | 37 | ||
39 | /* | 38 | /* |
@@ -242,6 +241,36 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
242 | 241 | ||
243 | } | 242 | } |
244 | 243 | ||
244 | void radeon_update_bandwidth_info(struct radeon_device *rdev) | ||
245 | { | ||
246 | fixed20_12 a; | ||
247 | u32 sclk, mclk; | ||
248 | |||
249 | if (rdev->flags & RADEON_IS_IGP) { | ||
250 | sclk = radeon_get_engine_clock(rdev); | ||
251 | mclk = rdev->clock.default_mclk; | ||
252 | |||
253 | a.full = rfixed_const(100); | ||
254 | rdev->pm.sclk.full = rfixed_const(sclk); | ||
255 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
256 | rdev->pm.mclk.full = rfixed_const(mclk); | ||
257 | rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); | ||
258 | |||
259 | a.full = rfixed_const(16); | ||
260 | /* core_bandwidth = sclk(Mhz) * 16 */ | ||
261 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); | ||
262 | } else { | ||
263 | sclk = radeon_get_engine_clock(rdev); | ||
264 | mclk = radeon_get_memory_clock(rdev); | ||
265 | |||
266 | a.full = rfixed_const(100); | ||
267 | rdev->pm.sclk.full = rfixed_const(sclk); | ||
268 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
269 | rdev->pm.mclk.full = rfixed_const(mclk); | ||
270 | rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); | ||
271 | } | ||
272 | } | ||
273 | |||
245 | bool radeon_boot_test_post_card(struct radeon_device *rdev) | 274 | bool radeon_boot_test_post_card(struct radeon_device *rdev) |
246 | { | 275 | { |
247 | if (radeon_card_posted(rdev)) | 276 | if (radeon_card_posted(rdev)) |
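The radeon_update_bandwidth_info() function added above converts the raw clocks into the driver's fixed20_12 representation by dividing by 100, turning the firmware's 10 kHz units into MHz. A plain-C sketch of that 20.12 fixed-point conversion follows; the helper names and the assumption of 12 fractional bits are taken as a simplification of radeon_fixed.h, not its exact implementation.

#include <stdint.h>
#include <stdio.h>

/* 20.12 fixed point: 20 integer bits, 12 fractional bits. */
typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 fx_const(uint32_t v)
{
	fixed20_12 r = { .full = v << 12 };
	return r;
}

static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
	fixed20_12 r = { .full = (uint32_t)(((uint64_t)a.full << 12) / b.full) };
	return r;
}

int main(void)
{
	uint32_t sclk_10khz = 68000;  /* a 680 MHz engine clock in 10 kHz units */
	fixed20_12 hundred = fx_const(100);
	fixed20_12 sclk = fx_div(fx_const(sclk_10khz), hundred);

	printf("sclk = %u.%03u MHz\n",
	       sclk.full >> 12, (sclk.full & 0xfff) * 1000 / 4096);
	return 0;
}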
@@ -288,181 +317,6 @@ void radeon_dummy_page_fini(struct radeon_device *rdev) | |||
288 | } | 317 | } |
289 | 318 | ||
290 | 319 | ||
291 | /* | ||
292 | * Registers accessors functions. | ||
293 | */ | ||
294 | uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg) | ||
295 | { | ||
296 | DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); | ||
297 | BUG_ON(1); | ||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | ||
302 | { | ||
303 | DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", | ||
304 | reg, v); | ||
305 | BUG_ON(1); | ||
306 | } | ||
307 | |||
308 | void radeon_register_accessor_init(struct radeon_device *rdev) | ||
309 | { | ||
310 | rdev->mc_rreg = &radeon_invalid_rreg; | ||
311 | rdev->mc_wreg = &radeon_invalid_wreg; | ||
312 | rdev->pll_rreg = &radeon_invalid_rreg; | ||
313 | rdev->pll_wreg = &radeon_invalid_wreg; | ||
314 | rdev->pciep_rreg = &radeon_invalid_rreg; | ||
315 | rdev->pciep_wreg = &radeon_invalid_wreg; | ||
316 | |||
317 | /* Don't change order as we are overridding accessor. */ | ||
318 | if (rdev->family < CHIP_RV515) { | ||
319 | rdev->pcie_reg_mask = 0xff; | ||
320 | } else { | ||
321 | rdev->pcie_reg_mask = 0x7ff; | ||
322 | } | ||
323 | /* FIXME: not sure here */ | ||
324 | if (rdev->family <= CHIP_R580) { | ||
325 | rdev->pll_rreg = &r100_pll_rreg; | ||
326 | rdev->pll_wreg = &r100_pll_wreg; | ||
327 | } | ||
328 | if (rdev->family >= CHIP_R420) { | ||
329 | rdev->mc_rreg = &r420_mc_rreg; | ||
330 | rdev->mc_wreg = &r420_mc_wreg; | ||
331 | } | ||
332 | if (rdev->family >= CHIP_RV515) { | ||
333 | rdev->mc_rreg = &rv515_mc_rreg; | ||
334 | rdev->mc_wreg = &rv515_mc_wreg; | ||
335 | } | ||
336 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { | ||
337 | rdev->mc_rreg = &rs400_mc_rreg; | ||
338 | rdev->mc_wreg = &rs400_mc_wreg; | ||
339 | } | ||
340 | if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { | ||
341 | rdev->mc_rreg = &rs690_mc_rreg; | ||
342 | rdev->mc_wreg = &rs690_mc_wreg; | ||
343 | } | ||
344 | if (rdev->family == CHIP_RS600) { | ||
345 | rdev->mc_rreg = &rs600_mc_rreg; | ||
346 | rdev->mc_wreg = &rs600_mc_wreg; | ||
347 | } | ||
348 | if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { | ||
349 | rdev->pciep_rreg = &r600_pciep_rreg; | ||
350 | rdev->pciep_wreg = &r600_pciep_wreg; | ||
351 | } | ||
352 | } | ||
353 | |||
354 | |||
355 | /* | ||
356 | * ASIC | ||
357 | */ | ||
358 | int radeon_asic_init(struct radeon_device *rdev) | ||
359 | { | ||
360 | radeon_register_accessor_init(rdev); | ||
361 | switch (rdev->family) { | ||
362 | case CHIP_R100: | ||
363 | case CHIP_RV100: | ||
364 | case CHIP_RS100: | ||
365 | case CHIP_RV200: | ||
366 | case CHIP_RS200: | ||
367 | rdev->asic = &r100_asic; | ||
368 | break; | ||
369 | case CHIP_R200: | ||
370 | case CHIP_RV250: | ||
371 | case CHIP_RS300: | ||
372 | case CHIP_RV280: | ||
373 | rdev->asic = &r200_asic; | ||
374 | break; | ||
375 | case CHIP_R300: | ||
376 | case CHIP_R350: | ||
377 | case CHIP_RV350: | ||
378 | case CHIP_RV380: | ||
379 | if (rdev->flags & RADEON_IS_PCIE) | ||
380 | rdev->asic = &r300_asic_pcie; | ||
381 | else | ||
382 | rdev->asic = &r300_asic; | ||
383 | break; | ||
384 | case CHIP_R420: | ||
385 | case CHIP_R423: | ||
386 | case CHIP_RV410: | ||
387 | rdev->asic = &r420_asic; | ||
388 | break; | ||
389 | case CHIP_RS400: | ||
390 | case CHIP_RS480: | ||
391 | rdev->asic = &rs400_asic; | ||
392 | break; | ||
393 | case CHIP_RS600: | ||
394 | rdev->asic = &rs600_asic; | ||
395 | break; | ||
396 | case CHIP_RS690: | ||
397 | case CHIP_RS740: | ||
398 | rdev->asic = &rs690_asic; | ||
399 | break; | ||
400 | case CHIP_RV515: | ||
401 | rdev->asic = &rv515_asic; | ||
402 | break; | ||
403 | case CHIP_R520: | ||
404 | case CHIP_RV530: | ||
405 | case CHIP_RV560: | ||
406 | case CHIP_RV570: | ||
407 | case CHIP_R580: | ||
408 | rdev->asic = &r520_asic; | ||
409 | break; | ||
410 | case CHIP_R600: | ||
411 | case CHIP_RV610: | ||
412 | case CHIP_RV630: | ||
413 | case CHIP_RV620: | ||
414 | case CHIP_RV635: | ||
415 | case CHIP_RV670: | ||
416 | case CHIP_RS780: | ||
417 | case CHIP_RS880: | ||
418 | rdev->asic = &r600_asic; | ||
419 | break; | ||
420 | case CHIP_RV770: | ||
421 | case CHIP_RV730: | ||
422 | case CHIP_RV710: | ||
423 | case CHIP_RV740: | ||
424 | rdev->asic = &rv770_asic; | ||
425 | break; | ||
426 | case CHIP_CEDAR: | ||
427 | case CHIP_REDWOOD: | ||
428 | case CHIP_JUNIPER: | ||
429 | case CHIP_CYPRESS: | ||
430 | case CHIP_HEMLOCK: | ||
431 | rdev->asic = &evergreen_asic; | ||
432 | break; | ||
433 | default: | ||
434 | /* FIXME: not supported yet */ | ||
435 | return -EINVAL; | ||
436 | } | ||
437 | |||
438 | if (rdev->flags & RADEON_IS_IGP) { | ||
439 | rdev->asic->get_memory_clock = NULL; | ||
440 | rdev->asic->set_memory_clock = NULL; | ||
441 | } | ||
442 | |||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | |||
447 | /* | ||
448 | * Wrapper around modesetting bits. | ||
449 | */ | ||
450 | int radeon_clocks_init(struct radeon_device *rdev) | ||
451 | { | ||
452 | int r; | ||
453 | |||
454 | r = radeon_static_clocks_init(rdev->ddev); | ||
455 | if (r) { | ||
456 | return r; | ||
457 | } | ||
458 | DRM_INFO("Clocks initialized !\n"); | ||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | void radeon_clocks_fini(struct radeon_device *rdev) | ||
463 | { | ||
464 | } | ||
465 | |||
466 | /* ATOM accessor methods */ | 320 | /* ATOM accessor methods */ |
467 | static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) | 321 | static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) |
468 | { | 322 | { |
@@ -567,29 +421,6 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) | |||
567 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 421 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
568 | } | 422 | } |
569 | 423 | ||
570 | void radeon_agp_disable(struct radeon_device *rdev) | ||
571 | { | ||
572 | rdev->flags &= ~RADEON_IS_AGP; | ||
573 | if (rdev->family >= CHIP_R600) { | ||
574 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
575 | rdev->flags |= RADEON_IS_PCIE; | ||
576 | } else if (rdev->family >= CHIP_RV515 || | ||
577 | rdev->family == CHIP_RV380 || | ||
578 | rdev->family == CHIP_RV410 || | ||
579 | rdev->family == CHIP_R423) { | ||
580 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
581 | rdev->flags |= RADEON_IS_PCIE; | ||
582 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | ||
583 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | ||
584 | } else { | ||
585 | DRM_INFO("Forcing AGP to PCI mode\n"); | ||
586 | rdev->flags |= RADEON_IS_PCI; | ||
587 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | ||
588 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | ||
589 | } | ||
590 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
591 | } | ||
592 | |||
593 | void radeon_check_arguments(struct radeon_device *rdev) | 424 | void radeon_check_arguments(struct radeon_device *rdev) |
594 | { | 425 | { |
595 | /* vramlimit must be a power of two */ | 426 | /* vramlimit must be a power of two */ |
@@ -731,6 +562,14 @@ int radeon_device_init(struct radeon_device *rdev, | |||
731 | return r; | 562 | return r; |
732 | radeon_check_arguments(rdev); | 563 | radeon_check_arguments(rdev); |
733 | 564 | ||
565 | /* all of the newer IGP chips have an internal gart | ||
566 | * However some rs4xx report as AGP, so remove that here. | ||
567 | */ | ||
568 | if ((rdev->family >= CHIP_RS400) && | ||
569 | (rdev->flags & RADEON_IS_IGP)) { | ||
570 | rdev->flags &= ~RADEON_IS_AGP; | ||
571 | } | ||
572 | |||
734 | if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { | 573 | if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { |
735 | radeon_agp_disable(rdev); | 574 | radeon_agp_disable(rdev); |
736 | } | 575 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ba8d806dcf39..b8d672828246 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -368,10 +368,9 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) | |||
368 | 368 | ||
369 | if (rdev->bios) { | 369 | if (rdev->bios) { |
370 | if (rdev->is_atom_bios) { | 370 | if (rdev->is_atom_bios) { |
371 | if (rdev->family >= CHIP_R600) | 371 | ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); |
372 | if (ret == false) | ||
372 | ret = radeon_get_atom_connector_info_from_object_table(dev); | 373 | ret = radeon_get_atom_connector_info_from_object_table(dev); |
373 | else | ||
374 | ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); | ||
375 | } else { | 374 | } else { |
376 | ret = radeon_get_legacy_connector_info_from_bios(dev); | 375 | ret = radeon_get_legacy_connector_info_from_bios(dev); |
377 | if (ret == false) | 376 | if (ret == false) |
@@ -469,10 +468,19 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
469 | uint32_t best_error = 0xffffffff; | 468 | uint32_t best_error = 0xffffffff; |
470 | uint32_t best_vco_diff = 1; | 469 | uint32_t best_vco_diff = 1; |
471 | uint32_t post_div; | 470 | uint32_t post_div; |
471 | u32 pll_out_min, pll_out_max; | ||
472 | 472 | ||
473 | DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); | 473 | DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); |
474 | freq = freq * 1000; | 474 | freq = freq * 1000; |
475 | 475 | ||
476 | if (pll->flags & RADEON_PLL_IS_LCD) { | ||
477 | pll_out_min = pll->lcd_pll_out_min; | ||
478 | pll_out_max = pll->lcd_pll_out_max; | ||
479 | } else { | ||
480 | pll_out_min = pll->pll_out_min; | ||
481 | pll_out_max = pll->pll_out_max; | ||
482 | } | ||
483 | |||
476 | if (pll->flags & RADEON_PLL_USE_REF_DIV) | 484 | if (pll->flags & RADEON_PLL_USE_REF_DIV) |
477 | min_ref_div = max_ref_div = pll->reference_div; | 485 | min_ref_div = max_ref_div = pll->reference_div; |
478 | else { | 486 | else { |
@@ -536,10 +544,10 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
536 | tmp = (uint64_t)pll->reference_freq * feedback_div; | 544 | tmp = (uint64_t)pll->reference_freq * feedback_div; |
537 | vco = radeon_div(tmp, ref_div); | 545 | vco = radeon_div(tmp, ref_div); |
538 | 546 | ||
539 | if (vco < pll->pll_out_min) { | 547 | if (vco < pll_out_min) { |
540 | min_feed_div = feedback_div + 1; | 548 | min_feed_div = feedback_div + 1; |
541 | continue; | 549 | continue; |
542 | } else if (vco > pll->pll_out_max) { | 550 | } else if (vco > pll_out_max) { |
543 | max_feed_div = feedback_div; | 551 | max_feed_div = feedback_div; |
544 | continue; | 552 | continue; |
545 | } | 553 | } |
@@ -675,6 +683,15 @@ calc_fb_ref_div(struct radeon_pll *pll, | |||
675 | { | 683 | { |
676 | fixed20_12 ffreq, max_error, error, pll_out, a; | 684 | fixed20_12 ffreq, max_error, error, pll_out, a; |
677 | u32 vco; | 685 | u32 vco; |
686 | u32 pll_out_min, pll_out_max; | ||
687 | |||
688 | if (pll->flags & RADEON_PLL_IS_LCD) { | ||
689 | pll_out_min = pll->lcd_pll_out_min; | ||
690 | pll_out_max = pll->lcd_pll_out_max; | ||
691 | } else { | ||
692 | pll_out_min = pll->pll_out_min; | ||
693 | pll_out_max = pll->pll_out_max; | ||
694 | } | ||
678 | 695 | ||
679 | ffreq.full = rfixed_const(freq); | 696 | ffreq.full = rfixed_const(freq); |
680 | /* max_error = ffreq * 0.0025; */ | 697 | /* max_error = ffreq * 0.0025; */ |
@@ -686,7 +703,7 @@ calc_fb_ref_div(struct radeon_pll *pll, | |||
686 | vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); | 703 | vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); |
687 | vco = vco / ((*ref_div) * 10); | 704 | vco = vco / ((*ref_div) * 10); |
688 | 705 | ||
689 | if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max)) | 706 | if ((vco < pll_out_min) || (vco > pll_out_max)) |
690 | continue; | 707 | continue; |
691 | 708 | ||
692 | /* pll_out = vco / post_div; */ | 709 | /* pll_out = vco / post_div; */ |
@@ -714,6 +731,15 @@ static void radeon_compute_pll_new(struct radeon_pll *pll, | |||
714 | { | 731 | { |
715 | u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; | 732 | u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; |
716 | u32 best_freq = 0, vco_frequency; | 733 | u32 best_freq = 0, vco_frequency; |
734 | u32 pll_out_min, pll_out_max; | ||
735 | |||
736 | if (pll->flags & RADEON_PLL_IS_LCD) { | ||
737 | pll_out_min = pll->lcd_pll_out_min; | ||
738 | pll_out_max = pll->lcd_pll_out_max; | ||
739 | } else { | ||
740 | pll_out_min = pll->pll_out_min; | ||
741 | pll_out_max = pll->pll_out_max; | ||
742 | } | ||
717 | 743 | ||
718 | /* freq = freq / 10; */ | 744 | /* freq = freq / 10; */ |
719 | do_div(freq, 10); | 745 | do_div(freq, 10); |
@@ -724,7 +750,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll, | |||
724 | goto done; | 750 | goto done; |
725 | 751 | ||
726 | vco_frequency = freq * post_div; | 752 | vco_frequency = freq * post_div; |
727 | if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) | 753 | if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) |
728 | goto done; | 754 | goto done; |
729 | 755 | ||
730 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { | 756 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { |
@@ -749,7 +775,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll, | |||
749 | continue; | 775 | continue; |
750 | 776 | ||
751 | vco_frequency = freq * post_div; | 777 | vco_frequency = freq * post_div; |
752 | if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) | 778 | if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) |
753 | continue; | 779 | continue; |
754 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { | 780 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { |
755 | ref_div = pll->reference_div; | 781 | ref_div = pll->reference_div; |
@@ -945,6 +971,23 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) | |||
945 | return 0; | 971 | return 0; |
946 | } | 972 | } |
947 | 973 | ||
974 | void radeon_update_display_priority(struct radeon_device *rdev) | ||
975 | { | ||
976 | /* adjustment options for the display watermarks */ | ||
977 | if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) { | ||
978 | /* set display priority to high for r3xx, rv515 chips | ||
979 | * this avoids flickering due to underflow to the | ||
980 | * display controllers during heavy acceleration. | ||
981 | */ | ||
982 | if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) | ||
983 | rdev->disp_priority = 2; | ||
984 | else | ||
985 | rdev->disp_priority = 0; | ||
986 | } else | ||
987 | rdev->disp_priority = radeon_disp_priority; | ||
988 | |||
989 | } | ||
990 | |||
948 | int radeon_modeset_init(struct radeon_device *rdev) | 991 | int radeon_modeset_init(struct radeon_device *rdev) |
949 | { | 992 | { |
950 | int i; | 993 | int i; |
@@ -976,15 +1019,6 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
976 | radeon_combios_check_hardcoded_edid(rdev); | 1019 | radeon_combios_check_hardcoded_edid(rdev); |
977 | } | 1020 | } |
978 | 1021 | ||
979 | if (rdev->flags & RADEON_SINGLE_CRTC) | ||
980 | rdev->num_crtc = 1; | ||
981 | else { | ||
982 | if (ASIC_IS_DCE4(rdev)) | ||
983 | rdev->num_crtc = 6; | ||
984 | else | ||
985 | rdev->num_crtc = 2; | ||
986 | } | ||
987 | |||
988 | /* allocate crtcs */ | 1022 | /* allocate crtcs */ |
989 | for (i = 0; i < rdev->num_crtc; i++) { | 1023 | for (i = 0; i < rdev->num_crtc; i++) { |
990 | radeon_crtc_init(rdev->ddev, i); | 1024 | radeon_crtc_init(rdev->ddev, i); |
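The three PLL hunks in radeon_display.c above all add the same prologue: when RADEON_PLL_IS_LCD is set, the candidate VCO is validated against a separate lcd_pll_out_min/max window instead of the generic pll_out_min/max. A stripped-down sketch of that selection is below; the flag value and the limit numbers are illustrative, not the driver's real figures.

#include <stdint.h>
#include <stdio.h>

#define RADEON_PLL_IS_LCD (1 << 0)   /* illustrative flag bit */

struct pll_limits {
	uint32_t flags;
	uint32_t pll_out_min, pll_out_max;         /* kHz */
	uint32_t lcd_pll_out_min, lcd_pll_out_max; /* kHz */
};

/* Pick the VCO window: LCD panels get their own range. */
static void pick_vco_window(const struct pll_limits *pll,
			    uint32_t *out_min, uint32_t *out_max)
{
	if (pll->flags & RADEON_PLL_IS_LCD) {
		*out_min = pll->lcd_pll_out_min;
		*out_max = pll->lcd_pll_out_max;
	} else {
		*out_min = pll->pll_out_min;
		*out_max = pll->pll_out_max;
	}
}

int main(void)
{
	struct pll_limits pll = {
		.flags = RADEON_PLL_IS_LCD,
		.pll_out_min = 64800, .pll_out_max = 1000000,
		.lcd_pll_out_min = 32000, .lcd_pll_out_max = 1000000,
	};
	uint32_t lo, hi, vco = 40000;

	pick_vco_window(&pll, &lo, &hi);
	printf("vco %u kHz is %s the range [%u, %u]\n",
	       vco, (vco >= lo && vco <= hi) ? "inside" : "outside", lo, hi);
	return 0;
}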
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 6eec0ece6a6c..055a51732dcb 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -42,9 +42,10 @@ | |||
42 | * KMS wrapper. | 42 | * KMS wrapper. |
43 | * - 2.0.0 - initial interface | 43 | * - 2.0.0 - initial interface |
44 | * - 2.1.0 - add square tiling interface | 44 | * - 2.1.0 - add square tiling interface |
45 | * - 2.2.0 - add r6xx/r7xx const buffer support | ||
45 | */ | 46 | */ |
46 | #define KMS_DRIVER_MAJOR 2 | 47 | #define KMS_DRIVER_MAJOR 2 |
47 | #define KMS_DRIVER_MINOR 1 | 48 | #define KMS_DRIVER_MINOR 2 |
48 | #define KMS_DRIVER_PATCHLEVEL 0 | 49 | #define KMS_DRIVER_PATCHLEVEL 0 |
49 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 50 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
50 | int radeon_driver_unload_kms(struct drm_device *dev); | 51 | int radeon_driver_unload_kms(struct drm_device *dev); |
@@ -91,6 +92,8 @@ int radeon_tv = 1; | |||
91 | int radeon_new_pll = -1; | 92 | int radeon_new_pll = -1; |
92 | int radeon_dynpm = -1; | 93 | int radeon_dynpm = -1; |
93 | int radeon_audio = 1; | 94 | int radeon_audio = 1; |
95 | int radeon_disp_priority = 0; | ||
96 | int radeon_hw_i2c = 0; | ||
94 | 97 | ||
95 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 98 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
96 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 99 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
@@ -134,6 +137,12 @@ module_param_named(dynpm, radeon_dynpm, int, 0444); | |||
134 | MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); | 137 | MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); |
135 | module_param_named(audio, radeon_audio, int, 0444); | 138 | module_param_named(audio, radeon_audio, int, 0444); |
136 | 139 | ||
140 | MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)"); | ||
141 | module_param_named(disp_priority, radeon_disp_priority, int, 0444); | ||
142 | |||
143 | MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); | ||
144 | module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); | ||
145 | |||
137 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) | 146 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) |
138 | { | 147 | { |
139 | drm_radeon_private_t *dev_priv = dev->dev_private; | 148 | drm_radeon_private_t *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index ec55f2b23c22..448eba89d1e6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -107,9 +107,10 @@ | |||
107 | * 1.30- Add support for occlusion queries | 107 | * 1.30- Add support for occlusion queries |
108 | * 1.31- Add support for num Z pipes from GET_PARAM | 108 | * 1.31- Add support for num Z pipes from GET_PARAM |
109 | * 1.32- fixes for rv740 setup | 109 | * 1.32- fixes for rv740 setup |
110 | * 1.33- Add r6xx/r7xx const buffer support | ||
110 | */ | 111 | */ |
111 | #define DRIVER_MAJOR 1 | 112 | #define DRIVER_MAJOR 1 |
112 | #define DRIVER_MINOR 32 | 113 | #define DRIVER_MINOR 33 |
113 | #define DRIVER_PATCHLEVEL 0 | 114 | #define DRIVER_PATCHLEVEL 0 |
114 | 115 | ||
115 | enum radeon_cp_microcode_version { | 116 | enum radeon_cp_microcode_version { |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index bc926ea0a530..52d6f96f274b 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -302,7 +302,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
302 | } | 302 | } |
303 | 303 | ||
304 | if (ASIC_IS_DCE3(rdev) && | 304 | if (ASIC_IS_DCE3(rdev) && |
305 | (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) { | 305 | (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) { |
306 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 306 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
307 | radeon_dp_set_link_config(connector, mode); | 307 | radeon_dp_set_link_config(connector, mode); |
308 | } | 308 | } |
@@ -519,7 +519,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
519 | break; | 519 | break; |
520 | } | 520 | } |
521 | 521 | ||
522 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 522 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
523 | return; | ||
523 | 524 | ||
524 | switch (frev) { | 525 | switch (frev) { |
525 | case 1: | 526 | case 1: |
@@ -593,7 +594,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
593 | } | 594 | } |
594 | 595 | ||
595 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 596 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
596 | r600_hdmi_enable(encoder, hdmi_detected); | ||
597 | } | 597 | } |
598 | 598 | ||
599 | int | 599 | int |
@@ -708,7 +708,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
708 | struct radeon_connector_atom_dig *dig_connector = | 708 | struct radeon_connector_atom_dig *dig_connector = |
709 | radeon_get_atom_connector_priv_from_encoder(encoder); | 709 | radeon_get_atom_connector_priv_from_encoder(encoder); |
710 | union dig_encoder_control args; | 710 | union dig_encoder_control args; |
711 | int index = 0, num = 0; | 711 | int index = 0; |
712 | uint8_t frev, crev; | 712 | uint8_t frev, crev; |
713 | 713 | ||
714 | if (!dig || !dig_connector) | 714 | if (!dig || !dig_connector) |
@@ -724,9 +724,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
724 | else | 724 | else |
725 | index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); | 725 | index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); |
726 | } | 726 | } |
727 | num = dig->dig_encoder + 1; | ||
728 | 727 | ||
729 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 728 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
729 | return; | ||
730 | 730 | ||
731 | args.v1.ucAction = action; | 731 | args.v1.ucAction = action; |
732 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 732 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
@@ -785,7 +785,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
785 | struct drm_connector *connector; | 785 | struct drm_connector *connector; |
786 | struct radeon_connector *radeon_connector; | 786 | struct radeon_connector *radeon_connector; |
787 | union dig_transmitter_control args; | 787 | union dig_transmitter_control args; |
788 | int index = 0, num = 0; | 788 | int index = 0; |
789 | uint8_t frev, crev; | 789 | uint8_t frev, crev; |
790 | bool is_dp = false; | 790 | bool is_dp = false; |
791 | int pll_id = 0; | 791 | int pll_id = 0; |
@@ -814,7 +814,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
814 | } | 814 | } |
815 | } | 815 | } |
816 | 816 | ||
817 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 817 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
818 | return; | ||
818 | 819 | ||
819 | args.v1.ucAction = action; | 820 | args.v1.ucAction = action; |
820 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | 821 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { |
@@ -860,15 +861,12 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
860 | switch (radeon_encoder->encoder_id) { | 861 | switch (radeon_encoder->encoder_id) { |
861 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 862 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
862 | args.v3.acConfig.ucTransmitterSel = 0; | 863 | args.v3.acConfig.ucTransmitterSel = 0; |
863 | num = 0; | ||
864 | break; | 864 | break; |
865 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 865 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
866 | args.v3.acConfig.ucTransmitterSel = 1; | 866 | args.v3.acConfig.ucTransmitterSel = 1; |
867 | num = 1; | ||
868 | break; | 867 | break; |
869 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 868 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
870 | args.v3.acConfig.ucTransmitterSel = 2; | 869 | args.v3.acConfig.ucTransmitterSel = 2; |
871 | num = 2; | ||
872 | break; | 870 | break; |
873 | } | 871 | } |
874 | 872 | ||
@@ -879,23 +877,19 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
879 | args.v3.acConfig.fCoherentMode = 1; | 877 | args.v3.acConfig.fCoherentMode = 1; |
880 | } | 878 | } |
881 | } else if (ASIC_IS_DCE32(rdev)) { | 879 | } else if (ASIC_IS_DCE32(rdev)) { |
882 | if (dig->dig_encoder == 1) | 880 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; |
883 | args.v2.acConfig.ucEncoderSel = 1; | ||
884 | if (dig_connector->linkb) | 881 | if (dig_connector->linkb) |
885 | args.v2.acConfig.ucLinkSel = 1; | 882 | args.v2.acConfig.ucLinkSel = 1; |
886 | 883 | ||
887 | switch (radeon_encoder->encoder_id) { | 884 | switch (radeon_encoder->encoder_id) { |
888 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 885 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
889 | args.v2.acConfig.ucTransmitterSel = 0; | 886 | args.v2.acConfig.ucTransmitterSel = 0; |
890 | num = 0; | ||
891 | break; | 887 | break; |
892 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 888 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
893 | args.v2.acConfig.ucTransmitterSel = 1; | 889 | args.v2.acConfig.ucTransmitterSel = 1; |
894 | num = 1; | ||
895 | break; | 890 | break; |
896 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 891 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
897 | args.v2.acConfig.ucTransmitterSel = 2; | 892 | args.v2.acConfig.ucTransmitterSel = 2; |
898 | num = 2; | ||
899 | break; | 893 | break; |
900 | } | 894 | } |
901 | 895 | ||
@@ -913,31 +907,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
913 | else | 907 | else |
914 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; | 908 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; |
915 | 909 | ||
916 | switch (radeon_encoder->encoder_id) { | 910 | if ((rdev->flags & RADEON_IS_IGP) && |
917 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 911 | (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { |
918 | if (rdev->flags & RADEON_IS_IGP) { | 912 | if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { |
919 | if (radeon_encoder->pixel_clock > 165000) { | 913 | if (dig_connector->igp_lane_info & 0x1) |
920 | if (dig_connector->igp_lane_info & 0x3) | 914 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; |
921 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; | 915 | else if (dig_connector->igp_lane_info & 0x2) |
922 | else if (dig_connector->igp_lane_info & 0xc) | 916 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; |
923 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; | 917 | else if (dig_connector->igp_lane_info & 0x4) |
924 | } else { | 918 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; |
925 | if (dig_connector->igp_lane_info & 0x1) | 919 | else if (dig_connector->igp_lane_info & 0x8) |
926 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; | 920 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; |
927 | else if (dig_connector->igp_lane_info & 0x2) | 921 | } else { |
928 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; | 922 | if (dig_connector->igp_lane_info & 0x3) |
929 | else if (dig_connector->igp_lane_info & 0x4) | 923 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; |
930 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; | 924 | else if (dig_connector->igp_lane_info & 0xc) |
931 | else if (dig_connector->igp_lane_info & 0x8) | 925 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; |
932 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; | ||
933 | } | ||
934 | } | 926 | } |
935 | break; | ||
936 | } | 927 | } |
937 | 928 | ||
938 | if (radeon_encoder->pixel_clock > 165000) | ||
939 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; | ||
940 | |||
941 | if (dig_connector->linkb) | 929 | if (dig_connector->linkb) |
942 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; | 930 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; |
943 | else | 931 | else |
@@ -948,6 +936,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
948 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 936 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
949 | if (dig->coherent_mode) | 937 | if (dig->coherent_mode) |
950 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; | 938 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; |
939 | if (radeon_encoder->pixel_clock > 165000) | ||
940 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; | ||
951 | } | 941 | } |
952 | } | 942 | } |
953 | 943 | ||
@@ -1054,16 +1044,25 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1054 | if (is_dig) { | 1044 | if (is_dig) { |
1055 | switch (mode) { | 1045 | switch (mode) { |
1056 | case DRM_MODE_DPMS_ON: | 1046 | case DRM_MODE_DPMS_ON: |
1057 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | 1047 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
1058 | { | ||
1059 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1048 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
1049 | |||
1060 | dp_link_train(encoder, connector); | 1050 | dp_link_train(encoder, connector); |
1051 | if (ASIC_IS_DCE4(rdev)) | ||
1052 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); | ||
1061 | } | 1053 | } |
1054 | if (!ASIC_IS_DCE4(rdev)) | ||
1055 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
1062 | break; | 1056 | break; |
1063 | case DRM_MODE_DPMS_STANDBY: | 1057 | case DRM_MODE_DPMS_STANDBY: |
1064 | case DRM_MODE_DPMS_SUSPEND: | 1058 | case DRM_MODE_DPMS_SUSPEND: |
1065 | case DRM_MODE_DPMS_OFF: | 1059 | case DRM_MODE_DPMS_OFF: |
1066 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); | 1060 | if (!ASIC_IS_DCE4(rdev)) |
1061 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); | ||
1062 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | ||
1063 | if (ASIC_IS_DCE4(rdev)) | ||
1064 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); | ||
1065 | } | ||
1067 | break; | 1066 | break; |
1068 | } | 1067 | } |
1069 | } else { | 1068 | } else { |
@@ -1104,7 +1103,8 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
1104 | 1103 | ||
1105 | memset(&args, 0, sizeof(args)); | 1104 | memset(&args, 0, sizeof(args)); |
1106 | 1105 | ||
1107 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 1106 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
1107 | return; | ||
1108 | 1108 | ||
1109 | switch (frev) { | 1109 | switch (frev) { |
1110 | case 1: | 1110 | case 1: |
@@ -1216,6 +1216,9 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
1216 | } | 1216 | } |
1217 | 1217 | ||
1218 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1218 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
1219 | |||
1220 | /* update scratch regs with new routing */ | ||
1221 | radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | ||
1219 | } | 1222 | } |
1220 | 1223 | ||
1221 | static void | 1224 | static void |
@@ -1326,19 +1329,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1326 | struct drm_device *dev = encoder->dev; | 1329 | struct drm_device *dev = encoder->dev; |
1327 | struct radeon_device *rdev = dev->dev_private; | 1330 | struct radeon_device *rdev = dev->dev_private; |
1328 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1331 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1329 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1330 | 1332 | ||
1331 | if (radeon_encoder->active_device & | ||
1332 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { | ||
1333 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
1334 | if (dig) | ||
1335 | dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); | ||
1336 | } | ||
1337 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1333 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1338 | 1334 | ||
1339 | radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | ||
1340 | atombios_set_encoder_crtc_source(encoder); | ||
1341 | |||
1342 | if (ASIC_IS_AVIVO(rdev)) { | 1335 | if (ASIC_IS_AVIVO(rdev)) { |
1343 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) | 1336 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) |
1344 | atombios_yuv_setup(encoder, true); | 1337 | atombios_yuv_setup(encoder, true); |
@@ -1396,9 +1389,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1396 | } | 1389 | } |
1397 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 1390 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
1398 | 1391 | ||
1399 | /* XXX */ | 1392 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { |
1400 | if (!ASIC_IS_DCE4(rdev)) | 1393 | r600_hdmi_enable(encoder); |
1401 | r600_hdmi_setmode(encoder, adjusted_mode); | 1394 | r600_hdmi_setmode(encoder, adjusted_mode); |
1395 | } | ||
1402 | } | 1396 | } |
1403 | 1397 | ||
1404 | static bool | 1398 | static bool |
@@ -1418,7 +1412,8 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn | |||
1418 | 1412 | ||
1419 | memset(&args, 0, sizeof(args)); | 1413 | memset(&args, 0, sizeof(args)); |
1420 | 1414 | ||
1421 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 1415 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
1416 | return false; | ||
1422 | 1417 | ||
1423 | args.sDacload.ucMisc = 0; | 1418 | args.sDacload.ucMisc = 0; |
1424 | 1419 | ||
@@ -1492,8 +1487,20 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec | |||
1492 | 1487 | ||
1493 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | 1488 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) |
1494 | { | 1489 | { |
1490 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1491 | |||
1492 | if (radeon_encoder->active_device & | ||
1493 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { | ||
1494 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
1495 | if (dig) | ||
1496 | dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); | ||
1497 | } | ||
1498 | |||
1495 | radeon_atom_output_lock(encoder, true); | 1499 | radeon_atom_output_lock(encoder, true); |
1496 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1500 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1501 | |||
1502 | /* this is needed for the pll/ss setup to work correctly in some cases */ | ||
1503 | atombios_set_encoder_crtc_source(encoder); | ||
1497 | } | 1504 | } |
1498 | 1505 | ||
1499 | static void radeon_atom_encoder_commit(struct drm_encoder *encoder) | 1506 | static void radeon_atom_encoder_commit(struct drm_encoder *encoder) |
@@ -1509,6 +1516,8 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1509 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1516 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1510 | 1517 | ||
1511 | if (radeon_encoder_is_digital(encoder)) { | 1518 | if (radeon_encoder_is_digital(encoder)) { |
1519 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | ||
1520 | r600_hdmi_disable(encoder); | ||
1512 | dig = radeon_encoder->enc_priv; | 1521 | dig = radeon_encoder->enc_priv; |
1513 | dig->dig_encoder = -1; | 1522 | dig->dig_encoder = -1; |
1514 | } | 1523 | } |
@@ -1659,6 +1668,4 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
1659 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); | 1668 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); |
1660 | break; | 1669 | break; |
1661 | } | 1670 | } |
1662 | |||
1663 | r600_hdmi_init(encoder); | ||
1664 | } | 1671 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 4ae50c19589f..5def6f5dff38 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -59,6 +59,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | |||
59 | return false; | 59 | return false; |
60 | } | 60 | } |
61 | 61 | ||
62 | /* bit banging i2c */ | ||
62 | 63 | ||
63 | static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) | 64 | static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) |
64 | { | 65 | { |
@@ -181,13 +182,30 @@ static void set_data(void *i2c_priv, int data) | |||
181 | WREG32(rec->en_data_reg, val); | 182 | WREG32(rec->en_data_reg, val); |
182 | } | 183 | } |
183 | 184 | ||
185 | static int pre_xfer(struct i2c_adapter *i2c_adap) | ||
186 | { | ||
187 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
188 | |||
189 | radeon_i2c_do_lock(i2c, 1); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | static void post_xfer(struct i2c_adapter *i2c_adap) | ||
195 | { | ||
196 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
197 | |||
198 | radeon_i2c_do_lock(i2c, 0); | ||
199 | } | ||
200 | |||
201 | /* hw i2c */ | ||
202 | |||
184 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | 203 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) |
185 | { | 204 | { |
186 | struct radeon_pll *spll = &rdev->clock.spll; | ||
187 | u32 sclk = radeon_get_engine_clock(rdev); | 205 | u32 sclk = radeon_get_engine_clock(rdev); |
188 | u32 prescale = 0; | 206 | u32 prescale = 0; |
189 | u32 n, m; | 207 | u32 nm; |
190 | u8 loop; | 208 | u8 n, m, loop; |
191 | int i2c_clock; | 209 | int i2c_clock; |
192 | 210 | ||
193 | switch (rdev->family) { | 211 | switch (rdev->family) { |
@@ -203,13 +221,15 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | |||
203 | case CHIP_R300: | 221 | case CHIP_R300: |
204 | case CHIP_R350: | 222 | case CHIP_R350: |
205 | case CHIP_RV350: | 223 | case CHIP_RV350: |
206 | n = (spll->reference_freq) / (4 * 6); | 224 | i2c_clock = 60; |
225 | nm = (sclk * 10) / (i2c_clock * 4); | ||
207 | for (loop = 1; loop < 255; loop++) { | 226 | for (loop = 1; loop < 255; loop++) { |
208 | if ((loop * (loop - 1)) > n) | 227 | if ((nm / loop) < loop) |
209 | break; | 228 | break; |
210 | } | 229 | } |
211 | m = loop - 1; | 230 | n = loop - 1; |
212 | prescale = m | (loop << 8); | 231 | m = loop - 2; |
232 | prescale = m | (n << 8); | ||
213 | break; | 233 | break; |
214 | case CHIP_RV380: | 234 | case CHIP_RV380: |
215 | case CHIP_RS400: | 235 | case CHIP_RS400: |
@@ -217,7 +237,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | |||
217 | case CHIP_R420: | 237 | case CHIP_R420: |
218 | case CHIP_R423: | 238 | case CHIP_R423: |
219 | case CHIP_RV410: | 239 | case CHIP_RV410: |
220 | sclk = radeon_get_engine_clock(rdev); | ||
221 | prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128; | 240 | prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128; |
222 | break; | 241 | break; |
223 | case CHIP_RS600: | 242 | case CHIP_RS600: |
@@ -232,7 +251,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | |||
232 | case CHIP_RV570: | 251 | case CHIP_RV570: |
233 | case CHIP_R580: | 252 | case CHIP_R580: |
234 | i2c_clock = 50; | 253 | i2c_clock = 50; |
235 | sclk = radeon_get_engine_clock(rdev); | ||
236 | if (rdev->family == CHIP_R520) | 254 | if (rdev->family == CHIP_R520) |
237 | prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock)); | 255 | prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock)); |
238 | else | 256 | else |
@@ -291,6 +309,7 @@ static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
291 | prescale = radeon_get_i2c_prescale(rdev); | 309 | prescale = radeon_get_i2c_prescale(rdev); |
292 | 310 | ||
293 | reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) | | 311 | reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) | |
312 | RADEON_I2C_DRIVE_EN | | ||
294 | RADEON_I2C_START | | 313 | RADEON_I2C_START | |
295 | RADEON_I2C_STOP | | 314 | RADEON_I2C_STOP | |
296 | RADEON_I2C_GO); | 315 | RADEON_I2C_GO); |
@@ -757,26 +776,13 @@ done: | |||
757 | return ret; | 776 | return ret; |
758 | } | 777 | } |
759 | 778 | ||
760 | static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap, | 779 | static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap, |
761 | struct i2c_msg *msgs, int num) | 780 | struct i2c_msg *msgs, int num) |
762 | { | 781 | { |
763 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | 782 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); |
764 | int ret; | ||
765 | |||
766 | radeon_i2c_do_lock(i2c, 1); | ||
767 | ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num); | ||
768 | radeon_i2c_do_lock(i2c, 0); | ||
769 | |||
770 | return ret; | ||
771 | } | ||
772 | |||
773 | static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | ||
774 | struct i2c_msg *msgs, int num) | ||
775 | { | ||
776 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
777 | struct radeon_device *rdev = i2c->dev->dev_private; | 783 | struct radeon_device *rdev = i2c->dev->dev_private; |
778 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | 784 | struct radeon_i2c_bus_rec *rec = &i2c->rec; |
779 | int ret; | 785 | int ret = 0; |
780 | 786 | ||
781 | switch (rdev->family) { | 787 | switch (rdev->family) { |
782 | case CHIP_R100: | 788 | case CHIP_R100: |
@@ -797,16 +803,12 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
797 | case CHIP_RV410: | 803 | case CHIP_RV410: |
798 | case CHIP_RS400: | 804 | case CHIP_RS400: |
799 | case CHIP_RS480: | 805 | case CHIP_RS480: |
800 | if (rec->hw_capable) | 806 | ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); |
801 | ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); | ||
802 | else | ||
803 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
804 | break; | 807 | break; |
805 | case CHIP_RS600: | 808 | case CHIP_RS600: |
806 | case CHIP_RS690: | 809 | case CHIP_RS690: |
807 | case CHIP_RS740: | 810 | case CHIP_RS740: |
808 | /* XXX fill in hw i2c implementation */ | 811 | /* XXX fill in hw i2c implementation */ |
809 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
810 | break; | 812 | break; |
811 | case CHIP_RV515: | 813 | case CHIP_RV515: |
812 | case CHIP_R520: | 814 | case CHIP_R520: |
@@ -814,20 +816,16 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
814 | case CHIP_RV560: | 816 | case CHIP_RV560: |
815 | case CHIP_RV570: | 817 | case CHIP_RV570: |
816 | case CHIP_R580: | 818 | case CHIP_R580: |
817 | if (rec->hw_capable) { | 819 | if (rec->mm_i2c) |
818 | if (rec->mm_i2c) | 820 | ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); |
819 | ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); | 821 | else |
820 | else | 822 | ret = r500_hw_i2c_xfer(i2c_adap, msgs, num); |
821 | ret = r500_hw_i2c_xfer(i2c_adap, msgs, num); | ||
822 | } else | ||
823 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
824 | break; | 823 | break; |
825 | case CHIP_R600: | 824 | case CHIP_R600: |
826 | case CHIP_RV610: | 825 | case CHIP_RV610: |
827 | case CHIP_RV630: | 826 | case CHIP_RV630: |
828 | case CHIP_RV670: | 827 | case CHIP_RV670: |
829 | /* XXX fill in hw i2c implementation */ | 828 | /* XXX fill in hw i2c implementation */ |
830 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
831 | break; | 829 | break; |
832 | case CHIP_RV620: | 830 | case CHIP_RV620: |
833 | case CHIP_RV635: | 831 | case CHIP_RV635: |
@@ -838,7 +836,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
838 | case CHIP_RV710: | 836 | case CHIP_RV710: |
839 | case CHIP_RV740: | 837 | case CHIP_RV740: |
840 | /* XXX fill in hw i2c implementation */ | 838 | /* XXX fill in hw i2c implementation */ |
841 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
842 | break; | 839 | break; |
843 | case CHIP_CEDAR: | 840 | case CHIP_CEDAR: |
844 | case CHIP_REDWOOD: | 841 | case CHIP_REDWOOD: |
@@ -846,7 +843,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
846 | case CHIP_CYPRESS: | 843 | case CHIP_CYPRESS: |
847 | case CHIP_HEMLOCK: | 844 | case CHIP_HEMLOCK: |
848 | /* XXX fill in hw i2c implementation */ | 845 | /* XXX fill in hw i2c implementation */ |
849 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
850 | break; | 846 | break; |
851 | default: | 847 | default: |
852 | DRM_ERROR("i2c: unhandled radeon chip\n"); | 848 | DRM_ERROR("i2c: unhandled radeon chip\n"); |
@@ -857,20 +853,21 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
857 | return ret; | 853 | return ret; |
858 | } | 854 | } |
859 | 855 | ||
860 | static u32 radeon_i2c_func(struct i2c_adapter *adap) | 856 | static u32 radeon_hw_i2c_func(struct i2c_adapter *adap) |
861 | { | 857 | { |
862 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; | 858 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; |
863 | } | 859 | } |
864 | 860 | ||
865 | static const struct i2c_algorithm radeon_i2c_algo = { | 861 | static const struct i2c_algorithm radeon_i2c_algo = { |
866 | .master_xfer = radeon_i2c_xfer, | 862 | .master_xfer = radeon_hw_i2c_xfer, |
867 | .functionality = radeon_i2c_func, | 863 | .functionality = radeon_hw_i2c_func, |
868 | }; | 864 | }; |
869 | 865 | ||
870 | struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | 866 | struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, |
871 | struct radeon_i2c_bus_rec *rec, | 867 | struct radeon_i2c_bus_rec *rec, |
872 | const char *name) | 868 | const char *name) |
873 | { | 869 | { |
870 | struct radeon_device *rdev = dev->dev_private; | ||
874 | struct radeon_i2c_chan *i2c; | 871 | struct radeon_i2c_chan *i2c; |
875 | int ret; | 872 | int ret; |
876 | 873 | ||
@@ -878,37 +875,43 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
878 | if (i2c == NULL) | 875 | if (i2c == NULL) |
879 | return NULL; | 876 | return NULL; |
880 | 877 | ||
881 | /* set the internal bit adapter */ | ||
882 | i2c->algo.radeon.bit_adapter.owner = THIS_MODULE; | ||
883 | i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c); | ||
884 | sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name); | ||
885 | i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data; | ||
886 | i2c->algo.radeon.bit_data.setsda = set_data; | ||
887 | i2c->algo.radeon.bit_data.setscl = set_clock; | ||
888 | i2c->algo.radeon.bit_data.getsda = get_data; | ||
889 | i2c->algo.radeon.bit_data.getscl = get_clock; | ||
890 | i2c->algo.radeon.bit_data.udelay = 20; | ||
891 | /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always | ||
892 | * make this, 2 jiffies is a lot more reliable */ | ||
893 | i2c->algo.radeon.bit_data.timeout = 2; | ||
894 | i2c->algo.radeon.bit_data.data = i2c; | ||
895 | ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter); | ||
896 | if (ret) { | ||
897 | DRM_ERROR("Failed to register internal bit i2c %s\n", name); | ||
898 | goto out_free; | ||
899 | } | ||
900 | /* set the radeon i2c adapter */ | ||
901 | i2c->dev = dev; | ||
902 | i2c->rec = *rec; | 878 | i2c->rec = *rec; |
903 | i2c->adapter.owner = THIS_MODULE; | 879 | i2c->adapter.owner = THIS_MODULE; |
880 | i2c->dev = dev; | ||
904 | i2c_set_adapdata(&i2c->adapter, i2c); | 881 | i2c_set_adapdata(&i2c->adapter, i2c); |
905 | sprintf(i2c->adapter.name, "Radeon i2c %s", name); | 882 | if (rec->mm_i2c || |
906 | i2c->adapter.algo_data = &i2c->algo.radeon; | 883 | (rec->hw_capable && |
907 | i2c->adapter.algo = &radeon_i2c_algo; | 884 | radeon_hw_i2c && |
908 | ret = i2c_add_adapter(&i2c->adapter); | 885 | ((rdev->family <= CHIP_RS480) || |
909 | if (ret) { | 886 | ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { |
910 | DRM_ERROR("Failed to register i2c %s\n", name); | 887 | /* set the radeon hw i2c adapter */ |
911 | goto out_free; | 888 | sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name); |
889 | i2c->adapter.algo = &radeon_i2c_algo; | ||
890 | ret = i2c_add_adapter(&i2c->adapter); | ||
891 | if (ret) { | ||
892 | DRM_ERROR("Failed to register hw i2c %s\n", name); | ||
893 | goto out_free; | ||
894 | } | ||
895 | } else { | ||
896 | /* set the radeon bit adapter */ | ||
897 | sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name); | ||
898 | i2c->adapter.algo_data = &i2c->algo.bit; | ||
899 | i2c->algo.bit.pre_xfer = pre_xfer; | ||
900 | i2c->algo.bit.post_xfer = post_xfer; | ||
901 | i2c->algo.bit.setsda = set_data; | ||
902 | i2c->algo.bit.setscl = set_clock; | ||
903 | i2c->algo.bit.getsda = get_data; | ||
904 | i2c->algo.bit.getscl = get_clock; | ||
905 | i2c->algo.bit.udelay = 20; | ||
906 | /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always | ||
907 | * make this, 2 jiffies is a lot more reliable */ | ||
908 | i2c->algo.bit.timeout = 2; | ||
909 | i2c->algo.bit.data = i2c; | ||
910 | ret = i2c_bit_add_bus(&i2c->adapter); | ||
911 | if (ret) { | ||
912 | DRM_ERROR("Failed to register bit i2c %s\n", name); | ||
913 | goto out_free; | ||
914 | } | ||
912 | } | 915 | } |
913 | 916 | ||
914 | return i2c; | 917 | return i2c; |
@@ -953,16 +956,6 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) | |||
953 | { | 956 | { |
954 | if (!i2c) | 957 | if (!i2c) |
955 | return; | 958 | return; |
956 | i2c_del_adapter(&i2c->algo.radeon.bit_adapter); | ||
957 | i2c_del_adapter(&i2c->adapter); | ||
958 | kfree(i2c); | ||
959 | } | ||
960 | |||
961 | void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c) | ||
962 | { | ||
963 | if (!i2c) | ||
964 | return; | ||
965 | |||
966 | i2c_del_adapter(&i2c->adapter); | 959 | i2c_del_adapter(&i2c->adapter); |
967 | kfree(i2c); | 960 | kfree(i2c); |
968 | } | 961 | } |
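
Aside (not part of the patch): the R100-RV350 branch of radeon_get_i2c_prescale() now derives the divider pair from the current engine clock rather than from the SPLL reference frequency, targeting the i2c_clock value hard-coded in the hunk above. A minimal standalone restatement of that loop, assuming sclk is the engine clock in 10 kHz units (the driver's usual convention) and that the pair is packed as m | (n << 8):

    #include <stdint.h>

    /* sketch of the new R100-RV350 prescale computation; not the patch itself */
    uint32_t r1xx_i2c_prescale_sketch(uint32_t sclk)
    {
            const uint32_t i2c_clock = 60;               /* target value used in the hunk above */
            uint32_t nm = (sclk * 10) / (i2c_clock * 4); /* combined divider n*m */
            uint8_t loop, n, m;

            /* smallest loop with nm/loop < loop, i.e. roughly sqrt(nm) */
            for (loop = 1; loop < 255; loop++) {
                    if ((nm / loop) < loop)
                            break;
            }
            n = loop - 1;
            m = loop - 2;
            /* e.g. sclk = 30000 (300 MHz in 10 kHz units) -> nm = 1250, loop = 36, n = 35, m = 34 */
            return m | ((uint32_t)n << 8);
    }

The same file also drops the private bit-banging adapter: buses that do not qualify for the hardware path (mm_i2c, or hw_capable plus the radeon_hw_i2c option on the listed families) are registered straight through i2c_bit_add_bus(), with pre_xfer()/post_xfer() doing the lock handling that radeon_sw_i2c_xfer() used to wrap around i2c_transfer().
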
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index ea4c645ece11..a212041e8b0b 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -67,9 +67,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) | |||
67 | 67 | ||
68 | /* Disable *all* interrupts */ | 68 | /* Disable *all* interrupts */ |
69 | rdev->irq.sw_int = false; | 69 | rdev->irq.sw_int = false; |
70 | for (i = 0; i < 2; i++) { | 70 | for (i = 0; i < rdev->num_crtc; i++) |
71 | rdev->irq.crtc_vblank_int[i] = false; | 71 | rdev->irq.crtc_vblank_int[i] = false; |
72 | } | 72 | for (i = 0; i < 6; i++) |
73 | rdev->irq.hpd[i] = false; | ||
73 | radeon_irq_set(rdev); | 74 | radeon_irq_set(rdev); |
74 | /* Clear bits */ | 75 | /* Clear bits */ |
75 | radeon_irq_process(rdev); | 76 | radeon_irq_process(rdev); |
@@ -95,28 +96,29 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
95 | } | 96 | } |
96 | /* Disable *all* interrupts */ | 97 | /* Disable *all* interrupts */ |
97 | rdev->irq.sw_int = false; | 98 | rdev->irq.sw_int = false; |
98 | for (i = 0; i < 2; i++) { | 99 | for (i = 0; i < rdev->num_crtc; i++) |
99 | rdev->irq.crtc_vblank_int[i] = false; | 100 | rdev->irq.crtc_vblank_int[i] = false; |
101 | for (i = 0; i < 6; i++) | ||
100 | rdev->irq.hpd[i] = false; | 102 | rdev->irq.hpd[i] = false; |
101 | } | ||
102 | radeon_irq_set(rdev); | 103 | radeon_irq_set(rdev); |
103 | } | 104 | } |
104 | 105 | ||
105 | int radeon_irq_kms_init(struct radeon_device *rdev) | 106 | int radeon_irq_kms_init(struct radeon_device *rdev) |
106 | { | 107 | { |
107 | int r = 0; | 108 | int r = 0; |
108 | int num_crtc = 2; | ||
109 | 109 | ||
110 | if (rdev->flags & RADEON_SINGLE_CRTC) | ||
111 | num_crtc = 1; | ||
112 | spin_lock_init(&rdev->irq.sw_lock); | 110 | spin_lock_init(&rdev->irq.sw_lock); |
113 | r = drm_vblank_init(rdev->ddev, num_crtc); | 111 | r = drm_vblank_init(rdev->ddev, rdev->num_crtc); |
114 | if (r) { | 112 | if (r) { |
115 | return r; | 113 | return r; |
116 | } | 114 | } |
117 | /* enable msi */ | 115 | /* enable msi */ |
118 | rdev->msi_enabled = 0; | 116 | rdev->msi_enabled = 0; |
119 | if (rdev->family >= CHIP_RV380) { | 117 | /* MSIs don't seem to work reliably on all IGP |
118 | * chips. Disable MSI on them for now. | ||
119 | */ | ||
120 | if ((rdev->family >= CHIP_RV380) && | ||
121 | (!(rdev->flags & RADEON_IS_IGP))) { | ||
120 | int ret = pci_enable_msi(rdev->pdev); | 122 | int ret = pci_enable_msi(rdev->pdev); |
121 | if (!ret) { | 123 | if (!ret) { |
122 | rdev->msi_enabled = 1; | 124 | rdev->msi_enabled = 1; |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index df23d6a01d02..88865e38fe30 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -603,6 +603,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod | |||
603 | ? RADEON_CRTC2_INTERLACE_EN | 603 | ? RADEON_CRTC2_INTERLACE_EN |
604 | : 0)); | 604 | : 0)); |
605 | 605 | ||
606 | /* rs4xx chips seem to like to have the crtc enabled when the timing is set */ | ||
607 | if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480)) | ||
608 | crtc2_gen_cntl |= RADEON_CRTC2_EN; | ||
609 | |||
606 | disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); | 610 | disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); |
607 | disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; | 611 | disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; |
608 | 612 | ||
@@ -630,6 +634,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod | |||
630 | ? RADEON_CRTC_INTERLACE_EN | 634 | ? RADEON_CRTC_INTERLACE_EN |
631 | : 0)); | 635 | : 0)); |
632 | 636 | ||
637 | /* rs4xx chips seem to like to have the crtc enabled when the timing is set */ | ||
638 | if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480)) | ||
639 | crtc_gen_cntl |= RADEON_CRTC_EN; | ||
640 | |||
633 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); | 641 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); |
634 | crtc_ext_cntl |= (RADEON_XCRT_CNT_EN | | 642 | crtc_ext_cntl |= (RADEON_XCRT_CNT_EN | |
635 | RADEON_CRTC_VSYNC_DIS | | 643 | RADEON_CRTC_VSYNC_DIS | |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index 417684daef4c..f2ed27c8055b 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c | |||
@@ -57,6 +57,10 @@ | |||
57 | #define NTSC_TV_PLL_N_14 693 | 57 | #define NTSC_TV_PLL_N_14 693 |
58 | #define NTSC_TV_PLL_P_14 7 | 58 | #define NTSC_TV_PLL_P_14 7 |
59 | 59 | ||
60 | #define PAL_TV_PLL_M_14 19 | ||
61 | #define PAL_TV_PLL_N_14 353 | ||
62 | #define PAL_TV_PLL_P_14 5 | ||
63 | |||
60 | #define VERT_LEAD_IN_LINES 2 | 64 | #define VERT_LEAD_IN_LINES 2 |
61 | #define FRAC_BITS 0xe | 65 | #define FRAC_BITS 0xe |
62 | #define FRAC_MASK 0x3fff | 66 | #define FRAC_MASK 0x3fff |
@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = { | |||
205 | 630627, /* defRestart */ | 209 | 630627, /* defRestart */ |
206 | 347, /* crtcPLL_N */ | 210 | 347, /* crtcPLL_N */ |
207 | 14, /* crtcPLL_M */ | 211 | 14, /* crtcPLL_M */ |
208 | 8, /* crtcPLL_postDiv */ | 212 | 8, /* crtcPLL_postDiv */ |
209 | 1022, /* pixToTV */ | 213 | 1022, /* pixToTV */ |
210 | }, | 214 | }, |
215 | { /* PAL timing for 14 Mhz ref clk */ | ||
216 | 800, /* horResolution */ | ||
217 | 600, /* verResolution */ | ||
218 | TV_STD_PAL, /* standard */ | ||
219 | 1131, /* horTotal */ | ||
220 | 742, /* verTotal */ | ||
221 | 813, /* horStart */ | ||
222 | 840, /* horSyncStart */ | ||
223 | 633, /* verSyncStart */ | ||
224 | 708369, /* defRestart */ | ||
225 | 211, /* crtcPLL_N */ | ||
226 | 9, /* crtcPLL_M */ | ||
227 | 8, /* crtcPLL_postDiv */ | ||
228 | 759, /* pixToTV */ | ||
229 | }, | ||
211 | }; | 230 | }; |
212 | 231 | ||
213 | #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes) | 232 | #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes) |
@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru | |||
242 | if (pll->reference_freq == 2700) | 261 | if (pll->reference_freq == 2700) |
243 | const_ptr = &available_tv_modes[1]; | 262 | const_ptr = &available_tv_modes[1]; |
244 | else | 263 | else |
245 | const_ptr = &available_tv_modes[1]; /* FIX ME */ | 264 | const_ptr = &available_tv_modes[3]; |
246 | } | 265 | } |
247 | return const_ptr; | 266 | return const_ptr; |
248 | } | 267 | } |
@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, | |||
685 | n = PAL_TV_PLL_N_27; | 704 | n = PAL_TV_PLL_N_27; |
686 | p = PAL_TV_PLL_P_27; | 705 | p = PAL_TV_PLL_P_27; |
687 | } else { | 706 | } else { |
688 | m = PAL_TV_PLL_M_27; | 707 | m = PAL_TV_PLL_M_14; |
689 | n = PAL_TV_PLL_N_27; | 708 | n = PAL_TV_PLL_N_14; |
690 | p = PAL_TV_PLL_P_27; | 709 | p = PAL_TV_PLL_P_14; |
691 | } | 710 | } |
692 | } | 711 | } |
693 | 712 | ||
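
Aside (not part of the patch): the new PAL entry is self-consistent with its CRTC timings. Assuming the usual pixel_clock = reference * N / (M * post_div) relation and a 14.318 MHz reference (the entry is labelled "14 Mhz ref clk"), the crtcPLL values land on roughly the PAL field rate:

    #include <stdio.h>

    int main(void)
    {
            double ref_mhz = 14.318;
            double pixclk  = ref_mhz * 211.0 / (9.0 * 8.0);    /* crtcPLL_N, _M, _postDiv above */
            double refresh = pixclk * 1e6 / (1131.0 * 742.0);  /* horTotal * verTotal */

            printf("pixel clock %.2f MHz, refresh %.1f Hz\n", pixclk, refresh); /* ~41.96 MHz, ~50.0 Hz */
            return 0;
    }

The PAL_TV_PLL_{M,N,P}_14 defines feed the TV PLL itself in radeon_legacy_tv_mode_set(), replacing the 27 MHz constants that the old code reused as a placeholder for the 14 MHz case (the "FIX ME" path above).
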
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 1702b820aa4d..0b8e32776b10 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -129,6 +129,7 @@ struct radeon_tmds_pll { | |||
129 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) | 129 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) |
130 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) | 130 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) |
131 | #define RADEON_PLL_USE_POST_DIV (1 << 12) | 131 | #define RADEON_PLL_USE_POST_DIV (1 << 12) |
132 | #define RADEON_PLL_IS_LCD (1 << 13) | ||
132 | 133 | ||
133 | /* pll algo */ | 134 | /* pll algo */ |
134 | enum radeon_pll_algo { | 135 | enum radeon_pll_algo { |
@@ -149,6 +150,8 @@ struct radeon_pll { | |||
149 | uint32_t pll_in_max; | 150 | uint32_t pll_in_max; |
150 | uint32_t pll_out_min; | 151 | uint32_t pll_out_min; |
151 | uint32_t pll_out_max; | 152 | uint32_t pll_out_max; |
153 | uint32_t lcd_pll_out_min; | ||
154 | uint32_t lcd_pll_out_max; | ||
152 | uint32_t best_vco; | 155 | uint32_t best_vco; |
153 | 156 | ||
154 | /* divider limits */ | 157 | /* divider limits */ |
@@ -170,17 +173,12 @@ struct radeon_pll { | |||
170 | enum radeon_pll_algo algo; | 173 | enum radeon_pll_algo algo; |
171 | }; | 174 | }; |
172 | 175 | ||
173 | struct i2c_algo_radeon_data { | ||
174 | struct i2c_adapter bit_adapter; | ||
175 | struct i2c_algo_bit_data bit_data; | ||
176 | }; | ||
177 | |||
178 | struct radeon_i2c_chan { | 176 | struct radeon_i2c_chan { |
179 | struct i2c_adapter adapter; | 177 | struct i2c_adapter adapter; |
180 | struct drm_device *dev; | 178 | struct drm_device *dev; |
181 | union { | 179 | union { |
180 | struct i2c_algo_bit_data bit; | ||
182 | struct i2c_algo_dp_aux_data dp; | 181 | struct i2c_algo_dp_aux_data dp; |
183 | struct i2c_algo_radeon_data radeon; | ||
184 | } algo; | 182 | } algo; |
185 | struct radeon_i2c_bus_rec rec; | 183 | struct radeon_i2c_bus_rec rec; |
186 | }; | 184 | }; |
@@ -342,6 +340,7 @@ struct radeon_encoder { | |||
342 | struct drm_display_mode native_mode; | 340 | struct drm_display_mode native_mode; |
343 | void *enc_priv; | 341 | void *enc_priv; |
344 | int hdmi_offset; | 342 | int hdmi_offset; |
343 | int hdmi_config_offset; | ||
345 | int hdmi_audio_workaround; | 344 | int hdmi_audio_workaround; |
346 | int hdmi_buffer_status; | 345 | int hdmi_buffer_status; |
347 | }; | 346 | }; |
@@ -431,7 +430,6 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
431 | struct radeon_i2c_bus_rec *rec, | 430 | struct radeon_i2c_bus_rec *rec, |
432 | const char *name); | 431 | const char *name); |
433 | extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); | 432 | extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); |
434 | extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c); | ||
435 | extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, | 433 | extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, |
436 | u8 slave_addr, | 434 | u8 slave_addr, |
437 | u8 addr, | 435 | u8 addr, |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index fc9d00ac6b15..dc7e3f449138 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -185,8 +185,10 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) | |||
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
187 | radeon_ttm_placement_from_domain(bo, domain); | 187 | radeon_ttm_placement_from_domain(bo, domain); |
188 | /* force to pin into visible video ram */ | 188 | if (domain == RADEON_GEM_DOMAIN_VRAM) { |
189 | bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; | 189 | /* force to pin into visible video ram */ |
190 | bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; | ||
191 | } | ||
190 | for (i = 0; i < bo->placement.num_placement; i++) | 192 | for (i = 0; i < bo->placement.num_placement; i++) |
191 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; | 193 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; |
192 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 194 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
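
Aside (not part of the patch): lpfn is an upper page-frame bound inside the target domain, so the clamp is now applied only when pinning into VRAM; the old unconditional clamp also limited GTT pins to the visible-VRAM page count. The limit is expressed in pages, for example:

    #include <stdio.h>

    int main(void)
    {
            unsigned long visible_vram_size = 256UL << 20;  /* example: 256 MiB of CPU-visible VRAM */
            unsigned long lpfn = visible_vram_size >> 12;   /* PAGE_SHIFT of 12 -> 65536 pages */

            printf("lpfn = %lu\n", lpfn);
            return 0;
    }
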
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index d4d1c39a0e99..a4b57493aa78 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #define RADEON_RECLOCK_DELAY_MS 200 | 28 | #define RADEON_RECLOCK_DELAY_MS 200 |
29 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 | 29 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 |
30 | 30 | ||
31 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); | ||
31 | static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); | 32 | static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); |
32 | static void radeon_pm_set_clocks(struct radeon_device *rdev); | 33 | static void radeon_pm_set_clocks(struct radeon_device *rdev); |
33 | static void radeon_pm_idle_work_handler(struct work_struct *work); | 34 | static void radeon_pm_idle_work_handler(struct work_struct *work); |
@@ -179,6 +180,16 @@ static void radeon_get_power_state(struct radeon_device *rdev, | |||
179 | rdev->pm.requested_power_state->non_clock_info.pcie_lanes); | 180 | rdev->pm.requested_power_state->non_clock_info.pcie_lanes); |
180 | } | 181 | } |
181 | 182 | ||
183 | static inline void radeon_sync_with_vblank(struct radeon_device *rdev) | ||
184 | { | ||
185 | if (rdev->pm.active_crtcs) { | ||
186 | rdev->pm.vblank_sync = false; | ||
187 | wait_event_timeout( | ||
188 | rdev->irq.vblank_queue, rdev->pm.vblank_sync, | ||
189 | msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); | ||
190 | } | ||
191 | } | ||
192 | |||
182 | static void radeon_set_power_state(struct radeon_device *rdev) | 193 | static void radeon_set_power_state(struct radeon_device *rdev) |
183 | { | 194 | { |
184 | /* if *_clock_mode are the same, *_power_state are as well */ | 195 | /* if *_clock_mode are the same, *_power_state are as well */ |
@@ -189,11 +200,28 @@ static void radeon_set_power_state(struct radeon_device *rdev) | |||
189 | rdev->pm.requested_clock_mode->sclk, | 200 | rdev->pm.requested_clock_mode->sclk, |
190 | rdev->pm.requested_clock_mode->mclk, | 201 | rdev->pm.requested_clock_mode->mclk, |
191 | rdev->pm.requested_power_state->non_clock_info.pcie_lanes); | 202 | rdev->pm.requested_power_state->non_clock_info.pcie_lanes); |
203 | |||
192 | /* set pcie lanes */ | 204 | /* set pcie lanes */ |
205 | /* TODO */ | ||
206 | |||
193 | /* set voltage */ | 207 | /* set voltage */ |
208 | /* TODO */ | ||
209 | |||
194 | /* set engine clock */ | 210 | /* set engine clock */ |
211 | radeon_sync_with_vblank(rdev); | ||
212 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
195 | radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); | 213 | radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); |
214 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
215 | |||
216 | #if 0 | ||
196 | /* set memory clock */ | 217 | /* set memory clock */ |
218 | if (rdev->asic->set_memory_clock) { | ||
219 | radeon_sync_with_vblank(rdev); | ||
220 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
221 | radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk); | ||
222 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
223 | } | ||
224 | #endif | ||
197 | 225 | ||
198 | rdev->pm.current_power_state = rdev->pm.requested_power_state; | 226 | rdev->pm.current_power_state = rdev->pm.requested_power_state; |
199 | rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; | 227 | rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; |
@@ -229,6 +257,12 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
229 | return 0; | 257 | return 0; |
230 | } | 258 | } |
231 | 259 | ||
260 | void radeon_pm_fini(struct radeon_device *rdev) | ||
261 | { | ||
262 | if (rdev->pm.i2c_bus) | ||
263 | radeon_i2c_destroy(rdev->pm.i2c_bus); | ||
264 | } | ||
265 | |||
232 | void radeon_pm_compute_clocks(struct radeon_device *rdev) | 266 | void radeon_pm_compute_clocks(struct radeon_device *rdev) |
233 | { | 267 | { |
234 | struct drm_device *ddev = rdev->ddev; | 268 | struct drm_device *ddev = rdev->ddev; |
@@ -245,7 +279,8 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
245 | list_for_each_entry(connector, | 279 | list_for_each_entry(connector, |
246 | &ddev->mode_config.connector_list, head) { | 280 | &ddev->mode_config.connector_list, head) { |
247 | if (connector->encoder && | 281 | if (connector->encoder && |
248 | connector->dpms != DRM_MODE_DPMS_OFF) { | 282 | connector->encoder->crtc && |
283 | connector->dpms != DRM_MODE_DPMS_OFF) { | ||
249 | radeon_crtc = to_radeon_crtc(connector->encoder->crtc); | 284 | radeon_crtc = to_radeon_crtc(connector->encoder->crtc); |
250 | rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); | 285 | rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); |
251 | ++count; | 286 | ++count; |
@@ -333,10 +368,7 @@ static void radeon_pm_set_clocks_locked(struct radeon_device *rdev) | |||
333 | break; | 368 | break; |
334 | } | 369 | } |
335 | 370 | ||
336 | /* check if we are in vblank */ | ||
337 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
338 | radeon_set_power_state(rdev); | 371 | radeon_set_power_state(rdev); |
339 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
340 | rdev->pm.planned_action = PM_ACTION_NONE; | 372 | rdev->pm.planned_action = PM_ACTION_NONE; |
341 | } | 373 | } |
342 | 374 | ||
@@ -353,10 +385,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) | |||
353 | rdev->pm.req_vblank |= (1 << 1); | 385 | rdev->pm.req_vblank |= (1 << 1); |
354 | drm_vblank_get(rdev->ddev, 1); | 386 | drm_vblank_get(rdev->ddev, 1); |
355 | } | 387 | } |
356 | if (rdev->pm.active_crtcs) | 388 | radeon_pm_set_clocks_locked(rdev); |
357 | wait_event_interruptible_timeout( | ||
358 | rdev->irq.vblank_queue, 0, | ||
359 | msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); | ||
360 | if (rdev->pm.req_vblank & (1 << 0)) { | 389 | if (rdev->pm.req_vblank & (1 << 0)) { |
361 | rdev->pm.req_vblank &= ~(1 << 0); | 390 | rdev->pm.req_vblank &= ~(1 << 0); |
362 | drm_vblank_put(rdev->ddev, 0); | 391 | drm_vblank_put(rdev->ddev, 0); |
@@ -366,7 +395,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) | |||
366 | drm_vblank_put(rdev->ddev, 1); | 395 | drm_vblank_put(rdev->ddev, 1); |
367 | } | 396 | } |
368 | 397 | ||
369 | radeon_pm_set_clocks_locked(rdev); | ||
370 | mutex_unlock(&rdev->cp.mutex); | 398 | mutex_unlock(&rdev->cp.mutex); |
371 | } | 399 | } |
372 | 400 | ||
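
Aside (not part of the patch): the reclock path now synchronizes with the display engine through a flag/waitqueue pair instead of a bare timed sleep. The display interrupt handler sets rdev->pm.vblank_sync and wakes rdev->irq.vblank_queue (see the rs600_irq_process() hunk further down in this diff), while radeon_sync_with_vblank() clears the flag and sleeps until it is set again or 200 ms (RADEON_WAIT_VBLANK_TIMEOUT) pass. Condensed, kernel-style and not compilable on its own:

    /* IRQ side, once per vblank: */
    rdev->pm.vblank_sync = true;
    wake_up(&rdev->irq.vblank_queue);

    /* PM side, before touching the engine or memory clock: */
    if (rdev->pm.active_crtcs) {
            rdev->pm.vblank_sync = false;
            wait_event_timeout(rdev->irq.vblank_queue, rdev->pm.vblank_sync,
                               msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
    }
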
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 5c0dc082d330..eabbc9cf30a7 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -346,6 +346,7 @@ | |||
346 | # define RADEON_TVPLL_PWRMGT_OFF (1 << 30) | 346 | # define RADEON_TVPLL_PWRMGT_OFF (1 << 30) |
347 | # define RADEON_TVCLK_TURNOFF (1 << 31) | 347 | # define RADEON_TVCLK_TURNOFF (1 << 31) |
348 | #define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */ | 348 | #define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */ |
349 | # define RADEON_PM_MODE_SEL (1 << 13) | ||
349 | # define RADEON_TCL_BYPASS_DISABLE (1 << 20) | 350 | # define RADEON_TCL_BYPASS_DISABLE (1 << 20) |
350 | #define RADEON_CLR_CMP_CLR_3D 0x1a24 | 351 | #define RADEON_CLR_CMP_CLR_3D 0x1a24 |
351 | #define RADEON_CLR_CMP_CLR_DST 0x15c8 | 352 | #define RADEON_CLR_CMP_CLR_DST 0x15c8 |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index 8f414a5f520f..af0da4ae3f55 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 | |||
@@ -26,20 +26,16 @@ r600 0x9400 | |||
26 | 0x00028408 VGT_INDX_OFFSET | 26 | 0x00028408 VGT_INDX_OFFSET |
27 | 0x00028AA0 VGT_INSTANCE_STEP_RATE_0 | 27 | 0x00028AA0 VGT_INSTANCE_STEP_RATE_0 |
28 | 0x00028AA4 VGT_INSTANCE_STEP_RATE_1 | 28 | 0x00028AA4 VGT_INSTANCE_STEP_RATE_1 |
29 | 0x000088C0 VGT_LAST_COPY_STATE | ||
30 | 0x00028400 VGT_MAX_VTX_INDX | 29 | 0x00028400 VGT_MAX_VTX_INDX |
31 | 0x000088D8 VGT_MC_LAT_CNTL | ||
32 | 0x00028404 VGT_MIN_VTX_INDX | 30 | 0x00028404 VGT_MIN_VTX_INDX |
33 | 0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN | 31 | 0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN |
34 | 0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX | 32 | 0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX |
35 | 0x00008970 VGT_NUM_INDICES | 33 | 0x00008970 VGT_NUM_INDICES |
36 | 0x00008974 VGT_NUM_INSTANCES | 34 | 0x00008974 VGT_NUM_INSTANCES |
37 | 0x00028A10 VGT_OUTPUT_PATH_CNTL | 35 | 0x00028A10 VGT_OUTPUT_PATH_CNTL |
38 | 0x00028C5C VGT_OUT_DEALLOC_CNTL | ||
39 | 0x00028A84 VGT_PRIMITIVEID_EN | 36 | 0x00028A84 VGT_PRIMITIVEID_EN |
40 | 0x00008958 VGT_PRIMITIVE_TYPE | 37 | 0x00008958 VGT_PRIMITIVE_TYPE |
41 | 0x00028AB4 VGT_REUSE_OFF | 38 | 0x00028AB4 VGT_REUSE_OFF |
42 | 0x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL | ||
43 | 0x00028AB8 VGT_VTX_CNT_EN | 39 | 0x00028AB8 VGT_VTX_CNT_EN |
44 | 0x000088B0 VGT_VTX_VECT_EJECT_REG | 40 | 0x000088B0 VGT_VTX_VECT_EJECT_REG |
45 | 0x00028810 PA_CL_CLIP_CNTL | 41 | 0x00028810 PA_CL_CLIP_CNTL |
@@ -280,7 +276,6 @@ r600 0x9400 | |||
280 | 0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE | 276 | 0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE |
281 | 0x00028814 PA_SU_SC_MODE_CNTL | 277 | 0x00028814 PA_SU_SC_MODE_CNTL |
282 | 0x00028C08 PA_SU_VTX_CNTL | 278 | 0x00028C08 PA_SU_VTX_CNTL |
283 | 0x00008C00 SQ_CONFIG | ||
284 | 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 | 279 | 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 |
285 | 0x00008C08 SQ_GPR_RESOURCE_MGMT_2 | 280 | 0x00008C08 SQ_GPR_RESOURCE_MGMT_2 |
286 | 0x00008C10 SQ_STACK_RESOURCE_MGMT_1 | 281 | 0x00008C10 SQ_STACK_RESOURCE_MGMT_1 |
@@ -320,18 +315,6 @@ r600 0x9400 | |||
320 | 0x000283FC SQ_VTX_SEMANTIC_31 | 315 | 0x000283FC SQ_VTX_SEMANTIC_31 |
321 | 0x000288E0 SQ_VTX_SEMANTIC_CLEAR | 316 | 0x000288E0 SQ_VTX_SEMANTIC_CLEAR |
322 | 0x0003CFF4 SQ_VTX_START_INST_LOC | 317 | 0x0003CFF4 SQ_VTX_START_INST_LOC |
323 | 0x0003C000 SQ_TEX_SAMPLER_WORD0_0 | ||
324 | 0x0003C004 SQ_TEX_SAMPLER_WORD1_0 | ||
325 | 0x0003C008 SQ_TEX_SAMPLER_WORD2_0 | ||
326 | 0x00030000 SQ_ALU_CONSTANT0_0 | ||
327 | 0x00030004 SQ_ALU_CONSTANT1_0 | ||
328 | 0x00030008 SQ_ALU_CONSTANT2_0 | ||
329 | 0x0003000C SQ_ALU_CONSTANT3_0 | ||
330 | 0x0003E380 SQ_BOOL_CONST_0 | ||
331 | 0x0003E384 SQ_BOOL_CONST_1 | ||
332 | 0x0003E388 SQ_BOOL_CONST_2 | ||
333 | 0x0003E200 SQ_LOOP_CONST_0 | ||
334 | 0x0003E200 SQ_LOOP_CONST_DX10_0 | ||
335 | 0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 | 318 | 0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 |
336 | 0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 | 319 | 0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 |
337 | 0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 | 320 | 0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 |
@@ -380,54 +363,6 @@ r600 0x9400 | |||
380 | 0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 | 363 | 0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 |
381 | 0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 | 364 | 0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 |
382 | 0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 | 365 | 0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 |
383 | 0x000289C0 SQ_ALU_CONST_CACHE_GS_0 | ||
384 | 0x000289C4 SQ_ALU_CONST_CACHE_GS_1 | ||
385 | 0x000289C8 SQ_ALU_CONST_CACHE_GS_2 | ||
386 | 0x000289CC SQ_ALU_CONST_CACHE_GS_3 | ||
387 | 0x000289D0 SQ_ALU_CONST_CACHE_GS_4 | ||
388 | 0x000289D4 SQ_ALU_CONST_CACHE_GS_5 | ||
389 | 0x000289D8 SQ_ALU_CONST_CACHE_GS_6 | ||
390 | 0x000289DC SQ_ALU_CONST_CACHE_GS_7 | ||
391 | 0x000289E0 SQ_ALU_CONST_CACHE_GS_8 | ||
392 | 0x000289E4 SQ_ALU_CONST_CACHE_GS_9 | ||
393 | 0x000289E8 SQ_ALU_CONST_CACHE_GS_10 | ||
394 | 0x000289EC SQ_ALU_CONST_CACHE_GS_11 | ||
395 | 0x000289F0 SQ_ALU_CONST_CACHE_GS_12 | ||
396 | 0x000289F4 SQ_ALU_CONST_CACHE_GS_13 | ||
397 | 0x000289F8 SQ_ALU_CONST_CACHE_GS_14 | ||
398 | 0x000289FC SQ_ALU_CONST_CACHE_GS_15 | ||
399 | 0x00028940 SQ_ALU_CONST_CACHE_PS_0 | ||
400 | 0x00028944 SQ_ALU_CONST_CACHE_PS_1 | ||
401 | 0x00028948 SQ_ALU_CONST_CACHE_PS_2 | ||
402 | 0x0002894C SQ_ALU_CONST_CACHE_PS_3 | ||
403 | 0x00028950 SQ_ALU_CONST_CACHE_PS_4 | ||
404 | 0x00028954 SQ_ALU_CONST_CACHE_PS_5 | ||
405 | 0x00028958 SQ_ALU_CONST_CACHE_PS_6 | ||
406 | 0x0002895C SQ_ALU_CONST_CACHE_PS_7 | ||
407 | 0x00028960 SQ_ALU_CONST_CACHE_PS_8 | ||
408 | 0x00028964 SQ_ALU_CONST_CACHE_PS_9 | ||
409 | 0x00028968 SQ_ALU_CONST_CACHE_PS_10 | ||
410 | 0x0002896C SQ_ALU_CONST_CACHE_PS_11 | ||
411 | 0x00028970 SQ_ALU_CONST_CACHE_PS_12 | ||
412 | 0x00028974 SQ_ALU_CONST_CACHE_PS_13 | ||
413 | 0x00028978 SQ_ALU_CONST_CACHE_PS_14 | ||
414 | 0x0002897C SQ_ALU_CONST_CACHE_PS_15 | ||
415 | 0x00028980 SQ_ALU_CONST_CACHE_VS_0 | ||
416 | 0x00028984 SQ_ALU_CONST_CACHE_VS_1 | ||
417 | 0x00028988 SQ_ALU_CONST_CACHE_VS_2 | ||
418 | 0x0002898C SQ_ALU_CONST_CACHE_VS_3 | ||
419 | 0x00028990 SQ_ALU_CONST_CACHE_VS_4 | ||
420 | 0x00028994 SQ_ALU_CONST_CACHE_VS_5 | ||
421 | 0x00028998 SQ_ALU_CONST_CACHE_VS_6 | ||
422 | 0x0002899C SQ_ALU_CONST_CACHE_VS_7 | ||
423 | 0x000289A0 SQ_ALU_CONST_CACHE_VS_8 | ||
424 | 0x000289A4 SQ_ALU_CONST_CACHE_VS_9 | ||
425 | 0x000289A8 SQ_ALU_CONST_CACHE_VS_10 | ||
426 | 0x000289AC SQ_ALU_CONST_CACHE_VS_11 | ||
427 | 0x000289B0 SQ_ALU_CONST_CACHE_VS_12 | ||
428 | 0x000289B4 SQ_ALU_CONST_CACHE_VS_13 | ||
429 | 0x000289B8 SQ_ALU_CONST_CACHE_VS_14 | ||
430 | 0x000289BC SQ_ALU_CONST_CACHE_VS_15 | ||
431 | 0x000288D8 SQ_PGM_CF_OFFSET_ES | 366 | 0x000288D8 SQ_PGM_CF_OFFSET_ES |
432 | 0x000288DC SQ_PGM_CF_OFFSET_FS | 367 | 0x000288DC SQ_PGM_CF_OFFSET_FS |
433 | 0x000288D4 SQ_PGM_CF_OFFSET_GS | 368 | 0x000288D4 SQ_PGM_CF_OFFSET_GS |
@@ -494,12 +429,7 @@ r600 0x9400 | |||
494 | 0x00028438 SX_ALPHA_REF | 429 | 0x00028438 SX_ALPHA_REF |
495 | 0x00028410 SX_ALPHA_TEST_CONTROL | 430 | 0x00028410 SX_ALPHA_TEST_CONTROL |
496 | 0x00028350 SX_MISC | 431 | 0x00028350 SX_MISC |
497 | 0x0000A020 SMX_DC_CTL0 | ||
498 | 0x0000A024 SMX_DC_CTL1 | ||
499 | 0x0000A028 SMX_DC_CTL2 | ||
500 | 0x00009608 TC_CNTL | ||
501 | 0x00009604 TC_INVALIDATE | 432 | 0x00009604 TC_INVALIDATE |
502 | 0x00009490 TD_CNTL | ||
503 | 0x00009400 TD_FILTER4 | 433 | 0x00009400 TD_FILTER4 |
504 | 0x00009404 TD_FILTER4_1 | 434 | 0x00009404 TD_FILTER4_1 |
505 | 0x00009408 TD_FILTER4_2 | 435 | 0x00009408 TD_FILTER4_2 |
@@ -824,14 +754,9 @@ r600 0x9400 | |||
824 | 0x00028428 CB_FOG_GREEN | 754 | 0x00028428 CB_FOG_GREEN |
825 | 0x00028424 CB_FOG_RED | 755 | 0x00028424 CB_FOG_RED |
826 | 0x00008040 WAIT_UNTIL | 756 | 0x00008040 WAIT_UNTIL |
827 | 0x00008950 CC_GC_SHADER_PIPE_CONFIG | ||
828 | 0x00008954 GC_USER_SHADER_PIPE_CONFIG | ||
829 | 0x00009714 VC_ENHANCE | 757 | 0x00009714 VC_ENHANCE |
830 | 0x00009830 DB_DEBUG | 758 | 0x00009830 DB_DEBUG |
831 | 0x00009838 DB_WATERMARKS | 759 | 0x00009838 DB_WATERMARKS |
832 | 0x00028D28 DB_SRESULTS_COMPARE_STATE0 | 760 | 0x00028D28 DB_SRESULTS_COMPARE_STATE0 |
833 | 0x00028D44 DB_ALPHA_TO_MASK | 761 | 0x00028D44 DB_ALPHA_TO_MASK |
834 | 0x00009504 TA_CNTL | ||
835 | 0x00009700 VC_CNTL | 762 | 0x00009700 VC_CNTL |
836 | 0x00009718 VC_CONFIG | ||
837 | 0x0000A02C SMX_DC_MC_INTF_CTL | ||
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 626d51891ee9..626aaf082b1a 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "radeon_asic.h" | ||
31 | #include "rs400d.h" | 32 | #include "rs400d.h" |
32 | 33 | ||
33 | /* This files gather functions specifics to : rs400,rs480 */ | 34 | /* This files gather functions specifics to : rs400,rs480 */ |
@@ -202,9 +203,9 @@ void rs400_gart_disable(struct radeon_device *rdev) | |||
202 | 203 | ||
203 | void rs400_gart_fini(struct radeon_device *rdev) | 204 | void rs400_gart_fini(struct radeon_device *rdev) |
204 | { | 205 | { |
206 | radeon_gart_fini(rdev); | ||
205 | rs400_gart_disable(rdev); | 207 | rs400_gart_disable(rdev); |
206 | radeon_gart_table_ram_free(rdev); | 208 | radeon_gart_table_ram_free(rdev); |
207 | radeon_gart_fini(rdev); | ||
208 | } | 209 | } |
209 | 210 | ||
210 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 211 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
@@ -264,6 +265,7 @@ void rs400_mc_init(struct radeon_device *rdev) | |||
264 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; | 265 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
265 | radeon_vram_location(rdev, &rdev->mc, base); | 266 | radeon_vram_location(rdev, &rdev->mc, base); |
266 | radeon_gtt_location(rdev, &rdev->mc); | 267 | radeon_gtt_location(rdev, &rdev->mc); |
268 | radeon_update_bandwidth_info(rdev); | ||
267 | } | 269 | } |
268 | 270 | ||
269 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 271 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
@@ -388,6 +390,8 @@ static int rs400_startup(struct radeon_device *rdev) | |||
388 | { | 390 | { |
389 | int r; | 391 | int r; |
390 | 392 | ||
393 | r100_set_common_regs(rdev); | ||
394 | |||
391 | rs400_mc_program(rdev); | 395 | rs400_mc_program(rdev); |
392 | /* Resume clock */ | 396 | /* Resume clock */ |
393 | r300_clock_startup(rdev); | 397 | r300_clock_startup(rdev); |
@@ -453,6 +457,7 @@ int rs400_suspend(struct radeon_device *rdev) | |||
453 | 457 | ||
454 | void rs400_fini(struct radeon_device *rdev) | 458 | void rs400_fini(struct radeon_device *rdev) |
455 | { | 459 | { |
460 | radeon_pm_fini(rdev); | ||
456 | r100_cp_fini(rdev); | 461 | r100_cp_fini(rdev); |
457 | r100_wb_fini(rdev); | 462 | r100_wb_fini(rdev); |
458 | r100_ib_fini(rdev); | 463 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 47f046b78c6b..abf824c2123d 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -37,6 +37,7 @@ | |||
37 | */ | 37 | */ |
38 | #include "drmP.h" | 38 | #include "drmP.h" |
39 | #include "radeon.h" | 39 | #include "radeon.h" |
40 | #include "radeon_asic.h" | ||
40 | #include "atom.h" | 41 | #include "atom.h" |
41 | #include "rs600d.h" | 42 | #include "rs600d.h" |
42 | 43 | ||
@@ -267,9 +268,9 @@ void rs600_gart_disable(struct radeon_device *rdev) | |||
267 | 268 | ||
268 | void rs600_gart_fini(struct radeon_device *rdev) | 269 | void rs600_gart_fini(struct radeon_device *rdev) |
269 | { | 270 | { |
271 | radeon_gart_fini(rdev); | ||
270 | rs600_gart_disable(rdev); | 272 | rs600_gart_disable(rdev); |
271 | radeon_gart_table_vram_free(rdev); | 273 | radeon_gart_table_vram_free(rdev); |
272 | radeon_gart_fini(rdev); | ||
273 | } | 274 | } |
274 | 275 | ||
275 | #define R600_PTE_VALID (1 << 0) | 276 | #define R600_PTE_VALID (1 << 0) |
@@ -392,10 +393,12 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
392 | /* Vertical blank interrupts */ | 393 | /* Vertical blank interrupts */ |
393 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { | 394 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { |
394 | drm_handle_vblank(rdev->ddev, 0); | 395 | drm_handle_vblank(rdev->ddev, 0); |
396 | rdev->pm.vblank_sync = true; | ||
395 | wake_up(&rdev->irq.vblank_queue); | 397 | wake_up(&rdev->irq.vblank_queue); |
396 | } | 398 | } |
397 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { | 399 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { |
398 | drm_handle_vblank(rdev->ddev, 1); | 400 | drm_handle_vblank(rdev->ddev, 1); |
401 | rdev->pm.vblank_sync = true; | ||
399 | wake_up(&rdev->irq.vblank_queue); | 402 | wake_up(&rdev->irq.vblank_queue); |
400 | } | 403 | } |
401 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { | 404 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { |
@@ -472,13 +475,38 @@ void rs600_mc_init(struct radeon_device *rdev) | |||
472 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 475 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
473 | base = RREG32_MC(R_000004_MC_FB_LOCATION); | 476 | base = RREG32_MC(R_000004_MC_FB_LOCATION); |
474 | base = G_000004_MC_FB_START(base) << 16; | 477 | base = G_000004_MC_FB_START(base) << 16; |
478 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | ||
475 | radeon_vram_location(rdev, &rdev->mc, base); | 479 | radeon_vram_location(rdev, &rdev->mc, base); |
476 | radeon_gtt_location(rdev, &rdev->mc); | 480 | radeon_gtt_location(rdev, &rdev->mc); |
481 | radeon_update_bandwidth_info(rdev); | ||
477 | } | 482 | } |
478 | 483 | ||
479 | void rs600_bandwidth_update(struct radeon_device *rdev) | 484 | void rs600_bandwidth_update(struct radeon_device *rdev) |
480 | { | 485 | { |
481 | /* FIXME: implement, should this be like rs690 ? */ | 486 | struct drm_display_mode *mode0 = NULL; |
487 | struct drm_display_mode *mode1 = NULL; | ||
488 | u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; | ||
489 | /* FIXME: implement full support */ | ||
490 | |||
491 | radeon_update_display_priority(rdev); | ||
492 | |||
493 | if (rdev->mode_info.crtcs[0]->base.enabled) | ||
494 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | ||
495 | if (rdev->mode_info.crtcs[1]->base.enabled) | ||
496 | mode1 = &rdev->mode_info.crtcs[1]->base.mode; | ||
497 | |||
498 | rs690_line_buffer_adjust(rdev, mode0, mode1); | ||
499 | |||
500 | if (rdev->disp_priority == 2) { | ||
501 | d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT); | ||
502 | d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT); | ||
503 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); | ||
504 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); | ||
505 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | ||
506 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); | ||
507 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); | ||
508 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); | ||
509 | } | ||
482 | } | 510 | } |
483 | 511 | ||
484 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 512 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
@@ -598,6 +626,7 @@ int rs600_suspend(struct radeon_device *rdev) | |||
598 | 626 | ||
599 | void rs600_fini(struct radeon_device *rdev) | 627 | void rs600_fini(struct radeon_device *rdev) |
600 | { | 628 | { |
629 | radeon_pm_fini(rdev); | ||
601 | r100_cp_fini(rdev); | 630 | r100_cp_fini(rdev); |
602 | r100_wb_fini(rdev); | 631 | r100_wb_fini(rdev); |
603 | r100_ib_fini(rdev); | 632 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h index c1c8f5885cbb..e52d2695510b 100644 --- a/drivers/gpu/drm/radeon/rs600d.h +++ b/drivers/gpu/drm/radeon/rs600d.h | |||
@@ -535,4 +535,57 @@ | |||
535 | #define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) | 535 | #define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) |
536 | #define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF | 536 | #define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF |
537 | 537 | ||
538 | #define R_006548_D1MODE_PRIORITY_A_CNT 0x006548 | ||
539 | #define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) | ||
540 | #define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) | ||
541 | #define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000 | ||
542 | #define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | ||
543 | #define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | ||
544 | #define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF | ||
545 | #define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
546 | #define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
547 | #define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF | ||
548 | #define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
549 | #define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
550 | #define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | ||
551 | #define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C | ||
552 | #define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) | ||
553 | #define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) | ||
554 | #define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000 | ||
555 | #define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) | ||
556 | #define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) | ||
557 | #define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF | ||
558 | #define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
559 | #define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
560 | #define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF | ||
561 | #define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
562 | #define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
563 | #define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF | ||
564 | #define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48 | ||
565 | #define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) | ||
566 | #define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) | ||
567 | #define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000 | ||
568 | #define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | ||
569 | #define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | ||
570 | #define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF | ||
571 | #define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
572 | #define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
573 | #define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF | ||
574 | #define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
575 | #define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
576 | #define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | ||
577 | #define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C | ||
578 | #define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) | ||
579 | #define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) | ||
580 | #define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000 | ||
581 | #define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) | ||
582 | #define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) | ||
583 | #define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF | ||
584 | #define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
585 | #define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
586 | #define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF | ||
587 | #define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
588 | #define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
589 | #define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF | ||
590 | |||
538 | #endif | 591 | #endif |
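
Aside (not part of the patch): the new defines follow the usual radeon register-header convention: R_* is the register offset, S_*(x) shifts a value into the field, G_*(x) extracts it, and C_* is the AND-mask that clears it. The rs600_bandwidth_update() hunk above uses them for updates along the lines of this kernel-style sketch (not compilable on its own):

    /* force D1 priority mark A always on */
    u32 tmp = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
    tmp &= C_006548_D1MODE_PRIORITY_A_ALWAYS_ON;       /* clear the field */
    tmp |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);    /* set it */
    WREG32(R_006548_D1MODE_PRIORITY_A_CNT, tmp);
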
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 83b9174f76f2..bbf3da790fd5 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -27,6 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "radeon_asic.h" | ||
30 | #include "atom.h" | 31 | #include "atom.h" |
31 | #include "rs690d.h" | 32 | #include "rs690d.h" |
32 | 33 | ||
@@ -57,42 +58,57 @@ static void rs690_gpu_init(struct radeon_device *rdev) | |||
57 | } | 58 | } |
58 | } | 59 | } |
59 | 60 | ||
61 | union igp_info { | ||
62 | struct _ATOM_INTEGRATED_SYSTEM_INFO info; | ||
63 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2; | ||
64 | }; | ||
65 | |||
60 | void rs690_pm_info(struct radeon_device *rdev) | 66 | void rs690_pm_info(struct radeon_device *rdev) |
61 | { | 67 | { |
62 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | 68 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); |
63 | struct _ATOM_INTEGRATED_SYSTEM_INFO *info; | 69 | union igp_info *info; |
64 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2; | ||
65 | void *ptr; | ||
66 | uint16_t data_offset; | 70 | uint16_t data_offset; |
67 | uint8_t frev, crev; | 71 | uint8_t frev, crev; |
68 | fixed20_12 tmp; | 72 | fixed20_12 tmp; |
69 | 73 | ||
70 | atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, | 74 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, |
71 | &frev, &crev, &data_offset); | 75 | &frev, &crev, &data_offset)) { |
72 | ptr = rdev->mode_info.atom_context->bios + data_offset; | 76 | info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset); |
73 | info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr; | 77 | |
74 | info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr; | 78 | /* Get various system informations from bios */ |
75 | /* Get various system informations from bios */ | 79 | switch (crev) { |
76 | switch (crev) { | 80 | case 1: |
77 | case 1: | 81 | tmp.full = rfixed_const(100); |
78 | tmp.full = rfixed_const(100); | 82 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock); |
79 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock); | 83 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
80 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 84 | rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); |
81 | rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock)); | 85 | rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock)); |
82 | rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock)); | 86 | rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth); |
83 | rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth); | 87 | break; |
84 | break; | 88 | case 2: |
85 | case 2: | 89 | tmp.full = rfixed_const(100); |
86 | tmp.full = rfixed_const(100); | 90 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock); |
87 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock); | 91 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
88 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 92 | rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock); |
89 | rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock); | 93 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); |
90 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | 94 | rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq); |
91 | rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq); | 95 | rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
92 | rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); | 96 | rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); |
93 | rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth)); | 97 | break; |
94 | break; | 98 | default: |
95 | default: | 99 | tmp.full = rfixed_const(100); |
100 | /* We assume the slower possible clock ie worst case */ | ||
101 | /* DDR 333Mhz */ | ||
102 | rdev->pm.igp_sideport_mclk.full = rfixed_const(333); | ||
103 | /* FIXME: system clock ? */ | ||
104 | rdev->pm.igp_system_mclk.full = rfixed_const(100); | ||
105 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
106 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); | ||
107 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); | ||
108 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | ||
109 | break; | ||
110 | } | ||
111 | } else { | ||
96 | tmp.full = rfixed_const(100); | 112 | tmp.full = rfixed_const(100); |
97 | /* We assume the slower possible clock ie worst case */ | 113 | /* We assume the slower possible clock ie worst case */ |
98 | /* DDR 333Mhz */ | 114 | /* DDR 333Mhz */ |
@@ -103,7 +119,6 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
103 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); | 119 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); |
104 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); | 120 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); |
105 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 121 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
106 | break; | ||
107 | } | 122 | } |
108 | /* Compute various bandwidth */ | 123 | /* Compute various bandwidth */ |
109 | /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ | 124 | /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ |
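
Aside (not part of the patch): the rework replaces two separately cast pointers with a single union over the v1 and v2 ATOM IntegratedSystemInfo layouts, and only dereferences it when atom_parse_data_header() actually found the table. The pattern, with made-up field layouts for illustration:

    #include <stdint.h>
    #include <stdio.h>

    struct info_v1 { uint32_t boot_up_memory_clock; };    /* illustrative, not the real ATOM layout */
    struct info_v2 { uint32_t boot_up_sideport_clock; };  /* illustrative, not the real ATOM layout */

    union igp_info_sketch {
            struct info_v1 v1;
            struct info_v2 v2;
    };

    void parse_igp_info(const uint8_t *bios, uint16_t data_offset, uint8_t crev)
    {
            const union igp_info_sketch *info =
                    (const union igp_info_sketch *)(bios + data_offset);

            switch (crev) {
            case 1:
                    printf("boot-up mclk %u\n", info->v1.boot_up_memory_clock);
                    break;
            case 2:
                    printf("sideport clk %u\n", info->v2.boot_up_sideport_clock);
                    break;
            default:
                    printf("unknown table revision, using safe defaults\n");
                    break;
            }
    }
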
@@ -131,7 +146,6 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
131 | 146 | ||
132 | void rs690_mc_init(struct radeon_device *rdev) | 147 | void rs690_mc_init(struct radeon_device *rdev) |
133 | { | 148 | { |
134 | fixed20_12 a; | ||
135 | u64 base; | 149 | u64 base; |
136 | 150 | ||
137 | rs400_gart_adjust_size(rdev); | 151 | rs400_gart_adjust_size(rdev); |
@@ -145,18 +159,10 @@ void rs690_mc_init(struct radeon_device *rdev) | |||
145 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); | 159 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); |
146 | base = G_000100_MC_FB_START(base) << 16; | 160 | base = G_000100_MC_FB_START(base) << 16; |
147 | rs690_pm_info(rdev); | 161 | rs690_pm_info(rdev); |
148 | /* FIXME: we should enforce default clock in case GPU is not in | ||
149 | * default setup | ||
150 | */ | ||
151 | a.full = rfixed_const(100); | ||
152 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
153 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
154 | a.full = rfixed_const(16); | ||
155 | /* core_bandwidth = sclk(Mhz) * 16 */ | ||
156 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); | ||
157 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 162 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
158 | radeon_vram_location(rdev, &rdev->mc, base); | 163 | radeon_vram_location(rdev, &rdev->mc, base); |
159 | radeon_gtt_location(rdev, &rdev->mc); | 164 | radeon_gtt_location(rdev, &rdev->mc); |
165 | radeon_update_bandwidth_info(rdev); | ||
160 | } | 166 | } |
161 | 167 | ||
162 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | 168 | void rs690_line_buffer_adjust(struct radeon_device *rdev, |
@@ -394,10 +400,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
394 | struct drm_display_mode *mode1 = NULL; | 400 | struct drm_display_mode *mode1 = NULL; |
395 | struct rs690_watermark wm0; | 401 | struct rs690_watermark wm0; |
396 | struct rs690_watermark wm1; | 402 | struct rs690_watermark wm1; |
397 | u32 tmp; | 403 | u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt; |
398 | fixed20_12 priority_mark02, priority_mark12, fill_rate; | 404 | fixed20_12 priority_mark02, priority_mark12, fill_rate; |
399 | fixed20_12 a, b; | 405 | fixed20_12 a, b; |
400 | 406 | ||
407 | radeon_update_display_priority(rdev); | ||
408 | |||
401 | if (rdev->mode_info.crtcs[0]->base.enabled) | 409 | if (rdev->mode_info.crtcs[0]->base.enabled) |
402 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | 410 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; |
403 | if (rdev->mode_info.crtcs[1]->base.enabled) | 411 | if (rdev->mode_info.crtcs[1]->base.enabled) |
@@ -407,7 +415,8 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
407 | * modes if the user specifies HIGH for displaypriority | 415 | * modes if the user specifies HIGH for displaypriority |
408 | * option. | 416 | * option. |
409 | */ | 417 | */ |
410 | if (rdev->disp_priority == 2) { | 418 | if ((rdev->disp_priority == 2) && |
419 | ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) { | ||
411 | tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); | 420 | tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); |
412 | tmp &= C_000104_MC_DISP0R_INIT_LAT; | 421 | tmp &= C_000104_MC_DISP0R_INIT_LAT; |
413 | tmp &= C_000104_MC_DISP1R_INIT_LAT; | 422 | tmp &= C_000104_MC_DISP1R_INIT_LAT; |
@@ -482,10 +491,16 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
482 | priority_mark12.full = 0; | 491 | priority_mark12.full = 0; |
483 | if (wm1.priority_mark_max.full > priority_mark12.full) | 492 | if (wm1.priority_mark_max.full > priority_mark12.full) |
484 | priority_mark12.full = wm1.priority_mark_max.full; | 493 | priority_mark12.full = wm1.priority_mark_max.full; |
485 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 494 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); |
486 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 495 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); |
487 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 496 | if (rdev->disp_priority == 2) { |
488 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 497 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); |
498 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); | ||
499 | } | ||
500 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | ||
501 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); | ||
502 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); | ||
503 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); | ||
489 | } else if (mode0) { | 504 | } else if (mode0) { |
490 | if (rfixed_trunc(wm0.dbpp) > 64) | 505 | if (rfixed_trunc(wm0.dbpp) > 64) |
491 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | 506 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); |
@@ -512,8 +527,11 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
512 | priority_mark02.full = 0; | 527 | priority_mark02.full = 0; |
513 | if (wm0.priority_mark_max.full > priority_mark02.full) | 528 | if (wm0.priority_mark_max.full > priority_mark02.full) |
514 | priority_mark02.full = wm0.priority_mark_max.full; | 529 | priority_mark02.full = wm0.priority_mark_max.full; |
515 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 530 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); |
516 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 531 | if (rdev->disp_priority == 2) |
532 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); | ||
533 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | ||
534 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); | ||
517 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, | 535 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, |
518 | S_006D48_D2MODE_PRIORITY_A_OFF(1)); | 536 | S_006D48_D2MODE_PRIORITY_A_OFF(1)); |
519 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, | 537 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, |
@@ -544,12 +562,15 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
544 | priority_mark12.full = 0; | 562 | priority_mark12.full = 0; |
545 | if (wm1.priority_mark_max.full > priority_mark12.full) | 563 | if (wm1.priority_mark_max.full > priority_mark12.full) |
546 | priority_mark12.full = wm1.priority_mark_max.full; | 564 | priority_mark12.full = wm1.priority_mark_max.full; |
565 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | ||
566 | if (rdev->disp_priority == 2) | ||
567 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); | ||
547 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, | 568 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, |
548 | S_006548_D1MODE_PRIORITY_A_OFF(1)); | 569 | S_006548_D1MODE_PRIORITY_A_OFF(1)); |
549 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, | 570 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, |
550 | S_00654C_D1MODE_PRIORITY_B_OFF(1)); | 571 | S_00654C_D1MODE_PRIORITY_B_OFF(1)); |
551 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 572 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); |
552 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 573 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); |
553 | } | 574 | } |
554 | } | 575 | } |
555 | 576 | ||
@@ -657,6 +678,7 @@ int rs690_suspend(struct radeon_device *rdev) | |||
657 | 678 | ||
658 | void rs690_fini(struct radeon_device *rdev) | 679 | void rs690_fini(struct radeon_device *rdev) |
659 | { | 680 | { |
681 | radeon_pm_fini(rdev); | ||
660 | r100_cp_fini(rdev); | 682 | r100_cp_fini(rdev); |
661 | r100_wb_fini(rdev); | 683 | r100_wb_fini(rdev); |
662 | r100_ib_fini(rdev); | 684 | r100_ib_fini(rdev); |
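The rs690/rv515/rv770 hunks in this patch drop the open-coded "enforce default clock" setup from their *_mc_init() paths and rely on radeon_update_bandwidth_info() instead. The arithmetic being removed, like the bandwidth comments in rs690_pm_info(), is ordinary 20.12 fixed point. The userspace model below is a sketch only: it assumes the conventional fixed20_12 layout (integer value shifted left by 12 bits) and that default_sclk is stored in 10 kHz units; it is not the driver's actual radeon_fixed.h implementation.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;      /* 20.12 fixed point */

static fixed20_12 rfixed_const(uint32_t v) { return (fixed20_12){ v << 12 }; }

static fixed20_12 rfixed_div(fixed20_12 a, fixed20_12 b)
{
        /* widen so the pre-division shift cannot overflow */
        return (fixed20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) };
}

static uint32_t rfixed_trunc(fixed20_12 a) { return a.full >> 12; }

int main(void)
{
        /* default_sclk in 10 kHz units (assumed); dividing by 100 yields MHz,
         * which is what the removed rs690/rv515/rv770 code did by hand. */
        fixed20_12 sclk = rfixed_const(60000);      /* 60000 * 10 kHz */
        fixed20_12 hundred = rfixed_const(100);

        sclk = rfixed_div(sclk, hundred);
        printf("sclk = %u MHz\n", rfixed_trunc(sclk));   /* prints 600 */
        return 0;
}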
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h index 62d31e7a897f..36e6398a98ae 100644 --- a/drivers/gpu/drm/radeon/rs690d.h +++ b/drivers/gpu/drm/radeon/rs690d.h | |||
@@ -182,6 +182,9 @@ | |||
182 | #define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | 182 | #define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) |
183 | #define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | 183 | #define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) |
184 | #define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF | 184 | #define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF |
185 | #define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
186 | #define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
187 | #define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF | ||
185 | #define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | 188 | #define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) |
186 | #define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | 189 | #define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) |
187 | #define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | 190 | #define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF |
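The D1MODE_PRIORITY_A_ALWAYS_ON definitions added to rs690d.h follow the file's existing S_/G_/C_ convention: S_ shifts a value into the field, G_ extracts it, and C_ is the AND-mask that clears it. A small self-contained check of that relationship for this 1-bit field at bit 20; the three constants are copied from the hunk, the test harness itself is only illustrative.

#include <assert.h>
#include <stdint.h>

#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)   (((x) & 0x1) << 20)
#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)   (((x) >> 20) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON      0xFFEFFFFF

int main(void)
{
        uint32_t reg = 0;

        reg |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);        /* set bit 20 */
        assert(G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(reg) == 1);

        reg &= C_006548_D1MODE_PRIORITY_A_ALWAYS_ON;           /* clear it again */
        assert(reg == 0);
        return 0;
}

This is the bit the rs690_bandwidth_update() hunk above ORs into the staged D1MODE/D2MODE_PRIORITY_*_CNT values when disp_priority == 2.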
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index bea747da123f..1cf233f7e516 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "rv515d.h" | 30 | #include "rv515d.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_asic.h" | ||
32 | #include "atom.h" | 33 | #include "atom.h" |
33 | #include "rv515_reg_safe.h" | 34 | #include "rv515_reg_safe.h" |
34 | 35 | ||
@@ -279,19 +280,13 @@ static void rv515_vram_get_type(struct radeon_device *rdev) | |||
279 | 280 | ||
280 | void rv515_mc_init(struct radeon_device *rdev) | 281 | void rv515_mc_init(struct radeon_device *rdev) |
281 | { | 282 | { |
282 | fixed20_12 a; | ||
283 | 283 | ||
284 | rv515_vram_get_type(rdev); | 284 | rv515_vram_get_type(rdev); |
285 | r100_vram_init_sizes(rdev); | 285 | r100_vram_init_sizes(rdev); |
286 | radeon_vram_location(rdev, &rdev->mc, 0); | 286 | radeon_vram_location(rdev, &rdev->mc, 0); |
287 | if (!(rdev->flags & RADEON_IS_AGP)) | 287 | if (!(rdev->flags & RADEON_IS_AGP)) |
288 | radeon_gtt_location(rdev, &rdev->mc); | 288 | radeon_gtt_location(rdev, &rdev->mc); |
289 | /* FIXME: we should enforce default clock in case GPU is not in | 289 | radeon_update_bandwidth_info(rdev); |
290 | * default setup | ||
291 | */ | ||
292 | a.full = rfixed_const(100); | ||
293 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
294 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
295 | } | 290 | } |
296 | 291 | ||
297 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 292 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
@@ -539,6 +534,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) | |||
539 | 534 | ||
540 | void rv515_fini(struct radeon_device *rdev) | 535 | void rv515_fini(struct radeon_device *rdev) |
541 | { | 536 | { |
537 | radeon_pm_fini(rdev); | ||
542 | r100_cp_fini(rdev); | 538 | r100_cp_fini(rdev); |
543 | r100_wb_fini(rdev); | 539 | r100_wb_fini(rdev); |
544 | r100_ib_fini(rdev); | 540 | r100_ib_fini(rdev); |
@@ -1020,7 +1016,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
1020 | struct drm_display_mode *mode1 = NULL; | 1016 | struct drm_display_mode *mode1 = NULL; |
1021 | struct rv515_watermark wm0; | 1017 | struct rv515_watermark wm0; |
1022 | struct rv515_watermark wm1; | 1018 | struct rv515_watermark wm1; |
1023 | u32 tmp; | 1019 | u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt; |
1024 | fixed20_12 priority_mark02, priority_mark12, fill_rate; | 1020 | fixed20_12 priority_mark02, priority_mark12, fill_rate; |
1025 | fixed20_12 a, b; | 1021 | fixed20_12 a, b; |
1026 | 1022 | ||
@@ -1088,10 +1084,16 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
1088 | priority_mark12.full = 0; | 1084 | priority_mark12.full = 0; |
1089 | if (wm1.priority_mark_max.full > priority_mark12.full) | 1085 | if (wm1.priority_mark_max.full > priority_mark12.full) |
1090 | priority_mark12.full = wm1.priority_mark_max.full; | 1086 | priority_mark12.full = wm1.priority_mark_max.full; |
1091 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 1087 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); |
1092 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 1088 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); |
1093 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 1089 | if (rdev->disp_priority == 2) { |
1094 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 1090 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
1091 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | ||
1092 | } | ||
1093 | WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | ||
1094 | WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); | ||
1095 | WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); | ||
1096 | WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); | ||
1095 | } else if (mode0) { | 1097 | } else if (mode0) { |
1096 | if (rfixed_trunc(wm0.dbpp) > 64) | 1098 | if (rfixed_trunc(wm0.dbpp) > 64) |
1097 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | 1099 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); |
@@ -1118,8 +1120,11 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
1118 | priority_mark02.full = 0; | 1120 | priority_mark02.full = 0; |
1119 | if (wm0.priority_mark_max.full > priority_mark02.full) | 1121 | if (wm0.priority_mark_max.full > priority_mark02.full) |
1120 | priority_mark02.full = wm0.priority_mark_max.full; | 1122 | priority_mark02.full = wm0.priority_mark_max.full; |
1121 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 1123 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); |
1122 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 1124 | if (rdev->disp_priority == 2) |
1125 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | ||
1126 | WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | ||
1127 | WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); | ||
1123 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 1128 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
1124 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 1129 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); |
1125 | } else { | 1130 | } else { |
@@ -1148,10 +1153,13 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
1148 | priority_mark12.full = 0; | 1153 | priority_mark12.full = 0; |
1149 | if (wm1.priority_mark_max.full > priority_mark12.full) | 1154 | if (wm1.priority_mark_max.full > priority_mark12.full) |
1150 | priority_mark12.full = wm1.priority_mark_max.full; | 1155 | priority_mark12.full = wm1.priority_mark_max.full; |
1156 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | ||
1157 | if (rdev->disp_priority == 2) | ||
1158 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | ||
1151 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 1159 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
1152 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 1160 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); |
1153 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 1161 | WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); |
1154 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 1162 | WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); |
1155 | } | 1163 | } |
1156 | } | 1164 | } |
1157 | 1165 | ||
@@ -1161,6 +1169,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev) | |||
1161 | struct drm_display_mode *mode0 = NULL; | 1169 | struct drm_display_mode *mode0 = NULL; |
1162 | struct drm_display_mode *mode1 = NULL; | 1170 | struct drm_display_mode *mode1 = NULL; |
1163 | 1171 | ||
1172 | radeon_update_display_priority(rdev); | ||
1173 | |||
1164 | if (rdev->mode_info.crtcs[0]->base.enabled) | 1174 | if (rdev->mode_info.crtcs[0]->base.enabled) |
1165 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | 1175 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; |
1166 | if (rdev->mode_info.crtcs[1]->base.enabled) | 1176 | if (rdev->mode_info.crtcs[1]->base.enabled) |
@@ -1170,7 +1180,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev) | |||
1170 | * modes if the user specifies HIGH for displaypriority | 1180 | * modes if the user specifies HIGH for displaypriority |
1171 | * option. | 1181 | * option. |
1172 | */ | 1182 | */ |
1173 | if (rdev->disp_priority == 2) { | 1183 | if ((rdev->disp_priority == 2) && |
1184 | (rdev->family == CHIP_RV515)) { | ||
1174 | tmp = RREG32_MC(MC_MISC_LAT_TIMER); | 1185 | tmp = RREG32_MC(MC_MISC_LAT_TIMER); |
1175 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; | 1186 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; |
1176 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; | 1187 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 37887dee12af..9f37d2efb0a9 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/platform_device.h> | 29 | #include <linux/platform_device.h> |
30 | #include "drmP.h" | 30 | #include "drmP.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_asic.h" | ||
32 | #include "radeon_drm.h" | 33 | #include "radeon_drm.h" |
33 | #include "rv770d.h" | 34 | #include "rv770d.h" |
34 | #include "atom.h" | 35 | #include "atom.h" |
@@ -125,9 +126,9 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev) | |||
125 | 126 | ||
126 | void rv770_pcie_gart_fini(struct radeon_device *rdev) | 127 | void rv770_pcie_gart_fini(struct radeon_device *rdev) |
127 | { | 128 | { |
129 | radeon_gart_fini(rdev); | ||
128 | rv770_pcie_gart_disable(rdev); | 130 | rv770_pcie_gart_disable(rdev); |
129 | radeon_gart_table_vram_free(rdev); | 131 | radeon_gart_table_vram_free(rdev); |
130 | radeon_gart_fini(rdev); | ||
131 | } | 132 | } |
132 | 133 | ||
133 | 134 | ||
@@ -647,10 +648,13 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
647 | 648 | ||
648 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 649 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
649 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 650 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
651 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | ||
650 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 652 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
651 | 653 | ||
652 | WREG32(CGTS_SYS_TCC_DISABLE, 0); | 654 | WREG32(CGTS_SYS_TCC_DISABLE, 0); |
653 | WREG32(CGTS_TCC_DISABLE, 0); | 655 | WREG32(CGTS_TCC_DISABLE, 0); |
656 | WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); | ||
657 | WREG32(CGTS_USER_TCC_DISABLE, 0); | ||
654 | 658 | ||
655 | num_qd_pipes = | 659 | num_qd_pipes = |
656 | R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); | 660 | R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); |
@@ -864,7 +868,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
864 | 868 | ||
865 | int rv770_mc_init(struct radeon_device *rdev) | 869 | int rv770_mc_init(struct radeon_device *rdev) |
866 | { | 870 | { |
867 | fixed20_12 a; | ||
868 | u32 tmp; | 871 | u32 tmp; |
869 | int chansize, numchan; | 872 | int chansize, numchan; |
870 | 873 | ||
@@ -908,12 +911,8 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
908 | rdev->mc.real_vram_size = rdev->mc.aper_size; | 911 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
909 | } | 912 | } |
910 | r600_vram_gtt_location(rdev, &rdev->mc); | 913 | r600_vram_gtt_location(rdev, &rdev->mc); |
911 | /* FIXME: we should enforce default clock in case GPU is not in | 914 | radeon_update_bandwidth_info(rdev); |
912 | * default setup | 915 | |
913 | */ | ||
914 | a.full = rfixed_const(100); | ||
915 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
916 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
917 | return 0; | 916 | return 0; |
918 | } | 917 | } |
919 | 918 | ||
@@ -1013,6 +1012,13 @@ int rv770_resume(struct radeon_device *rdev) | |||
1013 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1012 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
1014 | return r; | 1013 | return r; |
1015 | } | 1014 | } |
1015 | |||
1016 | r = r600_audio_init(rdev); | ||
1017 | if (r) { | ||
1018 | dev_err(rdev->dev, "radeon: audio init failed\n"); | ||
1019 | return r; | ||
1020 | } | ||
1021 | |||
1016 | return r; | 1022 | return r; |
1017 | 1023 | ||
1018 | } | 1024 | } |
@@ -1021,6 +1027,7 @@ int rv770_suspend(struct radeon_device *rdev) | |||
1021 | { | 1027 | { |
1022 | int r; | 1028 | int r; |
1023 | 1029 | ||
1030 | r600_audio_fini(rdev); | ||
1024 | /* FIXME: we should wait for ring to be empty */ | 1031 | /* FIXME: we should wait for ring to be empty */ |
1025 | r700_cp_stop(rdev); | 1032 | r700_cp_stop(rdev); |
1026 | rdev->cp.ready = false; | 1033 | rdev->cp.ready = false; |
@@ -1144,11 +1151,19 @@ int rv770_init(struct radeon_device *rdev) | |||
1144 | } | 1151 | } |
1145 | } | 1152 | } |
1146 | } | 1153 | } |
1154 | |||
1155 | r = r600_audio_init(rdev); | ||
1156 | if (r) { | ||
1157 | dev_err(rdev->dev, "radeon: audio init failed\n"); | ||
1158 | return r; | ||
1159 | } | ||
1160 | |||
1147 | return 0; | 1161 | return 0; |
1148 | } | 1162 | } |
1149 | 1163 | ||
1150 | void rv770_fini(struct radeon_device *rdev) | 1164 | void rv770_fini(struct radeon_device *rdev) |
1151 | { | 1165 | { |
1166 | radeon_pm_fini(rdev); | ||
1152 | r600_blit_fini(rdev); | 1167 | r600_blit_fini(rdev); |
1153 | r600_cp_fini(rdev); | 1168 | r600_cp_fini(rdev); |
1154 | r600_wb_fini(rdev); | 1169 | r600_wb_fini(rdev); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 89c38c49066f..dd47b2a9a791 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -1425,8 +1425,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref) | |||
1425 | 1425 | ||
1426 | atomic_set(&glob->bo_count, 0); | 1426 | atomic_set(&glob->bo_count, 0); |
1427 | 1427 | ||
1428 | kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type); | 1428 | ret = kobject_init_and_add( |
1429 | ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects"); | 1429 | &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects"); |
1430 | if (unlikely(ret != 0)) | 1430 | if (unlikely(ret != 0)) |
1431 | kobject_put(&glob->kobj); | 1431 | kobject_put(&glob->kobj); |
1432 | return ret; | 1432 | return ret; |
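This hunk, and the three ttm_memory.c zone hunks below, replace the kobject_init() + kobject_add() pair with kobject_init_and_add(), which performs both steps in one call. Either way, once the kobject has been initialised the caller holds a reference, so the error path still needs kobject_put(), which the patch keeps. A minimal sketch of the resulting pattern, using placeholder names rather than TTM's real structures:

#include <linux/kobject.h>

/* Illustrative only: register a kobject under "parent" and drop the
 * reference if registration fails.  "example_ktype" stands in for a real
 * kobj_type whose release/sysfs_ops are set up elsewhere. */
static int example_register(struct kobject *kobj, struct kobj_type *example_ktype,
                            struct kobject *parent)
{
        int ret;

        ret = kobject_init_and_add(kobj, example_ktype, parent, "example");
        if (ret)
                kobject_put(kobj);      /* release the init reference */
        return ret;
}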
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index eb143e04d402..c40e5f48e9a1 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c | |||
@@ -260,8 +260,8 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, | |||
260 | zone->used_mem = 0; | 260 | zone->used_mem = 0; |
261 | zone->glob = glob; | 261 | zone->glob = glob; |
262 | glob->zone_kernel = zone; | 262 | glob->zone_kernel = zone; |
263 | kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); | 263 | ret = kobject_init_and_add( |
264 | ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); | 264 | &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); |
265 | if (unlikely(ret != 0)) { | 265 | if (unlikely(ret != 0)) { |
266 | kobject_put(&zone->kobj); | 266 | kobject_put(&zone->kobj); |
267 | return ret; | 267 | return ret; |
@@ -296,8 +296,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, | |||
296 | zone->used_mem = 0; | 296 | zone->used_mem = 0; |
297 | zone->glob = glob; | 297 | zone->glob = glob; |
298 | glob->zone_highmem = zone; | 298 | glob->zone_highmem = zone; |
299 | kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); | 299 | ret = kobject_init_and_add( |
300 | ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); | 300 | &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); |
301 | if (unlikely(ret != 0)) { | 301 | if (unlikely(ret != 0)) { |
302 | kobject_put(&zone->kobj); | 302 | kobject_put(&zone->kobj); |
303 | return ret; | 303 | return ret; |
@@ -343,8 +343,8 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, | |||
343 | zone->used_mem = 0; | 343 | zone->used_mem = 0; |
344 | zone->glob = glob; | 344 | zone->glob = glob; |
345 | glob->zone_dma32 = zone; | 345 | glob->zone_dma32 = zone; |
346 | kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); | 346 | ret = kobject_init_and_add( |
347 | ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); | 347 | &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); |
348 | if (unlikely(ret != 0)) { | 348 | if (unlikely(ret != 0)) { |
349 | kobject_put(&zone->kobj); | 349 | kobject_put(&zone->kobj); |
350 | return ret; | 350 | return ret; |
@@ -365,10 +365,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) | |||
365 | glob->swap_queue = create_singlethread_workqueue("ttm_swap"); | 365 | glob->swap_queue = create_singlethread_workqueue("ttm_swap"); |
366 | INIT_WORK(&glob->work, ttm_shrink_work); | 366 | INIT_WORK(&glob->work, ttm_shrink_work); |
367 | init_waitqueue_head(&glob->queue); | 367 | init_waitqueue_head(&glob->queue); |
368 | kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type); | 368 | ret = kobject_init_and_add( |
369 | ret = kobject_add(&glob->kobj, | 369 | &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting"); |
370 | ttm_get_kobj(), | ||
371 | "memory_accounting"); | ||
372 | if (unlikely(ret != 0)) { | 370 | if (unlikely(ret != 0)) { |
373 | kobject_put(&glob->kobj); | 371 | kobject_put(&glob->kobj); |
374 | return ret; | 372 | return ret; |
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index a759170763bb..bab6cd8d8a1e 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -28,13 +28,13 @@ | |||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/vmalloc.h> | ||
32 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
33 | #include <linux/highmem.h> | 32 | #include <linux/highmem.h> |
34 | #include <linux/pagemap.h> | 33 | #include <linux/pagemap.h> |
35 | #include <linux/file.h> | 34 | #include <linux/file.h> |
36 | #include <linux/swap.h> | 35 | #include <linux/swap.h> |
37 | #include "drm_cache.h" | 36 | #include "drm_cache.h" |
37 | #include "drm_mem_util.h" | ||
38 | #include "ttm/ttm_module.h" | 38 | #include "ttm/ttm_module.h" |
39 | #include "ttm/ttm_bo_driver.h" | 39 | #include "ttm/ttm_bo_driver.h" |
40 | #include "ttm/ttm_placement.h" | 40 | #include "ttm/ttm_placement.h" |
@@ -43,32 +43,15 @@ static int ttm_tt_swapin(struct ttm_tt *ttm); | |||
43 | 43 | ||
44 | /** | 44 | /** |
45 | * Allocates storage for pointers to the pages that back the ttm. | 45 | * Allocates storage for pointers to the pages that back the ttm. |
46 | * | ||
47 | * Uses kmalloc if possible. Otherwise falls back to vmalloc. | ||
48 | */ | 46 | */ |
49 | static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) | 47 | static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) |
50 | { | 48 | { |
51 | unsigned long size = ttm->num_pages * sizeof(*ttm->pages); | 49 | ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages)); |
52 | ttm->pages = NULL; | ||
53 | |||
54 | if (size <= PAGE_SIZE) | ||
55 | ttm->pages = kzalloc(size, GFP_KERNEL); | ||
56 | |||
57 | if (!ttm->pages) { | ||
58 | ttm->pages = vmalloc_user(size); | ||
59 | if (ttm->pages) | ||
60 | ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC; | ||
61 | } | ||
62 | } | 50 | } |
63 | 51 | ||
64 | static void ttm_tt_free_page_directory(struct ttm_tt *ttm) | 52 | static void ttm_tt_free_page_directory(struct ttm_tt *ttm) |
65 | { | 53 | { |
66 | if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) { | 54 | drm_free_large(ttm->pages); |
67 | vfree(ttm->pages); | ||
68 | ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC; | ||
69 | } else { | ||
70 | kfree(ttm->pages); | ||
71 | } | ||
72 | ttm->pages = NULL; | 55 | ttm->pages = NULL; |
73 | } | 56 | } |
74 | 57 | ||
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index f20b8bcbef39..30ad13344f7b 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config DRM_VMWGFX | 1 | config DRM_VMWGFX |
2 | tristate "DRM driver for VMware Virtual GPU" | 2 | tristate "DRM driver for VMware Virtual GPU" |
3 | depends on DRM && PCI | 3 | depends on DRM && PCI && FB |
4 | select FB_DEFERRED_IO | 4 | select FB_DEFERRED_IO |
5 | select FB_CFB_FILLRECT | 5 | select FB_CFB_FILLRECT |
6 | select FB_CFB_COPYAREA | 6 | select FB_CFB_COPYAREA |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 4a3c4e441027..de2f82efb15f 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -1545,39 +1545,7 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map) | |||
1545 | { | 1545 | { |
1546 | } | 1546 | } |
1547 | 1547 | ||
1548 | 1548 | #include "drm_mem_util.h" | |
1549 | static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) | ||
1550 | { | ||
1551 | if (size != 0 && nmemb > ULONG_MAX / size) | ||
1552 | return NULL; | ||
1553 | |||
1554 | if (size * nmemb <= PAGE_SIZE) | ||
1555 | return kcalloc(nmemb, size, GFP_KERNEL); | ||
1556 | |||
1557 | return __vmalloc(size * nmemb, | ||
1558 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); | ||
1559 | } | ||
1560 | |||
1561 | /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */ | ||
1562 | static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) | ||
1563 | { | ||
1564 | if (size != 0 && nmemb > ULONG_MAX / size) | ||
1565 | return NULL; | ||
1566 | |||
1567 | if (size * nmemb <= PAGE_SIZE) | ||
1568 | return kmalloc(nmemb * size, GFP_KERNEL); | ||
1569 | |||
1570 | return __vmalloc(size * nmemb, | ||
1571 | GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); | ||
1572 | } | ||
1573 | |||
1574 | static __inline void drm_free_large(void *ptr) | ||
1575 | { | ||
1576 | if (!is_vmalloc_addr(ptr)) | ||
1577 | return kfree(ptr); | ||
1578 | |||
1579 | vfree(ptr); | ||
1580 | } | ||
1581 | /*@}*/ | 1549 | /*@}*/ |
1582 | 1550 | ||
1583 | #endif /* __KERNEL__ */ | 1551 | #endif /* __KERNEL__ */ |
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h new file mode 100644 index 000000000000..6bd325fedc87 --- /dev/null +++ b/include/drm/drm_mem_util.h | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * Copyright © 2008 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Jesse Barnes <jbarnes@virtuousgeek.org> | ||
25 | * | ||
26 | */ | ||
27 | #ifndef _DRM_MEM_UTIL_H_ | ||
28 | #define _DRM_MEM_UTIL_H_ | ||
29 | |||
30 | #include <linux/vmalloc.h> | ||
31 | |||
32 | static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) | ||
33 | { | ||
34 | if (size != 0 && nmemb > ULONG_MAX / size) | ||
35 | return NULL; | ||
36 | |||
37 | if (size * nmemb <= PAGE_SIZE) | ||
38 | return kcalloc(nmemb, size, GFP_KERNEL); | ||
39 | |||
40 | return __vmalloc(size * nmemb, | ||
41 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); | ||
42 | } | ||
43 | |||
44 | /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */ | ||
45 | static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) | ||
46 | { | ||
47 | if (size != 0 && nmemb > ULONG_MAX / size) | ||
48 | return NULL; | ||
49 | |||
50 | if (size * nmemb <= PAGE_SIZE) | ||
51 | return kmalloc(nmemb * size, GFP_KERNEL); | ||
52 | |||
53 | return __vmalloc(size * nmemb, | ||
54 | GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); | ||
55 | } | ||
56 | |||
57 | static __inline void drm_free_large(void *ptr) | ||
58 | { | ||
59 | if (!is_vmalloc_addr(ptr)) | ||
60 | return kfree(ptr); | ||
61 | |||
62 | vfree(ptr); | ||
63 | } | ||
64 | |||
65 | #endif | ||
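The guard "size != 0 && nmemb > ULONG_MAX / size" in drm_calloc_large() and drm_malloc_ab() rejects requests whose byte count would wrap around before the allocator ever sees it: if nmemb exceeds ULONG_MAX / size, then nmemb * size cannot be represented in an unsigned long. A small standalone check of that arithmetic (userspace, for illustration only):

#include <assert.h>
#include <limits.h>
#include <stddef.h>

static int alloc_would_overflow(size_t nmemb, size_t size)
{
        return size != 0 && nmemb > ULONG_MAX / size;
}

int main(void)
{
        assert(!alloc_would_overflow(1024, sizeof(void *)));      /* small request, fine */
        assert(alloc_would_overflow(ULONG_MAX / 4 + 1, 8));       /* product would wrap */
        assert(!alloc_would_overflow(0, 8));                      /* zero elements is fine */
        return 0;
}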
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 676104b7818c..04a6ebc27b96 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -410,6 +410,7 @@ | |||
410 | {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 410 | {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
411 | {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 411 | {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
412 | {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 412 | {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
413 | {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
413 | {0, 0, 0} | 414 | {0, 0, 0} |
414 | 415 | ||
415 | #define r128_PCI_IDS \ | 416 | #define r128_PCI_IDS \ |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index e3f1b4a4b601..e929c27ede22 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -115,7 +115,6 @@ struct ttm_backend { | |||
115 | struct ttm_backend_func *func; | 115 | struct ttm_backend_func *func; |
116 | }; | 116 | }; |
117 | 117 | ||
118 | #define TTM_PAGE_FLAG_VMALLOC (1 << 0) | ||
119 | #define TTM_PAGE_FLAG_USER (1 << 1) | 118 | #define TTM_PAGE_FLAG_USER (1 << 1) |
120 | #define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) | 119 | #define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) |
121 | #define TTM_PAGE_FLAG_WRITE (1 << 3) | 120 | #define TTM_PAGE_FLAG_WRITE (1 << 3) |