 drivers/gpu/drm/drm_crtc_helper.c          |   3
 drivers/gpu/drm/drm_mm.c                   | 108
 drivers/gpu/drm/radeon/atombios_crtc.c     |  19
 drivers/gpu/drm/radeon/r100.c              |   2
 drivers/gpu/drm/radeon/r300.c              |   2
 drivers/gpu/drm/radeon/r420.c              |   3
 drivers/gpu/drm/radeon/r520.c              |   2
 drivers/gpu/drm/radeon/r600.c              |  24
 drivers/gpu/drm/radeon/radeon.h            |   4
 drivers/gpu/drm/radeon/radeon_atombios.c   | 112
 drivers/gpu/drm/radeon/radeon_connectors.c |  20
 drivers/gpu/drm/radeon/radeon_device.c     |  15
 drivers/gpu/drm/radeon/radeon_display.c    |  96
 drivers/gpu/drm/radeon/radeon_drv.c        |   4
 drivers/gpu/drm/radeon/radeon_fence.c      |   5
 drivers/gpu/drm/radeon/radeon_fixed.h      |  17
 drivers/gpu/drm/radeon/radeon_kms.c        |  42
 drivers/gpu/drm/radeon/radeon_mode.h       |   9
 drivers/gpu/drm/radeon/radeon_object.c     |  90
 drivers/gpu/drm/radeon/radeon_object.h     |   2
 drivers/gpu/drm/radeon/radeon_ttm.c        |  62
 drivers/gpu/drm/radeon/rs400.c             |   2
 drivers/gpu/drm/radeon/rs600.c             |   2
 drivers/gpu/drm/radeon/rs690.c             |   9
 drivers/gpu/drm/radeon/rv515.c             |  11
 drivers/gpu/drm/radeon/rv770.c             |  26
 drivers/gpu/drm/ttm/ttm_bo.c               | 540
 drivers/gpu/drm/ttm/ttm_bo_vm.c            |   7
 drivers/gpu/drm/ttm/ttm_memory.c           |   4
 include/drm/drm_mm.h                       |  35
 include/drm/ttm/ttm_bo_api.h               |  56
 include/drm/ttm/ttm_bo_driver.h            |  28
 32 files changed, 849 insertions(+), 512 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3963b3c1081a..4231d6db72ec 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
 {
 	int count = 0;
 
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
 	drm_fb_helper_parse_command_line(dev);
 
 	count = drm_helper_probe_connector_modes(dev,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 1f0d717dbad6..d7d7eac3ddd2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 }
 EXPORT_SYMBOL(drm_mm_get_block_generic);
 
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int atomic)
+{
+	struct drm_mm_node *align_splitoff = NULL;
+	unsigned tmp = 0;
+	unsigned wasted = 0;
+
+	if (node->start < start)
+		wasted += start - node->start;
+	if (alignment)
+		tmp = ((node->start + wasted) % alignment);
+
+	if (tmp)
+		wasted += alignment - tmp;
+	if (wasted) {
+		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+		if (unlikely(align_splitoff == NULL))
+			return NULL;
+	}
+
+	if (node->size == size) {
+		list_del_init(&node->fl_entry);
+		node->free = 0;
+	} else {
+		node = drm_mm_split_at_start(node, size, atomic);
+	}
+
+	if (align_splitoff)
+		drm_mm_put_block(align_splitoff);
+
+	return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
  * Otherwise add to the free stack.
@@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 }
 EXPORT_SYMBOL(drm_mm_search_free);
 
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int best_match)
+{
+	struct list_head *list;
+	const struct list_head *free_stack = &mm->fl_entry;
+	struct drm_mm_node *entry;
+	struct drm_mm_node *best;
+	unsigned long best_size;
+	unsigned wasted;
+
+	best = NULL;
+	best_size = ~0UL;
+
+	list_for_each(list, free_stack) {
+		entry = list_entry(list, struct drm_mm_node, fl_entry);
+		wasted = 0;
+
+		if (entry->size < size)
+			continue;
+
+		if (entry->start > end || (entry->start+entry->size) < start)
+			continue;
+
+		if (entry->start < start)
+			wasted += start - entry->start;
+
+		if (alignment) {
+			register unsigned tmp = (entry->start + wasted) % alignment;
+			if (tmp)
+				wasted += alignment - tmp;
+		}
+
+		if (entry->size >= size + wasted) {
+			if (!best_match)
+				return entry;
+			if (size < best_size) {
+				best = entry;
+				best_size = entry->size;
+			}
+		}
+	}
+
+	return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
 int drm_mm_clean(struct drm_mm * mm)
 {
 	struct list_head *head = &mm->ml_entry;
@@ -381,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm)
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+	struct drm_mm_node *entry;
+	int total_used = 0, total_free = 0, total = 0;
+
+	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+			prefix, entry->start, entry->start + entry->size,
+			entry->size, entry->free ? "free" : "used");
+		total += entry->size;
+		if (entry->free)
+			total_free += entry->size;
+		else
+			total_used += entry->size;
+	}
+	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+		total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
 #if defined(CONFIG_DEBUG_FS)
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
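
A minimal usage sketch for the two range-restricted helpers added above (not part of the patch; mm is an initialized struct drm_mm, and the size, alignment and range values are hypothetical):

	struct drm_mm_node *node;

	/* look for 64 KiB, 4 KiB aligned, within the first 256 MiB */
	node = drm_mm_search_free_in_range(mm, 64 * 1024, 4096,
					   0, 256 * 1024 * 1024, 0);
	if (node)
		node = drm_mm_get_block_range_generic(node, 64 * 1024, 4096,
						      0, 256 * 1024 * 1024, 0);
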
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index fba3c96b915b..260fcf59f00c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -499,8 +499,18 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	else
 		pll = &rdev->clock.p2pll;
 
-	radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-			   &ref_div, &post_div, pll_flags);
+	if (ASIC_IS_AVIVO(rdev)) {
+		if (radeon_new_pll)
+			radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
+						 &fb_div, &frac_fb_div,
+						 &ref_div, &post_div, pll_flags);
+		else
+			radeon_compute_pll(pll, adjusted_clock, &pll_clock,
+					   &fb_div, &frac_fb_div,
+					   &ref_div, &post_div, pll_flags);
+	} else
+		radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+				   &ref_div, &post_div, pll_flags);
 
 	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
 	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -599,8 +609,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	}
 	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(rbo);
-	if (tiling_flags & RADEON_TILING_MACRO)
-		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
 
 	switch (crtc->fb->bits_per_pixel) {
 	case 8:
@@ -630,6 +638,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 		return -EINVAL;
 	}
 
+	if (tiling_flags & RADEON_TILING_MACRO)
+		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+
 	if (tiling_flags & RADEON_TILING_MICRO)
 		fb_format |= AVIVO_D1GRPH_TILED;
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b7baf16c11d7..824cc6480a06 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3299,6 +3299,8 @@ int r100_resume(struct radeon_device *rdev)
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r100_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r100_startup(rdev);
 }
 
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 86065dcc1982..83378c39d0e3 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1250,6 +1250,8 @@ int r300_resume(struct radeon_device *rdev)
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r300_startup(rdev);
 }
 
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 162c3902fe69..c05a7270cf0c 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -231,7 +231,8 @@ int r420_resume(struct radeon_device *rdev)
 	}
 	/* Resume clock after posting */
 	r420_clock_resume(rdev);
-
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r420_startup(rdev);
 }
 
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 788eef5c2a08..0f3843b6dac7 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -220,6 +220,8 @@ int r520_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r520_startup(rdev);
 }
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 250ec3fe1a16..f5cf874dc62a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1845,6 +1845,14 @@ int r600_startup(struct radeon_device *rdev)
 {
 	int r;
 
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
 	r600_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		r600_agp_enable(rdev);
@@ -2026,25 +2034,17 @@ int r600_init(struct radeon_device *rdev)
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-		r = r600_init_microcode(rdev);
-		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
-			return r;
-		}
-	}
-
 	r = r600_pcie_gart_init(rdev);
 	if (r)
 		return r;
 
-	rdev->accel_working = true;
 	r = r600_blit_init(rdev);
 	if (r) {
-		DRM_ERROR("radeon: failled blitter (%d).\n", r);
+		DRM_ERROR("radeon: failed blitter (%d).\n", r);
 		return r;
 	}
 
+	rdev->accel_working = true;
 	r = r600_startup(rdev);
 	if (r) {
 		r600_suspend(rdev);
@@ -2056,12 +2056,12 @@ int r600_init(struct radeon_device *rdev)
 	if (rdev->accel_working) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
 			rdev->accel_working = false;
 		}
 		r = r600_ib_test(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 			rdev->accel_working = false;
 		}
 	}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a15cf9ceb9a7..c938bb54123c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -88,6 +88,7 @@ extern int radeon_benchmarking;
 extern int radeon_testing;
 extern int radeon_connector_table;
 extern int radeon_tv;
+extern int radeon_new_pll;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -208,6 +209,8 @@ struct radeon_bo {
 	/* Protected by gem.mutex */
 	struct list_head list;
 	/* Protected by tbo.reserved */
+	u32 placements[3];
+	struct ttm_placement placement;
 	struct ttm_buffer_object tbo;
 	struct ttm_bo_kmap_obj kmap;
 	unsigned pin_count;
@@ -1012,6 +1015,7 @@ extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
 extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
 extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
 
 /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
 struct r100_mc_save {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d7b0feb7d47f..12a0c760e7ff 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -70,6 +70,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
 	struct _ATOM_GPIO_I2C_INFO *i2c_info;
 	uint16_t data_offset;
+	int i;
 
 	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
 	i2c.valid = false;
@@ -78,38 +79,43 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 
 	i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
-	gpio = &i2c_info->asGPIO_Info[id];
-
-	i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
-	i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
-	i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
-	i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
-	i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
-	i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
-	i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
-	i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
-	i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
-	i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
-	i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
-	i2c.en_data_mask = (1 << gpio->ucDataEnShift);
-	i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
-	i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
-	i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
-	i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
-	if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
-		i2c.hw_capable = true;
-	else
-		i2c.hw_capable = false;
 
-	if (gpio->sucI2cId.ucAccess == 0xa0)
-		i2c.mm_i2c = true;
-	else
-		i2c.mm_i2c = false;
+	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+		gpio = &i2c_info->asGPIO_Info[i];
+
+		if (gpio->sucI2cId.ucAccess == id) {
+			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+			i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+			i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+			i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+			i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+			i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+			i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+			i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+			i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+			i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+			i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+			i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+			i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+			i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+			i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+			if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+				i2c.hw_capable = true;
+			else
+				i2c.hw_capable = false;
 
-	i2c.i2c_id = gpio->sucI2cId.ucAccess;
+			if (gpio->sucI2cId.ucAccess == 0xa0)
+				i2c.mm_i2c = true;
+			else
+				i2c.mm_i2c = false;
+
+			i2c.i2c_id = gpio->sucI2cId.ucAccess;
 
-	i2c.valid = true;
+			i2c.valid = true;
+		}
+	}
 
 	return i2c;
 }
@@ -503,6 +509,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 								usRecordOffset));
 			ATOM_I2C_RECORD *i2c_record;
 			ATOM_HPD_INT_RECORD *hpd_record;
+			ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
 			hpd.hpd = RADEON_HPD_NONE;
 
 			while (record->ucRecordType > 0
@@ -514,10 +521,12 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 					i2c_record =
 						(ATOM_I2C_RECORD *)
 						record;
+					i2c_config =
+						(ATOM_I2C_ID_CONFIG_ACCESS *)
+						&i2c_record->sucI2cId;
 					ddc_bus = radeon_lookup_i2c_gpio(rdev,
-									 i2c_record->
-									 sucI2cId.
-									 bfI2C_LineMux);
+									 i2c_config->
+									 ucAccess);
 					break;
 				case ATOM_HPD_INT_RECORD_TYPE:
 					hpd_record =
@@ -670,22 +679,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 			dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
 
-			if ((rdev->family == CHIP_RS690) ||
-			    (rdev->family == CHIP_RS740)) {
-				if ((i == ATOM_DEVICE_DFP2_INDEX)
-				    && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
-					bios_connectors[i].line_mux =
-						ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-				else if ((i == ATOM_DEVICE_DFP3_INDEX)
-					 && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
-					bios_connectors[i].line_mux =
-						ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-				else
-					bios_connectors[i].line_mux =
-						ci.sucI2cId.sbfAccess.bfI2C_LineMux;
-			} else
-				bios_connectors[i].line_mux =
-					ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+			bios_connectors[i].line_mux =
+				ci.sucI2cId.ucAccess;
 
 			/* give tv unique connector ids */
 			if (i == ATOM_DEVICE_TV1_INDEX) {
@@ -876,7 +871,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
 	 * pre-DCE 3.0 r6xx hardware.  This might need to be adjusted per
 	 * family.
 	 */
-	p1pll->pll_out_min = 64800;
+	if (!radeon_new_pll)
+		p1pll->pll_out_min = 64800;
 	}
 
 	p1pll->pll_in_min =
@@ -1006,6 +1002,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
 	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
 	uint8_t frev, crev;
 	struct radeon_atom_ss *ss = NULL;
+	int i;
 
 	if (id > ATOM_MAX_SS_ENTRY)
 		return NULL;
@@ -1023,12 +1020,17 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
 		if (!ss)
 			return NULL;
 
-		ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
-		ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
-		ss->step = ss_info->asSS_Info[id].ucSS_Step;
-		ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
-		ss->range = ss_info->asSS_Info[id].ucSS_Range;
-		ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
+		for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+			if (ss_info->asSS_Info[i].ucSS_Id == id) {
+				ss->percentage =
+					le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+				ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+				ss->step = ss_info->asSS_Info[i].ucSS_Step;
+				ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+				ss->range = ss_info->asSS_Info[i].ucSS_Range;
+				ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+			}
+		}
 	}
 	return ss;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cfa2ebb259fe..5eece186e03c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1103,10 +1103,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.coherent_mode_property,
 					      1);
-		radeon_connector->dac_load_detect = true;
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.load_detect_property,
-					      1);
+		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+			radeon_connector->dac_load_detect = true;
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+		}
 		break;
 	case DRM_MODE_CONNECTOR_HDMIA:
 	case DRM_MODE_CONNECTOR_HDMIB:
@@ -1141,14 +1143,19 @@ radeon_add_atom_connector(struct drm_device *dev,
 		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 		if (ret)
 			goto failed;
-		/* add DP i2c bus */
-		radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
 		if (i2c_bus->valid) {
+			/* add DP i2c bus */
+			radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+			if (!radeon_dig_connector->dp_i2c_bus)
+				goto failed;
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
 			if (!radeon_connector->ddc_bus)
 				goto failed;
 		}
 		subpixel_order = SubPixelHorizontalRGB;
+		drm_connector_attach_property(&radeon_connector->base,
+					      rdev->mode_info.coherent_mode_property,
+					      1);
 		break;
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Composite:
@@ -1183,7 +1190,6 @@ radeon_add_atom_connector(struct drm_device *dev,
 			if (!radeon_connector->ddc_bus)
 				goto failed;
 		}
-		drm_mode_create_scaling_mode_property(dev);
 		drm_connector_attach_property(&radeon_connector->base,
 					      dev->mode_config.scaling_mode_property,
 					      DRM_MODE_SCALE_FULLSCREEN);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7e55647f118e..02bcdb1240c0 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -44,10 +44,11 @@ void radeon_surface_init(struct radeon_device *rdev)
 	if (rdev->family < CHIP_R600) {
 		int i;
 
-		for (i = 0; i < 8; i++) {
-			WREG32(RADEON_SURFACE0_INFO +
-			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
-			       0);
+		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+			if (rdev->surface_regs[i].bo)
+				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+			else
+				radeon_clear_surface_reg(rdev, i);
 		}
 		/* enable surfaces */
 		WREG32(RADEON_SURFACE_CNTL, 0);
@@ -487,8 +488,10 @@ int radeon_atombios_init(struct radeon_device *rdev)
 
 void radeon_atombios_fini(struct radeon_device *rdev)
 {
-	kfree(rdev->mode_info.atom_context->scratch);
-	kfree(rdev->mode_info.atom_context);
+	if (rdev->mode_info.atom_context) {
+		kfree(rdev->mode_info.atom_context->scratch);
+		kfree(rdev->mode_info.atom_context);
+	}
 	kfree(rdev->mode_info.atom_card_info);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c115f2e442eb..a133b833e45d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -560,6 +560,98 @@ void radeon_compute_pll(struct radeon_pll *pll,
 	*post_div_p = best_post_div;
 }
 
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+			      uint64_t freq,
+			      uint32_t *dot_clock_p,
+			      uint32_t *fb_div_p,
+			      uint32_t *frac_fb_div_p,
+			      uint32_t *ref_div_p,
+			      uint32_t *post_div_p,
+			      int flags)
+{
+	fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
+	fixed20_12 pll_out_max, pll_out_min;
+	fixed20_12 pll_in_max, pll_in_min;
+	fixed20_12 reference_freq;
+	fixed20_12 error, ffreq, a, b;
+
+	pll_out_max.full = rfixed_const(pll->pll_out_max);
+	pll_out_min.full = rfixed_const(pll->pll_out_min);
+	pll_in_max.full = rfixed_const(pll->pll_in_max);
+	pll_in_min.full = rfixed_const(pll->pll_in_min);
+	reference_freq.full = rfixed_const(pll->reference_freq);
+	do_div(freq, 10);
+	ffreq.full = rfixed_const(freq);
+	error.full = rfixed_const(100 * 100);
+
+	/* max p */
+	p.full = rfixed_div(pll_out_max, ffreq);
+	p.full = rfixed_floor(p);
+
+	/* min m */
+	m.full = rfixed_div(reference_freq, pll_in_max);
+	m.full = rfixed_ceil(m);
+
+	while (1) {
+		n.full = rfixed_div(ffreq, reference_freq);
+		n.full = rfixed_mul(n, m);
+		n.full = rfixed_mul(n, p);
+
+		f_vco.full = rfixed_div(n, m);
+		f_vco.full = rfixed_mul(f_vco, reference_freq);
+
+		f_pclk.full = rfixed_div(f_vco, p);
+
+		if (f_pclk.full > ffreq.full)
+			error.full = f_pclk.full - ffreq.full;
+		else
+			error.full = ffreq.full - f_pclk.full;
+		error.full = rfixed_div(error, f_pclk);
+		a.full = rfixed_const(100 * 100);
+		error.full = rfixed_mul(error, a);
+
+		a.full = rfixed_mul(m, p);
+		a.full = rfixed_div(n, a);
+		best_freq.full = rfixed_mul(reference_freq, a);
+
+		if (rfixed_trunc(error) < 25)
+			break;
+
+		a.full = rfixed_const(1);
+		m.full = m.full + a.full;
+		a.full = rfixed_div(reference_freq, m);
+		if (a.full >= pll_in_min.full)
+			continue;
+
+		m.full = rfixed_div(reference_freq, pll_in_max);
+		m.full = rfixed_ceil(m);
+		a.full = rfixed_const(1);
+		p.full = p.full - a.full;
+		a.full = rfixed_mul(p, ffreq);
+		if (a.full >= pll_out_min.full)
+			continue;
+		else {
+			DRM_ERROR("Unable to find pll dividers\n");
+			break;
+		}
+	}
+
+	a.full = rfixed_const(10);
+	b.full = rfixed_mul(n, a);
+
+	frac_n.full = rfixed_floor(n);
+	frac_n.full = rfixed_mul(frac_n, a);
+	frac_n.full = b.full - frac_n.full;
+
+	*dot_clock_p = rfixed_trunc(best_freq);
+	*fb_div_p = rfixed_trunc(n);
+	*frac_fb_div_p = rfixed_trunc(frac_n);
+	*ref_div_p = rfixed_trunc(m);
+	*post_div_p = rfixed_trunc(p);
+
+	DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+}
+
 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -660,7 +752,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
 			return -ENOMEM;
 
 		rdev->mode_info.coherent_mode_property->values[0] = 0;
-		rdev->mode_info.coherent_mode_property->values[0] = 1;
+		rdev->mode_info.coherent_mode_property->values[1] = 1;
 	}
 
 	if (!ASIC_IS_AVIVO(rdev)) {
@@ -684,7 +776,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
 	if (!rdev->mode_info.load_detect_property)
 		return -ENOMEM;
 	rdev->mode_info.load_detect_property->values[0] = 0;
-	rdev->mode_info.load_detect_property->values[0] = 1;
+	rdev->mode_info.load_detect_property->values[1] = 1;
 
 	drm_mode_create_scaling_mode_property(rdev->ddev);
 
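
For reference, the divider relation the new radeon_compute_pll_avivo() iterates on, restated as a sketch in comment form (derived from the code above, not part of the patch):

	/*
	 * f_vco  = reference_freq * n / m    (feedback divider loop)
	 * f_pclk = f_vco / p                 (post divider)
	 *
	 * The loop scales the relative error to hundredths of a percent
	 * (error * 100 * 100) and accepts the candidate dividers once the
	 * truncated error drops below 25, i.e. within 0.25% of the request.
	 */
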
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 7f50fb864af8..28077247f4f3 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -86,6 +86,7 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
+int radeon_new_pll = 1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -120,6 +121,9 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
+MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+module_param_named(new_pll, radeon_new_pll, int, 0444);
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
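
With the parameter added above, the old AVIVO PLL path can still be selected at module load time, e.g.:

	modprobe radeon new_pll=0
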
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 2ac31633d72c..cb4cd97ae39f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -197,9 +197,8 @@ retry:
 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
 				radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);
-		if (unlikely(r == -ERESTARTSYS)) {
-			return -EBUSY;
-		}
+		if (unlikely(r < 0))
+			return r;
 	} else {
 		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_timeout(rdev->fence_drv.queue,
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
index 90187d173847..3d4d84e078ac 100644
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ b/drivers/gpu/drm/radeon/radeon_fixed.h
@@ -38,6 +38,23 @@ typedef union rfixed {
 #define fixed_init_half(A) { .full = rfixed_const_half((A)) }
 #define rfixed_trunc(A) ((A).full >> 12)
 
+static inline u32 rfixed_floor(fixed20_12 A)
+{
+	u32 non_frac = rfixed_trunc(A);
+
+	return rfixed_const(non_frac);
+}
+
+static inline u32 rfixed_ceil(fixed20_12 A)
+{
+	u32 non_frac = rfixed_trunc(A);
+
+	if (A.full > rfixed_const(non_frac))
+		return rfixed_const(non_frac + 1);
+	else
+		return rfixed_const(non_frac);
+}
+
 static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
 {
 	u64 tmp = ((u64)A.full << 13);
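
The helpers above operate on the driver's 20.12 fixed-point format (12 fractional bits). A quick sketch of what they return (not part of the patch; assumes rfixed_const_half(0) encodes 0.5):

	fixed20_12 x;

	x.full = rfixed_const(5) + rfixed_const_half(0);	/* x = 5.5 */
	/* rfixed_floor(x) == rfixed_const(5) */
	/* rfixed_ceil(x)  == rfixed_const(6) */
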
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index ba128621057a..f23b05606eb5 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,10 +30,19 @@
 #include "radeon.h"
 #include "radeon_drm.h"
 
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev == NULL)
+		return 0;
+	radeon_modeset_fini(rdev);
+	radeon_device_fini(rdev);
+	kfree(rdev);
+	dev->dev_private = NULL;
+	return 0;
+}
 
-/*
- * Driver load/unload
- */
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 {
 	struct radeon_device *rdev;
@@ -62,31 +71,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	 */
 	r = radeon_device_init(rdev, dev, dev->pdev, flags);
 	if (r) {
-		DRM_ERROR("Fatal error while trying to initialize radeon.\n");
-		return r;
+		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+		goto out;
 	}
 	/* Again modeset_init should fail only on fatal error
 	 * otherwise it should provide enough functionalities
 	 * for shadowfb to run
 	 */
 	r = radeon_modeset_init(rdev);
-	if (r) {
-		return r;
-	}
-	return 0;
-}
-
-int radeon_driver_unload_kms(struct drm_device *dev)
-{
-	struct radeon_device *rdev = dev->dev_private;
-
-	if (rdev == NULL)
-		return 0;
-	radeon_modeset_fini(rdev);
-	radeon_device_fini(rdev);
-	kfree(rdev);
-	dev->dev_private = NULL;
-	return 0;
+	if (r)
+		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+out:
+	if (r)
+		radeon_driver_unload_kms(dev);
+	return r;
 }
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 15ec7ca18a95..44d4b652ea12 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -437,6 +437,15 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
 			       uint32_t *post_div_p,
 			       int flags);
 
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+				     uint64_t freq,
+				     uint32_t *dot_clock_p,
+				     uint32_t *fb_div_p,
+				     uint32_t *frac_fb_div_p,
+				     uint32_t *ref_div_p,
+				     uint32_t *post_div_p,
+				     int flags);
+
 extern void radeon_setup_encoder_clones(struct drm_device *dev);
 
 struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index bec494384825..2040937682fd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -75,6 +75,25 @@ static inline u32 radeon_ttm_flags_from_domain(u32 domain)
 	return flags;
 }
 
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+{
+	u32 c = 0;
+
+	rbo->placement.fpfn = 0;
+	rbo->placement.lpfn = 0;
+	rbo->placement.placement = rbo->placements;
+	rbo->placement.busy_placement = rbo->placements;
+	if (domain & RADEON_GEM_DOMAIN_VRAM)
+		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+					TTM_PL_FLAG_VRAM;
+	if (domain & RADEON_GEM_DOMAIN_GTT)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	if (domain & RADEON_GEM_DOMAIN_CPU)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	rbo->placement.num_placement = c;
+	rbo->placement.num_busy_placement = c;
+}
+
 int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 		     unsigned long size, bool kernel, u32 domain,
 		     struct radeon_bo **bo_ptr)
@@ -102,16 +121,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 	INIT_LIST_HEAD(&bo->list);
 
 	flags = radeon_ttm_flags_from_domain(domain);
-retry:
+	/* Kernel allocation are uninterruptible */
 	r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
-				   flags, 0, 0, true, NULL, size,
+				   flags, 0, 0, !kernel, NULL, size,
 				   &radeon_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
-		/* ttm call radeon_ttm_object_object_destroy if error happen */
-		dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n",
-			size, flags);
+		if (r != -ERESTARTSYS)
+			dev_err(rdev->dev,
+				"object_init failed for (%ld, 0x%08X)\n",
+				size, flags);
 		return r;
 	}
 	*bo_ptr = bo;
@@ -169,40 +187,32 @@ void radeon_bo_unref(struct radeon_bo **bo)
 
 int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 {
-	u32 flags;
-	u32 tmp;
-	int r;
+	int r, i;
 
-	flags = radeon_ttm_flags_from_domain(domain);
+	radeon_ttm_placement_from_domain(bo, domain);
 	if (bo->pin_count) {
 		bo->pin_count++;
 		if (gpu_addr)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 		return 0;
 	}
-	tmp = bo->tbo.mem.placement;
-	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
-	bo->tbo.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT |
-		TTM_PL_MASK_CACHING;
-retry:
-	r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
-				       true, false);
+	radeon_ttm_placement_from_domain(bo, domain);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 	}
-	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
+	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
-	}
 	return r;
 }
 
 int radeon_bo_unpin(struct radeon_bo *bo)
 {
-	int r;
+	int r, i;
 
 	if (!bo->pin_count) {
 		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
@@ -211,18 +221,12 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 	bo->pin_count--;
 	if (bo->pin_count)
 		return 0;
-	bo->tbo.proposed_placement = bo->tbo.mem.placement &
-		~TTM_PL_FLAG_NO_EVICT;
-retry:
-	r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
-				       true, false);
-	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
+	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 	return r;
-	}
-	return 0;
 }
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
@@ -326,21 +330,17 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
 		bo = lobj->bo;
 		if (!bo->pin_count) {
 			if (lobj->wdomain) {
-				bo->tbo.proposed_placement =
-					radeon_ttm_flags_from_domain(lobj->wdomain);
+				radeon_ttm_placement_from_domain(bo,
+								lobj->wdomain);
 			} else {
-				bo->tbo.proposed_placement =
-					radeon_ttm_flags_from_domain(lobj->rdomain);
+				radeon_ttm_placement_from_domain(bo,
+								lobj->rdomain);
 			}
-retry:
 			r = ttm_buffer_object_validate(&bo->tbo,
-						       bo->tbo.proposed_placement,
+						       &bo->placement,
 						       true, false);
-			if (unlikely(r)) {
-				if (r == -ERESTART)
-					goto retry;
+			if (unlikely(r))
 				return r;
-			}
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
@@ -378,7 +378,7 @@ int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 	return ttm_fbdev_mmap(vma, &bo->tbo);
 }
 
-static int radeon_bo_get_surface_reg(struct radeon_bo *bo)
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 {
 	struct radeon_device *rdev = bo->rdev;
 	struct radeon_surface_reg *reg;
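
A small sketch (not part of the patch) of what the new radeon_ttm_placement_from_domain() produces for a combined domain; rbo is a hypothetical, already-created object:

	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
					      RADEON_GEM_DOMAIN_GTT);
	/*
	 * rbo->placement now points at rbo->placements[], holding two
	 * entries: write-combined/uncached VRAM first, then cached GTT,
	 * so validation prefers VRAM and falls back to GTT.
	 */
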
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index e9da13077e2f..f6b69c2c0d00 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -175,5 +175,5 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 				struct ttm_mem_reg *mem);
 extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-
+extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index bdb46c8cadd1..d2ed896cca01 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -197,16 +197,19 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	return 0;
 }
 
-static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
 {
-	uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
-
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
 	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		break;
+	case TTM_PL_TT:
 	default:
-		return (cur_placement & ~TTM_PL_MASK_CACHING) |
-			TTM_PL_FLAG_SYSTEM |
-			TTM_PL_FLAG_CACHED;
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
 	}
+	*placement = rbo->placement;
 }
 
 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
212static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) 215static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
@@ -283,14 +286,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
-	uint32_t proposed_placement;
+	u32 placements;
+	struct ttm_placement placement;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait);
 	if (unlikely(r)) {
 		return r;
@@ -329,15 +339,21 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
-	uint32_t proposed_flags;
+	struct ttm_placement placement;
+	u32 placements;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
-			     interruptible, no_wait);
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -407,18 +423,6 @@ memcpy:
 	return r;
 }
 
-const uint32_t radeon_mem_prios[] = {
-	TTM_PL_VRAM,
-	TTM_PL_TT,
-	TTM_PL_SYSTEM,
-};
-
-const uint32_t radeon_busy_prios[] = {
-	TTM_PL_TT,
-	TTM_PL_VRAM,
-	TTM_PL_SYSTEM,
-};
-
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
 				bool lazy, bool interruptible)
 {
@@ -446,10 +450,6 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
 }
 
 static struct ttm_bo_driver radeon_bo_driver = {
-	.mem_type_prio = radeon_mem_prios,
-	.mem_busy_prio = radeon_busy_prios,
-	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
-	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
 	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
 	.invalidate_caches = &radeon_invalidate_caches,
 	.init_mem_type = &radeon_init_mem_type,
@@ -483,7 +483,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 		return r;
 	}
 	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
-			   0, rdev->mc.real_vram_size >> PAGE_SHIFT);
+			   rdev->mc.real_vram_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
@@ -506,7 +506,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	DRM_INFO("radeon: %uM of VRAM memory ready\n",
 		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
 	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
-			   0, rdev->mc.gtt_size >> PAGE_SHIFT);
+			   rdev->mc.gtt_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing GTT heap.\n");
 		return r;
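
Both move paths above now build a single-entry ttm_placement on the stack instead of passing a flags word; the pattern in isolation (a sketch, not part of the patch):

	struct ttm_placement placement;
	u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	/* ready to pass to ttm_bo_mem_space() */
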
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index eda6d757b5c4..c1fcdddb6be6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -430,6 +430,8 @@ int rs400_resume(struct radeon_device *rdev)
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs400_startup(rdev);
 }
 
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index fd5ab01f6ad1..4f8ea4260572 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -586,6 +586,8 @@ int rs600_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs600_startup(rdev);
 }
 
591 593
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index eb486ee7ea00..1e22f52d6039 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -260,8 +260,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 
 	b.full = rfixed_const(mode->crtc_hdisplay);
 	c.full = rfixed_const(256);
-	a.full = rfixed_mul(wm->num_line_pair, b);
-	request_fifo_depth.full = rfixed_div(a, c);
+	a.full = rfixed_div(b, c);
+	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
 	if (a.full < rfixed_const(4)) {
 		wm->lb_request_fifo_depth = 4;
 	} else {
@@ -390,6 +391,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 	a.full = rfixed_const(16);
 	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
 	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
 
 	/* Determine estimated width */
 	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
@@ -399,6 +401,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 	} else {
 		a.full = rfixed_const(16);
 		wm->priority_mark.full = rfixed_div(estimated_width, a);
+		wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
 		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
 	}
 }
@@ -655,6 +658,8 @@ int rs690_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs690_startup(rdev);
 }
 
660 665
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 7793239e24b2..59632a506b46 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -513,6 +513,8 @@ int rv515_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rv515_startup(rdev);
 }
 
518 520
@@ -889,8 +891,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 
 	b.full = rfixed_const(mode->crtc_hdisplay);
 	c.full = rfixed_const(256);
-	a.full = rfixed_mul(wm->num_line_pair, b);
-	request_fifo_depth.full = rfixed_div(a, c);
+	a.full = rfixed_div(b, c);
+	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
 	if (a.full < rfixed_const(4)) {
 		wm->lb_request_fifo_depth = 4;
 	} else {
@@ -992,15 +995,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
992 a.full = rfixed_const(16); 995 a.full = rfixed_const(16);
993 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); 996 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
994 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); 997 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
998 wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
995 999
996 /* Determine estimated width */ 1000 /* Determine estimated width */
997 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; 1001 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
998 estimated_width.full = rfixed_div(estimated_width, consumption_time); 1002 estimated_width.full = rfixed_div(estimated_width, consumption_time);
999 if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { 1003 if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
1000 wm->priority_mark.full = rfixed_const(10); 1004 wm->priority_mark.full = wm->priority_mark_max.full;
1001 } else { 1005 } else {
1002 a.full = rfixed_const(16); 1006 a.full = rfixed_const(16);
1003 wm->priority_mark.full = rfixed_div(estimated_width, a); 1007 wm->priority_mark.full = rfixed_div(estimated_width, a);
1008 wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
1004 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; 1009 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
1005 } 1010 }
1006} 1011}
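The rs690 and rv515 watermark hunks above lean on the radeon fixed-point helpers; a minimal sketch of the ceiling operation, assuming the fixed20_12 (20.12) layout from radeon_fixed.h, with this stand-alone reimplementation being illustrative rather than the header's actual contents:

	/* 20.12 fixed point: the low 12 bits are fractional. */
	typedef union { unsigned int full; } fixed20_12;

	/* Round up to the next whole integer when any fractional bit is set;
	 * the added rfixed_ceil() calls apply this to the priority marks and
	 * request FIFO depths so they are never silently rounded down. */
	static inline unsigned int rfixed_ceil(fixed20_12 a)
	{
		if (a.full & 0xFFF)
			return (a.full & ~0xFFF) + 0x1000;
		return a.full;
	}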
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index dd4f02096a80..2d124bb57762 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -874,6 +874,14 @@ static int rv770_startup(struct radeon_device *rdev)
874{ 874{
875 int r; 875 int r;
876 876
877 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
878 r = r600_init_microcode(rdev);
879 if (r) {
880 DRM_ERROR("Failed to load firmware!\n");
881 return r;
882 }
883 }
884
877 rv770_mc_program(rdev); 885 rv770_mc_program(rdev);
878 if (rdev->flags & RADEON_IS_AGP) { 886 if (rdev->flags & RADEON_IS_AGP) {
879 rv770_agp_enable(rdev); 887 rv770_agp_enable(rdev);
@@ -1039,25 +1047,17 @@ int rv770_init(struct radeon_device *rdev)
1039 rdev->ih.ring_obj = NULL; 1047 rdev->ih.ring_obj = NULL;
1040 r600_ih_ring_init(rdev, 64 * 1024); 1048 r600_ih_ring_init(rdev, 64 * 1024);
1041 1049
1042 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1043 r = r600_init_microcode(rdev);
1044 if (r) {
1045 DRM_ERROR("Failed to load firmware!\n");
1046 return r;
1047 }
1048 }
1049
1050 r = r600_pcie_gart_init(rdev); 1050 r = r600_pcie_gart_init(rdev);
1051 if (r) 1051 if (r)
1052 return r; 1052 return r;
1053 1053
1054 rdev->accel_working = true;
1055 r = r600_blit_init(rdev); 1054 r = r600_blit_init(rdev);
1056 if (r) { 1055 if (r) {
1057 DRM_ERROR("radeon: failled blitter (%d).\n", r); 1056 DRM_ERROR("radeon: failed blitter (%d).\n", r);
1058 rdev->accel_working = false; 1057 return r;
1059 } 1058 }
1060 1059
1060 rdev->accel_working = true;
1061 r = rv770_startup(rdev); 1061 r = rv770_startup(rdev);
1062 if (r) { 1062 if (r) {
1063 rv770_suspend(rdev); 1063 rv770_suspend(rdev);
@@ -1069,12 +1069,12 @@ int rv770_init(struct radeon_device *rdev)
1069 if (rdev->accel_working) { 1069 if (rdev->accel_working) {
1070 r = radeon_ib_pool_init(rdev); 1070 r = radeon_ib_pool_init(rdev);
1071 if (r) { 1071 if (r) {
1072 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); 1072 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
1073 rdev->accel_working = false; 1073 rdev->accel_working = false;
1074 } 1074 }
1075 r = r600_ib_test(rdev); 1075 r = r600_ib_test(rdev);
1076 if (r) { 1076 if (r) {
1077 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1077 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1078 rdev->accel_working = false; 1078 rdev->accel_working = false;
1079 } 1079 }
1080 } 1080 }
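The rv770 firmware hunks move the microcode guard from one-time init into the startup path; a hedged sketch of the call graph this relies on (rv770_resume() is not shown in this diff, but like the rs690/rv515 resume paths above it re-enters startup):

	rv770_init()   -> rv770_startup()   /* first boot: me/pfp/rlc firmware loaded */
	rv770_resume() -> rv770_startup()   /* resume: guard sees cached firmware, skips load */

Deferring rdev->accel_working = true until r600_blit_init() succeeds likewise keeps the IB pool and IB test paths from running against a half-initialized blitter.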
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e13fd23f3334..a835b6fe42a1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
27/* 27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30/* Notes:
31 *
32 * We store a bo pointer in the drm_mm_node struct so we know which bo
33 * owns a specific node. There is no protection on the pointer, so to
34 * keep things from going berserk you have to access it only while
35 * holding the global lru lock, and make sure that any time you free a
36 * node you reset the pointer to NULL.
37 */
30 38
31#include "ttm/ttm_module.h" 39#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h" 40#include "ttm/ttm_bo_driver.h"
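A short sketch of the locking protocol the new note describes; glob->lru_lock and node->private are taken from this patch, while the surrounding snippets are hypothetical:

	/* Reader side: the bo pointer is only meaningful under lru_lock. */
	spin_lock(&glob->lru_lock);
	bo = node->private;		/* may be NULL if the node was freed */
	if (bo) {
		/* ... use bo here, still holding lru_lock ... */
	}
	spin_unlock(&glob->lru_lock);

	/* Free side: always clear the pointer before putting the node. */
	spin_lock(&glob->lru_lock);
	node->private = NULL;
	drm_mm_put_block(node);
	spin_unlock(&glob->lru_lock);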
@@ -51,6 +59,60 @@ static struct attribute ttm_bo_count = {
51 .mode = S_IRUGO 59 .mode = S_IRUGO
52}; 60};
53 61
62static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
63{
64 int i;
65
66 for (i = 0; i <= TTM_PL_PRIV5; i++)
67 if (flags & (1 << i)) {
68 *mem_type = i;
69 return 0;
70 }
71 return -EINVAL;
72}
73
74static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
75 struct ttm_mem_type_manager *man)
76{
77 printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
78 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
79 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
80 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
81 printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
82 printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
83 printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
84 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
85 man->available_caching);
86 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
87 man->default_caching);
88 spin_lock(&glob->lru_lock);
89 drm_mm_debug_table(&man->manager, TTM_PFX);
90 spin_unlock(&glob->lru_lock);
91}
92
93static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
94 struct ttm_placement *placement)
95{
96 struct ttm_bo_device *bdev = bo->bdev;
97 struct ttm_bo_global *glob = bo->glob;
98 struct ttm_mem_type_manager *man;
99 int i, ret; uint32_t mem_type;
100
101 printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
102 bo, bo->mem.num_pages, bo->mem.size >> 10,
103 bo->mem.size >> 20);
104 for (i = 0; i < placement->num_placement; i++) {
105 ret = ttm_mem_type_from_flags(placement->placement[i],
106 &mem_type);
107 if (ret)
108 return;
109 man = &bdev->man[mem_type];
110 printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
111 i, placement->placement[i], mem_type);
112 ttm_mem_type_manager_debug(glob, man);
113 }
114}
115
54static ssize_t ttm_bo_global_show(struct kobject *kobj, 116static ssize_t ttm_bo_global_show(struct kobject *kobj,
55 struct attribute *attr, 117 struct attribute *attr,
56 char *buffer) 118 char *buffer)
@@ -117,7 +179,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
117 ret = wait_event_interruptible(bo->event_queue, 179 ret = wait_event_interruptible(bo->event_queue,
118 atomic_read(&bo->reserved) == 0); 180 atomic_read(&bo->reserved) == 0);
119 if (unlikely(ret != 0)) 181 if (unlikely(ret != 0))
120 return -ERESTART; 182 return ret;
121 } else { 183 } else {
122 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); 184 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
123 } 185 }
@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
247/* 309/*
248 * Call bo->mutex locked. 310 * Call bo->mutex locked.
249 */ 311 */
250
251static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) 312static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
252{ 313{
253 struct ttm_bo_device *bdev = bo->bdev; 314 struct ttm_bo_device *bdev = bo->bdev;
@@ -329,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
329 } 390 }
330 391
331 if (bo->mem.mem_type == TTM_PL_SYSTEM) { 392 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
332 393 bo->mem = *mem;
333 struct ttm_mem_reg *old_mem = &bo->mem;
334 uint32_t save_flags = old_mem->placement;
335
336 *old_mem = *mem;
337 mem->mm_node = NULL; 394 mem->mm_node = NULL;
338 ttm_flag_masked(&save_flags, mem->placement,
339 TTM_PL_MASK_MEMTYPE);
340 goto moved; 395 goto moved;
341 } 396 }
342 397
@@ -419,6 +474,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
419 kref_put(&bo->list_kref, ttm_bo_ref_bug); 474 kref_put(&bo->list_kref, ttm_bo_ref_bug);
420 } 475 }
421 if (bo->mem.mm_node) { 476 if (bo->mem.mm_node) {
477 bo->mem.mm_node->private = NULL;
422 drm_mm_put_block(bo->mem.mm_node); 478 drm_mm_put_block(bo->mem.mm_node);
423 bo->mem.mm_node = NULL; 479 bo->mem.mm_node = NULL;
424 } 480 }
@@ -555,24 +611,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
555} 611}
556EXPORT_SYMBOL(ttm_bo_unref); 612EXPORT_SYMBOL(ttm_bo_unref);
557 613
558static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, 614static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
559 bool interruptible, bool no_wait) 615 bool no_wait)
560{ 616{
561 int ret = 0;
562 struct ttm_bo_device *bdev = bo->bdev; 617 struct ttm_bo_device *bdev = bo->bdev;
563 struct ttm_bo_global *glob = bo->glob; 618 struct ttm_bo_global *glob = bo->glob;
564 struct ttm_mem_reg evict_mem; 619 struct ttm_mem_reg evict_mem;
565 uint32_t proposed_placement; 620 struct ttm_placement placement;
566 621 int ret = 0;
567 if (bo->mem.mem_type != mem_type)
568 goto out;
569 622
570 spin_lock(&bo->lock); 623 spin_lock(&bo->lock);
571 ret = ttm_bo_wait(bo, false, interruptible, no_wait); 624 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
572 spin_unlock(&bo->lock); 625 spin_unlock(&bo->lock);
573 626
574 if (unlikely(ret != 0)) { 627 if (unlikely(ret != 0)) {
575 if (ret != -ERESTART) { 628 if (ret != -ERESTARTSYS) {
576 printk(KERN_ERR TTM_PFX 629 printk(KERN_ERR TTM_PFX
577 "Failed to expire sync object before " 630 "Failed to expire sync object before "
578 "buffer eviction.\n"); 631 "buffer eviction.\n");
@@ -585,116 +638,139 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
585 evict_mem = bo->mem; 638 evict_mem = bo->mem;
586 evict_mem.mm_node = NULL; 639 evict_mem.mm_node = NULL;
587 640
588 proposed_placement = bdev->driver->evict_flags(bo); 641 placement.fpfn = 0;
589 642 placement.lpfn = 0;
590 ret = ttm_bo_mem_space(bo, proposed_placement, 643 placement.num_placement = 0;
591 &evict_mem, interruptible, no_wait); 644 placement.num_busy_placement = 0;
592 if (unlikely(ret != 0 && ret != -ERESTART)) 645 bdev->driver->evict_flags(bo, &placement);
593 ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM, 646 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
594 &evict_mem, interruptible, no_wait); 647 no_wait);
595
596 if (ret) { 648 if (ret) {
597 if (ret != -ERESTART) 649 if (ret != -ERESTARTSYS) {
598 printk(KERN_ERR TTM_PFX 650 printk(KERN_ERR TTM_PFX
599 "Failed to find memory space for " 651 "Failed to find memory space for "
600 "buffer 0x%p eviction.\n", bo); 652 "buffer 0x%p eviction.\n", bo);
653 ttm_bo_mem_space_debug(bo, &placement);
654 }
601 goto out; 655 goto out;
602 } 656 }
603 657
604 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 658 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
605 no_wait); 659 no_wait);
606 if (ret) { 660 if (ret) {
607 if (ret != -ERESTART) 661 if (ret != -ERESTARTSYS)
608 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); 662 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
663 spin_lock(&glob->lru_lock);
664 if (evict_mem.mm_node) {
665 evict_mem.mm_node->private = NULL;
666 drm_mm_put_block(evict_mem.mm_node);
667 evict_mem.mm_node = NULL;
668 }
669 spin_unlock(&glob->lru_lock);
609 goto out; 670 goto out;
610 } 671 }
672 bo->evicted = true;
673out:
674 return ret;
675}
676
677static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
678 uint32_t mem_type,
679 bool interruptible, bool no_wait)
680{
681 struct ttm_bo_global *glob = bdev->glob;
682 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
683 struct ttm_buffer_object *bo;
684 int ret, put_count = 0;
611 685
612 spin_lock(&glob->lru_lock); 686 spin_lock(&glob->lru_lock);
613 if (evict_mem.mm_node) { 687 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
614 drm_mm_put_block(evict_mem.mm_node); 688 kref_get(&bo->list_kref);
615 evict_mem.mm_node = NULL; 689 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
616 } 690 if (likely(ret == 0))
691 put_count = ttm_bo_del_from_lru(bo);
617 spin_unlock(&glob->lru_lock); 692 spin_unlock(&glob->lru_lock);
618 bo->evicted = true; 693 if (unlikely(ret != 0))
619out: 694 return ret;
695 while (put_count--)
696 kref_put(&bo->list_kref, ttm_bo_ref_bug);
697 ret = ttm_bo_evict(bo, interruptible, no_wait);
698 ttm_bo_unreserve(bo);
699 kref_put(&bo->list_kref, ttm_bo_release_list);
620 return ret; 700 return ret;
621} 701}
622 702
703static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
704 struct ttm_mem_type_manager *man,
705 struct ttm_placement *placement,
706 struct ttm_mem_reg *mem,
707 struct drm_mm_node **node)
708{
709 struct ttm_bo_global *glob = bo->glob;
710 unsigned long lpfn;
711 int ret;
712
713 lpfn = placement->lpfn;
714 if (!lpfn)
715 lpfn = man->size;
716 *node = NULL;
717 do {
718 ret = drm_mm_pre_get(&man->manager);
719 if (unlikely(ret))
720 return ret;
721
722 spin_lock(&glob->lru_lock);
723 *node = drm_mm_search_free_in_range(&man->manager,
724 mem->num_pages, mem->page_alignment,
725 placement->fpfn, lpfn, 1);
726 if (unlikely(*node == NULL)) {
727 spin_unlock(&glob->lru_lock);
728 return 0;
729 }
730 *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
731 mem->page_alignment,
732 placement->fpfn,
733 lpfn);
734 spin_unlock(&glob->lru_lock);
735 } while (*node == NULL);
736 return 0;
737}
738
623/** 739/**
624 * Repeatedly evict memory from the LRU for @mem_type until we create enough 740 * Repeatedly evict memory from the LRU for @mem_type until we create enough
625 * space, or we've evicted everything and there isn't enough space. 741 * space, or we've evicted everything and there isn't enough space.
626 */ 742 */
627static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev, 743static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
628 struct ttm_mem_reg *mem, 744 uint32_t mem_type,
629 uint32_t mem_type, 745 struct ttm_placement *placement,
630 bool interruptible, bool no_wait) 746 struct ttm_mem_reg *mem,
747 bool interruptible, bool no_wait)
631{ 748{
749 struct ttm_bo_device *bdev = bo->bdev;
632 struct ttm_bo_global *glob = bdev->glob; 750 struct ttm_bo_global *glob = bdev->glob;
633 struct drm_mm_node *node;
634 struct ttm_buffer_object *entry;
635 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 751 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
636 struct list_head *lru; 752 struct drm_mm_node *node;
637 unsigned long num_pages = mem->num_pages;
638 int put_count = 0;
639 int ret; 753 int ret;
640 754
641retry_pre_get:
642 ret = drm_mm_pre_get(&man->manager);
643 if (unlikely(ret != 0))
644 return ret;
645
646 spin_lock(&glob->lru_lock);
647 do { 755 do {
648 node = drm_mm_search_free(&man->manager, num_pages, 756 ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
649 mem->page_alignment, 1); 757 if (unlikely(ret != 0))
758 return ret;
650 if (node) 759 if (node)
651 break; 760 break;
652 761 spin_lock(&glob->lru_lock);
653 lru = &man->lru; 762 if (list_empty(&man->lru)) {
654 if (list_empty(lru)) 763 spin_unlock(&glob->lru_lock);
655 break; 764 break;
656 765 }
657 entry = list_first_entry(lru, struct ttm_buffer_object, lru);
658 kref_get(&entry->list_kref);
659
660 ret =
661 ttm_bo_reserve_locked(entry, interruptible, no_wait,
662 false, 0);
663
664 if (likely(ret == 0))
665 put_count = ttm_bo_del_from_lru(entry);
666
667 spin_unlock(&glob->lru_lock); 766 spin_unlock(&glob->lru_lock);
668 767 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
768 no_wait);
669 if (unlikely(ret != 0)) 769 if (unlikely(ret != 0))
670 return ret; 770 return ret;
671
672 while (put_count--)
673 kref_put(&entry->list_kref, ttm_bo_ref_bug);
674
675 ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
676
677 ttm_bo_unreserve(entry);
678
679 kref_put(&entry->list_kref, ttm_bo_release_list);
680 if (ret)
681 return ret;
682
683 spin_lock(&glob->lru_lock);
684 } while (1); 771 } while (1);
685 772 if (node == NULL)
686 if (!node) {
687 spin_unlock(&glob->lru_lock);
688 return -ENOMEM; 773 return -ENOMEM;
689 }
690
691 node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
692 if (unlikely(!node)) {
693 spin_unlock(&glob->lru_lock);
694 goto retry_pre_get;
695 }
696
697 spin_unlock(&glob->lru_lock);
698 mem->mm_node = node; 774 mem->mm_node = node;
699 mem->mem_type = mem_type; 775 mem->mem_type = mem_type;
700 return 0; 776 return 0;
@@ -725,7 +801,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
725 return result; 801 return result;
726} 802}
727 803
728
729static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, 804static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
730 bool disallow_fixed, 805 bool disallow_fixed,
731 uint32_t mem_type, 806 uint32_t mem_type,
@@ -758,66 +833,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
758 * space. 833 * space.
759 */ 834 */
760int ttm_bo_mem_space(struct ttm_buffer_object *bo, 835int ttm_bo_mem_space(struct ttm_buffer_object *bo,
761 uint32_t proposed_placement, 836 struct ttm_placement *placement,
762 struct ttm_mem_reg *mem, 837 struct ttm_mem_reg *mem,
763 bool interruptible, bool no_wait) 838 bool interruptible, bool no_wait)
764{ 839{
765 struct ttm_bo_device *bdev = bo->bdev; 840 struct ttm_bo_device *bdev = bo->bdev;
766 struct ttm_bo_global *glob = bo->glob;
767 struct ttm_mem_type_manager *man; 841 struct ttm_mem_type_manager *man;
768
769 uint32_t num_prios = bdev->driver->num_mem_type_prio;
770 const uint32_t *prios = bdev->driver->mem_type_prio;
771 uint32_t i;
772 uint32_t mem_type = TTM_PL_SYSTEM; 842 uint32_t mem_type = TTM_PL_SYSTEM;
773 uint32_t cur_flags = 0; 843 uint32_t cur_flags = 0;
774 bool type_found = false; 844 bool type_found = false;
775 bool type_ok = false; 845 bool type_ok = false;
776 bool has_eagain = false; 846 bool has_erestartsys = false;
777 struct drm_mm_node *node = NULL; 847 struct drm_mm_node *node = NULL;
778 int ret; 848 int i, ret;
779 849
780 mem->mm_node = NULL; 850 mem->mm_node = NULL;
781 for (i = 0; i < num_prios; ++i) { 851 for (i = 0; i < placement->num_placement; ++i) {
782 mem_type = prios[i]; 852 ret = ttm_mem_type_from_flags(placement->placement[i],
853 &mem_type);
854 if (ret)
855 return ret;
783 man = &bdev->man[mem_type]; 856 man = &bdev->man[mem_type];
784 857
785 type_ok = ttm_bo_mt_compatible(man, 858 type_ok = ttm_bo_mt_compatible(man,
786 bo->type == ttm_bo_type_user, 859 bo->type == ttm_bo_type_user,
787 mem_type, proposed_placement, 860 mem_type,
788 &cur_flags); 861 placement->placement[i],
862 &cur_flags);
789 863
790 if (!type_ok) 864 if (!type_ok)
791 continue; 865 continue;
792 866
793 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 867 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
794 cur_flags); 868 cur_flags);
869 /*
870 * Copy the access and other non-mapping-related flag bits from
871 * the memory placement flags into the current flags.
872 */
873 ttm_flag_masked(&cur_flags, placement->placement[i],
874 ~TTM_PL_MASK_MEMTYPE);
795 875
796 if (mem_type == TTM_PL_SYSTEM) 876 if (mem_type == TTM_PL_SYSTEM)
797 break; 877 break;
798 878
799 if (man->has_type && man->use_type) { 879 if (man->has_type && man->use_type) {
800 type_found = true; 880 type_found = true;
801 do { 881 ret = ttm_bo_man_get_node(bo, man, placement, mem,
802 ret = drm_mm_pre_get(&man->manager); 882 &node);
803 if (unlikely(ret)) 883 if (unlikely(ret))
804 return ret; 884 return ret;
805
806 spin_lock(&glob->lru_lock);
807 node = drm_mm_search_free(&man->manager,
808 mem->num_pages,
809 mem->page_alignment,
810 1);
811 if (unlikely(!node)) {
812 spin_unlock(&glob->lru_lock);
813 break;
814 }
815 node = drm_mm_get_block_atomic(node,
816 mem->num_pages,
817 mem->
818 page_alignment);
819 spin_unlock(&glob->lru_lock);
820 } while (!node);
821 } 885 }
822 if (node) 886 if (node)
823 break; 887 break;
@@ -827,67 +891,65 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
827 mem->mm_node = node; 891 mem->mm_node = node;
828 mem->mem_type = mem_type; 892 mem->mem_type = mem_type;
829 mem->placement = cur_flags; 893 mem->placement = cur_flags;
894 if (node)
895 node->private = bo;
830 return 0; 896 return 0;
831 } 897 }
832 898
833 if (!type_found) 899 if (!type_found)
834 return -EINVAL; 900 return -EINVAL;
835 901
836 num_prios = bdev->driver->num_mem_busy_prio; 902 for (i = 0; i < placement->num_busy_placement; ++i) {
837 prios = bdev->driver->mem_busy_prio; 903 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
838 904 &mem_type);
839 for (i = 0; i < num_prios; ++i) { 905 if (ret)
840 mem_type = prios[i]; 906 return ret;
841 man = &bdev->man[mem_type]; 907 man = &bdev->man[mem_type];
842
843 if (!man->has_type) 908 if (!man->has_type)
844 continue; 909 continue;
845
846 if (!ttm_bo_mt_compatible(man, 910 if (!ttm_bo_mt_compatible(man,
847 bo->type == ttm_bo_type_user, 911 bo->type == ttm_bo_type_user,
848 mem_type, 912 mem_type,
849 proposed_placement, &cur_flags)) 913 placement->busy_placement[i],
914 &cur_flags))
850 continue; 915 continue;
851 916
852 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 917 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
853 cur_flags); 918 cur_flags);
919 /*
920 * Copy the access and other non-mapping-related flag bits from
921 * the memory placement flags into the current flags.
922 */
923 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
924 ~TTM_PL_MASK_MEMTYPE);
854 925
855 ret = ttm_bo_mem_force_space(bdev, mem, mem_type, 926 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
856 interruptible, no_wait); 927 interruptible, no_wait);
857
858 if (ret == 0 && mem->mm_node) { 928 if (ret == 0 && mem->mm_node) {
859 mem->placement = cur_flags; 929 mem->placement = cur_flags;
930 mem->mm_node->private = bo;
860 return 0; 931 return 0;
861 } 932 }
862 933 if (ret == -ERESTARTSYS)
863 if (ret == -ERESTART) 934 has_erestartsys = true;
864 has_eagain = true;
865 } 935 }
866 936 ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
867 ret = (has_eagain) ? -ERESTART : -ENOMEM;
868 return ret; 937 return ret;
869} 938}
870EXPORT_SYMBOL(ttm_bo_mem_space); 939EXPORT_SYMBOL(ttm_bo_mem_space);
871 940
872int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) 941int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
873{ 942{
874 int ret = 0;
875
876 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) 943 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
877 return -EBUSY; 944 return -EBUSY;
878 945
879 ret = wait_event_interruptible(bo->event_queue, 946 return wait_event_interruptible(bo->event_queue,
880 atomic_read(&bo->cpu_writers) == 0); 947 atomic_read(&bo->cpu_writers) == 0);
881
882 if (ret == -ERESTARTSYS)
883 ret = -ERESTART;
884
885 return ret;
886} 948}
887 949
888int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 950int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
889 uint32_t proposed_placement, 951 struct ttm_placement *placement,
890 bool interruptible, bool no_wait) 952 bool interruptible, bool no_wait)
891{ 953{
892 struct ttm_bo_global *glob = bo->glob; 954 struct ttm_bo_global *glob = bo->glob;
893 int ret = 0; 955 int ret = 0;
@@ -900,101 +962,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
900 * Have the driver move function wait for idle when necessary, 962 * Have the driver move function wait for idle when necessary,
901 * instead of doing it here. 963 * instead of doing it here.
902 */ 964 */
903
904 spin_lock(&bo->lock); 965 spin_lock(&bo->lock);
905 ret = ttm_bo_wait(bo, false, interruptible, no_wait); 966 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
906 spin_unlock(&bo->lock); 967 spin_unlock(&bo->lock);
907
908 if (ret) 968 if (ret)
909 return ret; 969 return ret;
910
911 mem.num_pages = bo->num_pages; 970 mem.num_pages = bo->num_pages;
912 mem.size = mem.num_pages << PAGE_SHIFT; 971 mem.size = mem.num_pages << PAGE_SHIFT;
913 mem.page_alignment = bo->mem.page_alignment; 972 mem.page_alignment = bo->mem.page_alignment;
914
915 /* 973 /*
916 * Determine where to move the buffer. 974 * Determine where to move the buffer.
917 */ 975 */
918 976 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
919 ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
920 interruptible, no_wait);
921 if (ret) 977 if (ret)
922 goto out_unlock; 978 goto out_unlock;
923
924 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); 979 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
925
926out_unlock: 980out_unlock:
927 if (ret && mem.mm_node) { 981 if (ret && mem.mm_node) {
928 spin_lock(&glob->lru_lock); 982 spin_lock(&glob->lru_lock);
983 mem.mm_node->private = NULL;
929 drm_mm_put_block(mem.mm_node); 984 drm_mm_put_block(mem.mm_node);
930 spin_unlock(&glob->lru_lock); 985 spin_unlock(&glob->lru_lock);
931 } 986 }
932 return ret; 987 return ret;
933} 988}
934 989
935static int ttm_bo_mem_compat(uint32_t proposed_placement, 990static int ttm_bo_mem_compat(struct ttm_placement *placement,
936 struct ttm_mem_reg *mem) 991 struct ttm_mem_reg *mem)
937{ 992{
938 if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0) 993 int i;
939 return 0; 994
940 if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0) 995 for (i = 0; i < placement->num_placement; i++) {
941 return 0; 996 if ((placement->placement[i] & mem->placement &
942 997 TTM_PL_MASK_CACHING) &&
943 return 1; 998 (placement->placement[i] & mem->placement &
999 TTM_PL_MASK_MEM))
1000 return i;
1001 }
1002 return -1;
944} 1003}
945 1004
946int ttm_buffer_object_validate(struct ttm_buffer_object *bo, 1005int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
947 uint32_t proposed_placement, 1006 struct ttm_placement *placement,
948 bool interruptible, bool no_wait) 1007 bool interruptible, bool no_wait)
949{ 1008{
950 int ret; 1009 int ret;
951 1010
952 BUG_ON(!atomic_read(&bo->reserved)); 1011 BUG_ON(!atomic_read(&bo->reserved));
953 bo->proposed_placement = proposed_placement; 1012 /* Check that range is valid */
954 1013 if (placement->lpfn || placement->fpfn)
955 TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n", 1014 if (placement->fpfn > placement->lpfn ||
956 (unsigned long)proposed_placement, 1015 (placement->lpfn - placement->fpfn) < bo->num_pages)
957 (unsigned long)bo->mem.placement); 1016 return -EINVAL;
958
959 /* 1017 /*
960 * Check whether we need to move buffer. 1018 * Check whether we need to move buffer.
961 */ 1019 */
962 1020 ret = ttm_bo_mem_compat(placement, &bo->mem);
963 if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) { 1021 if (ret < 0) {
964 ret = ttm_bo_move_buffer(bo, bo->proposed_placement, 1022 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
965 interruptible, no_wait); 1023 if (ret)
966 if (ret) {
967 if (ret != -ERESTART)
968 printk(KERN_ERR TTM_PFX
969 "Failed moving buffer. "
970 "Proposed placement 0x%08x\n",
971 bo->proposed_placement);
972 if (ret == -ENOMEM)
973 printk(KERN_ERR TTM_PFX
974 "Out of aperture space or "
975 "DRM memory quota.\n");
976 return ret; 1024 return ret;
977 } 1025 } else {
1026 /*
1027 * Copy the access and other non-mapping-related flag bits from
1028 * the compatible memory placement flags into the active flags.
1029 */
1030 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1031 ~TTM_PL_MASK_MEMTYPE);
978 } 1032 }
979
980 /* 1033 /*
981 * We might need to add a TTM. 1034 * We might need to add a TTM.
982 */ 1035 */
983
984 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 1036 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
985 ret = ttm_bo_add_ttm(bo, true); 1037 ret = ttm_bo_add_ttm(bo, true);
986 if (ret) 1038 if (ret)
987 return ret; 1039 return ret;
988 } 1040 }
989 /*
990 * Validation has succeeded, move the access and other
991 * non-mapping-related flag bits from the proposed flags to
992 * the active flags
993 */
994
995 ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
996 ~TTM_PL_MASK_MEMTYPE);
997
998 return 0; 1041 return 0;
999} 1042}
1000EXPORT_SYMBOL(ttm_buffer_object_validate); 1043EXPORT_SYMBOL(ttm_buffer_object_validate);
@@ -1042,8 +1085,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
1042 size_t acc_size, 1085 size_t acc_size,
1043 void (*destroy) (struct ttm_buffer_object *)) 1086 void (*destroy) (struct ttm_buffer_object *))
1044{ 1087{
1045 int ret = 0; 1088 int i, c, ret = 0;
1046 unsigned long num_pages; 1089 unsigned long num_pages;
1090 uint32_t placements[8];
1091 struct ttm_placement placement;
1047 1092
1048 size += buffer_start & ~PAGE_MASK; 1093 size += buffer_start & ~PAGE_MASK;
1049 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1094 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1100,7 +1145,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
1100 goto out_err; 1145 goto out_err;
1101 } 1146 }
1102 1147
1103 ret = ttm_buffer_object_validate(bo, flags, interruptible, false); 1148 placement.fpfn = 0;
1149 placement.lpfn = 0;
1150 for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
1151 if (flags & (1 << i))
1152 placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
1153 placement.placement = placements;
1154 placement.num_placement = c;
1155 placement.busy_placement = placements;
1156 placement.num_busy_placement = c;
1157 ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
1104 if (ret) 1158 if (ret)
1105 goto out_err; 1159 goto out_err;
1106 1160
@@ -1135,8 +1189,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1135 struct ttm_buffer_object **p_bo) 1189 struct ttm_buffer_object **p_bo)
1136{ 1190{
1137 struct ttm_buffer_object *bo; 1191 struct ttm_buffer_object *bo;
1138 int ret;
1139 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; 1192 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1193 int ret;
1140 1194
1141 size_t acc_size = 1195 size_t acc_size =
1142 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); 1196 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1161,66 +1215,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1161 return ret; 1215 return ret;
1162} 1216}
1163 1217
1164static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
1165 uint32_t mem_type, bool allow_errors)
1166{
1167 int ret;
1168
1169 spin_lock(&bo->lock);
1170 ret = ttm_bo_wait(bo, false, false, false);
1171 spin_unlock(&bo->lock);
1172
1173 if (ret && allow_errors)
1174 goto out;
1175
1176 if (bo->mem.mem_type == mem_type)
1177 ret = ttm_bo_evict(bo, mem_type, false, false);
1178
1179 if (ret) {
1180 if (allow_errors) {
1181 goto out;
1182 } else {
1183 ret = 0;
1184 printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
1185 }
1186 }
1187
1188out:
1189 return ret;
1190}
1191
1192static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, 1218static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1193 struct list_head *head, 1219 unsigned mem_type, bool allow_errors)
1194 unsigned mem_type, bool allow_errors)
1195{ 1220{
1221 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1196 struct ttm_bo_global *glob = bdev->glob; 1222 struct ttm_bo_global *glob = bdev->glob;
1197 struct ttm_buffer_object *entry;
1198 int ret; 1223 int ret;
1199 int put_count;
1200 1224
1201 /* 1225 /*
1202 * Can't use standard list traversal since we're unlocking. 1226 * Can't use standard list traversal since we're unlocking.
1203 */ 1227 */
1204 1228
1205 spin_lock(&glob->lru_lock); 1229 spin_lock(&glob->lru_lock);
1206 1230 while (!list_empty(&man->lru)) {
1207 while (!list_empty(head)) {
1208 entry = list_first_entry(head, struct ttm_buffer_object, lru);
1209 kref_get(&entry->list_kref);
1210 ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
1211 put_count = ttm_bo_del_from_lru(entry);
1212 spin_unlock(&glob->lru_lock); 1231 spin_unlock(&glob->lru_lock);
1213 while (put_count--) 1232 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1214 kref_put(&entry->list_kref, ttm_bo_ref_bug); 1233 if (ret) {
1215 BUG_ON(ret); 1234 if (allow_errors) {
1216 ret = ttm_bo_leave_list(entry, mem_type, allow_errors); 1235 return ret;
1217 ttm_bo_unreserve(entry); 1236 } else {
1218 kref_put(&entry->list_kref, ttm_bo_release_list); 1237 printk(KERN_ERR TTM_PFX
1238 "Cleanup eviction failed\n");
1239 }
1240 }
1219 spin_lock(&glob->lru_lock); 1241 spin_lock(&glob->lru_lock);
1220 } 1242 }
1221
1222 spin_unlock(&glob->lru_lock); 1243 spin_unlock(&glob->lru_lock);
1223
1224 return 0; 1244 return 0;
1225} 1245}
1226 1246
@@ -1247,7 +1267,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1247 1267
1248 ret = 0; 1268 ret = 0;
1249 if (mem_type > 0) { 1269 if (mem_type > 0) {
1250 ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false); 1270 ttm_bo_force_list_clean(bdev, mem_type, false);
1251 1271
1252 spin_lock(&glob->lru_lock); 1272 spin_lock(&glob->lru_lock);
1253 if (drm_mm_clean(&man->manager)) 1273 if (drm_mm_clean(&man->manager))
@@ -1280,12 +1300,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1280 return 0; 1300 return 0;
1281 } 1301 }
1282 1302
1283 return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true); 1303 return ttm_bo_force_list_clean(bdev, mem_type, true);
1284} 1304}
1285EXPORT_SYMBOL(ttm_bo_evict_mm); 1305EXPORT_SYMBOL(ttm_bo_evict_mm);
1286 1306
1287int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, 1307int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1288 unsigned long p_offset, unsigned long p_size) 1308 unsigned long p_size)
1289{ 1309{
1290 int ret = -EINVAL; 1310 int ret = -EINVAL;
1291 struct ttm_mem_type_manager *man; 1311 struct ttm_mem_type_manager *man;
@@ -1315,7 +1335,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1315 type); 1335 type);
1316 return ret; 1336 return ret;
1317 } 1337 }
1318 ret = drm_mm_init(&man->manager, p_offset, p_size); 1338 ret = drm_mm_init(&man->manager, 0, p_size);
1319 if (ret) 1339 if (ret)
1320 return ret; 1340 return ret;
1321 } 1341 }
@@ -1464,7 +1484,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1464 * Initialize the system memory buffer type. 1484 * Initialize the system memory buffer type.
1465 * Other types need to be driver / IOCTL initialized. 1485 * Other types need to be driver / IOCTL initialized.
1466 */ 1486 */
1467 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0); 1487 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1468 if (unlikely(ret != 0)) 1488 if (unlikely(ret != 0))
1469 goto out_no_sys; 1489 goto out_no_sys;
1470 1490
@@ -1694,7 +1714,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1694 ret = wait_event_interruptible 1714 ret = wait_event_interruptible
1695 (bo->event_queue, atomic_read(&bo->reserved) == 0); 1715 (bo->event_queue, atomic_read(&bo->reserved) == 0);
1696 if (unlikely(ret != 0)) 1716 if (unlikely(ret != 0))
1697 return -ERESTART; 1717 return ret;
1698 } else { 1718 } else {
1699 wait_event(bo->event_queue, 1719 wait_event(bo->event_queue,
1700 atomic_read(&bo->reserved) == 0); 1720 atomic_read(&bo->reserved) == 0);
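A worked example, with assumed flag values, of the legacy-flags conversion that ttm_buffer_object_init() now performs before validation:

	uint32_t flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;

	/* The loop over memory-type bits 0..TTM_PL_PRIV5 yields c == 2:
	 *   placements[0] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT
	 *   placements[1] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM
	 * one single-memory-type entry per set bit, each keeping the caching
	 * and access bits (~TTM_PL_MASK_MEM), tried in ascending bit order. */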
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1c040d040338..609a85a4d855 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
114 ret = ttm_bo_wait(bo, false, true, false); 114 ret = ttm_bo_wait(bo, false, true, false);
115 spin_unlock(&bo->lock); 115 spin_unlock(&bo->lock);
116 if (unlikely(ret != 0)) { 116 if (unlikely(ret != 0)) {
117 retval = (ret != -ERESTART) ? 117 retval = (ret != -ERESTARTSYS) ?
118 VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; 118 VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
119 goto out_unlock; 119 goto out_unlock;
120 } 120 }
@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
349 switch (ret) { 349 switch (ret) {
350 case 0: 350 case 0:
351 break; 351 break;
352 case -ERESTART:
353 ret = -EINTR;
354 goto out_unref;
355 case -EBUSY: 352 case -EBUSY:
356 ret = -EAGAIN; 353 ret = -EAGAIN;
357 goto out_unref; 354 goto out_unref;
@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
421 switch (ret) { 418 switch (ret) {
422 case 0: 419 case 0:
423 break; 420 break;
424 case -ERESTART:
425 return -EINTR;
426 case -EBUSY: 421 case -EBUSY:
427 return -EAGAIN; 422 return -EAGAIN;
428 default: 423 default:
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 8bfde5f40841..f5245c02b8fd 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -323,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
323 * No special dma32 zone needed. 323 * No special dma32 zone needed.
324 */ 324 */
325 325
326 if (mem <= ((uint64_t) 1ULL << 32)) 326 if (mem <= ((uint64_t) 1ULL << 32)) {
327 kfree(zone);
327 return 0; 328 return 0;
329 }
328 330
329 /* 331 /*
330 * Limit max dma32 memory to 4GB for now 332 * Limit max dma32 memory to 4GB for now
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 62329f9a42cb..4c10be39a43b 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -66,6 +66,13 @@ extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
66 unsigned long size, 66 unsigned long size,
67 unsigned alignment, 67 unsigned alignment,
68 int atomic); 68 int atomic);
69extern struct drm_mm_node *drm_mm_get_block_range_generic(
70 struct drm_mm_node *node,
71 unsigned long size,
72 unsigned alignment,
73 unsigned long start,
74 unsigned long end,
75 int atomic);
69static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent, 76static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
70 unsigned long size, 77 unsigned long size,
71 unsigned alignment) 78 unsigned alignment)
@@ -78,11 +85,38 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa
78{ 85{
79 return drm_mm_get_block_generic(parent, size, alignment, 1); 86 return drm_mm_get_block_generic(parent, size, alignment, 1);
80} 87}
88static inline struct drm_mm_node *drm_mm_get_block_range(
89 struct drm_mm_node *parent,
90 unsigned long size,
91 unsigned alignment,
92 unsigned long start,
93 unsigned long end)
94{
95 return drm_mm_get_block_range_generic(parent, size, alignment,
96 start, end, 0);
97}
98static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
99 struct drm_mm_node *parent,
100 unsigned long size,
101 unsigned alignment,
102 unsigned long start,
103 unsigned long end)
104{
105 return drm_mm_get_block_range_generic(parent, size, alignment,
106 start, end, 1);
107}
81extern void drm_mm_put_block(struct drm_mm_node *cur); 108extern void drm_mm_put_block(struct drm_mm_node *cur);
82extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, 109extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
83 unsigned long size, 110 unsigned long size,
84 unsigned alignment, 111 unsigned alignment,
85 int best_match); 112 int best_match);
113extern struct drm_mm_node *drm_mm_search_free_in_range(
114 const struct drm_mm *mm,
115 unsigned long size,
116 unsigned alignment,
117 unsigned long start,
118 unsigned long end,
119 int best_match);
86extern int drm_mm_init(struct drm_mm *mm, unsigned long start, 120extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
87 unsigned long size); 121 unsigned long size);
88extern void drm_mm_takedown(struct drm_mm *mm); 122extern void drm_mm_takedown(struct drm_mm *mm);
@@ -99,6 +133,7 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
99 return block->mm; 133 return block->mm;
100} 134}
101 135
136extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
102#ifdef CONFIG_DEBUG_FS 137#ifdef CONFIG_DEBUG_FS
103int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); 138int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
104#endif 139#endif
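The new range calls are meant to be used with the pre-get/search/get retry pattern already visible in ttm_bo_man_get_node(); a condensed sketch, where mm, lock, size, align, start, and end stand in for caller state:

	struct drm_mm_node *node;
	int ret;

	do {
		ret = drm_mm_pre_get(mm);	/* preallocate nodes; may sleep */
		if (ret)
			return ret;
		spin_lock(lock);
		node = drm_mm_search_free_in_range(mm, size, align,
						   start, end, 1);
		if (!node) {			/* nothing fits: caller must evict */
			spin_unlock(lock);
			return 0;
		}
		node = drm_mm_get_block_atomic_range(node, size, align,
						     start, end);
		spin_unlock(lock);
	} while (!node);			/* raced with another allocator: retry */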
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 491146170522..4fd498523ce3 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -44,6 +44,29 @@ struct ttm_bo_device;
44 44
45struct drm_mm_node; 45struct drm_mm_node;
46 46
47
48/**
49 * struct ttm_placement
50 *
51 * @fpfn: first valid page frame number to put the object
52 * @lpfn: last valid page frame number to put the object
53 * @num_placement: number of preferred placements
54 * @placement: preferred placements
55 * @num_busy_placement: number of preferred placements when the buffer must be evicted
56 * @busy_placement: preferred placements when the buffer must be evicted
57 *
58 * Structure indicating the placement you request for an object.
59 */
60struct ttm_placement {
61 unsigned fpfn;
62 unsigned lpfn;
63 unsigned num_placement;
64 const uint32_t *placement;
65 unsigned num_busy_placement;
66 const uint32_t *busy_placement;
67};
68
69
47/** 70/**
48 * struct ttm_mem_reg 71 * struct ttm_mem_reg
49 * 72 *
@@ -109,10 +132,6 @@ struct ttm_tt;
109 * the object is destroyed. 132 * the object is destroyed.
110 * @event_queue: Queue for processes waiting on buffer object status change. 133 * @event_queue: Queue for processes waiting on buffer object status change.
111 * @lock: spinlock protecting mostly synchronization members. 134 * @lock: spinlock protecting mostly synchronization members.
112 * @proposed_placement: Proposed placement for the buffer. Changed only by the
113 * creator prior to validation as opposed to bo->mem.proposed_flags which is
114 * changed by the implementation prior to a buffer move if it wants to outsmart
115 * the buffer creator / user. This latter happens, for example, at eviction.
116 * @mem: structure describing current placement. 135 * @mem: structure describing current placement.
117 * @persistant_swap_storage: Usually the swap storage is deleted for buffers 136 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
118 * pinned in physical memory. If this behaviour is not desired, this member 137 * pinned in physical memory. If this behaviour is not desired, this member
@@ -177,7 +196,6 @@ struct ttm_buffer_object {
177 * Members protected by the bo::reserved lock. 196 * Members protected by the bo::reserved lock.
178 */ 197 */
179 198
180 uint32_t proposed_placement;
181 struct ttm_mem_reg mem; 199 struct ttm_mem_reg mem;
182 struct file *persistant_swap_storage; 200 struct file *persistant_swap_storage;
183 struct ttm_tt *ttm; 201 struct ttm_tt *ttm;
@@ -285,7 +303,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
285 * Note: It might be necessary to block validations before the 303 * Note: It might be necessary to block validations before the
286 * wait by reserving the buffer. 304 * wait by reserving the buffer.
287 * Returns -EBUSY if no_wait is true and the buffer is busy. 305 * Returns -EBUSY if no_wait is true and the buffer is busy.
288 * Returns -ERESTART if interrupted by a signal. 306 * Returns -ERESTARTSYS if interrupted by a signal.
289 */ 307 */
290extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, 308extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
291 bool interruptible, bool no_wait); 309 bool interruptible, bool no_wait);
@@ -293,21 +311,22 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
293 * ttm_buffer_object_validate 311 * ttm_buffer_object_validate
294 * 312 *
295 * @bo: The buffer object. 313 * @bo: The buffer object.
296 * @proposed_placement: Proposed_placement for the buffer object. 314 * @placement: Proposed placement for the buffer object.
297 * @interruptible: Sleep interruptible if sleeping. 315 * @interruptible: Sleep interruptible if sleeping.
298 * @no_wait: Return immediately if the buffer is busy. 316 * @no_wait: Return immediately if the buffer is busy.
299 * 317 *
300 * Changes placement and caching policy of the buffer object 318 * Changes placement and caching policy of the buffer object
301 * according to bo::proposed_flags. 319 * according to the proposed placement.
302 * Returns 320 * Returns
303 * -EINVAL on invalid proposed_flags. 321 * -EINVAL on invalid proposed placement.
304 * -ENOMEM on out-of-memory condition. 322 * -ENOMEM on out-of-memory condition.
305 * -EBUSY if no_wait is true and buffer busy. 323 * -EBUSY if no_wait is true and buffer busy.
306 * -ERESTART if interrupted by a signal. 324 * -ERESTARTSYS if interrupted by a signal.
307 */ 325 */
308extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo, 326extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
309 uint32_t proposed_placement, 327 struct ttm_placement *placement,
310 bool interruptible, bool no_wait); 328 bool interruptible, bool no_wait);
329
311/** 330/**
312 * ttm_bo_unref 331 * ttm_bo_unref
313 * 332 *
@@ -328,7 +347,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo);
328 * waiting for buffer idle. This lock is recursive. 347 * waiting for buffer idle. This lock is recursive.
329 * Returns 348 * Returns
330 * -EBUSY if the buffer is busy and no_wait is true. 349 * -EBUSY if the buffer is busy and no_wait is true.
331 * -ERESTART if interrupted by a signal. 350 * -ERESTARTSYS if interrupted by a signal.
332 */ 351 */
333 352
334extern int 353extern int
@@ -371,7 +390,7 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
371 * Returns 390 * Returns
372 * -ENOMEM: Out of memory. 391 * -ENOMEM: Out of memory.
373 * -EINVAL: Invalid placement flags. 392 * -EINVAL: Invalid placement flags.
374 * -ERESTART: Interrupted by signal while sleeping waiting for resources. 393 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
375 */ 394 */
376 395
377extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, 396extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
@@ -411,7 +430,7 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
411 * Returns 430 * Returns
412 * -ENOMEM: Out of memory. 431 * -ENOMEM: Out of memory.
413 * -EINVAL: Invalid placement flags. 432 * -EINVAL: Invalid placement flags.
414 * -ERESTART: Interrupted by signal while waiting for resources. 433 * -ERESTARTSYS: Interrupted by signal while waiting for resources.
415 */ 434 */
416 435
417extern int ttm_buffer_object_create(struct ttm_bo_device *bdev, 436extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
@@ -445,7 +464,6 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
445 * 464 *
446 * @bdev: Pointer to a ttm_bo_device struct. 465 * @bdev: Pointer to a ttm_bo_device struct.
447 * @mem_type: The memory type. 466 * @mem_type: The memory type.
448 * @p_offset: offset for managed area in pages.
449 * @p_size: size managed area in pages. 467 * @p_size: size managed area in pages.
450 * 468 *
451 * Initialize a manager for a given memory type. 469 * Initialize a manager for a given memory type.
@@ -458,7 +476,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
458 */ 476 */
459 477
460extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, 478extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
461 unsigned long p_offset, unsigned long p_size); 479 unsigned long p_size);
462/** 480/**
463 * ttm_bo_clean_mm 481 * ttm_bo_clean_mm
464 * 482 *
@@ -503,7 +521,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
503 * 521 *
504 * Returns: 522 * Returns:
505 * -EINVAL: Invalid or uninitialized memory type. 523 * -EINVAL: Invalid or uninitialized memory type.
506 * -ERESTART: The call was interrupted by a signal while waiting to 524 * -ERESTARTSYS: The call was interrupted by a signal while waiting to
507 * evict a buffer. 525 * evict a buffer.
508 */ 526 */
509 527
@@ -606,7 +624,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
606 * be called from the fops::read and fops::write method. 624 * be called from the fops::read and fops::write method.
607 * Returns: 625 * Returns:
608 * See man (2) write, man(2) read. In particular, 626 * See man (2) write, man(2) read. In particular,
609 * the function may return -EINTR if 627 * the function may return -ERESTARTSYS if
610 * interrupted by a signal. 628 * interrupted by a signal.
611 */ 629 */
612 630
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 7a39ab9aa1d1..ff7664e0c3cd 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -242,12 +242,6 @@ struct ttm_mem_type_manager {
242/** 242/**
243 * struct ttm_bo_driver 243 * struct ttm_bo_driver
244 * 244 *
245 * @mem_type_prio: Priority array of memory types to place a buffer object in
246 * if it fits without evicting buffers from any of these memory types.
247 * @mem_busy_prio: Priority array of memory types to place a buffer object in
248 * if it needs to evict buffers to make room.
249 * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
250 * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
251 * @create_ttm_backend_entry: Callback to create a struct ttm_backend. 245 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
252 * @invalidate_caches: Callback to invalidate read caches when a buffer object 246 * @invalidate_caches: Callback to invalidate read caches when a buffer object
253 * has been evicted. 247 * has been evicted.
@@ -265,11 +259,6 @@ struct ttm_mem_type_manager {
265 */ 259 */
266 260
267struct ttm_bo_driver { 261struct ttm_bo_driver {
268 const uint32_t *mem_type_prio;
269 const uint32_t *mem_busy_prio;
270 uint32_t num_mem_type_prio;
271 uint32_t num_mem_busy_prio;
272
273 /** 262 /**
274 * struct ttm_bo_driver member create_ttm_backend_entry 263 * struct ttm_bo_driver member create_ttm_backend_entry
275 * 264 *
@@ -306,7 +295,8 @@ struct ttm_bo_driver {
306 * finished, they'll end up in bo->mem.flags 295 * finished, they'll end up in bo->mem.flags
307 */ 296 */
308 297
309 uint32_t(*evict_flags) (struct ttm_buffer_object *bo); 298 void(*evict_flags) (struct ttm_buffer_object *bo,
299 struct ttm_placement *placement);
310 /** 300 /**
311 * struct ttm_bo_driver member move: 301 * struct ttm_bo_driver member move:
312 * 302 *
@@ -648,12 +638,12 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
648 * -EBUSY: No space available (only if no_wait == 1). 638 * -EBUSY: No space available (only if no_wait == 1).
649 * -ENOMEM: Could not allocate memory for the buffer object, either due to 639 * -ENOMEM: Could not allocate memory for the buffer object, either due to
650 * fragmentation or concurrent allocators. 640 * fragmentation or concurrent allocators.
651 * -ERESTART: An interruptible sleep was interrupted by a signal. 641 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
652 */ 642 */
653extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, 643extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
654 uint32_t proposed_placement, 644 struct ttm_placement *placement,
655 struct ttm_mem_reg *mem, 645 struct ttm_mem_reg *mem,
656 bool interruptible, bool no_wait); 646 bool interruptible, bool no_wait);
657/** 647/**
658 * ttm_bo_wait_for_cpu 648 * ttm_bo_wait_for_cpu
659 * 649 *
@@ -663,7 +653,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
663 * Wait until a buffer object is no longer sync'ed for CPU access. 653 * Wait until a buffer object is no longer sync'ed for CPU access.
664 * Returns: 654 * Returns:
665 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1). 655 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
666 * -ERESTART: An interruptible sleep was interrupted by a signal. 656 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
667 */ 657 */
668 658
669extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); 659extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
@@ -767,7 +757,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
767 * -EAGAIN: The reservation may cause a deadlock. 757 * -EAGAIN: The reservation may cause a deadlock.
768 * Release all buffer reservations, wait for @bo to become unreserved and 758 * Release all buffer reservations, wait for @bo to become unreserved and
769 * try again. (only if use_sequence == 1). 759 * try again. (only if use_sequence == 1).
770 * -ERESTART: A wait for the buffer to become unreserved was interrupted by 760 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
771 * a signal. Release all buffer reservations and return to user-space. 761 * a signal. Release all buffer reservations and return to user-space.
772 */ 762 */
773extern int ttm_bo_reserve(struct ttm_buffer_object *bo, 763extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
@@ -808,7 +798,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
808 * 798 *
809 * Returns: 799 * Returns:
810 * -EBUSY: If no_wait == 1 and the buffer is already reserved. 800 * -EBUSY: If no_wait == 1 and the buffer is already reserved.
811 * -ERESTART: If interruptible == 1 and the process received a signal 801 * -ERESTARTSYS: If interruptible == 1 and the process received a signal
812 * while sleeping. 802 * while sleeping.
813 */ 803 */
814extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo, 804extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
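A hedged driver-side sketch of the evict_flags() contract that replaces the removed priority arrays: the driver now fills the caller-provided struct ttm_placement instead of returning a flags word (everything but the callback signature is illustrative):

	static uint32_t evict_placements[] = {
		TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
	};

	static void example_evict_flags(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
	{
		placement->fpfn = 0;
		placement->lpfn = 0;	/* 0 is treated as "no upper bound" */
		placement->placement = evict_placements;
		placement->num_placement = ARRAY_SIZE(evict_placements);
		placement->busy_placement = evict_placements;
		placement->num_busy_placement = ARRAY_SIZE(evict_placements);
	}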