author    Jerome Glisse <jglisse@redhat.com>      2010-03-09 09:45:10 -0500
committer Dave Airlie <airlied@redhat.com>        2010-04-05 20:42:45 -0400
commit    225758d8ba4fdcc1e8c9cf617fd89529bd4a9596 (patch)
tree      a9ac2f23435d4a6db5aa33774ba94d9f0aeb5c4c
parent    95beb690170e6ce918fe53c73a0fcc7cf64d704a (diff)
drm/radeon/kms: fence cleanup + more reliable GPU lockup detection V4
This patch cleans up the fence code. It drops the timeout field of the fence, as the time to complete each IB is unpredictable and shouldn't be bounded.

The fence cleanup leads to improved GPU lockup detection: this patch introduces a callback allowing asic-specific tests for lockup detection. Here the CP is used as the first indicator of a GPU lockup; if the CP doesn't make progress for 1 second we assume we are facing a GPU lockup. To avoid the overhead of testing for GPU lockup too frequently while a fence takes time to be signaled, we query the lockup callback every 500 msec. There are plenty of code comments explaining the design and choices inside the code.

This has been tested mostly on R3XX/R5XX hardware; on a normally running desktop (compiz, firefox, quake3 running) the lockup callback wasn't called once during a 1 hour session. Also tested with a forced GPU lockup; the lockup was reported after the 1s CP activity timeout.

V2 switch to a 500ms timeout so the GPU lockup check runs at least 2 times in less than 2sec.
V3 store the last jiffies in the fence struct so on ERESTART/EBUSY we keep track of how long we have already waited for a given fence.
V4 make sure we get an up to date CP read pointer so we don't get a false positive.

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
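For reference, a minimal userspace sketch of the CP-progress heuristic described above (hypothetical names, plain milliseconds instead of jiffies; not part of the patch):

/*
 * Hypothetical userspace sketch (NOT part of the patch) of the CP-progress
 * heuristic described above: sample the CP read pointer on every check; if it
 * has not moved for >= 1s report a lockup, while a gap of >= 3s between checks
 * only re-arms the tracking to avoid a false positive. All names and the use
 * of plain milliseconds instead of jiffies are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

struct cp_lockup_state {
	unsigned long last_ms;	/* time of the last recorded sample */
	unsigned int last_rptr;	/* CP read pointer at that sample */
};

static bool cp_is_lockup(struct cp_lockup_state *s, unsigned int rptr,
			 unsigned long now_ms)
{
	unsigned long elapsed = now_ms - s->last_ms;

	if (rptr != s->last_rptr || elapsed >= 3000) {
		/* CP made progress, or the two samples are too far apart to
		 * be trusted: re-arm the tracking, no lockup reported. */
		s->last_rptr = rptr;
		s->last_ms = now_ms;
		return false;
	}
	/* same rptr for at least one second: assume the GPU is locked up */
	return elapsed >= 1000;
}

int main(void)
{
	struct cp_lockup_state s = { .last_ms = 0, .last_rptr = 16 };

	/* polled every 500 ms, as the fence wait path does */
	printf("%d\n", cp_is_lockup(&s, 16, 500));	/* 0: give the GPU a chance */
	printf("%d\n", cp_is_lockup(&s, 16, 1000));	/* 1: no progress for 1s */
	printf("%d\n", cp_is_lockup(&s, 20, 1500));	/* 0: rptr moved, re-armed */
	return 0;
}

The 500 ms poll interval means a stalled CP is sampled at least twice before the 1 s threshold reports a lockup, while the 3 s guard re-arms the tracking instead of trusting two samples taken too far apart.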
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c       6
-rw-r--r--  drivers/gpu/drm/radeon/r100.c           86
-rw-r--r--  drivers/gpu/drm/radeon/r300.c           28
-rw-r--r--  drivers/gpu/drm/radeon/r600.c           34
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h        104
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c    15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h     7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c  102
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c           6
9 files changed, 281 insertions, 107 deletions
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 647a0efdc353..3070e5994120 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -486,6 +486,12 @@ int evergreen_mc_init(struct radeon_device *rdev)
 	return 0;
 }
 
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
+{
+	/* FIXME: implement for evergreen */
+	return false;
+}
+
 int evergreen_gpu_reset(struct radeon_device *rdev)
 {
 	/* FIXME: implement for evergreen */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 3ae51ada1abf..845c8f3063fe 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1777,6 +1777,92 @@ int r100_rb2d_reset(struct radeon_device *rdev)
 	return -1;
 }
 
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+{
+	lockup->last_cp_rptr = cp->rptr;
+	lockup->last_jiffies = jiffies;
+}
+
+/**
+ * r100_gpu_cp_is_lockup() - check if the CP is locked up by tracking its progress
+ * @rdev: radeon device structure
+ * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
+ * @cp: radeon_cp structure holding CP information
+ *
+ * We don't need to initialize the lockup tracking information, as we will
+ * either see the CP rptr at a different value or a jiffies wrap around, which
+ * will force initialization of the lockup tracking information.
+ *
+ * A possible false positive is if we get called after a while and last_cp_rptr ==
+ * the current CP rptr; even if it's unlikely, it might happen. To avoid this,
+ * if the elapsed time since the last call is bigger than 2 seconds we return
+ * false and update the tracking information. Because of this the caller must
+ * call r100_gpu_cp_is_lockup several times in less than 2sec for a lockup to be
+ * reported; the fencing code should be cautious about that.
+ *
+ * The caller should write to the ring to force the CP to do something, so we
+ * don't get a false positive when the CP simply has nothing to do.
+ *
+ **/
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+{
+	unsigned long cjiffies, elapsed;
+
+	cjiffies = jiffies;
+	if (!time_after(cjiffies, lockup->last_jiffies)) {
+		/* likely a wrap around */
+		lockup->last_cp_rptr = cp->rptr;
+		lockup->last_jiffies = jiffies;
+		return false;
+	}
+	if (cp->rptr != lockup->last_cp_rptr) {
+		/* CP is still working, no lockup */
+		lockup->last_cp_rptr = cp->rptr;
+		lockup->last_jiffies = jiffies;
+		return false;
+	}
+	elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
+	if (elapsed >= 3000) {
+		/* very likely the improbable case where the current
+		 * rptr is equal to the rptr recorded a while ago;
+		 * this is more likely a false positive, so update the
+		 * tracking information, which should force us to be
+		 * called again at a later point
+		 */
+		lockup->last_cp_rptr = cp->rptr;
+		lockup->last_jiffies = jiffies;
+		return false;
+	}
+	if (elapsed >= 1000) {
+		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+		return true;
+	}
+	/* give a chance to the GPU ... */
+	return false;
+}
+
+bool r100_gpu_is_lockup(struct radeon_device *rdev)
+{
+	u32 rbbm_status;
+	int r;
+
+	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+		r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+		return false;
+	}
+	/* force CP activities */
+	r = radeon_ring_lock(rdev, 2);
+	if (!r) {
+		/* PACKET2 NOP */
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_unlock_commit(rdev);
+	}
+	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
+}
+
 int r100_gpu_reset(struct radeon_device *rdev)
 {
 	uint32_t status;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0e9eb761a90f..9825fb19331f 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -26,8 +26,9 @@
  * Jerome Glisse
  */
 #include <linux/seq_file.h>
-#include "drmP.h"
-#include "drm.h"
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "radeon_asic.h"
@@ -426,12 +427,35 @@ int r300_ga_reset(struct radeon_device *rdev)
 	return -1;
 }
 
+bool r300_gpu_is_lockup(struct radeon_device *rdev)
+{
+	u32 rbbm_status;
+	int r;
+
+	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+		return false;
+	}
+	/* force CP activities */
+	r = radeon_ring_lock(rdev, 2);
+	if (!r) {
+		/* PACKET2 NOP */
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_unlock_commit(rdev);
+	}
+	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+}
+
 int r300_gpu_reset(struct radeon_device *rdev)
 {
 	uint32_t status;
 
 	/* reset order likely matter */
 	status = RREG32(RADEON_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 	/* reset HDP */
 	r100_hdp_reset(rdev);
 	/* reset rb2d */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 5509354c7c89..a09c062df4db 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -784,7 +784,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
 	dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
 	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
 	(void)RREG32(R_008020_GRBM_SOFT_RESET);
-	udelay(50);
+	mdelay(1);
 	WREG32(R_008020_GRBM_SOFT_RESET, 0);
 	(void)RREG32(R_008020_GRBM_SOFT_RESET);
 	}
@@ -824,16 +824,16 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
 	dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
 	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
 	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
-	udelay(50);
+	mdelay(1);
 	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
 	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
 	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
 	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
-	udelay(50);
+	mdelay(1);
 	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
 	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
 	/* Wait a little for things to settle down */
-	udelay(50);
+	mdelay(1);
 	dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
 		RREG32(R_008010_GRBM_STATUS));
 	dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
@@ -848,6 +848,32 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
 	return 0;
 }
 
+bool r600_gpu_is_lockup(struct radeon_device *rdev)
+{
+	u32 srbm_status;
+	u32 grbm_status;
+	u32 grbm_status2;
+	int r;
+
+	srbm_status = RREG32(R_000E50_SRBM_STATUS);
+	grbm_status = RREG32(R_008010_GRBM_STATUS);
+	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
+	if (!G_008010_GUI_ACTIVE(grbm_status)) {
+		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+		return false;
+	}
+	/* force CP activities */
+	r = radeon_ring_lock(rdev, 2);
+	if (!r) {
+		/* PACKET2 NOP */
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_unlock_commit(rdev);
+	}
+	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+}
+
 int r600_gpu_reset(struct radeon_device *rdev)
 {
 	return r600_gpu_soft_reset(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 034218c3dbbb..a3d13c367176 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -99,6 +99,7 @@ extern int radeon_hw_i2c;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
 /* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE		16
 #define RADEON_DEBUGFS_MAX_NUM_FILES	32
@@ -182,7 +183,8 @@ struct radeon_fence_driver {
 	uint32_t		scratch_reg;
 	atomic_t		seq;
 	uint32_t		last_seq;
-	unsigned long		count_timeout;
+	unsigned long		last_jiffies;
+	unsigned long		last_timeout;
 	wait_queue_head_t	queue;
 	rwlock_t		lock;
 	struct list_head	created;
@@ -197,7 +199,6 @@ struct radeon_fence {
 	struct list_head	list;
 	/* protected by radeon_fence.lock */
 	uint32_t		seq;
-	unsigned long		timeout;
 	bool			emited;
 	bool			signaled;
 };
@@ -746,6 +747,7 @@ struct radeon_asic {
 	int (*resume)(struct radeon_device *rdev);
 	int (*suspend)(struct radeon_device *rdev);
 	void (*vga_set_state)(struct radeon_device *rdev, bool state);
+	bool (*gpu_is_lockup)(struct radeon_device *rdev);
 	int (*gpu_reset)(struct radeon_device *rdev);
 	void (*gart_tlb_flush)(struct radeon_device *rdev);
 	int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
@@ -804,59 +806,68 @@ struct radeon_asic {
 /*
  * Asic structures
  */
+struct r100_gpu_lockup {
+	unsigned long	last_jiffies;
+	u32		last_cp_rptr;
+};
+
 struct r100_asic {
 	const unsigned		*reg_safe_bm;
 	unsigned		reg_safe_bm_size;
 	u32			hdp_cntl;
+	struct r100_gpu_lockup	lockup;
 };
 
 struct r300_asic {
 	const unsigned		*reg_safe_bm;
 	unsigned		reg_safe_bm_size;
 	u32			resync_scratch;
 	u32			hdp_cntl;
+	struct r100_gpu_lockup	lockup;
 };
 
 struct r600_asic {
 	unsigned		max_pipes;
 	unsigned		max_tile_pipes;
 	unsigned		max_simds;
 	unsigned		max_backends;
 	unsigned		max_gprs;
 	unsigned		max_threads;
 	unsigned		max_stack_entries;
 	unsigned		max_hw_contexts;
 	unsigned		max_gs_threads;
 	unsigned		sx_max_export_size;
 	unsigned		sx_max_export_pos_size;
 	unsigned		sx_max_export_smx_size;
 	unsigned		sq_num_cf_insts;
 	unsigned		tiling_nbanks;
 	unsigned		tiling_npipes;
 	unsigned		tiling_group_size;
+	struct r100_gpu_lockup	lockup;
 };
 
 struct rv770_asic {
 	unsigned		max_pipes;
 	unsigned		max_tile_pipes;
 	unsigned		max_simds;
 	unsigned		max_backends;
 	unsigned		max_gprs;
 	unsigned		max_threads;
 	unsigned		max_stack_entries;
 	unsigned		max_hw_contexts;
 	unsigned		max_gs_threads;
 	unsigned		sx_max_export_size;
 	unsigned		sx_max_export_pos_size;
 	unsigned		sx_max_export_smx_size;
 	unsigned		sq_num_cf_insts;
 	unsigned		sx_num_of_sets;
 	unsigned		sc_prim_fifo_size;
 	unsigned		sc_hiz_tile_fifo_size;
 	unsigned		sc_earlyz_tile_fifo_fize;
 	unsigned		tiling_nbanks;
 	unsigned		tiling_npipes;
 	unsigned		tiling_group_size;
+	struct r100_gpu_lockup	lockup;
 };
 
 union radeon_asic_config {
@@ -1145,6 +1156,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
 #define radeon_cs_parse(p) rdev->asic->cs_parse((p))
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
+#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
 #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
@@ -1200,6 +1212,8 @@ extern int radeon_resume_kms(struct drm_device *dev);
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 
 /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
+extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
+extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
 
 /* rv200,rv250,rv280 */
 extern void r200_set_safe_registers(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a4b4bc9fa322..7e21985139f7 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,6 +134,7 @@ static struct radeon_asic r100_asic = {
 	.suspend = &r100_suspend,
 	.resume = &r100_resume,
 	.vga_set_state = &r100_vga_set_state,
+	.gpu_is_lockup = &r100_gpu_is_lockup,
 	.gpu_reset = &r100_gpu_reset,
 	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
 	.gart_set_page = &r100_pci_gart_set_page,
@@ -172,6 +173,7 @@ static struct radeon_asic r200_asic = {
 	.suspend = &r100_suspend,
 	.resume = &r100_resume,
 	.vga_set_state = &r100_vga_set_state,
+	.gpu_is_lockup = &r100_gpu_is_lockup,
 	.gpu_reset = &r100_gpu_reset,
 	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
 	.gart_set_page = &r100_pci_gart_set_page,
@@ -209,6 +211,7 @@ static struct radeon_asic r300_asic = {
 	.suspend = &r300_suspend,
 	.resume = &r300_resume,
 	.vga_set_state = &r100_vga_set_state,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.gpu_reset = &r300_gpu_reset,
 	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
 	.gart_set_page = &r100_pci_gart_set_page,
@@ -247,6 +250,7 @@ static struct radeon_asic r300_asic_pcie = {
 	.suspend = &r300_suspend,
 	.resume = &r300_resume,
 	.vga_set_state = &r100_vga_set_state,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.gpu_reset = &r300_gpu_reset,
 	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
 	.gart_set_page = &rv370_pcie_gart_set_page,
@@ -284,6 +288,7 @@ static struct radeon_asic r420_asic = {
 	.suspend = &r420_suspend,
 	.resume = &r420_resume,
 	.vga_set_state = &r100_vga_set_state,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.gpu_reset = &r300_gpu_reset,
 	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
 	.gart_set_page = &rv370_pcie_gart_set_page,
@@ -322,6 +327,7 @@ static struct radeon_asic rs400_asic = {
 	.suspend = &rs400_suspend,
 	.resume = &rs400_resume,
 	.vga_set_state = &r100_vga_set_state,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.gpu_reset = &r300_gpu_reset,
 	.gart_tlb_flush = &rs400_gart_tlb_flush,
 	.gart_set_page = &rs400_gart_set_page,
@@ -360,6 +366,7 @@ static struct radeon_asic rs600_asic = {
 	.suspend = &rs600_suspend,
 	.resume = &rs600_resume,
 	.vga_set_state = &r100_vga_set_state,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.gpu_reset = &r300_gpu_reset,
 	.gart_tlb_flush = &rs600_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
@@ -398,6 +405,7 @@ static struct radeon_asic rs690_asic = {
 	.suspend = &rs690_suspend,
 	.resume = &rs690_resume,
 	.vga_set_state = &r100_vga_set_state,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.gpu_reset = &r300_gpu_reset,
 	.gart_tlb_flush = &rs400_gart_tlb_flush,
 	.gart_set_page = &rs400_gart_set_page,
@@ -436,6 +444,7 @@ static struct radeon_asic rv515_asic = {
 	.suspend = &rv515_suspend,
 	.resume = &rv515_resume,
 	.vga_set_state = &r100_vga_set_state,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.gpu_reset = &rv515_gpu_reset,
 	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
 	.gart_set_page = &rv370_pcie_gart_set_page,
@@ -474,6 +483,7 @@ static struct radeon_asic r520_asic = {
 	.suspend = &rv515_suspend,
 	.resume = &r520_resume,
 	.vga_set_state = &r100_vga_set_state,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.gpu_reset = &rv515_gpu_reset,
 	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
 	.gart_set_page = &rv370_pcie_gart_set_page,
@@ -513,6 +523,7 @@ static struct radeon_asic r600_asic = {
 	.resume = &r600_resume,
 	.cp_commit = &r600_cp_commit,
 	.vga_set_state = &r600_vga_set_state,
+	.gpu_is_lockup = &r600_gpu_is_lockup,
 	.gpu_reset = &r600_gpu_reset,
 	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
@@ -586,7 +597,8 @@ static struct radeon_asic rv770_asic = {
 	.suspend = &rv770_suspend,
 	.resume = &rv770_resume,
 	.cp_commit = &r600_cp_commit,
-	.gpu_reset = &rv770_gpu_reset,
+	.gpu_reset = &r600_gpu_reset,
+	.gpu_is_lockup = &r600_gpu_is_lockup,
 	.vga_set_state = &r600_vga_set_state,
 	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
@@ -623,6 +635,7 @@ static struct radeon_asic evergreen_asic = {
 	.suspend = &evergreen_suspend,
 	.resume = &evergreen_resume,
 	.cp_commit = NULL,
+	.gpu_is_lockup = &evergreen_gpu_is_lockup,
 	.gpu_reset = &evergreen_gpu_reset,
 	.vga_set_state = &r600_vga_set_state,
 	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a0b8280663d1..ce2f3e4f0814 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -60,6 +60,7 @@ int r100_resume(struct radeon_device *rdev);
 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void r100_vga_set_state(struct radeon_device *rdev, bool state);
+bool r100_gpu_is_lockup(struct radeon_device *rdev);
 int r100_gpu_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
@@ -134,7 +135,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
 			 unsigned num_pages,
-			 struct radeon_fence *fence);
+			 struct radeon_fence *fence);
 
 /*
  * r300,r350,rv350,rv380
@@ -143,6 +144,7 @@ extern int r300_init(struct radeon_device *rdev);
 extern void r300_fini(struct radeon_device *rdev);
 extern int r300_suspend(struct radeon_device *rdev);
 extern int r300_resume(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
 extern int r300_gpu_reset(struct radeon_device *rdev);
 extern void r300_ring_start(struct radeon_device *rdev);
 extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -252,6 +254,7 @@ int r600_copy_dma(struct radeon_device *rdev,
 		  struct radeon_fence *fence);
 int r600_irq_process(struct radeon_device *rdev);
 int r600_irq_set(struct radeon_device *rdev);
+bool r600_gpu_is_lockup(struct radeon_device *rdev);
 int r600_gpu_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
@@ -276,7 +279,6 @@ int rv770_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
 int rv770_suspend(struct radeon_device *rdev);
 int rv770_resume(struct radeon_device *rdev);
-int rv770_gpu_reset(struct radeon_device *rdev);
 
 /*
  * evergreen
@@ -285,6 +287,7 @@ int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 int evergreen_suspend(struct radeon_device *rdev);
 int evergreen_resume(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
 int evergreen_gpu_reset(struct radeon_device *rdev);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_hpd_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 8495d4e32e18..393154268dea 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -57,7 +57,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 	radeon_fence_ring_emit(rdev, fence);
 
 	fence->emited = true;
-	fence->timeout = jiffies + ((2000 * HZ) / 1000);
 	list_del(&fence->list);
 	list_add_tail(&fence->list, &rdev->fence_drv.emited);
 	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -70,15 +69,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 	struct list_head *i, *n;
 	uint32_t seq;
 	bool wake = false;
+	unsigned long cjiffies;
 
-	if (rdev == NULL) {
-		return true;
-	}
-	if (rdev->shutdown) {
-		return true;
-	}
 	seq = RREG32(rdev->fence_drv.scratch_reg);
-	rdev->fence_drv.last_seq = seq;
+	if (seq != rdev->fence_drv.last_seq) {
+		rdev->fence_drv.last_seq = seq;
+		rdev->fence_drv.last_jiffies = jiffies;
+		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+	} else {
+		cjiffies = jiffies;
+		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
+			cjiffies -= rdev->fence_drv.last_jiffies;
+			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+				/* update the timeout */
+				rdev->fence_drv.last_timeout -= cjiffies;
+			} else {
+				/* the 500ms timeout has elapsed, we should
+				 * test for GPU lockup
+				 */
+				rdev->fence_drv.last_timeout = 1;
+			}
+		} else {
+			/* wrap around, update last jiffies; we will
+			 * just wait a little longer
+			 */
+			rdev->fence_drv.last_jiffies = cjiffies;
+		}
+		return false;
+	}
 	n = NULL;
 	list_for_each(i, &rdev->fence_drv.emited) {
 		fence = list_entry(i, struct radeon_fence, list);
@@ -170,9 +188,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
 	struct radeon_device *rdev;
-	unsigned long cur_jiffies;
-	unsigned long timeout;
-	bool expired = false;
+	unsigned long irq_flags, timeout;
+	u32 seq;
 	int r;
 
 	if (fence == NULL) {
@@ -183,14 +200,10 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 	if (radeon_fence_signaled(fence)) {
 		return 0;
 	}
-
+	timeout = rdev->fence_drv.last_timeout;
 retry:
-	cur_jiffies = jiffies;
-	timeout = HZ / 100;
-	if (time_after(fence->timeout, cur_jiffies)) {
-		timeout = fence->timeout - cur_jiffies;
-	}
-
+	/* save current sequence used to check for GPU lockup */
+	seq = rdev->fence_drv.last_seq;
 	if (intr) {
 		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
@@ -205,38 +218,34 @@ retry:
 		radeon_irq_kms_sw_irq_put(rdev);
 	}
 	if (unlikely(!radeon_fence_signaled(fence))) {
-		if (unlikely(r == 0)) {
-			expired = true;
+		/* we were interrupted for some reason and the fence
+		 * isn't signaled yet, resume waiting
+		 */
+		if (r) {
+			timeout = r;
+			goto retry;
 		}
-		if (unlikely(expired)) {
-			timeout = 1;
-			if (time_after(cur_jiffies, fence->timeout)) {
-				timeout = cur_jiffies - fence->timeout;
-			}
-			timeout = jiffies_to_msecs(timeout);
-			if (timeout > 500) {
-				DRM_ERROR("fence(%p:0x%08X) %lums timeout "
-					  "going to reset GPU\n",
-					  fence, fence->seq, timeout);
-				radeon_gpu_reset(rdev);
-				WREG32(rdev->fence_drv.scratch_reg, fence->seq);
-			}
-		}
+		/* don't protect read access to rdev->fence_drv.last_seq;
+		 * if we are experiencing a lockup the value doesn't change
+		 */
+		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+			/* good news, we believe it's a lockup */
+			dev_warn(rdev->dev, "GPU lockup (last fence id 0x%08X)\n", seq);
+			r = radeon_gpu_reset(rdev);
+			if (r)
+				return r;
+			/* FIXME: what should we do ? marking everyone
+			 * as signaled for now
+			 */
+			WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+		}
+		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+		rdev->fence_drv.last_jiffies = jiffies;
+		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 		goto retry;
 	}
-	if (unlikely(expired)) {
-		rdev->fence_drv.count_timeout++;
-		cur_jiffies = jiffies;
-		timeout = 1;
-		if (time_after(cur_jiffies, fence->timeout)) {
-			timeout = cur_jiffies - fence->timeout;
-		}
-		timeout = jiffies_to_msecs(timeout);
-		DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
-			  fence, fence->seq, timeout);
-		DRM_ERROR("last signaled fence(0x%08X)\n",
-			  rdev->fence_drv.last_seq);
-	}
 	return 0;
 }
 
@@ -332,7 +341,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 	INIT_LIST_HEAD(&rdev->fence_drv.created);
 	INIT_LIST_HEAD(&rdev->fence_drv.emited);
 	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
-	rdev->fence_drv.count_timeout = 0;
 	init_waitqueue_head(&rdev->fence_drv.queue);
 	rdev->fence_drv.initialized = true;
 	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 9f37d2efb0a9..2b8a4e170654 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -916,12 +916,6 @@ int rv770_mc_init(struct radeon_device *rdev)
 	return 0;
 }
 
-int rv770_gpu_reset(struct radeon_device *rdev)
-{
-	/* FIXME: implement any rv770 specific bits */
-	return r600_gpu_reset(rdev);
-}
-
 static int rv770_startup(struct radeon_device *rdev)
 {
 	int r;