aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorAlex Deucher <alexander.deucher@amd.com>2012-11-09 10:45:57 -0500
committerAlex Deucher <alexander.deucher@amd.com>2013-06-25 17:50:31 -0400
commita59781bbe528a0c2b0468d8baeea88a61d8b7e3c (patch)
tree4b0adc4978b4fc72ae26212a86930861a1a34278 /drivers
parentf6796caee6fc0f97e8d38f5b8b060ab1433ae54e (diff)
drm/radeon: add support for interrupts on CIK (v5)
Todo: - handle interrupts for compute queues v2: add documentation v3: update to latest reset code v4: update to latest illegal CP handling v5: fix missing break in interrupt handler switch statement Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/radeon/cik.c840
-rw-r--r--drivers/gpu/drm/radeon/cikd.h170
-rw-r--r--drivers/gpu/drm/radeon/radeon.h11
3 files changed, 1021 insertions, 0 deletions
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a44ede6b0707..72c7e83c6d8c 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -62,6 +62,8 @@ MODULE_FIRMWARE("radeon/KABINI_ce.bin");
62MODULE_FIRMWARE("radeon/KABINI_mec.bin"); 62MODULE_FIRMWARE("radeon/KABINI_mec.bin");
63MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); 63MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
64 64
65extern int r600_ih_ring_alloc(struct radeon_device *rdev);
66extern void r600_ih_ring_fini(struct radeon_device *rdev);
65extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); 67extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
66extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save); 68extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
67extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 69extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
@@ -2919,3 +2921,841 @@ static int cik_rlc_resume(struct radeon_device *rdev)
2919 2921
2920 return 0; 2922 return 0;
2921} 2923}
2924
2925/*
2926 * Interrupts
2927 * Starting with r6xx, interrupts are handled via a ring buffer.
2928 * Ring buffers are areas of GPU accessible memory that the GPU
2929 * writes interrupt vectors into and the host reads vectors out of.
2930 * There is a rptr (read pointer) that determines where the
2931 * host is currently reading, and a wptr (write pointer)
2932 * which determines where the GPU has written. When the
2933 * pointers are equal, the ring is idle. When the GPU
2934 * writes vectors to the ring buffer, it increments the
2935 * wptr. When there is an interrupt, the host then starts
2936 * fetching commands and processing them until the pointers are
2937 * equal again at which point it updates the rptr.
2938 */
2939
2940/**
2941 * cik_enable_interrupts - Enable the interrupt ring buffer
2942 *
2943 * @rdev: radeon_device pointer
2944 *
2945 * Enable the interrupt ring buffer (CIK).
2946 */
2947static void cik_enable_interrupts(struct radeon_device *rdev)
2948{
2949 u32 ih_cntl = RREG32(IH_CNTL);
2950 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2951
2952 ih_cntl |= ENABLE_INTR;
2953 ih_rb_cntl |= IH_RB_ENABLE;
2954 WREG32(IH_CNTL, ih_cntl);
2955 WREG32(IH_RB_CNTL, ih_rb_cntl);
2956 rdev->ih.enabled = true;
2957}
2958
2959/**
2960 * cik_disable_interrupts - Disable the interrupt ring buffer
2961 *
2962 * @rdev: radeon_device pointer
2963 *
2964 * Disable the interrupt ring buffer (CIK).
2965 */
2966static void cik_disable_interrupts(struct radeon_device *rdev)
2967{
2968 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2969 u32 ih_cntl = RREG32(IH_CNTL);
2970
2971 ih_rb_cntl &= ~IH_RB_ENABLE;
2972 ih_cntl &= ~ENABLE_INTR;
2973 WREG32(IH_RB_CNTL, ih_rb_cntl);
2974 WREG32(IH_CNTL, ih_cntl);
2975 /* set rptr, wptr to 0 */
2976 WREG32(IH_RB_RPTR, 0);
2977 WREG32(IH_RB_WPTR, 0);
2978 rdev->ih.enabled = false;
2979 rdev->ih.rptr = 0;
2980}
2981
2982/**
2983 * cik_disable_interrupt_state - Disable all interrupt sources
2984 *
2985 * @rdev: radeon_device pointer
2986 *
2987 * Clear all interrupt enable bits used by the driver (CIK).
2988 */
2989static void cik_disable_interrupt_state(struct radeon_device *rdev)
2990{
2991 u32 tmp;
2992
2993 /* gfx ring */
2994 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2995 /* compute queues */
2996 WREG32(CP_ME1_PIPE0_INT_CNTL, 0);
2997 WREG32(CP_ME1_PIPE1_INT_CNTL, 0);
2998 WREG32(CP_ME1_PIPE2_INT_CNTL, 0);
2999 WREG32(CP_ME1_PIPE3_INT_CNTL, 0);
3000 WREG32(CP_ME2_PIPE0_INT_CNTL, 0);
3001 WREG32(CP_ME2_PIPE1_INT_CNTL, 0);
3002 WREG32(CP_ME2_PIPE2_INT_CNTL, 0);
3003 WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
3004 /* grbm */
3005 WREG32(GRBM_INT_CNTL, 0);
3006 /* vline/vblank, etc. */
3007 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
3008 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
3009 if (rdev->num_crtc >= 4) {
3010 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
3011 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
3012 }
3013 if (rdev->num_crtc >= 6) {
3014 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
3015 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
3016 }
3017
3018 /* dac hotplug */
3019 WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
3020
3021 /* digital hotplug */
3022 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3023 WREG32(DC_HPD1_INT_CONTROL, tmp);
3024 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3025 WREG32(DC_HPD2_INT_CONTROL, tmp);
3026 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3027 WREG32(DC_HPD3_INT_CONTROL, tmp);
3028 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3029 WREG32(DC_HPD4_INT_CONTROL, tmp);
3030 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3031 WREG32(DC_HPD5_INT_CONTROL, tmp);
3032 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3033 WREG32(DC_HPD6_INT_CONTROL, tmp);
3034
3035}
3036
3037/**
3038 * cik_irq_init - init and enable the interrupt ring
3039 *
3040 * @rdev: radeon_device pointer
3041 *
3042 * Allocate a ring buffer for the interrupt controller,
3043 * enable the RLC, disable interrupts, enable the IH
3044 * ring buffer and enable it (CIK).
3045 * Called at device load and reume.
3046 * Returns 0 for success, errors for failure.
3047 */
3048static int cik_irq_init(struct radeon_device *rdev)
3049{
3050 int ret = 0;
3051 int rb_bufsz;
3052 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3053
3054 /* allocate ring */
3055 ret = r600_ih_ring_alloc(rdev);
3056 if (ret)
3057 return ret;
3058
3059 /* disable irqs */
3060 cik_disable_interrupts(rdev);
3061
3062 /* init rlc */
3063 ret = cik_rlc_resume(rdev);
3064 if (ret) {
3065 r600_ih_ring_fini(rdev);
3066 return ret;
3067 }
3068
3069 /* setup interrupt control */
3070 /* XXX this should actually be a bus address, not an MC address. same on older asics */
3071 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3072 interrupt_cntl = RREG32(INTERRUPT_CNTL);
3073 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3074 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3075 */
3076 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3077 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3078 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3079 WREG32(INTERRUPT_CNTL, interrupt_cntl);
3080
3081 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3082 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
3083
3084 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3085 IH_WPTR_OVERFLOW_CLEAR |
3086 (rb_bufsz << 1));
3087
3088 if (rdev->wb.enabled)
3089 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3090
3091 /* set the writeback address whether it's enabled or not */
3092 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3093 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
3094
3095 WREG32(IH_RB_CNTL, ih_rb_cntl);
3096
3097 /* set rptr, wptr to 0 */
3098 WREG32(IH_RB_RPTR, 0);
3099 WREG32(IH_RB_WPTR, 0);
3100
3101 /* Default settings for IH_CNTL (disabled at first) */
3102 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
3103 /* RPTR_REARM only works if msi's are enabled */
3104 if (rdev->msi_enabled)
3105 ih_cntl |= RPTR_REARM;
3106 WREG32(IH_CNTL, ih_cntl);
3107
3108 /* force the active interrupt state to all disabled */
3109 cik_disable_interrupt_state(rdev);
3110
3111 pci_set_master(rdev->pdev);
3112
3113 /* enable irqs */
3114 cik_enable_interrupts(rdev);
3115
3116 return ret;
3117}
3118
3119/**
3120 * cik_irq_set - enable/disable interrupt sources
3121 *
3122 * @rdev: radeon_device pointer
3123 *
3124 * Enable interrupt sources on the GPU (vblanks, hpd,
3125 * etc.) (CIK).
3126 * Returns 0 for success, errors for failure.
3127 */
3128int cik_irq_set(struct radeon_device *rdev)
3129{
3130 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE |
3131 PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
3132 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
3133 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
3134 u32 grbm_int_cntl = 0;
3135
3136 if (!rdev->irq.installed) {
3137 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3138 return -EINVAL;
3139 }
3140 /* don't enable anything if the ih is disabled */
3141 if (!rdev->ih.enabled) {
3142 cik_disable_interrupts(rdev);
3143 /* force the active interrupt state to all disabled */
3144 cik_disable_interrupt_state(rdev);
3145 return 0;
3146 }
3147
3148 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3149 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3150 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3151 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3152 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3153 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3154
3155 /* enable CP interrupts on all rings */
3156 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3157 DRM_DEBUG("cik_irq_set: sw int gfx\n");
3158 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3159 }
3160 /* TODO: compute queues! */
3161 /* CP_ME[1-2]_PIPE[0-3]_INT_CNTL */
3162
3163 if (rdev->irq.crtc_vblank_int[0] ||
3164 atomic_read(&rdev->irq.pflip[0])) {
3165 DRM_DEBUG("cik_irq_set: vblank 0\n");
3166 crtc1 |= VBLANK_INTERRUPT_MASK;
3167 }
3168 if (rdev->irq.crtc_vblank_int[1] ||
3169 atomic_read(&rdev->irq.pflip[1])) {
3170 DRM_DEBUG("cik_irq_set: vblank 1\n");
3171 crtc2 |= VBLANK_INTERRUPT_MASK;
3172 }
3173 if (rdev->irq.crtc_vblank_int[2] ||
3174 atomic_read(&rdev->irq.pflip[2])) {
3175 DRM_DEBUG("cik_irq_set: vblank 2\n");
3176 crtc3 |= VBLANK_INTERRUPT_MASK;
3177 }
3178 if (rdev->irq.crtc_vblank_int[3] ||
3179 atomic_read(&rdev->irq.pflip[3])) {
3180 DRM_DEBUG("cik_irq_set: vblank 3\n");
3181 crtc4 |= VBLANK_INTERRUPT_MASK;
3182 }
3183 if (rdev->irq.crtc_vblank_int[4] ||
3184 atomic_read(&rdev->irq.pflip[4])) {
3185 DRM_DEBUG("cik_irq_set: vblank 4\n");
3186 crtc5 |= VBLANK_INTERRUPT_MASK;
3187 }
3188 if (rdev->irq.crtc_vblank_int[5] ||
3189 atomic_read(&rdev->irq.pflip[5])) {
3190 DRM_DEBUG("cik_irq_set: vblank 5\n");
3191 crtc6 |= VBLANK_INTERRUPT_MASK;
3192 }
3193 if (rdev->irq.hpd[0]) {
3194 DRM_DEBUG("cik_irq_set: hpd 1\n");
3195 hpd1 |= DC_HPDx_INT_EN;
3196 }
3197 if (rdev->irq.hpd[1]) {
3198 DRM_DEBUG("cik_irq_set: hpd 2\n");
3199 hpd2 |= DC_HPDx_INT_EN;
3200 }
3201 if (rdev->irq.hpd[2]) {
3202 DRM_DEBUG("cik_irq_set: hpd 3\n");
3203 hpd3 |= DC_HPDx_INT_EN;
3204 }
3205 if (rdev->irq.hpd[3]) {
3206 DRM_DEBUG("cik_irq_set: hpd 4\n");
3207 hpd4 |= DC_HPDx_INT_EN;
3208 }
3209 if (rdev->irq.hpd[4]) {
3210 DRM_DEBUG("cik_irq_set: hpd 5\n");
3211 hpd5 |= DC_HPDx_INT_EN;
3212 }
3213 if (rdev->irq.hpd[5]) {
3214 DRM_DEBUG("cik_irq_set: hpd 6\n");
3215 hpd6 |= DC_HPDx_INT_EN;
3216 }
3217
3218 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
3219
3220 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3221
3222 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
3223 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
3224 if (rdev->num_crtc >= 4) {
3225 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
3226 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
3227 }
3228 if (rdev->num_crtc >= 6) {
3229 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
3230 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
3231 }
3232
3233 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3234 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3235 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3236 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3237 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3238 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3239
3240 return 0;
3241}
3242
3243/**
3244 * cik_irq_ack - ack interrupt sources
3245 *
3246 * @rdev: radeon_device pointer
3247 *
3248 * Ack interrupt sources on the GPU (vblanks, hpd,
3249 * etc.) (CIK). Certain interrupts sources are sw
3250 * generated and do not require an explicit ack.
3251 */
3252static inline void cik_irq_ack(struct radeon_device *rdev)
3253{
3254 u32 tmp;
3255
3256 rdev->irq.stat_regs.cik.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3257 rdev->irq.stat_regs.cik.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3258 rdev->irq.stat_regs.cik.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
3259 rdev->irq.stat_regs.cik.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
3260 rdev->irq.stat_regs.cik.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
3261 rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
3262 rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
3263
3264 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
3265 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
3266 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
3267 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
3268 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
3269 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
3270 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)
3271 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
3272
3273 if (rdev->num_crtc >= 4) {
3274 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
3275 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
3276 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
3277 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
3278 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
3279 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
3280 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
3281 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
3282 }
3283
3284 if (rdev->num_crtc >= 6) {
3285 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
3286 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
3287 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
3288 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
3289 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
3290 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
3291 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
3292 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
3293 }
3294
3295 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
3296 tmp = RREG32(DC_HPD1_INT_CONTROL);
3297 tmp |= DC_HPDx_INT_ACK;
3298 WREG32(DC_HPD1_INT_CONTROL, tmp);
3299 }
3300 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
3301 tmp = RREG32(DC_HPD2_INT_CONTROL);
3302 tmp |= DC_HPDx_INT_ACK;
3303 WREG32(DC_HPD2_INT_CONTROL, tmp);
3304 }
3305 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
3306 tmp = RREG32(DC_HPD3_INT_CONTROL);
3307 tmp |= DC_HPDx_INT_ACK;
3308 WREG32(DC_HPD3_INT_CONTROL, tmp);
3309 }
3310 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
3311 tmp = RREG32(DC_HPD4_INT_CONTROL);
3312 tmp |= DC_HPDx_INT_ACK;
3313 WREG32(DC_HPD4_INT_CONTROL, tmp);
3314 }
3315 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
3316 tmp = RREG32(DC_HPD5_INT_CONTROL);
3317 tmp |= DC_HPDx_INT_ACK;
3318 WREG32(DC_HPD5_INT_CONTROL, tmp);
3319 }
3320 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
3321 tmp = RREG32(DC_HPD5_INT_CONTROL);
3322 tmp |= DC_HPDx_INT_ACK;
3323 WREG32(DC_HPD6_INT_CONTROL, tmp);
3324 }
3325}
3326
/**
 * cik_irq_disable - disable interrupts
 *
 * @rdev: radeon_device pointer
 *
 * Disable the IH ring, wait briefly for in-flight interrupts,
 * ack anything still pending, then clear all interrupt enable
 * bits (CIK).
 */
static void cik_irq_disable(struct radeon_device *rdev)
{
	cik_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	cik_irq_ack(rdev);
	cik_disable_interrupt_state(rdev);
}
3342
/**
 * cik_irq_suspend - disable interrupts for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts and stop the RLC (CIK).
 * Used for suspend.
 */
static void cik_irq_suspend(struct radeon_device *rdev)
{
	cik_irq_disable(rdev);
	cik_rlc_stop(rdev);
}
3356
/**
 * cik_irq_fini - tear down interrupt support
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts on the hw and free the IH ring
 * buffer (CIK).
 * Used for driver unload.
 */
static void cik_irq_fini(struct radeon_device *rdev)
{
	cik_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
3371
3372/**
3373 * cik_get_ih_wptr - get the IH ring buffer wptr
3374 *
3375 * @rdev: radeon_device pointer
3376 *
3377 * Get the IH ring buffer wptr from either the register
3378 * or the writeback memory buffer (CIK). Also check for
3379 * ring buffer overflow and deal with it.
3380 * Used by cik_irq_process().
3381 * Returns the value of the wptr.
3382 */
3383static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
3384{
3385 u32 wptr, tmp;
3386
3387 if (rdev->wb.enabled)
3388 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3389 else
3390 wptr = RREG32(IH_RB_WPTR);
3391
3392 if (wptr & RB_OVERFLOW) {
3393 /* When a ring buffer overflow happen start parsing interrupt
3394 * from the last not overwritten vector (wptr + 16). Hopefully
3395 * this should allow us to catchup.
3396 */
3397 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3398 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
3399 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3400 tmp = RREG32(IH_RB_CNTL);
3401 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3402 WREG32(IH_RB_CNTL, tmp);
3403 }
3404 return (wptr & rdev->ih.ptr_mask);
3405}
3406
3407/* CIK IV Ring
3408 * Each IV ring entry is 128 bits:
3409 * [7:0] - interrupt source id
3410 * [31:8] - reserved
3411 * [59:32] - interrupt source data
3412 * [63:60] - reserved
3413 * [71:64] - RINGID: ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
3414 * QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
3415 * - for gfx, hw shader state (0=PS...5=LS, 6=CS)
3416 * ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
3417 * PIPE_ID - ME0 0=3D
3418 * - ME1&2 compute dispatcher (4 pipes each)
3419 * [79:72] - VMID
3420 * [95:80] - PASID
3421 * [127:96] - reserved
3422 */
3423/**
3424 * cik_irq_process - interrupt handler
3425 *
3426 * @rdev: radeon_device pointer
3427 *
3428 * Interrupt hander (CIK). Walk the IH ring,
3429 * ack interrupts and schedule work to handle
3430 * interrupt events.
3431 * Returns irq process return code.
3432 */
3433int cik_irq_process(struct radeon_device *rdev)
3434{
3435 u32 wptr;
3436 u32 rptr;
3437 u32 src_id, src_data, ring_id;
3438 u8 me_id, pipe_id, queue_id;
3439 u32 ring_index;
3440 bool queue_hotplug = false;
3441 bool queue_reset = false;
3442
3443 if (!rdev->ih.enabled || rdev->shutdown)
3444 return IRQ_NONE;
3445
3446 wptr = cik_get_ih_wptr(rdev);
3447
3448restart_ih:
3449 /* is somebody else already processing irqs? */
3450 if (atomic_xchg(&rdev->ih.lock, 1))
3451 return IRQ_NONE;
3452
3453 rptr = rdev->ih.rptr;
3454 DRM_DEBUG("cik_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3455
3456 /* Order reading of wptr vs. reading of IH ring data */
3457 rmb();
3458
3459 /* display interrupts */
3460 cik_irq_ack(rdev);
3461
3462 while (rptr != wptr) {
3463 /* wptr/rptr are in bytes! */
3464 ring_index = rptr / 4;
3465 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3466 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3467 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
3468 /* XXX check the bitfield order! */
3469 me_id = (ring_id & 0x60) >> 5;
3470 pipe_id = (ring_id & 0x18) >> 3;
3471 queue_id = (ring_id & 0x7) >> 0;
3472
3473 switch (src_id) {
3474 case 1: /* D1 vblank/vline */
3475 switch (src_data) {
3476 case 0: /* D1 vblank */
3477 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
3478 if (rdev->irq.crtc_vblank_int[0]) {
3479 drm_handle_vblank(rdev->ddev, 0);
3480 rdev->pm.vblank_sync = true;
3481 wake_up(&rdev->irq.vblank_queue);
3482 }
3483 if (atomic_read(&rdev->irq.pflip[0]))
3484 radeon_crtc_handle_flip(rdev, 0);
3485 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3486 DRM_DEBUG("IH: D1 vblank\n");
3487 }
3488 break;
3489 case 1: /* D1 vline */
3490 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
3491 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3492 DRM_DEBUG("IH: D1 vline\n");
3493 }
3494 break;
3495 default:
3496 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3497 break;
3498 }
3499 break;
3500 case 2: /* D2 vblank/vline */
3501 switch (src_data) {
3502 case 0: /* D2 vblank */
3503 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
3504 if (rdev->irq.crtc_vblank_int[1]) {
3505 drm_handle_vblank(rdev->ddev, 1);
3506 rdev->pm.vblank_sync = true;
3507 wake_up(&rdev->irq.vblank_queue);
3508 }
3509 if (atomic_read(&rdev->irq.pflip[1]))
3510 radeon_crtc_handle_flip(rdev, 1);
3511 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
3512 DRM_DEBUG("IH: D2 vblank\n");
3513 }
3514 break;
3515 case 1: /* D2 vline */
3516 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
3517 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
3518 DRM_DEBUG("IH: D2 vline\n");
3519 }
3520 break;
3521 default:
3522 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3523 break;
3524 }
3525 break;
3526 case 3: /* D3 vblank/vline */
3527 switch (src_data) {
3528 case 0: /* D3 vblank */
3529 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
3530 if (rdev->irq.crtc_vblank_int[2]) {
3531 drm_handle_vblank(rdev->ddev, 2);
3532 rdev->pm.vblank_sync = true;
3533 wake_up(&rdev->irq.vblank_queue);
3534 }
3535 if (atomic_read(&rdev->irq.pflip[2]))
3536 radeon_crtc_handle_flip(rdev, 2);
3537 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
3538 DRM_DEBUG("IH: D3 vblank\n");
3539 }
3540 break;
3541 case 1: /* D3 vline */
3542 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
3543 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
3544 DRM_DEBUG("IH: D3 vline\n");
3545 }
3546 break;
3547 default:
3548 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3549 break;
3550 }
3551 break;
3552 case 4: /* D4 vblank/vline */
3553 switch (src_data) {
3554 case 0: /* D4 vblank */
3555 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
3556 if (rdev->irq.crtc_vblank_int[3]) {
3557 drm_handle_vblank(rdev->ddev, 3);
3558 rdev->pm.vblank_sync = true;
3559 wake_up(&rdev->irq.vblank_queue);
3560 }
3561 if (atomic_read(&rdev->irq.pflip[3]))
3562 radeon_crtc_handle_flip(rdev, 3);
3563 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
3564 DRM_DEBUG("IH: D4 vblank\n");
3565 }
3566 break;
3567 case 1: /* D4 vline */
3568 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
3569 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
3570 DRM_DEBUG("IH: D4 vline\n");
3571 }
3572 break;
3573 default:
3574 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3575 break;
3576 }
3577 break;
3578 case 5: /* D5 vblank/vline */
3579 switch (src_data) {
3580 case 0: /* D5 vblank */
3581 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
3582 if (rdev->irq.crtc_vblank_int[4]) {
3583 drm_handle_vblank(rdev->ddev, 4);
3584 rdev->pm.vblank_sync = true;
3585 wake_up(&rdev->irq.vblank_queue);
3586 }
3587 if (atomic_read(&rdev->irq.pflip[4]))
3588 radeon_crtc_handle_flip(rdev, 4);
3589 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
3590 DRM_DEBUG("IH: D5 vblank\n");
3591 }
3592 break;
3593 case 1: /* D5 vline */
3594 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
3595 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
3596 DRM_DEBUG("IH: D5 vline\n");
3597 }
3598 break;
3599 default:
3600 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3601 break;
3602 }
3603 break;
3604 case 6: /* D6 vblank/vline */
3605 switch (src_data) {
3606 case 0: /* D6 vblank */
3607 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
3608 if (rdev->irq.crtc_vblank_int[5]) {
3609 drm_handle_vblank(rdev->ddev, 5);
3610 rdev->pm.vblank_sync = true;
3611 wake_up(&rdev->irq.vblank_queue);
3612 }
3613 if (atomic_read(&rdev->irq.pflip[5]))
3614 radeon_crtc_handle_flip(rdev, 5);
3615 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
3616 DRM_DEBUG("IH: D6 vblank\n");
3617 }
3618 break;
3619 case 1: /* D6 vline */
3620 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
3621 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
3622 DRM_DEBUG("IH: D6 vline\n");
3623 }
3624 break;
3625 default:
3626 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3627 break;
3628 }
3629 break;
3630 case 42: /* HPD hotplug */
3631 switch (src_data) {
3632 case 0:
3633 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
3634 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
3635 queue_hotplug = true;
3636 DRM_DEBUG("IH: HPD1\n");
3637 }
3638 break;
3639 case 1:
3640 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
3641 rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
3642 queue_hotplug = true;
3643 DRM_DEBUG("IH: HPD2\n");
3644 }
3645 break;
3646 case 2:
3647 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
3648 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
3649 queue_hotplug = true;
3650 DRM_DEBUG("IH: HPD3\n");
3651 }
3652 break;
3653 case 3:
3654 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
3655 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
3656 queue_hotplug = true;
3657 DRM_DEBUG("IH: HPD4\n");
3658 }
3659 break;
3660 case 4:
3661 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
3662 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
3663 queue_hotplug = true;
3664 DRM_DEBUG("IH: HPD5\n");
3665 }
3666 break;
3667 case 5:
3668 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
3669 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
3670 queue_hotplug = true;
3671 DRM_DEBUG("IH: HPD6\n");
3672 }
3673 break;
3674 default:
3675 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3676 break;
3677 }
3678 break;
3679 case 176: /* GFX RB CP_INT */
3680 case 177: /* GFX IB CP_INT */
3681 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3682 break;
3683 case 181: /* CP EOP event */
3684 DRM_DEBUG("IH: CP EOP\n");
3685 switch (me_id) {
3686 case 0:
3687 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3688 break;
3689 case 1:
3690 /* XXX compute */
3691 break;
3692 case 2:
3693 /* XXX compute */
3694 break;
3695 }
3696 break;
3697 case 184: /* CP Privileged reg access */
3698 DRM_ERROR("Illegal register access in command stream\n");
3699 /* XXX check the bitfield order! */
3700 me_id = (ring_id & 0x60) >> 5;
3701 pipe_id = (ring_id & 0x18) >> 3;
3702 queue_id = (ring_id & 0x7) >> 0;
3703 switch (me_id) {
3704 case 0:
3705 /* This results in a full GPU reset, but all we need to do is soft
3706 * reset the CP for gfx
3707 */
3708 queue_reset = true;
3709 break;
3710 case 1:
3711 /* XXX compute */
3712 break;
3713 case 2:
3714 /* XXX compute */
3715 break;
3716 }
3717 break;
3718 case 185: /* CP Privileged inst */
3719 DRM_ERROR("Illegal instruction in command stream\n");
3720 switch (me_id) {
3721 case 0:
3722 /* This results in a full GPU reset, but all we need to do is soft
3723 * reset the CP for gfx
3724 */
3725 queue_reset = true;
3726 break;
3727 case 1:
3728 /* XXX compute */
3729 break;
3730 case 2:
3731 /* XXX compute */
3732 break;
3733 }
3734 break;
3735 case 233: /* GUI IDLE */
3736 DRM_DEBUG("IH: GUI idle\n");
3737 break;
3738 default:
3739 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3740 break;
3741 }
3742
3743 /* wptr/rptr are in bytes! */
3744 rptr += 16;
3745 rptr &= rdev->ih.ptr_mask;
3746 }
3747 if (queue_hotplug)
3748 schedule_work(&rdev->hotplug_work);
3749 if (queue_reset)
3750 schedule_work(&rdev->reset_work);
3751 rdev->ih.rptr = rptr;
3752 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3753 atomic_set(&rdev->ih.lock, 0);
3754
3755 /* make sure wptr hasn't changed while processing */
3756 wptr = cik_get_ih_wptr(rdev);
3757 if (wptr != rptr)
3758 goto restart_ih;
3759
3760 return IRQ_HANDLED;
3761}
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index a11602012496..a282168eadd0 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -178,8 +178,42 @@
178#define HDP_MISC_CNTL 0x2F4C 178#define HDP_MISC_CNTL 0x2F4C
179#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) 179#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
180 180
181#define IH_RB_CNTL 0x3e00
182# define IH_RB_ENABLE (1 << 0)
183# define IH_RB_SIZE(x) ((x) << 1) /* log2 */
184# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
185# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
186# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
187# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
188# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
189#define IH_RB_BASE 0x3e04
190#define IH_RB_RPTR 0x3e08
191#define IH_RB_WPTR 0x3e0c
192# define RB_OVERFLOW (1 << 0)
193# define WPTR_OFFSET_MASK 0x3fffc
194#define IH_RB_WPTR_ADDR_HI 0x3e10
195#define IH_RB_WPTR_ADDR_LO 0x3e14
196#define IH_CNTL 0x3e18
197# define ENABLE_INTR (1 << 0)
198# define IH_MC_SWAP(x) ((x) << 1)
199# define IH_MC_SWAP_NONE 0
200# define IH_MC_SWAP_16BIT 1
201# define IH_MC_SWAP_32BIT 2
202# define IH_MC_SWAP_64BIT 3
203# define RPTR_REARM (1 << 4)
204# define MC_WRREQ_CREDIT(x) ((x) << 15)
205# define MC_WR_CLEAN_CNT(x) ((x) << 20)
206# define MC_VMID(x) ((x) << 25)
207
181#define CONFIG_MEMSIZE 0x5428 208#define CONFIG_MEMSIZE 0x5428
182 209
210#define INTERRUPT_CNTL 0x5468
211# define IH_DUMMY_RD_OVERRIDE (1 << 0)
212# define IH_DUMMY_RD_EN (1 << 1)
213# define IH_REQ_NONSNOOP_EN (1 << 3)
214# define GEN_IH_INT_EN (1 << 8)
215#define INTERRUPT_CNTL2 0x546c
216
183#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 217#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
184 218
185#define BIF_FB_EN 0x5490 219#define BIF_FB_EN 0x5490
@@ -203,6 +237,99 @@
203#define SDMA0 (1 << 10) 237#define SDMA0 (1 << 10)
204#define SDMA1 (1 << 11) 238#define SDMA1 (1 << 11)
205 239
240/* 0x6b24, 0x7724, 0x10324, 0x10f24, 0x11b24, 0x12724 */
241#define LB_VLINE_STATUS 0x6b24
242# define VLINE_OCCURRED (1 << 0)
243# define VLINE_ACK (1 << 4)
244# define VLINE_STAT (1 << 12)
245# define VLINE_INTERRUPT (1 << 16)
246# define VLINE_INTERRUPT_TYPE (1 << 17)
247/* 0x6b2c, 0x772c, 0x1032c, 0x10f2c, 0x11b2c, 0x1272c */
248#define LB_VBLANK_STATUS 0x6b2c
249# define VBLANK_OCCURRED (1 << 0)
250# define VBLANK_ACK (1 << 4)
251# define VBLANK_STAT (1 << 12)
252# define VBLANK_INTERRUPT (1 << 16)
253# define VBLANK_INTERRUPT_TYPE (1 << 17)
254
255/* 0x6b20, 0x7720, 0x10320, 0x10f20, 0x11b20, 0x12720 */
256#define LB_INTERRUPT_MASK 0x6b20
257# define VBLANK_INTERRUPT_MASK (1 << 0)
258# define VLINE_INTERRUPT_MASK (1 << 4)
259# define VLINE2_INTERRUPT_MASK (1 << 8)
260
261#define DISP_INTERRUPT_STATUS 0x60f4
262# define LB_D1_VLINE_INTERRUPT (1 << 2)
263# define LB_D1_VBLANK_INTERRUPT (1 << 3)
264# define DC_HPD1_INTERRUPT (1 << 17)
265# define DC_HPD1_RX_INTERRUPT (1 << 18)
266# define DACA_AUTODETECT_INTERRUPT (1 << 22)
267# define DACB_AUTODETECT_INTERRUPT (1 << 23)
268# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
269# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
270#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
271# define LB_D2_VLINE_INTERRUPT (1 << 2)
272# define LB_D2_VBLANK_INTERRUPT (1 << 3)
273# define DC_HPD2_INTERRUPT (1 << 17)
274# define DC_HPD2_RX_INTERRUPT (1 << 18)
275# define DISP_TIMER_INTERRUPT (1 << 24)
276#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
277# define LB_D3_VLINE_INTERRUPT (1 << 2)
278# define LB_D3_VBLANK_INTERRUPT (1 << 3)
279# define DC_HPD3_INTERRUPT (1 << 17)
280# define DC_HPD3_RX_INTERRUPT (1 << 18)
281#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
282# define LB_D4_VLINE_INTERRUPT (1 << 2)
283# define LB_D4_VBLANK_INTERRUPT (1 << 3)
284# define DC_HPD4_INTERRUPT (1 << 17)
285# define DC_HPD4_RX_INTERRUPT (1 << 18)
286#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
287# define LB_D5_VLINE_INTERRUPT (1 << 2)
288# define LB_D5_VBLANK_INTERRUPT (1 << 3)
289# define DC_HPD5_INTERRUPT (1 << 17)
290# define DC_HPD5_RX_INTERRUPT (1 << 18)
291#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
292# define LB_D6_VLINE_INTERRUPT (1 << 2)
293# define LB_D6_VBLANK_INTERRUPT (1 << 3)
294# define DC_HPD6_INTERRUPT (1 << 17)
295# define DC_HPD6_RX_INTERRUPT (1 << 18)
296#define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780
297
298#define DAC_AUTODETECT_INT_CONTROL 0x67c8
299
300#define DC_HPD1_INT_STATUS 0x601c
301#define DC_HPD2_INT_STATUS 0x6028
302#define DC_HPD3_INT_STATUS 0x6034
303#define DC_HPD4_INT_STATUS 0x6040
304#define DC_HPD5_INT_STATUS 0x604c
305#define DC_HPD6_INT_STATUS 0x6058
306# define DC_HPDx_INT_STATUS (1 << 0)
307# define DC_HPDx_SENSE (1 << 1)
308# define DC_HPDx_SENSE_DELAYED (1 << 4)
309# define DC_HPDx_RX_INT_STATUS (1 << 8)
310
311#define DC_HPD1_INT_CONTROL 0x6020
312#define DC_HPD2_INT_CONTROL 0x602c
313#define DC_HPD3_INT_CONTROL 0x6038
314#define DC_HPD4_INT_CONTROL 0x6044
315#define DC_HPD5_INT_CONTROL 0x6050
316#define DC_HPD6_INT_CONTROL 0x605c
317# define DC_HPDx_INT_ACK (1 << 0)
318# define DC_HPDx_INT_POLARITY (1 << 8)
319# define DC_HPDx_INT_EN (1 << 16)
320# define DC_HPDx_RX_INT_ACK (1 << 20)
321# define DC_HPDx_RX_INT_EN (1 << 24)
322
323#define DC_HPD1_CONTROL 0x6024
324#define DC_HPD2_CONTROL 0x6030
325#define DC_HPD3_CONTROL 0x603c
326#define DC_HPD4_CONTROL 0x6048
327#define DC_HPD5_CONTROL 0x6054
328#define DC_HPD6_CONTROL 0x6060
329# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
330# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
331# define DC_HPDx_EN (1 << 28)
332
206#define GRBM_CNTL 0x8000 333#define GRBM_CNTL 0x8000
207#define GRBM_READ_TIMEOUT(x) ((x) << 0) 334#define GRBM_READ_TIMEOUT(x) ((x) << 0)
208 335
@@ -274,6 +401,10 @@
274#define SOFT_RESET_CPC (1 << 18) /* CP Compute (MEC1/2) */ 401#define SOFT_RESET_CPC (1 << 18) /* CP Compute (MEC1/2) */
275#define SOFT_RESET_CPG (1 << 19) /* CP GFX (PFP, ME, CE) */ 402#define SOFT_RESET_CPG (1 << 19) /* CP GFX (PFP, ME, CE) */
276 403
404#define GRBM_INT_CNTL 0x8060
405# define RDERR_INT_ENABLE (1 << 0)
406# define GUI_IDLE_INT_ENABLE (1 << 19)
407
277#define CP_MEC_CNTL 0x8234 408#define CP_MEC_CNTL 0x8234
278#define MEC_ME2_HALT (1 << 28) 409#define MEC_ME2_HALT (1 << 28)
279#define MEC_ME1_HALT (1 << 30) 410#define MEC_ME1_HALT (1 << 30)
@@ -507,6 +638,45 @@
507# define CP_RINGID1_INT_ENABLE (1 << 30) 638# define CP_RINGID1_INT_ENABLE (1 << 30)
508# define CP_RINGID0_INT_ENABLE (1 << 31) 639# define CP_RINGID0_INT_ENABLE (1 << 31)
509 640
641#define CP_INT_STATUS_RING0 0xC1B4
642# define PRIV_INSTR_INT_STAT (1 << 22)
643# define PRIV_REG_INT_STAT (1 << 23)
644# define TIME_STAMP_INT_STAT (1 << 26)
645# define CP_RINGID2_INT_STAT (1 << 29)
646# define CP_RINGID1_INT_STAT (1 << 30)
647# define CP_RINGID0_INT_STAT (1 << 31)
648
649#define CP_ME1_PIPE0_INT_CNTL 0xC214
650#define CP_ME1_PIPE1_INT_CNTL 0xC218
651#define CP_ME1_PIPE2_INT_CNTL 0xC21C
652#define CP_ME1_PIPE3_INT_CNTL 0xC220
653#define CP_ME2_PIPE0_INT_CNTL 0xC224
654#define CP_ME2_PIPE1_INT_CNTL 0xC228
655#define CP_ME2_PIPE2_INT_CNTL 0xC22C
656#define CP_ME2_PIPE3_INT_CNTL 0xC230
657# define DEQUEUE_REQUEST_INT_ENABLE (1 << 13)
658# define WRM_POLL_TIMEOUT_INT_ENABLE (1 << 17)
659# define PRIV_REG_INT_ENABLE (1 << 23)
660# define TIME_STAMP_INT_ENABLE (1 << 26)
661# define GENERIC2_INT_ENABLE (1 << 29)
662# define GENERIC1_INT_ENABLE (1 << 30)
663# define GENERIC0_INT_ENABLE (1 << 31)
664#define CP_ME1_PIPE0_INT_STATUS 0xC214
665#define CP_ME1_PIPE1_INT_STATUS 0xC218
666#define CP_ME1_PIPE2_INT_STATUS 0xC21C
667#define CP_ME1_PIPE3_INT_STATUS 0xC220
668#define CP_ME2_PIPE0_INT_STATUS 0xC224
669#define CP_ME2_PIPE1_INT_STATUS 0xC228
670#define CP_ME2_PIPE2_INT_STATUS 0xC22C
671#define CP_ME2_PIPE3_INT_STATUS 0xC230
672# define DEQUEUE_REQUEST_INT_STATUS (1 << 13)
673# define WRM_POLL_TIMEOUT_INT_STATUS (1 << 17)
674# define PRIV_REG_INT_STATUS (1 << 23)
675# define TIME_STAMP_INT_STATUS (1 << 26)
676# define GENERIC2_INT_STATUS (1 << 29)
677# define GENERIC1_INT_STATUS (1 << 30)
678# define GENERIC0_INT_STATUS (1 << 31)
679
510#define CP_MAX_CONTEXT 0xC2B8 680#define CP_MAX_CONTEXT 0xC2B8
511 681
512#define CP_RB0_BASE_HI 0xC2C4 682#define CP_RB0_BASE_HI 0xC2C4
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1c06c47bf4bd..e09157beeef0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -600,10 +600,21 @@ struct evergreen_irq_stat_regs {
600 u32 afmt_status6; 600 u32 afmt_status6;
601}; 601};
602 602
603struct cik_irq_stat_regs {
604 u32 disp_int;
605 u32 disp_int_cont;
606 u32 disp_int_cont2;
607 u32 disp_int_cont3;
608 u32 disp_int_cont4;
609 u32 disp_int_cont5;
610 u32 disp_int_cont6;
611};
612
603union radeon_irq_stat_regs { 613union radeon_irq_stat_regs {
604 struct r500_irq_stat_regs r500; 614 struct r500_irq_stat_regs r500;
605 struct r600_irq_stat_regs r600; 615 struct r600_irq_stat_regs r600;
606 struct evergreen_irq_stat_regs evergreen; 616 struct evergreen_irq_stat_regs evergreen;
617 struct cik_irq_stat_regs cik;
607}; 618};
608 619
609#define RADEON_MAX_HPD_PINS 6 620#define RADEON_MAX_HPD_PINS 6