author     Chris Wilson <chris@chris-wilson.co.uk>    2013-07-19 15:36:53 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2013-07-25 09:22:05 -0400
commit     6af5d92f909796cb706f3b9efefd75cb0f5afcff (patch)
tree       de568838b3531ec9c4b1055f41bb294b1817f189
parent     907b28c56ea40629aa6595ddfa414ec2fc7da41c (diff)
drm/i915: Use a private interface for register access within GT
The GT functions for enabling register access also need to occasionally
write to and read from registers. To avoid the potential recursion as we
modify the public interface to be stricter, introduce a private register
access API for the GT functions.

v2: Rebase
v3: Rebase onto uncore
v4: Use raw interfaces consistently so that we only use the low-level
    readN functions from a single location.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
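To make the recursion point concrete, here is a minimal sketch of the split the patch introduces between raw MMIO accessors and the public, forcewake-aware accessor. This is not the driver's actual code; the names (example_priv, example_read32) are invented for illustration, and locking, tracing, and forcewake bookkeeping are omitted.

    /* Sketch only: raw accessors are plain MMIO on the mapped register BAR,
     * so the forcewake helpers can use them without re-entering the public
     * path that would try to take forcewake again.
     */
    #include <linux/io.h>
    #include <linux/types.h>

    struct example_priv {
    	void __iomem *regs;	/* ioremapped MMIO register space */
    };

    /* Raw accessors: no forcewake, no tracing. */
    #define __raw_example_read32(p, reg)        readl((p)->regs + (reg))
    #define __raw_example_write32(p, reg, val)  writel((val), (p)->regs + (reg))

    /* Public accessor: in the real driver this may grab forcewake and trace
     * the access before touching the register; if the forcewake code itself
     * went through here, it would recurse, hence the __raw_* variants above.
     */
    static inline u32 example_read32(struct example_priv *p, u32 reg)
    {
    	/* ...forcewake/locking/tracing would go here... */
    	return __raw_example_read32(p, reg);
    }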
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h      |  22
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  | 136
2 files changed, 90 insertions(+), 68 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a55315a8d5a3..cf40bb16bb37 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2133,22 +2133,20 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 int vlv_gpu_freq(int ddr_freq, int val);
 int vlv_freq_opcode(int ddr_freq, int val);
 
-#define __i915_read(x, y) \
+#define __i915_read(x) \
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
-
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
 #undef __i915_read
 
-#define __i915_write(x, y) \
+#define __i915_write(x) \
 	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
-
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
 #undef __i915_write
 
 #define I915_READ8(reg)		i915_read8(dev_priv, (reg))
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 97e8b1b86476..228bc7a3f373 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -26,6 +26,21 @@
 
 #define FORCEWAKE_ACK_TIMEOUT_MS 2
 
+#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
+#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
+#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+
+
 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
 {
 	u32 gt_thread_status_mask;
@@ -38,26 +53,28 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
 	/* w/a for a sporadic read returning 0 by waiting for the GT
 	 * thread to wake up.
 	 */
-	if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
 		DRM_ERROR("GT thread status wait timed out\n");
 }
 
 static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE_NOTRACE(FORCEWAKE, 0);
-	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
+	/* something from same cacheline, but !FORCEWAKE */
+	__raw_posting_read(dev_priv, ECOBUS);
 }
 
 static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
-	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
+	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-	I915_WRITE_NOTRACE(FORCEWAKE, 1);
-	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
+	/* something from same cacheline, but !FORCEWAKE */
+	__raw_posting_read(dev_priv, ECOBUS);
 
-	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
+	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
@@ -67,9 +84,9 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 
 static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
 	/* something from same cacheline, but !FORCEWAKE_MT */
-	POSTING_READ(ECOBUS);
+	__raw_posting_read(dev_priv, ECOBUS);
 }
 
 static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
@@ -81,15 +98,16 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
 	else
 		forcewake_ack = FORCEWAKE_MT_ACK;
 
-	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
+	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
+			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 	/* something from same cacheline, but !FORCEWAKE_MT */
-	POSTING_READ(ECOBUS);
+	__raw_posting_read(dev_priv, ECOBUS);
 
-	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
+	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
@@ -100,25 +118,27 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
 static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
 {
 	u32 gtfifodbg;
-	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+
+	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
 	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
 		 "MMIO read or write has been dropped %x\n", gtfifodbg))
-		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+		__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
 }
 
 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE_NOTRACE(FORCEWAKE, 0);
+	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
 	/* something from same cacheline, but !FORCEWAKE */
-	POSTING_READ(ECOBUS);
+	__raw_posting_read(dev_priv, ECOBUS);
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
+			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 	/* something from same cacheline, but !FORCEWAKE_MT */
-	POSTING_READ(ECOBUS);
+	__raw_posting_read(dev_priv, ECOBUS);
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
@@ -128,10 +148,10 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 
 	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
 		int loop = 500;
-		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+		u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
 		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
 			udelay(10);
-			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+			fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
 		}
 		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
 			++ret;
@@ -144,26 +164,28 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 
 static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+			   _MASKED_BIT_DISABLE(0xffff));
 	/* something from same cacheline, but !FORCEWAKE_VLV */
-	POSTING_READ(FORCEWAKE_ACK_VLV);
+	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
 }
 
 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 {
-	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
+	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-	I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
 			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
-	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
+	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
 
-	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
+	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
 			     FORCEWAKE_KERNEL),
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
@@ -174,8 +196,9 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 
 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-	I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
 			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 	/* The below doubles as a POSTING_READ */
 	gen6_gt_check_fifodbg(dev_priv);
@@ -186,7 +209,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (HAS_FPGA_DBG_UNCLAIMED(dev))
-		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 }
 
 void intel_uncore_init(struct drm_device *dev)
@@ -213,7 +236,7 @@ void intel_uncore_init(struct drm_device *dev)
 	 */
 	mutex_lock(&dev->struct_mutex);
 	__gen6_gt_force_wake_mt_get(dev_priv);
-	ecobus = I915_READ_NOTRACE(ECOBUS);
+	ecobus = __raw_i915_read32(dev_priv, ECOBUS);
 	__gen6_gt_force_wake_mt_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -295,17 +318,17 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
 	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
 	 * the chip from rc6 before touching it for real. MI_MODE is masked,
 	 * hence harmless to write 0 into. */
-	I915_WRITE_NOTRACE(MI_MODE, 0);
+	__raw_i915_write32(dev_priv, MI_MODE, 0);
 }
 
 static void
 hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
 {
 	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
 		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
 			  reg);
-		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 	}
 }
 
@@ -313,13 +336,13 @@ static void
 hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
 {
 	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
 		DRM_ERROR("Unclaimed write to %x\n", reg);
-		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 	}
 }
 
-#define __i915_read(x, y) \
+#define __i915_read(x) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 	unsigned long irqflags; \
 	u##x val = 0; \
@@ -329,24 +352,24 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		if (dev_priv->uncore.forcewake_count == 0) \
 			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
-		val = read##y(dev_priv->regs + reg); \
+		val = __raw_i915_read##x(dev_priv, reg); \
 		if (dev_priv->uncore.forcewake_count == 0) \
 			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
 	} else { \
-		val = read##y(dev_priv->regs + reg); \
+		val = __raw_i915_read##x(dev_priv, reg); \
 	} \
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
 	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
 	return val; \
 }
 
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
 #undef __i915_read
 
-#define __i915_write(x, y) \
+#define __i915_write(x) \
 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
 	unsigned long irqflags; \
 	u32 __fifo_ret = 0; \
@@ -358,17 +381,17 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
 	if (IS_GEN5(dev_priv->dev)) \
 		ilk_dummy_write(dev_priv); \
 	hsw_unclaimed_reg_clear(dev_priv, reg); \
-	write##y(val, dev_priv->regs + reg); \
+	__raw_i915_write##x(dev_priv, reg, val); \
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
 	hsw_unclaimed_reg_check(dev_priv, reg); \
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
 }
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
 #undef __i915_write
 
 static const struct register_whitelist {
@@ -521,10 +544,10 @@ static int gen6_do_reset(struct drm_device *dev)
 	 * for fifo space for the write or forcewake the chip for
 	 * the read
 	 */
-	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
+	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
 
 	/* Spin waiting for the device to ack the reset request */
-	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
 
 	/* If reset with a user forcewake, try to restore, otherwise turn it off */
 	if (dev_priv->uncore.forcewake_count)
@@ -533,7 +556,7 @@ static int gen6_do_reset(struct drm_device *dev)
 		dev_priv->uncore.funcs.force_wake_put(dev_priv);
 
 	/* Restore fifo count */
-	dev_priv->uncore.fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 	return ret;
@@ -555,8 +578,9 @@ void intel_uncore_clear_errors(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	/* XXX needs spinlock around caller's grouping */
 	if (HAS_FPGA_DBG_UNCLAIMED(dev))
-		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 }
 
 void intel_uncore_check_errors(struct drm_device *dev)
@@ -564,8 +588,8 @@ void intel_uncore_check_errors(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
-	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
 		DRM_ERROR("Unclaimed register before interrupt\n");
-		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 	}
 }