-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.h     |   7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c  |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_pm.c        | 277
3 files changed, 147 insertions(+), 143 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 4a9838ddacec..884bb7f90a18 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -59,10 +59,9 @@ void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
 void nv50_pm_clock_set(struct drm_device *, void *);
 
 /* nva3_pm.c */
-int nva3_pm_clock_get(struct drm_device *, u32 id);
-void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
-			u32 id, int khz);
-void nva3_pm_clock_set(struct drm_device *, void *);
+int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+void nva3_pm_clocks_set(struct drm_device *, void *);
 
 /* nouveau_temp.c */
 void nouveau_temp_init(struct drm_device *dev);
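
For context on the header change above: the old per-clock entry points took a PLL id and a target frequency in kHz, while the reworked ones operate on a whole performance level and pass opaque state from the pre step to the set step. A minimal sketch of a caller, assuming a hypothetical wrapper name (in the driver the calls are routed through the engine->pm hooks wired up in nouveau_state.c):

/* Illustrative only: everything except the nva3_pm_clocks_* prototypes
 * is invented for the example. */
static int
example_reclock(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	void *state;

	state = nva3_pm_clocks_pre(dev, perflvl);	/* compute register values */
	if (IS_ERR(state))
		return PTR_ERR(state);
	nva3_pm_clocks_set(dev, state);			/* program clocks; frees state */
	return 0;
}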
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index bd4c8f56b5d9..3e7f3812bfcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -359,9 +359,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->pm.clock_set	= nv50_pm_clock_set;
 		break;
 	default:
-		engine->pm.clock_get	= nva3_pm_clock_get;
-		engine->pm.clock_pre	= nva3_pm_clock_pre;
-		engine->pm.clock_set	= nva3_pm_clock_set;
+		engine->pm.clocks_get	= nva3_pm_clocks_get;
+		engine->pm.clocks_pre	= nva3_pm_clocks_pre;
+		engine->pm.clocks_set	= nva3_pm_clocks_set;
 		break;
 	}
 	engine->pm.voltage_get	= nouveau_voltage_gpio_get;
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 35fc57a93698..562e781c4f53 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -27,11 +27,20 @@
 #include "nouveau_bios.h"
 #include "nouveau_pm.h"
 
-static u32 read_pll(struct drm_device *dev, u32 pll, int clk);
-static u32 read_clk(struct drm_device *dev, int clk);
+static u32 read_clk(struct drm_device *, int, bool);
+static u32 read_pll(struct drm_device *, u32, int);
 
 static u32
-read_clk(struct drm_device *dev, int clk)
+read_vco(struct drm_device *dev, int clk)
+{
+	u32 sctl = nv_rd32(dev, 0x4120 + (clk * 4));
+	if ((sctl & 0x00000030) != 0x00000030)
+		return read_pll(dev, 0x00e820, 0x41);
+	return read_pll(dev, 0x00e8a0, 0x42);
+}
+
+static u32
+read_clk(struct drm_device *dev, int clk, bool ignore_en)
 {
 	u32 sctl, sdiv, sclk;
 
@@ -39,20 +48,19 @@ read_clk(struct drm_device *dev, int clk)
 		return 27000;
 
 	sctl = nv_rd32(dev, 0x4120 + (clk * 4));
-	switch (sctl & 0x00003100) {
-	case 0x00000100:
+	if (!ignore_en && !(sctl & 0x00000100))
+		return 0;
+
+	switch (sctl & 0x00003000) {
+	case 0x00000000:
 		return 27000;
-	case 0x00002100:
+	case 0x00002000:
 		if (sctl & 0x00000040)
 			return 108000;
 		return 100000;
-	case 0x00003100:
+	case 0x00003000:
+		sclk = read_vco(dev, clk);
 		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
-		if ((sctl & 0x00000030) != 0x00000030)
-			sclk = read_pll(dev, 0x00e820, 0x41);
-		else
-			sclk = read_pll(dev, 0x00e8a0, 0x42);
-
 		return (sclk * 2) / sdiv;
 	default:
 		return 0;
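
A worked decode of the source-control word handled by the new read_clk() above, with invented values: bit 8 is the enable bit, bits 13:12 select the source (0 = 27 MHz crystal, 2 = 100/108 MHz, 3 = VCO plus post-divider), and bits 21:16 hold the divider minus two.

/* Illustrative numbers only, not taken from real hardware. */
u32 sctl = 0x00043100;				/* example source-control word */
u32 vco  = 810000;				/* kHz, as if returned by read_vco() */
u32 sdiv = ((sctl & 0x003f0000) >> 16) + 2;	/* 4 + 2 = 6 */
u32 khz  = (vco * 2) / sdiv;			/* (810000 * 2) / 6 = 270000 kHz */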
@@ -73,161 +81,158 @@ read_pll(struct drm_device *dev, u32 pll, int clk)
 		if ((pll & 0x00ff00) == 0x00e800)
 			P = 1;
 
-		sclk = read_clk(dev, 0x00 + clk);
+		sclk = read_clk(dev, 0x00 + clk, false);
 	} else {
-		sclk = read_clk(dev, 0x10 + clk);
+		sclk = read_clk(dev, 0x10 + clk, false);
 	}
 
 	return sclk * N / (M * P);
 }
 
-struct nva3_pm_state {
-	enum pll_types type;
-	u32 src0;
-	u32 src1;
-	u32 ctrl;
-	u32 coef;
-	u32 old_pnm;
-	u32 new_pnm;
-	u32 new_div;
+struct creg {
+	u32 clk;
+	u32 pll;
 };
 
 static int
-nva3_pm_pll_offset(u32 id)
+calc_clk(struct drm_device *dev, u32 pll, int clk, u32 khz, struct creg *reg)
 {
-	static const u32 pll_map[] = {
-		0x00, PLL_CORE,
-		0x01, PLL_SHADER,
-		0x02, PLL_MEMORY,
-		0x00, 0x00
-	};
-	const u32 *map = pll_map;
+	struct pll_lims limits;
+	u32 oclk, sclk, sdiv;
+	int P, N, M, diff;
+	int ret;
+
+	reg->pll = 0;
+	reg->clk = 0;
 
-	while (map[1]) {
-		if (id == map[1])
-			return map[0];
-		map += 2;
+	switch (khz) {
+	case 27000:
+		reg->clk = 0x00000100;
+		return khz;
+	case 100000:
+		reg->clk = 0x00002100;
+		return khz;
+	case 108000:
+		reg->clk = 0x00002140;
+		return khz;
+	default:
+		sclk = read_vco(dev, clk);
+		sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
+		if (sdiv > 4) {
+			oclk = (sclk * 2) / sdiv;
+			diff = khz - oclk;
+			if (!pll || (diff >= -2000 && diff < 3000)) {
+				reg->clk = (((sdiv - 2) << 16) | 0x00003100);
+				return oclk;
+			}
+		}
+		break;
 	}
 
-	return -ENOENT;
+	ret = get_pll_limits(dev, pll, &limits);
+	if (ret)
+		return ret;
+
+	limits.refclk = read_clk(dev, clk - 0x10, true);
+	if (!limits.refclk)
+		return -EINVAL;
+
+	ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
+	if (ret >= 0) {
+		reg->clk = nv_rd32(dev, 0x4120 + (clk * 4));
+		reg->pll = (P << 16) | (N << 8) | M;
+	}
+	return ret;
 }
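
The calc_clk() added above first tries to reach the target with the fixed VCO and an integer post-divider, and only falls back to reprogramming the PLL when the divider would be 4 or less or the best divider lands outside -2 MHz / +3 MHz of the target. A worked example with invented numbers:

u32 sclk = 810000;					/* kHz, from read_vco() (invented) */
u32 khz  = 270000;					/* requested clock */
u32 sdiv = min((sclk * 2) / (khz - 2999), (u32)65);	/* 1620000 / 267001 = 6 */
u32 oclk = (sclk * 2) / sdiv;				/* exactly 270000 kHz */
int diff = khz - oclk;					/* 0, inside [-2000, 3000) */
/* sdiv > 4 and diff is in range, so calc_clk() returns oclk with
 * reg->clk = ((6 - 2) << 16) | 0x00003100 = 0x00043100 and reg->pll = 0;
 * nva3_calc_pll() is never consulted for this target. */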
 
 int
-nva3_pm_clock_get(struct drm_device *dev, u32 id)
+nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	switch (id) {
-	case PLL_CORE:
-		return read_pll(dev, 0x4200, 0);
-	case PLL_SHADER:
-		return read_pll(dev, 0x4220, 1);
-	case PLL_MEMORY:
-		return read_pll(dev, 0x4000, 2);
-	default:
-		return -ENOENT;
-	}
+	perflvl->core = read_pll(dev, 0x4200, 0);
+	perflvl->shader = read_pll(dev, 0x4220, 1);
+	perflvl->memory = read_pll(dev, 0x4000, 2);
+	return 0;
 }
 
+struct nva3_pm_state {
+	struct creg nclk;
+	struct creg sclk;
+	struct creg mclk;
+};
+
 void *
-nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
-		  u32 id, int khz)
+nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	struct nva3_pm_state *pll;
-	struct pll_lims limits;
-	int N, M, P, diff;
-	int ret, off;
+	struct nva3_pm_state *info;
+	int ret;
 
-	ret = get_pll_limits(dev, id, &limits);
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	ret = calc_clk(dev, 0x4200, 0x10, perflvl->core, &info->nclk);
 	if (ret < 0)
-		return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+		goto out;
 
-	off = nva3_pm_pll_offset(id);
-	if (id < 0)
-		return ERR_PTR(-EINVAL);
+	ret = calc_clk(dev, 0x4220, 0x11, perflvl->shader, &info->sclk);
+	if (ret < 0)
+		goto out;
 
+	ret = calc_clk(dev, 0x4000, 0x12, perflvl->memory, &info->mclk);
+	if (ret < 0)
+		goto out;
 
-	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
-	if (!pll)
-		return ERR_PTR(-ENOMEM);
-	pll->type = id;
-	pll->src0 = 0x004120 + (off * 4);
-	pll->src1 = 0x004160 + (off * 4);
-	pll->ctrl = limits.reg + 0;
-	pll->coef = limits.reg + 4;
-
-	/* If target clock is within [-2, 3) MHz of a divisor, we'll
-	 * use that instead of calculating MNP values
-	 */
-	pll->new_div = min((limits.refclk * 2) / (khz - 2999), 16);
-	if (pll->new_div) {
-		diff = khz - ((limits.refclk * 2) / pll->new_div);
-		if (diff < -2000 || diff >= 3000)
-			pll->new_div = 0;
+out:
+	if (ret < 0) {
+		kfree(info);
+		info = ERR_PTR(ret);
 	}
+	return info;
+}
 
-	if (!pll->new_div) {
-		ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
-		if (ret < 0)
-			return ERR_PTR(ret);
-
-		pll->new_pnm = (P << 16) | (N << 8) | M;
-		pll->new_div = 2 - 1;
+static void
+prog_pll(struct drm_device *dev, u32 pll, int clk, struct creg *reg)
+{
+	const u32 src0 = 0x004120 + (clk * 4);
+	const u32 src1 = 0x004160 + (clk * 4);
+	const u32 ctrl = pll + 0;
+	const u32 coef = pll + 4;
+	u32 cntl;
+
+	cntl = nv_rd32(dev, ctrl) & 0xfffffff2;
+	if (reg->pll) {
+		nv_mask(dev, src0, 0x00000101, 0x00000101);
+		nv_wr32(dev, coef, reg->pll);
+		nv_wr32(dev, ctrl, cntl | 0x00000015);
+		nv_mask(dev, src1, 0x00000100, 0x00000000);
+		nv_mask(dev, src1, 0x00000001, 0x00000000);
 	} else {
-		pll->new_pnm = 0;
-		pll->new_div--;
+		nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
+		nv_wr32(dev, ctrl, cntl | 0x0000001d);
+		nv_mask(dev, ctrl, 0x00000001, 0x00000000);
+		nv_mask(dev, src0, 0x00000100, 0x00000000);
+		nv_mask(dev, src0, 0x00000001, 0x00000000);
 	}
-
-	if ((nv_rd32(dev, pll->src1) & 0x00000101) != 0x00000101)
-		pll->old_pnm = nv_rd32(dev, pll->coef);
-	return pll;
 }
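
The two branches of prog_pll() above match the two ways calc_clk() can fill a struct creg. The register-level meaning is inferred from the patch rather than documented, so treat this summary as an assumption:

/* Inferred behaviour, for orientation only:
 *   reg->pll != 0: park the clock on the src0 path, write the P/N/M
 *                  coefficients, enable the PLL via ctrl, then release src1;
 *   reg->pll == 0: program the source/divider word from reg->clk into src1
 *                  and power the PLL path (ctrl/src0) back down. */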
 
 void
-nva3_pm_clock_set(struct drm_device *dev, void *pre_state)
+nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
 {
-	struct nva3_pm_state *pll = pre_state;
-	u32 ctrl = 0;
-
-	/* For the memory clock, NVIDIA will build a "script" describing
-	 * the reclocking process and ask PDAEMON to execute it.
-	 */
-	if (pll->type == PLL_MEMORY) {
-		nv_wr32(dev, 0x100210, 0);
-		nv_wr32(dev, 0x1002dc, 1);
-		nv_wr32(dev, 0x004018, 0x00001000);
-		ctrl = 0x18000100;
-	}
-
-	if (pll->old_pnm || !pll->new_pnm) {
-		nv_mask(dev, pll->src1, 0x003c0101, 0x00000101 |
-			(pll->new_div << 18));
-		nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl);
-		nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
-	}
-
-	if (pll->new_pnm) {
-		nv_mask(dev, pll->src0, 0x00000101, 0x00000101);
-		nv_wr32(dev, pll->coef, pll->new_pnm);
-		nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl);
-		nv_mask(dev, pll->ctrl, 0x00000010, 0x00000000);
-		nv_mask(dev, pll->ctrl, 0x00020010, 0x00020010);
-		nv_wr32(dev, pll->ctrl, 0x00010015 | ctrl);
-		nv_mask(dev, pll->src1, 0x00000100, 0x00000000);
-		nv_mask(dev, pll->src1, 0x00000001, 0x00000000);
-		if (pll->type == PLL_MEMORY)
-			nv_wr32(dev, 0x4018, 0x10005000);
-	} else {
-		nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
-		nv_mask(dev, pll->src0, 0x00000100, 0x00000000);
-		nv_mask(dev, pll->src0, 0x00000001, 0x00000000);
-		if (pll->type == PLL_MEMORY)
-			nv_wr32(dev, 0x4018, 0x1000d000);
-	}
-
-	if (pll->type == PLL_MEMORY) {
-		nv_wr32(dev, 0x1002dc, 0);
-		nv_wr32(dev, 0x100210, 0x80000000);
-	}
-
-	kfree(pll);
+	struct nva3_pm_state *info = pre_state;
+
+	prog_pll(dev, 0x004200, 0, &info->nclk);
+	prog_pll(dev, 0x004220, 1, &info->sclk);
+
+	nv_wr32(dev, 0x100210, 0);
+	nv_wr32(dev, 0x1002dc, 1);
+	nv_wr32(dev, 0x004018, 0x00001000);
+	prog_pll(dev, 0x004000, 2, &info->mclk);
+	if (nv_rd32(dev, 0x4000) & 0x00000008)
+		nv_wr32(dev, 0x004018, 0x1000d000);
+	else
+		nv_wr32(dev, 0x004018, 0x10005000);
+	nv_wr32(dev, 0x1002dc, 0);
+	nv_wr32(dev, 0x100210, 0x80000000);
+
+	kfree(info);
 }
-