File drivers/gpu/alga/amd/atombios/pp.c changed (mode: 100644) (index 518f8fa..29caa02)

... static long state_process(struct pp_parse *pp_parse, struct pp_state *s,
 	return 0;
 }
 
+static u8 is_emergency_state(struct pp_parse *pp_parse, struct pp_state *s)
+{
+	u16 class_0;
+	u16 d_of;
+	struct pp_desc *d;
+
+	d_of = s->desc_idx * pp_parse->desc_array->entry_sz;
+	d = (struct pp_desc*)((u8*)&pp_parse->desc_array->descs[0] + d_of);
+
+	class_0 = get_unaligned_le16(&d->class_0);
+
+	if (class_0 & PP_DESC_CLASS_0_EMERGENCY)
+		return 1;
+	return 0;
+}
+
 static void state_get(struct pp_parse *pp_parse,
 			struct atb_pp_state *atb_pp_state,
 			u8 (*state_selector)(struct pp_parse *, struct pp_state *))

... static void state_get(struct pp_parse *pp_parse,
 	}
 }
 
-static u8 is_emergency_state(struct pp_parse *pp_parse, struct pp_state *s)
-{
-	u16 class_0;
-	u16 d_of;
-	struct pp_desc *d;
-
-	d_of = s->desc_idx * pp_parse->desc_array->entry_sz;
-	d = (struct pp_desc*)((u8*)&pp_parse->desc_array->descs[0] + d_of);
-
-	class_0 = get_unaligned_le16(&d->class_0);
-
-	if (class_0 & PP_DESC_CLASS_0_EMERGENCY)
-		return 1;
-	return 0;
-}
-
 long atb_pp_emergency_state_get(struct atombios *atb,
 				struct atb_pp_state *emergency)
 {
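The two hunks above only move is_emergency_state() in front of state_get(), grouping the selector predicates together; the body is unchanged. state_get() takes the selector as a callback, so adding a new state query is one predicate plus one call. A sketch of what such a predicate looks like — PP_DESC_CLASS_UI_BATTERY is hypothetical here; every other identifier appears in this file:

static u8 is_battery_state(struct pp_parse *pp_parse, struct pp_state *s)
{
	u16 class_0;
	u16 d_of;
	struct pp_desc *d;

	/* locate this state's descriptor in the packed descriptor array */
	d_of = s->desc_idx * pp_parse->desc_array->entry_sz;
	d = (struct pp_desc*)((u8*)&pp_parse->desc_array->descs[0] + d_of);

	/* the UI classification lives in the low class word */
	class_0 = get_unaligned_le16(&d->class_0) & PP_DESC_CLASS_UI_MASK;
	return class_0 == PP_DESC_CLASS_UI_BATTERY;
}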
... unlock_mutex:
 }
 EXPORT_SYMBOL_GPL(atb_pp_emergency_state_get);
 
+static u8 is_ulv_state(struct pp_parse *pp_parse, struct pp_state *s)
+{
+	u16 class_1;
+	u16 d_of;
+	struct pp_desc *d;
+
+	d_of = s->desc_idx * pp_parse->desc_array->entry_sz;
+	d = (struct pp_desc*)((u8*)&pp_parse->desc_array->descs[0] + d_of);
+
+	class_1 = get_unaligned_le16(&d->class_1);
+
+	if (class_1 & PP_DESC_CLASS_1_ULV)
+		return 1;
+	return 0;
+}
+
+long atb_pp_ulv_state_get(struct atombios *atb, struct atb_pp_state *ulv)
+{
+	u16 of;
+	struct master_data_tbl *data_tbl;
+	struct pp_parse pp_parse;
+	long r;
+
+	mutex_lock(&atb->mutex);
+
+	of = get_unaligned_le16(&atb->hdr->master_data_tbl_of);
+	data_tbl = atb->adev.rom + of;
+
+	of = get_unaligned_le16(&data_tbl->list.pp_info);
+	pp_parse.pp = atb->adev.rom + of;
+
+	dev_info(atb->adev.dev, "atombios:pp_info (0x%04x) revision %u.%u\n",
+		of, pp_parse.pp->pp0.hdr.tbl_fmt_rev,
+		pp_parse.pp->pp0.hdr.tbl_content_rev);
+
+	if (pp_parse.pp->pp0.hdr.tbl_fmt_rev != 6
+			&& pp_parse.pp->pp0.hdr.tbl_content_rev != 1) {
+		dev_err(atb->adev.dev, "atombios:pp_info (0x%04x) revision %u.%u not supported\n",
+			of, pp_parse.pp->pp0.hdr.tbl_fmt_rev,
+			pp_parse.pp->pp0.hdr.tbl_content_rev);
+		r = -ATB_ERR;
+		goto unlock_mutex;
+	}
+
+	pp_parse.state_array = atb->adev.rom + of
+				+ get_unaligned_le16(&pp_parse.pp->
+							pp0.state_array_of);
+	pp_parse.desc_array = atb->adev.rom + of
+				+ get_unaligned_le16(&pp_parse.pp->pp0
+							.desc_array_of);
+	pp_parse.clk_array = atb->adev.rom + of
+				+ get_unaligned_le16(&pp_parse.pp->pp0
+							.clk_array_of);
+
+	state_get(&pp_parse, ulv, is_ulv_state);
+
+	r = 0;
+
+unlock_mutex:
+	mutex_unlock(&atb->mutex);
+	return r;
+
+}
+EXPORT_SYMBOL_GPL(atb_pp_ulv_state_get);
+
+static u8 is_performance_state(struct pp_parse *pp_parse, struct pp_state *s)
+{
+	u16 class_0;
+	u16 d_of;
+	struct pp_desc *d;
+
+	d_of = s->desc_idx * pp_parse->desc_array->entry_sz;
+	d = (struct pp_desc*)((u8*)&pp_parse->desc_array->descs[0] + d_of);
+
+	class_0 = get_unaligned_le16(&d->class_0);
+	class_0 &= PP_DESC_CLASS_UI_MASK;
+
+	if (class_0 == PP_DESC_CLASS_UI_PERFORMANCE)
+		return 1;
+	return 0;
+}
+
+long atb_pp_performance_state_get(struct atombios *atb,
+					struct atb_pp_state *performance)
+{
+	u16 of;
+	struct master_data_tbl *data_tbl;
+	struct pp_parse pp_parse;
+	long r;
+
+	mutex_lock(&atb->mutex);
+
+	of = get_unaligned_le16(&atb->hdr->master_data_tbl_of);
+	data_tbl = atb->adev.rom + of;
+
+	of = get_unaligned_le16(&data_tbl->list.pp_info);
+	pp_parse.pp = atb->adev.rom + of;
+
+	dev_info(atb->adev.dev, "atombios:pp_info (0x%04x) revision %u.%u\n",
+		of, pp_parse.pp->pp0.hdr.tbl_fmt_rev,
+		pp_parse.pp->pp0.hdr.tbl_content_rev);
+
+	if (pp_parse.pp->pp0.hdr.tbl_fmt_rev != 6
+			&& pp_parse.pp->pp0.hdr.tbl_content_rev != 1) {
+		dev_err(atb->adev.dev, "atombios:pp_info (0x%04x) revision %u.%u not supported\n",
+			of, pp_parse.pp->pp0.hdr.tbl_fmt_rev,
+			pp_parse.pp->pp0.hdr.tbl_content_rev);
+		r = -ATB_ERR;
+		goto unlock_mutex;
+	}
+
+	pp_parse.state_array = atb->adev.rom + of
+				+ get_unaligned_le16(&pp_parse.pp->
+							pp0.state_array_of);
+	pp_parse.desc_array = atb->adev.rom + of
+				+ get_unaligned_le16(&pp_parse.pp->pp0
+							.desc_array_of);
+	pp_parse.clk_array = atb->adev.rom + of
+				+ get_unaligned_le16(&pp_parse.pp->pp0
+							.clk_array_of);
+
+	state_get(&pp_parse, performance, is_performance_state);
+
+	r = 0;
+
+unlock_mutex:
+	mutex_unlock(&atb->mutex);
+	return r;
+
+}
+EXPORT_SYMBOL_GPL(atb_pp_performance_state_get);
+
 /* have thermal protection only if we have the proper internal thermal ctrler */
 long atb_have_thermal_protection(struct atombios *atb)
 {
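atb_pp_ulv_state_get() and atb_pp_performance_state_get() above are copies of atb_pp_emergency_state_get() apart from the selector and the destination: the pp_info lookup, the revision check and the three array offsets repeat three times. A possible follow-up — sketched here, not something this commit does — would hoist that boilerplate into one helper (pp_state_get is an invented name; everything else appears in this file):

static long pp_state_get(struct atombios *atb, struct atb_pp_state *state,
		u8 (*selector)(struct pp_parse *, struct pp_state *))
{
	u16 of;
	struct master_data_tbl *data_tbl;
	struct pp_parse pp_parse;
	long r;

	mutex_lock(&atb->mutex);

	/* locate the pp_info table via the master data table */
	of = get_unaligned_le16(&atb->hdr->master_data_tbl_of);
	data_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&data_tbl->list.pp_info);
	pp_parse.pp = atb->adev.rom + of;

	if (pp_parse.pp->pp0.hdr.tbl_fmt_rev != 6
			&& pp_parse.pp->pp0.hdr.tbl_content_rev != 1) {
		r = -ATB_ERR;
		goto unlock_mutex;
	}

	pp_parse.state_array = atb->adev.rom + of
		+ get_unaligned_le16(&pp_parse.pp->pp0.state_array_of);
	pp_parse.desc_array = atb->adev.rom + of
		+ get_unaligned_le16(&pp_parse.pp->pp0.desc_array_of);
	pp_parse.clk_array = atb->adev.rom + of
		+ get_unaligned_le16(&pp_parse.pp->pp0.clk_array_of);

	/* the selector is the only thing that differs per getter */
	state_get(&pp_parse, state, selector);
	r = 0;

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}

Each exported getter would then reduce to one line, e.g. return pp_state_get(atb, ulv, is_ulv_state);. One observation, offered as a hedge rather than a fix: the revision check rejects a table only when both the format revision differs from 6 and the content revision differs from 1, so e.g. 6.2 or 7.1 passes; if the intent is to accept exactly revision 6.1, the && would need to be ||.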
File drivers/gpu/alga/amd/si/dyn_pm/ctx.c changed (mode: 100644) (index 3fc0e55..1f03c4a)

 #include "ctx.h"
 #include "private.h"
 
+#ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
+static void atb_pp_state_dump(struct atb_pp_state *s, char *name)
+{
+	u8 lvl_idx;
+	for (lvl_idx = 0; lvl_idx < s->lvls_n; ++lvl_idx) {
+		struct atb_pp_lvl *lvl;
+
+		lvl = &s->lvls[lvl_idx];
+		if (IS_VDDC_LEAKAGE_IDX(lvl->vddc_id)) {
+			LOG("atb_pp_%s_lvl[%u]:vddc=0x%04x(leakage index) engine clock=%ukHz memory clock=%ukHz",
+				name, lvl_idx, lvl->vddc_id, lvl->eng_clk * 10,
+				lvl->mem_clk * 10);
+		} else {
+			LOG("atb_pp_%s_lvl[%u]:vddc=%umV engine clock=%ukHz memory clock=%ukHz",
+				name, lvl_idx, lvl->vddc_id, lvl->eng_clk * 10,
+				lvl->mem_clk * 10);
+		}
+	}
+}
+
+static void atb_voltage_on_clk_dep_tbl_dump(
+					struct atb_voltage_on_clk_dep_tbl *tbl)
+{
+	u8 entry_idx;
+	for (entry_idx = 0; entry_idx < tbl->entries_n; ++entry_idx) {
+		struct atb_voltage_on_clk_dep *step;
+
+		step = &tbl->entries[entry_idx];
+		if (IS_VDDC_LEAKAGE_IDX(step->voltage_id)) {
+			LOG("vddc_dep_on_sclk[%u]:clk=%ukHz voltage=0x%04x(leakage index)",
+				entry_idx, step->clk * 10, step->voltage_id);
+		} else {
+			LOG("vddc_dep_on_sclk[%u]:clk=%ukHz voltage=%umV",
+				entry_idx, step->clk * 10, step->voltage_id);
+		}
+	}
+}
+
+static void atb_cac_leakage_tbl_dump(struct atb_cac_leakage_tbl *tbl)
+{
+	u8 entry_idx;
+	for (entry_idx = 0; entry_idx < tbl->entries_n; ++entry_idx) {
+		struct atb_cac_leakage *step;
+
+		step = &tbl->entries[entry_idx];
+		LOG("cac_leakage[%u]:vddc=%umV leakage=%u", entry_idx,
+			step->vddc_mv, step->leakage);
+	}
+}
+
+static void atb_vddc_phase_shed_limits_tbl_dump(
+				struct atb_vddc_phase_shed_limits_tbl *tbl)
+{
+	u8 entry_idx;
+	for (entry_idx = 0; entry_idx < tbl->entries_n; ++entry_idx) {
+		struct atb_vddc_phase_shed_limits *step;
+
+		step = &tbl->entries[entry_idx];
+		LOG("atb_vddc_phase_shed_limits[%u]:vddc=%umV engine clock=%ukHz memory clock=%ukHz",
+			entry_idx, step->vddc_mv, step->sclk * 10,
+			step->mclk * 10);
+	}
+}
+
+static void atb_voltage_tbl_dump(struct atb_voltage_tbl *tbl, char *name)
+{
+	u8 entry_idx;
+
+	LOG("atb_voltage_%s:phase_delay=%u mask_low=0x%08x", name,
+		tbl->phase_delay, tbl->mask_low);
+	for (entry_idx = 0; entry_idx < tbl->entries_n; ++entry_idx) {
+		struct atb_voltage_tbl_entry *entry;
+
+		entry = &tbl->entries[entry_idx];
+		LOG("atb_voltage_%s[%u]:smio_low=0x%08x val_mv=%u",
+			name, entry_idx, entry->smio_low, entry->val_mv);
+	}
+}
+#else
+static void atb_pp_state_dump(struct atb_pp_state *s, char *name){}
+static void atb_voltage_on_clk_dep_tbl_dump(
+				struct atb_voltage_on_clk_dep_tbl *tbl){}
+static void atb_cac_leakage_tbl_dump(struct atb_cac_leakage_tbl *tbl){}
+static void atb_vddc_phase_shed_limits_tbl_dump(
+				struct atb_vddc_phase_shed_limits_tbl *tbl){}
+static void atb_voltage_tbl_dump(struct atb_voltage_tbl *tbl, char *name){}
+#endif
+
 static long voltages_ctl_caps_get(struct ctx *ctx)
 {
 	struct dev_drv_data *dd;
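The block above is the heart of this change: the LOG loops that used to sit inline in ctx_init() (borrowing r as a loop counter) become named dump helpers, compiled to empty stubs when CONFIG_ALGA_AMD_SI_DYN_PM_LOG is off, so every call site below can stay unconditional. A minimal standalone illustration of the pattern — all names here are invented for the example:

#include <stdio.h>

struct example_tbl {
	unsigned char entries_n;
	unsigned int *entries;
};

#ifdef EXAMPLE_LOG
static void example_tbl_dump(struct example_tbl *tbl)
{
	unsigned char i;

	for (i = 0; i < tbl->entries_n; ++i)
		printf("entry[%u]=%u\n", i, tbl->entries[i]);
}
#else
/* empty stub: calls compile away when logging is configured out */
static void example_tbl_dump(struct example_tbl *tbl){}
#endif

An empty static function is trivially inlined and discarded by the compiler, so the disabled build pays nothing for the unconditional calls, and the #ifdef noise disappears from the main flow.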
... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
 			dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc voltage table\n");
 			goto err;
 		}
+		atb_voltage_tbl_dump(&ctx->atb_vddc_tbl, "vddc");
 	}
 
 	if (ctx->voltage_caps & VOLTAGE_CAPS_MVDD_CTL_ENA) {

... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
 			dev_err(&dev->dev, "dyn_pm:unable to fetch the mvddc voltage table\n");
 			goto err;
 		}
+		atb_voltage_tbl_dump(&ctx->atb_mvddc_tbl, "mvddc");
 	}
 
 	if (ctx->voltage_caps & VOLTAGE_CAPS_VDDCI_CTL_ENA) {

... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
 			dev_err(&dev->dev, "dyn_pm:unable to fetch the vddci voltage table\n");
 			goto err;
 		}
+		atb_voltage_tbl_dump(&ctx->atb_vddci_tbl, "vddci");
 	}
 
 	if (ctx->voltage_caps & VOLTAGE_CAPS_VDDC_PHASE_SHED_CTL_ENA) {

... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
 			dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc phase shedding table\n");
 			goto err;
 		}
+		atb_voltage_tbl_dump(&ctx->atb_vddc_phase_shed_tbl,
+					"vddc_phase_shed");
+
+		r = atb_vddc_phase_shed_limits_tbl_get(dd->atb,
+				&ctx->atb_vddc_phase_shed_limits_tbl);
+		if (r == -ATB_ERR) {
+			dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc phase shedding \n");
+			goto err;
+		}
+		atb_vddc_phase_shed_limits_tbl_dump(
+				&ctx->atb_vddc_phase_shed_limits_tbl);
+
 	}
 
 	if (ctx->platform_caps & ATB_PP_PLATFORM_CAPS_NEW_CAC_VOLTAGE) {

... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
 				&ctx->atb_vddc_dep_on_sclk_tbl);
 		if (r == -ATB_ERR) {
 			dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc on sclk dependency table\n");
-			goto err;
+			goto err_free_vddc_phase_shed_limits_tbl_entries;
 		}
-#ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
-		for (r = 0; r < ctx->atb_vddc_dep_on_sclk_tbl.entries_n; ++r) {
-			struct atb_voltage_on_clk_dep *step;
-
-			step = &ctx->atb_vddc_dep_on_sclk_tbl.entries[r];
-			if (IS_VDDC_LEAKAGE_IDX(step->voltage_id)) {
-				LOG("vddc_dep_on_sclk[%ld]:clk=%ukHz voltage=0x%04x(leakage index)",
-					r, step->clk * 10, step->voltage_id);
-			} else {
-				LOG("vddc_dep_on_sclk[%ld]:clk=%ukHz voltage=%umV",
-					r, step->clk * 10, step->voltage_id);
-			}
-		}
-#endif
+		atb_voltage_on_clk_dep_tbl_dump(&ctx->atb_vddc_dep_on_sclk_tbl);
 	}
 
 	r = atb_cac_leakage_tbl_get(dd->atb, &ctx->atb_cac_leakage_tbl);

... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
 		dev_err(&dev->dev, "dyn_pm:unable to fetch the cac leakage table\n");
 		goto err_free_vddc_dep_on_sclk_tbl_entries;
 	}
-#ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
-	for (r = 0; r < ctx->atb_cac_leakage_tbl.entries_n; ++r) {
-		struct atb_cac_leakage *step;
-
-		step = &ctx->atb_cac_leakage_tbl.entries[r];
-		LOG("cac_leakage[%ld]:vddc=%umV leakage=%u", r, step->vddc_mv,
-			step->leakage);
-	}
-#endif
+	atb_cac_leakage_tbl_dump(&ctx->atb_cac_leakage_tbl);
 
-	r = atb_vddc_phase_shed_limits_tbl_get(dd->atb,
-			&ctx->atb_vddc_phase_shed_limits_tbl);
+	r = atb_pp_emergency_state_get(dd->atb, &ctx->atb_emergency_state);
 	if (r == -ATB_ERR) {
-		dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc phase shedding \n");
+		dev_err(&dev->dev, "dyn_pm:unable to fetch the emergency state\n");
 		goto err_free_cac_leakage_tbl_entries;
 	}
-#ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
-	for (r = 0; r < ctx->atb_vddc_phase_shed_limits_tbl.entries_n; ++r) {
-		struct atb_vddc_phase_shed_limits *step;
-
-		step = &ctx->atb_vddc_phase_shed_limits_tbl.entries[r];
-		LOG("atb_vddc_phase_shed_limits[%ld]:vddc=%umV engine clock=%ukHz memory clock=%ukHz",
-			r, step->vddc_mv, step->sclk * 10, step->mclk * 10);
-	}
-#endif
+	atb_pp_state_dump(&ctx->atb_emergency_state, "emergency");
 
-	r = atb_pp_emergency_state_get(dd->atb, &ctx->atb_emergency_state);
+	r = atb_pp_ulv_state_get(dd->atb, &ctx->atb_ulv_state);
 	if (r == -ATB_ERR) {
-		dev_err(&dev->dev, "dyn_pm:unable to fetch the emergency state\n");
-		goto err_free_vddc_phase_shed_limits_tbl_entries;
+		dev_err(&dev->dev, "dyn_pm:unable to fetch the ulv state\n");
+		goto err_free_cac_leakage_tbl_entries;
 	}
-#ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
-	for (r = 0; r < ctx->atb_emergency_state.lvls_n; ++r) {
-		struct atb_pp_lvl *lvl;
+	atb_pp_state_dump(&ctx->atb_ulv_state, "ulv");
 
-		lvl = &ctx->atb_emergency_state.lvls[r];
-		if (IS_VDDC_LEAKAGE_IDX(lvl->vddc_id)) {
-			LOG("atb_pp_emergency_lvl[%ld]:vddc=0x%04x(leakage index) engine clock=%ukHz memory clock=%ukHz",
-				r, lvl->vddc_id, lvl->eng_clk * 10,
-				lvl->mem_clk * 10);
-		} else {
-			LOG("atb_atb_pp_emergency_lvl[%ld]:vddc=%umV engine clock=%ukHz memory clock=%ukHz",
-				r, lvl->vddc_id, lvl->eng_clk * 10,
-				lvl->mem_clk * 10);
-		}
+	r = atb_pp_performance_state_get(dd->atb, &ctx->atb_performance_state);
+	if (r == -ATB_ERR) {
+		dev_err(&dev->dev, "dyn_pm:unable to fetch the performance state\n");
+		goto err_free_cac_leakage_tbl_entries;
 	}
-#endif
+	atb_pp_state_dump(&ctx->atb_performance_state, "performance");
 	return 0;
 
-err_free_vddc_phase_shed_limits_tbl_entries:
-	if (ctx->atb_vddc_phase_shed_limits_tbl.entries_n)
-		kfree(ctx->atb_vddc_phase_shed_limits_tbl.entries);
 err_free_cac_leakage_tbl_entries:
 	if (ctx->atb_cac_leakage_tbl.entries_n)
 		kfree(ctx->atb_cac_leakage_tbl.entries);

... err_free_vddc_dep_on_sclk_tbl_entries:
 	if (ctx->platform_caps & ATB_PP_PLATFORM_CAPS_NEW_CAC_VOLTAGE)
 		if (ctx->atb_cac_leakage_tbl.entries_n)
 			kfree(ctx->atb_cac_leakage_tbl.entries);
+err_free_vddc_phase_shed_limits_tbl_entries:
+	if (ctx->atb_vddc_phase_shed_limits_tbl.entries_n)
+		kfree(ctx->atb_vddc_phase_shed_limits_tbl.entries);
 err:
 	return -SI_ERR;
 }
 
 void ctx_free(struct ctx *ctx)
 {
-	if (ctx->atb_vddc_phase_shed_limits_tbl.entries_n)
-		kfree(ctx->atb_vddc_phase_shed_limits_tbl.entries);
 	if (ctx->atb_cac_leakage_tbl.entries_n)
 		kfree(ctx->atb_cac_leakage_tbl.entries);
 	if (ctx->platform_caps & ATB_PP_PLATFORM_CAPS_NEW_CAC_VOLTAGE)
 		if (ctx->atb_vddc_dep_on_sclk_tbl.entries_n)
 			kfree(ctx->atb_vddc_dep_on_sclk_tbl.entries);
+	if (ctx->atb_vddc_phase_shed_limits_tbl.entries_n)
+		kfree(ctx->atb_vddc_phase_shed_limits_tbl.entries);
 	kfree(ctx);
 }
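The label shuffle at the end follows from the new fetch order: atb_vddc_phase_shed_limits_tbl is now filled first, inside the phase-shed branch of ctx_init(), so its free label moves to the bottom of the unwind chain and ctx_free() releases it last. A minimal standalone sketch of the goto-unwind idiom the reordering preserves (userspace stand-ins; the kernel code uses kfree and its own labels):

#include <stdlib.h>

static int do_work(void *a, void *b) { return a && b ? 0 : -1; }

static long acquire_two(void)
{
	void *first, *second;

	first = malloc(16);			/* acquired first */
	if (first == NULL)
		goto err;
	second = malloc(16);			/* acquired second */
	if (second == NULL)
		goto err_free_first;
	if (do_work(first, second) < 0)
		goto err_free_second;
	free(second);
	free(first);
	return 0;

err_free_second:				/* reverse acquisition order: */
	free(second);				/* second released first, */
err_free_first:
	free(first);				/* first released last */
err:
	return -1;
}

Keeping the labels in reverse acquisition order means any failure point can jump to exactly the releases it needs and fall through the rest, which is why moving one fetch earlier in ctx_init() drags its label to the end of the chain.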
File drivers/gpu/alga/amd/si/dyn_pm/smc_state_tbl.c changed (mode: 100644) (index 8ef4bd6..f28e021)

... static void smc_vddc_tbl_fill(struct ctx *ctx,
 		}
 	}
 	smc_state_tbl->max_vddc_idx = max_vddc_idx;
-
-#ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
-	LOG("smc vddc voltage table, mask(for smio_low)=0x%08x, max_vddc_idx=%u",
-		ctx->atb_vddc_tbl.mask_low, max_vddc_idx);
-	for (i = 0; i < ctx->atb_vddc_tbl.entries_n; ++i) {
-		LOG(" %u:%u mV smio_low=0x%08x", i,
-			ctx->atb_vddc_tbl.entries[i].val_mv,
-			ctx->atb_vddc_tbl.entries[i].smio_low);
-	}
-#endif
 }
+
 static void smc_mvdd_tbl_fill(struct ctx *ctx,
 				struct smc_state_tbl *smc_state_tbl)
 {

... static void smc_mvdd_tbl_fill(struct ctx *ctx,
 	put_unaligned_be32(ctx->atb_mvddc_tbl.mask_low,
 			&smc_state_tbl->voltage_mask_tbl.mask_low[
 						SMC_VOLTAGE_MASK_MVDD]);
-
-#ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
-	LOG("smc mvdd voltage table, mask(for smio_low)=0x%08x",
-		ctx->atb_mvddc_tbl.mask_low);
-	for (i = 0; i < ctx->atb_mvddc_tbl.entries_n; ++i) {
-		LOG(" %u:%u mV smio_low=0x%08x", i,
-			ctx->atb_mvddc_tbl.entries[i].val_mv,
-			ctx->atb_mvddc_tbl.entries[i].smio_low);
-	}
-#endif
 }
 
 static void smc_vddci_tbl_fill(struct ctx *ctx,

... static void smc_vddci_tbl_fill(struct ctx *ctx,
 	put_unaligned_be32(ctx->atb_vddci_tbl.mask_low,
 			&smc_state_tbl->voltage_mask_tbl.mask_low[
 						SMC_VOLTAGE_MASK_VDDCI]);
-
-#ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
-	LOG("smc vddci voltage table, mask(for smio_low)=0x%08x",
-		ctx->atb_vddci_tbl.mask_low);
-	for (i = 0; i < ctx->atb_vddci_tbl.entries_n; ++i) {
-		LOG(" %u:%u mV smio_low=0x%08x", i,
-			ctx->atb_vddci_tbl.entries[i].val_mv,
-			ctx->atb_vddci_tbl.entries[i].smio_low);
-	}
-#endif
 }
 
 static void smc_vddc_phase_shed_tbl_fill(struct ctx *ctx,

... static void smc_vddc_phase_shed_tbl_fill(struct ctx *ctx,
 	put_unaligned_be32(ctx->atb_vddc_phase_shed_tbl.mask_low,
 			&smc_state_tbl->phase_mask_tbl.mask_low[
 						SMC_VOLTAGE_MASK_VDDC]);
-
-#ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
-	LOG("smc vddc phase shedding table, mask(for smio_low)=0x%08x",
-		ctx->atb_vddc_phase_shed_tbl.mask_low);
-	for (i = 0; i < ctx->atb_vddc_phase_shed_tbl.entries_n; ++i) {
-		LOG(" %u:%u smio_low=0x%08x", i,
-			ctx->atb_vddc_phase_shed_tbl.entries[i].val_mv,
-			ctx->atb_vddc_phase_shed_tbl.entries[i].smio_low);
-	}
-#endif
 }
 
 /*

... long smc_state_tbl_fill(struct ctx *ctx, struct smc_state_tbl *tbl)
 		if (r == -SI_ERR)
 			return -SI_ERR;
 	}
-
+
+	tbl->driver_state = tbl->initial_state;
 	//TODO
 	return 0;
 }
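Two things change in this file besides dropping the per-fill LOG loops (their job now belongs to the ctx.c dump helpers, which print the same data from the source tables): smc_state_tbl_fill() now seeds the driver state from the initial state with a plain struct assignment, which is safe because every member is already stored in the SMC's packed big-endian format, so the copy duplicates the encoded image byte for byte. The fills themselves go through put_unaligned_be32() because the __packed table gives no alignment guarantees and the SMC wants big-endian. A userspace stand-in for that kernel helper, to show what it must do (example_put_be32 is invented; the real helper comes from the kernel's unaligned-access support):

#include <stdint.h>

static void example_put_be32(uint32_t val, void *p)
{
	uint8_t *b = p;

	/* byte-at-a-time store: no alignment requirement, and the
	 * most significant byte lands first, i.e. big-endian */
	b[0] = val >> 24;
	b[1] = val >> 16;
	b[2] = val >> 8;
	b[3] = val;
}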
File drivers/gpu/alga/amd/si/smc_tbls.h changed (mode: 100644) (index 22b5657..964bb8d)

 #define SMC_FW_HDR_STATE_TBL 0x10
 #define SMC_FW_HDR_FAN_TBL 0x14
 #define SMC_FW_HDR_CAC_CFG_TBL 0x18
-#define SMC_FW_HDR_MC_REG_TBL 0x24
-#define SMC_FW_HDR_MC_ARB_DRAM_TBL 0x30
+#define SMC_FW_HDR_MC_TBL 0x24
+#define SMC_FW_HDR_MC_ARB_TBL 0x30
 #define SMC_FW_HDR_SPLL_TBL 0x38
 #define SMC_FW_HDR_DTE_CFG 0x40
 #define SMC_FW_HDR_PAPM_PARAMS 0x48
 
 /*----------------------------------------------------------------------------*/
-struct smc_mc_arb_dram_regs {
-	__be32 mc_arb_dram_timing_x_0;
-	__be32 mc_arb_dram_timing_x_1;
-	u8 mc_arb_rfsh_rate;
-	u8 mc_arb_burst_time;
+struct smc_mc_arb_regs {
+	__be32 dram_timing_x_0;
+	__be32 dram_timing_x_1;
+	u8 rfsh_rate;
+	u8 burst_time;
 	u8 pad[2];
 } __packed;
 
-struct smc_mc_arb_dram_tbl {
+#define SMC_ARB_REGS_SETS_N_MAX 16
+struct smc_mc_arb_tbl {
 	u8 arb_current;
 	u8 rsvd[3];
-	struct smc_mc_arb_dram_regs sets[16];
+	struct smc_mc_arb_regs sets[SMC_ARB_REGS_SETS_N_MAX];
+} __packed;
+/*----------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------*/
+#define SMC_MC_REGS_N_MAX 16
+#define SMC_MC_REGS_SETS_N_MAX 20
+struct smc_mc_reg_addr {
+	__be16 s0;
+	__be16 s1;
+} __packed;
+
+struct smc_mc_regs_set {
+	__be32 vals[SMC_MC_REGS_N_MAX];
+} __packed;
+
+struct smc_mc_tbl {
+	u8 last;
+	u8 rsvd[3];
+	struct smc_mc_reg_addr addrs[SMC_MC_REGS_N_MAX];
+	struct smc_mc_regs_set sets[SMC_MC_REGS_SETS_N_MAX];
 } __packed;
 /*----------------------------------------------------------------------------*/
 
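The renames drop the redundant mc_arb_dram_/mc_arb_ member prefixes, replace the magic sets[16] with SMC_ARB_REGS_SETS_N_MAX, and add the memory-controller register table that SMC_FW_HDR_MC_TBL now points at. Since all of these are __packed images consumed by SMC firmware, pinning their sizes at compile time is cheap insurance; a sketch of how that could look with the kernel's BUILD_BUG_ON (the expected byte counts below are derived from the definitions above, not from SMC documentation):

#include <linux/build_bug.h>

static inline void smc_tbls_layout_check(void)
{
	/* two __be32 + two u8 + 2 pad bytes, packed: 12 bytes */
	BUILD_BUG_ON(sizeof(struct smc_mc_arb_regs) != 12);
	/* 1 current + 3 reserved + the fixed array of register sets */
	BUILD_BUG_ON(sizeof(struct smc_mc_arb_tbl) !=
		4 + SMC_ARB_REGS_SETS_N_MAX * sizeof(struct smc_mc_arb_regs));
	/* 1 last + 3 reserved + address pairs + value sets */
	BUILD_BUG_ON(sizeof(struct smc_mc_tbl) !=
		4 + SMC_MC_REGS_N_MAX * sizeof(struct smc_mc_reg_addr)
		+ SMC_MC_REGS_SETS_N_MAX * sizeof(struct smc_mc_regs_set));
}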
... struct smc_voltage_val {
 #define SMC_MC_FLGS_PG_ENA BIT(4)
 
 struct smc_lvl {
-	u8 ac_idx;//XXX:initstate
+	u8 mc_set_idx;//XXX:initstate
 	u8 disp_watermark;
 	u8 pcie_gen;//XXX:initstate
 	u8 uvd_watermark;

... struct smc_lvl {
 	u8 hysteresis_up;
 	u8 hysteresis_down;
 	u8 state_flgs;
-	u8 arb_refresh_state;//XXX:initstate
+	u8 mc_arb_set_idx;//XXX:initstate
 	__be32 sq_pwr_throttle_0;//XXX:initstate
 	__be32 sq_pwr_throttle_1;//XXX:initstate
 	__be32 max_pwred_up_cu;
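The two smc_lvl renames line up with the new table names: each performance level now carries an index into smc_mc_tbl.sets (mc_set_idx, formerly ac_idx) and into smc_mc_arb_tbl.sets (mc_arb_set_idx, formerly arb_refresh_state), making the struct self-describing. A hedged sketch of how a consumer might bound-check and use one of those indices — the helper is invented for illustration; the types and constants are from this header:

static const struct smc_mc_arb_regs *
lvl_arb_regs(const struct smc_mc_arb_tbl *arb_tbl, const struct smc_lvl *lvl)
{
	/* mc_arb_set_idx selects one set of pre-cooked DRAM timing
	 * registers; reject indices beyond the fixed array */
	if (lvl->mc_arb_set_idx >= SMC_ARB_REGS_SETS_N_MAX)
		return NULL;
	return &arb_tbl->sets[lvl->mc_arb_set_idx];
}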