List of commits:
Subject Hash Author Date (UTC)
atombios data and dump 702d5ca1cb1d6d28ea33e5c550ece8a2f04976cc Sylvain BERTRAND 2013-11-13 07:56:18
tens of thousands of lines to refactor e6067d9bf38a4f6d71ecc8cfe3bb0c52f7a44834 Sylvain BERTRAND 2013-11-13 01:51:14
shame on me 8d3c8a1c1cb494c723603b7062a5b0cdaa4742be Sylvain BERTRAND 2013-11-04 00:37:42
writeX and readX already do the leX swapping 3b7d5a29f98160ed555309952a5e7f2a1f265684 Sylvain BERTRAND 2013-10-27 11:39:42
first part of dynamic clock/power management 732f467ca8db8ba676fdce6a5387b1fa9bc6964b Sylvain BERTRAND 2013-10-23 05:01:48
dce:upstream new addr conf reg f7d4efda311c439b3a084b935e563eab47216613 Sylvain BERTRAND 2013-10-21 02:33:41
atombios cleanup 74f7fae53d33509b2a160d8060f9906ca099772a Sylvain BERTRAND 2013-10-21 00:45:57
upstream: do not force the cp internal int state b9bdb0bd3ab4a2e2407720f077ff47dc7c0c6d32 Sylvain BERTRAND 2013-09-24 01:23:52
upstream: disable grph block 3c8dc463ba4bcf260eb8d5d5e5f6b8afc4871b46 Sylvain BERTRAND 2013-09-20 11:56:00
use new dma api helper 837cd3765e616f3446693ec685e45cf47d1967af Sylvain BERTRAND 2013-09-20 11:19:48
improved/simpler userland API d2249ed8838a4bb28b97d0f89f769170e10d85b2 Sylvain BERTRAND 2013-09-20 11:15:28
added dce sysfs display h/v sizes 5b22865b9dc7c15d8cf62b3e33b82b61a3938c14 Sylvain BERTRAND 2013-09-18 16:35:45
strange paths modification (kbuild trick?) c4a66ad58f11419cc530e10467beb9bc9fd97e23 Sylvain BERTRAND 2013-09-15 19:46:12
missed a userland shared header file 6f0e2e20070cfb7e64a227ad29a8b4957653d66c Sylvain BERTRAND 2013-09-14 15:50:37
cleanly support userspace headers 34a4458bcd98dcde266ef09bac6db7ad7da795b5 Sylvain BERTRAND 2013-09-14 14:55:45
more robust patch logic 5f607dffcf1515b9a01ed8220428453412d3a6d0 Sylvain BERTRAND 2013-09-14 11:59:16
automate installation in a linux src tree e35d9c2014df5132c9001e325b3b477529f2ba99 Sylvain BERTRAND 2013-09-13 23:14:54
timeouts to userspace 22117a1689daf66e04d4d2259b698857899041ba Sylvain BERTRAND 2013-09-01 21:26:45
events provide a monotonic raw timespec 9fd5264d0d9420471a14f6a60b444556c39c89cc Sylvain BERTRAND 2013-08-31 02:39:32
relax dma fence timeout, proper event dequeuing 0b80be3780e995eca635eccf7628006750e48e5e Sylvain BERTRAND 2013-08-31 00:49:12
Commit 702d5ca1cb1d6d28ea33e5c550ece8a2f04976cc - atombios data and dump
* fetch more powerplay states
* clean the dump functions
Author: Sylvain BERTRAND
Author date (UTC): 2013-11-13 07:56
Committer name: Sylvain BERTRAND
Committer date (UTC): 2013-11-13 07:56
Parent(s): e6067d9bf38a4f6d71ecc8cfe3bb0c52f7a44834
Signer:
Signing key:
Signing status: N
Tree: e12013aebcdd08daefdc3201e410a69968e8b34e
File Lines added Lines deleted
drivers/gpu/alga/amd/atombios/pp.c 148 16
drivers/gpu/alga/amd/si/drv.h 0 3
drivers/gpu/alga/amd/si/dyn_pm/ctx.c 123 58
drivers/gpu/alga/amd/si/dyn_pm/ctx.h 3 1
drivers/gpu/alga/amd/si/dyn_pm/dyn_pm.c 13 10
drivers/gpu/alga/amd/si/dyn_pm/private.h 13 4
drivers/gpu/alga/amd/si/dyn_pm/smc_emergency_state.c 5 1
drivers/gpu/alga/amd/si/dyn_pm/smc_initial_state.c 4 2
drivers/gpu/alga/amd/si/dyn_pm/smc_state_tbl.c 3 41
drivers/gpu/alga/amd/si/smc.c 1 2
drivers/gpu/alga/amd/si/smc_tbls.h 32 11
include/alga/amd/atombios/pp.h 4 0
File drivers/gpu/alga/amd/atombios/pp.c changed (mode: 100644) (index 518f8fa..29caa02)
... ... static long state_process(struct pp_parse *pp_parse, struct pp_state *s,
119 119 return 0;
120 120 }
121 121
122 static u8 is_emergency_state(struct pp_parse *pp_parse, struct pp_state *s)
123 {
124 u16 class_0;
125 u16 d_of;
126 struct pp_desc *d;
127
128 d_of = s->desc_idx * pp_parse->desc_array->entry_sz;
129 d = (struct pp_desc*)((u8*)&pp_parse->desc_array->descs[0] + d_of);
130
131 class_0 = get_unaligned_le16(&d->class_0);
132
133 if (class_0 & PP_DESC_CLASS_0_EMERGENCY)
134 return 1;
135 return 0;
136 }
137
138 122 static void state_get(struct pp_parse *pp_parse,
139 123 struct atb_pp_state *atb_pp_state,
140 124 u8 (*state_selector)(struct pp_parse *, struct pp_state *))

... ... static void state_get(struct pp_parse *pp_parse,
163 147 }
164 148 }
165 149
150 static u8 is_emergency_state(struct pp_parse *pp_parse, struct pp_state *s)
151 {
152 u16 class_0;
153 u16 d_of;
154 struct pp_desc *d;
155
156 d_of = s->desc_idx * pp_parse->desc_array->entry_sz;
157 d = (struct pp_desc*)((u8*)&pp_parse->desc_array->descs[0] + d_of);
158
159 class_0 = get_unaligned_le16(&d->class_0);
160
161 if (class_0 & PP_DESC_CLASS_0_EMERGENCY)
162 return 1;
163 return 0;
164 }
165
166 166 long atb_pp_emergency_state_get(struct atombios *atb,
167 167 struct atb_pp_state *emergency)
168 168 {

... ... unlock_mutex:
213 213 }
214 214 EXPORT_SYMBOL_GPL(atb_pp_emergency_state_get);
215 215
216 static u8 is_ulv_state(struct pp_parse *pp_parse, struct pp_state *s)
217 {
218 u16 class_1;
219 u16 d_of;
220 struct pp_desc *d;
221
222 d_of = s->desc_idx * pp_parse->desc_array->entry_sz;
223 d = (struct pp_desc*)((u8*)&pp_parse->desc_array->descs[0] + d_of);
224
225 class_1 = get_unaligned_le16(&d->class_1);
226
227 if (class_1 & PP_DESC_CLASS_1_ULV)
228 return 1;
229 return 0;
230 }
231
232 long atb_pp_ulv_state_get(struct atombios *atb, struct atb_pp_state *ulv)
233 {
234 u16 of;
235 struct master_data_tbl *data_tbl;
236 struct pp_parse pp_parse;
237 long r;
238
239 mutex_lock(&atb->mutex);
240
241 of = get_unaligned_le16(&atb->hdr->master_data_tbl_of);
242 data_tbl = atb->adev.rom + of;
243
244 of = get_unaligned_le16(&data_tbl->list.pp_info);
245 pp_parse.pp = atb->adev.rom + of;
246
247 dev_info(atb->adev.dev, "atombios:pp_info (0x%04x) revision %u.%u\n",
248 of, pp_parse.pp->pp0.hdr.tbl_fmt_rev,
249 pp_parse.pp->pp0.hdr.tbl_content_rev);
250
251 if (pp_parse.pp->pp0.hdr.tbl_fmt_rev != 6
252 && pp_parse.pp->pp0.hdr.tbl_content_rev != 1) {
253 dev_err(atb->adev.dev, "atombios:pp_info (0x%04x) revision %u.%u not supported\n",
254 of, pp_parse.pp->pp0.hdr.tbl_fmt_rev,
255 pp_parse.pp->pp0.hdr.tbl_content_rev);
256 r = -ATB_ERR;
257 goto unlock_mutex;
258 }
259
260 pp_parse.state_array = atb->adev.rom + of
261 + get_unaligned_le16(&pp_parse.pp->
262 pp0.state_array_of);
263 pp_parse.desc_array = atb->adev.rom + of
264 + get_unaligned_le16(&pp_parse.pp->pp0
265 .desc_array_of);
266 pp_parse.clk_array = atb->adev.rom + of
267 + get_unaligned_le16(&pp_parse.pp->pp0
268 .clk_array_of);
269
270 state_get(&pp_parse, ulv, is_ulv_state);
271
272 r = 0;
273
274 unlock_mutex:
275 mutex_unlock(&atb->mutex);
276 return r;
277
278 }
279 EXPORT_SYMBOL_GPL(atb_pp_ulv_state_get);
280
281 static u8 is_performance_state(struct pp_parse *pp_parse, struct pp_state *s)
282 {
283 u16 class_0;
284 u16 d_of;
285 struct pp_desc *d;
286
287 d_of = s->desc_idx * pp_parse->desc_array->entry_sz;
288 d = (struct pp_desc*)((u8*)&pp_parse->desc_array->descs[0] + d_of);
289
290 class_0 = get_unaligned_le16(&d->class_0);
291 class_0 &= PP_DESC_CLASS_UI_MASK;
292
293 if (class_0 == PP_DESC_CLASS_UI_PERFORMANCE)
294 return 1;
295 return 0;
296 }
297
298 long atb_pp_performance_state_get(struct atombios *atb,
299 struct atb_pp_state *performance)
300 {
301 u16 of;
302 struct master_data_tbl *data_tbl;
303 struct pp_parse pp_parse;
304 long r;
305
306 mutex_lock(&atb->mutex);
307
308 of = get_unaligned_le16(&atb->hdr->master_data_tbl_of);
309 data_tbl = atb->adev.rom + of;
310
311 of = get_unaligned_le16(&data_tbl->list.pp_info);
312 pp_parse.pp = atb->adev.rom + of;
313
314 dev_info(atb->adev.dev, "atombios:pp_info (0x%04x) revision %u.%u\n",
315 of, pp_parse.pp->pp0.hdr.tbl_fmt_rev,
316 pp_parse.pp->pp0.hdr.tbl_content_rev);
317
318 if (pp_parse.pp->pp0.hdr.tbl_fmt_rev != 6
319 && pp_parse.pp->pp0.hdr.tbl_content_rev != 1) {
320 dev_err(atb->adev.dev, "atombios:pp_info (0x%04x) revision %u.%u not supported\n",
321 of, pp_parse.pp->pp0.hdr.tbl_fmt_rev,
322 pp_parse.pp->pp0.hdr.tbl_content_rev);
323 r = -ATB_ERR;
324 goto unlock_mutex;
325 }
326
327 pp_parse.state_array = atb->adev.rom + of
328 + get_unaligned_le16(&pp_parse.pp->
329 pp0.state_array_of);
330 pp_parse.desc_array = atb->adev.rom + of
331 + get_unaligned_le16(&pp_parse.pp->pp0
332 .desc_array_of);
333 pp_parse.clk_array = atb->adev.rom + of
334 + get_unaligned_le16(&pp_parse.pp->pp0
335 .clk_array_of);
336
337 state_get(&pp_parse, performance, is_performance_state);
338
339 r = 0;
340
341 unlock_mutex:
342 mutex_unlock(&atb->mutex);
343 return r;
344
345 }
346 EXPORT_SYMBOL_GPL(atb_pp_performance_state_get);
347
216 348 /* have thermal protection only if we have the proper internal thermal ctrler */
217 349 long atb_have_thermal_protection(struct atombios *atb)
218 350 {
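The three state getters above share the same lock/parse boilerplate and differ only in the selector predicate handed to state_get(). Adding a getter for another descriptor class therefore reduces to writing a new predicate; a minimal sketch, where PP_DESC_CLASS_UI_BATTERY is a hypothetical class value used only for illustration:

	static u8 is_battery_state(struct pp_parse *pp_parse, struct pp_state *s)
	{
		u16 class_0;
		u16 d_of;
		struct pp_desc *d;

		/* locate this state's descriptor, as the other predicates do */
		d_of = s->desc_idx * pp_parse->desc_array->entry_sz;
		d = (struct pp_desc*)((u8*)&pp_parse->desc_array->descs[0] + d_of);

		class_0 = get_unaligned_le16(&d->class_0);
		class_0 &= PP_DESC_CLASS_UI_MASK;

		if (class_0 == PP_DESC_CLASS_UI_BATTERY) /* hypothetical value */
			return 1;
		return 0;
	}

Passing it as state_get(&pp_parse, battery, is_battery_state) would fill the caller's atb_pp_state exactly as the ulv and performance getters do.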
File drivers/gpu/alga/amd/si/drv.h changed (mode: 100644) (index 8f5b8cd..90f95e8)
... ... struct clks_regs {
41 41 u32 mpll_ss_1;
42 42 };
43 43
44 #define PP_STATE_EMERGENCY_STATE_ENA BIT(0)
45 44 struct pp {
46 45 /*--------------------------------------------------------------------*/
47 46 /* default/boot/initial values */
 
... ... struct pp {
53 52 u8 default_pcie_gen; /* used in dynamic power management */
54 53 struct clks_regs clks_regs;
55 54 /*--------------------------------------------------------------------*/
56
57 u8 state;
58 55 };
59 56
60 57 struct dev_drv_data {
File drivers/gpu/alga/amd/si/dyn_pm/ctx.c changed (mode: 100644) (index 3fc0e55..1f03c4a)
37 37 #include "ctx.h"
38 38 #include "private.h"
39 39
40 #ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
41 static void atb_pp_state_dump(struct atb_pp_state *s, char *name)
42 {
43 u8 lvl_idx;
44 for (lvl_idx = 0; lvl_idx < s->lvls_n; ++lvl_idx) {
45 struct atb_pp_lvl *lvl;
46
47 lvl = &s->lvls[lvl_idx];
48 if (IS_VDDC_LEAKAGE_IDX(lvl->vddc_id)) {
49 LOG("atb_pp_%s_lvl[%u]:vddc=0x%04x(leakage index) engine clock=%ukHz memory clock=%ukHz",
50 name, lvl_idx, lvl->vddc_id, lvl->eng_clk * 10,
51 lvl->mem_clk * 10);
52 } else {
53 LOG("atb_pp_%s_lvl[%u]:vddc=%umV engine clock=%ukHz memory clock=%ukHz",
54 name, lvl_idx, lvl->vddc_id, lvl->eng_clk * 10,
55 lvl->mem_clk * 10);
56 }
57 }
58 }
59
60 static void atb_voltage_on_clk_dep_tbl_dump(
61 struct atb_voltage_on_clk_dep_tbl *tbl)
62 {
63 u8 entry_idx;
64 for (entry_idx = 0; entry_idx < tbl->entries_n; ++entry_idx) {
65 struct atb_voltage_on_clk_dep *step;
66
67 step = &tbl->entries[entry_idx];
68 if (IS_VDDC_LEAKAGE_IDX(step->voltage_id)) {
69 LOG("vddc_dep_on_sclk[%u]:clk=%ukHz voltage=0x%04x(leakage index)",
70 entry_idx, step->clk * 10, step->voltage_id);
71 } else {
72 LOG("vddc_dep_on_sclk[%u]:clk=%ukHz voltage=%umV",
73 entry_idx, step->clk * 10, step->voltage_id);
74 }
75 }
76 }
77
78 static void atb_cac_leakage_tbl_dump(struct atb_cac_leakage_tbl *tbl)
79 {
80 u8 entry_idx;
81 for (entry_idx = 0; entry_idx < tbl->entries_n; ++entry_idx) {
82 struct atb_cac_leakage *step;
83
84 step = &tbl->entries[entry_idx];
85 LOG("cac_leakage[%u]:vddc=%umV leakage=%u", entry_idx,
86 step->vddc_mv, step->leakage);
87 }
88 }
89
90 static void atb_vddc_phase_shed_limits_tbl_dump(
91 struct atb_vddc_phase_shed_limits_tbl *tbl)
92 {
93 u8 entry_idx;
94 for (entry_idx = 0; entry_idx < tbl->entries_n; ++entry_idx) {
95 struct atb_vddc_phase_shed_limits *step;
96
97 step = &tbl->entries[entry_idx];
98 LOG("atb_vddc_phase_shed_limits[%u]:vddc=%umV engine clock=%ukHz memory clock=%ukHz",
99 entry_idx, step->vddc_mv, step->sclk * 10,
100 step->mclk * 10);
101 }
102 }
103
104 static void atb_voltage_tbl_dump(struct atb_voltage_tbl *tbl, char *name)
105 {
106 u8 entry_idx;
107
108 LOG("atb_voltage_%s:phase_delay=%u mask_low=0x%08x", name,
109 tbl->phase_delay, tbl->mask_low);
110 for (entry_idx = 0; entry_idx < tbl->entries_n; ++entry_idx) {
111 struct atb_voltage_tbl_entry *entry;
112
113 entry = &tbl->entries[entry_idx];
114 LOG("atb_voltage_%s[%u]:smio_low=0x%08x val_mv=%u",
115 name, entry_idx, entry->smio_low, entry->val_mv);
116 }
117 }
118 #else
119 static void atb_pp_state_dump(struct atb_pp_state *s, char *name){}
120 static void atb_voltage_on_clk_dep_tbl_dump(
121 struct atb_voltage_on_clk_dep_tbl *tbl){}
122 static void atb_cac_leakage_tbl_dump(struct atb_cac_leakage_tbl *tbl){}
123 static void atb_vddc_phase_shed_limits_tbl_dump(
124 struct atb_vddc_phase_shed_limits_tbl *tbl){}
125 static void atb_voltage_tbl_dump(struct atb_voltage_tbl *tbl, char *name){}
126 #endif
127
40 128 static long voltages_ctl_caps_get(struct ctx *ctx)
41 129 {
42 130 struct dev_drv_data *dd;
 
... ... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
205 293 dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc voltage table\n");
206 294 goto err;
207 295 }
296 atb_voltage_tbl_dump(&ctx->atb_vddc_tbl, "vddc");
208 297 }
209 298
210 299 if (ctx->voltage_caps & VOLTAGE_CAPS_MVDD_CTL_ENA) {
 
... ... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
213 302 dev_err(&dev->dev, "dyn_pm:unable to fetch the mvddc voltage table\n");
214 303 goto err;
215 304 }
305 atb_voltage_tbl_dump(&ctx->atb_mvddc_tbl, "mvddc");
216 306 }
217 307
218 308 if (ctx->voltage_caps & VOLTAGE_CAPS_VDDCI_CTL_ENA) {
 
... ... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
221 311 dev_err(&dev->dev, "dyn_pm:unable to fetch the vddci voltage table\n");
222 312 goto err;
223 313 }
314 atb_voltage_tbl_dump(&ctx->atb_vddci_tbl, "vddci");
224 315 }
225 316
226 317 if (ctx->voltage_caps & VOLTAGE_CAPS_VDDC_PHASE_SHED_CTL_ENA) {
 
... ... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
229 320 dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc phase shedding table\n");
230 321 goto err;
231 322 }
323 atb_voltage_tbl_dump(&ctx->atb_vddc_phase_shed_tbl,
324 "vddc_phase_shed");
325
326 r = atb_vddc_phase_shed_limits_tbl_get(dd->atb,
327 &ctx->atb_vddc_phase_shed_limits_tbl);
328 if (r == -ATB_ERR) {
329 dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc phase shedding limits table\n");
330 goto err;
331 }
332 atb_vddc_phase_shed_limits_tbl_dump(
333 &ctx->atb_vddc_phase_shed_limits_tbl);
334
232 335 }
233 336
234 337 if (ctx->platform_caps & ATB_PP_PLATFORM_CAPS_NEW_CAC_VOLTAGE) {
 
... ... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
236 339 &ctx->atb_vddc_dep_on_sclk_tbl);
237 340 if (r == -ATB_ERR) {
238 341 dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc on sclk dependency table\n");
239 goto err;
342 goto err_free_vddc_phase_shed_limits_tbl_entries;
240 343 }
241 #ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
242 for (r = 0; r < ctx->atb_vddc_dep_on_sclk_tbl.entries_n; ++r) {
243 struct atb_voltage_on_clk_dep *step;
244
245 step = &ctx->atb_vddc_dep_on_sclk_tbl.entries[r];
246 if (IS_VDDC_LEAKAGE_IDX(step->voltage_id)) {
247 LOG("vddc_dep_on_sclk[%ld]:clk=%ukHz voltage=0x%04x(leakage index)",
248 r, step->clk * 10, step->voltage_id);
249 } else {
250 LOG("vddc_dep_on_sclk[%ld]:clk=%ukHz voltage=%umV",
251 r, step->clk * 10, step->voltage_id);
252 }
253 }
254 #endif
344 atb_voltage_on_clk_dep_tbl_dump(&ctx->atb_vddc_dep_on_sclk_tbl);
255 345 }
256 346
257 347 r = atb_cac_leakage_tbl_get(dd->atb, &ctx->atb_cac_leakage_tbl);

... ... long ctx_init(struct pci_dev *dev, struct ctx *ctx)
259 349 dev_err(&dev->dev, "dyn_pm:unable to fetch the cac leakage table\n");
260 350 goto err_free_vddc_dep_on_sclk_tbl_entries;
261 351 }
262 #ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
263 for (r = 0; r < ctx->atb_cac_leakage_tbl.entries_n; ++r) {
264 struct atb_cac_leakage *step;
265
266 step = &ctx->atb_cac_leakage_tbl.entries[r];
267 LOG("cac_leakage[%ld]:vddc=%umV leakage=%u", r, step->vddc_mv,
268 step->leakage);
269 }
270 #endif
352 atb_cac_leakage_tbl_dump(&ctx->atb_cac_leakage_tbl);
271 353
272 r = atb_vddc_phase_shed_limits_tbl_get(dd->atb,
273 &ctx->atb_vddc_phase_shed_limits_tbl);
354 r = atb_pp_emergency_state_get(dd->atb, &ctx->atb_emergency_state);
274 355 if (r == -ATB_ERR) {
275 dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc phase shedding \n");
356 dev_err(&dev->dev, "dyn_pm:unable to fetch the emergency state\n");
276 357 goto err_free_cac_leakage_tbl_entries;
277 358 }
278 #ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
279 for (r = 0; r < ctx->atb_vddc_phase_shed_limits_tbl.entries_n; ++r) {
280 struct atb_vddc_phase_shed_limits *step;
281
282 step = &ctx->atb_vddc_phase_shed_limits_tbl.entries[r];
283 LOG("atb_vddc_phase_shed_limits[%ld]:vddc=%umV engine clock=%ukHz memory clock=%ukHz",
284 r, step->vddc_mv, step->sclk * 10, step->mclk * 10);
285 }
286 #endif
359 atb_pp_state_dump(&ctx->atb_emergency_state, "emergency");
287 360
288 r = atb_pp_emergency_state_get(dd->atb, &ctx->atb_emergency_state);
361 r = atb_pp_ulv_state_get(dd->atb, &ctx->atb_ulv_state);
289 362 if (r == -ATB_ERR) {
290 dev_err(&dev->dev, "dyn_pm:unable to fetch the emergency state\n");
291 goto err_free_vddc_phase_shed_limits_tbl_entries;
363 dev_err(&dev->dev, "dyn_pm:unable to fetch the ulv state\n");
364 goto err_free_cac_leakage_tbl_entries;
292 365 }
293 #ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
294 for (r = 0; r < ctx->atb_emergency_state.lvls_n; ++r) {
295 struct atb_pp_lvl *lvl;
366 atb_pp_state_dump(&ctx->atb_ulv_state, "ulv");
296 367
297 lvl = &ctx->atb_emergency_state.lvls[r];
298 if (IS_VDDC_LEAKAGE_IDX(lvl->vddc_id)) {
299 LOG("atb_pp_emergency_lvl[%ld]:vddc=0x%04x(leakage index) engine clock=%ukHz memory clock=%ukHz",
300 r, lvl->vddc_id, lvl->eng_clk * 10,
301 lvl->mem_clk * 10);
302 } else {
303 LOG("atb_atb_pp_emergency_lvl[%ld]:vddc=%umV engine clock=%ukHz memory clock=%ukHz",
304 r, lvl->vddc_id, lvl->eng_clk * 10,
305 lvl->mem_clk * 10);
306 }
368 r = atb_pp_performance_state_get(dd->atb, &ctx->atb_performance_state);
369 if (r == -ATB_ERR) {
370 dev_err(&dev->dev, "dyn_pm:unable to fetch the performance state\n");
371 goto err_free_cac_leakage_tbl_entries;
307 372 }
308 #endif
373 atb_pp_state_dump(&ctx->atb_performance_state, "performance");
309 374 return 0;
310 375
311 err_free_vddc_phase_shed_limits_tbl_entries:
312 if (ctx->atb_vddc_phase_shed_limits_tbl.entries_n)
313 kfree(ctx->atb_vddc_phase_shed_limits_tbl.entries);
314 376 err_free_cac_leakage_tbl_entries:
315 377 if (ctx->atb_cac_leakage_tbl.entries_n)
316 378 kfree(ctx->atb_cac_leakage_tbl.entries);
 
... ... err_free_vddc_dep_on_sclk_tbl_entries:
318 380 if (ctx->platform_caps & ATB_PP_PLATFORM_CAPS_NEW_CAC_VOLTAGE)
319 381 if (ctx->atb_cac_leakage_tbl.entries_n)
320 382 kfree(ctx->atb_cac_leakage_tbl.entries);
383 err_free_vddc_phase_shed_limits_tbl_entries:
384 if (ctx->atb_vddc_phase_shed_limits_tbl.entries_n)
385 kfree(ctx->atb_vddc_phase_shed_limits_tbl.entries);
321 386 err:
322 387 return -SI_ERR;
323 388 }
324 389
325 390 void ctx_free(struct ctx *ctx)
326 391 {
327 if (ctx->atb_vddc_phase_shed_limits_tbl.entries_n)
328 kfree(ctx->atb_vddc_phase_shed_limits_tbl.entries);
329 392 if (ctx->atb_cac_leakage_tbl.entries_n)
330 393 kfree(ctx->atb_cac_leakage_tbl.entries);
331 394 if (ctx->platform_caps & ATB_PP_PLATFORM_CAPS_NEW_CAC_VOLTAGE)
332 395 if (ctx->atb_vddc_dep_on_sclk_tbl.entries_n)
333 396 kfree(ctx->atb_vddc_dep_on_sclk_tbl.entries);
397 if (ctx->atb_vddc_phase_shed_limits_tbl.entries_n)
398 kfree(ctx->atb_vddc_phase_shed_limits_tbl.entries);
334 399 kfree(ctx);
335 400 }
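The relocated labels restore the usual goto-unwind ordering: each fetch jumps to a label that frees only what was acquired before it, and ctx_free() now releases the tables in reverse order of acquisition. A schematic of the convention, with hypothetical tables a and b standing in for the real ones:

	/* sketch of the unwind idiom; a_tbl_get/b_tbl_get are hypothetical */
	static long two_tbls_get(struct ctx *ctx)
	{
		long r;

		r = a_tbl_get(ctx);	/* hypothetical: allocates ctx->a.entries */
		if (r == -ATB_ERR)
			goto err;
		r = b_tbl_get(ctx);	/* hypothetical: allocates ctx->b.entries */
		if (r == -ATB_ERR)
			goto err_free_a;	/* undo only what came before */
		return 0;

	err_free_a:
		kfree(ctx->a.entries);
	err:
		return -SI_ERR;
	}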
File drivers/gpu/alga/amd/si/dyn_pm/ctx.h changed (mode: 100644) (index ed52f78..5d01478)
... ... struct ctx {
41 41 struct atb_cac_leakage_tbl atb_cac_leakage_tbl;
42 42 struct atb_vddc_phase_shed_limits_tbl atb_vddc_phase_shed_limits_tbl;
43 43
44 struct atb_pp_state atb_emergency_state; /* only one level */
44 struct atb_pp_state atb_emergency_state; /* only one level */
45 struct atb_pp_state atb_ulv_state; /* only one level */
46 struct atb_pp_state atb_performance_state;
45 47
46 48 u8 pcie_root_speeds_mask;
47 49 };
File drivers/gpu/alga/amd/si/dyn_pm/dyn_pm.c changed (mode: 100644) (index 0661db5..4b8440e)
... ... long dyn_pm_ena(struct pci_dev *dev)
693 693 return DYN_PM_ENA_FAILURE;
694 694
695 695 if (ctx->atb_emergency_state.lvls_n) {
696 struct dev_drv_data *dd;
697
698 696 LOG("enabling emergency state");
699 697 emergency_state_ena(dev);
700
701 dd = pci_get_drvdata(dev);
702 dd->pp.state |= PP_STATE_EMERGENCY_STATE_ENA;
703 698 } else
704 699 LOG("emergency state not enabled");
705 700
 
... ... void dyn_pm_dis(struct pci_dev *dev)
803 798 {
804 799 long r;
805 800 struct dev_drv_data *dd;
801 struct atb_pp_state atb_emergency_state;
806 802
807 803 if (!smc_is_running(dev))
808 804 dev_err(&dev->dev, "dyn_pm:smc is not running\n");
 
... ... void dyn_pm_dis(struct pci_dev *dev)
866 862 LOG("voltage pm not disabled");
867 863 /*--------------------------------------------------------------------*/
868 864
869 if (dd->pp.state & PP_STATE_EMERGENCY_STATE_ENA) {
870 LOG("disabling emergency state");
871 emergency_state_dis(dev);
872 } else
873 LOG("emergency state not disabled");
865 /*--------------------------------------------------------------------*/
866 r = atb_pp_emergency_state_get(dd->atb, &atb_emergency_state);
867 if (r == -ATB_ERR) {
868 dev_err(&dev->dev, "dyn_pm:unable to fetch the emergency state\n");
869 } else {
870 if (atb_emergency_state.lvls_n) {
871 LOG("disabling emergency state");
872 emergency_state_dis(dev);
873 } else
874 LOG("emergency state not disabled");
875 }
876 /*--------------------------------------------------------------------*/
874 877
875 878 arb_set_f0(dev);
876 879 }
File drivers/gpu/alga/amd/si/dyn_pm/private.h changed (mode: 100644) (index d5b9881..93eaab2)
11 11 #define LOG(fmt,...)
12 12 #endif
13 13
14 #define ARB_IDX_INITIAL_STATE 0
15 #define ARB_IDX_ACPI_STATE 1
16 #define ARB_IDX_ULV_STATE 2
17 #define ARB_IDX_DRIVER_STATE 3
14 /* this is our layout for the smc mc_arb_tbl */
15 #define MC_ARB_SET_IDX_INITIAL_STATE 0 /* only one level */
16 #define MC_ARB_SET_IDX_EMERGENCY_STATE 1 /* only one level */
17 #define MC_ARB_SET_IDX_ULV_STATE 2 /* only one level */
18 /* index of the set of the first current power state level */
19 #define MC_ARB_SET_IDX_DRIVER_STATE 3
20
21 /* this is our layout for the smc mc_tbl */
22 #define MC_SET_IDX_INITIAL_STATE 0 /* only one level */
23 #define MC_SET_IDX_EMERGENCY_STATE 1 /* only one level */
24 #define MC_SET_IDX_ULV_STATE 2 /* only one level */
25 /* index of the set of the first current power state level */
26 #define MC_SET_IDX_DRIVERS_STATE 3
18 27
19 28 #define IS_VDDC_LEAKAGE_IDX(x) (((x) & 0xff00) == 0xff00)
20 29 long voltage_step_idx(struct atb_voltage_tbl *tbl, u16 val_mv);
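The renamed constants split what used to be a single index namespace: mc_arb_set_idx selects a set of arbitration registers in the smc mc_arb_tbl, while mc_set_idx selects a set of mc register values in the smc mc_tbl, with slots 0-2 of both tables reserved for the initial, emergency and ulv states. A ulv level would thus be wired to slot 2 of both tables; a sketch mirroring the initial-state assignments in smc_initial_state.c below:

	lvl->mc_arb_set_idx = MC_ARB_SET_IDX_ULV_STATE;	/* slot 2 of the smc mc_arb_tbl */
	lvl->mc_set_idx = MC_SET_IDX_ULV_STATE;		/* slot 2 of the smc mc_tbl */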
File drivers/gpu/alga/amd/si/dyn_pm/smc_emergency_state.c changed (mode: 100644) (index 08e02c8..4ef341a)
... ... long smc_state_tbl_emergency_state_fill(struct ctx *ctx,
295 295 &lvl->mvdd.val);
296 296 }
297 297
298 lvl->ac_idx = 0;
298 /*
299 * XXX: we cheat here, we use the mc_tbl initial state set, to be clean
300 * we should use MC_SET_IDX_EMERGENCY_STATE
301 */
302 lvl->mc_set_idx = MC_SET_IDX_INITIAL_STATE;
299 303
300 304 lvl->dpm_to_perf_lvl.max_ps = 0;
301 305 lvl->dpm_to_perf_lvl.near_tdp_dec = 0;
File drivers/gpu/alga/amd/si/dyn_pm/smc_initial_state.c changed (mode: 100644) (index 8d2502a..091a2ca)
... ... long smc_state_tbl_initial_state_fill(struct ctx *ctx,
87 87 long r;
88 88 u32 val;
89 89
90 LOG("filling initial smc table");
91
90 92 lvl = &state->lvls[0];
91 93 dd = pci_get_drvdata(ctx->dev);
92 94
 
... ... long smc_state_tbl_initial_state_fill(struct ctx *ctx,
129 131 put_unaligned_be32(dd->pp.default_eng_clk, &lvl->sclk.sclk_val);
130 132 /*--------------------------------------------------------------------*/
131 133
132 lvl->arb_refresh_state = ARB_IDX_INITIAL_STATE;
133 lvl->ac_idx = 0;
134 lvl->mc_arb_set_idx = MC_ARB_SET_IDX_INITIAL_STATE;
135 lvl->mc_set_idx = MC_SET_IDX_INITIAL_STATE;
134 136
135 137 if (ctx->voltage_caps & VOLTAGE_CAPS_VDDC_CTL_ENA) {
136 138 r = voltage_step_idx(&ctx->atb_vddc_tbl, dd->pp.default_vddc);
File drivers/gpu/alga/amd/si/dyn_pm/smc_state_tbl.c changed (mode: 100644) (index 8ef4bd6..f28e021)
... ... static void smc_vddc_tbl_fill(struct ctx *ctx,
72 72 }
73 73 }
74 74 smc_state_tbl->max_vddc_idx = max_vddc_idx;
75
76 #ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
77 LOG("smc vddc voltage table, mask(for smio_low)=0x%08x, max_vddc_idx=%u",
78 ctx->atb_vddc_tbl.mask_low, max_vddc_idx);
79 for (i = 0; i < ctx->atb_vddc_tbl.entries_n; ++i) {
80 LOG(" %u:%u mV smio_low=0x%08x", i,
81 ctx->atb_vddc_tbl.entries[i].val_mv,
82 ctx->atb_vddc_tbl.entries[i].smio_low);
83 }
84 #endif
85 75 }
76
86 77 static void smc_mvdd_tbl_fill(struct ctx *ctx,
87 78 struct smc_state_tbl *smc_state_tbl)
88 79 {
 
... ... static void smc_mvdd_tbl_fill(struct ctx *ctx,
104 95 put_unaligned_be32(ctx->atb_mvddc_tbl.mask_low,
105 96 &smc_state_tbl->voltage_mask_tbl.mask_low[
106 97 SMC_VOLTAGE_MASK_MVDD]);
107
108 #ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
109 LOG("smc mvdd voltage table, mask(for smio_low)=0x%08x",
110 ctx->atb_mvddc_tbl.mask_low);
111 for (i = 0; i < ctx->atb_mvddc_tbl.entries_n; ++i) {
112 LOG(" %u:%u mV smio_low=0x%08x", i,
113 ctx->atb_mvddc_tbl.entries[i].val_mv,
114 ctx->atb_mvddc_tbl.entries[i].smio_low);
115 }
116 #endif
117 98 }
118 99
119 100 static void smc_vddci_tbl_fill(struct ctx *ctx,
 
... ... static void smc_vddci_tbl_fill(struct ctx *ctx,
137 118 put_unaligned_be32(ctx->atb_vddci_tbl.mask_low,
138 119 &smc_state_tbl->voltage_mask_tbl.mask_low[
139 120 SMC_VOLTAGE_MASK_VDDCI]);
140
141 #ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
142 LOG("smc vddci voltage table, mask(for smio_low)=0x%08x",
143 ctx->atb_vddci_tbl.mask_low);
144 for (i = 0; i < ctx->atb_vddci_tbl.entries_n; ++i) {
145 LOG(" %u:%u mV smio_low=0x%08x", i,
146 ctx->atb_vddci_tbl.entries[i].val_mv,
147 ctx->atb_vddci_tbl.entries[i].smio_low);
148 }
149 #endif
150 121 }
151 122
152 123 static void smc_vddc_phase_shed_tbl_fill(struct ctx *ctx,
 
... ... static void smc_vddc_phase_shed_tbl_fill(struct ctx *ctx,
171 142 put_unaligned_be32(ctx->atb_vddc_phase_shed_tbl.mask_low,
172 143 &smc_state_tbl->phase_mask_tbl.mask_low[
173 144 SMC_VOLTAGE_MASK_VDDC]);
174
175 #ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
176 LOG("smc vddc phase shedding table, mask(for smio_low)=0x%08x",
177 ctx->atb_vddc_phase_shed_tbl.mask_low);
178 for (i = 0; i < ctx->atb_vddc_phase_shed_tbl.entries_n; ++i) {
179 LOG(" %u:%u smio_low=0x%08x", i,
180 ctx->atb_vddc_phase_shed_tbl.entries[i].val_mv,
181 ctx->atb_vddc_phase_shed_tbl.entries[i].smio_low);
182 }
183 #endif
184 145 }
185 146
186 147 /*
 
... ... long smc_state_tbl_fill(struct ctx *ctx, struct smc_state_tbl *tbl)
255 216 if (r == -SI_ERR)
256 217 return -SI_ERR;
257 218 }
258
219
220 tbl->driver_state = tbl->initial_state;
259 221 //TODO
260 222 return 0;
261 223 }
File drivers/gpu/alga/amd/si/smc.c changed (mode: 100644) (index d791cd2..6fd540f)
... ... u8 smc_tbls_cur_arb_set_get(struct pci_dev *dev)
148 148 u32 arb_dram_tbl_addr_addr;
149 149 u32 arb_dram_tbl_addr;
150 150 u32 be_dw;
151 arb_dram_tbl_addr_addr = SMC_FW_HDR_START
152 + SMC_FW_HDR_MC_ARB_DRAM_TBL;
151 arb_dram_tbl_addr_addr = SMC_FW_HDR_START + SMC_FW_HDR_MC_ARB_TBL;
153 152
154 153 arb_dram_tbl_addr = smc_r32(dev, arb_dram_tbl_addr_addr);
155 154
File drivers/gpu/alga/amd/si/smc_tbls.h changed (mode: 100644) (index 22b5657..964bb8d)
10 10 #define SMC_FW_HDR_STATE_TBL 0x10
11 11 #define SMC_FW_HDR_FAN_TBL 0x14
12 12 #define SMC_FW_HDR_CAC_CFG_TBL 0x18
13 #define SMC_FW_HDR_MC_REG_TBL 0x24
14 #define SMC_FW_HDR_MC_ARB_DRAM_TBL 0x30
13 #define SMC_FW_HDR_MC_TBL 0x24
14 #define SMC_FW_HDR_MC_ARB_TBL 0x30
15 15 #define SMC_FW_HDR_SPLL_TBL 0x38
16 16 #define SMC_FW_HDR_DTE_CFG 0x40
17 17 #define SMC_FW_HDR_PAPM_PARAMS 0x48
18 18
19 19 /*----------------------------------------------------------------------------*/
20 struct smc_mc_arb_dram_regs {
21 __be32 mc_arb_dram_timing_x_0;
22 __be32 mc_arb_dram_timing_x_1;
23 u8 mc_arb_rfsh_rate;
24 u8 mc_arb_burst_time;
20 struct smc_mc_arb_regs {
21 __be32 dram_timing_x_0;
22 __be32 dram_timing_x_1;
23 u8 rfsh_rate;
24 u8 burst_time;
25 25 u8 pad[2];
26 26 } __packed;
27 27
28 struct smc_mc_arb_dram_tbl {
28 #define SMC_ARB_REGS_SETS_N_MAX 16
29 struct smc_mc_arb_tbl {
29 30 u8 arb_current;
30 31 u8 rsvd[3];
31 struct smc_mc_arb_dram_regs sets[16];
32 struct smc_mc_arb_regs sets[SMC_ARB_REGS_SETS_N_MAX];
33 } __packed;
34 /*----------------------------------------------------------------------------*/
35
36 /*----------------------------------------------------------------------------*/
37 #define SMC_MC_REGS_N_MAX 16
38 #define SMC_MC_REGS_SETS_N_MAX 20
39 struct smc_mc_reg_addr {
40 __be16 s0;
41 __be16 s1;
42 } __packed;
43
44 struct smc_mc_regs_set {
45 __be32 vals[SMC_MC_REGS_N_MAX];
46 } __packed;
47
48 struct smc_mc_tbl {
49 u8 last;
50 u8 rsvd[3];
51 struct smc_mc_reg_addr addrs[SMC_MC_REGS_N_MAX];
52 struct smc_mc_regs_set sets[SMC_MC_REGS_SETS_N_MAX];
32 53 } __packed;
33 54 /*----------------------------------------------------------------------------*/
34 55
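Since the smc firmware consumes these structures as raw big-endian memory images, the packed layouts are load-bearing. A compile-time sanity check against the sizes implied by the field lists above (the byte counts are derived from this header alone, not confirmed against the firmware) could be dropped into any function body:

	/* smc_mc_arb_regs: 4 + 4 + 1 + 1 + 2    = 12 bytes
	 * smc_mc_arb_tbl:  4 + 16 * 12          = 196 bytes
	 * smc_mc_tbl:      4 + 16 * 4 + 20 * 64 = 1348 bytes */
	BUILD_BUG_ON(sizeof(struct smc_mc_arb_regs) != 12);
	BUILD_BUG_ON(sizeof(struct smc_mc_arb_tbl) != 196);
	BUILD_BUG_ON(sizeof(struct smc_mc_tbl) != 1348);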
 
... ... struct smc_voltage_val {
86 107 #define SMC_MC_FLGS_PG_ENA BIT(4)
87 108
88 109 struct smc_lvl {
89 u8 ac_idx;//XXX:initstate
110 u8 mc_set_idx;//XXX:initstate
90 111 u8 disp_watermark;
91 112 u8 pcie_gen;//XXX:initstate
92 113 u8 uvd_watermark;
 
... ... struct smc_lvl {
105 126 u8 hysteresis_up;
106 127 u8 hysteresis_down;
107 128 u8 state_flgs;
108 u8 arb_refresh_state;//XXX:initstate
129 u8 mc_arb_set_idx;//XXX:initstate
109 130 __be32 sq_pwr_throttle_0;//XXX:initstate
110 131 __be32 sq_pwr_throttle_1;//XXX:initstate
111 132 __be32 max_pwred_up_cu;
File include/alga/amd/atombios/pp.h changed (mode: 100644) (index 0b27ad2..1f04de3)
... ... struct atb_pp_state {
27 27 u8 lvls_n;
28 28 struct atb_pp_lvl lvls[ATB_PP_STATE_LVLS_N_MAX];
29 29 };
30 /* the boot state is built entirely from tables other than the powerplay tables */
30 31 long atb_pp_emergency_state_get(struct atombios *atb,
31 32 struct atb_pp_state *emergency);
33 long atb_pp_ulv_state_get(struct atombios *atb, struct atb_pp_state *ulv);
34 long atb_pp_performance_state_get(struct atombios *atb,
35 struct atb_pp_state *performance);
32 36 #define ATB_VOLTAGE_TYPE_VDDC 1
33 37 #define ATB_VOLTAGE_TYPE_MVDDC 2
34 38 #define ATB_VOLTAGE_TYPE_MVDDQ 3
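All three declared getters follow the same contract: 0 on success, -ATB_ERR on failure, with the fetched levels left in the caller's atb_pp_state. A minimal caller sketch (LOG() as in dyn_pm/private.h; the surrounding driver context is assumed):

	struct atb_pp_state ulv;
	long r;

	r = atb_pp_ulv_state_get(atb, &ulv);
	if (r == -ATB_ERR)
		return r;	/* table missing or unsupported revision */
	if (ulv.lvls_n)		/* clocks are stored in units of 10 kHz */
		LOG("ulv:eng=%ukHz mem=%ukHz",
		    ulv.lvls[0].eng_clk * 10, ulv.lvls[0].mem_clk * 10);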