File drivers/gpu/alga/amd/si/dyn_pm/driver.c changed (mode: 100644) (index 4d6f141..0cddb2a) |
35 |
35 |
#include "ctx.h" |
#include "ctx.h" |
36 |
36 |
#include "private.h" |
#include "private.h" |
37 |
37 |
|
|
38 |
|
/* the driver state mc regs are the initial state ones for the init */ |
|
|
38 |
|
/* the driver state mc regs are the initial/emergency ones for the init */ |
39 |
39 |
void smc_mc_reg_tbl_driver_init(struct smc_mc_reg_tbl *tbl) |
void smc_mc_reg_tbl_driver_init(struct smc_mc_reg_tbl *tbl) |
40 |
40 |
{ |
{ |
41 |
41 |
struct smc_mc_reg_set *initial; |
struct smc_mc_reg_set *initial; |
42 |
42 |
struct smc_mc_reg_set *driver; |
struct smc_mc_reg_set *driver; |
43 |
43 |
|
|
44 |
|
initial = &tbl->sets[MC_REG_SET_IDX_INITIAL]; |
|
|
44 |
|
initial = &tbl->sets[MC_REG_SET_IDX_INITIAL_EMERGENCY]; |
45 |
45 |
driver = &tbl->sets[MC_REG_SET_IDX_DRIVER]; |
driver = &tbl->sets[MC_REG_SET_IDX_DRIVER]; |
46 |
46 |
|
|
47 |
47 |
memcpy(driver, initial, sizeof(*driver)); |
memcpy(driver, initial, sizeof(*driver)); |
File drivers/gpu/alga/amd/si/dyn_pm/emergency.c changed (mode: 100644) (index bfa4c46..4416510) |
... |
... |
long smc_state_tbl_emergency_init(struct ctx *ctx, struct smc_state_tbl *tbl) |
190 |
190 |
&lvl->sq_pwr_throttle_1); |
&lvl->sq_pwr_throttle_1); |
191 |
191 |
return 0; |
return 0; |
192 |
192 |
} |
} |
193 |
|
|
|
194 |
|
void smc_mc_reg_tbl_emergency_init(struct ctx *ctx, |
|
195 |
|
struct smc_mc_reg_tbl *smc_mc_reg_tbl) |
|
196 |
|
{ |
|
197 |
|
/* |
|
198 |
|
* for the emergency state, we are using the reg set for the lowest |
|
199 |
|
* mem clk, namely the first set, since the table is sorted from lowest |
|
200 |
|
* mem clk to highest. Yes, the mem clk from the atombios pwr lvl is |
|
201 |
|
* ignored. |
|
202 |
|
*/ |
|
203 |
|
smc_mc_reg_set_load(ctx, 0, |
|
204 |
|
&smc_mc_reg_tbl->sets[MC_REG_SET_IDX_EMERGENCY]); |
|
205 |
|
} |
|
File drivers/gpu/alga/amd/si/dyn_pm/initial.c changed (mode: 100644) (index e79e77e..85120e0) |
... |
... |
long smc_state_tbl_initial_init(struct ctx *ctx, struct smc_state_tbl *tbl) |
93 |
93 |
/*--------------------------------------------------------------------*/ |
/*--------------------------------------------------------------------*/ |
94 |
94 |
|
|
95 |
95 |
lvl->mc_arb_set_idx = MC_ARB_SET_IDX_INITIAL; |
lvl->mc_arb_set_idx = MC_ARB_SET_IDX_INITIAL; |
96 |
|
lvl->mc_reg_set_idx = MC_REG_SET_IDX_INITIAL; |
|
|
96 |
|
lvl->mc_reg_set_idx = MC_REG_SET_IDX_INITIAL_EMERGENCY; |
97 |
97 |
|
|
98 |
98 |
if (ctx->volt_caps & VOLT_CAPS_VDDC_CTL_ENA) { |
if (ctx->volt_caps & VOLT_CAPS_VDDC_CTL_ENA) { |
99 |
99 |
r = smc_volt_vddc_set_from_atb_mv(ctx, &lvl->vddc, |
r = smc_volt_vddc_set_from_atb_mv(ctx, &lvl->vddc, |
|
... |
... |
long smc_state_tbl_initial_init(struct ctx *ctx, struct smc_state_tbl *tbl) |
174 |
174 |
return 0; |
return 0; |
175 |
175 |
} |
} |
176 |
176 |
|
|
177 |
|
/* |
|
178 |
|
* we are not using a atb_pp_state here, since the initial state is basically |
|
179 |
|
* built from scratch (the atombios "boot" state pwr lvl is ignored) |
|
180 |
|
*/ |
|
181 |
|
void smc_mc_reg_tbl_initial_init(struct ctx *ctx, |
|
182 |
|
struct smc_state_tbl *smc_state_tbl, |
|
183 |
|
struct smc_mc_reg_tbl *smc_mc_reg_tbl) |
|
184 |
|
{ |
|
185 |
|
u8 mc_reg_set_idx; |
|
186 |
|
u32 initial_lvl_mem_clk; |
|
187 |
|
|
|
188 |
|
/* |
|
189 |
|
* we are using the already inited initial state smc table, since |
|
190 |
|
* the atombios pwr lvl is mostly or completely ignored when loading |
|
191 |
|
* the smc initial state |
|
192 |
|
*/ |
|
193 |
|
initial_lvl_mem_clk = get_unaligned_be32( |
|
194 |
|
&smc_state_tbl->initial_lvl.mem_clk.clk); |
|
195 |
|
|
|
196 |
|
/* select a set of mc regs which can support the pwr lvl mem clk */ |
|
197 |
|
for (mc_reg_set_idx = 0; mc_reg_set_idx < ctx->atb_mc_reg_tbl.sets_n; |
|
198 |
|
++mc_reg_set_idx) { |
|
199 |
|
struct atb_mc_reg_set *mc_reg_set; |
|
200 |
|
|
|
201 |
|
mc_reg_set = &ctx->atb_mc_reg_tbl.sets[mc_reg_set_idx]; |
|
202 |
|
|
|
203 |
|
if (initial_lvl_mem_clk <= mc_reg_set->mem_clk_max) |
|
204 |
|
break; |
|
205 |
|
} |
|
206 |
|
|
|
207 |
|
/* |
|
208 |
|
* Not found, then try the last one as a work around which should |
|
209 |
|
* accomodate the highest mem clk. The tbl seems to be sorted |
|
210 |
|
* from lowest mem clk to highest mem clk. |
|
211 |
|
*/ |
|
212 |
|
if (mc_reg_set_idx == ctx->atb_mc_reg_tbl.sets_n) |
|
213 |
|
--mc_reg_set_idx; /* initial lvl is always here */ |
|
214 |
|
|
|
215 |
|
smc_mc_reg_set_load(ctx, mc_reg_set_idx, |
|
216 |
|
&smc_mc_reg_tbl->sets[MC_REG_SET_IDX_INITIAL]); |
|
217 |
|
|
|
218 |
|
} |
|
219 |
|
|
|
220 |
177 |
long smc_mc_arb_tbl_initial_init(struct ctx *ctx, |
long smc_mc_arb_tbl_initial_init(struct ctx *ctx, |
221 |
178 |
struct smc_state_tbl *smc_state_tbl, |
struct smc_state_tbl *smc_state_tbl, |
222 |
179 |
struct smc_mc_arb_tbl *smc_mc_arb_tbl) |
struct smc_mc_arb_tbl *smc_mc_arb_tbl) |
File drivers/gpu/alga/amd/si/dyn_pm/initial.h changed (mode: 100644) (index e20f1ad..df94436) |
6 |
6 |
See README at root of alga tree. |
See README at root of alga tree. |
7 |
7 |
*/ |
*/ |
8 |
8 |
long smc_state_tbl_initial_init(struct ctx *ctx, struct smc_state_tbl *tbl); |
long smc_state_tbl_initial_init(struct ctx *ctx, struct smc_state_tbl *tbl); |
9 |
|
void smc_mc_reg_tbl_initial_init(struct ctx *ctx, |
|
10 |
|
struct smc_state_tbl *smc_state_tbl, |
|
11 |
|
struct smc_mc_reg_tbl *smc_mc_reg_tbl); |
|
12 |
9 |
long smc_mc_arb_tbl_initial_init(struct ctx *ctx, |
long smc_mc_arb_tbl_initial_init(struct ctx *ctx, |
13 |
10 |
struct smc_state_tbl *smc_state_tbl, |
struct smc_state_tbl *smc_state_tbl, |
14 |
11 |
struct smc_mc_arb_tbl *smc_mc_arb_tbl); |
struct smc_mc_arb_tbl *smc_mc_arb_tbl); |
File drivers/gpu/alga/amd/si/dyn_pm/private.h changed (mode: 100644) (index 760d8ef..cf86f5f) |
25 |
25 |
#define MC_ARB_SET_IDX_DRIVER 3 |
#define MC_ARB_SET_IDX_DRIVER 3 |
26 |
26 |
|
|
27 |
27 |
/* this is our layout for the smc mc_reg_tbl */ |
/* this is our layout for the smc mc_reg_tbl */ |
28 |
|
#define MC_REG_SET_IDX_INITIAL 0 /* only one lvl */ |
|
29 |
|
#define MC_REG_SET_IDX_EMERGENCY 1 /* only one lvl */ |
|
30 |
|
#define MC_REG_SET_IDX_ULV 2 /* only one lvl */ |
|
|
28 |
|
#define MC_REG_SET_IDX_INITIAL_EMERGENCY 0 |
|
29 |
|
#define MC_REG_SET_IDX_ULV 1 |
|
30 |
|
/* the third slot is not used */ |
31 |
31 |
/* index of the set of the first current pwr state lvl */ |
/* index of the set of the first current pwr state lvl */ |
32 |
|
#define MC_REG_SET_IDX_DRIVER 3 |
|
|
32 |
|
#define MC_REG_SET_IDX_DRIVER 3 |
33 |
33 |
|
|
34 |
34 |
u8 pcie_speed_cap(struct ctx *ctx, u8 pcie_gen); |
u8 pcie_speed_cap(struct ctx *ctx, u8 pcie_gen); |
35 |
35 |
struct eng_pll { |
struct eng_pll { |
File drivers/gpu/alga/amd/si/dyn_pm/smc_mc_reg_tbl.c changed (mode: 100644) (index 772352d..b344461) |
... |
... |
void smc_mc_reg_set_load(struct ctx *ctx, u8 atb_mc_reg_set_idx, |
167 |
167 |
} |
} |
168 |
168 |
} |
} |
169 |
169 |
|
|
|
170 |
|
/* |
|
171 |
|
* we are not using an atb_pp_state here, since the initial state is basically |
|
172 |
|
* built from scratch (the atombios "boot" state pwr lvl is ignored) |
|
173 |
|
*/ |
|
174 |
|
static void initial_emergency_init(struct ctx *ctx, |
|
175 |
|
struct smc_state_tbl *smc_state_tbl, |
|
176 |
|
struct smc_mc_reg_tbl *smc_mc_reg_tbl) |
|
177 |
|
{ |
|
178 |
|
u8 mc_reg_set_idx; |
|
179 |
|
u32 initial_lvl_mem_clk; |
|
180 |
|
struct smc_mc_reg_set *set; |
|
181 |
|
|
|
182 |
|
/* |
|
183 |
|
* we are using the already inited initial state smc table, since |
|
184 |
|
* the atombios pwr lvl is mostly or completely ignored when loading |
|
185 |
|
* the smc initial state |
|
186 |
|
*/ |
|
187 |
|
initial_lvl_mem_clk = get_unaligned_be32( |
|
188 |
|
&smc_state_tbl->initial_lvl.mem_clk.clk); |
|
189 |
|
|
|
190 |
|
/* select a set of mc regs which can support the pwr lvl mem clk */ |
|
191 |
|
for (mc_reg_set_idx = 0; mc_reg_set_idx < ctx->atb_mc_reg_tbl.sets_n; |
|
192 |
|
++mc_reg_set_idx) { |
|
193 |
|
struct atb_mc_reg_set *mc_reg_set; |
|
194 |
|
|
|
195 |
|
mc_reg_set = &ctx->atb_mc_reg_tbl.sets[mc_reg_set_idx]; |
|
196 |
|
|
|
197 |
|
if (initial_lvl_mem_clk <= mc_reg_set->mem_clk_max) |
|
198 |
|
break; |
|
199 |
|
} |
|
200 |
|
|
|
201 |
|
/* |
|
202 |
|
* Not found, then try the last one as a work around which should |
|
203 |
|
* accommodate the highest mem clk. The tbl seems to be sorted |
|
204 |
|
* from lowest mem clk to highest mem clk. |
|
205 |
|
*/ |
|
206 |
|
if (mc_reg_set_idx == ctx->atb_mc_reg_tbl.sets_n) |
|
207 |
|
--mc_reg_set_idx; /* initial lvl is always here */ |
|
208 |
|
|
|
209 |
|
set = &smc_mc_reg_tbl->sets[MC_REG_SET_IDX_INITIAL_EMERGENCY]; |
|
210 |
|
smc_mc_reg_set_load(ctx, mc_reg_set_idx, set); |
|
211 |
|
} |
|
212 |
|
|
170 |
213 |
long smc_mc_reg_tbl_init(struct ctx *ctx, struct smc_state_tbl *smc_state_tbl, |
long smc_mc_reg_tbl_init(struct ctx *ctx, struct smc_state_tbl *smc_state_tbl, |
171 |
214 |
struct smc_mc_reg_tbl *smc_mc_reg_tbl) |
struct smc_mc_reg_tbl *smc_mc_reg_tbl) |
172 |
215 |
{ |
{ |
|
... |
... |
long smc_mc_reg_tbl_init(struct ctx *ctx, struct smc_state_tbl *smc_state_tbl, |
183 |
226 |
smc_mc_reg_tbl->addrs_n = all_valid_regs_n; |
smc_mc_reg_tbl->addrs_n = all_valid_regs_n; |
184 |
227 |
regs_addr_cpy(ctx, smc_mc_reg_tbl); |
regs_addr_cpy(ctx, smc_mc_reg_tbl); |
185 |
228 |
|
|
186 |
|
smc_mc_reg_tbl_initial_init(ctx, smc_state_tbl, smc_mc_reg_tbl); |
|
187 |
|
smc_mc_reg_tbl_emergency_init(ctx, smc_mc_reg_tbl); |
|
|
229 |
|
initial_emergency_init(ctx, smc_state_tbl, smc_mc_reg_tbl); |
188 |
230 |
smc_mc_reg_tbl_ulv_init(ctx, smc_mc_reg_tbl); |
smc_mc_reg_tbl_ulv_init(ctx, smc_mc_reg_tbl); |
189 |
231 |
smc_mc_reg_tbl_driver_init(smc_mc_reg_tbl); |
smc_mc_reg_tbl_driver_init(smc_mc_reg_tbl); |
190 |
232 |
return 0; |
return 0; |
File drivers/gpu/alga/amd/si/dyn_pm/ulv.c changed (mode: 100644) (index b4b17ba..e763d7a) |
... |
... |
long smc_state_tbl_ulv_init(struct ctx *ctx, struct smc_state_tbl *tbl) |
43 |
43 |
{ |
{ |
44 |
44 |
long r; |
long r; |
45 |
45 |
|
|
46 |
|
LOG("ulv smc table init"); |
|
|
46 |
|
LOG("ulv smc tbl init"); |
47 |
47 |
|
|
48 |
48 |
r = smc_lvl_from_atb(ctx, &ctx->atb_ulv.lvls[0], &tbl->ulv_lvl); |
r = smc_lvl_from_atb(ctx, &ctx->atb_ulv.lvls[0], &tbl->ulv_lvl); |
49 |
49 |
if (r == -SI_ERR) { |
if (r == -SI_ERR) { |
50 |
|
dev_err(&ctx->dev->dev, "dyn_pm:unable to init the ulv power level\n"); |
|
|
50 |
|
dev_err(&ctx->dev->dev, "dyn_pm:unable to init the ultra low voltage power level\n"); |
51 |
51 |
return -SI_ERR; |
return -SI_ERR; |
52 |
52 |
} |
} |
53 |
53 |
|
|
|
... |
... |
long smc_state_tbl_ulv_init(struct ctx *ctx, struct smc_state_tbl *tbl) |
56 |
56 |
//TODO:don't forget to adjust to number of display the power levels |
//TODO:don't forget to adjust to number of display the power levels |
57 |
57 |
tbl->ulv_lvl.state_flgs |= SMC_STATE_FLGS_DEEPSLEEP_BYPASS; |
tbl->ulv_lvl.state_flgs |= SMC_STATE_FLGS_DEEPSLEEP_BYPASS; |
58 |
58 |
tbl->ulv_lvl.mc_arb_set_idx = MC_ARB_SET_IDX_ULV; |
tbl->ulv_lvl.mc_arb_set_idx = MC_ARB_SET_IDX_ULV; |
59 |
|
/* |
|
60 |
|
* TODO: we cheat here, we use the mc_tbl emergency state set, to be |
|
61 |
|
* clean we should use MC_REG_SET_IDX_ULV, BUG? |
|
62 |
|
*/ |
|
63 |
|
tbl->ulv_lvl.mc_reg_set_idx = MC_REG_SET_IDX_EMERGENCY; |
|
|
59 |
|
tbl->ulv_lvl.mc_reg_set_idx = MC_REG_SET_IDX_ULV; |
64 |
60 |
tbl->ulv_lvl.std_vddc = tbl->ulv_lvl.vddc; |
tbl->ulv_lvl.std_vddc = tbl->ulv_lvl.vddc; |
65 |
61 |
/*--------------------------------------------------------------------*/ |
/*--------------------------------------------------------------------*/ |
66 |
62 |
|
|
|
... |
... |
void smc_sw_regs_ulv_init(struct ctx *ctx) |
87 |
83 |
SMC_SW_NON_ULV_PCIE_LINK_WIDTH); |
SMC_SW_NON_ULV_PCIE_LINK_WIDTH); |
88 |
84 |
} |
} |
89 |
85 |
|
|
|
86 |
|
#if 0 |
|
87 |
|
this following code may be used somewhere else |
90 |
88 |
static u8 mc_reg_set_find(struct ctx *ctx, u32 mem_clk) |
static u8 mc_reg_set_find(struct ctx *ctx, u32 mem_clk) |
91 |
89 |
{ |
{ |
92 |
90 |
u8 mc_reg_set_idx; |
u8 mc_reg_set_idx; |
|
... |
... |
static u8 mc_reg_set_find(struct ctx *ctx, u32 mem_clk) |
111 |
109 |
--mc_reg_set_idx; /* we presume we have at least one set */ |
--mc_reg_set_idx; /* we presume we have at least one set */ |
112 |
110 |
return mc_reg_set_idx; |
return mc_reg_set_idx; |
113 |
111 |
} |
} |
|
112 |
|
#endif |
114 |
113 |
|
|
115 |
114 |
void smc_mc_reg_tbl_ulv_init(struct ctx *ctx, |
void smc_mc_reg_tbl_ulv_init(struct ctx *ctx, |
116 |
115 |
struct smc_mc_reg_tbl *smc_mc_reg_tbl) |
struct smc_mc_reg_tbl *smc_mc_reg_tbl) |
117 |
116 |
{ |
{ |
118 |
|
u8 mc_reg_set_idx; |
|
119 |
|
|
|
|
117 |
|
struct smc_mc_reg_set *set; |
120 |
118 |
/* |
/* |
121 |
|
* for the ulv state, if the atombios define an ulv pwr lvl, do |
|
122 |
|
* use the mem clk from here, if not, use the firt set of mc reg, which |
|
123 |
|
* should be targetted for the lowest mem clk since sets are sorted |
|
124 |
|
* from lowest mem clk to highest mem clk |
|
|
119 |
|
* for the ulv state, we are using the reg set for the lowest mem clk, |
|
120 |
|
* namely the first set, since the table is sorted from lowest mem clk |
|
121 |
|
* to highest. Yes, the mem clk from the atombios pwr lvl is ignored. |
125 |
122 |
*/ |
*/ |
126 |
|
if (ctx->atb_ulv.lvls_n) { |
|
127 |
|
u32 mem_clk; |
|
128 |
|
|
|
129 |
|
mem_clk = ctx->atb_ulv.lvls[0].mem_clk; |
|
130 |
|
|
|
131 |
|
mc_reg_set_idx = mc_reg_set_find(ctx, mem_clk); |
|
132 |
|
} else |
|
133 |
|
mc_reg_set_idx = 0; |
|
134 |
|
|
|
135 |
|
smc_mc_reg_set_load(ctx, mc_reg_set_idx, |
|
136 |
|
&smc_mc_reg_tbl->sets[MC_REG_SET_IDX_ULV]); |
|
|
123 |
|
set = &smc_mc_reg_tbl->sets[MC_REG_SET_IDX_ULV]; |
|
124 |
|
smc_mc_reg_set_load(ctx, 0, set); |
137 |
125 |
} |
} |
138 |
126 |
|
|
139 |
127 |
long smc_mc_arb_tbl_ulv_init(struct ctx *ctx, |
long smc_mc_arb_tbl_ulv_init(struct ctx *ctx, |