File 2d/kickstart/app.c added (mode: 100644) (index 0000000..470f7a9)

#ifndef APP_C
#define APP_C
/*
 * this is public domain without any warranties of any kind
 * Sylvain BERTRAND
 */
/* XXX: KEEP AN EYE ON ABBREVIATIONS */
/*
 * XXX: the vk abstraction is much more complex than real hardware, aka the
 * cost of most software generalisation/abstraction (and some hardware has
 * lost its way...). better know some real hardware programming (for instance
 * the AMD open gpu) and keep that in mind while dealing with vk.
 * since it's complex, have a "safe mode", a kind of minimal use of vk. doing
 * fancy stuff above this "safe mode" must be validated by hardware vendors
 * then the user... or you are literally walking on eggshells.
 *
 * XXX: this is a "One Compilation Unit" source code with preprocessor
 * namespace support. this allows the project to grow very large while keeping
 * the global identifier space in check (= tradeoff). each source file,
 * *.h *.c, should compile without errors.
 *
 * XXX: you may have to track the dependencies of vk objs in order to be
 * able to deal with any of their brutal state changes:
 *	- a dev can be lost (i.e. power management evt or bad hardware)
 *	- a surf can be lost (i.e. power management evt or something went
 *	  really bad)
 *	- a swpchn can become out-of-date. for instance the win system did resz
 *	  the parent surf of the swpchn... if you did allow it to happen
 *	  asynchronously.
 * here we choose that any of those evts will be fatal, for simplicity.
 * for instance, if you chose to support the swpchn out-of-date state and
 * your rendering state was too much "pre-configured" in advance, you would
 * have to "re-pre-configure" everything... or you should drop
 * "pre-configuring" and program everything *again* for each swpchn img you
 * draw.
 *
 * display programming is demonstrated "officially" in the khronos vk cube.c
 * and there is a tutorial slide deck "1-Vulkan-Tutorial.pdf" (just abstract
 * away the c++ cr*p) which is "the programming manual" on top of the specs.
 * this code is different:
 *	- only 1 "main" synchronous loop
 *	- only the xcb wsi. xcb is a client library on top of the x11 protocol.
 *	  (we know wayland should be added...)
 *	- dynamic loading of xcb.
 *	- no need of vk headers (using directly the ABI with custom headers).
 *
 * WARNING: vk core q fam props discovery is not used for the discovery of
 * q fams able to support disp, aka wsi. this is the case because disp
 * handling (wsi) is done as an ext and is not core (vk can be used without
 * a wsi).
 *
 * a phydev must have a q with gfx and compute. additionally, any q with gfx
 * or compute does implicitly support transfer. basically, it is possible to
 * have qs with only transfer support, and we are guaranteed to have a q with
 * gfx and compute and transfer support. keep in mind that many vk resources
 * must pay the cost of transferring from 1 q fam to another q fam: then think
 * twice about how you want to spread your work load on the q fams.
 *
 * we did change the name of some vk objs to make some areas more explicit.
 *
 * for proper keyboard support, the joypad way or/and the text input way, read
 * the included KEYBOARD file. here, since we use basic layout-independent
 * standard keys, the x11 core keyboard protocol is fairly enough.
 *
 * TODO: use as few dev mem objs as possible, namely try to allocate one big
 * chunk and manage the alignment constraints ourselves.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "app_core_types.h"
#include "vk_types.h"
#include "app_state_types.h"
#include "log.h"

#include "vk_syms.c"
#include "app_state.c"
#include "xcb.c"

/*---------------------------------------------------------------------------*/
#include "namespace/app.c"
#include "namespace/vk_syms.c"
#include "namespace/app_state_types.h"
#include "namespace/app_state.c"

/* create the dev on the selected phydev q fam */
static void dev_create(void)
{
	struct vk_dev_create_info_t info;
	struct vk_dev_q_create_info_t q_info;
	float q_prio;
	static u8 *exts[] = {
		/* 1.1 promoted */
		"VK_KHR_bind_memory2",
		/* 1.1 promoted */
		"VK_KHR_get_memory_requirements2",
		"VK_KHR_swapchain"};
	s32 r;

	memset(&info, 0, sizeof(info));
	memset(&q_info, 0, sizeof(q_info));
	/*--------------------------------------------------------------------*/
	q_info.type = vk_struct_type_dev_q_create_info;
	q_info.q_fam = surf_g.dev.phydev.q_fam;
	q_info.qs_n = 1;
	q_info.q_prios = &q_prio;
	q_prio = 1.0f;
	/*--------------------------------------------------------------------*/
	info.type = vk_struct_type_dev_create_info;
	info.q_create_infos_n = 1;
	info.q_create_infos = &q_info;
	info.enabled_exts_n = ARRAY_N(exts);
	info.enabled_ext_names = exts;

	r = vk_create_dev(surf_g.dev.phydev.vk, &info, 0, &surf_g.dev.vk);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:physical device:%p:unable to create a vulkan device\n", r, surf_g.dev.phydev.vk);
		exit(1);
	}
	LOG("0:MAIN:physical device:%p:vulkan device created with one proper queue:%p\n", surf_g.dev.phydev.vk, surf_g.dev.vk);
}

static void instance_create(void)
{
	s32 r;
	struct vk_instance_create_info_t info;
	static u8 *exts[] = {
		/*
		 * XXX: not 1.1 promoted, should not use it, but it is fixing
		 * some inconsistency from 1.0
		 */
		"VK_KHR_get_surface_capabilities2",
		/* 1.1 promoted */
		"VK_KHR_get_physical_device_properties2",
		"VK_KHR_xcb_surface",
		"VK_KHR_surface"};
	u32 i;

	i = 0;
	loop {
		if (i == ARRAY_N(exts))
			break;
		LOG("0:MAIN:will use vulkan instance_g extension %s\n", exts[i]);
		++i;
	}

	memset(&info, 0, sizeof(info));

	info.type = vk_struct_type_instance_create_info;
	info.enabled_exts_n = ARRAY_N(exts);
	info.enabled_ext_names = exts;
	r = vk_create_instance(&info, 0, &instance_g);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:unable to create a vulkan instance_g\n", r);
		exit(1);
	}
	LOG("0:MAIN:vulkan instance_g handle %p\n", instance_g);
}

/* in theory, this could change on the fly */
static void instance_exts_dump(void)
{
#define EXTS_N_MAX 512
	struct vk_ext_props_t exts[EXTS_N_MAX];
	u32 n;
	s32 r;

	memset(exts, 0, sizeof(exts));
	n = EXTS_N_MAX;
	r = vk_enumerate_instance_ext_props(0, &n, exts);
	if (r != vk_success && r != vk_incomplete) {
		LOG("0:MAIN:ERROR:%d:unable to enumerate instance_g extension(s)\n", r);
		return;
	}
	if (r == vk_incomplete) {
		LOG("0:MAIN:ERROR:too many extensions (%u/%u), dumping disabled\n", n, EXTS_N_MAX);
		return;
	}
	/* vk_success */
	LOG("0:MAIN:have %u instance_g extension(s)\n", n);
	loop {
		if (n == 0)
			break;
		LOG("0:MAIN:instance_g extension:name=%s:specification version=%u\n", exts[n - 1].name, exts[n - 1].spec_version);
		n--;
	}
#undef EXTS_N_MAX
}

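/*
 * note: the function above uses the usual vk two-call enumeration pattern,
 * but with fixed-size caller storage instead of a first counting call,
 * which is why vk_incomplete must be treated as an error. with the
 * standard headers the counting variant would be (equivalent standard
 * names, not this project's wrappers):
 *
 *	uint32_t n = 0;
 *	vkEnumerateInstanceExtensionProperties(0, &n, 0);
 *	VkExtensionProperties *p = malloc(n * sizeof(*p));
 *	vkEnumerateInstanceExtensionProperties(0, &n, p);
 */
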
/* in theory, this could change on the fly */
static void instance_layers_dump(void)
{
#define LAYERS_N_MAX 32
	struct vk_layer_props_t layers[LAYERS_N_MAX];
	u32 n;
	s32 r;

	memset(layers, 0, sizeof(layers));
	n = LAYERS_N_MAX;
	r = vk_enumerate_instance_layer_props(&n, layers);
	if (r != vk_success && r != vk_incomplete) {
		LOG("0:MAIN:ERROR:%d:unable to enumerate instance_g layer(s)\n", r);
		return;
	}
	if (r == vk_incomplete) {
		LOG("0:MAIN:ERROR:too many layers (%u/%u), dumping disabled\n", n, LAYERS_N_MAX);
		return;
	}
	/* vk_success */
	LOG("0:MAIN:have %u instance_g layer(s)\n", n);
	loop {
		if (n == 0)
			break;
		LOG("0:MAIN:instance_g layer:%u:name=%s:specification version=%u:implementation version=%u:description=%s\n", n - 1, layers[n - 1].name, layers[n - 1].spec_version, layers[n - 1].implementation_version, layers[n - 1].desc);
		n--;
	}
#undef LAYERS_N_MAX
}

static void tmp_phydevs_get(void)
{
	void *phydevs[tmp_phydevs_n_max];
	u32 n;
	s32 r;

	memset(phydevs, 0, sizeof(phydevs));
	n = tmp_phydevs_n_max;
	r = vk_enumerate_phydevs(instance_g, &n, phydevs);
	if (r != vk_success && r != vk_incomplete) {
		LOG("0:MAIN:FATAL:%d:unable to enumerate physical devices\n", r);
		exit(1);
	}
	if (r == vk_incomplete) {
		LOG("0:MAIN:FATAL:too many vulkan physical devices %u/%u for our temporary storage\n", n, tmp_phydevs_n_max);
		exit(1);
	}
	/* vk_success */
	LOG("0:MAIN:detected %u physical devices\n", n);
	if (n == 0) {
		LOG("0:MAIN:no vulkan physical devices, exiting\n");
		exit(1);
	}
	tmp_phydevs_n_g = n;
	memset(tmp_phydevs_g, 0, sizeof(tmp_phydevs_g));
	n = 0;
	loop {
		if (n == tmp_phydevs_n_g)
			break;
		tmp_phydevs_g[n].vk = phydevs[n];
		++n;
	}
}

static void phydev_exts_dump(void *phydev)
{
#define EXTS_N_MAX 512
	struct vk_ext_props_t exts[EXTS_N_MAX];
	u32 n;
	s32 r;

	memset(exts, 0, sizeof(exts));
	n = EXTS_N_MAX;
	r = vk_enumerate_dev_ext_props(phydev, 0, &n, exts);
	if (r != vk_success && r != vk_incomplete) {
		LOG("0:MAIN:ERROR:physical device:%p:%d:unable to enumerate device extension(s)\n", phydev, r);
		return;
	}
	if (r == vk_incomplete) {
		LOG("0:MAIN:ERROR:physical device:%p:too many extensions (%u/%u), dumping disabled\n", phydev, n, EXTS_N_MAX);
		return;
	}
	/* vk_success */
	LOG("0:MAIN:physical device:%p:have %u device extension(s)\n", phydev, n);
	loop {
		if (n == 0)
			break;
		LOG("0:MAIN:physical device:%p:device extension:name=%s:specification version=%u\n", phydev, exts[n - 1].name, exts[n - 1].spec_version);
		n--;
	}
#undef EXTS_N_MAX
}

static void tmp_phydevs_exts_dump(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_g)
			break;
		phydev_exts_dump(tmp_phydevs_g[i].vk);
		++i;
	}
}

static u8 *dev_type_str(u32 type)
{
	switch (type) {
	case vk_phydev_type_other:
		return "other";
	case vk_phydev_type_integrated_gpu:
		return "integrated gpu";
	case vk_phydev_type_discrete_gpu:
		return "discrete gpu";
	case vk_phydev_type_virtual_gpu:
		return "virtual gpu";
	case vk_phydev_type_cpu:
		return "cpu";
	default:
		return "UNKNOWN";
	}
}

static u8 *uuid_str(u8 *uuid)
{
	static u8 uuid_str[VK_UUID_SZ * 2 + 1];
	u8 i;

	memset(uuid_str, 0, sizeof(uuid_str));
	i = 0;
	loop {
		if (i == VK_UUID_SZ)
			break;
		/* XXX: always write a terminating 0, truncated or not */
		snprintf(uuid_str + i * 2, 3, "%02x", uuid[i]);
		++i;
	}
	return uuid_str;
}

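/*
 * note on uuid_str: snprintf(dst, 3, "%02x", byte) writes 2 hex chars and
 * always a terminating 0, so each iteration overwrites the previous
 * terminator and the result is VK_UUID_SZ * 2 chars plus 1 final 0, which
 * is exactly sizeof(uuid_str). e.g. with a 16-byte uuid: 32 hex chars in a
 * 33-byte static buffer.
 */
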
static void tmp_phydevs_props_dump(void)
{
	u32 i;

	i = 0;
	loop {
		struct vk_phydev_props_t props;
		struct tmp_phydev_t *p;

		if (i == tmp_phydevs_n_g)
			break;

		p = &tmp_phydevs_g[i];

		memset(&props, 0, sizeof(props));
		props.type = vk_struct_type_phydev_props;

		vk_get_phydev_props(p->vk, &props);

		LOG("0:MAIN:physical device:%p:properties:api version=%#x=%u.%u.%u\n", p->vk, props.core.api_version, VK_VERSION_MAJOR(props.core.api_version), VK_VERSION_MINOR(props.core.api_version), VK_VERSION_PATCH(props.core.api_version));
		LOG("0:MAIN:physical device:%p:properties:driver version=%#x=%u.%u.%u\n", p->vk, props.core.driver_version, VK_VERSION_MAJOR(props.core.driver_version), VK_VERSION_MINOR(props.core.driver_version), VK_VERSION_PATCH(props.core.driver_version));
		LOG("0:MAIN:physical device:%p:properties:vendor id=%#x\n", p->vk, props.core.vendor_id);
		LOG("0:MAIN:physical device:%p:properties:device id=%#x\n", p->vk, props.core.dev_id);
		LOG("0:MAIN:physical device:%p:properties:type=%s\n", p->vk, dev_type_str(props.core.dev_type));
		if (props.core.dev_type == vk_phydev_type_discrete_gpu)
			p->is_discret_gpu = true;
		else
			p->is_discret_gpu = false;
		LOG("0:MAIN:physical device:%p:properties:name=%s\n", p->vk, props.core.name);
		LOG("0:MAIN:physical device:%p:properties:pipeline cache uuid=%s\n", p->vk, uuid_str(props.core.pipeline_cache_uuid));
		/* display the limits and sparse props at log level 1, if needed */
		++i;
	}
}

static void tmp_phydev_q_fams_get(struct tmp_phydev_t *p)
{
	u8 i;
	u32 n;

	n = 0;
	vk_get_phydev_q_fam_props(p->vk, &n, 0);
	if (n > tmp_phydev_q_fams_n_max) {
		LOG("0:MAIN:FATAL:physical device:%p:too many queue families %u/%u\n", p->vk, n, tmp_phydev_q_fams_n_max);
		exit(1);
	}

	memset(p->q_fams, 0, sizeof(p->q_fams));
	i = 0;
	loop {
		if (i == tmp_phydev_q_fams_n_max)
			break;
		p->q_fams[i].type = vk_struct_type_q_fam_props;
		++i;
	}

	vk_get_phydev_q_fam_props(p->vk, &n, p->q_fams);
	p->q_fams_n = n;
	LOG("0:MAIN:physical device:%p:have %u queue families\n", p->vk, p->q_fams_n);
}

static void tmp_phydevs_q_fams_get(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_g)
			break;
		tmp_phydev_q_fams_get(&tmp_phydevs_g[i]);
		++i;
	}
}

static void tmp_phydev_q_fams_dump(struct tmp_phydev_t *p)
{
	u8 i;

	i = 0;
	loop {
		if (i == p->q_fams_n)
			break;
		if ((p->q_fams[i].core.flags & vk_q_gfx_bit) != 0)
			LOG("0:MAIN:physical device:%p:queue family:%u:flags:graphics\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_compute_bit) != 0)
			LOG("0:MAIN:physical device:%p:queue family:%u:flags:compute\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_transfer_bit) != 0)
			LOG("0:MAIN:physical device:%p:queue family:%u:flags:transfer\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_sparse_binding_bit) != 0)
			LOG("0:MAIN:physical device:%p:queue family:%u:flags:sparse binding\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_protected_bit) != 0)
			LOG("0:MAIN:physical device:%p:queue family:%u:flags:protected\n", p->vk, i);

		LOG("0:MAIN:physical device:%p:queue family:%u:%u queues\n", p->vk, i, p->q_fams[i].core.qs_n);
		LOG("0:MAIN:physical device:%p:queue family:%u:%u bits timestamps\n", p->vk, i, p->q_fams[i].core.timestamp_valid_bits);
		LOG("0:MAIN:physical device:%p:queue family:%u:(width=%u,height=%u,depth=%u) minimum image transfer granularity\n", p->vk, i, p->q_fams[i].core.min_img_transfer_granularity.width, p->q_fams[i].core.min_img_transfer_granularity.height, p->q_fams[i].core.min_img_transfer_granularity.depth);
		++i;
	}
}

static void cmdpools_create(void)
{
	s32 r;
	struct vk_cmdpool_create_info_t info;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_cmdpool_create_info;
	info.flags = vk_cmdpool_create_reset_cmdbuf_bit;
	info.q_fam = surf_g.dev.phydev.q_fam;

	r = vk_create_cmdpool(&info, 0, &surf_g.dev.cmdpool);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:unable to create the command pool\n", r);
		exit(1);
	}
	LOG("0:MAIN:device:%p:queue family:%u:created command pool %p\n", surf_g.dev.vk, surf_g.dev.phydev.q_fam, surf_g.dev.cmdpool);
}

static void tmp_phydevs_q_fams_dump(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_g)
			break;
		tmp_phydev_q_fams_dump(&tmp_phydevs_g[i]);
		++i;
	}
}

static void q_get(void)
{
	LOG("0:MAIN:device:%p:getting queue:family=%u queue=0\n", surf_g.dev.vk, surf_g.dev.phydev.q_fam);
	vk_get_dev_q(surf_g.dev.phydev.q_fam, 0, &surf_g.dev.q);
	LOG("0:MAIN:device:%p:got queue:%p\n", surf_g.dev.vk, surf_g.dev.q);
}

static void check_vk_version(void)
{
	u32 api_version;
	s32 r;

	r = vk_enumerate_instance_version(&api_version);
	if (r != vk_success) {
		LOG("0:MAIN:FATAL:%d:unable to enumerate instance_g version\n", r);
		exit(1);
	}
	LOG("0:MAIN:vulkan instance_g version %#x = %u.%u.%u\n", api_version, VK_VERSION_MAJOR(api_version), VK_VERSION_MINOR(api_version), VK_VERSION_PATCH(api_version));
	if (VK_VERSION_MAJOR(api_version) == 1
			&& VK_VERSION_MINOR(api_version) == 0) {
		LOG("0:MAIN:FATAL:vulkan instance_g version too old\n");
		exit(1);
	}
}

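/*
 * the packed version format decoded above is the standard vk encoding,
 * major in bits 31..22, minor in bits 21..12, patch in bits 11..0:
 *
 *	VK_VERSION_MAJOR(v) == ((v) >> 22)
 *	VK_VERSION_MINOR(v) == (((v) >> 12) & 0x3ff)
 *	VK_VERSION_PATCH(v) == ((v) & 0xfff)
 *
 * e.g. 1.1.0 packs to (1 << 22) | (1 << 12) = 0x00401000, which is why the
 * major/minor compare above is enough to reject too-old 1.0 instances.
 */
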
/*
 * the major obj to use in the vk abstraction of gfx hardware is the q. in
 * this abstraction, many core objs like bufs/imgs are "owned" by a specific
 * q, and transfer of such ownership to other qs can be expensive. we know
 * it's not really the case on AMD hardware, but if the vk abstraction
 * insists on this, it probably means it is important on some hardware of
 * other vendors.
 */
static void tmp_phydevs_q_fams_surf_support_get(void)
{
	u8 i;

	i = 0;
	loop {
		struct tmp_phydev_t *p;
		u8 j;

		if (i == tmp_phydevs_n_g)
			break;

		p = &tmp_phydevs_g[i];

		j = 0;
		loop {
			s32 r;
			u32 supported;

			if (j == p->q_fams_n)
				break;

			supported = vk_false;
			r = vk_get_phydev_surf_support(p->vk, j, surf_g.vk,
								&supported);
			if (r < 0) {
				LOG("0:MAIN:FATAL:%d:physical device:%p:queue family:%u:surface:%p:unable to query queue family wsi/(image presentation to our surface) support\n", r, p->vk, j, surf_g.vk);
				exit(1);
			}

			if (supported == vk_true) {
				LOG("0:MAIN:physical device:%p:queue family:%u:surface:%p:does support wsi/(image presentation to our surface)\n", p->vk, j, surf_g.vk);
				p->q_fams_surf_support[j] = true;
			} else {
				LOG("0:MAIN:physical device:%p:queue family:%u:surface:%p:does not support wsi/(image presentation to our surface)\n", p->vk, j, surf_g.vk);
				p->q_fams_surf_support[j] = false;
			}
			++j;
		}
		++i;
	}
}

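/*
 * (vk_get_phydev_surf_support presumably wraps the standard
 * vkGetPhysicalDeviceSurfaceSupportKHR(phydev, q_fam, surf, &supported)
 * of VK_KHR_surface: per the WARNING at the top of this file, it is the
 * only way to learn whether a q fam can present to a given surf, since the
 * core q fam props say nothing about wsi.)
 */
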
static void tmp_selected_phydev_cherry_pick(u8 i)
{
	struct tmp_phydev_t *p;

	p = &tmp_phydevs_g[i];

	surf_g.dev.phydev.vk = p->vk;
	surf_g.dev.phydev.is_discret_gpu = p->is_discret_gpu;
	surf_g.dev.phydev.mem_types_n = p->mem_props.core.mem_types_n;
	memcpy(surf_g.dev.phydev.mem_types, p->mem_props.core.mem_types,
					sizeof(surf_g.dev.phydev.mem_types));
}

/*
 * we ask the qs of the phydevs which one is able to present imgs to the
 * external pe surf_g. additionally we require this q to support gfx. we
 * basically select the first q from the first phydev fitting what we are
 * looking for.
 */
static void tmp_phydev_and_q_fam_select(void)
{
	u8 i;

	i = 0;
	loop {
		u8 j;
		struct tmp_phydev_t *p;

		if (i == tmp_phydevs_n_g)
			break;

		p = &tmp_phydevs_g[i];

		j = 0;
		loop {
			if (j == p->q_fams_n)
				break;
			/*
			 * we are looking for a q fam with:
			 *	- img presentation to our surf_g
			 *	- gfx
			 *	- transfer (implicit with gfx)
			 */
			if (p->q_fams_surf_support[j]
				&& (p->q_fams[j].core.flags & vk_q_gfx_bit)
									!= 0) {
				surf_g.dev.phydev.q_fam = j;
				tmp_selected_phydev_cherry_pick(i);
				LOG("0:MAIN:physical device %p selected for (wsi/image presentation to our surface %p) using its queue family %u\n", surf_g.dev.phydev.vk, surf_g.vk, surf_g.dev.phydev.q_fam);
				return;
			}
			++j;
		}
		++i;
	}
}

/*
 * XXX: the surf_g is an obj at the instance_g lvl, NOT THE [PHYSICAL]
 * DEV LVL.
 */
static void surf_create(void)
{
	struct vk_xcb_surf_create_info_t xcb_info;
	s32 r;

	memset(&surf_g, 0, sizeof(surf_g));

	memset(&xcb_info, 0, sizeof(xcb_info));
	xcb_info.type = vk_struct_type_xcb_surf_create_info;
	xcb_info.c = app_xcb.c;
	xcb_info.win = app_xcb.win_id;

	r = vk_create_xcb_surf(instance_g, &xcb_info, 0, &surf_g.vk);
	if (r < 0) { /* ok because this enum is forced to a signed 32 bits */
		LOG("0:MAIN:FATAL:%d:xcb:'%s':screen:%d:root window id:%#x:window id:%#x:unable to create a vulkan surface from this x11 window\n", r, app_xcb.disp_env, app_xcb.scr_idx, app_xcb.scr->root, app_xcb.win_id);
		exit(1);
	}
	LOG("0:MAIN:xcb:'%s':screen:%d:root window id:%#x:window id:%#x:created vk_surface=%p\n", app_xcb.disp_env, app_xcb.scr_idx, app_xcb.scr->root, app_xcb.win_id, surf_g.vk);
}

static void texel_mem_blk_confs_dump(u32 confs_n,
	struct vk_surf_texel_mem_blk_conf_t *confs)
{
	u32 i;

	i = 0;
	loop {
		if (i == confs_n)
			break;
		LOG("0:MAIN:physical device:%p:surface:%p:texel memory block configuration:format=%u color_space=%u\n", surf_g.dev.phydev.vk, surf_g.vk, confs[i].core.fmt, confs[i].core.color_space);
		++i;
	}
}

/*
 * we only know this phydev/q is "able to present imgs" to the external
 * pe surf_g. here we choose the conf of the texel blk
 */
#define CONFS_N_MAX 1024
static void texel_mem_blk_conf_select(void)
{
	struct vk_phydev_surf_info_t info;
	struct vk_surf_texel_mem_blk_conf_t confs[CONFS_N_MAX];
	struct vk_surf_texel_mem_blk_conf_core_t *cc;
	s32 r;
	u32 confs_n;
	u32 i;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_phydev_surf_info;
	info.surf = surf_g.vk;

	r = vk_get_phydev_surf_texel_mem_blk_confs(surf_g.dev.phydev.vk, &info,
								&confs_n, 0);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get the count of valid surface texel memory block configurations\n", r, surf_g.dev.phydev.vk, surf_g.vk);
		exit(1);
	}

	if (confs_n > CONFS_N_MAX) {
		LOG("0:MAIN:FATAL:physical device:%p:surface:%p:too many surface texel memory block configurations %u/%u\n", surf_g.dev.phydev.vk, surf_g.vk, confs_n, CONFS_N_MAX);
		exit(1);
	}

	memset(confs, 0, sizeof(confs[0]) * confs_n);
	i = 0;
	loop {
		if (i == confs_n)
			break;
		confs[i].type = vk_struct_type_surf_texel_mem_blk_conf;
		++i;
	}

	r = vk_get_phydev_surf_texel_mem_blk_confs(surf_g.dev.phydev.vk, &info,
							&confs_n, confs);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get the valid surface texel memory block configurations\n", r, surf_g.dev.phydev.vk, surf_g.vk);
		exit(1);
	}

	if (confs_n == 0) {
		LOG("0:MAIN:FATAL:physical device:%p:surface:%p:no valid surface texel memory block configuration\n", surf_g.dev.phydev.vk, surf_g.vk);
		exit(1);
	}

	texel_mem_blk_confs_dump(confs_n, confs);

	cc = &surf_g.dev.phydev.selected_texel_mem_blk_conf_core;

	if ((confs_n == 1) && (confs[0].core.fmt
				== vk_texel_mem_blk_fmt_undefined)) {
		/* this means the dev lets us choose the fmt */
		cc->fmt = vk_texel_mem_blk_fmt_b8g8r8a8_srgb;
		LOG("0:MAIN:physical device:%p:surface:%p:using our surface texel memory block format %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->fmt);

		cc->color_space = vk_color_space_srgb_nonlinear;
		LOG("0:MAIN:physical device:%p:surface:%p:using our surface texel memory block color space %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->color_space);
	} else {
		/* the first valid fmt is the preferred fmt */
		cc->fmt = confs[0].core.fmt;
		LOG("0:MAIN:physical device:%p:surface:%p:using preferred surface texel memory block format %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->fmt);

		cc->color_space = confs[0].core.color_space;
		LOG("0:MAIN:physical device:%p:surface:%p:using preferred surface texel memory block color space %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->color_space);
	}
}
#undef CONFS_N_MAX

static void tmp_phydev_mem_props_get(struct tmp_phydev_t *p)
{
	memset(&p->mem_props, 0, sizeof(p->mem_props));
	p->mem_props.type = vk_struct_type_phydev_mem_props;

	vk_get_phydev_mem_props(p->vk, &p->mem_props);
}

static void tmp_phydevs_mem_props_get(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_g)
			break;
		tmp_phydev_mem_props_get(&tmp_phydevs_g[i]);
		++i;
	}
}

static void phydev_mem_heap_dump(void *phydev, u8 i,
	struct vk_mem_heap_t *heap)
{
	LOG("0:MAIN:physical device:%p:memory heap:%u:size:%lu bytes\n", phydev, i, (long)heap->sz);
	LOG("0:MAIN:physical device:%p:memory heap:%u:flags:%#08x\n", phydev, i, heap->flags);
	if ((heap->flags & vk_mem_heap_dev_local_bit) != 0)
		LOG("0:MAIN:physical device:%p:memory heap:%u:device local\n", phydev, i);
	if ((heap->flags & vk_mem_heap_multi_instance_bit) != 0)
		LOG("0:MAIN:physical device:%p:memory heap:%u:multi instance\n", phydev, i);
}

static void phydev_mem_type_dump(void *phydev, u8 i,
	struct vk_mem_type_t *type)
{
	LOG("0:MAIN:physical device:%p:memory type:%u:heap:%u\n", phydev, i, type->heap);
	LOG("0:MAIN:physical device:%p:memory type:%u:flags:%#08x\n", phydev, i, type->prop_flags);
	if ((type->prop_flags & vk_mem_prop_dev_local_bit) != 0)
		LOG("0:MAIN:physical device:%p:memory type:%u:device local\n", phydev, i);
	if ((type->prop_flags & vk_mem_prop_host_visible_bit) != 0)
		LOG("0:MAIN:physical device:%p:memory type:%u:host visible\n", phydev, i);
	if ((type->prop_flags & vk_mem_prop_host_cached_bit) != 0)
		LOG("0:MAIN:physical device:%p:memory type:%u:host cached\n", phydev, i);
}

static void tmp_phydev_mem_types_dump(struct tmp_phydev_t *p)
{
	u8 i;

	LOG("0:MAIN:physical device:%p:%u memory types\n", p->vk, p->mem_props.core.mem_types_n);

	i = 0;
	loop {
		if (i == p->mem_props.core.mem_types_n)
			break;
		phydev_mem_type_dump(p->vk, i,
					&p->mem_props.core.mem_types[i]);
		++i;
	}
}

static void tmp_phydev_mem_heaps_dump(struct tmp_phydev_t *p)
{
	u8 i;

	LOG("0:MAIN:physical device:%p:%u memory heaps\n", p->vk, p->mem_props.core.mem_heaps_n);

	i = 0;
	loop {
		if (i == p->mem_props.core.mem_heaps_n)
			break;
		phydev_mem_heap_dump(p->vk, i,
					&p->mem_props.core.mem_heaps[i]);
		++i;
	}
}

static void tmp_phydev_mem_props_dump(struct tmp_phydev_t *p)
{
	tmp_phydev_mem_types_dump(p);
	tmp_phydev_mem_heaps_dump(p);
}

static void tmp_phydevs_mem_props_dump(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_g)
			break;
		tmp_phydev_mem_props_dump(&tmp_phydevs_g[i]);
		++i;
	}
}

static void tmp_surf_caps_get(void)
{
	s32 r;
	struct vk_phydev_surf_info_t info;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_phydev_surf_info;
	info.surf = surf_g.vk;

	r = vk_get_phydev_surf_caps(surf_g.dev.phydev.vk, &info,
							&tmp_surf_caps_g);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get our surface capabilities in the context of the selected physical device\n", r, surf_g.dev.phydev.vk, surf_g.vk);
		exit(1);
	}

	/* we have room for a maximum of 3 images per swapchain */
	if (tmp_surf_caps_g.core.imgs_n_min > swpchn_imgs_n_max) {
		LOG("0:MAIN:FATAL:physical device:%p:surface:%p:we have room for %u images per swapchain, but this swapchain requires a minimum of %u images\n", surf_g.dev.phydev.vk, surf_g.vk, swpchn_imgs_n_max, tmp_surf_caps_g.core.imgs_n_min);
		exit(1);
	}
}

static void tmp_surf_caps_dump(void)
{
	LOG("0:MAIN:physical device:%p:surface:%p:imgs_n_min=%u\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.imgs_n_min);
	LOG("0:MAIN:physical device:%p:surface:%p:imgs_n_max=%u\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.imgs_n_max);
	LOG("0:MAIN:physical device:%p:surface:%p:current extent=(width=%u, height=%u)\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.current_extent.width, tmp_surf_caps_g.core.current_extent.height);
	LOG("0:MAIN:physical device:%p:surface:%p:minimal extent=(width=%u, height=%u)\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.img_extent_min.width, tmp_surf_caps_g.core.img_extent_min.height);
	LOG("0:MAIN:physical device:%p:surface:%p:maximal extent=(width=%u, height=%u)\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.img_extent_max.width, tmp_surf_caps_g.core.img_extent_max.height);
	LOG("0:MAIN:physical device:%p:surface:%p:img_array_layers_n_max=%u\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.img_array_layers_n_max);
	LOG("0:MAIN:physical device:%p:surface:%p:supported_transforms=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.supported_transforms);
	LOG("0:MAIN:physical device:%p:surface:%p:current_transform=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.current_transform);
	LOG("0:MAIN:physical device:%p:surface:%p:supported_composite_alpha=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.supported_composite_alpha);
	LOG("0:MAIN:physical device:%p:surface:%p:supported_img_usage_flags=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.supported_img_usage_flags);
}

static void swpchn_imgs_get(void)
{
	s32 r;

	surf_g.dev.swpchn.imgs_n = swpchn_imgs_n_max;
	r = vk_get_swpchn_imgs(surf_g.dev.swpchn.vk, &surf_g.dev.swpchn.imgs_n,
						surf_g.dev.swpchn.imgs);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:device:%p:surface:%p:swapchain:%p:unable to get the swapchain images\n", r, surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk);
		exit(1);
	}
	LOG("0:MAIN:device:%p:surface:%p:swapchain:%p:got %u swapchain images\n", surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk, surf_g.dev.swpchn.imgs_n);
}

static void swpchn_init(void)
{
	struct vk_swpchn_create_info_t info;
	struct phydev_t *p;
	s32 r;

	memset(&info, 0, sizeof(info));

	p = &surf_g.dev.phydev;

	info.type = vk_struct_type_swpchn_create_info;
	info.surf = surf_g.vk;
	info.imgs_n_min = tmp_surf_caps_g.core.imgs_n_min;
	info.img_texel_mem_blk_fmt = p->selected_texel_mem_blk_conf_core.fmt;
	info.img_color_space = p->selected_texel_mem_blk_conf_core.color_space;
	memcpy(&info.img_extent, &tmp_surf_caps_g.core.current_extent,
						sizeof(info.img_extent));
	info.img_layers_n = 1;
	info.img_usage = vk_img_usage_color_attachment_bit
					| vk_img_usage_transfer_dst_bit;
	info.img_sharing_mode = vk_sharing_mode_exclusive;
	info.pre_transform = vk_surf_transform_identity_bit;
	info.composite_alpha = vk_composite_alpha_opaque_bit;
	info.present_mode = vk_present_mode_fifo;
	info.clipped = vk_true;

	r = vk_create_swpchn(&info, 0, &surf_g.dev.swpchn.vk);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:device:%p:surface:%p:unable to create the initial swapchain\n", r, surf_g.dev.vk, surf_g.vk);
		exit(1);
	}
	LOG("0:MAIN:device:%p:surface:%p:swapchain created %p\n", surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk);
}

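/*
 * fifo is the only present mode the vk spec guarantees to be supported,
 * so hard-coding it above is the "safe mode" choice: no tearing, and
 * presentation blocks on vertical blank. mailbox or immediate, when they
 * show up in the present mode dump, would have to be selected per device.
 */
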
static void tmp_present_modes_get(void)
{
	s32 r;

	tmp_present_modes_n_g = tmp_present_modes_n_max;

	r = vk_get_phydev_surf_present_modes(surf_g.dev.phydev.vk, surf_g.vk,
				&tmp_present_modes_n_g, tmp_present_modes_g);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get the physical device present modes for our surface\n", r, surf_g.dev.phydev.vk, surf_g.vk);
		exit(1);
	}
}

static u8 *present_mode_to_str(u32 mode)
{
	switch (mode) {
	case vk_present_mode_immediate:
		return "immediate";
	case vk_present_mode_mailbox:
		return "mailbox";
	case vk_present_mode_fifo:
		return "fifo";
	case vk_present_mode_fifo_relaxed:
		return "fifo relaxed";
	default:
		return "unknown";
	}
}

static void tmp_present_modes_dump(void)
{
	u8 i;

	i = 0;
	LOG("0:MAIN:physical device:%p:surface:%p:%u present modes\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_present_modes_n_g);
	loop {
		if (i == (u8)tmp_present_modes_n_g)
			break;
		LOG("0:MAIN:physical device:%p:surface:%p:present mode=%s\n", surf_g.dev.phydev.vk, surf_g.vk, present_mode_to_str(tmp_present_modes_g[i]));
		++i;
	}
}

static void cpu_img_create(u8 i)
{
	struct vk_img_create_info_t info;
	s32 r;

	memset(&info, 0, sizeof(info));

	info.type = vk_struct_type_img_create_info;
	info.flags = vk_img_create_flag_2d_array_compatible_bit;
	info.img_type = vk_img_type_2d;
	info.texel_mem_blk_fmt = vk_texel_mem_blk_fmt_b8g8r8a8_unorm;
	info.extent.width = APP_CPU_IMG_WIDTH;
	info.extent.height = APP_CPU_IMG_HEIGHT;
	info.extent.depth = 1;
	info.mip_lvls_n = 1;
	info.samples_n = vk_samples_n_1_bit;
	info.array_layers_n = 1;
	info.img_tiling = vk_img_tiling_linear;
	info.usage = vk_img_usage_transfer_src_bit;
	info.initial_layout = vk_img_layout_undefined;

	r = vk_create_img(&info, 0, &surf_g.dev.cpu_imgs[i].vk);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:device:%p:unable to create swapchain cpu image %u\n", r, surf_g.dev.vk, i);
		exit(1);
	}
	LOG("0:MAIN:device:%p:swapchain cpu image %u created %p\n", surf_g.dev.vk, i, surf_g.dev.cpu_imgs[i].vk);
}

static void cpu_imgs_create(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == surf_g.dev.swpchn.imgs_n)
			break;
		cpu_img_create(i);
		++i;
	}
}

static void img_mem_barrier_run_once(u8 i, struct vk_img_mem_barrier_t *b)
{
	s32 r;
	struct vk_cmdbuf_begin_info_t begin_info;
	struct vk_submit_info_t submit_info;

	memset(&begin_info, 0, sizeof(begin_info));
	begin_info.type = vk_struct_type_cmdbuf_begin_info;
	begin_info.flags = vk_cmdbuf_usage_one_time_submit_bit;

	r = vk_begin_cmdbuf(surf_g.dev.cmdbufs[i], &begin_info);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:command buffer:%p:unable to begin recording the initial layout transition command buffer\n", r, surf_g.dev.cmdbufs[i]);
		exit(1);
	}
	/*--------------------------------------------------------------------*/
	vk_cmd_pipeline_barrier(surf_g.dev.cmdbufs[i],
		vk_pipeline_stage_top_of_pipe_bit,
		vk_pipeline_stage_top_of_pipe_bit, 0, 0, 0, 0, 0, 1, b);
	/*--------------------------------------------------------------------*/
	r = vk_end_cmdbuf(surf_g.dev.cmdbufs[i]);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:command buffer:%p:unable to end recording of the initial layout transition command buffer\n", r, surf_g.dev.cmdbufs[i]);
		exit(1);
	}
	/*--------------------------------------------------------------------*/
	memset(&submit_info, 0, sizeof(submit_info));
	submit_info.type = vk_struct_type_submit_info;
	submit_info.cmdbufs_n = 1;
	submit_info.cmdbufs = &surf_g.dev.cmdbufs[i];

	r = vk_q_submit(surf_g.dev.q, 1, &submit_info, 0);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:queue:%p:unable to submit the initial layout transition command buffer\n", r, surf_g.dev.q);
		exit(1);
	}
	/*--------------------------------------------------------------------*/
	r = vk_q_wait_idle(surf_g.dev.q);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:queue:%p:unable to wait for idle or completion of the initial layout transition command buffer\n", r, surf_g.dev.q);
		exit(1);
	}
	/*--------------------------------------------------------------------*/
	/*
	 * since it is tagged to run once, its state is invalid; we need to
	 * reset it to the initial state
	 */
	r = vk_reset_cmdbuf(surf_g.dev.cmdbufs[i], 0);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:command buffer:%p:unable to reset the initial layout transition command buffer\n", r, surf_g.dev.cmdbufs[i]);
		exit(1);
	}
}

static void cpu_img_layout_to_general(u8 i)
{
	struct vk_img_mem_barrier_t b;
	struct vk_img_subrsrc_range_t *r;

	memset(&b, 0, sizeof(b));
	b.type = vk_struct_type_img_mem_barrier;
	b.old_layout = vk_img_layout_undefined;
	b.new_layout = vk_img_layout_general;
	b.src_q_fam = vk_q_fam_ignored;
	b.dst_q_fam = vk_q_fam_ignored;
	b.img = surf_g.dev.cpu_imgs[i].vk;

	r = &b.subrsrc_range;
	r->aspect = vk_img_aspect_color_bit;
	r->lvls_n = 1;
	r->array_layers_n = 1;

	img_mem_barrier_run_once(i, &b);
	LOG("0:MAIN:cpu image:%p[%u]:transition to general layout successful\n", surf_g.dev.cpu_imgs[i].vk, i);
}

/* once in general layout, the dev sees the img */
static void cpu_imgs_layout_to_general(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == surf_g.dev.swpchn.imgs_n)
			break;
		cpu_img_layout_to_general(i);
		++i;
	}
}

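/*
 * the general layout is the one layout valid for every kind of access,
 * both host reads/writes of the linear-tiled mapped imgs and use as a blit
 * src, at the price of giving up layout-specific optimizations. for these
 * cpu staging imgs that tradeoff is exactly the point.
 */
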
static void tmp_cpu_img_mem_rqmts_get(u8 i)
{
	struct vk_img_mem_rqmts_info_t info;
	struct vk_mem_rqmts_t *rqmts;
	s32 r;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_img_mem_rqmts_info;
	info.img = surf_g.dev.cpu_imgs[i].vk;

	rqmts = &tmp_mem_rqmts_g[i];
	memset(rqmts, 0, sizeof(*rqmts));
	rqmts->type = vk_struct_type_mem_rqmts;

	r = vk_get_img_mem_rqmts(&info, rqmts);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:device:%p:unable to get memory requirements for cpu image %u\n", r, surf_g.dev.vk, i);
		exit(1);
	}
	LOG("0:MAIN:device:%p:cpu image %u core requirements are size=%lu bytes, alignment=%lu bytes, memory type=%#08x\n", surf_g.dev.vk, i, (long)rqmts->core.sz, (long)rqmts->core.alignment, rqmts->core.mem_type_bits);
}

static void tmp_cpu_imgs_mem_rqmts_get(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == surf_g.dev.swpchn.imgs_n)
			break;
		tmp_cpu_img_mem_rqmts_get(i);
		++i;
	}
}

#define WANTED_MEM_PROPS (vk_mem_prop_host_visible_bit \
				| vk_mem_prop_host_cached_bit)
#define IS_DEV_LOCAL(x) (((x)->prop_flags & vk_mem_prop_dev_local_bit) != 0)
static bool match_mem_type(u8 mem_type_idx,
	struct vk_mem_rqmts_t *img_rqmts, bool ignore_gpu_is_discret)
{
	struct vk_mem_type_t *mem_type;

	/* first check this mem type is in our img rqmts */
	if (((1 << mem_type_idx) & img_rqmts->core.mem_type_bits) == 0)
		return false;

	mem_type = &surf_g.dev.phydev.mem_types[mem_type_idx];

	if (!ignore_gpu_is_discret)
		if (surf_g.dev.phydev.is_discret_gpu && IS_DEV_LOCAL(mem_type))
			return false;

	if ((mem_type->prop_flags & WANTED_MEM_PROPS) == WANTED_MEM_PROPS)
		return true;
	return false;
}
#undef WANTED_MEM_PROPS
#undef IS_DEV_LOCAL

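/*
 * worked example of the mem_type_bits test above: if an img reports
 * mem_type_bits = 0x6 = 0b0110, only mem type idxs 1 and 2 may back it;
 * idx 2 passes because ((1 << 2) & 0b0110) != 0. scanning idxs from 0
 * upward, as the callers below do, is deliberate: vk orders mem types so
 * that an earlier match is usually the better one.
 */
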
static bool try_alloc_cpu_img_dev_mem(u8 i,
	struct vk_mem_rqmts_t *img_rqmts, u8 mem_type_idx)
{
	struct vk_mem_alloc_info_t info;
	s32 r;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_mem_alloc_info;
	info.sz = img_rqmts->core.sz;
	info.mem_type_idx = mem_type_idx;

	r = vk_alloc_mem(&info, 0, &surf_g.dev.cpu_imgs[i].dev_mem);
	if (r < 0) {
		LOG("0:MAIN:WARNING:%d:device:%p:cpu image:%u:unable to allocate %lu bytes from physical dev %p memory type %u\n", r, surf_g.dev.vk, i, (long)img_rqmts->core.sz, surf_g.dev.phydev.vk, mem_type_idx);
		return false;
	}
	LOG("0:MAIN:device:%p:physical device:%p:cpu image:%u:%lu bytes allocated from memory type %u\n", surf_g.dev.vk, surf_g.dev.phydev.vk, i, (long)img_rqmts->core.sz, mem_type_idx);
	return true;
}

/*
 * we are looking for host visible and host cached mem. on a discrete gpu we
 * would like non dev local mem in order to avoid wasting video ram. if we
 * have a discrete gpu but could not find a mem type without dev local mem,
 * let's retry with only host visible and host cached mem.
 */
#define IGNORE_GPU_IS_DISCRET true
static void cpu_img_dev_mem_alloc(u8 i)
{
	struct vk_mem_rqmts_t *img_rqmts;
	u8 mem_type;

	img_rqmts = &tmp_mem_rqmts_g[i];

	mem_type = 0;
	loop {
		if (mem_type == surf_g.dev.phydev.mem_types_n)
			break;

		if (match_mem_type(mem_type, img_rqmts,
						!IGNORE_GPU_IS_DISCRET)) {
			if (try_alloc_cpu_img_dev_mem(i, img_rqmts, mem_type))
				return;
		}
		++mem_type;
	}

	if (!surf_g.dev.phydev.is_discret_gpu) {
		LOG("0:MAIN:FATAL:physical device:%p:cpu image:%u:unable to find a proper memory type or to allocate memory\n", surf_g.dev.phydev.vk, i);
		exit(1);
	}

	/*
	 * look up again, but relax the discrete-gpu constraint on the match
	 */
	mem_type = 0;
	loop {
		if (mem_type == surf_g.dev.phydev.mem_types_n)
			break;

		if (match_mem_type(mem_type, img_rqmts, IGNORE_GPU_IS_DISCRET)
			&& try_alloc_cpu_img_dev_mem(i, img_rqmts, mem_type))
			return;
		++mem_type;
	}
	LOG("0:MAIN:FATAL:physical device:%p:cpu image:%u:unable to find a proper memory type or to allocate memory\n", surf_g.dev.phydev.vk, i);
	exit(1);
}
#undef IGNORE_GPU_IS_DISCRET

static void cpu_imgs_dev_mem_alloc(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == surf_g.dev.swpchn.imgs_n)
			break;
		cpu_img_dev_mem_alloc(i);
		++i;
	}
}

static void cpu_imgs_dev_mem_bind(void)
{
	struct vk_bind_img_mem_info_t infos[swpchn_imgs_n_max];
	u8 i;
	s32 r;

	memset(infos, 0, sizeof(infos[0]) * surf_g.dev.swpchn.imgs_n);

	i = 0;
	loop {
		if (i == surf_g.dev.swpchn.imgs_n)
			break;
		infos[i].type = vk_struct_type_bind_img_mem_info;
		infos[i].img = surf_g.dev.cpu_imgs[i].vk;
		infos[i].mem = surf_g.dev.cpu_imgs[i].dev_mem;
		++i;
	}
	r = vk_bind_img_mem(surf_g.dev.swpchn.imgs_n, infos);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:device:%p:cpu images:unable to bind device memory to images\n", r, surf_g.dev.vk);
		exit(1);
	}
	LOG("0:MAIN:device:%p:cpu images:bound device memory to images\n", surf_g.dev.vk);
}

static void cpu_imgs_dev_mem_map(void)
{
	u8 i;

	i = 0;
	loop {
		s32 r;

		if (i == surf_g.dev.swpchn.imgs_n)
			break;
		r = vk_map_mem(surf_g.dev.cpu_imgs[i].dev_mem, 0, vk_whole_sz,
					0, &surf_g.dev.cpu_imgs[i].data);
		if (r < 0) {
			LOG("0:MAIN:FATAL:%d:device:%p:cpu image:%u:unable to map image memory\n", r, surf_g.dev.vk, i);
			exit(1);
		}
		LOG("0:MAIN:device:%p:cpu image:%u:image memory mapped\n", surf_g.dev.vk, i);
		++i;
	}
}

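/*
 * (vk_map_mem with vk_whole_sz presumably wraps the standard
 * vkMapMemory(dev, mem, 0, VK_WHOLE_SIZE, 0, &data); the returned ptr
 * stays valid until unmap, so mapping once up front and writing pixels
 * from the cpu every frame needs no further map/unmap calls.)
 */
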
static void cpu_img_subrsrc_layout_get(u8 i)
{
	struct vk_img_subrsrc_t s;

	memset(&s, 0, sizeof(s));

	/* 1 subrsrc = uniq color plane of mip lvl 0 and array 0 */
	s.aspect = vk_img_aspect_color_bit;

	vk_get_img_subrsrc_layout(surf_g.dev.cpu_imgs[i].vk, &s,
					&surf_g.dev.cpu_imgs[i].layout);
	LOG("0:MAIN:device:%p:cpu image:%u:layout:offset=%lu bytes size=%lu bytes row_pitch=%lu bytes array_pitch=%lu bytes depth_pitch=%lu bytes\n", surf_g.dev.vk, i, surf_g.dev.cpu_imgs[i].layout.offset, surf_g.dev.cpu_imgs[i].layout.sz, surf_g.dev.cpu_imgs[i].layout.row_pitch, surf_g.dev.cpu_imgs[i].layout.array_pitch, surf_g.dev.cpu_imgs[i].layout.depth_pitch);
}

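/*
 * with the layout fetched above, a cpu-side write of pixel (x, y) of img i
 * lands, for the 4-bytes-per-texel b8g8r8a8 fmt used here, at:
 *
 *	u8 *texel = (u8*)surf_g.dev.cpu_imgs[i].data
 *		+ surf_g.dev.cpu_imgs[i].layout.offset
 *		+ y * surf_g.dev.cpu_imgs[i].layout.row_pitch
 *		+ x * 4;
 *
 * row_pitch, not width * 4, is what makes this correct: linear imgs are
 * allowed to pad their rows.
 */
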
static void cpu_imgs_subrsrc_layout_get(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == surf_g.dev.swpchn.imgs_n)
			break;
		cpu_img_subrsrc_layout_get(i);
		++i;
	}
}

static void sems_create(void)
{
	s32 r;
	struct vk_sem_create_info_t info;
	u8 sem;

	sem = 0;
	loop {
		if (sem == sems_n)
			break;

		memset(&info, 0, sizeof(info));
		info.type = vk_struct_type_sem_create_info;

		r = vk_create_sem(&info, 0, &surf_g.dev.sems[sem]);
		if (r < 0) {
			LOG("0:MAIN:FATAL:%d:device:%p:unable to create a semaphore %u for our swapchain\n", r, surf_g.dev.vk, sem);
			exit(1);
		}
		LOG("0:MAIN:device:%p:semaphore %u for our swapchain created %p\n", surf_g.dev.vk, sem, surf_g.dev.sems[sem]);

		++sem;
	}
}

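/*
 * (the sems created here are presumably the classic wsi pair: one would be
 * signaled by the acquire-next-img call and waited on by the blit submit,
 * the other signaled by the submit and waited on by present. the main loop
 * wiring them up is not part of this initialization code.)
 */
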
static void cmdbufs_create(void)
{
	s32 r;
	struct vk_cmdbuf_alloc_info_t alloc_info;

	memset(&alloc_info, 0, sizeof(alloc_info));
	alloc_info.type = vk_struct_type_cmdbuf_alloc_info;
	alloc_info.cmdpool = surf_g.dev.cmdpool;
	alloc_info.lvl = vk_cmdbuf_lvl_primary;
	alloc_info.cmdbufs_n = surf_g.dev.swpchn.imgs_n;

	r = vk_alloc_cmdbufs(&alloc_info, surf_g.dev.cmdbufs);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:device:%p:unable to allocate command buffers for our swapchain images from %p command pool\n", r, surf_g.dev.vk, surf_g.dev.cmdpool);
		exit(1);
	}
	LOG("0:MAIN:device:%p:allocated %u command buffers for our swapchain images from %p command pool\n", surf_g.dev.vk, surf_g.dev.swpchn.imgs_n, surf_g.dev.cmdpool);
}

static void cmdbuf_record(u8 i)
{
	s32 r;
	struct vk_cmdbuf_begin_info_t begin_info;
	struct vk_img_mem_barrier_t b;
	struct vk_img_blit_t region;
	/*--------------------------------------------------------------------*/
	memset(&begin_info, 0, sizeof(begin_info));
	begin_info.type = vk_struct_type_cmdbuf_begin_info;

	r = vk_begin_cmdbuf(surf_g.dev.cmdbufs[i], &begin_info);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:swapchain img:%u:command buffer:%p:unable to begin recording\n", r, i, surf_g.dev.cmdbufs[i]);
		exit(1);
	}
	/*--------------------------------------------------------------------*/
	/* acquired img (undefined layout) to presentation layout */
	memset(&b, 0, sizeof(b));
	b.type = vk_struct_type_img_mem_barrier;
	b.old_layout = vk_img_layout_undefined;
	b.new_layout = vk_img_layout_present;
	b.src_q_fam = vk_q_fam_ignored;
	b.dst_q_fam = vk_q_fam_ignored;
	b.img = surf_g.dev.swpchn.imgs[i];

	b.subrsrc_range.aspect = vk_img_aspect_color_bit;
	b.subrsrc_range.lvls_n = 1;
	b.subrsrc_range.array_layers_n = 1;

	vk_cmd_pipeline_barrier(surf_g.dev.cmdbufs[i],
		vk_pipeline_stage_top_of_pipe_bit,
		vk_pipeline_stage_top_of_pipe_bit, 0, 0, 0, 0, 0, 1, &b);
	/*--------------------------------------------------------------------*/
	/* blit from cpu img to pe img */
	memset(&region, 0, sizeof(region));
	region.src_subrsrc.aspect = vk_img_aspect_color_bit;
	region.src_subrsrc.array_layers_n = 1;
	region.src_offsets[1].x = APP_CPU_IMG_WIDTH;
	region.src_offsets[1].y = APP_CPU_IMG_HEIGHT;
	region.dst_subrsrc.aspect = vk_img_aspect_color_bit;
	region.dst_subrsrc.array_layers_n = 1;
	region.dst_offsets[1].x = APP_CPU_IMG_WIDTH;
	region.dst_offsets[1].y = APP_CPU_IMG_HEIGHT;

	vk_cmd_blit_img(surf_g.dev.cmdbufs[i], surf_g.dev.cpu_imgs[i].vk,
		vk_img_layout_general, surf_g.dev.swpchn.imgs[i],
		vk_img_layout_present, 1, &region, 0);
	/*--------------------------------------------------------------------*/
	r = vk_end_cmdbuf(surf_g.dev.cmdbufs[i]);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:swapchain img:%u:command buffer:%p:unable to end recording\n", r, i, surf_g.dev.cmdbufs[i]);
		exit(1);
	}
}

|
static void cmdbufs_record(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == surf_g.dev.swpchn.imgs_n)
			break;
		cmdbuf_record(i);
		++i;
	}
}

static void phydev_init(void)
{
	tmp_phydevs_get();
	/*--------------------------------------------------------------------*/
	tmp_phydevs_exts_dump();
	tmp_phydevs_props_dump();
	tmp_phydevs_mem_props_get();
	tmp_phydevs_mem_props_dump();
	/*--------------------------------------------------------------------*/
	tmp_phydevs_q_fams_get();
	tmp_phydevs_q_fams_dump();
	tmp_phydevs_q_fams_surf_support_get();
	/*--------------------------------------------------------------------*/
	tmp_phydev_and_q_fam_select();
	/*--------------------------------------------------------------------*/
	texel_mem_blk_conf_select();
	/*--------------------------------------------------------------------*/
	tmp_surf_caps_get();
	tmp_surf_caps_dump();
	/*--------------------------------------------------------------------*/
	tmp_present_modes_get();
	tmp_present_modes_dump();
}
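
/*
 * once a physical device and a queue family are selected, create the
 * logical device, resolve its function pointers, fetch the queue and
 * create the command pool, in that order.
 */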
|
static void dev_init(void)
{
	phydev_init();
	/*--------------------------------------------------------------------*/
	dev_create();
	dev_syms();
	q_get();
	cmdpools_create();
}
|
static void surf_init(void)
{
	surf_create();
	dev_init();
	swpchn_init();
	swpchn_imgs_get();

	/* our cpu imgs for swpchn imgs */
	cpu_imgs_create();
	sems_create();
	cmdbufs_create();
	cpu_imgs_layout_to_general();
	cpu_imgs_subrsrc_layout_get();
	tmp_cpu_imgs_mem_rqmts_get();
	cpu_imgs_dev_mem_alloc();
	cpu_imgs_dev_mem_bind();
	cpu_imgs_dev_mem_map();
	cmdbufs_record();
}
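
/*
 * vk bootstrap order: load the loader, resolve the loader then
 * instance-independent symbols, check the version, create the instance,
 * resolve the instance symbols, and only then build surface/device state.
 */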
|
static void init_vk(void)
{
	load_vk_loader();
	loader_syms();
	instance_static_syms();
	check_vk_version();
	instance_exts_dump();
	instance_layers_dump();
	/*--------------------------------------------------------------------*/
	instance_create();
	instance_syms();
	/*--------------------------------------------------------------------*/
	surf_init();
}
|
static void swpchn_acquire_next_img(u32 *i)
{
	struct vk_acquire_next_img_info_t info;
	s32 r;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_acquire_next_img_info;
	info.swpchn = surf_g.dev.swpchn.vk;
	info.timeout = u64_max; /* infinite */
	info.devs = 0x00000001; /* no device group then 1 */
	info.sem = surf_g.dev.sems[sem_acquire_img_done];

	r = vk_acquire_next_img(&info, i);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:device:%p:unable to acquire next image from swapchain %p\n", r, surf_g.dev.vk, surf_g.dev.swpchn.vk);
		exit(1);
	}
	/* XXX:TRACE */
	LOG("0:MAIN:device:%p:swapchain:%p:acquired image %u\n", surf_g.dev.vk, surf_g.dev.swpchn.vk, *i);
}
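
/*
 * the cpu images are linear-tiled and host-mapped, so texels are
 * addressed with the row_pitch from vk_get_img_subrsrc_layout:
 *   byte offset of (row, col) = row * row_pitch + col * 4
 * row_pitch may be larger than width * 4 because of alignment padding,
 * hence the explicit computation below instead of
 * texel[row * width + col].
 */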
|
/* solid color */
static void cpu_img_draw(u8 i)
{
	u32 *texel;
	u64 row;
	u64 col;

	texel = (u32*)surf_g.dev.cpu_imgs[i].data;
	row = 0;
	loop {
		if (row == APP_CPU_IMG_HEIGHT)
			break;
		col = 0;
		loop {
			struct vk_subrsrc_layout_t *l;
			u64 o; /* _byte_ offset */
			u64 o_dw; /* _32 bits_ dword offset */

			if (col == APP_CPU_IMG_WIDTH)
				break;

			l = &surf_g.dev.cpu_imgs[i].layout;

			o = row * l->row_pitch + col * sizeof(*texel);
			o_dw = o >> 2;

			texel[o_dw] = fill_texel_g;

			++col;
		}
		++row;
	}
}
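
/*
 * per-frame synchronisation: the submit waits on sem_acquire_img_done
 * (the presentation engine is done handing the image over), runs the
 * pre-recorded command buffer, then signals sem_blit_done; the present
 * request in turn waits on sem_blit_done. no fence is used: rendering
 * is driven synchronously from the event loop (see run below).
 */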
|
static void cpu_img_to_pe(u8 i)
{
	s32 r;
	struct vk_submit_info_t submit_info;
	struct vk_present_info_t present_info;
	u32 wait_dst_stage;
	u32 idxs[1];

	memset(&submit_info, 0, sizeof(submit_info));
	submit_info.type = vk_struct_type_submit_info;
	submit_info.wait_sems_n = 1;
	submit_info.wait_sems = &surf_g.dev.sems[sem_acquire_img_done];
	/* vk requires a dst stage mask per wait semaphore */
	wait_dst_stage = vk_pipeline_stage_top_of_pipe_bit;
	submit_info.wait_dst_stages = &wait_dst_stage;
	submit_info.cmdbufs_n = 1;
	submit_info.cmdbufs = &surf_g.dev.cmdbufs[i];
	submit_info.signal_sems_n = 1;
	submit_info.signal_sems = &surf_g.dev.sems[sem_blit_done];

	r = vk_q_submit(surf_g.dev.q, 1, &submit_info, 0);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:queue:%p:unable to submit the image pre-recorded command buffer\n", r, surf_g.dev.q);
		exit(1);
	}
	/*--------------------------------------------------------------------*/
	idxs[0] = i;
	memset(&present_info, 0, sizeof(present_info));
	present_info.type = vk_struct_type_present_info;
	present_info.wait_sems_n = 1;
	present_info.wait_sems = &surf_g.dev.sems[sem_blit_done];
	present_info.swpchns_n = 1;
	present_info.swpchns = &surf_g.dev.swpchn.vk;
	present_info.idxs = idxs;
	present_info.results = 0;

	r = vk_q_present(surf_g.dev.q, &present_info);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:queue:%p:unable to submit the image %u to the presentation engine\n", r, surf_g.dev.q, i);
		exit(1);
	}
}
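
/*
 * one frame: acquire a swapchain image, paint the matching cpu image
 * with the current solid color, blit and present it, then flip
 * fill_texel_g between 0x0000ff00 and 0x00ff0000 (green and red in
 * b8g8r8a8 packing on a little-endian cpu) for the next frame.
 */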
|
static void render(void)
{
	u32 i;

	swpchn_acquire_next_img(&i);
	cpu_img_draw(i); /* cpu rendering */
	cpu_img_to_pe(i);

	do_render_g = false;
	if (fill_texel_g == 0x0000ff00)
		fill_texel_g = 0x00ff0000;
	else
		fill_texel_g = 0x0000ff00;
}
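
/*
 * event handling is the only pacing: block until at least one xcb evt,
 * drain whatever else is queued, and render at most once per drained
 * batch if one of the handlers flagged do_render_g.
 */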
|
/* "main" loop */
static void run(void)
{
	state_g = state_run;
	loop {
		xcb_generic_event_t *e;

		do_render_g = false;

		/* "evts which could lead to change what we display" */
		e = dl_xcb_wait_for_event(app_xcb.c);
		if (e == 0) { /* i/o err */
			LOG("0:MAIN:xcb:'%s':connection:%p:event:input/output error | x11 server connection lost\n", app_xcb.disp_env, app_xcb.c);
			break;
		}

		loop { /* drain evts */
			app_xcb_evt_handle(e);
			free(e);

			if (state_g == state_quit)
				return;

			e = dl_xcb_poll_for_event(app_xcb.c);
			if (e == 0)
				break;
		}

		/* synchronous rendering */
		if (do_render_g)
			render();
	}
}
|
int main(void)
{
	LOG("0:starting app\n");
	app_xcb_init();
	init_vk();
	fill_texel_g = 0x0000ff00;
	run();
	LOG("0:exiting app\n");
	exit(0);
}
#define CLEANUP
#include "namespace/app.c"
#include "namespace/vk_syms.c"
#undef CLEANUP
#endif
File 2d/kickstart/vk_types.h added (mode: 100644) (index 0000000..28c778d) |
|
#ifndef VK_TYPES_H
#define VK_TYPES_H
/*
 * this is public domain without any warranties of any kind
 * Sylvain BERTRAND
 */
/* XXX: KEEP AN EYE ON ABBREVIATIONS */
/*
 * XXX: we are fixing the vulkan ABI which made the huge mistake to use
 * enums as function parameters or function return values.
 */
#include <stddef.h>
#include <xcb.h> /* we use the xcb wsi */
#include "app_core_types.h"
/* macro */
/*----------------------------------------------------------------------------*/
#define vk_true 1
#define vk_false 0
#define vk_whole_sz 0xffffffffffffffff
#define vk_q_fam_ignored 0xffffffff
/*----------------------------------------------------------------------------*/
#define VK_VERSION_MAJOR(x) ((x) >> 22)
#define VK_VERSION_MINOR(x) (((x) >> 12) & 0x3ff)
#define VK_VERSION_PATCH(x) ((x) & 0xfff)
/* macro */
/******************************************************************************/
/* types */
/*
 * on 64-bit platforms, enums default to 32-bit storage but may grow up to
 * 64 bits based on the range of values they hold; each enumerator is
 * individually typed to 32 bits when possible, signed or unsigned. this
 * matters for the vulkan ABI we are pinning down here.
 * XXX: all vulkan enums use 32-bit storage
 */
enum {
	vk_err_out_of_host_mem = -1,
	/*--------------------------------------------------------------------*/
	vk_success = 0,
	vk_incomplete = 5,
	vk_r_enum_max = 0x7fffffff
};

enum {
	vk_struct_type_instance_create_info = 1,
	vk_struct_type_dev_q_create_info = 2,
	vk_struct_type_dev_create_info = 3,
	vk_struct_type_submit_info = 4,
	vk_struct_type_mem_alloc_info = 5,
	vk_struct_type_fence_create_info = 8,
	vk_struct_type_sem_create_info = 9,
	vk_struct_type_img_create_info = 14,
	vk_struct_type_cmdpool_create_info = 39,
	vk_struct_type_cmdbuf_alloc_info = 40,
	vk_struct_type_cmdbuf_begin_info = 42,
	vk_struct_type_img_mem_barrier = 45,
	/* extension number 2 or index 1, offset 0 */
	vk_struct_type_swpchn_create_info = 1000000000 + 1000 + 0,
	/* extension number 2 or index 1, offset 1 */
	vk_struct_type_present_info = 1000000000 + 1000 + 1,
	/* extension number 6 or index 5, offset 0 */
	vk_struct_type_xcb_surf_create_info = 1000000000 + 5000 + 0,
	/* extension number 60 or index 59, offset 1 */
	vk_struct_type_phydev_props = 1000000000 + 59000 + 1,
	/* extension number 60 or index 59, offset 5 */
	vk_struct_type_q_fam_props = 1000000000 + 59000 + 5,
	/* extension number 60 or index 59, offset 6 */
	vk_struct_type_phydev_mem_props = 1000000000 + 59000 + 6,
	/* extension number 60 or index 59, offset 10 */
	vk_struct_type_acquire_next_img_info = 1000000000 + 59000 + 10,
	/* extension number 91 or index 90, offset 0 */
	vk_struct_type_surf_caps = 1000000000 + 90000 + 0,
	/* extension number 120 or index 119, offset 0 */
	vk_struct_type_phydev_surf_info = 1000000000 + 119000 + 0,
	/* extension number 120 or index 119, offset 2 */
	vk_struct_type_surf_texel_mem_blk_conf = 1000000000 + 119000 + 2,
	/* extension number 147 or index 146, offset 1 */
	vk_struct_type_img_mem_rqmts_info = 1000000000 + 146000 + 1,
	/* extension number 147 or index 146, offset 3 */
	vk_struct_type_mem_rqmts = 1000000000 + 146000 + 3,
	/* extension number 158 or index 157, offset 1 */
	vk_struct_type_bind_img_mem_info = 1000000000 + 157000 + 1,
	vk_struct_type_enum_max = 0x7fffffff
};
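
/*
 * extension-provided struct type values follow a fixed encoding:
 * 1000000000 + extension index * 1000 + offset, with extension index =
 * extension number - 1. e.g. the swapchain extension is number 2, so its
 * create-info struct type is 1000000000 + 1 * 1000 + 0 = 1000001000.
 */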
|
struct vk_instance_create_info_t {
	u32 type;
	void *next;
	u32 flags;
	void *app_info; /* allow easy hidden driver optimizations: no! */
	u32 enabled_layers_n;
	u8 **enabled_layer_names;
	u32 enabled_exts_n;
	u8 **enabled_ext_names;
};

#define VK_MAX_EXT_NAME_SZ 256
struct vk_ext_props_t {
	u8 name[VK_MAX_EXT_NAME_SZ];
	u32 spec_version;
};

#define VK_MAX_DESC_SZ 256
struct vk_layer_props_t {
	u8 name[VK_MAX_EXT_NAME_SZ];
	u32 spec_version;
	u32 implementation_version;
	u8 desc[VK_MAX_DESC_SZ];
};

enum {
	vk_phydev_type_other = 0,
	vk_phydev_type_integrated_gpu = 1,
	vk_phydev_type_discrete_gpu = 2,
	vk_phydev_type_virtual_gpu = 3,
	vk_phydev_type_cpu = 4,
	vk_phydev_type_enum_max = 0x7fffffff
};

struct vk_phydev_limits_t {
	u32 not_used_00[11];
	u64 not_used_01[2];
	u32 not_used_02[51];
	float not_used_03[2];
	u32 not_used_04[3];
	float not_used_05[2];
	u32 not_used_06;
	size_t not_used_07;
	u64 not_used_08[3];
	u32 not_used_09[4];
	float not_used_10[2];
	u32 not_used_11[16];
	float not_used_12;
	u32 not_used_13[4];
	float not_used_14[6];
	u32 not_used_15[2];
	u64 not_used_16[3];
};

struct vk_phydev_sparse_props_t {
	u32 not_used[5];
};

/*----------------------------------------------------------------------------*/
#define VK_MAX_PHYDEV_NAME_SZ 256
#define VK_UUID_SZ 16
struct vk_phydev_props_core_t {
	u32 api_version;
	u32 driver_version;
	u32 vendor_id;
	u32 dev_id;
	u32 dev_type;
	u8 name[VK_MAX_PHYDEV_NAME_SZ];
	u8 pipeline_cache_uuid[VK_UUID_SZ];
	struct vk_phydev_limits_t limits;
	struct vk_phydev_sparse_props_t sparse_props;
};
/* the vulkan 1.1 version */
struct vk_phydev_props_t {
	u32 type;
	void *next;
	struct vk_phydev_props_core_t core;
};
/*----------------------------------------------------------------------------*/
enum {
	vk_q_gfx_bit = 0x00000001,
	vk_q_compute_bit = 0x00000002,
	vk_q_transfer_bit = 0x00000004,
	vk_q_sparse_binding_bit = 0x00000008,
	vk_q_protected_bit = 0x00000010,
	vk_q_flag_bits_enum_max = 0x7fffffff
};

struct vk_extent_3d_t {
	u32 width;
	u32 height;
	u32 depth;
};
/*----------------------------------------------------------------------------*/
struct vk_q_fam_props_core_t {
	u32 flags;
	u32 qs_n;
	u32 timestamp_valid_bits;
	struct vk_extent_3d_t min_img_transfer_granularity;
};

struct vk_q_fam_props_t {
	u32 type;
	void *next;
	struct vk_q_fam_props_core_t core;
};
/*----------------------------------------------------------------------------*/
struct vk_phydev_features_t {
	u32 not_used[55];
};

struct vk_dev_q_create_info_t {
	u32 type;
	void *next;
	u32 flags;
	u32 q_fam;
	u32 qs_n;
	float *q_prios;
};

struct vk_dev_create_info_t {
	u32 type;
	void *next;
	u32 flags;
	u32 q_create_infos_n;
	struct vk_dev_q_create_info_t *q_create_infos;
	u32 do_not_use_0;
	void *do_not_use_1;
	u32 enabled_exts_n;
	u8 **enabled_ext_names;
	void *do_not_use_2;
};

enum {
	vk_cmdpool_create_transient_bit = 0x00000001,
	vk_cmdpool_create_reset_cmdbuf_bit = 0x00000002,
	vk_cmdpool_create_flag_bits_enum_max = 0x7fffffff
};

struct vk_cmdpool_create_info_t {
	u32 type;
	void *next;
	u32 flags;
	u32 q_fam;
};

struct vk_xcb_surf_create_info_t {
	u32 type;
	void *next;
	u32 flags;
	xcb_connection_t *c;
	xcb_window_t win;
};

struct vk_phydev_surf_info_t {
	u32 type;
	void *next;
	void *surf;
};

enum {
	vk_texel_mem_blk_fmt_undefined = 0,
	vk_texel_mem_blk_fmt_b8g8r8a8_unorm = 44,
	vk_texel_mem_blk_fmt_b8g8r8a8_srgb = 50,
	vk_texel_mem_blk_fmt_enum_max = 0x7fffffff
};

enum {
	vk_color_space_srgb_nonlinear = 0,
	vk_color_space_enum_max = 0x7fffffff
};

struct vk_surf_texel_mem_blk_conf_core_t {
	u32 fmt;
	u32 color_space;
};

struct vk_surf_texel_mem_blk_conf_t {
	u32 type;
	void *next;
	struct vk_surf_texel_mem_blk_conf_core_t core;
};
/*----------------------------------------------------------------------------*/
enum {
	vk_mem_prop_dev_local_bit = 0x00000001,
	vk_mem_prop_host_visible_bit = 0x00000002,
	vk_mem_prop_host_cached_bit = 0x00000008,
	vk_mem_prop_flag_bits_enum_max = 0x7fffffff
};

struct vk_mem_type_t {
	u32 prop_flags;
	u32 heap;
};
/*----------------------------------------------------------------------------*/
enum {
	vk_mem_heap_dev_local_bit = 0x00000001,
	vk_mem_heap_multi_instance_bit = 0x00000002,
	vk_mem_heap_flag_bits_enum_max = 0x7fffffff
};

struct vk_mem_heap_t {
	u64 sz;
	u32 flags;
};
/*----------------------------------------------------------------------------*/
#define VK_MEM_TYPES_N_MAX 32
#define VK_MEM_HEAPS_N_MAX 16
struct vk_phydev_mem_props_core_t {
	u32 mem_types_n;
	struct vk_mem_type_t mem_types[VK_MEM_TYPES_N_MAX];
	u32 mem_heaps_n;
	struct vk_mem_heap_t mem_heaps[VK_MEM_HEAPS_N_MAX];
};

struct vk_phydev_mem_props_t {
	u32 type;
	void *next;
	struct vk_phydev_mem_props_core_t core;
};
/*----------------------------------------------------------------------------*/
struct vk_extent_2d_t {
	u32 width;
	u32 height;
};

enum {
	vk_surf_transform_identity_bit = 0x00000001,
	vk_surf_transform_flag_bits_enum_max = 0x7fffffff
};

enum {
	vk_composite_alpha_opaque_bit = 0x00000001,
	vk_composite_alpha_flag_bits_enum_max = 0x7fffffff
};

enum {
	vk_img_usage_transfer_src_bit = 0x00000001,
	vk_img_usage_transfer_dst_bit = 0x00000002,
	vk_img_usage_color_attachment_bit = 0x00000010,
	vk_img_usage_flag_bits_enum_max = 0x7fffffff
};

struct vk_surf_caps_core_t {
	u32 imgs_n_min;
	u32 imgs_n_max;
	struct vk_extent_2d_t current_extent;
	struct vk_extent_2d_t img_extent_min;
	struct vk_extent_2d_t img_extent_max;
	u32 img_array_layers_n_max;
	u32 supported_transforms;
	u32 current_transform;
	u32 supported_composite_alpha;
	u32 supported_img_usage_flags;
};

struct vk_surf_caps_t {
	u32 type;
	void *next;
	struct vk_surf_caps_core_t core;
	u32 flags;
	void *surf;
	u32 imgs_n;
};
/*----------------------------------------------------------------------------*/
enum {
	vk_sharing_mode_exclusive = 0,
	vk_sharing_mode_enum_max = 0x7fffffff
};

enum {
	vk_present_mode_immediate = 0,
	vk_present_mode_mailbox = 1,
	vk_present_mode_fifo = 2,
	vk_present_mode_fifo_relaxed = 3,
	vk_present_mode_enum_max = 0x7fffffff
};

struct vk_swpchn_create_info_t {
	u32 type;
	void *next;
	u32 flags;
	void *surf;
	u32 imgs_n_min;
	u32 img_texel_mem_blk_fmt;
	u32 img_color_space;
	struct vk_extent_2d_t img_extent;
	u32 img_layers_n;
	u32 img_usage;
	u32 img_sharing_mode;
	u32 q_fams_n;
	u32 *q_fams;
	u32 pre_transform;
	u32 composite_alpha;
	u32 present_mode;
	u32 clipped;
	void *old_swpchn;
};

enum {
	vk_img_type_2d = 1,
	vk_img_type_enum_max = 0x7fffffff
};

enum {
	vk_samples_n_1_bit = 0x00000001,
	vk_samples_n_enum_max = 0x7fffffff
};

enum {
	vk_img_tiling_optimal = 0,
	vk_img_tiling_linear = 1,
	vk_img_tiling_enum_max = 0x7fffffff
};

enum {
	vk_img_create_flag_2d_array_compatible_bit = 0x00000002,
	vk_img_create_flag_enum_max = 0x7fffffff
};

enum {
	vk_img_layout_undefined = 0,
	vk_img_layout_general = 1,
	/* extension number 2 or index 1, offset 2 */
	vk_img_layout_present = 1000000000 + 1000 + 2,
	vk_img_layout_enum_n_max = 0x7fffffff
};

struct vk_img_create_info_t {
	u32 type;
	void *next;
	u32 flags;
	u32 img_type;
	u32 texel_mem_blk_fmt;
	struct vk_extent_3d_t extent;
	u32 mip_lvls_n;
	u32 array_layers_n;
	u32 samples_n; /* flags */
	u32 img_tiling;
	u32 usage;
	u32 sharing_mode;
	u32 q_fams_n;
	u32 *q_fams;
	u32 initial_layout;
};

struct vk_img_mem_rqmts_info_t {
	u32 type;
	void *next;
	void *img;
};

struct vk_mem_rqmts_core_t {
	u64 sz;
	u64 alignment;
	/* idxs of bits are idxs in mem types of vk_phydev_mem_props_core_t */
	u32 mem_type_bits;
};

struct vk_mem_rqmts_t {
	u32 type;
	void *next;
	struct vk_mem_rqmts_core_t core;
};

struct vk_mem_alloc_info_t {
	u32 type;
	void *next;
	u64 sz;
	u32 mem_type_idx; /* in the physical device array of memory types */
};

struct vk_bind_img_mem_info_t {
	u32 type;
	void *next;
	void *img;
	void *mem;
	u64 offset;
};

enum {
	vk_pipeline_stage_top_of_pipe_bit = (1 << 0),
	vk_pipeline_stage_bottom_of_pipe_bit = (1 << 13),
	vk_pipeline_stage_enum_max = 0x7fffffff
};

enum {
	vk_img_aspect_color_bit = 1,
	vk_img_aspect_enum_max = 0x7fffffff
};

struct vk_img_subrsrc_range_t {
	u32 aspect;
	u32 base_mip_lvl;
	u32 lvls_n;
	u32 base_array_layer;
	u32 array_layers_n;
};

struct vk_img_mem_barrier_t {
	u32 type;
	void *next;
	u32 src_access;
	u32 dst_access;
	u32 old_layout;
	u32 new_layout;
	u32 src_q_fam;
	u32 dst_q_fam;
	void *img;
	struct vk_img_subrsrc_range_t subrsrc_range;
};

enum {
	vk_cmdbuf_lvl_primary = 0,
	vk_cmdbuf_lvl_enum_max = 0x7fffffff
};

struct vk_cmdbuf_alloc_info_t {
	u32 type;
	void *next;
	void *cmdpool;
	u32 lvl;
	u32 cmdbufs_n;
};

enum {
	vk_cmdbuf_usage_one_time_submit_bit = 0x00000001,
	vk_cmdbuf_usage_enum_max = 0x7fffffff
};

struct vk_cmdbuf_begin_info_t {
	u32 type;
	void *next;
	u32 flags;
	void *do_not_use;
};

struct vk_submit_info_t {
	u32 type;
	void *next;
	u32 wait_sems_n;
	void **wait_sems;
	u32 *wait_dst_stages;
	u32 cmdbufs_n;
	void **cmdbufs;
	u32 signal_sems_n;
	void **signal_sems;
};

struct vk_img_subrsrc_t {
	u32 aspect;
	u32 mip_lvl;
	u32 array_layer;
};

struct vk_subrsrc_layout_t {
	u64 offset;
	u64 sz;
	u64 row_pitch;
	u64 array_pitch;
	u64 depth_pitch;
};

struct vk_acquire_next_img_info_t {
	u32 type;
	void *next;
	void *swpchn;
	u64 timeout;
	void *sem;
	void *fence;
	u32 devs;
};

struct vk_fence_create_info_t {
	u32 type;
	void *next;
	u32 flags;
};

struct vk_img_subrsrc_layers_t {
	u32 aspect;
	u32 mip_lvl;
	u32 base_array_layer;
	u32 array_layers_n;
};

/* signed in the vulkan ABI: offsets may be negative (e.g. flipped blits) */
struct vk_offset_3d_t {
	s32 x;
	s32 y;
	s32 z;
};

struct vk_img_blit_t {
	struct vk_img_subrsrc_layers_t src_subrsrc;
	struct vk_offset_3d_t src_offsets[2];
	struct vk_img_subrsrc_layers_t dst_subrsrc;
	struct vk_offset_3d_t dst_offsets[2];
};

struct vk_present_info_t {
	u32 type;
	void *next;
	u32 wait_sems_n;
	void **wait_sems;
	u32 swpchns_n;
	void **swpchns;
	u32 *idxs;
	s32 *results;
};

struct vk_sem_create_info_t {
	u32 type;
	void *next;
	u32 flags;
};
/******************************************************************************/
/* dev function pointers prototypes with some namespace/local keywords */
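
/*
 * each macro below expands to a call through the matching dl_vk_*
 * device-level function pointer (declared in VK_DEV_SYMS below and
 * resolved at runtime); where the vk call takes the device as first
 * parameter, the macro prepends the handle itself, so a call site like
 * vk_create_sem(&info, 0, &sem) becomes
 * app_surf.dev.dl_vk_create_sem(app_surf.dev.vk, &info, 0, &sem).
 */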
|
#define vk_get_dev_q(...) \
	app_surf.dev.dl_vk_get_dev_q(app_surf.dev.vk,##__VA_ARGS__)

#define vk_create_cmdpool(...) \
	app_surf.dev.dl_vk_create_cmdpool(app_surf.dev.vk,##__VA_ARGS__)

#define vk_create_swpchn(...) \
	app_surf.dev.dl_vk_create_swpchn(app_surf.dev.vk,##__VA_ARGS__)

#define vk_get_swpchn_imgs(...) \
	app_surf.dev.dl_vk_get_swpchn_imgs(app_surf.dev.vk,##__VA_ARGS__)

#define vk_create_img(...) \
	app_surf.dev.dl_vk_create_img(app_surf.dev.vk,##__VA_ARGS__)

#define vk_get_img_mem_rqmts(...) \
	app_surf.dev.dl_vk_get_img_mem_rqmts(app_surf.dev.vk,##__VA_ARGS__)

#define vk_alloc_mem(...) \
	app_surf.dev.dl_vk_alloc_mem(app_surf.dev.vk,##__VA_ARGS__)

#define vk_bind_img_mem(...) \
	app_surf.dev.dl_vk_bind_img_mem(app_surf.dev.vk,##__VA_ARGS__)

#define vk_map_mem(...) \
	app_surf.dev.dl_vk_map_mem(app_surf.dev.vk,##__VA_ARGS__)

#define vk_alloc_cmdbufs(...) \
	app_surf.dev.dl_vk_alloc_cmdbufs(app_surf.dev.vk,##__VA_ARGS__)

#define vk_free_cmdbufs(...) \
	app_surf.dev.dl_vk_free_cmdbufs(app_surf.dev.vk,##__VA_ARGS__)

#define vk_begin_cmdbuf(...) \
	app_surf.dev.dl_vk_begin_cmdbuf(__VA_ARGS__)

#define vk_end_cmdbuf(...) \
	app_surf.dev.dl_vk_end_cmdbuf(__VA_ARGS__)

#define vk_cmd_pipeline_barrier(...) \
	app_surf.dev.dl_vk_cmd_pipeline_barrier(__VA_ARGS__)

#define vk_q_submit(...) \
	app_surf.dev.dl_vk_q_submit(__VA_ARGS__)

#define vk_q_wait_idle(...) \
	app_surf.dev.dl_vk_q_wait_idle(__VA_ARGS__)

#define vk_get_img_subrsrc_layout(...) \
	app_surf.dev.dl_vk_get_img_subrsrc_layout(app_surf.dev.vk,##__VA_ARGS__)

#define vk_acquire_next_img(...) \
	app_surf.dev.dl_vk_acquire_next_img(app_surf.dev.vk,##__VA_ARGS__)

#define vk_create_fence(...) \
	app_surf.dev.dl_vk_create_fence(app_surf.dev.vk,##__VA_ARGS__)

#define vk_reset_cmdbuf(...) \
	app_surf.dev.dl_vk_reset_cmdbuf(__VA_ARGS__)

#define vk_cmd_blit_img(...) \
	app_surf.dev.dl_vk_cmd_blit_img(__VA_ARGS__)

#define vk_wait_for_fences(...) \
	app_surf.dev.dl_vk_wait_for_fences(app_surf.dev.vk,##__VA_ARGS__)

#define vk_reset_fences(...) \
	app_surf.dev.dl_vk_reset_fences(app_surf.dev.vk,##__VA_ARGS__)

#define vk_q_present(...) \
	app_surf.dev.dl_vk_q_present(__VA_ARGS__)

#define vk_create_sem(...) \
	app_surf.dev.dl_vk_create_sem(app_surf.dev.vk,##__VA_ARGS__)
/*----------------------------------------------------------------------------*/
|
#define VK_DEV_SYMS \
void (*dl_vk_get_dev_q)(void *dev, u32 fam, u32 q_idx, void **q); \
s32 (*dl_vk_create_cmdpool)( \
	void *dev, \
	struct vk_cmdpool_create_info_t *create_info, \
	void *allocator, \
	void **vk_cmdpool); \
s32 (*dl_vk_create_swpchn)( \
	void *dev, \
	struct vk_swpchn_create_info_t *info, \
	void *allocator, \
	void **swpchn); \
s32 (*dl_vk_get_swpchn_imgs)( \
	void *dev, \
	void *swpchn, \
	u32 *imgs_n, \
	void **imgs); \
s32 (*dl_vk_create_img)( \
	void *dev, \
	struct vk_img_create_info_t *info, \
	void *allocator, \
	void **img); \
s32 (*dl_vk_get_img_mem_rqmts)( \
	void *dev, \
	struct vk_img_mem_rqmts_info_t *info, \
	struct vk_mem_rqmts_t *mem_rqmts); \
s32 (*dl_vk_alloc_mem)( \
	void *dev, \
	struct vk_mem_alloc_info_t *info, \
	void *allocator, \
	void **mem); \
s32 (*dl_vk_bind_img_mem)( \
	void *dev, \
	u32 infos_n, \
	struct vk_bind_img_mem_info_t *infos); \
s32 (*dl_vk_map_mem)( \
	void *dev, \
	void *mem, \
	u64 offset, \
	u64 sz, \
	u32 flags, \
	void **data); \
s32 (*dl_vk_alloc_cmdbufs)( \
	void *dev, \
	struct vk_cmdbuf_alloc_info_t *info, \
	void **cmdbufs); \
void (*dl_vk_free_cmdbufs)( \
	void *dev, \
	void *cmdpool, \
	u32 cmdbufs_n, \
	void **cmdbufs); \
s32 (*dl_vk_begin_cmdbuf)( \
	void *cmdbuf, \
	struct vk_cmdbuf_begin_info_t *info); \
s32 (*dl_vk_end_cmdbuf)(void *cmdbuf); \
void (*dl_vk_cmd_pipeline_barrier)( \
	void *cmdbuf, \
	u32 src_stage, \
	u32 dst_stage, \
	u32 dependency_flags, \
	u32 mem_barriers_n, \
	void *mem_barriers, \
	u32 buf_mem_barriers_n, \
	void *buf_mem_barriers, \
	u32 img_mem_barriers_n, \
	struct vk_img_mem_barrier_t *img_mem_barriers); \
s32 (*dl_vk_q_submit)( \
	void *q, \
	u32 submits_n, \
	struct vk_submit_info_t *submits, \
	void *fence); \
s32 (*dl_vk_q_wait_idle)(void *q); \
void (*dl_vk_get_img_subrsrc_layout)( \
	void *dev, \
	void *img, \
	struct vk_img_subrsrc_t *subrsrc, \
	struct vk_subrsrc_layout_t *layout); \
s32 (*dl_vk_acquire_next_img)( \
	void *dev, \
	struct vk_acquire_next_img_info_t *info, \
	u32 *img_idx); \
s32 (*dl_vk_create_fence)( \
	void *dev, \
	struct vk_fence_create_info_t *info, \
	void *allocator, \
	void **fence); \
s32 (*dl_vk_reset_cmdbuf)( \
	void *cmdbuf, \
	u32 flags); \
void (*dl_vk_cmd_blit_img)( \
	void *cmdbuf, \
	void *src_img, \
	u32 src_img_layout, \
	void *dst_img, \
	u32 dst_img_layout, \
	u32 regions_n, \
	struct vk_img_blit_t *regions, \
	u32 filter); \
s32 (*dl_vk_wait_for_fences)( \
	void *dev, \
	u32 fences_n, \
	void **fences, \
	u32 wait_all, \
	u64 timeout); \
s32 (*dl_vk_reset_fences)( \
	void *dev, \
	u32 fences_n, \
	void **fences); \
s32 (*dl_vk_q_present)( \
	void *q, \
	struct vk_present_info_t *info); \
s32 (*dl_vk_create_sem)( \
	void *dev, \
	struct vk_sem_create_info_t *info, \
	void *allocator, \
	void **sem);
#endif