diff --git a/2d/kickstart/app.c b/2d/blit/app.c
similarity index 78%
rename from 2d/kickstart/app.c
rename to 2d/blit/app.c
index 470f7a9..57844c8 100644
--- a/2d/kickstart/app.c
+++ b/2d/blit/app.c
@@ -4,7 +4,7 @@
  * this is public domain without any warranties of any kind
  * Sylvain BERTRAND
  */
-/* XXX: KEEP AN EYE ON ABBREVIATIONS */
+/* XXX: KEEP AN EYE ON ABBREVIATIONS, ALWAYS */
 /*
  * XXX vk abstraction is much more complex than real hardware, aka the cost of
  * most software generalisation/abstraction (and some hardware has lost its
@@ -14,6 +14,11 @@
  * fancy stuff above this "safe mode" must be validated by hardware vendors
  * then the user... or you literally walking on eggs.
  *
+ * The vulkan API is, here, simplified and taylored for the app using the C
+ * preprocessor. In other words, when there is no ambiguity in the context of
+ * this code, vulkan API function parameters get simplified out in C
+ * preprocessor macros.
+ *
  * XXX: this is a "One Compilation Unit" source code with preprocessor
  * namespace support. this allow the project to grow to very large and keep the
  * global identifier space in check (= tradeoff). Each source file, *.h *.c,
@@ -27,12 +32,11 @@
  * - a swpchn can become out-of-date. for instance the win system did resz
  * the parent surf of the swpchn... if you did allow it to happen
  * asynchronously.
- * here we choose that any of those evts will be fatal for simplicity.
- * for instance, if you choose to support the swpchn out-of-date state,
- * if your rendering state was too much "pre-configured" in advanced, you would
- * have to "re-pre-configure" everything... or you should drop
- * "pre-configuring" and program everything *again* for each swpchn img you
- * draw.
+ * here we choose that any of those evts will be fatal for simplicity. for
+ * instance, if you choose to support the swpchn out-of-date state, if your
+ * rendering state was too much "pre-configured", you would have to
+ * "re-pre-configure" everything... or you should drop "pre-configuring" and
+ * program everything *again* for each swpchn img you draw.
  *
  * display programing is demonstrated "officially" in khronos vk cube.c and
  * there is a tutorial slide "1-Vulkan-Tutorial.pdf" (just abstract away c++
@@ -40,7 +44,7 @@
  * different:
  * - only 1 "main" synchronous loop
  * - only xcb wsi. xcb is a client library on top of the x11 protocol.
- * (we know wayland should be added...)
+ * we know wayland ("x12") should be added.
  * - dynamic loading of xcb.
  * - no need of vk headers (using directly the ABI with custom headers).
  *
@@ -56,33 +60,43 @@
  * pay the cost of transfering from 1 q fam to another q fam: then think twice
  * on how you want to spread your work load on the q fams.
  *
- * we did change the name of some vk objs to make some areas more explicit.
- *
  * for proper keyboard support, joypad way or/and text input way, read the
  * included KEYBOARD file. here, since we use basic layout independent standard
  * keys, the x11 core keyboard protocol is fairly enough.
  *
  * TODO: use as less as possible device memory object, namely try to allocate
- * one big chunk and manage alignment constraint ourself.
+ * one big chunk and manage alignment constraint ourself. vk api does provide
+ * a way to query for the memory alignment constraints.
  */
 #include <stdlib.h>
 #include <string.h>
-
+#include <xcb.h>
 #include "app_core_types.h"
-#include "vk_types.h"
+#include "nyanvk/consts.h"
+#include "nyanvk/types.h"
+#include "vk_app.h"
 #include "app_state_types.h"
 #include "log.h"
-
 #include "vk_syms.c"
 #include "app_state.c"
 #include "xcb.c"
-
 /*---------------------------------------------------------------------------*/
 #include "namespace/app.c"
 #include "namespace/vk_syms.c"
 #include "namespace/app_state_types.h"
 #include "namespace/app_state.c"
+/*---------------------------------------------------------------------------*/
+#define VK_FATAL(fmt, ...) \
+if (r < 0) {\
+	LOG(fmt, ##__VA_ARGS__);\
+	exit(1);\
+}
 
+#define FATAL(fmt, ...) \
+{\
+	LOG(fmt, ##__VA_ARGS__);\
+	exit(1);\
+}
 /* the phydev q fam selected */
 static void dev_create(void)
 {
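[annotation — not part of the patch: the namespace/*.c files are not shown in this diff. Under the "One Compilation Unit" scheme described in the file header, they presumably keep the global identifier space in check by remapping this file's short names onto prefixed ones with plain #defines, along these lines; every name below is illustrative only:]

	/* hypothetical sketch of a namespace/app.c remap */
	#define instance_g app_instance_g	/* short name -> prefixed global */
	#define surf_g app_surf_g
	#define dev_create app_dev_create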
@@ -112,12 +126,8 @@ static void dev_create(void)
 	info.q_create_infos = &q_info;
 	info.enabled_exts_n = ARRAY_N(exts);
 	info.enabled_ext_names = exts;
-
-	r = vk_create_dev(surf_g.dev.phydev.vk, &info, 0, &surf_g.dev.vk);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:physical device:%p:unable to create a vulkan device\n", r, surf_g.dev.phydev.vk);
-		exit(1);
-	}
+	vk_create_dev(&info);
+	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:unable to create a vulkan device\n", r, surf_g.dev.phydev.vk)
 	LOG("0:MAIN:physical device:%p:vulkan device created with one proper queue:%p\n", surf_g.dev.phydev.vk, surf_g.dev.vk);
 }
 
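[annotation: the bare vk_create_dev(&info) only reads sensibly with the header's "parameters get simplified out in C preprocessor macros" convention. Judging from the removed call, the wrapper presumably expands along these lines, assigning r and pulling the remaining arguments from the globals; vk_create_dev_fn is a placeholder for the dynamically loaded function pointer, whose real name is not shown in this diff:]

	#define vk_create_dev(info) \
		(r = vk_create_dev_fn(surf_g.dev.phydev.vk, (info), 0, &surf_g.dev.vk))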
@@ -144,17 +154,12 @@ static void instance_create(void)
 		LOG("0:MAIN:will use vulkan instance_g extension %s\n", exts[i]);
 		++i;
 	}
-
 	memset(&info, 0, sizeof(info));
-
 	info.type = vk_struct_type_instance_create_info;
 	info.enabled_exts_n = ARRAY_N(exts);
 	info.enabled_ext_names = exts;
-	r = vk_create_instance(&info, 0, &instance_g);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:unable to create a vulkan instance_g\n", r);
-		exit(1);
-	}
+	vk_create_instance(&info);
+	VK_FATAL("0:MAIN:FATAL:%d:unable to create a vulkan instance_g\n", r)
 	LOG("0:MAIN:vulkan instance_g handle %p\n", instance_g);
 }
 
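[annotation: with VK_FATAL as defined in the file header, the new pair expands to exactly the block it replaces, which is why the call site carries no trailing semicolon:]

	vk_create_instance(&info);
	if (r < 0) {
		LOG("0:MAIN:FATAL:%d:unable to create a vulkan instance_g\n", r);
		exit(1);
	}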
@@ -167,25 +172,18 @@ static void instance_exts_dump(void)
 	s32 r;
 
 	memset(exts, 0, sizeof(exts));
-
 	n = EXTS_N_MAX;
-
-	r = vk_enumerate_instance_ext_props(0, &n, exts);
-
+	vk_enumerate_instance_ext_props(&n, exts);
 	if (r != vk_success && r != vk_incomplete) {
 		LOG("0:MAIN:ERROR:%d:unable to enumerate instance_g extension(s)\n", r);
 		return;
 	}
-
 	if (r == vk_incomplete) {
 		LOG("0:MAIN:ERROR:too many extensions (%u/%u), dumping disabled", n, EXTS_N_MAX);
 		return;
 	}
-
 	/* vk_success */
-
 	LOG("0:MAIN:have %u instance_g extension(s)\n", n);
-
 	loop {
 		if (n == 0)
 			break;
@@ -204,25 +202,18 @@ static void instance_layers_dump(void)
 	s32 r;
 
 	memset(layers, 0, sizeof(layers));
-
 	n = LAYERS_N_MAX;
-
-	r = vk_enumerate_instance_layer_props(&n, layers);
-
+	vk_enumerate_instance_layer_props(&n, layers);
 	if (r != vk_success && r != vk_incomplete) {
 		LOG("0:MAIN:ERROR:%d:unable to enumerate instance_g layer(s)\n", r);
 		return;
 	}
-
 	if (r == vk_incomplete) {
 		LOG("0:MAIN:ERROR:too many layers (%u/%u), dumping disabled", n, LAYERS_N_MAX);
 		return;
 	}
-
 	/* vk_success */
-
 	LOG("0:MAIN:have %u instance_g layer(s)\n", n);
-
 	loop {
 		if (n == 0)
 			break;
@@ -239,32 +230,18 @@ static void tmp_phydevs_get(void)
 	s32 r;
 
 	memset(phydevs, 0, sizeof(phydevs));
-
 	n = tmp_phydevs_n_max;
-
-	r = vk_enumerate_phydevs(instance_g, &n, phydevs);
-
-	if (r != vk_success && r != vk_incomplete) {
-		LOG("0:MAIN:FATAL:%ld:unable to enumerate physical devices\n",r);
-		exit(1);
-	}
-
-	if (r == vk_incomplete) {
-		LOG("0:MAIN:FATAL:too many vulkan physical devices %u/%u for our temporary storage\n", n, tmp_phydevs_n_max);
-		exit(1);
-	}
-
+	vk_enumerate_phydevs(&n, phydevs);
+	if (r != vk_success && r != vk_incomplete)
+		FATAL("0:MAIN:FATAL:%ld:unable to enumerate physical devices\n",r)
+	if (r == vk_incomplete)
+		FATAL("0:MAIN:FATAL:too many vulkan physical devices %u/%u for our temporary storage\n", n, tmp_phydevs_n_max)
 	/* vk_success */
-
 	LOG("0:MAIN:detected %u physical devices\n", n);
-	if (n == 0) {
-		LOG("0:MAIN:no vulkan physical devices, exiting\n");
-		exit(1);
-	}
-
+	if (n == 0)
+		FATAL("0:MAIN:no vulkan physical devices, exiting\n")
 	tmp_phydevs_n_g = n;
 	memset(tmp_phydevs_g, 0, sizeof(tmp_phydevs_g));
-
 	n = 0;
 	loop {
 		if (n == tmp_phydevs_n_g)
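[annotation: this hunk shows why the file defines two macros. VK_FATAL keys on the sign of r, but the enumerate calls must also treat vk_incomplete, a positive status, as fatal, so the success/incomplete test stays explicit and the unconditional FATAL block only bundles the LOG plus exit(1):]

	/* VK_FATAL(...) -> if (r < 0) { LOG(...); exit(1); }   (sign test)     */
	/* FATAL(...)    -> { LOG(...); exit(1); }               (unconditional) */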
@@ -282,25 +259,18 @@ static void phydev_exts_dump(void *phydev)
 	s32 r;
 
 	memset(exts, 0, sizeof(exts));
-
 	n = EXTS_N_MAX;
-
-	r = vk_enumerate_dev_ext_props(phydev, 0, &n, exts);
-
+	vk_enumerate_dev_ext_props(phydev, &n, exts);
 	if (r != vk_success && r != vk_incomplete) {
 		LOG("0:MAIN:ERROR:physical device:%p:%d:unable to enumerate device extension(s)\n", phydev, r);
 		return;
 	}
-
 	if (r == vk_incomplete) {
 		LOG("0:MAIN:ERROR:physical device:%p:too many extensions (%u/%u), dumping disabled", phydev, n, EXTS_N_MAX);
 		return;
 	}
-
 	/* vk_success */
-
 	LOG("0:MAIN:physical device:%p:have %u device extension(s)\n", phydev, n);
-
 	loop {
 		if (n == 0)
 			break;
@@ -353,7 +323,6 @@ static u8 *uuid_str(u8 *uuid)
 			break;
 		/* XXX: always write a terminating 0, truncated or not */
 		snprintf(uuid_str + i * 2, 3, "%02x", uuid[i]);
-
 		++i;
 	}
 	return uuid_str;
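[annotation: the snprintf size argument of 3 caps each write at two hex digits plus a terminating NUL; every NUL but the last is overwritten by the following pair, so for vulkan's 16-byte UUIDs the destination buffer must hold 16 * 2 + 1 = 33 bytes:]

	/* i == 0 : "ab\0"                    */
	/* i == 1 : "abcd\0"                  */
	/* i == 15: 32 hex digits + NUL = 33  */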
@@ -370,14 +339,10 @@ static void tmp_phydevs_props_dump(void)
 
 		if (i == tmp_phydevs_n_g)
 			break;
-
 		p = &tmp_phydevs_g[i];
-
 		memset(&props, 0, sizeof(props));
		props.type = vk_struct_type_phydev_props;
-
 		vk_get_phydev_props(p->vk, &props);
-
 		LOG("0:MAIN:physical device:%p:properties:api version=%#x=%u.%u.%u\n", p->vk, props.core.api_version, VK_VERSION_MAJOR(props.core.api_version), VK_VERSION_MINOR(props.core.api_version), VK_VERSION_PATCH(props.core.api_version));
 		LOG("0:MAIN:physical device:%p:properties:driver version=%#x=%u.%u.%u\n", p->vk, props.core.driver_version, VK_VERSION_MAJOR(props.core.driver_version), VK_VERSION_MINOR(props.core.driver_version), VK_VERSION_PATCH(props.core.driver_version));
 		LOG("0:MAIN:physical device:%p:properties:vendor id=%#x\n", p->vk, props.core.vendor_id);
@@ -388,7 +353,7 @@ static void tmp_phydevs_props_dump(void)
 		else
 			p->is_discret_gpu = false;
 		LOG("0:MAIN:physical device:%p:properties:name=%s\n", p->vk, props.core.name);
-		LOG("0:MAIN:physical device:%p:properties:pipeline cache uuid=%s\n", p->vk, uuid_str(props.core.pipeline_cache_uuid));
+		LOG("0:MAIN:physical device:%p:properties:pipeline cache uuid=%s\n", p->vk, uuid_str(props.core.pl_cache_uuid));
 		/* display the limits and sparse props at log level 1, if needed */
 		++i;
 	}
@@ -401,11 +366,8 @@ static void tmp_phydev_q_fams_get(struct tmp_phydev_t *p)
 
 	n = 0;
 	vk_get_phydev_q_fam_props(p->vk, &n, 0);
-	if (n > tmp_phydev_q_fams_n_max) {
-		LOG("0:MAIN:FATAL:physical device:%p:too many queue families %u/%u\n", p->vk, n, tmp_phydev_q_fams_n_max);
-		exit(1);
-	}
-
+	if (n > tmp_phydev_q_fams_n_max)
+		FATAL("0:MAIN:FATAL:physical device:%p:too many queue families %u/%u\n", p->vk, n, tmp_phydev_q_fams_n_max)
 	memset(p->q_fams, 0, sizeof(p->q_fams));
 	i = 0;
 	loop {
@@ -414,7 +376,6 @@ static void tmp_phydev_q_fams_get(struct tmp_phydev_t *p)
 		p->q_fams[i].type = vk_struct_type_q_fam_props;
 		++i;
 	}
-
 	vk_get_phydev_q_fam_props(p->vk, &n, p->q_fams);
 	p->q_fams_n = n;
 	LOG("0:MAIN:physical device:%p:have %u queue families\n", p->vk, p->q_fams_n);
@@ -451,32 +412,25 @@ static void tmp_phydev_q_fams_dump(struct tmp_phydev_t *p)
 			LOG("0:MAIN:physical device:%p:queue family:%u:flags:sparse binding\n", p->vk, i);
 		if ((p->q_fams[i].core.flags & vk_q_protected_bit) != 0)
 			LOG("0:MAIN:physical device:%p:queue family:%u:flags:protected\n", p->vk, i);
-
-
 		LOG("0:MAIN:physical device:%p:queue family:%u:%u queues\n", p->vk, i, p->q_fams[i].core.qs_n);
 		LOG("0:MAIN:physical device:%p:queue family:%u:%u bits timestamps\n", p->vk, i, p->q_fams[i].core.timestamp_valid_bits);
-
 		LOG("0:MAIN:physical device:%p:queue family:%u:(width=%u,height=%u,depth=%u) minimum image transfer granularity\n", p->vk, i, p->q_fams[i].core.min_img_transfer_granularity.width, p->q_fams[i].core.min_img_transfer_granularity.height, p->q_fams[i].core.min_img_transfer_granularity.depth);
 		++i;
 	}
 }
 
-static void cmdpools_create(void)
+static void cp_create(void)
 {
 	s32 r;
-	struct vk_cmdpool_create_info_t info;
+	struct vk_cp_create_info_t info;
 
 	memset(&info, 0, sizeof(info));
-	info.type = vk_struct_type_cmdpool_create_info;
-	info.flags = vk_cmdpool_create_reset_cmdbuf_bit;
+	info.type = vk_struct_type_cp_create_info;
+	info.flags = vk_cp_create_reset_cb_bit;
 	info.q_fam = surf_g.dev.phydev.q_fam;
-
-	r = vk_create_cmdpool(&info, 0, &surf_g.dev.cmdpool);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:unable create the commmand pool\n", r);
-		exit(1);
-	}
-	LOG("0:MAIN:device:%p:queue family:%u:created command pool %p\n", surf_g.dev.vk, surf_g.dev.phydev.q_fam, surf_g.dev.cmdpool);
+	vk_create_cp(&info);
+	VK_FATAL("0:MAIN:FATAL:%d:unable create the commmand pool\n", r)
+	LOG("0:MAIN:device:%p:queue family:%u:created command pool %p\n", surf_g.dev.vk, surf_g.dev.phydev.q_fam, surf_g.dev.cp);
 }
 
 static void tmp_phydevs_q_fams_dump(void)
@@ -495,7 +449,7 @@ static void tmp_phydevs_q_fams_dump(void)
 static void q_get(void)
 {
 	LOG("0:MAIN:device:%p:getting queue:family=%u queue=0\n", surf_g.dev.vk, surf_g.dev.phydev.q_fam);
-	vk_get_dev_q(surf_g.dev.phydev.q_fam, 0, &surf_g.dev.q);
+	vk_get_dev_q();
 	LOG("0:MAIN:device:%p:got queue:%p\n", surf_g.dev.vk, surf_g.dev.q);
 }
 
@@ -504,17 +458,13 @@ static void check_vk_version(void)
 	u32 api_version;
 	s32 r;
 
-	r = vk_enumerate_instance_version(&api_version);
-	if (r != vk_success) {
-		LOG("0:MAIN:FATAL:%d:unable to enumerate instance_g version\n", r);
-		exit(1);
-	}
+	vk_enumerate_instance_version(&api_version);
+	if (r != vk_success)
+		FATAL("0:MAIN:FATAL:%d:unable to enumerate instance_g version\n", r)
 	LOG("0:MAIN:vulkan instance_g version %#x = %u.%u.%u\n", api_version, VK_VERSION_MAJOR(api_version), VK_VERSION_MINOR(api_version), VK_VERSION_PATCH(api_version));
 	if (VK_VERSION_MAJOR(api_version) == 1
-		&& VK_VERSION_MINOR(api_version) == 0) {
-		LOG("0:MAIN:FATAL:vulkan instance_g version too old\n");
-		exit(1);
-	}
+		&& VK_VERSION_MINOR(api_version) == 0)
+		FATAL("0:MAIN:FATAL:vulkan instance_g version too old\n")
 }
 /*
  * the major obj to use in vk abstraction of gfx hardware is the q. In this
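[annotation: the VK_VERSION_* helpers are not defined in this diff; the custom nyanvk headers presumably mirror the standard vulkan 1.x version packing, which is what makes the two integer compares above sufficient to reject a 1.0 instance:]

	#define VK_VERSION_MAJOR(v) ((u32)(v) >> 22)
	#define VK_VERSION_MINOR(v) (((u32)(v) >> 12) & 0x3ff)
	#define VK_VERSION_PATCH(v) ((u32)(v) & 0xfff)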
@@ -534,9 +484,7 @@ static void tmp_phydevs_q_fams_surf_support_get(void)
 
 		if (i == tmp_phydevs_n_g)
 			break;
-
 		p = &tmp_phydevs_g[i];
-
 		j = 0;
 		loop {
 			s32 r;
@@ -544,15 +492,9 @@ static void tmp_phydevs_q_fams_surf_support_get(void)
 
 			if (j == p->q_fams_n)
 				break;
-
 			supported = vk_false;
-			r = vk_get_phydev_surf_support(p->vk, j, surf_g.vk,
-				&supported);
-			if (r < 0) {
-				LOG("0:MAIN:FATAL:%d:physical device:%p:queue family:%u:surface:%p:unable to query queue family wsi/(image presentation to our surface) support\n", r, p->vk, j, surf_g.vk);
-				exit(1);
-			}
-
+			vk_get_phydev_surf_support(p->vk, j, &supported);
+			VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:queue family:%u:surface:%p:unable to query queue family wsi/(image presentation to our surface) support\n", r, p->vk, j, surf_g.vk)
 			if (supported == vk_true) {
 				LOG("0:MAIN:physical device:%p:queue family:%u:surface:%p:does support wsi/(image presentation to our surface) \n", p->vk, j, surf_g.vk);
 				p->q_fams_surf_support[j] = true;
@@ -571,7 +513,6 @@ static void tmp_selected_phydev_cherry_pick(u8 i)
 	struct tmp_phydev_t *p;
 
 	p = &tmp_phydevs_g[i];
-
 	surf_g.dev.phydev.vk = p->vk;
 	surf_g.dev.phydev.is_discret_gpu = p->is_discret_gpu;
 	surf_g.dev.phydev.mem_types_n = p->mem_props.core.mem_types_n;
@@ -596,9 +537,7 @@ static void tmp_phydev_and_q_fam_select(void)
 
 		if (i == tmp_phydevs_n_g)
 			break;
-
 		p = &tmp_phydevs_g[i];
-
 		j = 0;
 		loop {
 			if (j == p->q_fams_n)
@@ -633,17 +572,12 @@ static void surf_create(void)
 	s32 r;
 
 	memset(&surf_g, 0, sizeof(surf_g));
-
 	memset(&xcb_info, 0, sizeof(xcb_info));
 	xcb_info.type = vk_struct_type_xcb_surf_create_info;
 	xcb_info.c = app_xcb.c;
 	xcb_info.win = app_xcb.win_id;
-
-	r = vk_create_xcb_surf(instance_g, &xcb_info, 0, &surf_g.vk);
-	if (r < 0) {/* ok because this enum is forded to a signed 32 bits */
-		LOG("0:MAIN:FATAL:%d:xcb:%s:screen:%d:root window id:%#x:window id:%#x:unable to create a vulkan surface from this x11 window\n", r, app_xcb.disp_env, app_xcb.scr_idx, app_xcb.scr->root, app_xcb.win_id);
-		exit(1);
-	}
+	vk_create_xcb_surf(&xcb_info);
+	VK_FATAL("0:MAIN:FATAL:%d:xcb:%s:screen:%d:root window id:%#x:window id:%#x:unable to create a vulkan surface from this x11 window\n", r, app_xcb.disp_env, app_xcb.scr_idx, app_xcb.scr->root, app_xcb.win_id)
 	LOG("0:MAIN:xcb:'%s':screen:%d:root window id:%#x:window id:%#x:created vk_surface=%p\n", app_xcb.disp_env, app_xcb.scr_idx, app_xcb.scr->root, app_xcb.win_id, surf_g.vk);
 }
 
@@ -678,18 +612,10 @@ static void texel_mem_blk_conf_select(void)
 	memset(&info, 0, sizeof(info));
 	info.type = vk_struct_type_phydev_surf_info;
 	info.surf = surf_g.vk;
-
-	r = vk_get_phydev_surf_texel_mem_blk_confs(surf_g.dev.phydev.vk, &info,
-		&confs_n, 0);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable get the count of valid surface texel memory block configurations\n", r, surf_g.dev.phydev.vk, surf_g.vk);
-		exit(1);
-	}
-
-	if (confs_n > CONFS_N_MAX) {
-		LOG("0:MAIN:FATAL:physical device:%p:surface:%p:too many surface texel memory block configurations %u/%u\n", surf_g.dev.phydev.vk, surf_g.vk, confs_n, CONFS_N_MAX);
-		exit(1);
-	}
+	vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, 0);
+	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable get the count of valid surface texel memory block configurations\n", r, surf_g.dev.phydev.vk, surf_g.vk)
+	if (confs_n > CONFS_N_MAX)
+		FATAL("0:MAIN:FATAL:physical device:%p:surface:%p:too many surface texel memory block configurations %u/%u\n", surf_g.dev.phydev.vk, surf_g.vk, confs_n, CONFS_N_MAX)
 
 	memset(confs, 0, sizeof(confs[0]) * confs_n);
 	i = 0;
@@ -699,29 +625,18 @@ static void texel_mem_blk_conf_select(void)
 		confs[i].type = vk_struct_type_surf_texel_mem_blk_conf;
 		++i;
 	}
-
-	r = vk_get_phydev_surf_texel_mem_blk_confs(surf_g.dev.phydev.vk, &info,
-		&confs_n, confs);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable get the valid surface texel memory block configurations\n", r, surf_g.dev.phydev.vk, surf_g.vk);
-		exit(1);
-	}
-
-	if (confs_n == 0) {
-		LOG("0:MAIN:FATAL:physical device:%p:surface:%p:no valid surface texel memory block configuration\n", surf_g.dev.phydev.vk, surf_g.vk);
-		exit(1);
-	}
-
+	vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, confs);
+	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable get the valid surface texel memory block configurations\n", r, surf_g.dev.phydev.vk, surf_g.vk)
+	if (confs_n == 0)
+		FATAL("0:MAIN:FATAL:physical device:%p:surface:%p:no valid surface texel memory block configuration\n", surf_g.dev.phydev.vk, surf_g.vk)
 	texel_mem_blk_confs_dump(confs_n, confs);
 
 	cc = &surf_g.dev.phydev.selected_texel_mem_blk_conf_core;
-
 	if ((confs_n == 1) && (confs[0].core.fmt
 		== vk_texel_mem_blk_fmt_undefined)) {
 		/* this means the dev let us choose our the fmt */
 		cc->fmt = vk_texel_mem_blk_fmt_b8g8r8a8_srgb;
 		LOG("0:MAIN:physical device:%p:surface:%p:using our surface texel memory block format %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->fmt);
-
 		cc->color_space = vk_color_space_srgb_nonlinear;
 		LOG("0:MAIN:physical device:%p:surface:%p:using prefered surface texel memory block color space %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->color_space);
 	} else {
@@ -729,7 +644,6 @@ static void texel_mem_blk_conf_select(void)
 		surf_g.dev.phydev.selected_texel_mem_blk_conf_core.fmt =
 			confs[0].core.fmt;
 		LOG("0:MAIN:physical device:%p:surface:%p:using prefered surface texel memory block format %u\n", surf_g.dev.phydev.vk, surf_g.vk, surf_g.dev.phydev.selected_texel_mem_blk_conf_core.fmt);
-
 		cc->color_space = confs[0].core.color_space;
 		LOG("0:MAIN:physical device:%p:surface:%p:using prefered surface texel memory block color space %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->color_space);
 	}
@@ -739,7 +653,6 @@ static void tmp_phydev_mem_props_get(struct tmp_phydev_t *p)
 {
 	memset(&p->mem_props, 0, sizeof(p->mem_props));
 	p->mem_props.type = vk_struct_type_phydev_mem_props;
-
 	vk_get_phydev_mem_props(p->vk, &p->mem_props);
 }
 
@@ -785,7 +698,6 @@ static void tmp_phydev_mem_types_dump(struct tmp_phydev_t *p)
 	u8 i;
 
 	LOG("0:MAIN:physical device:%p:%u memory types\n", p->vk, p->mem_props.core.mem_types_n);
-
 	i = 0;
 	loop {
 		if (i == p->mem_props.core.mem_types_n)
@@ -801,7 +713,6 @@ static void tmp_phydev_mem_heaps_dump(struct tmp_phydev_t *p)
 	u8 i;
 
 	LOG("0:MAIN:physical device:%p:%u memory heaps\n", p->vk, p->mem_props.core.mem_heaps_n);
-
 	i = 0;
 	loop {
 		if (i == p->mem_props.core.mem_heaps_n)
@@ -840,18 +751,11 @@ static void tmp_surf_caps_get(void)
 	memset(&info, 0, sizeof(info));
 	info.type = vk_struct_type_phydev_surf_info;
 	info.surf = surf_g.vk;
-
-	r = vk_get_phydev_surf_caps(surf_g.dev.phydev.vk, &info, &tmp_surf_caps_g);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get our surface capabilities in the context of the selected physical device\n", r, surf_g.dev.phydev.vk, surf_g.vk);
-		exit(1);
-	}
-
+	vk_get_phydev_surf_caps(&info);
+	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get our surface capabilities in the context of the selected physical device\n", r, surf_g.dev.phydev.vk, surf_g.vk)
 	/* we have room for a maximum of 3 images per swapchain */
-	if (tmp_surf_caps_g.core.imgs_n_min > swpchn_imgs_n_max) {
-		LOG("0:MAIN:FATAL:physical device:%p:surface:%p:we have room for %u images per swapchain, but this swapchain requires a minimum of %u images\n", surf_g.dev.phydev.vk, surf_g.vk, swpchn_imgs_n_max, tmp_surf_caps_g.core.imgs_n_min);
-		exit(1);
-	}
+	if (tmp_surf_caps_g.core.imgs_n_min > swpchn_imgs_n_max)
+		FATAL("0:MAIN:FATAL:physical device:%p:surface:%p:we have room for %u images per swapchain, but this swapchain requires a minimum of %u images\n", surf_g.dev.phydev.vk, surf_g.vk, swpchn_imgs_n_max, tmp_surf_caps_g.core.imgs_n_min)
 }
 
 static void tmp_surf_caps_dump(void)
@@ -873,12 +777,8 @@ static void swpchn_imgs_get(void)
 	s32 r;
 
 	surf_g.dev.swpchn.imgs_n = swpchn_imgs_n_max;
-	r = vk_get_swpchn_imgs(surf_g.dev.swpchn.vk, &surf_g.dev.swpchn.imgs_n,
-		surf_g.dev.swpchn.imgs);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:device:%p:surface:%p:swapchain:%p:unable to get the swapchain images\n", r, surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk);
-		exit(1);
-	}
+	vk_get_swpchn_imgs();
+	VK_FATAL("0:MAIN:FATAL:%d:device:%p:surface:%p:swapchain:%p:unable to get the swapchain images\n", r, surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk)
 	LOG("0:MAIN:device:%p:surface:%p:swapchain:%p:got %u swapchain images\n", surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk, surf_g.dev.swpchn.imgs_n);
 }
 
@@ -889,9 +789,7 @@ static void swpchn_init(void)
 	s32 r;
 
 	memset(&info, 0, sizeof(info));
-
 	p = &surf_g.dev.phydev;
-
 	info.type = vk_struct_type_swpchn_create_info;
 	info.surf = surf_g.vk;
 	info.imgs_n_min = tmp_surf_caps_g.core.imgs_n_min;
@@ -907,12 +805,8 @@ static void swpchn_init(void)
 	info.composite_alpha = vk_composite_alpha_opaque_bit;
 	info.present_mode = vk_present_mode_fifo;
 	info.clipped = vk_true;
-
-	r = vk_create_swpchn(&info, 0, &surf_g.dev.swpchn.vk);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:device:%p:surface:%p:unable to create the initial swapchain\n", r, surf_g.dev.vk, surf_g.vk);
-		exit(1);
-	}
+	vk_create_swpchn(&info);
+	VK_FATAL("0:MAIN:FATAL:%d:device:%p:surface:%p:unable to create the initial swapchain\n", r, surf_g.dev.vk, surf_g.vk)
 	LOG("0:MAIN:device:%p:surface:%p:swapchain created %p\n", surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk);
 }
 
@@ -921,13 +815,8 @@ static void tmp_present_modes_get(void)
 	s32 r;
 
 	tmp_present_modes_n_g = tmp_present_modes_n_max;
-
-	r = vk_get_phydev_surf_present_modes(surf_g.dev.phydev.vk, surf_g.vk,
-		&tmp_present_modes_n_g, tmp_present_modes_g);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get the physical device present mode for our surface\n", r, surf_g.dev.phydev.vk, surf_g.vk);
-		exit(1);
-	}
+	vk_get_phydev_surf_present_modes();
+	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get the physical device present mode for our surface\n", r, surf_g.dev.phydev.vk, surf_g.vk)
 }
 
 static u8 *present_mode_to_str(u32 mode)
@@ -966,7 +855,6 @@ static void cpu_img_create(u8 i)
 	s32 r;
 
 	memset(&info, 0, sizeof(info));
-
 	info.type = vk_struct_type_img_create_info;
 	info.flags = vk_img_create_flag_2d_array_compatible_bit;
 	info.img_type = vk_img_type_2d;
@@ -980,12 +868,8 @@ static void cpu_img_create(u8 i)
 	info.img_tiling = vk_img_tiling_linear;
 	info.usage = vk_img_usage_transfer_src_bit;
 	info.initial_layout = vk_img_layout_undefined;
-
-	r = vk_create_img(&info, 0, &surf_g.dev.cpu_imgs[i].vk);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:device:%p:unable to create swapchain cpu image %u\n", r, surf_g.dev.vk, i);
-		exit(1);
-	}
+	vk_create_img(&info, &surf_g.dev.cpu_imgs[i].vk);
+	VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to create swapchain cpu image %u\n", r, surf_g.dev.vk, i)
 	LOG("0:MAIN:device:%p:swapchain cpu image %u created %p\n", surf_g.dev.vk, i, surf_g.dev.cpu_imgs[i].vk);
 }
 
@@ -1005,55 +889,36 @@ static void cpu_imgs_create(void)
 static void img_mem_barrier_run_once(u8 i, struct vk_img_mem_barrier_t *b)
 {
 	s32 r;
-	struct vk_cmdbuf_begin_info_t begin_info;
+	struct vk_cb_begin_info_t begin_info;
 	struct vk_submit_info_t submit_info;
 
 	memset(&begin_info, 0, sizeof(begin_info));
-	begin_info.type = vk_struct_type_cmdbuf_begin_info;
-	begin_info.flags = vk_cmdbuf_usage_one_time_submit_bit;
-
-	r = vk_begin_cmdbuf(surf_g.dev.cmdbufs[i], &begin_info);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:command buffer:%p:unable to begin recording the initial layout transition command buffer\n", r, surf_g.dev.cmdbufs[i]);
-		exit(1);
-	}
+	begin_info.type = vk_struct_type_cb_begin_info;
+	begin_info.flags = vk_cb_usage_one_time_submit_bit;
+	vk_begin_cb(surf_g.dev.cbs[i], &begin_info);
+	VK_FATAL("0:MAIN:FATAL:%d:command buffer:%p:unable to begin recording the initial layout transition command buffer\n", r, surf_g.dev.cbs[i])
 	/*--------------------------------------------------------------------*/
-	vk_cmd_pipeline_barrier(app_surf.dev.cmdbufs[i],
-		vk_pipeline_stage_top_of_pipe_bit,
-		vk_pipeline_stage_top_of_pipe_bit, 0, 0, 0, 0, 0, 1, b);
+	vk_cmd_pl_barrier(app_surf.dev.cbs[i], b);
 	/*--------------------------------------------------------------------*/
-	r = vk_end_cmdbuf(surf_g.dev.cmdbufs[i]);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:command buffer:%p:unable to end recording of the initial layout transition command buffer\n", r, surf_g.dev.cmdbufs[i]);
-		exit(1);
-	}
+	vk_end_cb(surf_g.dev.cbs[i]);
+	VK_FATAL("0:MAIN:FATAL:%d:command buffer:%p:unable to end recording of the initial layout transition command buffer\n", r, surf_g.dev.cbs[i])
 	/*--------------------------------------------------------------------*/
 	memset(&submit_info, 0, sizeof(submit_info));
 	submit_info.type = vk_struct_type_submit_info;
-	submit_info.cmdbufs_n = 1;
-	submit_info.cmdbufs = &surf_g.dev.cmdbufs[i];
-
-	r = vk_q_submit(surf_g.dev.q, 1, &submit_info, 0);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:queue:%p:unable to submit the initial layout transition command buffer\n", r, surf_g.dev.q);
-		exit(1);
-	}
+	submit_info.cbs_n = 1;
+	submit_info.cbs = &surf_g.dev.cbs[i];
+	vk_q_submit(&submit_info);
+	VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to submit the initial layout transition command buffer\n", r, surf_g.dev.q)
 	/*--------------------------------------------------------------------*/
-	r = vk_q_wait_idle(surf_g.dev.q);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:queue:%p:unable to wait for idle or completion of initial layout transition command buffer\n", r, surf_g.dev.q);
-		exit(1);
-	}
+	vk_q_wait_idle();
+	VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to wait for idle or completion of initial layout transition command buffer\n", r, surf_g.dev.q)
 	/*--------------------------------------------------------------------*/
 	/*
	 * since it is tagged to run once its state_g is invalid, we need to
	 * reset it to the initial state_g
	 */
-	r = vk_reset_cmdbuf(surf_g.dev.cmdbufs[i], 0);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:command buffer:%p:unable to reset the initial layout transition command buffer\n", r, surf_g.dev.cmdbufs[i]);
-		exit(1);
-	}
+	vk_reset_cb(surf_g.dev.cbs[i]);
+	VK_FATAL("0:MAIN:FATAL:%d:command buffer:%p:unable to reset the initial layout transition command buffer\n", r, surf_g.dev.cbs[i])
 }
 
 static void cpu_img_layout_to_general(u8 i)
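[annotation: vk_cmd_pl_barrier is the clearest example of the parameter-elision style, since the removed call spells out everything the wrapper must hard-code. Reconstructed from the removed lines, it presumably expands along these lines, with vk_cmd_pipeline_barrier_fn a placeholder for the loaded function pointer:]

	#define vk_cmd_pl_barrier(cb, b) \
		vk_cmd_pipeline_barrier_fn(cb, vk_pipeline_stage_top_of_pipe_bit, \
			vk_pipeline_stage_top_of_pipe_bit, 0, 0, 0, 0, 0, 1, (b))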
@@ -1068,12 +933,10 @@ static void cpu_img_layout_to_general(u8 i)
 	b.src_q_fam = vk_q_fam_ignored;
 	b.dst_q_fam = vk_q_fam_ignored;
 	b.img = surf_g.dev.cpu_imgs[i].vk;
-
 	r = &b.subrsrc_range;
 	r->aspect = vk_img_aspect_color_bit;
 	r->lvls_n = 1;
 	r->array_layers_n = 1;
-
 	img_mem_barrier_run_once(i, &b);
 	LOG("0:MAIN:cpu image:%p[%u]:transition to general layout successful\n", surf_g.dev.cpu_imgs[i].vk, i);
 }
@@ -1101,16 +964,11 @@ static void tmp_cpu_img_mem_rqmts_get(u8 i)
 	memset(&info, 0, sizeof(info));
 	info.type = vk_struct_type_img_mem_rqmts_info;
 	info.img = surf_g.dev.cpu_imgs[i].vk;
-
 	rqmts = &tmp_mem_rqmts_g[i];
 	memset(rqmts, 0, sizeof(*rqmts));
 	rqmts->type = vk_struct_type_mem_rqmts;
-
-	r = vk_get_img_mem_rqmts(&info, rqmts);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:device:%p:unable to get memory requirements for cpu image %u\n", r, surf_g.dev.vk, i);
-		exit(1);
-	}
+	vk_get_img_mem_rqmts(&info, rqmts);
+	VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to get memory requirements for cpu image %u\n", r, surf_g.dev.vk, i)
 	LOG("0:MAIN:device:%p:cpu image %u core requirements are size=%lu bytes, alignment=%lu bytes, memory type=%#08x\n", surf_g.dev.vk, i, (long)rqmts->core.sz, (long)rqmts->core.alignment, rqmts->core.mem_type_bits);
 }
 
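[annotation: the size/alignment pair logged here is exactly what the header's TODO, one big device-memory chunk suballocated by hand, would consume. Vulkan alignments are powers of two, so bumping a running offset to the next image's boundary is the usual mask trick (sketch, names illustrative):]

	off = (off + rqmts->core.alignment - 1) & ~(rqmts->core.alignment - 1);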
@@ -1138,13 +996,10 @@ static bool match_mem_type(u8 mem_type_idx,
 	/* first check this mem type is in our img rqmts */
 	if (((1 << mem_type_idx) & img_rqmts->core.mem_type_bits) == 0)
 		return false;
-
 	mem_type = &surf_g.dev.phydev.mem_types[mem_type_idx];
-
 	if (!ignore_gpu_is_discret)
 		if (surf_g.dev.phydev.is_discret_gpu && IS_DEV_LOCAL(mem_type))
 			return false;
-
 	if ((mem_type->prop_flags & WANTED_MEM_PROPS) == WANTED_MEM_PROPS)
 		return true;
 	return false;
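[annotation: mem_type_bits is a bitmask indexed by memory type: bit i set means type i is legal for this image. For example, with mem_type_bits == 0x6 only indexes 1 and 2 survive the first test:]

	/* (1 << 0) & 0x6 == 0  -> type 0 rejected  */
	/* (1 << 1) & 0x6 != 0  -> type 1 candidate */
	/* (1 << 2) & 0x6 != 0  -> type 2 candidate */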
@@ -1162,8 +1017,7 @@ static bool try_alloc_cpu_img_dev_mem(u8 i,
 	info.type = vk_struct_type_mem_alloc_info;
 	info.sz = img_rqmts->core.sz;
 	info.mem_type_idx = mem_type_idx;
-
-	r = vk_alloc_mem(&info, 0, &surf_g.dev.cpu_imgs[i].dev_mem);
+	vk_alloc_mem(&info, &surf_g.dev.cpu_imgs[i].dev_mem);
 	if (r < 0) {
 		LOG("0:MAIN:WARNING:%d:device:%p:cpu image:%u:unable to allocate %lu bytes from physical dev %p memory type %u\n", r, surf_g.dev.vk, img_rqmts->core.sz, surf_g.dev.phydev.vk, mem_type_idx);
 		return false;
@@ -1185,12 +1039,10 @@ static void cpu_img_dev_mem_alloc(u8 i)
 	u8 mem_type;
 
 	img_rqmts = &tmp_mem_rqmts_g[i];
-
 	mem_type = 0;
 	loop {
 		if (mem_type == surf_g.dev.phydev.mem_types_n)
 			break;
-
 		if (match_mem_type(mem_type, img_rqmts,
 			!IGNORE_GPU_IS_DISCRET)) {
 			if (try_alloc_cpu_img_dev_mem(i, img_rqmts,
@@ -1199,12 +1051,8 @@ static void cpu_img_dev_mem_alloc(u8 i)
 		}
 		++mem_type;
 	}
-
-	if (!surf_g.dev.phydev.is_discret_gpu) {
-		LOG("0:MAIN:FATAL:physical device:%p:cpu image:%u:unable to find proper memory type or to allocate memory\n", surf_g.dev.phydev.vk, i);
-		exit(1);
-	}
-
+	if (!surf_g.dev.phydev.is_discret_gpu)
+		FATAL("0:MAIN:FATAL:physical device:%p:cpu image:%u:unable to find proper memory type or to allocate memory\n", surf_g.dev.phydev.vk, i)
 	/*
	 * lookup again, but relax the match based on discret gpu constraint for
	 * gpu
@@ -1213,14 +1061,12 @@ static void cpu_img_dev_mem_alloc(u8 i)
 	loop {
 		if (mem_type == surf_g.dev.phydev.mem_types_n)
 			break;
-
 		if (match_mem_type(mem_type, img_rqmts, IGNORE_GPU_IS_DISCRET)
 			&& try_alloc_cpu_img_dev_mem(i, img_rqmts, mem_type))
 			return;
 		++mem_type;
 	}
-	LOG("0:MAIN:FATAL:physical device:%p:cpu image:%u:unable to find proper memory type or to allocate memory\n", surf_g.dev.phydev.vk, i);
-	exit(1);
+	FATAL("0:MAIN:FATAL:physical device:%p:cpu image:%u:unable to find proper memory type or to allocate memory\n", surf_g.dev.phydev.vk, i)
 }
 #undef IGNORE_GPU_IS_DISCRET
 
@@ -1244,7 +1090,6 @@ static void cpu_imgs_dev_mem_bind(void)
 	s32 r;
 
 	memset(&infos, 0, sizeof(infos[0]) * surf_g.dev.swpchn.imgs_n);
-
 	i = 0;
 	loop {
 		if (i == surf_g.dev.swpchn.imgs_n)
@@ -1254,11 +1099,8 @@ static void cpu_imgs_dev_mem_bind(void)
 		infos[i].mem = surf_g.dev.cpu_imgs[i].dev_mem;
 		++i;
 	}
-	r = vk_bind_img_mem(surf_g.dev.swpchn.imgs_n, infos);
-	if (r < 0) {
-		LOG("0:MAIN:FATAL:%d:device:%p:cpu images:unable to bind device memory to images\n", r, surf_g.dev.vk);
-		exit(1);
-	}
+	vk_bind_img_mem(infos);
+	VK_FATAL("0:MAIN:FATAL:%d:device:%p:cpu images:unable to bind device memory to images\n", r, surf_g.dev.vk)
 	LOG("0:MAIN:device:%p:cpu images:bound device memory to images\n", surf_g.dev.vk);
 }
 
@@ -1272,12 +1114,9 @@ static void cpu_imgs_dev_mem_map(void)
 
 		if (i == surf_g.dev.swpchn.imgs_n)
 			break;
-		r = vk_map_mem(surf_g.dev.cpu_imgs[i].dev_mem, 0, vk_whole_sz, 0,
+		vk_map_mem(surf_g.dev.cpu_imgs[i].dev_mem,
 			&surf_g.dev.cpu_imgs[i].data);
-		if (r < 0) {
-			LOG("0:MAIN:FATAL:%d:device:%p:cpu image:%u:unable to map image memory\n", r, surf_g.dev.vk, i);
-			exit(1);
-		}
+		VK_FATAL("0:MAIN:FATAL:%d:device:%p:cpu image:%u:unable to map image memory\n", r, surf_g.dev.vk, i)
 		LOG("0:MAIN:device:%p:cpu image:%u:image memory mapped\n", surf_g.dev.vk, i);
 		++i;
 	}
@@ -1288,10 +1127,8 @@ static void cpu_img_subrsrc_layout_get(u8 i)
 	struct vk_img_subrsrc_t s;
 
 	memset(&s, 0, sizeof(s));
-
 	/* 1 subrsrc = uniq color plane of mip lvl 0 and array 0 */
 	s.aspect = vk_img_aspect_color_bit;
-
 	vk_get_img_subrsrc_layout(surf_g.dev.cpu_imgs[i].vk, &s,
 		&surf_g.dev.cpu_imgs[i].layout);
 	LOG("0:MAIN:device:%p:cpu image:%u:layout:offset=%lu bytes size=%lu bytes row_pitch=%lu bytes array_pitch=%lu bytes depth_pitch=%lu bytes\n", surf_g.dev.vk, i, surf_g.dev.cpu_imgs[i].layout.offset, surf_g.dev.cpu_imgs[i].layout.sz, surf_g.dev.cpu_imgs[i].layout.row_pitch, surf_g.dev.cpu_imgs[i].layout.array_pitch, surf_g.dev.cpu_imgs[i].layout.depth_pitch);
... |
... |
static void sems_create(void) |
1320 |
1157 |
loop { |
loop { |
1321 |
1158 |
if (sem == sems_n) |
if (sem == sems_n) |
1322 |
1159 |
break; |
break; |
1323 |
|
|
|
1324 |
1160 |
memset(&info, 0, sizeof(info)); |
memset(&info, 0, sizeof(info)); |
1325 |
1161 |
info.type = vk_struct_type_sem_create_info; |
info.type = vk_struct_type_sem_create_info; |
1326 |
|
|
|
1327 |
|
r = vk_create_sem(&info, 0, &surf_g.dev.sems[sem]); |
|
1328 |
|
if (r < 0) { |
|
1329 |
|
LOG("0:MAIN:FATAL:%d:device:%p:unable to create a semaphore %u for our swapchain\n", r, surf_g.dev.vk, sem); |
|
1330 |
|
exit(1); |
|
1331 |
|
} |
|
|
1162 |
|
vk_create_sem(&info, &surf_g.dev.sems[sem]); |
|
1163 |
|
VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to create a semaphore %u for our swapchain\n", r, surf_g.dev.vk, sem) |
1332 |
1164 |
LOG("0:MAIN:device:%p:semaphore %u for our swapchain created %p\n", surf_g.dev.vk, sem, surf_g.dev.sems[sem]); |
LOG("0:MAIN:device:%p:semaphore %u for our swapchain created %p\n", surf_g.dev.vk, sem, surf_g.dev.sems[sem]); |
1333 |
|
|
|
1334 |
1165 |
++sem; |
++sem; |
1335 |
1166 |
} |
} |
1336 |
1167 |
} |
} |
1337 |
1168 |
|
|
1338 |
|
static void cmdbufs_create(void) |
|
|
1169 |
|
static void cbs_create(void) |
1339 |
1170 |
{ |
{ |
1340 |
1171 |
s32 r; |
s32 r; |
1341 |
|
struct vk_cmdbuf_alloc_info_t alloc_info; |
|
|
1172 |
|
struct vk_cb_alloc_info_t alloc_info; |
1342 |
1173 |
|
|
1343 |
1174 |
memset(&alloc_info, 0, sizeof(alloc_info)); |
memset(&alloc_info, 0, sizeof(alloc_info)); |
1344 |
|
alloc_info.type = vk_struct_type_cmdbuf_alloc_info; |
|
1345 |
|
alloc_info.cmdpool = surf_g.dev.cmdpool; |
|
1346 |
|
alloc_info.lvl = vk_cmdbuf_lvl_primary; |
|
1347 |
|
alloc_info.cmdbufs_n = surf_g.dev.swpchn.imgs_n; |
|
1348 |
|
|
|
1349 |
|
r = vk_alloc_cmdbufs(&alloc_info, surf_g.dev.cmdbufs); |
|
1350 |
|
|
|
1351 |
|
if (r < 0) { |
|
1352 |
|
LOG("0:MAIN:FATAL:%d:device:%p:unable to allocate command buffers for our swapchain images from %p command pool\n", r, surf_g.dev.vk, surf_g.dev.cmdpool); |
|
1353 |
|
exit(1); |
|
1354 |
|
} |
|
1355 |
|
LOG("0:MAIN:device:%p:allocated %u command buffers for our swapchain images from %p command pool\n", surf_g.dev.vk, surf_g.dev.swpchn.imgs_n, surf_g.dev.cmdpool); |
|
|
1175 |
|
alloc_info.type = vk_struct_type_cb_alloc_info; |
|
1176 |
|
alloc_info.cp = surf_g.dev.cp; |
|
1177 |
|
alloc_info.lvl = vk_cb_lvl_primary; |
|
1178 |
|
alloc_info.cbs_n = surf_g.dev.swpchn.imgs_n; |
|
1179 |
|
vk_alloc_cbs(&alloc_info); |
|
1180 |
|
VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to allocate command buffers for our swapchain images from %p command pool\n", r, surf_g.dev.vk, surf_g.dev.cp) |
|
1181 |
|
LOG("0:MAIN:device:%p:allocated %u command buffers for our swapchain images from %p command pool\n", surf_g.dev.vk, surf_g.dev.swpchn.imgs_n, surf_g.dev.cp); |
1356 |
1182 |
} |
} |
1357 |
1183 |
|
|
1358 |
|
static void cmdbuf_record(u8 i) |
|
|
1184 |
|
static void cb_rec(u8 i) |
1359 |
1185 |
{ |
{ |
1360 |
1186 |
s32 r; |
s32 r; |
1361 |
|
struct vk_cmdbuf_begin_info_t begin_info; |
|
|
1187 |
|
struct vk_cb_begin_info_t begin_info; |
1362 |
1188 |
struct vk_img_mem_barrier_t b; |
struct vk_img_mem_barrier_t b; |
1363 |
1189 |
struct vk_img_blit_t region; |
struct vk_img_blit_t region; |
1364 |
1190 |
/*--------------------------------------------------------------------*/ |
/*--------------------------------------------------------------------*/ |
1365 |
1191 |
memset(&begin_info, 0, sizeof(begin_info)); |
memset(&begin_info, 0, sizeof(begin_info)); |
1366 |
|
begin_info.type = vk_struct_type_cmdbuf_begin_info; |
|
1367 |
|
|
|
1368 |
|
r = vk_begin_cmdbuf(surf_g.dev.cmdbufs[i], &begin_info); |
|
1369 |
|
if (r < 0) { |
|
1370 |
|
LOG("0:MAIN:FATAL:%d:swapchain img:%u:command buffer:%p:unable to begin recording\n", r, i, surf_g.dev.cmdbufs[i]); |
|
1371 |
|
exit(1); |
|
1372 |
|
} |
|
|
1192 |
|
begin_info.type = vk_struct_type_cb_begin_info; |
|
1193 |
|
vk_begin_cb(surf_g.dev.cbs[i], &begin_info); |
|
1194 |
|
VK_FATAL("0:MAIN:FATAL:%d:swapchain img:%u:command buffer:%p:unable to begin recording\n", r, i, surf_g.dev.cbs[i]) |
1373 |
1195 |
/*--------------------------------------------------------------------*/ |
/*--------------------------------------------------------------------*/ |
1374 |
1196 |
/* acquired img (undefined layout) to presentation layout */ |
/* acquired img (undefined layout) to presentation layout */ |
1375 |
1197 |
memset(&b, 0, sizeof(b)); |
memset(&b, 0, sizeof(b)); |
|
... |
... |
static void cmdbuf_record(u8 i) |
1379 |
1201 |
b.src_q_fam = vk_q_fam_ignored; |
b.src_q_fam = vk_q_fam_ignored; |
1380 |
1202 |
b.dst_q_fam = vk_q_fam_ignored; |
b.dst_q_fam = vk_q_fam_ignored; |
1381 |
1203 |
b.img = surf_g.dev.swpchn.imgs[i]; |
b.img = surf_g.dev.swpchn.imgs[i]; |
1382 |
|
|
|
1383 |
1204 |
b.subrsrc_range.aspect = vk_img_aspect_color_bit; |
b.subrsrc_range.aspect = vk_img_aspect_color_bit; |
1384 |
1205 |
b.subrsrc_range.lvls_n = 1; |
b.subrsrc_range.lvls_n = 1; |
1385 |
1206 |
b.subrsrc_range.array_layers_n = 1; |
b.subrsrc_range.array_layers_n = 1; |
1386 |
|
|
|
1387 |
|
vk_cmd_pipeline_barrier(surf_g.dev.cmdbufs[i], |
|
1388 |
|
vk_pipeline_stage_top_of_pipe_bit, |
|
1389 |
|
vk_pipeline_stage_top_of_pipe_bit, 0, 0, 0, 0, 0, 1, &b); |
|
|
1207 |
|
vk_cmd_pl_barrier(surf_g.dev.cbs[i], &b); |
1390 |
1208 |
/*--------------------------------------------------------------------*/ |
/*--------------------------------------------------------------------*/ |
1391 |
1209 |
/* blit from cpu img to pe img */ |
/* blit from cpu img to pe img */ |
1392 |
1210 |
memset(®ion, 0, sizeof(region)); |
memset(®ion, 0, sizeof(region)); |
|
... |
... |
static void cmdbuf_record(u8 i) |
1396 |
1214 |
region.src_offsets[1].y = APP_CPU_IMG_HEIGHT; |
region.src_offsets[1].y = APP_CPU_IMG_HEIGHT; |
1397 |
1215 |
region.dst_subrsrc.aspect = vk_img_aspect_color_bit; |
region.dst_subrsrc.aspect = vk_img_aspect_color_bit; |
1398 |
1216 |
region.dst_subrsrc.array_layers_n = 1; |
region.dst_subrsrc.array_layers_n = 1; |
|
1217 |
|
/* XXX: it is a scaling blit: you can use APP_WIN_WIDTH/APP_WIN_HEIGHT */ |
1399 |
1218 |
region.dst_offsets[1].x = APP_CPU_IMG_WIDTH; |
region.dst_offsets[1].x = APP_CPU_IMG_WIDTH; |
1400 |
1219 |
region.dst_offsets[1].y = APP_CPU_IMG_HEIGHT; |
region.dst_offsets[1].y = APP_CPU_IMG_HEIGHT; |
1401 |
|
|
|
1402 |
|
vk_cmd_blit_img(surf_g.dev.cmdbufs[i], surf_g.dev.cpu_imgs[i].vk, |
|
1403 |
|
vk_img_layout_general, surf_g.dev.swpchn.imgs[i], |
|
1404 |
|
vk_img_layout_present, 1, ®ion, 0); |
|
|
1220 |
|
vk_cmd_blit_img(surf_g.dev.cbs[i], surf_g.dev.cpu_imgs[i].vk, |
|
1221 |
|
surf_g.dev.swpchn.imgs[i], ®ion); |
1405 |
1222 |
/*--------------------------------------------------------------------*/ |
/*--------------------------------------------------------------------*/ |
1406 |
|
r = vk_end_cmdbuf(surf_g.dev.cmdbufs[i]); |
|
1407 |
|
if (r < 0) { |
|
1408 |
|
LOG("0:MAIN:FATAL:%d:swapchain img:%u:command buffer:%p:unable to end recording\n", r, i, surf_g.dev.cmdbufs[i]); |
|
1409 |
|
exit(1); |
|
1410 |
|
} |
|
|
1223 |
|
vk_end_cb(surf_g.dev.cbs[i]); |
|
1224 |
|
VK_FATAL("0:MAIN:FATAL:%d:swapchain img:%u:command buffer:%p:unable to end recording\n", r, i, surf_g.dev.cbs[i]) |
1411 |
1225 |
} |
} |
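vk_cmd_blit_img scales between the source and destination boxes, which is what the XXX note in the region setup points at: the destination offsets do not have to match the cpu image size. The scaling variant the note suggests:

/* scaling variant of the region above: stretch the cpu img to the win */
region.dst_offsets[1].x = APP_WIN_WIDTH;
region.dst_offsets[1].y = APP_WIN_HEIGHT;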
1412 |
1226 |
|
|
1413 |
|
static void cmdbufs_record(void) |
|
|
1227 |
|
static void cbs_rec(void) |
1414 |
1228 |
{ |
{ |
1415 |
1229 |
u8 i; |
u8 i; |
1416 |
1230 |
|
|
|
... |
... |
static void cmdbufs_record(void) |
1418 |
1232 |
loop { |
loop { |
1419 |
1233 |
if (i == surf_g.dev.swpchn.imgs_n) |
if (i == surf_g.dev.swpchn.imgs_n) |
1420 |
1234 |
break; |
break; |
1421 |
|
cmdbuf_record(i); |
|
|
1235 |
|
cb_rec(i); |
1422 |
1236 |
++i; |
++i; |
1423 |
1237 |
} |
} |
1424 |
1238 |
} |
} |
|
... |
... |
static void dev_init(void) |
1454 |
1268 |
dev_create(); |
dev_create(); |
1455 |
1269 |
dev_syms(); |
dev_syms(); |
1456 |
1270 |
q_get(); |
q_get(); |
1457 |
|
cmdpools_create(); |
|
|
1271 |
|
cp_create(); |
1458 |
1272 |
} |
} |
1459 |
1273 |
|
|
1460 |
1274 |
static void surf_init(void) |
static void surf_init(void) |
|
... |
... |
static void surf_init(void) |
1463 |
1277 |
dev_init(); |
dev_init(); |
1464 |
1278 |
swpchn_init(); |
swpchn_init(); |
1465 |
1279 |
swpchn_imgs_get(); |
swpchn_imgs_get(); |
1466 |
|
|
|
1467 |
1280 |
/* our cpu imgs for swpchn imgs */ |
/* our cpu imgs for swpchn imgs */ |
1468 |
1281 |
cpu_imgs_create(); |
cpu_imgs_create(); |
1469 |
1282 |
sems_create(); |
sems_create(); |
1470 |
|
cmdbufs_create(); |
|
|
1283 |
|
cbs_create(); |
1471 |
1284 |
cpu_imgs_layout_to_general(); |
cpu_imgs_layout_to_general(); |
1472 |
1285 |
cpu_imgs_subrsrc_layout_get(); |
cpu_imgs_subrsrc_layout_get(); |
1473 |
1286 |
tmp_cpu_imgs_mem_rqmts_get(); |
tmp_cpu_imgs_mem_rqmts_get(); |
1474 |
1287 |
cpu_imgs_dev_mem_alloc(); |
cpu_imgs_dev_mem_alloc(); |
1475 |
1288 |
cpu_imgs_dev_mem_bind(); |
cpu_imgs_dev_mem_bind(); |
1476 |
1289 |
cpu_imgs_dev_mem_map(); |
cpu_imgs_dev_mem_map(); |
1477 |
|
cmdbufs_record(); |
|
|
1290 |
|
cbs_rec(); |
1478 |
1291 |
} |
} |
1479 |
1292 |
|
|
1480 |
1293 |
static void init_vk(void) |
static void init_vk(void) |
|
... |
... |
static void swpchn_acquire_next_img(u32 *i) |
1503 |
1316 |
info.timeout = u64_max; /* infinite */ |
info.timeout = u64_max; /* infinite */ |
1504 |
1317 |
info.devs = 0x00000001; /* no device groups: mask is just bit 0 */ |
info.devs = 0x00000001; /* no device groups: mask is just bit 0 */ |
1505 |
1318 |
info.sem = surf_g.dev.sems[sem_acquire_img_done]; |
info.sem = surf_g.dev.sems[sem_acquire_img_done]; |
1506 |
|
|
|
1507 |
|
r = vk_acquire_next_img(&info, i); |
|
1508 |
|
if (r < 0) { |
|
1509 |
|
LOG("0:MAIN:FATAL:%d:device:%p:unable to acquire next image from swapchain %p\n", r, surf_g.dev.vk, surf_g.dev.swpchn.vk); |
|
1510 |
|
exit(1); |
|
1511 |
|
} |
|
1512 |
|
/* XXX:TRACE */ |
|
|
1319 |
|
vk_acquire_next_img(&info, i); |
|
1320 |
|
VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to acquire next image from swapchain %p\n", r, surf_g.dev.vk, surf_g.dev.swpchn.vk) |
1513 |
1321 |
LOG("0:MAIN:device:%p:swapchain:%p:acquired image %u\n", surf_g.dev.vk, surf_g.dev.swpchn.vk, *i); |
LOG("0:MAIN:device:%p:swapchain:%p:acquired image %u\n", surf_g.dev.vk, surf_g.dev.swpchn.vk, *i); |
1514 |
1322 |
} |
} |
1515 |
1323 |
|
|
|
... |
... |
static void cpu_img_draw(u8 i) |
1529 |
1337 |
loop { |
loop { |
1530 |
1338 |
struct vk_subrsrc_layout_t *l; |
struct vk_subrsrc_layout_t *l; |
1531 |
1339 |
u64 o; /* _byte_ offset */ |
u64 o; /* _byte_ offset */ |
1532 |
|
u64 o_dw; /* _32 bits_ dword offset */ |
|
|
1340 |
|
u64 o_w; /* _32 bits_ word offset */ |
1533 |
1341 |
|
|
1534 |
1342 |
if (col == APP_CPU_IMG_WIDTH) |
if (col == APP_CPU_IMG_WIDTH) |
1535 |
1343 |
break; |
break; |
1536 |
|
|
|
1537 |
1344 |
l = &surf_g.dev.cpu_imgs[i].layout; |
l = &surf_g.dev.cpu_imgs[i].layout; |
1538 |
|
|
|
1539 |
1345 |
o = row * l->row_pitch + col * sizeof(*texel); |
o = row * l->row_pitch + col * sizeof(*texel); |
1540 |
|
o_dw = o >> 2; |
|
1541 |
|
|
|
1542 |
|
texel[o_dw] = fill_texel_g; |
|
1543 |
|
|
|
|
1346 |
|
o_w = o >> 2; |
|
1347 |
|
texel[o_w] = fill_texel_g; |
1544 |
1348 |
++col; |
++col; |
1545 |
1349 |
} |
} |
1546 |
1350 |
++row; |
++row; |
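The addressing uses the driver-reported row_pitch rather than APP_CPU_IMG_WIDTH * 4 because rows may be padded for alignment; o is a byte offset and o_w its index in 32 bits words (b8g8r8a8 texels are 4 bytes). A worked example with an assumed pitch:

/* assumed: row_pitch = 4096 bytes, sizeof(*texel) = 4
   row = 2, col = 3:
	o   = 2 * 4096 + 3 * 4 = 8204 (bytes)
	o_w = 8204 >> 2        = 2051 (32 bits words)
	texel[2051] = fill_texel_g; */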
|
... |
... |
static void cpu_img_to_pe(u8 i) |
1552 |
1356 |
s32 r; |
s32 r; |
1553 |
1357 |
struct vk_submit_info_t submit_info; |
struct vk_submit_info_t submit_info; |
1554 |
1358 |
struct vk_present_info_t present_info; |
struct vk_present_info_t present_info; |
|
1359 |
|
u32 wait_dst_stage; |
1555 |
1360 |
u32 idxs[1]; |
u32 idxs[1]; |
1556 |
1361 |
|
|
1557 |
1362 |
memset(&submit_info, 0, sizeof(submit_info)); |
memset(&submit_info, 0, sizeof(submit_info)); |
1558 |
1363 |
submit_info.type = vk_struct_type_submit_info; |
submit_info.type = vk_struct_type_submit_info; |
1559 |
1364 |
submit_info.wait_sems_n = 1; |
submit_info.wait_sems_n = 1; |
1560 |
1365 |
submit_info.wait_sems = &surf_g.dev.sems[sem_acquire_img_done]; |
submit_info.wait_sems = &surf_g.dev.sems[sem_acquire_img_done]; |
1561 |
|
submit_info.cmdbufs_n = 1; |
|
1562 |
|
submit_info.cmdbufs = &surf_g.dev.cmdbufs[i]; |
|
|
1366 |
|
wait_dst_stage = vk_pl_stage_bottom_of_pipe_bit; |
|
1367 |
|
submit_info.wait_dst_stages = &wait_dst_stage; |
|
1368 |
|
submit_info.cbs_n = 1; |
|
1369 |
|
submit_info.cbs = &surf_g.dev.cbs[i]; |
1563 |
1370 |
submit_info.signal_sems_n = 1; |
submit_info.signal_sems_n = 1; |
1564 |
1371 |
submit_info.signal_sems = &surf_g.dev.sems[app_sem_blit_done]; |
submit_info.signal_sems = &surf_g.dev.sems[app_sem_blit_done]; |
|
1372 |
|
LOG("MAIN:queue:%p\n", surf_g.dev.q); |
1565 |
1373 |
|
|
1566 |
|
r = vk_q_submit(surf_g.dev.q, 1, &submit_info, 0); |
|
1567 |
|
if (r < 0) { |
|
1568 |
|
LOG("0:MAIN:FATAL:%d:queue:%p:unable to submit the image pre-recorded command buffer\n", r, surf_g.dev.q); |
|
1569 |
|
exit(1); |
|
1570 |
|
} |
|
|
1374 |
|
vk_q_submit(&submit_info); |
|
1375 |
|
VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to submit the image pre-recorded command buffer\n", r, surf_g.dev.q) |
1571 |
1376 |
/*--------------------------------------------------------------------*/ |
/*--------------------------------------------------------------------*/ |
1572 |
1377 |
idxs[0] = i; |
idxs[0] = i; |
1573 |
1378 |
memset(&present_info, 0, sizeof(present_info)); |
memset(&present_info, 0, sizeof(present_info)); |
|
... |
... |
static void cpu_img_to_pe(u8 i) |
1578 |
1383 |
present_info.swpchns = &surf_g.dev.swpchn.vk; |
present_info.swpchns = &surf_g.dev.swpchn.vk; |
1579 |
1384 |
present_info.idxs = idxs; |
present_info.idxs = idxs; |
1580 |
1385 |
present_info.results = 0; |
present_info.results = 0; |
1581 |
|
|
|
1582 |
|
r = vk_q_present(surf_g.dev.q, &present_info); |
|
1583 |
|
if (r < 0) { |
|
1584 |
|
LOG("0:MAIN:FATAL:%d:queue:%p:unable to submit the image %u to the presentation engine\n", r, surf_g.dev.q, i); |
|
1585 |
|
exit(1); |
|
1586 |
|
} |
|
|
1386 |
|
vk_q_present(&present_info); |
|
1387 |
|
VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to submit the image %u to the presentation engine\n", r, surf_g.dev.q, i) |
1587 |
1388 |
} |
} |
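One frame is ordered by the two semaphores: the acquire signals sems[sem_acquire_img_done], the submit waits on it (at the bottom-of-pipe stage set above) and signals sems[app_sem_blit_done]; the present request presumably waits on the latter, its wait_sems setup being in the elided part of this hunk. Schematically:

/* per-frame ordering (schematic):
   vk_acquire_next_img -> signals sem_acquire_img_done
   vk_q_submit         -> waits   sem_acquire_img_done
                          signals app_sem_blit_done
   vk_q_present        -> waits   app_sem_blit_done (presumed) */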
1588 |
1389 |
|
|
1589 |
1390 |
static void render(void) |
static void render(void) |
|
... |
... |
static void render(void) |
1593 |
1394 |
swpchn_acquire_next_img(&i); |
swpchn_acquire_next_img(&i); |
1594 |
1395 |
cpu_img_draw(i); /* cpu rendering */ |
cpu_img_draw(i); /* cpu rendering */ |
1595 |
1396 |
cpu_img_to_pe(i); |
cpu_img_to_pe(i); |
1596 |
|
|
|
1597 |
1397 |
do_render_g = false; |
do_render_g = false; |
1598 |
1398 |
if (fill_texel_g == 0x0000ff00) |
if (fill_texel_g == 0x0000ff00) |
1599 |
1399 |
fill_texel_g = 0x00ff0000; |
fill_texel_g = 0x00ff0000; |
|
... |
... |
static void render(void) |
1601 |
1401 |
fill_texel_g = 0x0000ff00; |
fill_texel_g = 0x0000ff00; |
1602 |
1402 |
} |
} |
1603 |
1403 |
|
|
1604 |
|
/* "main" loop */ |
|
1605 |
1404 |
static void run(void) |
static void run(void) |
1606 |
1405 |
{ |
{ |
1607 |
1406 |
state_g = state_run; |
state_g = state_run; |
|
... |
... |
static void run(void) |
1609 |
1408 |
xcb_generic_event_t *e; |
xcb_generic_event_t *e; |
1610 |
1409 |
|
|
1611 |
1410 |
do_render_g = false; |
do_render_g = false; |
1612 |
|
|
|
1613 |
1411 |
/* "evts which could lead to change what we display" */ |
/* "evts which could lead to change what we display" */ |
1614 |
1412 |
e = dl_xcb_wait_for_event(app_xcb.c); |
e = dl_xcb_wait_for_event(app_xcb.c); |
1615 |
1413 |
if (e == 0) { /* i/o err */ |
if (e == 0) { /* i/o err */ |
1616 |
1414 |
LOG("0:MAIN:xcb:'%s':connection:%p:event:input/output error | x11 server connection lost\n", app_xcb.disp_env, app_xcb.c); |
LOG("0:MAIN:xcb:'%s':connection:%p:event:input/output error | x11 server connection lost\n", app_xcb.disp_env, app_xcb.c); |
1617 |
1415 |
break; |
break; |
1618 |
1416 |
} |
} |
1619 |
|
|
|
1620 |
1417 |
loop { /* drain evts */ |
loop { /* drain evts */ |
1621 |
1418 |
app_xcb_evt_handle(e); |
app_xcb_evt_handle(e); |
1622 |
1419 |
free(e); |
free(e); |
1623 |
|
|
|
1624 |
1420 |
if (state_g == state_quit) |
if (state_g == state_quit) |
1625 |
1421 |
return; |
return; |
1626 |
|
|
|
1627 |
1422 |
e = dl_xcb_poll_for_event(app_xcb.c); |
e = dl_xcb_poll_for_event(app_xcb.c); |
1628 |
1423 |
if (e == 0) |
if (e == 0) |
1629 |
1424 |
break; |
break; |
1630 |
1425 |
} |
} |
1631 |
|
|
|
1632 |
1426 |
/* synchronous rendering */ |
/* synchronous rendering */ |
1633 |
1427 |
if (do_render_g) |
if (do_render_g) |
1634 |
1428 |
render(); |
render(); |
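run() blocks on one xcb event, drains whatever else queued up with non-blocking polls, then renders at most once, so a burst of events costs a single frame. The body of the loop, condensed (quit and i/o error paths dropped):

/* condensed body of the loop in run() */
e = dl_xcb_wait_for_event(app_xcb.c);	/* block until something happens */
loop {	/* drain without blocking */
	app_xcb_evt_handle(e);
	free(e);
	e = dl_xcb_poll_for_event(app_xcb.c);
	if (e == 0)
		break;
}
if (do_render_g)
	render();	/* synchronous rendering */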
|
... |
... |
int main(void) |
1645 |
1439 |
LOG("0:exiting app\n"); |
LOG("0:exiting app\n"); |
1646 |
1440 |
exit(0); |
exit(0); |
1647 |
1441 |
} |
} |
|
1442 |
|
#undef VK_FATAL |
|
1443 |
|
#undef FATAL |
|
1444 |
|
/*---------------------------------------------------------------------------*/ |
1648 |
1445 |
#define CLEANUP |
#define CLEANUP |
1649 |
1446 |
#include "namespace/app.c" |
#include "namespace/app.c" |
1650 |
1447 |
#include "namespace/vk_syms.c" |
#include "namespace/vk_syms.c" |
|
1448 |
|
#include "namespace/app_state_types.h" |
|
1449 |
|
#include "namespace/app_state.c" |
1651 |
1450 |
#undef CLEANUP |
#undef CLEANUP |
|
1451 |
|
/*---------------------------------------------------------------------------*/ |
1652 |
1452 |
#endif |
#endif |
File 2d/kickstart/vk_types.h deleted (index 28c778d..0000000) |
1 |
|
#ifndef VK_TYPES_H |
|
2 |
|
#define VK_TYPES_H |
|
3 |
|
/* |
|
4 |
|
* this is public domain without any warranties of any kind |
|
5 |
|
* Sylvain BERTRAND |
|
6 |
|
*/ |
|
7 |
|
/* XXX: KEEP AN EYE ON ABBREVIATIONS */ |
|
8 |
|
/* |
|
9 |
|
* XXX: we are fixing vulkan ABI which made the huge mistake to use |
|
10 |
|
* enums as function parameters or function return values. |
|
11 |
|
*/ |
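Concretely, the fix is that wherever the real vulkan prototypes use an enum (VkResult returns, format/layout parameters), the types below use fixed-width u32/s32, so the ABI no longer depends on the storage a given compiler picks for an enum. For example:

/* real vulkan:  VkResult vkEndCommandBuffer(VkCommandBuffer cmdbuf);
   this header:  s32 (*dl_vk_end_cmdbuf)(void *cmdbuf);
   s32 pins a width the enum would only have by compiler choice */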
|
12 |
|
#include <stddef.h> |
|
13 |
|
#include <xcb.h> /* we use the xcb wsi */ |
|
14 |
|
#include "app_core_types.h" |
|
15 |
|
/* macro */ |
|
16 |
|
/*----------------------------------------------------------------------------*/ |
|
17 |
|
#define vk_true 1 |
|
18 |
|
#define vk_false 0 |
|
19 |
|
#define vk_whole_sz 0xffffffffffffffff |
|
20 |
|
#define vk_q_fam_ignored 0xffffffff |
|
21 |
|
/*----------------------------------------------------------------------------*/ |
|
22 |
|
#define VK_VERSION_MAJOR(x) (x >> 22) |
|
23 |
|
#define VK_VERSION_MINOR(x) ((x >> 12) & 0x3ff) |
|
24 |
|
#define VK_VERSION_PATCH(x) (x & 0xfff) |
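The version word packs major into bits 31:22, minor into bits 21:12 and patch into bits 11:0. Decoding an example value:

/* e.g. x = 0x40106c (4198508):
   VK_VERSION_MAJOR(x) = 0x40106c >> 22           = 1
   VK_VERSION_MINOR(x) = (0x40106c >> 12) & 0x3ff = 1
   VK_VERSION_PATCH(x) = 0x40106c & 0xfff         = 0x6c = 108  -> 1.1.108 */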
|
25 |
|
/* macro */ |
|
26 |
|
/******************************************************************************/ |
|
27 |
|
/* types */ |
|
28 |
|
/* |
|
29 |
|
* 64 bits platforms: enum storage defaults to 32 bits, but can grow up to 64 |
|
30 |
|
* bits based on the range of values the enum holds. this matters for the |
|
31 |
|
* vulkan ABI, which we fix here. |
|
32 |
|
* _individually_, each value defaults to 32 bits when possible, signed or |
|
33 |
|
* unsigned. |
|
34 |
|
* XXX: All vulkan enums use 32 bits storage |
|
35 |
|
*/ |
|
36 |
|
enum { |
|
37 |
|
vk_err_out_of_host_mem = -1, |
|
38 |
|
/*--------------------------------------------------------------------*/ |
|
39 |
|
vk_success = 0, |
|
40 |
|
vk_incomplete = 5, |
|
41 |
|
vk_r_enum_max = 0x7fffffff |
|
42 |
|
}; |
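The trailing vk_r_enum_max = 0x7fffffff is not a real result code: a member at the top of the signed 32 bits range forces the enum's storage to at least 32 bits on any conforming compiler, which is the property the comment above relies on. Every enum in this header repeats the trick.

/* without the 0x7fffffff member, { -1, 0, 5 } could legally fit in 8 bits
   of storage; with it, the value range forces >= 32 bits */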
|
43 |
|
|
|
44 |
|
enum { |
|
45 |
|
vk_struct_type_instance_create_info = 1, |
|
46 |
|
vk_struct_type_dev_q_create_info = 2, |
|
47 |
|
vk_struct_type_dev_create_info = 3, |
|
48 |
|
vk_struct_type_submit_info = 4, |
|
49 |
|
vk_struct_type_mem_alloc_info = 5, |
|
50 |
|
vk_struct_type_fence_create_info = 8, |
|
51 |
|
vk_struct_type_sem_create_info = 9, |
|
52 |
|
vk_struct_type_img_create_info = 14, |
|
53 |
|
vk_struct_type_cmdpool_create_info = 39, |
|
54 |
|
vk_struct_type_cmdbuf_alloc_info = 40, |
|
55 |
|
vk_struct_type_cmdbuf_begin_info = 42, |
|
56 |
|
vk_struct_type_img_mem_barrier = 45, |
|
57 |
|
/* extension number 2 or index 1, offset 0 */ |
|
58 |
|
vk_struct_type_swpchn_create_info = 1000000000 + 1000 + 0, |
|
59 |
|
/* extension number 2 or index 1, offset 1 */ |
|
60 |
|
vk_struct_type_present_info = 1000000000 + 1000 + 1, |
|
61 |
|
/* extension number 6 or index 5, offset 0 */ |
|
62 |
|
vk_struct_type_xcb_surf_create_info = 1000000000 + 5000 + 0, |
|
63 |
|
/* extension number 60 or index 59, offset 1 */ |
|
64 |
|
vk_struct_type_phydev_props = 1000000000 + 59000 + 1, |
|
65 |
|
/* extension number 60 or index 59, offset 5 */ |
|
66 |
|
vk_struct_type_q_fam_props = 1000000000 + 59000 + 5, |
|
67 |
|
/* extension number 60 or index 59, offset 6 */ |
|
68 |
|
vk_struct_type_phydev_mem_props = 1000000000 + 59000 + 6, |
|
69 |
|
/* extension number 60 or index 59, offset 10 */ |
|
70 |
|
vk_struct_type_acquire_next_img_info = 1000000000 + 59000 + 10, |
|
71 |
|
/* extension number 91 or index 90, offset 0 */ |
|
72 |
|
vk_struct_type_surf_caps = 1000000000 + 90000 + 0, |
|
73 |
|
/* extension number 120 or index 119, offset 0 */ |
|
74 |
|
vk_struct_type_phydev_surf_info = 1000000000 + 119000 + 0, |
|
75 |
|
/* extension number 120 or index 119, offset 2 */ |
|
76 |
|
vk_struct_type_surf_texel_mem_blk_conf = 1000000000 + 119000 + 2, |
|
77 |
|
/* extension number 147 or index 146, offset 1 */ |
|
78 |
|
vk_struct_type_img_mem_rqmts_info = 1000000000 + 146000 + 1, |
|
79 |
|
/* extension number 147 or index 146, offset 3 */ |
|
80 |
|
vk_struct_type_mem_rqmts = 1000000000 + 146000 + 3, |
|
81 |
|
/* extension number 158 or index 157, offset 1 */ |
|
82 |
|
vk_struct_type_bind_img_mem_info = 1000000000 + 157000 + 1, |
|
83 |
|
vk_struct_type_enum_max = 0x7fffffff |
|
84 |
|
}; |
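The extension values follow the registry rule the comments spell out: value = 1000000000 + (extension_number - 1) * 1000 + offset. For the swapchain create info:

/* value = 1000000000 + (ext_number - 1) * 1000 + offset
   swpchn create info: ext_number = 2, offset = 0
   -> 1000000000 + 1 * 1000 + 0 = 1000001000 */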
|
85 |
|
|
|
86 |
|
struct vk_instance_create_info_t { |
|
87 |
|
u32 type; |
|
88 |
|
void *next; |
|
89 |
|
u32 flags; |
|
90 |
|
void *app_info; /* allow easy hidden driver optimizations: no! */ |
|
91 |
|
u32 enabled_layers_n; |
|
92 |
|
u8 **enabled_layer_names; |
|
93 |
|
u32 enabled_exts_n; |
|
94 |
|
u8 **enabled_ext_names; |
|
95 |
|
}; |
|
96 |
|
|
|
97 |
|
#define VK_MAX_EXT_NAME_SZ 256 |
|
98 |
|
struct vk_ext_props_t { |
|
99 |
|
u8 name[VK_MAX_EXT_NAME_SZ]; |
|
100 |
|
u32 spec_version; |
|
101 |
|
}; |
|
102 |
|
|
|
103 |
|
#define VK_MAX_DESC_SZ 256 |
|
104 |
|
struct vk_layer_props_t { |
|
105 |
|
u8 name[VK_MAX_EXT_NAME_SZ]; |
|
106 |
|
u32 spec_version; |
|
107 |
|
u32 implementation_version; |
|
108 |
|
u8 desc[VK_MAX_DESC_SZ]; |
|
109 |
|
}; |
|
110 |
|
|
|
111 |
|
enum { |
|
112 |
|
vk_phydev_type_other = 0, |
|
113 |
|
vk_phydev_type_integrated_gpu = 1, |
|
114 |
|
vk_phydev_type_discrete_gpu = 2, |
|
115 |
|
vk_phydev_type_virtual_gpu = 3, |
|
116 |
|
vk_phydev_type_cpu = 4, |
|
117 |
|
vk_phydev_type_enum_max = 0x7fffffff |
|
118 |
|
}; |
|
119 |
|
|
|
120 |
|
struct vk_phydev_limits_t { |
|
121 |
|
u32 not_used_00[11]; |
|
122 |
|
u64 not_used_01[2]; |
|
123 |
|
u32 not_used_02[51]; |
|
124 |
|
float not_used_03[2]; |
|
125 |
|
u32 not_used_04[3]; |
|
126 |
|
float not_used_05[2]; |
|
127 |
|
u32 not_used_06; |
|
128 |
|
size_t not_used_07; |
|
129 |
|
u64 not_used_08[3]; |
|
130 |
|
u32 not_used_09[4]; |
|
131 |
|
float not_used_10[2]; |
|
132 |
|
u32 not_used_11[16]; |
|
133 |
|
float not_used_12; |
|
134 |
|
u32 not_used_13[4]; |
|
135 |
|
float not_used_14[6]; |
|
136 |
|
u32 not_used_15[2]; |
|
137 |
|
u64 not_used_16[3]; |
|
138 |
|
}; |
|
139 |
|
|
|
140 |
|
struct vk_phydev_sparse_props_t { |
|
141 |
|
u32 not_used[5]; |
|
142 |
|
}; |
|
143 |
|
|
|
144 |
|
/*----------------------------------------------------------------------------*/ |
|
145 |
|
#define VK_MAX_PHYDEV_NAME_SZ 256 |
|
146 |
|
#define VK_UUID_SZ 16 |
|
147 |
|
struct vk_phydev_props_core_t { |
|
148 |
|
u32 api_version; |
|
149 |
|
u32 driver_version; |
|
150 |
|
u32 vendor_id; |
|
151 |
|
u32 dev_id; |
|
152 |
|
u32 dev_type; |
|
153 |
|
u8 name[VK_MAX_PHYDEV_NAME_SZ]; |
|
154 |
|
u8 pipeline_cache_uuid[VK_UUID_SZ]; |
|
155 |
|
struct vk_phydev_limits_t limits; |
|
156 |
|
struct vk_phydev_sparse_props_t sparse_props; |
|
157 |
|
}; |
|
158 |
|
/* the vulkan 1.1 version */ |
|
159 |
|
struct vk_phydev_props_t { |
|
160 |
|
u32 type; |
|
161 |
|
void *next; |
|
162 |
|
struct vk_phydev_props_core_t core; |
|
163 |
|
}; |
|
164 |
|
/*----------------------------------------------------------------------------*/ |
|
165 |
|
enum { |
|
166 |
|
vk_q_gfx_bit = 0x00000001, |
|
167 |
|
vk_q_compute_bit = 0x00000002, |
|
168 |
|
vk_q_transfer_bit = 0x00000004, |
|
169 |
|
vk_q_sparse_binding_bit = 0x00000008, |
|
170 |
|
vk_q_protected_bit = 0x00000010, |
|
171 |
|
vk_q_flag_bits_enum_max = 0x7fffffff |
|
172 |
|
}; |
|
173 |
|
|
|
174 |
|
struct vk_extent_3d_t { |
|
175 |
|
u32 width; |
|
176 |
|
u32 height; |
|
177 |
|
u32 depth; |
|
178 |
|
}; |
|
179 |
|
/*----------------------------------------------------------------------------*/ |
|
180 |
|
struct vk_q_fam_props_core_t { |
|
181 |
|
u32 flags; |
|
182 |
|
u32 qs_n; |
|
183 |
|
u32 timestamp_valid_bits; |
|
184 |
|
struct vk_extent_3d_t min_img_transfer_granularity; |
|
185 |
|
}; |
|
186 |
|
|
|
187 |
|
struct vk_q_fam_props_t { |
|
188 |
|
u32 type; |
|
189 |
|
void *next; |
|
190 |
|
struct vk_q_fam_props_core_t core; |
|
191 |
|
}; |
|
192 |
|
/*----------------------------------------------------------------------------*/ |
|
193 |
|
struct vk_phydev_features_t { |
|
194 |
|
u32 not_used[55]; |
|
195 |
|
}; |
|
196 |
|
|
|
197 |
|
struct vk_dev_q_create_info_t { |
|
198 |
|
u32 type; |
|
199 |
|
void *next; |
|
200 |
|
u32 flags; |
|
201 |
|
u32 q_fam; |
|
202 |
|
u32 qs_n; |
|
203 |
|
float *q_prios; |
|
204 |
|
}; |
|
205 |
|
|
|
206 |
|
struct vk_dev_create_info_t { |
|
207 |
|
u32 type; |
|
208 |
|
void *next; |
|
209 |
|
u32 flags; |
|
210 |
|
u32 q_create_infos_n; |
|
211 |
|
struct vk_dev_q_create_info_t *q_create_infos; |
|
212 |
|
u32 do_not_use_0; |
|
213 |
|
void *do_not_use_1; |
|
214 |
|
u32 enabled_exts_n; |
|
215 |
|
u8 **enabled_ext_names; |
|
216 |
|
void *do_not_use_2; |
|
217 |
|
}; |
|
218 |
|
|
|
219 |
|
enum { |
|
220 |
|
vk_cmdpool_create_transient_bit = 0x00000001, |
|
221 |
|
vk_cmdpool_create_reset_cmdbuf_bit = 0x00000002, |
|
222 |
|
vk_cmdpool_create_flag_bits_enum_max = 0x7fffffff |
|
223 |
|
}; |
|
224 |
|
|
|
225 |
|
struct vk_cmdpool_create_info_t { |
|
226 |
|
u32 type; |
|
227 |
|
void *next; |
|
228 |
|
u32 flags; |
|
229 |
|
u32 q_fam; |
|
230 |
|
}; |
|
231 |
|
|
|
232 |
|
struct vk_xcb_surf_create_info_t { |
|
233 |
|
u32 type; |
|
234 |
|
void *next; |
|
235 |
|
u32 flags; |
|
236 |
|
xcb_connection_t *c; |
|
237 |
|
xcb_window_t win; |
|
238 |
|
}; |
|
239 |
|
|
|
240 |
|
struct vk_phydev_surf_info_t { |
|
241 |
|
u32 type; |
|
242 |
|
void *next; |
|
243 |
|
void *surf; |
|
244 |
|
}; |
|
245 |
|
|
|
246 |
|
enum { |
|
247 |
|
vk_texel_mem_blk_fmt_undefined = 0, |
|
248 |
|
vk_texel_mem_blk_fmt_b8g8r8a8_unorm = 44, |
|
249 |
|
vk_texel_mem_blk_fmt_b8g8r8a8_srgb = 50, |
|
250 |
|
vk_texel_mem_blk_fmt_enum_max = 0x7fffffff |
|
251 |
|
}; |
|
252 |
|
|
|
253 |
|
enum { |
|
254 |
|
vk_color_space_srgb_nonlinear = 0, |
|
255 |
|
vk_color_space_enum_max = 0x7fffffff |
|
256 |
|
}; |
|
257 |
|
|
|
258 |
|
struct vk_surf_texel_mem_blk_conf_core_t { |
|
259 |
|
u32 fmt; |
|
260 |
|
u32 color_space; |
|
261 |
|
}; |
|
262 |
|
|
|
263 |
|
struct vk_surf_texel_mem_blk_conf_t { |
|
264 |
|
u32 type; |
|
265 |
|
void *next; |
|
266 |
|
struct vk_surf_texel_mem_blk_conf_core_t core; |
|
267 |
|
}; |
|
268 |
|
/*----------------------------------------------------------------------------*/ |
|
269 |
|
enum { |
|
270 |
|
vk_mem_prop_dev_local_bit = 0x00000001, |
|
271 |
|
vk_mem_prop_host_visible_bit = 0x00000002, |
|
272 |
|
vk_mem_prop_host_cached_bit = 0x00000008, |
|
273 |
|
vk_mem_prop_flag_bits_enum_max = 0x7fffffff |
|
274 |
|
}; |
|
275 |
|
|
|
276 |
|
struct vk_mem_type_t { |
|
277 |
|
u32 prop_flags; |
|
278 |
|
u32 heap; |
|
279 |
|
}; |
|
280 |
|
/*----------------------------------------------------------------------------*/ |
|
281 |
|
enum { |
|
282 |
|
vk_mem_heap_dev_local_bit = 0x00000001, |
|
283 |
|
vk_mem_heap_multi_instance_bit = 0x00000002, |
|
284 |
|
vk_mem_heap_flag_bits_enum_max = 0x7fffffff |
|
285 |
|
}; |
|
286 |
|
|
|
287 |
|
struct vk_mem_heap_t { |
|
288 |
|
u64 sz; |
|
289 |
|
u32 flags; |
|
290 |
|
}; |
|
291 |
|
/*----------------------------------------------------------------------------*/ |
|
292 |
|
#define VK_MEM_TYPES_N_MAX 32 |
|
293 |
|
#define VK_MEM_HEAPS_N_MAX 16 |
|
294 |
|
struct vk_phydev_mem_props_core_t { |
|
295 |
|
u32 mem_types_n; |
|
296 |
|
struct vk_mem_type_t mem_types[VK_MEM_TYPES_N_MAX]; |
|
297 |
|
u32 mem_heaps_n; |
|
298 |
|
struct vk_mem_heap_t mem_heaps[VK_MEM_HEAPS_N_MAX]; |
|
299 |
|
}; |
|
300 |
|
|
|
301 |
|
struct vk_phydev_mem_props_t { |
|
302 |
|
u32 type; |
|
303 |
|
void *next; |
|
304 |
|
struct vk_phydev_mem_props_core_t core; |
|
305 |
|
}; |
|
306 |
|
/*----------------------------------------------------------------------------*/ |
|
307 |
|
struct vk_extent_2d_t { |
|
308 |
|
u32 width; |
|
309 |
|
u32 height; |
|
310 |
|
}; |
|
311 |
|
|
|
312 |
|
enum { |
|
313 |
|
vk_surf_transform_identity_bit = 0x00000001, |
|
314 |
|
vk_surf_transform_flag_bits_enum_max = 0x7fffffff |
|
315 |
|
}; |
|
316 |
|
|
|
317 |
|
enum { |
|
318 |
|
vk_composite_alpha_opaque_bit = 0x00000001, |
|
319 |
|
vk_composite_alpha_flag_bits_enum_max = 0x7fffffff |
|
320 |
|
}; |
|
321 |
|
|
|
322 |
|
enum { |
|
323 |
|
vk_img_usage_transfer_src_bit = 0x00000001, |
|
324 |
|
vk_img_usage_transfer_dst_bit = 0x00000002, |
|
325 |
|
vk_img_usage_color_attachment_bit = 0x00000010, |
|
326 |
|
vk_img_usage_flag_bits_enum_max = 0x7fffffff |
|
327 |
|
}; |
|
328 |
|
|
|
329 |
|
struct vk_surf_caps_core_t { |
|
330 |
|
u32 imgs_n_min; |
|
331 |
|
u32 imgs_n_max; |
|
332 |
|
struct vk_extent_2d_t current_extent; |
|
333 |
|
struct vk_extent_2d_t img_extent_min; |
|
334 |
|
struct vk_extent_2d_t img_extent_max; |
|
335 |
|
u32 img_array_layers_n_max; |
|
336 |
|
u32 supported_transforms; |
|
337 |
|
u32 current_transform; |
|
338 |
|
u32 supported_composite_alpha; |
|
339 |
|
u32 supported_img_usage_flags; |
|
340 |
|
}; |
|
341 |
|
|
|
342 |
|
struct vk_surf_caps_t { |
|
343 |
|
u32 type; |
|
344 |
|
void *next; |
|
345 |
|
struct vk_surf_caps_core_t core; |
|
346 |
|
u32 flags; |
|
347 |
|
void *surf; |
|
348 |
|
u32 imgs_n; |
|
349 |
|
|
|
350 |
|
}; |
|
351 |
|
/*----------------------------------------------------------------------------*/ |
|
352 |
|
enum { |
|
353 |
|
vk_sharing_mode_exclusive = 0, |
|
354 |
|
vk_sharing_mode_enum_max = 0x7fffffff |
|
355 |
|
}; |
|
356 |
|
|
|
357 |
|
enum { |
|
358 |
|
vk_present_mode_immediate = 0, |
|
359 |
|
vk_present_mode_mailbox = 1, |
|
360 |
|
vk_present_mode_fifo = 2, |
|
361 |
|
vk_present_mode_fifo_relaxed = 3, |
|
362 |
|
vk_present_mode_enum_max = 0x7fffffff |
|
363 |
|
}; |
|
364 |
|
|
|
365 |
|
struct vk_swpchn_create_info_t { |
|
366 |
|
u32 type; |
|
367 |
|
void *next; |
|
368 |
|
u32 flags; |
|
369 |
|
void *surf; |
|
370 |
|
u32 imgs_n_min; |
|
371 |
|
u32 img_texel_mem_blk_fmt; |
|
372 |
|
u32 img_color_space; |
|
373 |
|
struct vk_extent_2d_t img_extent; |
|
374 |
|
u32 img_layers_n; |
|
375 |
|
u32 img_usage; |
|
376 |
|
u32 img_sharing_mode; |
|
377 |
|
u32 q_fams_n; |
|
378 |
|
u32 *q_fams; |
|
379 |
|
u32 pre_transform; |
|
380 |
|
u32 composite_alpha; |
|
381 |
|
u32 present_mode; |
|
382 |
|
u32 clipped; |
|
383 |
|
void *old_swpchn; |
|
384 |
|
}; |
|
385 |
|
|
|
386 |
|
enum { |
|
387 |
|
vk_img_type_2d = 1, |
|
388 |
|
vk_img_type_enum_max = 0x7fffffff |
|
389 |
|
}; |
|
390 |
|
|
|
391 |
|
enum { |
|
392 |
|
vk_samples_n_1_bit = 0x00000001, |
|
393 |
|
vk_samples_n_enum_max = 0x7fffffff |
|
394 |
|
}; |
|
395 |
|
|
|
396 |
|
enum { |
|
397 |
|
vk_img_tiling_optimal = 0, |
|
398 |
|
vk_img_tiling_linear = 1, |
|
399 |
|
vk_img_tiling_enum_max = 0x7fffffff |
|
400 |
|
}; |
|
401 |
|
|
|
402 |
|
enum { |
|
403 |
|
vk_img_create_flag_2d_array_compatible_bit = 0x00000002, |
|
404 |
|
vk_img_create_flag_enum_max = 0x7fffffff |
|
405 |
|
}; |
|
406 |
|
|
|
407 |
|
enum { |
|
408 |
|
vk_img_layout_undefined = 0, |
|
409 |
|
vk_img_layout_general = 1, |
|
410 |
|
/* extension number 2 or index 1, offset 2 */ |
|
411 |
|
vk_img_layout_present = 1000000000 + 1000 + 2, |
|
412 |
|
vk_img_layout_enum_n_max = 0x7fffffff |
|
413 |
|
}; |
|
414 |
|
|
|
415 |
|
struct vk_img_create_info_t { |
|
416 |
|
u32 type; |
|
417 |
|
void *next; |
|
418 |
|
u32 flags; |
|
419 |
|
u32 img_type; |
|
420 |
|
u32 texel_mem_blk_fmt; |
|
421 |
|
struct vk_extent_3d_t extent; |
|
422 |
|
u32 mip_lvls_n; |
|
423 |
|
u32 array_layers_n; |
|
424 |
|
u32 samples_n; /* flags */ |
|
425 |
|
u32 img_tiling; |
|
426 |
|
u32 usage; |
|
427 |
|
u32 sharing_mode; |
|
428 |
|
u32 q_fams_n; |
|
429 |
|
u32 *q_fams; |
|
430 |
|
u32 initial_layout; |
|
431 |
|
}; |
|
432 |
|
|
|
433 |
|
struct vk_img_mem_rqmts_info_t { |
|
434 |
|
u32 type; |
|
435 |
|
void *next; |
|
436 |
|
void *img; |
|
437 |
|
}; |
|
438 |
|
|
|
439 |
|
struct vk_mem_rqmts_core_t { |
|
440 |
|
u64 sz; |
|
441 |
|
u64 alignment; |
|
442 |
|
/* idxs of bits are idxs in mem types of vk_phydev_mem_props_core_t */ |
|
443 |
|
u32 mem_type_bits; |
|
444 |
|
}; |
|
445 |
|
|
|
446 |
|
struct vk_mem_rqmts_t { |
|
447 |
|
u32 type; |
|
448 |
|
void *next; |
|
449 |
|
struct vk_mem_rqmts_core_t core; |
|
450 |
|
}; |
|
451 |
|
|
|
452 |
|
struct vk_mem_alloc_info_t { |
|
453 |
|
u32 type; |
|
454 |
|
void *next; |
|
455 |
|
u64 sz; |
|
456 |
|
u32 mem_type_idx; /* in the physical device array of memory types */ |
|
457 |
|
}; |
|
458 |
|
|
|
459 |
|
struct vk_bind_img_mem_info_t { |
|
460 |
|
u32 type; |
|
461 |
|
void *next; |
|
462 |
|
void *img; |
|
463 |
|
void *mem; |
|
464 |
|
u64 offset; |
|
465 |
|
}; |
|
466 |
|
|
|
467 |
|
enum { |
|
468 |
|
vk_pipeline_stage_top_of_pipe_bit = (1 << 0), |
|
469 |
|
vk_pipeline_stage_bottom_of_pipe_bit = (1 << 13), |
|
470 |
|
vk_pipeline_stage_enum_max = 0x7fffffff |
|
471 |
|
}; |
|
472 |
|
|
|
473 |
|
enum { |
|
474 |
|
vk_img_aspect_color_bit = 1, |
|
475 |
|
vk_img_aspect_enum_max = 0x7fffffff |
|
476 |
|
}; |
|
477 |
|
|
|
478 |
|
struct vk_img_subrsrc_range_t { |
|
479 |
|
u32 aspect; |
|
480 |
|
u32 base_mip_lvl; |
|
481 |
|
u32 lvls_n; |
|
482 |
|
u32 base_array_layer; |
|
483 |
|
u32 array_layers_n; |
|
484 |
|
}; |
|
485 |
|
|
|
486 |
|
struct vk_img_mem_barrier_t { |
|
487 |
|
u32 type; |
|
488 |
|
void *next; |
|
489 |
|
u32 src_access; |
|
490 |
|
u32 dst_access; |
|
491 |
|
u32 old_layout; |
|
492 |
|
u32 new_layout; |
|
493 |
|
u32 src_q_fam; |
|
494 |
|
u32 dst_q_fam; |
|
495 |
|
void *img; |
|
496 |
|
struct vk_img_subrsrc_range_t subrsrc_range; |
|
497 |
|
}; |
|
498 |
|
|
|
499 |
|
enum { |
|
500 |
|
vk_cmdbuf_lvl_primary = 0, |
|
501 |
|
vk_cmdbuf_lvl_enum_max = 0x7fffffff |
|
502 |
|
}; |
|
503 |
|
|
|
504 |
|
struct vk_cmdbuf_alloc_info_t { |
|
505 |
|
u32 type; |
|
506 |
|
void *next; |
|
507 |
|
void *cmdpool; |
|
508 |
|
u32 lvl; |
|
509 |
|
u32 cmdbufs_n; |
|
510 |
|
}; |
|
511 |
|
|
|
512 |
|
enum { |
|
513 |
|
vk_cmdbuf_usage_one_time_submit_bit = 0x00000001, |
|
514 |
|
vk_cmdbuf_usage_enum_max = 0x7fffffff |
|
515 |
|
}; |
|
516 |
|
|
|
517 |
|
struct vk_cmdbuf_begin_info_t { |
|
518 |
|
u32 type; |
|
519 |
|
void *next; |
|
520 |
|
u32 flags; |
|
521 |
|
void *do_not_use; |
|
522 |
|
}; |
|
523 |
|
|
|
524 |
|
struct vk_submit_info_t { |
|
525 |
|
u32 type; |
|
526 |
|
void *next; |
|
527 |
|
u32 wait_sems_n; |
|
528 |
|
void **wait_sems; |
|
529 |
|
u32 *wait_dst_stages; |
|
530 |
|
u32 cmdbufs_n; |
|
531 |
|
void **cmdbufs; |
|
532 |
|
u32 signal_sems_n; |
|
533 |
|
void **signal_sems; |
|
534 |
|
}; |
|
535 |
|
|
|
536 |
|
struct vk_img_subrsrc_t { |
|
537 |
|
u32 aspect; |
|
538 |
|
u32 mip_lvl; |
|
539 |
|
u32 array_layer; |
|
540 |
|
}; |
|
541 |
|
|
|
542 |
|
struct vk_subrsrc_layout_t { |
|
543 |
|
u64 offset; |
|
544 |
|
u64 sz; |
|
545 |
|
u64 row_pitch; |
|
546 |
|
u64 array_pitch; |
|
547 |
|
u64 depth_pitch; |
|
548 |
|
}; |
|
549 |
|
|
|
550 |
|
struct vk_acquire_next_img_info_t { |
|
551 |
|
u32 type; |
|
552 |
|
void *next; |
|
553 |
|
void *swpchn; |
|
554 |
|
u64 timeout; |
|
555 |
|
void *sem; |
|
556 |
|
void *fence; |
|
557 |
|
u32 devs; |
|
558 |
|
}; |
|
559 |
|
|
|
560 |
|
struct vk_fence_create_info_t { |
|
561 |
|
u32 type; |
|
562 |
|
void *next; |
|
563 |
|
u32 flags; |
|
564 |
|
}; |
|
565 |
|
|
|
566 |
|
struct vk_img_subrsrc_layers_t { |
|
567 |
|
u32 aspect; |
|
568 |
|
u32 mip_lvl; |
|
569 |
|
u32 base_array_layer; |
|
570 |
|
u32 array_layers_n; |
|
571 |
|
}; |
|
572 |
|
|
|
573 |
|
struct vk_offset_3d_t { |
|
574 |
|
u32 x; |
|
575 |
|
u32 y; |
|
576 |
|
u32 z; |
|
577 |
|
}; |
|
578 |
|
|
|
579 |
|
struct vk_img_blit_t { |
|
580 |
|
struct vk_img_subrsrc_layers_t src_subrsrc; |
|
581 |
|
struct vk_offset_3d_t src_offsets[2]; |
|
582 |
|
struct vk_img_subrsrc_layers_t dst_subrsrc; |
|
583 |
|
struct vk_offset_3d_t dst_offsets[2]; |
|
584 |
|
}; |
|
585 |
|
|
|
586 |
|
struct vk_present_info_t { |
|
587 |
|
u32 type; |
|
588 |
|
void *next; |
|
589 |
|
u32 wait_sems_n; |
|
590 |
|
void **wait_sems; |
|
591 |
|
u32 swpchns_n; |
|
592 |
|
void **swpchns; |
|
593 |
|
u32 *idxs; |
|
594 |
|
s32 *results; |
|
595 |
|
}; |
|
596 |
|
|
|
597 |
|
struct vk_sem_create_info_t { |
|
598 |
|
u32 type; |
|
599 |
|
void *next; |
|
600 |
|
u32 flags; |
|
601 |
|
}; |
|
602 |
|
/******************************************************************************/ |
|
603 |
|
/* dev function pointers prototypes with some namespace/local keywords */ |
|
604 |
|
#define vk_get_dev_q(...) \ |
|
605 |
|
app_surf.dev.dl_vk_get_dev_q(app_surf.dev.vk,##__VA_ARGS__) |
|
606 |
|
|
|
607 |
|
#define vk_create_cmdpool(...) \ |
|
608 |
|
app_surf.dev.dl_vk_create_cmdpool(app_surf.dev.vk,##__VA_ARGS__) |
|
609 |
|
|
|
610 |
|
#define vk_create_swpchn(...) \ |
|
611 |
|
app_surf.dev.dl_vk_create_swpchn(app_surf.dev.vk,##__VA_ARGS__) |
|
612 |
|
|
|
613 |
|
#define vk_get_swpchn_imgs(...) \ |
|
614 |
|
app_surf.dev.dl_vk_get_swpchn_imgs(app_surf.dev.vk,##__VA_ARGS__) |
|
615 |
|
|
|
616 |
|
#define vk_create_img(...) \ |
|
617 |
|
app_surf.dev.dl_vk_create_img(app_surf.dev.vk,##__VA_ARGS__) |
|
618 |
|
|
|
619 |
|
#define vk_get_img_mem_rqmts(...) \ |
|
620 |
|
app_surf.dev.dl_vk_get_img_mem_rqmts(app_surf.dev.vk,##__VA_ARGS__) |
|
621 |
|
|
|
622 |
|
#define vk_alloc_mem(...) \ |
|
623 |
|
app_surf.dev.dl_vk_alloc_mem(app_surf.dev.vk,##__VA_ARGS__) |
|
624 |
|
|
|
625 |
|
#define vk_bind_img_mem(...) \ |
|
626 |
|
app_surf.dev.dl_vk_bind_img_mem(app_surf.dev.vk,##__VA_ARGS__) |
|
627 |
|
|
|
628 |
|
#define vk_map_mem(...) \ |
|
629 |
|
app_surf.dev.dl_vk_map_mem(app_surf.dev.vk,##__VA_ARGS__) |
|
630 |
|
|
|
631 |
|
#define vk_alloc_cmdbufs(...) \ |
|
632 |
|
app_surf.dev.dl_vk_alloc_cmdbufs(app_surf.dev.vk,##__VA_ARGS__) |
|
633 |
|
|
|
634 |
|
#define vk_free_cmdbufs(...) \ |
|
635 |
|
app_surf.dev.dl_vk_free_cmdbufs(app_surf.dev.vk,##__VA_ARGS__) |
|
636 |
|
|
|
637 |
|
#define vk_begin_cmdbuf(...) \ |
|
638 |
|
app_surf.dev.dl_vk_begin_cmdbuf(__VA_ARGS__) |
|
639 |
|
|
|
640 |
|
#define vk_end_cmdbuf(...) \ |
|
641 |
|
app_surf.dev.dl_vk_end_cmdbuf(__VA_ARGS__) |
|
642 |
|
|
|
643 |
|
#define vk_cmd_pipeline_barrier(...) \ |
|
644 |
|
app_surf.dev.dl_vk_cmd_pipeline_barrier(__VA_ARGS__) |
|
645 |
|
|
|
646 |
|
#define vk_q_submit(...) \ |
|
647 |
|
app_surf.dev.dl_vk_q_submit(__VA_ARGS__) |
|
648 |
|
|
|
649 |
|
#define vk_q_wait_idle(...) \ |
|
650 |
|
app_surf.dev.dl_vk_q_wait_idle(__VA_ARGS__) |
|
651 |
|
|
|
652 |
|
#define vk_get_img_subrsrc_layout(...) \ |
|
653 |
|
app_surf.dev.dl_vk_get_img_subrsrc_layout(app_surf.dev.vk,##__VA_ARGS__) |
|
654 |
|
|
|
655 |
|
#define vk_acquire_next_img(...) \ |
|
656 |
|
app_surf.dev.dl_vk_acquire_next_img(app_surf.dev.vk,##__VA_ARGS__) |
|
657 |
|
|
|
658 |
|
#define vk_create_fence(...) \ |
|
659 |
|
app_surf.dev.dl_vk_create_fence(app_surf.dev.vk,##__VA_ARGS__) |
|
660 |
|
|
|
661 |
|
#define vk_reset_cmdbuf(...) \ |
|
662 |
|
app_surf.dev.dl_vk_reset_cmdbuf(__VA_ARGS__) |
|
663 |
|
|
|
664 |
|
#define vk_cmd_blit_img(...) \ |
|
665 |
|
app_surf.dev.dl_vk_cmd_blit_img(__VA_ARGS__) |
|
666 |
|
|
|
667 |
|
#define vk_wait_for_fences(...) \ |
|
668 |
|
app_surf.dev.dl_vk_wait_for_fences(app_surf.dev.vk,##__VA_ARGS__) |
|
669 |
|
|
|
670 |
|
#define vk_reset_fences(...) \ |
|
671 |
|
app_surf.dev.dl_vk_reset_fences(app_surf.dev.vk,##__VA_ARGS__) |
|
672 |
|
|
|
673 |
|
#define vk_q_present(...) \ |
|
674 |
|
app_surf.dev.dl_vk_q_present(__VA_ARGS__) |
|
675 |
|
|
|
676 |
|
#define vk_create_sem(...) \ |
|
677 |
|
app_surf.dev.dl_vk_create_sem(app_surf.dev.vk,##__VA_ARGS__) |
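These wrappers are the call-site simplification announced at the top of app.c: the argument that is always the same, the device (or queue/command buffer) handle, is folded in. E.g. the old sems_create call vk_create_sem(&info, 0, &surf_g.dev.sems[sem]) expands to app_surf.dev.dl_vk_create_sem(app_surf.dev.vk, &info, 0, &surf_g.dev.sems[sem]). The blit version's call sites also drop the allocator and read r without assigning it, so its replacement header (not in this diff) presumably folds those too; a plausible shape, purely an assumption:

/* hypothetical blit-side macro; the real one is not shown in this diff */
#define vk_create_sem(info, sem) \
	(r = app_surf.dev.dl_vk_create_sem(app_surf.dev.vk, info, 0, sem))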
|
678 |
|
/*----------------------------------------------------------------------------*/ |
|
679 |
|
#define VK_DEV_SYMS \ |
|
680 |
|
void (*dl_vk_get_dev_q)(void *dev, u32 fam, u32 q_idx, void **q); \ |
|
681 |
|
s32 (*dl_vk_create_cmdpool)( \ |
|
682 |
|
void *dev, \ |
|
683 |
|
struct vk_cmdpool_create_info_t *create_info, \ |
|
684 |
|
void *allocator, \ |
|
685 |
|
void **vk_cmdpool); \ |
|
686 |
|
s32 (*dl_vk_create_swpchn)( \ |
|
687 |
|
void *dev, \ |
|
688 |
|
struct vk_swpchn_create_info_t *info, \ |
|
689 |
|
void *allocator, \ |
|
690 |
|
void **swpchn); \ |
|
691 |
|
s32 (*dl_vk_get_swpchn_imgs)( \ |
|
692 |
|
void *dev, \ |
|
693 |
|
void *swpchn, \ |
|
694 |
|
u32 *imgs_n, \ |
|
695 |
|
void **imgs); \ |
|
696 |
|
s32 (*dl_vk_create_img)( \ |
|
697 |
|
void *dev, \ |
|
698 |
|
struct vk_img_create_info_t *info, \ |
|
699 |
|
void *allocator, \ |
|
700 |
|
void **img); \ |
|
701 |
|
s32 (*dl_vk_get_img_mem_rqmts)( \ |
|
702 |
|
void *dev, \ |
|
703 |
|
struct vk_img_mem_rqmts_info_t *info, \ |
|
704 |
|
struct vk_mem_rqmts_t *mem_rqmts); \ |
|
705 |
|
s32 (*dl_vk_alloc_mem)( \ |
|
706 |
|
void *dev, \ |
|
707 |
|
struct vk_mem_alloc_info_t *info, \ |
|
708 |
|
void *allocator, \ |
|
709 |
|
void **mem); \ |
|
710 |
|
s32 (*dl_vk_bind_img_mem)( \ |
|
711 |
|
void *dev, \ |
|
712 |
|
u32 infos_n, \ |
|
713 |
|
struct vk_bind_img_mem_info_t *infos); \ |
|
714 |
|
s32 (*dl_vk_map_mem)( \ |
|
715 |
|
void *dev, \ |
|
716 |
|
void *mem, \ |
|
717 |
|
u64 offset, \ |
|
718 |
|
u64 sz, \ |
|
719 |
|
u32 flags, \ |
|
720 |
|
void **data); \ |
|
721 |
|
s32 (*dl_vk_alloc_cmdbufs)( \ |
|
722 |
|
void *dev, \ |
|
723 |
|
struct vk_cmdbuf_alloc_info_t *info, \ |
|
724 |
|
void **cmdbufs); \ |
|
725 |
|
void (*dl_vk_free_cmdbufs)( \ |
|
726 |
|
void *dev, \ |
|
727 |
|
void *cmdpool, \ |
|
728 |
|
u32 cmdbufs_n, \ |
|
729 |
|
void **cmdbufs); \ |
|
730 |
|
s32 (*dl_vk_begin_cmdbuf)( \ |
|
731 |
|
void *cmdbuf, \ |
|
732 |
|
struct vk_cmdbuf_begin_info_t *info); \ |
|
733 |
|
s32 (*dl_vk_end_cmdbuf)(void *cmdbuf); \ |
|
734 |
|
void (*dl_vk_cmd_pipeline_barrier)( \ |
|
735 |
|
void *cmdbuf, \ |
|
736 |
|
u32 src_stage, \ |
|
737 |
|
u32 dst_stage, \ |
|
738 |
|
u32 dependency_flags, \ |
|
739 |
|
u32 mem_barriers_n, \ |
|
740 |
|
void *mem_barriers, \ |
|
741 |
|
u32 buf_mem_barriers_n, \ |
|
742 |
|
void *buf_mem_barriers, \ |
|
743 |
|
u32 img_mem_barriers_n, \ |
|
744 |
|
struct vk_img_mem_barrier_t *img_mem_barriers); \ |
|
745 |
|
s32 (*dl_vk_q_submit)( \ |
|
746 |
|
void *q, \ |
|
747 |
|
u32 submits_n, \ |
|
748 |
|
struct vk_submit_info_t *submits, \ |
|
749 |
|
void *fence); \ |
|
750 |
|
s32 (*dl_vk_q_wait_idle)(void *q); \ |
|
751 |
|
void (*dl_vk_get_img_subrsrc_layout)( \ |
|
752 |
|
void *dev, \ |
|
753 |
|
void *img, \ |
|
754 |
|
struct vk_img_subrsrc_t *subrsrc, \ |
|
755 |
|
struct vk_subrsrc_layout_t *layout); \ |
|
756 |
|
s32 (*dl_vk_acquire_next_img)( \ |
|
757 |
|
void *dev, \ |
|
758 |
|
struct vk_acquire_next_img_info_t *info, \ |
|
759 |
|
u32 *img_idx); \ |
|
760 |
|
s32 (*dl_vk_create_fence)( \ |
|
761 |
|
void *dev, \ |
|
762 |
|
struct vk_fence_create_info_t *info, \ |
|
763 |
|
void *allocator, \ |
|
764 |
|
void **fence); \ |
|
765 |
|
s32 (*dl_vk_reset_cmdbuf)( \ |
|
766 |
|
void *cmdbuf, \ |
|
767 |
|
u32 flags); \ |
|
768 |
|
void (*dl_vk_cmd_blit_img)( \ |
|
769 |
|
void *cmdbuf, \ |
|
770 |
|
void *src_img, \ |
|
771 |
|
u32 src_img_layout, \ |
|
772 |
|
void *dst_img, \ |
|
773 |
|
u32 dst_img_layout, \ |
|
774 |
|
u32 regions_n, \ |
|
775 |
|
struct vk_img_blit_t *regions, \ |
|
776 |
|
u32 filter); \ |
|
777 |
|
s32 (*dl_vk_wait_for_fences)( \ |
|
778 |
|
void *dev, \ |
|
779 |
|
u32 fences_n, \ |
|
780 |
|
void **fences, \ |
|
781 |
|
u32 wait_all, \ |
|
782 |
|
u64 timeout); \ |
|
783 |
|
s32 (*dl_vk_reset_fences)( \ |
|
784 |
|
void *dev, \ |
|
785 |
|
u32 fences_n, \ |
|
786 |
|
void **fences); \ |
|
787 |
|
s32 (*dl_vk_q_present)( \ |
|
788 |
|
void *q, \ |
|
789 |
|
struct vk_present_info_t *info); \ |
|
790 |
|
s32 (*dl_vk_create_sem)( \ |
|
791 |
|
void *dev, \ |
|
792 |
|
struct vk_sem_create_info_t *info, \ |
|
793 |
|
void *allocator, \ |
|
794 |
|
void **sem); |
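VK_DEV_SYMS is presumably pasted into the device state struct so dev_syms() can resolve every dl_ pointer once after device creation; a sketch of that use, with the struct name and layout guessed:

/* hypothetical use; the real struct lives in the app state headers */
struct dev_t {
	void *vk;	/* the device handle */
	VK_DEV_SYMS	/* expands to all the dl_ function pointers above */
};
/* each pointer is then resolved once, e.g. with vkGetDeviceProcAddr,
   in dev_syms() */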
|
795 |
|
#endif |
|