File npa/npa.c renamed from npaf.c (similarity 98%) (mode: 100644) (index dcdf8d9..1f68a94) |
1 |
|
#ifndef NYANPAF_C |
|
2 |
|
#define NYANPAF_C |
|
|
1 |
|
#ifndef NPA_C |
|
2 |
|
#define NPA_C |
3 |
3 |
/* |
/* |
4 |
4 |
* code protected with a GNU affero GPLv3 license |
* code protected with a GNU affero GPLv3 license |
5 |
5 |
* copyright (C) 2020 Sylvain BERTRAND |
* copyright (C) 2020 Sylvain BERTRAND |
|
102 |
102 |
#define EXIT(fmt, ...) ({POUT("EXITING:" fmt, ##__VA_ARGS__);stdin_flags_restore();stdin_tty_conf_restore();exit(0);}) |
#define EXIT(fmt, ...) ({POUT("EXITING:" fmt, ##__VA_ARGS__);stdin_flags_restore();stdin_tty_conf_restore();exit(0);}) |
103 |
103 |
#define STR_SZ 255 /* sz and idx fit in 1 byte */ |
#define STR_SZ 255 /* sz and idx fit in 1 byte */ |
104 |
104 |
/*---------------------------------------------------------------------------*/ |
/*---------------------------------------------------------------------------*/ |
105 |
|
static u8 *file_path; |
|
|
105 |
|
static u8 *url; |
106 |
106 |
static f32 initial_vol; |
static f32 initial_vol; |
107 |
107 |
/*---------------------------------------------------------------------------*/ |
/*---------------------------------------------------------------------------*/ |
108 |
108 |
/* linux and compatible */ |
/* linux and compatible */ |
|
... |
... |
static snd_pcm_hw_params_t *pcm_hw_params; |
141 |
141 |
#define frame_rate sample_rate |
#define frame_rate sample_rate |
142 |
142 |
#define av_frame_fmt_is_planar av_sample_fmt_is_planar |
#define av_frame_fmt_is_planar av_sample_fmt_is_planar |
143 |
143 |
#define frame_fmt sample_fmt |
#define frame_fmt sample_fmt |
144 |
|
static u8 *file_url; |
|
|
144 |
|
static u8 *url; |
145 |
145 |
static AVFormatContext *fmt_ctx; |
static AVFormatContext *fmt_ctx; |
146 |
146 |
static int st_idx; |
static int st_idx; |
147 |
147 |
static AVStream *st; |
static AVStream *st; |
|
... |
... |
static void cmd_mute(void); |
199 |
199 |
static void cmd_info(void); |
static void cmd_info(void); |
200 |
200 |
static void cmd_pause(void); |
static void cmd_pause(void); |
201 |
201 |
/*--------------------------------------------------------------------------*/ |
/*--------------------------------------------------------------------------*/ |
202 |
|
#include "npaf_config.h" |
|
|
202 |
|
#include "npa_config.h" |
203 |
203 |
/*----------------------------------------------------------------------------*/ |
/*----------------------------------------------------------------------------*/ |
204 |
204 |
/* input "state" machine (2 major states: "utf8" and "esc seq") */ |
/* input "state" machine (2 major states: "utf8" and "esc seq") */ |
205 |
205 |
static int input_timer_fd; |
static int input_timer_fd; |
|
... |
... |
static u8 esc_seq[STR_SZ]; |
214 |
214 |
static u8 esc_seq_next_byte; /* idx in esc_seq */ |
static u8 esc_seq_next_byte; /* idx in esc_seq */ |
215 |
215 |
#define esc_seq_sz esc_seq_next_byte /* the idx of the next byte is its sz */ |
#define esc_seq_sz esc_seq_next_byte /* the idx of the next byte is its sz */ |
216 |
216 |
/*----------------------------------------------------------------------------*/ |
/*----------------------------------------------------------------------------*/ |
217 |
|
static u8 *apf(u8 *fmt, ...) /* asprintf... meh... */ |
|
218 |
|
{ |
|
219 |
|
u8 *r; |
|
220 |
|
int sz; |
|
221 |
|
va_list ap; |
|
222 |
|
|
|
223 |
|
va_start(ap, fmt); |
|
224 |
|
sz = vsnprintf(0, 0, fmt, ap); |
|
225 |
|
va_end(ap); |
|
226 |
|
|
|
227 |
|
r = malloc(sz + 1); |
|
228 |
|
|
|
229 |
|
va_start(ap, fmt); |
|
230 |
|
vsnprintf(r, sz + 1, fmt, ap); |
|
231 |
|
va_end(ap); |
|
232 |
|
return r; |
|
233 |
|
} |
|
234 |
|
|
|
235 |
217 |
static void stdin_tty_conf_restore(void) |
static void stdin_tty_conf_restore(void) |
236 |
218 |
{ |
{ |
237 |
219 |
int r; |
int r; |
|
... |
... |
static void cmd_info(void) |
342 |
324 |
|
|
343 |
325 |
RESTORE; |
RESTORE; |
344 |
326 |
GREEN;POUT("================================================================================\n");RESTORE; |
GREEN;POUT("================================================================================\n");RESTORE; |
345 |
|
PURPLE;POUT("%s\n", file_url);RESTORE; |
|
|
327 |
|
PURPLE;POUT("%s\n", url);RESTORE; |
346 |
328 |
ts_str = ts_to_str(dec_frames.most_recent_ts, st->time_base, |
ts_str = ts_to_str(dec_frames.most_recent_ts, st->time_base, |
347 |
329 |
&remaining); |
&remaining); |
348 |
330 |
RED;POUT("%s", ts_str);RESTORE; |
RED;POUT("%s", ts_str);RESTORE; |
|
... |
... |
static void cmd_vol_up(void) |
465 |
447 |
POUT("COMMAND:volume up to value %s\n", vol_l10n_str); |
POUT("COMMAND:volume up to value %s\n", vol_l10n_str); |
466 |
448 |
|
|
467 |
449 |
r = avfilter_graph_send_command(filter_graph, "vol", "volume", |
r = avfilter_graph_send_command(filter_graph, "vol", "volume", |
468 |
|
vol_l10n_str, response, sizeof(response), |
|
469 |
|
AVFILTER_CMD_FLAG_ONE); |
|
|
450 |
|
vol_l10n_str, response, sizeof(response), 0); |
470 |
451 |
if (r < 0) |
if (r < 0) |
471 |
452 |
WARNING("ffmpeg:volume context:unable to set the volume up to \"%s\":response from volume filter:\"%s\"\n", vol_l10n_str, response); |
WARNING("ffmpeg:volume context:unable to set the volume up to \"%s\":response from volume filter:\"%s\"\n", vol_l10n_str, response); |
472 |
453 |
} |
} |
|
... |
... |
static void init_ffmpeg_dec(void) |
593 |
574 |
memset(&dec_frames, 0, sizeof(dec_frames)); |
memset(&dec_frames, 0, sizeof(dec_frames)); |
594 |
575 |
|
|
595 |
576 |
fmt_ctx = 0; |
fmt_ctx = 0; |
596 |
|
r = avformat_open_input(&fmt_ctx, file_url, NULL, NULL); |
|
|
577 |
|
r = avformat_open_input(&fmt_ctx, url, NULL, NULL); |
597 |
578 |
if (r < 0) |
if (r < 0) |
598 |
579 |
FATAL("ffmpeg:unable to open\n"); |
FATAL("ffmpeg:unable to open\n"); |
599 |
580 |
|
|
|
... |
... |
static void init_alsa(void) |
657 |
638 |
else |
else |
658 |
639 |
FATAL("alsa:unable to open \"%s\" pcm for playback\n", pcm_name); |
FATAL("alsa:unable to open \"%s\" pcm for playback\n", pcm_name); |
659 |
640 |
} |
} |
660 |
|
|
|
661 |
|
POUT("ALSA PCM DUMP START-------------------------------------------------------------\n"); |
|
662 |
|
snd_pcm_dump(pcm, pcm_pout); |
|
663 |
|
POUT("ALSA PCM DUMP END---------------------------------------------------------------\n"); |
|
664 |
641 |
} |
} |
665 |
642 |
|
|
666 |
643 |
static bool ffmpeg_fmt2pcm_layout_best_effort(enum AVFrameFormat ffmpeg_fmt, |
static bool ffmpeg_fmt2pcm_layout_best_effort(enum AVFrameFormat ffmpeg_fmt, |
|
... |
... |
static void pcm_conf(void) |
1089 |
1066 |
snd_pcm_sw_params_dump(sw_params, pcm_pout); |
snd_pcm_sw_params_dump(sw_params, pcm_pout); |
1090 |
1067 |
POUT("ALSA:SW_PARAMS END--------------------------------------------------------------\n"); |
POUT("ALSA:SW_PARAMS END--------------------------------------------------------------\n"); |
1091 |
1068 |
|
|
|
1069 |
|
POUT("ALSA PCM DUMP START-------------------------------------------------------------\n"); |
|
1070 |
|
snd_pcm_dump(pcm, pcm_pout); |
|
1071 |
|
POUT("ALSA PCM DUMP END---------------------------------------------------------------\n"); |
|
1072 |
|
|
1092 |
1073 |
r = snd_pcm_poll_descriptors_count(pcm); |
r = snd_pcm_poll_descriptors_count(pcm); |
1093 |
1074 |
POUT("alsa:have %d poll file descriptors\n", r); |
POUT("alsa:have %d poll file descriptors\n", r); |
1094 |
1075 |
|
|
|
... |
... |
static void ffmpeg_log_stdout(void *a, int b, const char *fmt, va_list ap) |
2071 |
2052 |
|
|
2072 |
2053 |
static void usage(void) |
static void usage(void) |
2073 |
2054 |
{ |
{ |
2074 |
|
POUT("npaf [-v volume(0..100)] [-h] file_path\n"); |
|
|
2055 |
|
POUT("npa [-v volume(0..100)] [-h] url\n"); |
2075 |
2056 |
} |
} |
2076 |
2057 |
|
|
2077 |
2058 |
static void opts_parse(int argc, u8 **args) |
static void opts_parse(int argc, u8 **args) |
2078 |
2059 |
{ |
{ |
2079 |
2060 |
int i; |
int i; |
2080 |
|
int file_path_idx; |
|
|
2061 |
|
int url_idx; |
2081 |
2062 |
|
|
2082 |
2063 |
i = 1; |
i = 1; |
2083 |
|
file_path_idx = -1; |
|
|
2064 |
|
url_idx = -1; |
2084 |
2065 |
initial_vol = -1.0; |
initial_vol = -1.0; |
2085 |
2066 |
loop { |
loop { |
2086 |
2067 |
if (i == argc) |
if (i == argc) |
|
... |
... |
static void opts_parse(int argc, u8 **args) |
2103 |
2084 |
usage(); |
usage(); |
2104 |
2085 |
exit(0); |
exit(0); |
2105 |
2086 |
} else { |
} else { |
2106 |
|
file_path_idx = i; |
|
|
2087 |
|
url_idx = i; |
2107 |
2088 |
++i; |
++i; |
2108 |
2089 |
} |
} |
2109 |
2090 |
} |
} |
2110 |
2091 |
|
|
2111 |
|
if (file_path_idx == -1) |
|
2112 |
|
FATAL("missing file path\n"); |
|
2113 |
|
|
|
2114 |
|
file_path = args[file_path_idx]; |
|
2115 |
|
file_url = apf("file:%s", file_path); |
|
|
2092 |
|
if (url_idx == -1) |
|
2093 |
|
FATAL("missing url\n"); |
2116 |
2094 |
|
|
2117 |
|
POUT("playing ####%s####\n", file_url); |
|
|
2095 |
|
url = args[url_idx]; |
|
2096 |
|
POUT("playing ####%s####\n", url); |
2118 |
2097 |
} |
} |
2119 |
2098 |
|
|
2120 |
2099 |
static void init(int argc, u8 **args) |
static void init(int argc, u8 **args) |
|
... |
... |
static void init(int argc, u8 **args) |
2140 |
2119 |
|
|
2141 |
2120 |
/* switch the ffmpeg log to stdout for metadata/etc dump */ |
/* switch the ffmpeg log to stdout for metadata/etc dump */ |
2142 |
2121 |
av_log_set_callback(ffmpeg_log_stdout); |
av_log_set_callback(ffmpeg_log_stdout); |
2143 |
|
av_dump_format(fmt_ctx, 0, file_url, 0); |
|
|
2122 |
|
av_dump_format(fmt_ctx, 0, url, 0); |
2144 |
2123 |
av_log_set_callback(av_log_default_callback); |
av_log_set_callback(av_log_default_callback); |
2145 |
2124 |
} |
} |
2146 |
2125 |
|
|
File npv/audio/filt/public/code.frag.c added (mode: 100644) (index 0000000..400f5d4) |
|
1 |
|
#define AGAIN 0 |
|
2 |
|
#define PUSHED_ONE_SET 1 |
|
3 |
|
#define NO_DEC_SET 2 |
|
4 |
|
#define FILT_SWITCHED_TO_DRAINING 3 |
|
5 |
|
static u8 filt_push_dec_sets(void) |
|
6 |
|
{ |
|
7 |
|
int r; |
|
8 |
|
avutil_audio_set_ref_t **a; |
|
9 |
|
|
|
10 |
|
if (audio_dec_sets_p.n == 0) { |
|
11 |
|
if (audio_dec_sets_p.eof_receive) { |
|
12 |
|
r = avfilter_bufsrc_add_audio_set_flags(abufsrc_ctx_l, |
|
13 |
|
0, AVFILTER_BUFSRC_FLAG_PUSH |
|
14 |
|
| AVFILTER_BUFSRC_FLAG_KEEP_REF); |
|
15 |
|
if (r < 0) |
|
16 |
|
FATALAF("ffmpeg:unable to notify the end of data to the filter source audio buffer context\n"); |
|
17 |
|
POUTAF("ffmpeg:interactive filter switched to draining\n"); |
|
18 |
|
return FILT_SWITCHED_TO_DRAINING; |
|
19 |
|
} |
|
20 |
|
return NO_DEC_SET; |
|
21 |
|
} |
|
22 |
|
a = audio_dec_sets_p.a; |
|
23 |
|
/* the dec_sets_p bufs will be unref in avcodec_audio_receive_set */ |
|
24 |
|
r = avfilter_bufsrc_add_audio_set_flags(abufsrc_ctx_l, a[0], |
|
25 |
|
AVFILTER_BUFSRC_FLAG_PUSH | AVFILTER_BUFSRC_FLAG_KEEP_REF); |
|
26 |
|
if (r >= 0) { |
|
27 |
|
/* rotate the ptrs if needed */ |
|
28 |
|
if (audio_dec_sets_p.n > 1) { |
|
29 |
|
avutil_audio_set_ref_t *save; |
|
30 |
|
|
|
31 |
|
save = a[0]; |
|
32 |
|
memmove(&a[0], &a[1], sizeof(*a) |
|
33 |
|
* (audio_dec_sets_p.n - 1)); |
|
34 |
|
a[audio_dec_sets_p.n - 1] = save; |
|
35 |
|
} |
|
36 |
|
audio_dec_sets_p.n--; |
|
37 |
|
return PUSHED_ONE_SET; |
|
38 |
|
} else if (r == AVERROR(EAGAIN)) |
|
39 |
|
return AGAIN; |
|
40 |
|
FATALAF("ffmpeg:unable to submit a decoder set of frames to the filter source audio buffer context\n"); |
|
41 |
|
} |
|
42 |
|
#undef AGAIN |
|
43 |
|
#undef PUSHED_ONE_SET |
|
44 |
|
#undef NO_DEC_SET |
|
45 |
|
#undef PUSHED_NULL_SET |
|
46 |
|
#undef FILT_SWITCHED_TO_DRAINING |
|
47 |
|
#define AGAIN 0 |
|
48 |
|
#define HAVE_FILT_SET 1 |
|
49 |
|
#define EOF_FILT 2 |
|
50 |
|
static u8 filt_set_try_get(void) |
|
51 |
|
{ |
|
52 |
|
int r; |
|
53 |
|
/* |
|
54 |
|
* the last dec set should switch the filt in draining mode, and |
|
55 |
|
* filt_p.set won't matter. |
|
56 |
|
*/ |
|
57 |
|
r = avfilter_bufsink_get_audio_set(abufsink_ctx_l, filt_p.set); |
|
58 |
|
if (r == AVERROR(EAGAIN)) { |
|
59 |
|
return AGAIN; |
|
60 |
|
} else if (r >= 0) { |
|
61 |
|
filt_p.pcm_written_ufrs_n = 0; |
|
62 |
|
return HAVE_FILT_SET; |
|
63 |
|
} else if (r == AVUTIL_AVERROR_EOF) { |
|
64 |
|
return EOF_FILT; |
|
65 |
|
} |
|
66 |
|
FATALAF("ffmpeg:error while getting frames from the filter\n"); |
|
67 |
|
} |
|
68 |
|
#undef AGAIN |
|
69 |
|
#undef HAVE_FILT_SET |
|
70 |
|
#undef EOF_FILT |
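
The two helpers above wrap libavfilter's push/pull cycle behind project-local names (avfilter_bufsrc_add_audio_set_flags, avfilter_bufsink_get_audio_set). A minimal sketch of the same cycle with the stock FFmpeg names, assuming an already-configured graph with src_ctx/sink_ctx and caller-owned AVFrame in/out; only the return codes the code above branches on are handled:

#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>

static int filter_once(AVFilterContext *src_ctx, AVFilterContext *sink_ctx,
	AVFrame *in, AVFrame *out)
{
	int r;

	/* a NULL input frame switches the source to draining, like the EOF path above */
	r = av_buffersrc_add_frame_flags(src_ctx, in,
		AV_BUFFERSRC_FLAG_PUSH | AV_BUFFERSRC_FLAG_KEEP_REF);
	if (r < 0 && r != AVERROR(EAGAIN))
		return r;
	r = av_buffersink_get_frame(sink_ctx, out);
	if (r == AVERROR(EAGAIN))
		return 0;	/* the graph needs more input before it can output */
	return r;		/* >= 0: out holds filtered samples; AVERROR_EOF: drained */
}
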
|
71 |
|
#define DONT_PRINT_INFO false |
|
72 |
|
static void filt_flush(void) |
|
73 |
|
{ |
|
74 |
|
enum avutil_audio_fr_fmt_t dst_fmt; |
|
75 |
|
int dst_rate; |
|
76 |
|
int dst_chans_n; |
|
77 |
|
uint64_t dst_chans_layout; |
|
78 |
|
|
|
79 |
|
avutil_audio_set_unref(filt_p.set); |
|
80 |
|
filt_p.pcm_written_ufrs_n = 0; |
|
81 |
|
/* no flush, we have to re-instantiate it from the current state */ |
|
82 |
|
avfilter_graph_free(&graph_l); |
|
83 |
|
|
|
84 |
|
audio_pcm2ff(audio_pcm_p, &dst_fmt, &dst_rate, &dst_chans_n, |
|
85 |
|
&dst_chans_layout, DONT_PRINT_INFO); |
|
86 |
|
audio_filt_cfg( |
|
87 |
|
audio_dec_ctx_p->fr_fmt, audio_dec_ctx_p->fr_rate, |
|
88 |
|
audio_dec_ctx_p->chans_n, audio_dec_ctx_p->chans_layout, |
|
89 |
|
filt_p.muted, filt_p.vol, |
|
90 |
|
dst_fmt, dst_rate, dst_chans_n, dst_chans_layout, |
|
91 |
|
DONT_PRINT_INFO); |
|
92 |
|
} |
|
93 |
|
#undef DONT_PRINT_INFO |
|
94 |
|
static void init_once(void) |
|
95 |
|
{ |
|
96 |
|
init_once_local(); |
|
97 |
|
init_once_public(); |
|
98 |
|
} |
|
99 |
|
static void cfg(enum avutil_audio_fr_fmt_t src_fmt, int src_rate, |
|
100 |
|
int src_chans_n, uint64_t src_chans_layout, |
|
101 |
|
bool muted, double vol, |
|
102 |
|
enum avutil_audio_fr_fmt_t dst_fmt, int dst_rate, |
|
103 |
|
int dst_chans_n, uint64_t dst_chans_layout, |
|
104 |
|
bool print_info) |
|
105 |
|
{ |
|
106 |
|
int r; |
|
107 |
|
char *dump_str; |
|
108 |
|
|
|
109 |
|
avfilter_graph_free(&graph_l); |
|
110 |
|
|
|
111 |
|
graph_l = avfilter_graph_alloc(); |
|
112 |
|
if (graph_l == 0) |
|
113 |
|
FATALAF("unable to create filter graph\n"); |
|
114 |
|
abufsrc_cfg(src_fmt, src_rate, src_chans_n, src_chans_layout, |
|
115 |
|
print_info); |
|
116 |
|
vol_cfg(muted, vol); |
|
117 |
|
afmt_cfg(dst_fmt, dst_rate, dst_chans_n, dst_chans_layout, print_info); |
|
118 |
|
abufsink_cfg(); |
|
119 |
|
r = avfilter_link(abufsrc_ctx_l, 0, vol_ctx_l, 0); |
|
120 |
|
if (r < 0) |
|
121 |
|
FATALAF("unable to connect the audio buffer source filter to the volume filter\n"); |
|
122 |
|
r = avfilter_link(vol_ctx_l, 0, afmt_ctx_l, 0); |
|
123 |
|
if (r < 0) |
|
124 |
|
FATALAF("unable to connect the volume filter to the audio format filter\n"); |
|
125 |
|
r = avfilter_link(afmt_ctx_l, 0, abufsink_ctx_l, 0); |
|
126 |
|
if (r < 0) |
|
127 |
|
FATALAF("unable to connect the audio format filter to the audio buffer sink filter\n"); |
|
128 |
|
/* configure the graph */ |
|
129 |
|
r = avfilter_graph_config(graph_l, 0); |
|
130 |
|
if (r < 0) |
|
131 |
|
FATALAF("unable to configure the filter graph\n"); |
|
132 |
|
/*--------------------------------------------------------------------*/ |
|
133 |
|
if (!print_info) |
|
134 |
|
return; |
|
135 |
|
dump_str = avfilter_graph_dump(graph_l, 0); |
|
136 |
|
if (dump_str == 0) { |
|
137 |
|
WARNINGAF("unable to get a filter graph description\n"); |
|
138 |
|
return; |
|
139 |
|
} |
|
140 |
|
POUTAF("GRAPH START-------------------------------------------------------\n"); |
|
141 |
|
POUT("%s", dump_str); |
|
142 |
|
avutil_free(dump_str); |
|
143 |
|
POUTAF("GRAPH END---------------------------------------------------------\n"); |
|
144 |
|
} |
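
cfg() assembles the abuffer -> volume -> aformat -> abuffersink chain through the project's wrappers (abufsrc_cfg, vol_cfg, afmt_cfg, abufsink_cfg). For reference, a condensed sketch of the same chain with the stock libavfilter calls; the option strings are illustrative values, not the player's real parameters, and return checks are omitted:

AVFilterGraph *g = avfilter_graph_alloc();
AVFilterContext *src, *vol, *fmt, *sink;

avfilter_graph_create_filter(&src, avfilter_get_by_name("abuffer"), "in",
	"time_base=1/48000:sample_rate=48000:sample_fmt=fltp:channel_layout=stereo",
	0, g);
avfilter_graph_create_filter(&vol, avfilter_get_by_name("volume"), "vol",
	"volume=1.0", 0, g);
avfilter_graph_create_filter(&fmt, avfilter_get_by_name("aformat"), "afmt",
	"sample_fmts=s16:sample_rates=48000:channel_layouts=stereo", 0, g);
avfilter_graph_create_filter(&sink, avfilter_get_by_name("abuffersink"), "out",
	0, 0, g);
avfilter_link(src, 0, vol, 0);		/* source -> volume */
avfilter_link(vol, 0, fmt, 0);		/* volume -> format conversion */
avfilter_link(fmt, 0, sink, 0);		/* format conversion -> sink */
avfilter_graph_config(g, 0);		/* validate and configure the graph */
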
|
145 |
|
static void audio_filt_cmd_mute(void) |
|
146 |
|
{ |
|
147 |
|
int r; |
|
148 |
|
u8 vol_l10n_str[sizeof("xxx.xx")]; /* should be overkill */ |
|
149 |
|
u8 resp[STR_SZ]; |
|
150 |
|
|
|
151 |
|
if (filt_p.muted) { |
|
152 |
|
POUTAF("COMMAND:unmuting\n"); |
|
153 |
|
|
|
154 |
|
snprintf(vol_l10n_str, sizeof(vol_l10n_str), "%f", filt_p.vol); |
|
155 |
|
r = avfilter_graph_send_cmd(graph_l, "vol", "volume", |
|
156 |
|
vol_l10n_str, resp, sizeof(resp), 0); |
|
157 |
|
if (r < 0) { |
|
158 |
|
WARNINGAF("ffmpeg:volume context:unable to unmute (restore the volume):response from volume filter:%s\n", resp); |
|
159 |
|
} else { |
|
160 |
|
filt_p.muted = false; |
|
161 |
|
} |
|
162 |
|
} else { |
|
163 |
|
POUTAF("COMMAND:muting\n"); |
|
164 |
|
|
|
165 |
|
r = avfilter_graph_send_cmd(graph_l, "vol", "volume", |
|
166 |
|
double_zero_l10n_str_l, resp, sizeof(resp), 0); |
|
167 |
|
if (r < 0) { |
|
168 |
|
WARNINGAF("ffmpeg:volume context:unable to mute the volume to 0:response from volume filter:%s\n", resp); |
|
169 |
|
} else { |
|
170 |
|
filt_p.muted = true; |
|
171 |
|
} |
|
172 |
|
} |
|
173 |
|
} |
|
174 |
|
static void audio_filt_cmd_vol_down(void) |
|
175 |
|
{ |
|
176 |
|
int r; |
|
177 |
|
u8 vol_l10n_str[sizeof("xxx.xx")]; /* should be overkill */ |
|
178 |
|
u8 resp[STR_SZ]; |
|
179 |
|
|
|
180 |
|
filt_p.vol -= VOL_DELTA; |
|
181 |
|
if (filt_p.vol < 0.) |
|
182 |
|
filt_p.vol = 0.; |
|
183 |
|
snprintf(vol_l10n_str, sizeof(vol_l10n_str), "%f", filt_p.vol); |
|
184 |
|
POUTAF("COMMAND:volume down to value %s\n", vol_l10n_str); |
|
185 |
|
r = avfilter_graph_send_cmd(graph_l, "vol", "volume", vol_l10n_str, |
|
186 |
|
resp, sizeof(resp), 0); |
|
187 |
|
if (r < 0) |
|
188 |
|
WARNINGAF("ffmpeg:volume context:unable to set the volume down to \"%s\":response from volume filter:\"%s\"\n", vol_l10n_str, resp); |
|
189 |
|
} |
|
190 |
|
static void audio_filt_cmd_vol_up(void) |
|
191 |
|
{ |
|
192 |
|
int r; |
|
193 |
|
u8 vol_l10n_str[sizeof("xxx.xx")]; /* should be overkill */ |
|
194 |
|
u8 resp[STR_SZ]; |
|
195 |
|
|
|
196 |
|
filt_p.vol += VOL_DELTA; |
|
197 |
|
if (filt_p.vol > 1.) |
|
198 |
|
filt_p.vol = 1.; |
|
199 |
|
snprintf(vol_l10n_str, sizeof(vol_l10n_str), "%f", filt_p.vol); |
|
200 |
|
POUTAF("COMMAND:volume up to value %s\n", vol_l10n_str); |
|
201 |
|
r = avfilter_graph_send_cmd(graph_l, "vol", "volume", vol_l10n_str, |
|
202 |
|
resp, sizeof(resp), 0); |
|
203 |
|
if (r < 0) |
|
204 |
|
WARNINGAF("ffmpeg:volume context:unable to set the volume up to \"%s\":response from volume filter:\"%s\"\n", vol_l10n_str, resp); |
|
205 |
|
} |
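
All three commands above go through the graph command interface, addressed to the filter instance named "vol". With the stock name the call reduces to the sketch below; graph, the "0.5" argument and the response buffer size are illustrative assumptions. Flags 0 broadcasts the command to every matching filter, while AVFILTER_CMD_FLAG_ONE (which the npa.c hunk earlier in this change drops in favor of 0) stops at the first filter that accepts it:

char resp[256];
int r;

r = avfilter_graph_send_command(graph, "vol", "volume", "0.5",
	resp, sizeof(resp), 0);
if (r < 0)
	/* resp may carry the filter's textual answer or error */
	fprintf(stderr, "volume command failed:%s\n", resp);
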
File npv/audio/local/code.frag.c added (mode: 100644) (index 0000000..54b4c5f) |
|
1 |
|
static bool ff_fmt2pcm_layout_best_effort(enum avutil_audio_fr_fmt_t ff_fmt, |
|
2 |
|
snd_pcm_fmt_t *alsa_fmt, snd_pcm_access_t *alsa_access) |
|
3 |
|
{ |
|
4 |
|
static u8 ff_fmt_str[STR_SZ]; |
|
5 |
|
|
|
6 |
|
avutil_get_audio_fr_fmt_str(ff_fmt_str, STR_SZ, ff_fmt); |
|
7 |
|
|
|
8 |
|
/* XXX: only classic non-mmap ones */ |
|
9 |
|
switch (ff_fmt) { |
|
10 |
|
case AVUTIL_AUDIO_FR_FMT_U8: |
|
11 |
|
*alsa_fmt = SND_PCM_FMT_U8; |
|
12 |
|
*alsa_access = SND_PCM_ACCESS_RW_INTERLEAVED; |
|
13 |
|
break; |
|
14 |
|
case AVUTIL_AUDIO_FR_FMT_S16: |
|
15 |
|
*alsa_fmt = SND_PCM_FMT_S16; |
|
16 |
|
*alsa_access = SND_PCM_ACCESS_RW_INTERLEAVED; |
|
17 |
|
break; |
|
18 |
|
case AVUTIL_AUDIO_FR_FMT_S32: |
|
19 |
|
*alsa_fmt = SND_PCM_FMT_S32; |
|
20 |
|
*alsa_access = SND_PCM_ACCESS_RW_INTERLEAVED; |
|
21 |
|
break; |
|
22 |
|
case AVUTIL_AUDIO_FR_FMT_FLT: |
|
23 |
|
*alsa_fmt = SND_PCM_FMT_FLOAT; |
|
24 |
|
*alsa_access = SND_PCM_ACCESS_RW_INTERLEAVED; |
|
25 |
|
break; |
|
26 |
|
/* ff "planar" fmts are actually non interleaved fmts */ |
|
27 |
|
case AVUTIL_AUDIO_FR_FMT_U8P: |
|
28 |
|
*alsa_fmt = SND_PCM_FMT_U8; |
|
29 |
|
*alsa_access = SND_PCM_ACCESS_RW_NONINTERLEAVED; |
|
30 |
|
break; |
|
31 |
|
case AVUTIL_AUDIO_FR_FMT_S16P: |
|
32 |
|
*alsa_fmt = SND_PCM_FMT_S16; |
|
33 |
|
*alsa_access = SND_PCM_ACCESS_RW_NONINTERLEAVED; |
|
34 |
|
break; |
|
35 |
|
case AVUTIL_AUDIO_FR_FMT_S32P: |
|
36 |
|
*alsa_fmt = SND_PCM_FMT_S32; |
|
37 |
|
*alsa_access = SND_PCM_ACCESS_RW_NONINTERLEAVED; |
|
38 |
|
break; |
|
39 |
|
case AVUTIL_AUDIO_FR_FMT_FLTP: |
|
40 |
|
*alsa_fmt = SND_PCM_FMT_FLOAT; |
|
41 |
|
*alsa_access = SND_PCM_ACCESS_RW_NONINTERLEAVED; |
|
42 |
|
break; |
|
43 |
|
default: |
|
44 |
|
POUTA("best effort:unable to wire ffmpeg sample format \"%sbits\" to alsa sample format\n", ff_fmt_str); |
|
45 |
|
return false; |
|
46 |
|
} |
|
47 |
|
POUTA("best effort:ffmpeg format \"%sbits\" (%u bytes) to alsa layout \"%s\" and access \"%s\"\n", ff_fmt_str, av_get_bytes_per_sample(ff_fmt), snd_pcm_fmt_desc(*alsa_fmt), snd_pcm_access_name(*alsa_access)); |
|
48 |
|
return true; |
|
49 |
|
} |
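
ff_fmt2pcm_layout_best_effort pairs each FFmpeg sample format with an ALSA format plus an access mode; FFmpeg's planar formats map to non-interleaved ALSA access because each channel lives in its own buffer. The same decision for the two float rows, written with the stock libavutil/alsa-lib enum names (the project headers rename them):

#include <libavutil/samplefmt.h>
#include <alsa/asoundlib.h>

/* illustrative: only the float rows of the mapping above */
static int pick_float_layout(enum AVSampleFormat f,
	snd_pcm_format_t *fmt, snd_pcm_access_t *access)
{
	if (f != AV_SAMPLE_FMT_FLT && f != AV_SAMPLE_FMT_FLTP)
		return -1;				/* other rows omitted */
	*fmt = SND_PCM_FORMAT_FLOAT;
	*access = av_sample_fmt_is_planar(f)
		? SND_PCM_ACCESS_RW_NONINTERLEAVED	/* one buffer per channel */
		: SND_PCM_ACCESS_RW_INTERLEAVED;	/* samples packed per frame */
	return 0;
}
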
|
50 |
|
static void pcm_hw_chans_n_decide(snd_pcm_t *pcm, |
|
51 |
|
snd_pcm_hw_params_t *pcm_hw_params, unsigned int chans_n) |
|
52 |
|
{ |
|
53 |
|
int r; |
|
54 |
|
unsigned int chans_n_max; |
|
55 |
|
unsigned int chans_n_min; |
|
56 |
|
|
|
57 |
|
r = snd_pcm_hw_params_test_chans_n(pcm, pcm_hw_params, chans_n); |
|
58 |
|
if (r == 0) { |
|
59 |
|
r = snd_pcm_hw_params_set_chans_n(pcm, pcm_hw_params, chans_n); |
|
60 |
|
if (r != 0) |
|
61 |
|
FATALA("alsa:unable to restrict pcm device to %u channels, count which was successfully tested\n", chans_n); |
|
62 |
|
POUTA("alsa:using %u channels\n", chans_n); |
|
63 |
|
return; |
|
64 |
|
} |
|
65 |
|
POUTA("alsa:unable to use %u channels\n", chans_n); |
|
66 |
|
/* try to use the max chans n the pcm can */ |
|
67 |
|
r = snd_pcm_hw_params_get_chans_n_max(pcm_hw_params, &chans_n_max); |
|
68 |
|
if (r != 0) |
|
69 |
|
FATALA("alsa:unable to get the maximum count of pcm device channels\n"); |
|
70 |
|
r = snd_pcm_hw_params_test_chans_n(pcm, pcm_hw_params, chans_n_max); |
|
71 |
|
if (r == 0) { |
|
72 |
|
r = snd_pcm_hw_params_set_chans_n(pcm, pcm_hw_params, |
|
73 |
|
chans_n_max); |
|
74 |
|
if (r != 0) |
|
75 |
|
FATALA("alsa:unable to restrict pcm device to %u channels, count which was successfully tested\n", chans_n_max); |
|
76 |
|
POUTA("alsa:using pcm maximum %u channels\n", chans_n_max); |
|
77 |
|
return; |
|
78 |
|
} |
|
79 |
|
/* ok... last try, the pcm dev min chans n */ |
|
80 |
|
r = snd_pcm_hw_params_get_chans_n_min(pcm_hw_params, &chans_n_min); |
|
81 |
|
if (r != 0) |
|
82 |
|
FATALA("alsa:unable to get the minimum count of pcm device channels\n"); |
|
83 |
|
r = snd_pcm_hw_params_test_chans_n(pcm, pcm_hw_params, chans_n_min); |
|
84 |
|
if (r == 0) { |
|
85 |
|
r = snd_pcm_hw_params_set_chans_n(pcm, pcm_hw_params, |
|
86 |
|
chans_n_min); |
|
87 |
|
if (r != 0) |
|
88 |
|
FATALA("alsa:unable to restrict pcm device to %u channels, count which was successfully tested\n", chans_n_min); |
|
89 |
|
POUTA("alsa:using pcm device minimum %u channels\n", chans_n_min); |
|
90 |
|
return; |
|
91 |
|
} |
|
92 |
|
FATALA("alsa:unable to find a suitable count of channels\n"); |
|
93 |
|
} |
|
94 |
|
static void pcm_hw_rate_decide(snd_pcm_t *pcm, |
|
95 |
|
snd_pcm_hw_params_t *pcm_hw_params, unsigned int rate) |
|
96 |
|
{ |
|
97 |
|
int r; |
|
98 |
|
unsigned int rate_max; |
|
99 |
|
unsigned int rate_near; |
|
100 |
|
unsigned int rate_min; |
|
101 |
|
|
|
102 |
|
r = snd_pcm_hw_params_test_rate(pcm, pcm_hw_params, rate, |
|
103 |
|
SND_PCM_ST_PLAYBACK); |
|
104 |
|
if (r == 0) { |
|
105 |
|
r = snd_pcm_hw_params_set_rate(pcm, pcm_hw_params, rate, |
|
106 |
|
SND_PCM_ST_PLAYBACK); |
|
107 |
|
if (r != 0) |
|
108 |
|
FATALA("alsa:unable to restrict pcm device to %uHz, which was successfully tested\n", rate); |
|
109 |
|
POUTA("alsa:using %uHz\n", rate); |
|
110 |
|
return; |
|
111 |
|
} |
|
112 |
|
POUTA("alsa:unable to use %uHz\n", rate); |
|
113 |
|
/* try to use the max rate the pcm can */ |
|
114 |
|
r = snd_pcm_hw_params_get_rate_max(pcm_hw_params, &rate_max, |
|
115 |
|
SND_PCM_ST_PLAYBACK); |
|
116 |
|
if (r != 0) |
|
117 |
|
FATALA("alsa:unable to get the maximum rate of pcm device\n"); |
|
118 |
|
r = snd_pcm_hw_params_test_rate(pcm, pcm_hw_params, rate_max, |
|
119 |
|
SND_PCM_ST_PLAYBACK); |
|
120 |
|
if (r == 0) { |
|
121 |
|
r = snd_pcm_hw_params_set_rate(pcm, pcm_hw_params, rate_max, |
|
122 |
|
SND_PCM_ST_PLAYBACK); |
|
123 |
|
if (r != 0) |
|
124 |
|
FATALA("alsa:unable to restrict pcm device to %uHz, which was successfully tested\n", rate_max); |
|
125 |
|
POUTA("alsa:using pcm device %uHz\n", rate_max); |
|
126 |
|
return; |
|
127 |
|
} |
|
128 |
|
/* try to use a rate "near" of what the pcm dev can */ |
|
129 |
|
rate_near = rate; |
|
130 |
|
r = snd_pcm_hw_params_set_rate_near(pcm, pcm_hw_params, &rate_near, |
|
131 |
|
SND_PCM_ST_PLAYBACK); |
|
132 |
|
if (r == 0) { |
|
133 |
|
POUTA("alsa:using pcm device %uHz\n", rate_near); |
|
134 |
|
return; |
|
135 |
|
} |
|
136 |
|
/* even a "near" rate failed... try the min */ |
|
137 |
|
r = snd_pcm_hw_params_get_rate_min(pcm_hw_params, &rate_min, |
|
138 |
|
SND_PCM_ST_PLAYBACK); |
|
139 |
|
if (r != 0) |
|
140 |
|
FATALA("alsa:unable to get the minimum rate of pcm device\n"); |
|
141 |
|
r = snd_pcm_hw_params_test_rate(pcm, pcm_hw_params, rate_min, |
|
142 |
|
SND_PCM_ST_PLAYBACK); |
|
143 |
|
if (r == 0) { |
|
144 |
|
r = snd_pcm_hw_params_set_rate(pcm, pcm_hw_params, rate_min, |
|
145 |
|
SND_PCM_ST_PLAYBACK); |
|
146 |
|
if (r != 0) |
|
147 |
|
FATALA("alsa:unable to restrict pcm device to %uHz, which was successfully tested\n", rate_min); |
|
148 |
|
POUTA("alsa:using pcm device %uHz\n", rate_min); |
|
149 |
|
return; |
|
150 |
|
} |
|
151 |
|
FATALA("alsa:unable to find a suitable rate\n"); |
|
152 |
|
} |
|
153 |
|
static bool pcm_hw_fmt_decide_x(snd_pcm_t *pcm, |
|
154 |
|
snd_pcm_hw_params_t *pcm_hw_params, snd_pcm_fmt_t fmt) |
|
155 |
|
{ |
|
156 |
|
int r; |
|
157 |
|
|
|
158 |
|
r = snd_pcm_hw_params_test_fmt(pcm, pcm_hw_params, fmt); |
|
159 |
|
if (r != 0) |
|
160 |
|
return false; |
|
161 |
|
r = snd_pcm_hw_params_set_fmt(pcm, pcm_hw_params, fmt); |
|
162 |
|
if (r != 0) |
|
163 |
|
FATALA("alsa:unable to restrict pcm device to \"%s\", which was successfully tested\n", snd_pcm_fmt_desc(fmt)); |
|
164 |
|
POUTA("alsa:using \"%s\" format\n", snd_pcm_fmt_desc(fmt)); |
|
165 |
|
return true; |
|
166 |
|
} |
|
167 |
|
#define PCM_HW_FMT_DECIDE_X(fmt) pcm_hw_fmt_decide_x(pcm, pcm_hw_params, fmt) |
|
168 |
|
static void pcm_hw_fmt_decide(snd_pcm_t *pcm, |
|
169 |
|
snd_pcm_hw_params_t *pcm_hw_params, bool force, |
|
170 |
|
snd_pcm_fmt_t forced_fmt) |
|
171 |
|
{ |
|
172 |
|
int r; |
|
173 |
|
snd_pcm_fmt_t *fmt; |
|
174 |
|
|
|
175 |
|
if (force) { |
|
176 |
|
r = snd_pcm_hw_params_test_fmt(pcm, pcm_hw_params, forced_fmt); |
|
177 |
|
if (r == 0) { |
|
178 |
|
r = snd_pcm_hw_params_set_fmt(pcm, pcm_hw_params, |
|
179 |
|
forced_fmt); |
|
180 |
|
if (r != 0) |
|
181 |
|
FATALA("alsa:unable to restrict pcm device to \"%s\", which was successfully tested\n", snd_pcm_fmt_desc(forced_fmt)); |
|
182 |
|
POUTA("alsa:using forced \"%s\" format\n", snd_pcm_fmt_desc(forced_fmt)); |
|
183 |
|
return; |
|
184 |
|
} |
|
185 |
|
} |
|
186 |
|
/* then we try to select from the reasonable "best" to the lowest */ |
|
187 |
|
/* prefer fmts we know supported by ff */ |
|
188 |
|
if (PCM_HW_FMT_DECIDE_X(SND_PCM_FMT_FLOAT)) |
|
189 |
|
return; |
|
190 |
|
if (PCM_HW_FMT_DECIDE_X(SND_PCM_FMT_S32)) |
|
191 |
|
return; |
|
192 |
|
if (PCM_HW_FMT_DECIDE_X(SND_PCM_FMT_S16)) |
|
193 |
|
return; |
|
194 |
|
if (PCM_HW_FMT_DECIDE_X(SND_PCM_FMT_U8)) |
|
195 |
|
return; |
|
196 |
|
/* |
|
197 |
|
* from here, at the time of writing, those fmts have no ff |
|
198 |
|
* wiring, but we are alsa centric here, validate that later |
|
199 |
|
*/ |
|
200 |
|
if (PCM_HW_FMT_DECIDE_X(SND_PCM_FMT_U32)) |
|
201 |
|
return; |
|
202 |
|
if (PCM_HW_FMT_DECIDE_X(SND_PCM_FMT_S24)) |
|
203 |
|
return; |
|
204 |
|
if (PCM_HW_FMT_DECIDE_X(SND_PCM_FMT_U24)) |
|
205 |
|
return; |
|
206 |
|
if (PCM_HW_FMT_DECIDE_X(SND_PCM_FMT_U16)) |
|
207 |
|
return; |
|
208 |
|
if (PCM_HW_FMT_DECIDE_X(SND_PCM_FMT_S8)) |
|
209 |
|
return; |
|
210 |
|
FATALA("alsa:unable to find a suitable format\n"); |
|
211 |
|
} |
|
212 |
|
#undef PCM_HW_FMT_DECIDE_X |
|
213 |
|
static bool pcm_hw_access_decide_x(snd_pcm_t *pcm, |
|
214 |
|
snd_pcm_hw_params_t *pcm_hw_params, snd_pcm_access_t access) |
|
215 |
|
{ |
|
216 |
|
int r; |
|
217 |
|
|
|
218 |
|
r = snd_pcm_hw_params_test_access(pcm, pcm_hw_params, access); |
|
219 |
|
if (r != 0) |
|
220 |
|
return false; |
|
221 |
|
r = snd_pcm_hw_params_set_access(pcm, pcm_hw_params, access); |
|
222 |
|
if (r != 0) |
|
223 |
|
FATALA("alsa:unable to restrict pcm device to \"%s\", which was successfully tested\n", snd_pcm_access_name(access)); |
|
224 |
|
POUTA("alsa:using \"%s\" access\n", snd_pcm_access_name(access)); |
|
225 |
|
return true; |
|
226 |
|
} |
|
227 |
|
#define PCM_HW_ACCESS_DECIDE_X(access) \ |
|
228 |
|
pcm_hw_access_decide_x(pcm, pcm_hw_params, access) |
|
229 |
|
/* XXX: only classic non-mmap ones */ |
|
230 |
|
static void pcm_hw_access_decide(snd_pcm_t *pcm, |
|
231 |
|
snd_pcm_hw_params_t *pcm_hw_params, bool force, |
|
232 |
|
snd_pcm_access_t forced_access) |
|
233 |
|
{ |
|
234 |
|
int r; |
|
235 |
|
snd_pcm_access_t access; |
|
236 |
|
|
|
237 |
|
if (force) { |
|
238 |
|
r = snd_pcm_hw_params_test_access(pcm, pcm_hw_params, |
|
239 |
|
forced_access); |
|
240 |
|
if (r == 0) { |
|
241 |
|
r = snd_pcm_hw_params_set_access(pcm, pcm_hw_params, |
|
242 |
|
forced_access); |
|
243 |
|
if (r != 0) |
|
244 |
|
FATALA("alsa:unable to restrict pcm device to \"%s\", which was successfully tested\n", snd_pcm_access_name(forced_access)); |
|
245 |
|
POUTA("alsa:using forced \"%s\" access\n", snd_pcm_access_name(forced_access)); |
|
246 |
|
return; |
|
247 |
|
} |
|
248 |
|
} |
|
249 |
|
/* brute force */ |
|
250 |
|
if (PCM_HW_ACCESS_DECIDE_X(SND_PCM_ACCESS_RW_INTERLEAVED)) |
|
251 |
|
return; |
|
252 |
|
if (PCM_HW_ACCESS_DECIDE_X(SND_PCM_ACCESS_RW_NONINTERLEAVED)) |
|
253 |
|
return; |
|
254 |
|
FATALA("alsa:unable to find a suitable access\n"); |
|
255 |
|
} |
|
256 |
|
#undef PCM_HW_ACCESS_DECIDE_X |
|
257 |
|
/* |
|
258 |
|
* latency control: some audio bufs can be huge (tested on a pulseaudio with 10 |
|
259 |
|
* secs audio buf). if we are careless, we will quickly fill this buf which is |
|
260 |
|
* worth a significant amount of time, hence will add huge latency to our |
|
261 |
|
* interactive audio filtering (vol...). in the case of the 10 secs pulseaudio |
|
262 |
|
* buf, it means if you want to mute the audio, it will happen 10 secs later. |
|
263 |
|
* we add latency control by limiting the sz of the dev audio buf, in periods |
|
264 |
|
* n. |
|
265 |
|
* we choose roughly 0.25 secs, or roughly (rate / 4) frs. |
|
266 |
|
*/ |
|
267 |
|
static void pcm_hw_buf_sz_cfg(snd_pcm_t *pcm, |
|
268 |
|
snd_pcm_hw_params_t *pcm_hw_params) |
|
269 |
|
{ |
|
270 |
|
int r; |
|
271 |
|
snd_pcm_ufrs_t latency_control_target_buf_ufrs_n; |
|
272 |
|
snd_pcm_ufrs_t latency_control_buf_ufrs_n; |
|
273 |
|
unsigned int rate; |
|
274 |
|
|
|
275 |
|
r = snd_pcm_hw_params_get_rate(pcm_hw_params, &rate, 0); |
|
276 |
|
if (r < 0) { |
|
277 |
|
WARNINGA("alsa:latency control:DISABLING LATENCY CONTROL:unable to get the decided rate from the current device parameters\n"); |
|
278 |
|
return; |
|
279 |
|
} |
|
280 |
|
latency_control_target_buf_ufrs_n = (snd_pcm_ufrs_t)rate; |
|
281 |
|
latency_control_target_buf_ufrs_n /= 4; |
|
282 |
|
latency_control_buf_ufrs_n = latency_control_target_buf_ufrs_n; |
|
283 |
|
r = snd_pcm_hw_params_set_buf_sz_near(pcm, pcm_hw_params, |
|
284 |
|
&latency_control_buf_ufrs_n); |
|
285 |
|
if (r < 0) { |
|
286 |
|
WARNINGA("alsa:latency control:DISABLING_LATENCY_CONTROL:unable to set the audio buffer size (count of frames) to %u periods for the current device parameters\n", latency_control_buf_ufrs_n); |
|
287 |
|
return; |
|
288 |
|
} |
|
289 |
|
POUTA("alsa:latency control:target buffer frame count is %u (~0.25 sec), got an audio buffer size set to %u frames\n", latency_control_target_buf_ufrs_n, latency_control_buf_ufrs_n); |
|
290 |
|
} |
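
A worked number for the 0.25 s target above: at 48000 Hz, rate / 4 = 12000 frames. snd_pcm_hw_params_set_buffer_size_near (the stock name of the call used here) may round that to whatever the hardware supports, which is why the code reports both the target and the value it actually got. Minimal sketch, assuming pcm and hw_params are already populated:

unsigned int rate = 0;
snd_pcm_uframes_t want, got;

snd_pcm_hw_params_get_rate(hw_params, &rate, 0);	/* e.g. 48000 */
want = rate / 4;					/* ~0.25 s, 12000 frames */
got = want;
snd_pcm_hw_params_set_buffer_size_near(pcm, hw_params, &got);
/* got now holds the nearest buffer size the device accepted */
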
|
291 |
|
/* |
|
292 |
|
* this function will "decide" the pcm dev cfg: |
|
293 |
|
* the goal is to be the "closest" to the provided params, |
|
294 |
|
* the "gap" will have to be "filled" with ff filts |
|
295 |
|
* |
|
296 |
|
* the "strategy" is a "fall-thru" (chans n then ... then ...) which |
|
297 |
|
* will "restrict" the pcm dev cfg further at each step |
|
298 |
|
* |
|
299 |
|
* we try to use a sensible restrict order regarding audio props |
|
300 |
|
*/ |
|
301 |
|
static void pcm_cfg_hw_core(snd_pcm_t *pcm, snd_pcm_hw_params_t *pcm_hw_params, |
|
302 |
|
int chans_n, int rate, enum avutil_audio_fr_fmt_t ff_fmt) |
|
303 |
|
{ |
|
304 |
|
int r; |
|
305 |
|
bool best_effort_wiring_success; |
|
306 |
|
snd_pcm_fmt_t fmt_from_best_effort; |
|
307 |
|
snd_pcm_access_t access_from_best_effort; |
|
308 |
|
|
|
309 |
|
/* the return value is from a first refine of the raw hw params */ |
|
310 |
|
r = snd_pcm_hw_params_any(pcm, pcm_hw_params); |
|
311 |
|
if (r < 0) |
|
312 |
|
FATALA("alsa:unable to populate the hardware parameters context\n"); |
|
313 |
|
pcm_hw_chans_n_decide(pcm, pcm_hw_params, (unsigned int)chans_n); |
|
314 |
|
pcm_hw_rate_decide(pcm, pcm_hw_params, (unsigned int)rate); |
|
315 |
|
/* try our best */ |
|
316 |
|
best_effort_wiring_success = ff_fmt2pcm_layout_best_effort( |
|
317 |
|
ff_fmt, &fmt_from_best_effort, &access_from_best_effort); |
|
318 |
|
pcm_hw_fmt_decide(pcm, pcm_hw_params, best_effort_wiring_success, |
|
319 |
|
fmt_from_best_effort); |
|
320 |
|
pcm_hw_access_decide(pcm, pcm_hw_params, best_effort_wiring_success, |
|
321 |
|
access_from_best_effort); |
|
322 |
|
pcm_hw_buf_sz_cfg(pcm, pcm_hw_params); |
|
323 |
|
} |
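
pcm_cfg_hw_core narrows the hardware configuration step by step (channels, then rate, then format/access, then buffer size) and pcm_cfg_hw installs the result. The fallback-free happy path, written with the stock alsa-lib names and illustrative values, return checks omitted:

snd_pcm_hw_params_t *hw;
unsigned int rate = 48000;

snd_pcm_hw_params_malloc(&hw);
snd_pcm_hw_params_any(pcm, hw);		/* start from the full parameter space */
snd_pcm_hw_params_set_channels(pcm, hw, 2);
snd_pcm_hw_params_set_rate_near(pcm, hw, &rate, 0);
snd_pcm_hw_params_set_format(pcm, hw, SND_PCM_FORMAT_S16);
snd_pcm_hw_params_set_access(pcm, hw, SND_PCM_ACCESS_RW_INTERLEAVED);
snd_pcm_hw_params(pcm, hw);		/* install the configuration */
snd_pcm_hw_params_free(hw);
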
|
324 |
|
/* based on the kernel api at the time we wrote this code */ |
|
325 |
|
static u8 *kernel_ts_types_str[] = { |
|
326 |
|
"compat", |
|
327 |
|
"default", |
|
328 |
|
"link", |
|
329 |
|
"link absolute", |
|
330 |
|
"link estimated", |
|
331 |
|
"link synchonized" |
|
332 |
|
}; |
|
333 |
|
static void pcm_cfg_hw(snd_pcm_t *pcm, unsigned int chans_n, unsigned int rate, |
|
334 |
|
enum avutil_audio_fr_fmt_t ff_fmt) |
|
335 |
|
{ |
|
336 |
|
int r; |
|
337 |
|
s8 i; |
|
338 |
|
snd_pcm_access_t access; |
|
339 |
|
snd_pcm_hw_params_t *hw_params; |
|
340 |
|
|
|
341 |
|
POUTA("ALSA:HW_PARAMS START------------------------------------------------------------\n"); |
|
342 |
|
r = snd_pcm_hw_params_malloc(&hw_params); |
|
343 |
|
if (r < 0) |
|
344 |
|
FATALA("alsa:unable to allocate hardware parameters context\n"); |
|
345 |
|
|
|
346 |
|
pcm_cfg_hw_core(pcm, hw_params, chans_n, rate, ff_fmt); |
|
347 |
|
|
|
348 |
|
r = snd_pcm_hw_params(pcm, hw_params); |
|
349 |
|
if (r != 0) |
|
350 |
|
FATALA("alsa:unable to install the hardware parameters\n"); |
|
351 |
|
r = snd_pcm_hw_params_current(pcm, hw_params); |
|
352 |
|
if (r != 0) |
|
353 |
|
FATALA("alsa:unable to get current hardware parameters\n"); |
|
354 |
|
snd_pcm_hw_params_dump(hw_params, pcm_pout_l); |
|
355 |
|
|
|
356 |
|
i = 0; |
|
357 |
|
selected_ts_type_p = -1; |
|
358 |
|
loop { |
|
359 |
|
if (i == ARRAY_N(kernel_ts_types_str)) |
|
360 |
|
break; |
|
361 |
|
r = snd_pcm_hw_params_supports_audio_ts_type(hw_params, i); |
|
362 |
|
if (r == 1) { |
|
363 |
|
selected_ts_type_p = i; |
|
364 |
|
POUTA("kernel audio timestamp type \"%s\" is supported for the current configuration\n", kernel_ts_types_str[i]); |
|
365 |
|
} |
|
366 |
|
++i; |
|
367 |
|
} |
|
368 |
|
/* |
|
369 |
|
* we selected the most accurate, namely with the highest idx, audio ts |
|
370 |
|
* type |
|
371 |
|
*/ |
|
372 |
|
POUTA("%s will be used for the audio based clock\n", kernel_ts_types_str[selected_ts_type_p]); |
|
373 |
|
snd_pcm_hw_params_free(hw_params); |
|
374 |
|
POUTA("ALSA:HW_PARAMS END--------------------------------------------------------------\n"); |
|
375 |
|
} |
|
376 |
|
static void pcm_cfg_sw(snd_pcm_t *pcm) |
|
377 |
|
{ |
|
378 |
|
int r; |
|
379 |
|
snd_pcm_sw_params_t *sw_params; |
|
380 |
|
|
|
381 |
|
POUTA("ALSA:SW_PARAMS START------------------------------------------------------------\n"); |
|
382 |
|
r = snd_pcm_sw_params_malloc(&sw_params); |
|
383 |
|
if (r != 0) |
|
384 |
|
FATALA("alsa:unable to allocate software parameters structure\n"); |
|
385 |
|
r = snd_pcm_sw_params_current(pcm, sw_params); |
|
386 |
|
if (r != 0) |
|
387 |
|
FATALA("alsa:unable to get current software parameters\n"); |
|
388 |
|
r = snd_pcm_sw_params_set_period_evt(pcm, sw_params, 1); |
|
389 |
|
if (r != 0) |
|
390 |
|
FATALA("alsa:unable to enable period event\n"); |
|
391 |
|
/* enable ts to be sure */ |
|
392 |
|
r = snd_pcm_sw_params_set_tstamp_mode(pcm, sw_params, |
|
393 |
|
SND_PCM_TSTAMP_ENABLE); |
|
394 |
|
if (r < 0) |
|
395 |
|
FATALA("unable to set timestamp mode:%s\n", snd_strerror(r)); |
|
396 |
|
r = snd_pcm_sw_params(pcm, sw_params); |
|
397 |
|
if (r != 0) |
|
398 |
|
FATALA("alsa:unable to install software parameters\n"); |
|
399 |
|
snd_pcm_sw_params_dump(sw_params, pcm_pout_l); |
|
400 |
|
snd_pcm_sw_params_free(sw_params); |
|
401 |
|
POUTA("ALSA:SW_PARAMS END--------------------------------------------------------------\n"); |
|
402 |
|
} |
|
403 |
|
static void dec_a_grow(void) |
|
404 |
|
{ |
|
405 |
|
u32 new_idx; |
|
406 |
|
|
|
407 |
|
new_idx = dec_sets_p.n_max; |
|
408 |
|
dec_sets_p.a = realloc(dec_sets_p.a, sizeof(*dec_sets_p.a) |
|
409 |
|
* (dec_sets_p.n_max + 1)); |
|
410 |
|
if (dec_sets_p.a == 0) |
|
411 |
|
FATALA("unable to allocate memory for an additional pointer on a reference of a decoder set of frames\n"); |
|
412 |
|
dec_sets_p.a[new_idx] = avutil_audio_set_ref_alloc(); |
|
413 |
|
if (dec_sets_p.a[new_idx] == 0) |
|
414 |
|
FATALA("ffmpeg:unable to allocate a reference of a decoder set of frames\n"); |
|
415 |
|
++dec_sets_p.n_max; |
|
416 |
|
} |
|
417 |
|
#define AGAIN 0 |
|
418 |
|
#define RECOVERED 1 |
|
419 |
|
#define CONTINUE 2 |
|
420 |
|
static u8 alsa_recover(snd_pcm_sfrs_t r) |
|
421 |
|
{ |
|
422 |
|
if (r >= 0) |
|
423 |
|
return CONTINUE; |
|
424 |
|
/* r < 0 */ |
|
425 |
|
if (r == -EAGAIN) |
|
426 |
|
return AGAIN; |
|
427 |
|
else if (r == -EPIPE || r == -ESTRPIPE) { |
|
428 |
|
/* underrun or suspended */ |
|
429 |
|
int r_recovered; |
|
430 |
|
|
|
431 |
|
r_recovered = snd_pcm_recover(pcm_p, (int)r, 0); |
|
432 |
|
if (r_recovered == 0) { |
|
433 |
|
WARNINGA("alsa:pcm recovered\n"); |
|
434 |
|
return RECOVERED; |
|
435 |
|
} |
|
436 |
|
FATALA("alsa:unable to recover from suspend/underrun\n"); |
|
437 |
|
} |
|
438 |
|
FATALA("alsa:fatal/unhandled error\n"); |
|
439 |
|
} |
|
440 |
|
#undef AGAIN |
|
441 |
|
#undef RECOVERED |
|
442 |
|
#undef CONTINUE |
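
alsa_recover folds the usual write-error handling into three outcomes. With the stock names, the underlying pattern it wraps is the classic recover-and-retry idiom sketched below (pcm, buf and frames are assumed to exist; a short write still has to be resumed by the caller):

snd_pcm_sframes_t n;

n = snd_pcm_writei(pcm, buf, frames);
if (n == -EAGAIN) {
	/* non-blocking pcm: nothing written, poll and retry later */
} else if (n == -EPIPE || n == -ESTRPIPE) {
	/* underrun or suspend: try to bring the pcm back to a runnable state */
	if (snd_pcm_recover(pcm, (int)n, 0) < 0)
		fprintf(stderr, "unrecoverable:%s\n", snd_strerror((int)n));
} else if (n >= 0) {
	/* n frames were written, possibly fewer than requested */
}
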
|
443 |
|
#define NO 0 |
|
444 |
|
#define AGAIN 0 |
|
445 |
|
#define RECOVERED 1 |
|
446 |
|
static void pcm_silence_frs_write(snd_pcm_ufrs_t ufrs_n) { loop |
|
447 |
|
{ |
|
448 |
|
int alsa_r; |
|
449 |
|
u8 r_recover; |
|
450 |
|
int is_planar_fmt; |
|
451 |
|
|
|
452 |
|
if (ufrs_n == 0) |
|
453 |
|
break; |
|
454 |
|
is_planar_fmt = avutil_audio_fr_fmt_is_planar(audio_filt_p.set->fmt); |
|
455 |
|
if (is_planar_fmt == NO) |
|
456 |
|
alsa_r = snd_pcm_writei(pcm_p, pcm_silence_bufs_l[0], ufrs_n); |
|
457 |
|
else |
|
458 |
|
alsa_r = snd_pcm_writen(pcm_p, pcm_silence_bufs_l, ufrs_n); |
|
459 |
|
r_recover = alsa_recover(alsa_r); |
|
460 |
|
if (r_recover == AGAIN) |
|
461 |
|
continue; |
|
462 |
|
else if (r_recover == RECOVERED) |
|
463 |
|
break; |
|
464 |
|
/* r_recover == CONTINUE */ |
|
465 |
|
ufrs_n -= (snd_pcm_ufrs_t)alsa_r; |
|
466 |
|
}} |
|
467 |
|
#undef NO |
|
468 |
|
#undef AGAIN |
|
469 |
|
#undef RECOVERED |
|
470 |
|
#define NO 0 |
|
471 |
|
static void chans_buf_init(u8 **chans_buf, int start_fr_idx) |
|
472 |
|
{ |
|
473 |
|
int is_planar_fmt; |
|
474 |
|
int sample_bytes_n; |
|
475 |
|
|
|
476 |
|
sample_bytes_n = avutil_get_bytes_per_sample(audio_filt_p.set->fmt); |
|
477 |
|
is_planar_fmt = avutil_audio_fr_fmt_is_planar(audio_filt_p.set->fmt); |
|
478 |
|
if (is_planar_fmt == NO) { /* or is pcm interleaved */ |
|
479 |
|
int fr_bytes_n; |
|
480 |
|
|
|
481 |
|
fr_bytes_n = sample_bytes_n * audio_filt_p.set->chans_n; |
|
482 |
|
chans_buf[0] = (u8*)audio_filt_p.set->data[0] + start_fr_idx |
|
483 |
|
* fr_bytes_n; |
|
484 |
|
} else { /* ff planar or pcm noninterleaved */ |
|
485 |
|
int p; |
|
486 |
|
|
|
487 |
|
p = 0; |
|
488 |
|
loop { |
|
489 |
|
if (p == audio_filt_p.set->chans_n) |
|
490 |
|
break; |
|
491 |
|
chans_buf[p] = (u8*)audio_filt_p.set->data[p] |
|
492 |
|
+ start_fr_idx * sample_bytes_n; |
|
493 |
|
++p; |
|
494 |
|
} |
|
495 |
|
} |
|
496 |
|
} |
|
497 |
|
#undef NO |
|
498 |
|
#define NO 0 |
|
499 |
|
/*NSPC*/ |
|
500 |
|
static void chans_buf_inc(u8 **chans_buf, int inc) |
|
501 |
|
{ |
|
502 |
|
int is_planar_fmt; |
|
503 |
|
int sample_bytes_n; |
|
504 |
|
|
|
505 |
|
sample_bytes_n = avutil_get_bytes_per_sample(audio_filt_p.set->fmt); |
|
506 |
|
is_planar_fmt = avutil_audio_fr_fmt_is_planar(audio_filt_p.set->fmt); |
|
507 |
|
if (is_planar_fmt == NO) { /* or is pcm interleaved */ |
|
508 |
|
int fr_bytes_n; |
|
509 |
|
|
|
510 |
|
fr_bytes_n = sample_bytes_n * audio_filt_p.set->chans_n; |
|
511 |
|
chans_buf[0] = (u8*)chans_buf[0] + inc * fr_bytes_n; |
|
512 |
|
} else { /* ff planar or pcm noninterleaved */ |
|
513 |
|
int p; |
|
514 |
|
|
|
515 |
|
p = 0; |
|
516 |
|
loop { |
|
517 |
|
if (p == audio_filt_p.set->chans_n) |
|
518 |
|
break; |
|
519 |
|
chans_buf[p] = (u8*)chans_buf[p] + inc * sample_bytes_n; |
|
520 |
|
++p; |
|
521 |
|
} |
|
522 |
|
} |
|
523 |
|
} |
|
524 |
|
#undef NO |
|
525 |
|
static void draining_state_handle(void) |
|
526 |
|
{ |
|
527 |
|
int r; |
|
528 |
|
|
|
529 |
|
r = snd_pcm_drain(pcm_p); |
|
530 |
|
if (r != 0 && r != -EAGAIN) |
|
531 |
|
FATALA("alsa:an error occurred switching to/checking the pcm draining state:%d\n", r); |
|
532 |
|
else if (r == -EAGAIN) |
|
533 |
|
return; |
|
534 |
|
/* r == 0 */ |
|
535 |
|
EXIT("alsa pcm drained exiting\n"); |
|
536 |
|
} |
|
537 |
|
static void draining_state_switch(void) |
|
538 |
|
{ |
|
539 |
|
int r; |
|
540 |
|
u8 i; |
|
541 |
|
struct itimerspec t; |
|
542 |
|
|
|
543 |
|
draining_p = true; |
|
544 |
|
draining_state_handle(); |
|
545 |
|
/* remove the alsa epoll fds */ |
|
546 |
|
i = 0; |
|
547 |
|
loop { |
|
548 |
|
if (i == pcm_pollfds_n_p) |
|
549 |
|
break; |
|
550 |
|
/* in theory, it is thread safe */ |
|
551 |
|
r = epoll_ctl(npv_ep_fd_p, EPOLL_CTL_DEL, pcm_pollfds_p[i].fd, |
|
552 |
|
0); |
|
553 |
|
if (r == -1) |
|
554 |
|
FATALA("unable to remove the alsa file descriptors from epoll\n"); |
|
555 |
|
++i; |
|
556 |
|
} |
|
557 |
|
/* start the draining timer */ |
|
558 |
|
memset(&t, 0, sizeof(t)); |
|
559 |
|
/* initial and interval */ |
|
560 |
|
t.it_value.tv_nsec = DRAINING_TIMER_INTERVAL_NSECS_N; |
|
561 |
|
t.it_interval.tv_nsec = DRAINING_TIMER_INTERVAL_NSECS_N; |
|
562 |
|
r = timerfd_settime(draining_timer_fd_p, 0, &t, 0); |
|
563 |
|
if (r == -1) |
|
564 |
|
FATALA("unable to arm the draining timer\n"); |
|
565 |
|
} |
|
566 |
|
#define NO 0 |
|
567 |
|
#define AGAIN 0 |
|
568 |
|
#define RECOVERED 1 |
|
569 |
|
#define CONTINUE 2 |
|
570 |
|
#define HAVE_FILT_SET 1 |
|
571 |
|
#define EOF_FILT 2 |
|
572 |
|
#define NO_DEC_SET 2 |
|
573 |
|
static void pcm_filt_frs_write(snd_pcm_ufrs_t ufrs_n) { loop |
|
574 |
|
{ |
|
575 |
|
u8 chan_buf; |
|
576 |
|
u8 *chans_buf[AVUTIL_DATA_PTRS_N]; |
|
577 |
|
snd_pcm_ufrs_t ufrs_to_write_n; |
|
578 |
|
snd_pcm_ufrs_t filt_set_remaining_ufrs_n; /* for clarity */ |
|
579 |
|
int is_planar_fmt; |
|
580 |
|
snd_pcm_ufrs_t written_ufrs_n; /* for clarity */ |
|
581 |
|
|
|
582 |
|
if (ufrs_n == 0) |
|
583 |
|
break; |
|
584 |
|
/* |
|
585 |
|
* in this loop we try to get some filt frs from what we got from the |
|
586 |
|
* dec |
|
587 |
|
*/ |
|
588 |
|
if (audio_filt_p.set->frs_n == 0) loop { |
|
589 |
|
u8 r; |
|
590 |
|
|
|
591 |
|
r = filt_push_dec_sets(); |
|
592 |
|
if (r == NO_DEC_SET || r == AGAIN) |
|
593 |
|
return; /* not enough data for 1 set */ |
|
594 |
|
/* r == PUSHED_ONE_SET || r == FILT_SWITCHED_TO_DRAINING */ |
|
595 |
|
r = audio_filt_set_try_get(); |
|
596 |
|
if (r == EOF_FILT) { |
|
597 |
|
draining_state_switch(); |
|
598 |
|
return; |
|
599 |
|
} else if (r == HAVE_FILT_SET) { |
|
600 |
|
audio_filt_p.pcm_written_ufrs_n = 0; |
|
601 |
|
break; |
|
602 |
|
} |
|
603 |
|
/* r == AGAIN */ |
|
604 |
|
} |
|
605 |
|
chans_buf_init(chans_buf, (int)audio_filt_p.pcm_written_ufrs_n); |
|
606 |
|
filt_set_remaining_ufrs_n = (snd_pcm_ufrs_t)audio_filt_p.set->frs_n |
|
607 |
|
- audio_filt_p.pcm_written_ufrs_n; |
|
608 |
|
if (filt_set_remaining_ufrs_n > ufrs_n) |
|
609 |
|
ufrs_to_write_n = ufrs_n; |
|
610 |
|
else |
|
611 |
|
ufrs_to_write_n = filt_set_remaining_ufrs_n; |
|
612 |
|
is_planar_fmt = avutil_audio_fr_fmt_is_planar(audio_filt_p.set->fmt); |
|
613 |
|
written_ufrs_n = 0; |
|
614 |
|
loop { /* short write loop */ |
|
615 |
|
snd_pcm_sfrs_t alsa_r; |
|
616 |
|
u8 r_recover; |
|
617 |
|
|
|
618 |
|
if (is_planar_fmt == NO) |
|
619 |
|
alsa_r = snd_pcm_writei(pcm_p, chans_buf[0], |
|
620 |
|
ufrs_to_write_n - written_ufrs_n); |
|
621 |
|
else |
|
622 |
|
alsa_r = snd_pcm_writen(pcm_p, (void**)chans_buf, |
|
623 |
|
ufrs_to_write_n - written_ufrs_n); |
|
624 |
|
r_recover = alsa_recover(alsa_r); |
|
625 |
|
if (r_recover == AGAIN) |
|
626 |
|
continue; |
|
627 |
|
else if (r_recover == RECOVERED) { |
|
628 |
|
/* account for the written frs anyway */ |
|
629 |
|
if (audio_filt_p.pcm_written_ufrs_n == 0) |
|
630 |
|
clk_ref_time_point_update(audio_filt_p.set->pts, |
|
631 |
|
written_ufrs_n); |
|
632 |
|
audio_filt_p.pcm_written_ufrs_n += written_ufrs_n; |
|
633 |
|
if ((int)audio_filt_p.pcm_written_ufrs_n == |
|
634 |
|
audio_filt_p.set->frs_n) |
|
635 |
|
/* set audio_filt_p.set->frs_n = 0 */ |
|
636 |
|
avutil_audio_set_unref(audio_filt_p.set); |
|
637 |
|
return; |
|
638 |
|
} |
|
639 |
|
/* r_recover == CONTINUE */ |
|
640 |
|
written_ufrs_n += (snd_pcm_ufrs_t)alsa_r; |
|
641 |
|
if (written_ufrs_n == ufrs_to_write_n) |
|
642 |
|
break; |
|
643 |
|
chans_buf_inc(chans_buf, (int)alsa_r); |
|
644 |
|
} |
|
645 |
|
/* |
|
646 |
|
* this is where we update our ref time point for the audio clk |
|
647 |
|
* because with a new filt set of frs, we get a new ts |
|
648 |
|
* |
|
649 |
|
* XXX: getting the "right" ts from ff is convoluted |
|
650 |
|
*/ |
|
651 |
|
if (audio_filt_p.pcm_written_ufrs_n == 0) |
|
652 |
|
clk_ref_time_point_update(audio_filt_p.set->pts, written_ufrs_n); |
|
653 |
|
audio_filt_p.pcm_written_ufrs_n += written_ufrs_n; |
|
654 |
|
ufrs_n -= written_ufrs_n; |
|
655 |
|
|
|
656 |
|
if ((int)audio_filt_p.pcm_written_ufrs_n == audio_filt_p.set->frs_n) |
|
657 |
|
/* set audio_filt_p.av->frs_n = 0 */ |
|
658 |
|
avutil_audio_set_unref(audio_filt_p.set); |
|
659 |
|
}} |
|
660 |
|
#undef NO |
|
661 |
|
#undef AGAIN |
|
662 |
|
#undef RECOVERED |
|
663 |
|
#undef CONTINUE |
|
664 |
|
#undef HAVE_FILT_SET |
|
665 |
|
#undef EOF_FILT |
|
666 |
|
#undef NO_DEC_SET |
|
667 |
|
/* fatal if the wiring cannot be done */ |
|
668 |
|
static void pcm_layout2ff_fmt_strict(snd_pcm_fmt_t alsa_fmt, |
|
669 |
|
snd_pcm_access_t alsa_access, enum avutil_audio_fr_fmt_t *ff_fmt, |
|
670 |
|
bool print_info) |
|
671 |
|
{ |
|
672 |
|
/* |
|
673 |
|
* ff fmt byte order is always native. |
|
674 |
|
* here we handle little endian only |
|
675 |
|
*/ |
|
676 |
|
switch (alsa_fmt) { |
|
677 |
|
case SND_PCM_FMT_FLOAT: |
|
678 |
|
if (alsa_access == SND_PCM_ACCESS_RW_INTERLEAVED) |
|
679 |
|
*ff_fmt = AVUTIL_AUDIO_FR_FMT_FLT; |
|
680 |
|
else |
|
681 |
|
*ff_fmt = AVUTIL_AUDIO_FR_FMT_FLTP; |
|
682 |
|
break; |
|
683 |
|
case SND_PCM_FMT_S32: |
|
684 |
|
if (alsa_access == SND_PCM_ACCESS_RW_INTERLEAVED) |
|
685 |
|
*ff_fmt = AVUTIL_AUDIO_FR_FMT_S32; |
|
686 |
|
else |
|
687 |
|
*ff_fmt = AVUTIL_AUDIO_FR_FMT_S32P; |
|
688 |
|
break; |
|
689 |
|
case SND_PCM_FMT_S16: |
|
690 |
|
if (alsa_access == SND_PCM_ACCESS_RW_INTERLEAVED) |
|
691 |
|
*ff_fmt = AVUTIL_AUDIO_FR_FMT_S16; |
|
692 |
|
else |
|
693 |
|
*ff_fmt = AVUTIL_AUDIO_FR_FMT_S16P; |
|
694 |
|
break; |
|
695 |
|
case SND_PCM_FMT_U8: |
|
696 |
|
if (alsa_access == SND_PCM_ACCESS_RW_INTERLEAVED) |
|
697 |
|
*ff_fmt = AVUTIL_AUDIO_FR_FMT_U8; |
|
698 |
|
else |
|
699 |
|
*ff_fmt = AVUTIL_AUDIO_FR_FMT_U8P; |
|
700 |
|
break; |
|
701 |
|
default: |
|
702 |
|
FATALA("unable to wire strictly alsa layout \"%s\"/\"%s\" to a ffmpeg format\n", snd_pcm_fmt_desc(alsa_fmt), snd_pcm_access_name(alsa_access)); |
|
703 |
|
} |
|
704 |
|
if (print_info) { |
|
705 |
|
u8 ff_fmt_str[STR_SZ]; |
|
706 |
|
|
|
707 |
|
avutil_get_audio_fr_fmt_str(ff_fmt_str, sizeof(ff_fmt_str), |
|
708 |
|
*ff_fmt); |
|
709 |
|
POUTA("alsa pcm layout \"%s\"/\"%s\" wired strictly to ffmpeg format \"%sbits\"\n", snd_pcm_fmt_desc(alsa_fmt), snd_pcm_access_name(alsa_access), ff_fmt_str); |
|
710 |
|
} |
|
711 |
|
} |
|
712 |
|
/* |
|
713 |
|
* XXX: if it is ever used significantly, a fine granularity wiring strategy |
|
714 |
|
* will be implemented instead of using the default wiring |
|
715 |
|
*/ |
|
716 |
|
static uint64_t pcm_chmaps2ff_chans_layout(snd_pcm_t *pcm, |
|
717 |
|
unsigned int pcm_chans_n, bool print_info) |
|
718 |
|
{ |
|
719 |
|
int r; |
|
720 |
|
uint64_t ff_chans_layout; |
|
721 |
|
snd_pcm_chmap_t *pcm_chmap; |
|
722 |
|
u8 chans_layout_str[STR_SZ]; /* should be overkill */ |
|
723 |
|
|
|
724 |
|
pcm_chmap = snd_pcm_get_chmap(pcm); |
|
725 |
|
if (pcm_chmap == 0) { |
|
726 |
|
if (print_info) |
|
727 |
|
POUTA("alsa:no pcm channel map available, wiring to default ffmpeg channel layout\n"); |
|
728 |
|
} else { |
|
729 |
|
if (print_info) |
|
730 |
|
POUTA("alsa:your pcm device supports channel maps, but the fine granularity wiring strategy is not implemented\n"); |
|
731 |
|
free(pcm_chmap); |
|
732 |
|
} |
|
733 |
|
ff_chans_layout = avutil_get_default_chans_layout((int)pcm_chans_n); |
|
734 |
|
avutil_get_chans_layout_str(chans_layout_str, sizeof(chans_layout_str), |
|
735 |
|
(int)pcm_chans_n, ff_chans_layout); |
|
736 |
|
if (print_info) |
|
737 |
|
POUTA("alsa channel map wired to ffmpeg channel layout:\"%s\" (%u pcm channels)\n", chans_layout_str, pcm_chans_n); |
|
738 |
|
return ff_chans_layout; |
|
739 |
|
} |
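
pcm_chmaps2ff_chans_layout falls back to FFmpeg's default layout for the channel count; with the stock libavutil names (the old uint64_t layout API, which matches the uint64_t layouts used above) the lookup is:

#include <libavutil/channel_layout.h>

char buf[128];
uint64_t layout;

layout = (uint64_t)av_get_default_channel_layout(2);	/* 2 channels */
av_get_channel_layout_string(buf, sizeof(buf), 2, layout);
/* buf now reads "stereo" */
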
|
740 |
|
static void init_pcm_once_public(u8 *pcm_str) |
|
741 |
|
{ |
|
742 |
|
int r; |
|
743 |
|
|
|
744 |
|
r = snd_pcm_open(&pcm_p, pcm_str, SND_PCM_ST_PLAYBACK, |
|
745 |
|
SND_PCM_NONBLOCK); |
|
746 |
|
if (r < 0) { |
|
747 |
|
if (r == -EAGAIN) |
|
748 |
|
FATALA("alsa:\"%s\" pcm is already in use\n", pcm_str); |
|
749 |
|
else |
|
750 |
|
FATALA("alsa:unable to open \"%s\" pcm for playback\n", pcm_str); |
|
751 |
|
} |
|
752 |
|
|
|
753 |
|
r = snd_pcm_poll_descriptors_n(pcm_p); |
|
754 |
|
POUTA("alsa:have %d poll file descriptors\n", r); |
|
755 |
|
if ((r <= 0) || (r > pcm_pollfds_n_max)) |
|
756 |
|
FATALA("alsa:invalid count of alsa poll file descriptors\n"); |
|
757 |
|
pcm_pollfds_n_p = (u8)r; |
|
758 |
|
memset(pcm_pollfds_p, 0, sizeof(pcm_pollfds_p)); |
|
759 |
|
snd_pcm_poll_descriptors(pcm_p, pcm_pollfds_p, pcm_pollfds_n_max); |
|
760 |
|
} |
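
The poll descriptors fetched here are what the event loop registers with epoll. A sketch of that registration, assuming an existing epoll fd ep_fd and the pcm_pollfds_p/pcm_pollfds_n_p filled above; only the poll bits epoll understands are carried over, and the epoll_ctl return check is left out:

#include <poll.h>
#include <string.h>
#include <sys/epoll.h>

unsigned int i;

for (i = 0; i < pcm_pollfds_n_p; ++i) {
	struct epoll_event ev;

	memset(&ev, 0, sizeof(ev));
	if (pcm_pollfds_p[i].events & POLLIN)
		ev.events |= EPOLLIN;
	if (pcm_pollfds_p[i].events & POLLOUT)
		ev.events |= EPOLLOUT;
	ev.data.fd = pcm_pollfds_p[i].fd;
	epoll_ctl(ep_fd, EPOLL_CTL_ADD, pcm_pollfds_p[i].fd, &ev);
}
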
|
761 |
|
static void init_once_public(u8 *pcm_str) |
|
762 |
|
{ |
|
763 |
|
st_idx_p = -1; |
|
764 |
|
pkt_q_p = pkt_q_new("audio"); |
|
765 |
|
dec_ctx_p = 0; |
|
766 |
|
dec_sets_p.eof_receive = false; |
|
767 |
|
dec_sets_p.n_max = 0; |
|
768 |
|
dec_sets_p.n = 0; |
|
769 |
|
dec_sets_p.a = 0; |
|
770 |
|
init_pcm_once_public(pcm_str); |
|
771 |
|
selected_ts_type_p = -1; |
|
772 |
|
/* linux bug: still no CLOCK_MONOTONIC_RAW for timerfd */ |
|
773 |
|
errno = 0; |
|
774 |
|
draining_timer_fd_p = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK); |
|
775 |
|
if (draining_timer_fd_p == -1) |
|
776 |
|
FATALA("unable to get a draining timer file descriptor:%s\n", strerror(errno)); |
|
777 |
|
draining_p = false; |
|
778 |
|
} |
|
779 |
|
static void init_once_local(void) |
|
780 |
|
{ |
|
781 |
|
int r; |
|
782 |
|
|
|
783 |
|
dec_l = 0; |
|
784 |
|
r = snd_output_stdio_attach(&pcm_pout_l, stdout, 0); |
|
785 |
|
if (r < 0) |
|
786 |
|
FATALA("alsa:unable to attach stdout\n"); |
|
787 |
|
r = snd_output_stdio_attach(&pcm_perr_l, stderr, 0); |
|
788 |
|
if (r < 0) |
|
789 |
|
FATALA("alsa:unable to attach stderr\n"); |
|
790 |
|
memset(pcm_silence_bufs_l, 0, sizeof(pcm_silence_bufs_l)); |
|
791 |
|
} |
|
792 |
|
static void dec_a_unref_all(void) |
|
793 |
|
{ |
|
794 |
|
u16 set; |
|
795 |
|
|
|
796 |
|
set = 0; |
|
797 |
|
loop { |
|
798 |
|
if (set == dec_sets_p.n) |
|
799 |
|
break; |
|
800 |
|
avutil_audio_set_unref(dec_sets_p.a[set]); |
|
801 |
|
++set; |
|
802 |
|
} |
|
803 |
|
dec_sets_p.n = 0; |
|
804 |
|
} |
File npv/audio/public/code.frag.c added (mode: 100644) (index 0000000..2eca537) |
|
1 |
|
static void dec_ctx_cfg(avcodec_params_t *params) |
|
2 |
|
{ |
|
3 |
|
int r; |
|
4 |
|
|
|
5 |
|
dec_l = avcodec_find_dec(params->codec_id); |
|
6 |
|
if (dec_l == 0) |
|
7 |
|
FATALA("ffmpeg:unable to find a proper decoder\n"); |
|
8 |
|
avcodec_free_context(&dec_ctx_p); |
|
9 |
|
dec_ctx_p = avcodec_alloc_ctx(dec_l); |
|
10 |
|
if (dec_ctx_p == 0) |
|
11 |
|
FATALA("ffmpeg:unable to allocate a decoder context\n"); |
|
12 |
|
/* XXX: useless ? */ |
|
13 |
|
r = avcodec_params_to_ctx(dec_ctx_p, params); |
|
14 |
|
if (r < 0) |
|
15 |
|
FATALA("ffmpeg:unable to apply stream codec parameters in codec context\n"); |
|
16 |
|
/* XXX: ffmpeg thread count default is 1, set to 0 = auto */ |
|
17 |
|
dec_ctx_p->thread_count = 0; |
|
18 |
|
r = avcodec_open2(dec_ctx_p, dec_l, 0); |
|
19 |
|
if (r < 0) |
|
20 |
|
FATALA("ffmpeg:unable to open the decoder context\n"); |
|
21 |
|
} |
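
dec_ctx_cfg is the project-local spelling of the standard decoder bring-up. With the stock libavcodec names the same sequence is roughly as follows (params is an AVCodecParameters pointer taken from the stream; return checks omitted):

const AVCodec *dec = avcodec_find_decoder(params->codec_id);
AVCodecContext *ctx = avcodec_alloc_context3(dec);

avcodec_parameters_to_context(ctx, params);	/* copy the stream parameters */
ctx->thread_count = 0;				/* 0 = let ffmpeg choose */
avcodec_open2(ctx, dec, 0);
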
|
22 |
|
#define AGAIN 0 |
|
23 |
|
#define HAVE_DEC_SET 1 |
|
24 |
|
#define EOF_DEC 2 |
|
25 |
|
static u8 dec_set_try_get(void) |
|
26 |
|
{ |
|
27 |
|
int r; |
|
28 |
|
u32 last; |
|
29 |
|
|
|
30 |
|
if (dec_sets_p.eof_receive) |
|
31 |
|
return EOF_DEC; |
|
32 |
|
if (dec_sets_p.n == dec_sets_p.n_max) |
|
33 |
|
dec_a_grow(); |
|
34 |
|
/* will unref any previous dec_sets_p.a[x] bufs for us */ |
|
35 |
|
last = dec_sets_p.n; |
|
36 |
|
r = avcodec_receive_audio_set(dec_ctx_p, dec_sets_p.a[last]); |
|
37 |
|
if (r == AVUTIL_AVERROR(EAGAIN)) { |
|
38 |
|
return AGAIN; |
|
39 |
|
} else if (r == 0) { |
|
40 |
|
++dec_sets_p.n; |
|
41 |
|
return HAVE_DEC_SET; |
|
42 |
|
} else if (r == AVUTIL_AVERROR_EOF) { |
|
43 |
|
POUTA("ffmpeg:last decoder set of frames reached (receiving)\n"); |
|
44 |
|
dec_sets_p.eof_receive = true; |
|
45 |
|
return EOF_DEC; |
|
46 |
|
} |
|
47 |
|
FATALA("ffmpeg:error while receiving a set of frames from the decoder\n"); |
|
48 |
|
} |
|
49 |
|
#undef AGAIN |
|
50 |
|
#undef HAVE_DEC_SET |
|
51 |
|
#undef EOF_DEC |
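
dec_set_try_get mirrors the receive side of the send/receive decoder API; with the stock names, the drain-whatever-is-available loop that dec_sets_get_avail builds on top of it is usually written as (ctx is assumed to be an opened AVCodecContext):

AVFrame *frame = av_frame_alloc();
int r;

for (;;) {
	r = avcodec_receive_frame(ctx, frame);	/* unrefs any previous content */
	if (r == AVERROR(EAGAIN) || r == AVERROR_EOF)
		break;		/* need more packets, or decoder fully drained */
	if (r < 0)
		break;		/* real decoding error */
	/* one decoded frame is available in frame */
	av_frame_unref(frame);
}
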
|
52 |
|
#define AGAIN 0 |
|
53 |
|
#define HAVE_DEC_SET 1 |
|
54 |
|
#define EOF_DEC 2 |
|
55 |
|
static void dec_sets_get_avail(void) { loop |
|
56 |
|
{ |
|
57 |
|
u8 r; |
|
58 |
|
|
|
59 |
|
r = dec_set_try_get(); |
|
60 |
|
if (r == HAVE_DEC_SET) |
|
61 |
|
continue; |
|
62 |
|
else if (r == AGAIN || r == EOF_DEC) |
|
63 |
|
break; |
|
64 |
|
}} |
|
65 |
|
#undef AGAIN |
|
66 |
|
#undef HAVE_DEC_SET |
|
67 |
|
#undef EOF_DEC |
|
68 |
|
static void draining_state_evt(void) |
|
69 |
|
{ |
|
70 |
|
int r; |
|
71 |
|
uint64_t exps_n; |
|
72 |
|
|
|
73 |
|
r = read(draining_timer_fd_p, &exps_n, sizeof(exps_n)); |
|
74 |
|
if (r == -1) |
|
75 |
|
FATALA("unable to read the number of draining state timer expirations\n"); |
|
76 |
|
draining_state_handle(); |
|
77 |
|
} |
|
78 |
|
#define AGAIN 0 |
|
79 |
|
#define RECOVERED 1 |
|
80 |
|
static void evt_pcm_write(void) |
|
81 |
|
{ |
|
82 |
|
snd_pcm_sfrs_t alsa_r; |
|
83 |
|
snd_pcm_ufrs_t ufrs_n; |
|
84 |
|
u8 r_recover; |
|
85 |
|
|
|
86 |
|
alsa_r = snd_pcm_avail(pcm_p); |
|
87 |
|
r_recover = alsa_recover(alsa_r); |
|
88 |
|
if (r_recover == AGAIN || r_recover == RECOVERED) |
|
89 |
|
return; |
|
90 |
|
/* r_recover == CONTINUE */ |
|
91 |
|
ufrs_n = (snd_pcm_ufrs_t)alsa_r; |
|
92 |
|
if (npv_paused_p) |
|
93 |
|
pcm_silence_frs_write(ufrs_n); |
|
94 |
|
else |
|
95 |
|
pcm_filt_frs_write(ufrs_n); |
|
96 |
|
} |
|
97 |
|
#undef AGAIN |
|
98 |
|
#undef RECOVERED |
|
99 |
|
static void pcm_silence_bufs_cfg(bool print_info) |
|
100 |
|
{ |
|
101 |
|
int r; |
|
102 |
|
snd_pcm_hw_params_t *hw_params; |
|
103 |
|
snd_pcm_ufrs_t buf_ufrs_n; |
|
104 |
|
snd_pcm_fmt_t fmt; |
|
105 |
|
snd_pcm_access_t access; |
|
106 |
|
unsigned int chans_n; |
|
107 |
|
u8 c; |
|
108 |
|
|
|
109 |
|
r = snd_pcm_hw_params_malloc(&hw_params); |
|
110 |
|
if (r < 0) |
|
111 |
|
FATALA("silence:alsa:unable to allocate memory for a hardware parameters container\n"); |
|
112 |
|
r = snd_pcm_hw_params_current(pcm_p, hw_params); |
|
113 |
|
if (r != 0) |
|
114 |
|
FATALA("silence:alsa:unable to get the pcm hardware parameters\n"); |
|
115 |
|
r = snd_pcm_hw_params_get_buf_sz(hw_params, &buf_ufrs_n); |
|
116 |
|
if (r < 0) |
|
117 |
|
FATALA("silence:alsa:unable to get the number of frames in the pcm buffer\n"); |
|
118 |
|
r = snd_pcm_hw_params_get_format(hw_params, &fmt); |
|
119 |
|
if (r < 0) |
|
120 |
|
FATALA("silence:alsa:unable to get the pcm format\n"); |
|
121 |
|
r = snd_pcm_hw_params_get_access(hw_params, &access); |
|
122 |
|
if (r < 0) |
|
123 |
|
FATALA("silence:alsa:unable to get the pcm access mode\n"); |
|
124 |
|
r = snd_pcm_hw_params_get_channels(hw_params, &chans_n); |
|
125 |
|
if (r < 0) |
|
126 |
|
FATALA("silence:alsa:unable to get the pcm number of channels\n"); |
|
127 |
|
|
|
128 |
|
/* wipe silence bufs first */ |
|
129 |
|
c = 0; |
|
130 |
|
loop { |
|
131 |
|
if (c == AVUTIL_DATA_PTRS_N) |
|
132 |
|
break; |
|
133 |
|
if (pcm_silence_bufs_l[c] != 0) { |
|
134 |
|
free(pcm_silence_bufs_l[c]); |
|
135 |
|
pcm_silence_bufs_l[c] = 0; |
|
136 |
|
} |
|
137 |
|
++c; |
|
138 |
|
} |
|
139 |
|
if (access == SND_PCM_ACCESS_RW_INTERLEAVED |
|
140 |
|
|| access == SND_PCM_ACCESS_MMAP_INTERLEAVED) { |
|
141 |
|
ssize_t buf_bytes_n; |
|
142 |
|
|
|
143 |
|
buf_bytes_n = snd_pcm_frames_to_bytes(pcm_p, |
|
144 |
|
(snd_pcm_sframes_t)buf_ufrs_n); |
|
145 |
|
if (buf_bytes_n <= 0) |
|
146 |
|
FATALA("silence:alsa:interleaved:unable to get the pcm number of bytes of all buffer frames\n"); |
|
147 |
|
pcm_silence_bufs_l[0] = malloc((size_t)buf_bytes_n); |
|
148 |
|
if (pcm_silence_bufs_l[0] == 0) |
|
149 |
|
FATALA("silence:interleaved:unable to allocate the silence buffer of %d bytes\n", (int)buf_bytes_n); |
|
150 |
|
if (print_info) |
|
151 |
|
POUTA("silence:interleaved:buffer of %d bytes is allocated\n", (int)buf_bytes_n); |
|
152 |
|
r = snd_pcm_format_set_silence(fmt, pcm_silence_bufs_l[0], |
|
153 |
|
(unsigned int)buf_ufrs_n); |
|
154 |
|
if (r < 0) |
|
155 |
|
FATALA("silence:interleaved:unable to fill the buffer with silence\n"); |
|
156 |
|
POUTA("silence:non planar format:silence buffer filled with %u silence frames\n", (unsigned int)buf_ufrs_n); |
|
157 |
|
} else if (access == SND_PCM_ACCESS_RW_NONINTERLEAVED |
|
158 |
|
|| access == SND_PCM_ACCESS_MMAP_NONINTERLEAVED) { |
|
159 |
|
ssize_t buf_bytes_n; |
|
160 |
|
long buf_samples_n; |
|
161 |
|
|
|
162 |
|
buf_samples_n = (long)buf_ufrs_n; |
|
163 |
|
buf_bytes_n = snd_pcm_samples_to_bytes(pcm_p, buf_samples_n); |
|
164 |
|
if (buf_bytes_n <= 0) |
|
165 |
|
FATALA("silence:alsa:non interleaved:unable to get the pcm number of total bytes of all buffer samples\n"); |
|
166 |
|
c = 0; |
|
167 |
|
loop { |
|
168 |
|
if (c == chans_n) |
|
169 |
|
break; |
|
170 |
|
pcm_silence_bufs_l[c] = malloc((size_t)buf_bytes_n); |
|
171 |
|
if (pcm_silence_bufs_l[c] == 0) |
|
172 |
|
FATALA("silence:non interleaved:unable to allocate silence buffer %u of %d bytes\n", c, (int)buf_bytes_n); |
|
173 |
|
r = snd_pcm_format_set_silence(fmt, |
|
174 |
|
pcm_silence_bufs_l[c], |
|
175 |
|
(unsigned int)buf_samples_n); |
|
176 |
|
if (r < 0) |
|
177 |
|
FATALA("silence:non interleaved:unable to fill the buffer with silence\n"); |
|
178 |
|
if (print_info) |
|
179 |
|
POUTA("silence:non interleaved:buffer[%u] of %d bytes is allocated\n", c, (int)buf_bytes_n); |
|
180 |
|
++c; |
|
181 |
|
} |
|
182 |
|
if (print_info) |
|
183 |
|
POUTA("silence:non interleaved:allocated %u silence buffers for %u frames\n", chans_n, (unsigned int)buf_ufrs_n); |
|
184 |
|
} else |
|
185 |
|
FATALA("silence:the pcm access type is not supported\n"); |
|
186 |
|
snd_pcm_hw_params_free(hw_params); |
|
187 |
|
} |
|
188 |
|
static void init_once(u8 *pcm_str) |
|
189 |
|
{ |
|
190 |
|
init_once_local(); |
|
191 |
|
init_once_public(pcm_str); |
|
192 |
|
} |
|
193 |
|
static void dec_flush(void) |
|
194 |
|
{ |
|
195 |
|
pkt_q_unref_all(pkt_q_p); |
|
196 |
|
dec_a_unref_all(); |
|
197 |
|
dec_sets_p.eof_receive = false; |
|
198 |
|
avcodec_flush_bufs(dec_ctx_p); |
|
199 |
|
} |
|
200 |
|
static void pcm_cfg(snd_pcm_t *pcm, unsigned int chans_n, unsigned int rate, |
|
201 |
|
enum avutil_audio_fr_fmt_t ff_fmt) |
|
202 |
|
{ |
|
203 |
|
pcm_cfg_hw(pcm, chans_n, rate, ff_fmt); |
|
204 |
|
pcm_cfg_sw(pcm); |
|
205 |
|
POUTA("ALSA PCM DUMP START-------------------------------------------------------------\n"); |
|
206 |
|
snd_pcm_dump(pcm, pcm_pout_l); |
|
207 |
|
POUTA("ALSA PCM DUMP END---------------------------------------------------------------\n"); |
|
208 |
|
} |
|
209 |
|
static void pcm2ff(snd_pcm_t *pcm, enum avutil_audio_fr_fmt_t *ff_fmt, |
|
210 |
|
int *ff_rate, int *ff_chans_n, uint64_t *ff_chans_layout, |
|
211 |
|
bool print_info) |
|
212 |
|
{ |
|
213 |
|
int r; |
|
214 |
|
snd_pcm_hw_params_t *hw_params; |
|
215 |
|
snd_pcm_access_t pcm_access; |
|
216 |
|
snd_pcm_fmt_t pcm_fmt; |
|
217 |
|
unsigned int pcm_rate; |
|
218 |
|
unsigned int pcm_chans_n; |
|
219 |
|
|
|
220 |
|
r = snd_pcm_hw_params_malloc(&hw_params); |
|
221 |
|
if (r < 0) |
|
222 |
|
FATALA("alsa:unable to allocate hardware parameters context for ffmpeg filter wiring\n"); |
|
223 |
|
r = snd_pcm_hw_params_current(pcm, hw_params); |
|
224 |
|
if (r != 0) |
|
225 |
|
FATALA("alsa:unable to get current hardware parameters for ffmpeg filter wiring\n"); |
|
226 |
|
r = snd_pcm_hw_params_get_access(hw_params, &pcm_access); |
|
227 |
|
if (r < 0) |
|
228 |
|
FATALA("alsa:unable to get the pcm access for ffmpeg filter wiring\n"); |
|
229 |
|
r = snd_pcm_hw_params_get_fmt(hw_params, &pcm_fmt); |
|
230 |
|
if (r < 0) |
|
231 |
|
FATALA("alsa:unable to get the pcm format for ffmpeg filter wiring\n"); |
|
232 |
|
/*--------------------------------------------------------------------*/ |
|
233 |
|
pcm_layout2ff_fmt_strict(pcm_fmt, pcm_access, ff_fmt, print_info); |
|
234 |
|
/*--------------------------------------------------------------------*/ |
|
235 |
|
r = snd_pcm_hw_params_get_rate(hw_params, &pcm_rate, |
|
236 |
|
SND_PCM_ST_PLAYBACK); |
|
237 |
|
if (r < 0) |
|
238 |
|
FATALA("alsa:unable to get the pcm rate for ffmpeg filter wiring\n"); |
|
239 |
|
*ff_rate = (int)pcm_rate; |
|
240 |
|
r = snd_pcm_hw_params_get_chans_n(hw_params, &pcm_chans_n); |
|
241 |
|
if (r < 0) |
|
242 |
|
FATALA("alsa:unable to get the pcm count of channels for ffmpeg filter wiring\n"); |
|
243 |
|
*ff_chans_n = (int)pcm_chans_n; |
|
244 |
|
/*--------------------------------------------------------------------*/ |
|
245 |
|
*ff_chans_layout = pcm_chmaps2ff_chans_layout(pcm, pcm_chans_n, |
|
246 |
|
print_info); |
|
247 |
|
/*--------------------------------------------------------------------*/ |
|
248 |
|
snd_pcm_hw_params_free(hw_params); |
|
249 |
|
} |
File npv/local/code.frag.c added (mode: 100644) (index 0000000..4fd08bd) |
|
1 |
|
/* meh... */ |
|
2 |
|
/*NSPC*/ |
|
3 |
|
static u8 *ts_to_str(int64_t ts, avutil_rational_t time_base, |
|
4 |
|
int64_t *remaining) |
|
5 |
|
{ |
|
6 |
|
static u8 str[sizeof("~S00:00:00.000 remains S9223372036854775807 time base units")]; |
|
7 |
|
bool is_neg; |
|
8 |
|
int64_t hours_n; |
|
9 |
|
int64_t mins_n; |
|
10 |
|
int64_t secs_n; |
|
11 |
|
int64_t msecs_n; |
|
12 |
|
int64_t one_hour; /* in ffmpeg time_base units */ |
|
13 |
|
int64_t one_min; /* in ffmpeg time_base units */ |
|
14 |
|
int64_t one_sec; /* in ffmpeg time_base units */ |
|
15 |
|
int64_t one_msec; /* in ffmpeg time_base units */ |
|
16 |
|
|
|
17 |
|
if (ts < 0) { |
|
18 |
|
ts = -ts; |
|
19 |
|
is_neg = true; |
|
20 |
|
} else |
|
21 |
|
is_neg = false; |
|
22 |
|
|
|
23 |
|
one_hour = INT64_C(3600) * (int64_t)time_base.den |
|
24 |
|
/ (int64_t)time_base.num; |
|
25 |
|
one_min = INT64_C(60) * (int64_t)time_base.den |
|
26 |
|
/ (int64_t)time_base.num; |
|
27 |
|
one_sec = (int64_t)time_base.den / (int64_t)time_base.num; |
|
28 |
|
one_msec = one_sec / INT64_C(1000); |
|
29 |
|
|
|
30 |
|
hours_n = ts / one_hour; |
|
31 |
|
|
|
32 |
|
*remaining = ts % one_hour; |
|
33 |
|
mins_n = *remaining / one_min; |
|
34 |
|
|
|
35 |
|
*remaining = *remaining % one_min; |
|
36 |
|
secs_n = *remaining / one_sec; |
|
37 |
|
|
|
38 |
|
*remaining = *remaining % one_sec; |
|
39 |
|
msecs_n = *remaining / one_msec; |
|
40 |
|
|
|
41 |
|
/* account for all rounding errors */ |
|
42 |
|
*remaining = ts - (hours_n * one_hour + mins_n * one_min |
|
43 |
|
+ secs_n * one_sec + msecs_n * one_msec); |
|
44 |
|
if (!is_neg) |
|
45 |
|
snprintf(str, sizeof(str), "%02"PRId64":%02"PRId64":%02"PRId64".%03"PRId64, hours_n, mins_n, secs_n, msecs_n); |
|
46 |
|
else { |
|
47 |
|
str[0] = '-'; |
|
48 |
|
snprintf(str + 1, sizeof(str) - 1, "%02"PRId64":%02"PRId64":%02"PRId64".%03"PRId64, hours_n, mins_n, secs_n, msecs_n); |
|
49 |
|
} |
|
50 |
|
return str; |
|
51 |
|
} |
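A worked example of the time_base arithmetic above, assuming a typical 90 kHz stream time base (the numbers are only an illustration):

/* worked example (assumed time base 1/90000, i.e. 90 kHz):
 *   one_sec = 90000, one_min = 5400000, one_hour = 324000000, one_msec = 90
 * for ts = 448200030:
 *   hours_n = 448200030 / 324000000 = 1    (remainder 124200030)
 *   mins_n  = 124200030 / 5400000   = 23   (remainder 30)
 *   secs_n  = 30 / 90000            = 0
 *   msecs_n = 30 / 90               = 0
 * so the returned string is "01:23:00.000" and *remaining = 30 units. */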
|
52 |
|
/*--------------------------------------------------------------------------*/ |
|
53 |
|
/* |
|
54 |
|
* block as much as possible. |
|
55 |
|
* handle only async "usual" sigs, with sync signalfd. |
|
56 |
|
* allow some signals to go thru though. many sync signals can only |
|
57 |
|
* be handled with an async handler. |
|
58 |
|
* always presume the process "controlling terminal" is different than the |
|
59 |
|
* terminal connected on standard input and standard output |
|
60 |
|
*/ |
|
61 |
|
/*NSPC*/ |
|
62 |
|
static void sigs_init_once(void) |
|
63 |
|
{ |
|
64 |
|
int r; |
|
65 |
|
sigset_t sset; |
|
66 |
|
|
|
67 |
|
r = sigfillset(&sset); |
|
68 |
|
if (r == -1) |
|
69 |
|
FATAL("unable to get a full signal mask\n"); |
|
70 |
|
|
|
71 |
|
/* the "controlling terminal" line asks for a core dump, leave it be */ |
|
72 |
|
r = sigdelset(&sset, SIGQUIT); |
|
73 |
|
if (r == -1) |
|
74 |
|
FATAL("unable to remove SIGQUIT from our signal mask\n"); |
|
75 |
|
|
|
76 |
|
r = pthread_sigmask(SIG_SETMASK, &sset, 0); |
|
77 |
|
if (r != 0) |
|
78 |
|
FATAL("unable to \"block\" \"all\" signals\n"); |
|
79 |
|
|
|
80 |
|
/* from here, we "steal" signals with signalfd */ |
|
81 |
|
|
|
82 |
|
r = sigemptyset(&sset); |
|
83 |
|
if (r == -1) |
|
84 |
|
FATAL("unable to get an empty signal mask\n"); |
|
85 |
|
/* we are asked nicely to terminate */ |
|
86 |
|
r = sigaddset(&sset, SIGTERM); |
|
87 |
|
if (r == -1) |
|
88 |
|
FATAL("unable to add SIGTERM to our signal mask\n"); |
|
89 |
|
/* the "controlling terminal" line (^c) asks nicely to terminate */ |
|
90 |
|
r = sigaddset(&sset, SIGINT); |
|
91 |
|
if (r == -1) |
|
92 |
|
FATAL("unable to add SIGINT to our signal mask\n"); |
|
93 |
|
|
|
94 |
|
r = signalfd(-1, &sset, SFD_NONBLOCK); |
|
95 |
|
if (r == -1) |
|
96 |
|
FATAL("unable to get a signalfd file descriptor\n"); |
|
97 |
|
sig_fd_l = r; |
|
98 |
|
} |
|
99 |
|
/*NSPC*/ |
|
100 |
|
static void evt_init_once(void) |
|
101 |
|
{ |
|
102 |
|
ep_fd_p = epoll_create1(0); |
|
103 |
|
if (ep_fd_p == -1) |
|
104 |
|
FATAL("unable to create the epoll file descriptor\n"); |
|
105 |
|
} |
|
106 |
|
/*NSPC*/ |
|
107 |
|
static void evt_add_all_fds(void) |
|
108 |
|
{ |
|
109 |
|
int r; |
|
110 |
|
u8 i; |
|
111 |
|
struct epoll_event evt; |
|
112 |
|
|
|
113 |
|
/* signals */ |
|
114 |
|
evt.events = EPOLLIN; |
|
115 |
|
evt.data.fd = sig_fd_l; |
|
116 |
|
r = epoll_ctl(ep_fd_p, EPOLL_CTL_ADD, sig_fd_l, &evt); |
|
117 |
|
if (r == -1) |
|
118 |
|
FATAL("unable to add the signalfd file descriptor to the epoll file descriptor\n"); |
|
119 |
|
/*--------------------------------------------------------------------*/ |
|
120 |
|
/* the input timer */ |
|
121 |
|
evt.events = EPOLLIN; |
|
122 |
|
evt.data.fd = input_timer_fd_p; |
|
123 |
|
r = epoll_ctl(ep_fd_p, EPOLL_CTL_ADD, input_timer_fd_p, &evt); |
|
124 |
|
if (r == -1) |
|
125 |
|
FATAL("unable to add the input timer file descriptor\n"); |
|
126 |
|
/*--------------------------------------------------------------------*/ |
|
127 |
|
/* the video timer */ |
|
128 |
|
evt.events = EPOLLIN; |
|
129 |
|
evt.data.fd = video_timer_fd_p; |
|
130 |
|
r = epoll_ctl(ep_fd_p, EPOLL_CTL_ADD, video_timer_fd_p, &evt); |
|
131 |
|
if (r == -1) |
|
132 |
|
FATAL("unable to add the video timer file descriptor\n"); |
|
133 |
|
/*--------------------------------------------------------------------*/ |
|
134 |
|
/* the x11 xcb file descriptor */ |
|
135 |
|
evt.events = EPOLLIN; |
|
136 |
|
evt.data.fd = npv_xcb_p.fd; |
|
137 |
|
r = epoll_ctl(ep_fd_p, EPOLL_CTL_ADD, npv_xcb_p.fd, &evt); |
|
138 |
|
if (r == -1) |
|
139 |
|
FATAL("unable to add the x11 xcb file descriptor\n"); |
|
140 |
|
/*--------------------------------------------------------------------*/ |
|
141 |
|
/* alsa pcm poll file descriptors */ |
|
142 |
|
i = 0; |
|
143 |
|
loop { |
|
144 |
|
if (i == audio_pcm_pollfds_n_p) |
|
145 |
|
break; |
|
146 |
|
evt.events = audio_pcm_pollfds_p[i].events; |
|
147 |
|
evt.data.fd = audio_pcm_pollfds_p[i].fd; |
|
148 |
|
r = epoll_ctl(ep_fd_p, EPOLL_CTL_ADD, audio_pcm_pollfds_p[i].fd, |
|
149 |
|
&evt); |
|
150 |
|
if (r == -1) |
|
151 |
|
FATAL("unable to add alsa poll file descriptor index %d to epoll file descriptor\n", i); |
|
152 |
|
++i; |
|
153 |
|
} |
|
154 |
|
/*--------------------------------------------------------------------*/ |
|
155 |
|
/* the draining timer */ |
|
156 |
|
evt.events = EPOLLIN; |
|
157 |
|
evt.data.fd = audio_draining_timer_fd_p; |
|
158 |
|
r = epoll_ctl(ep_fd_p, EPOLL_CTL_ADD, audio_draining_timer_fd_p, &evt); |
|
159 |
|
if (r == -1) |
|
160 |
|
FATAL("unable to add the draining timer file descriptor\n"); |
|
161 |
|
} |
|
162 |
|
/*NSPC*/ |
|
163 |
|
static void evt_sigs(void) |
|
164 |
|
{ |
|
165 |
|
int r; |
|
166 |
|
struct signalfd_siginfo siginfo; |
|
167 |
|
|
|
168 |
|
/* no short reads */ |
|
169 |
|
r = read(sig_fd_l, &siginfo, sizeof(siginfo)); |
|
170 |
|
if (r != sizeof(siginfo)) |
|
171 |
|
FATAL("unable to read signal information\n"); |
|
172 |
|
|
|
173 |
|
switch (siginfo.ssi_signo) { |
|
174 |
|
case SIGTERM: |
|
175 |
|
EXIT("received SIGTERM\n"); |
|
176 |
|
case SIGINT: |
|
177 |
|
EXIT("received SIGINT\n"); |
|
178 |
|
default: |
|
179 |
|
WARNING("signal handle:unwanted signal %u received, discarding\n", siginfo.ssi_signo); |
|
180 |
|
} |
|
181 |
|
} |
|
182 |
|
/*NSPC*/ |
|
183 |
|
static void evt_accumulate(struct epoll_event *evt, bool *have_evt_sigs, |
|
184 |
|
bool *have_evt_pcm, bool *have_evt_video, bool *have_evt_pcm_draining, |
|
185 |
|
bool *have_evt_x11, bool *have_evt_input) |
|
186 |
|
{ |
|
187 |
|
u8 i; |
|
188 |
|
|
|
189 |
|
if (evt->data.fd == sig_fd_l) { |
|
190 |
|
if ((evt->events & EPOLLIN) != 0) { |
|
191 |
|
*have_evt_sigs = true; |
|
192 |
|
return; |
|
193 |
|
} |
|
194 |
|
FATAL("event loop wait:signal:unexpected event\n"); |
|
195 |
|
} |
|
196 |
|
/*-------------------------------------------------------------------*/ |
|
197 |
|
/* only update alsa fds */ |
|
198 |
|
i = 0; |
|
199 |
|
loop { |
|
200 |
|
if (i == audio_pcm_pollfds_n_p) |
|
201 |
|
break; |
|
202 |
|
if (evt->data.fd == audio_pcm_pollfds_p[i].fd) { |
|
203 |
|
audio_pcm_pollfds_p[i].revents = evt->events; |
|
204 |
|
*have_evt_pcm = true; |
|
205 |
|
return; |
|
206 |
|
} |
|
207 |
|
++i; |
|
208 |
|
} |
|
209 |
|
/*-------------------------------------------------------------------*/ |
|
210 |
|
if (evt->data.fd == npv_xcb_p.fd) { |
|
211 |
|
if ((evt->events & EPOLLIN) != 0) |
|
212 |
|
*have_evt_x11 = true; |
|
213 |
|
return; |
|
214 |
|
} |
|
215 |
|
/*-------------------------------------------------------------------*/ |
|
216 |
|
if (evt->data.fd == video_timer_fd_p) { |
|
217 |
|
if ((evt->events & EPOLLIN) != 0) { |
|
218 |
|
*have_evt_video = true; |
|
219 |
|
return; |
|
220 |
|
} |
|
221 |
|
FATAL("event loop wait:video:unexpected event\n"); |
|
222 |
|
} |
|
223 |
|
/*-------------------------------------------------------------------*/ |
|
224 |
|
if (evt->data.fd == input_timer_fd_p) { |
|
225 |
|
if ((evt->events & EPOLLIN) != 0) { |
|
226 |
|
*have_evt_input = true; |
|
227 |
|
return; |
|
228 |
|
} |
|
229 |
|
FATAL("event loop wait:input:unexpected event\n"); |
|
230 |
|
} |
|
231 |
|
/*-------------------------------------------------------------------*/ |
|
232 |
|
if (evt->data.fd == audio_draining_timer_fd_p) { |
|
233 |
|
if ((evt->events & EPOLLIN) != 0) { |
|
234 |
|
*have_evt_pcm_draining = true; |
|
235 |
|
return; |
|
236 |
|
} |
|
237 |
|
FATAL("event loop wait:audio draining timer:unexpected event\n"); |
|
238 |
|
} |
|
239 |
|
} |
|
240 |
|
/* |
|
241 |
|
* XXX: remember that all heavy lifting should be done in other threads. |
|
242 |
|
* this thread should not "block" or perform "expensive" work. |
|
243 |
|
* "blocking", "expensive" work should be offloaded to other threads. |
|
244 |
|
*/ |
|
245 |
|
#define EPOLL_EVTS_N 32 /* why not */ |
|
246 |
|
/*NSPC*/ |
|
247 |
|
static void evts_loop(void) |
|
248 |
|
{ |
|
249 |
|
int fds_n; |
|
250 |
|
int fd_idx; |
|
251 |
|
struct epoll_event evts[EPOLL_EVTS_N]; |
|
252 |
|
bool have_evt_sigs; |
|
253 |
|
bool have_evt_pcm; |
|
254 |
|
bool have_evt_video; |
|
255 |
|
bool have_evt_pcm_draining; |
|
256 |
|
bool have_evt_x11; |
|
257 |
|
bool have_evt_input; |
|
258 |
|
int r; |
|
259 |
|
short pcm_evts; |
|
260 |
|
|
|
261 |
|
errno = 0; |
|
262 |
|
memset(evts, 0, sizeof(evts)); |
|
263 |
|
fds_n = epoll_wait(ep_fd_p, evts, EPOLL_EVTS_N, -1); |
|
264 |
|
if (fds_n == -1) { |
|
265 |
|
if (errno == EINTR) { |
|
266 |
|
WARNING("event loop wait:was interrupted by a signal\n"); |
|
267 |
|
return; |
|
268 |
|
} |
|
269 |
|
FATAL("event loop wait:an error occurred\n"); |
|
270 |
|
} |
|
271 |
|
|
|
272 |
|
have_evt_sigs = false; |
|
273 |
|
have_evt_pcm = false; |
|
274 |
|
have_evt_video = false; |
|
275 |
|
have_evt_pcm_draining = false; |
|
276 |
|
have_evt_x11 = false; |
|
277 |
|
have_evt_input = false; |
|
278 |
|
|
|
279 |
|
fd_idx = 0; |
|
280 |
|
loop { |
|
281 |
|
if (fd_idx == fds_n) |
|
282 |
|
break; |
|
283 |
|
evt_accumulate(&evts[fd_idx], &have_evt_sigs, &have_evt_pcm, |
|
284 |
|
&have_evt_video, &have_evt_pcm_draining, |
|
285 |
|
&have_evt_x11, &have_evt_input); |
|
286 |
|
++fd_idx; |
|
287 |
|
} |
|
288 |
|
|
|
289 |
|
/* once we have our evts, we use a sort of priority order */ |
|
290 |
|
|
|
291 |
|
/* process any queued signals that we handle before anything else */ |
|
292 |
|
if (have_evt_sigs) |
|
293 |
|
evt_sigs(); |
|
294 |
|
/* |
|
295 |
|
* XXX: it may be more appropriate to break this into 2 steps: key inputs |
|
296 |
|
* (light processing), wins resizing (heavy processing) |
|
297 |
|
*/ |
|
298 |
|
/* key input and win resizing */ |
|
299 |
|
if (have_evt_x11) |
|
300 |
|
npv_xcb_evt(); |
|
301 |
|
/* XXX: once in audio draining mode, this should not really happen */ |
|
302 |
|
/* we are more sensitive to audio issues than video issues */ |
|
303 |
|
if (have_evt_pcm) { |
|
304 |
|
/* |
|
305 |
|
* since alsa could use several file descriptors, only once the |
|
306 |
|
* pollfds have been properly updated can we actually know we got |
|
307 |
|
* something from alsa |
|
308 |
|
*/ |
|
309 |
|
r = snd_pcm_poll_descriptors_revents(audio_pcm_p, |
|
310 |
|
audio_pcm_pollfds_p, audio_pcm_pollfds_n_p, &pcm_evts); |
|
311 |
|
if (r != 0) |
|
312 |
|
FATAL("alsa:error processing the poll file descriptors\n"); |
|
313 |
|
|
|
314 |
|
if ((pcm_evts & ~POLLOUT) != 0) |
|
315 |
|
FATAL("alsa:unexpected events\n"); |
|
316 |
|
|
|
317 |
|
if ((pcm_evts & POLLOUT) != 0) |
|
318 |
|
audio_evt_pcm_write(); |
|
319 |
|
} |
|
320 |
|
if (have_evt_video) |
|
321 |
|
video_timer_evt(); |
|
322 |
|
if (have_evt_input) |
|
323 |
|
input_timer_evt(); |
|
324 |
|
/* while audio is draining, video fr may need to be displayed */ |
|
325 |
|
if (have_evt_pcm_draining) |
|
326 |
|
audio_draining_state_evt(); |
|
327 |
|
} |
|
328 |
|
#undef EPOLL_EVTS_N |
|
329 |
|
/*NSPC*/ |
|
330 |
|
static void ff_log_stdout(void *a, int b, const char *fmt, va_list ap) |
|
331 |
|
{ |
|
332 |
|
vprintf(fmt, ap); |
|
333 |
|
} |
|
334 |
|
/*NSPC*/ |
|
335 |
|
static void usage(void) |
|
336 |
|
{ |
|
337 |
|
POUT("npv [-p alsa pcm] [-v volume(0..100)] [-h height in pixels] [-w width in pixels] [-help] url\n"); |
|
338 |
|
} |
|
339 |
|
/*NSPC*/ |
|
340 |
|
static void opts_parse(int argc, u8 **args, u8 **url, u16 *w, u16 *h, |
|
341 |
|
u8 **pcm_str, double *vol) |
|
342 |
|
{ |
|
343 |
|
int i; |
|
344 |
|
int url_idx; |
|
345 |
|
|
|
346 |
|
i = 1; |
|
347 |
|
url_idx = -1; |
|
348 |
|
loop { |
|
349 |
|
if (i == argc) |
|
350 |
|
break; |
|
351 |
|
|
|
352 |
|
if (strcmp("-p", args[i]) == 0) { |
|
353 |
|
unsigned long vol_ul; |
|
354 |
|
|
|
355 |
|
if ((i + 1) == argc) |
|
356 |
|
FATAL("-p:alsa pcm is missing\n"); |
|
357 |
|
|
|
358 |
|
*pcm_str = args[i + 1]; |
|
359 |
|
POUT("-p:alsa pcm \"%s\"\n", *pcm_str); |
|
360 |
|
i += 2; |
|
361 |
|
} else if (strcmp("-v", args[i]) == 0) { |
|
362 |
|
unsigned long vol_ul; |
|
363 |
|
|
|
364 |
|
if ((i + 1) == argc) |
|
365 |
|
FATAL("-v:initial volume option is missing\n"); |
|
366 |
|
|
|
367 |
|
vol_ul = strtoul(args[i + 1], 0, 10); |
|
368 |
|
if (100 < vol_ul) |
|
369 |
|
FATAL("-v:invalid volume value %lu (0..100)\n", vol_ul); |
|
370 |
|
|
|
371 |
|
*vol = (double)vol_ul / 100.; |
|
372 |
|
POUT("-v:using initial volume %f\n", *vol); |
|
373 |
|
i += 2; |
|
374 |
|
} else if (strcmp("-h", args[i]) == 0) { |
|
375 |
|
unsigned long h_ul; |
|
376 |
|
|
|
377 |
|
if ((i + 1) == argc) |
|
378 |
|
FATAL("-h:initial window pixel height is missing\n"); |
|
379 |
|
|
|
380 |
|
h_ul = strtoul(args[i + 1], 0, 10); |
|
381 |
|
if (h_ul == 0 || h_ul > U16_MAX) |
|
382 |
|
FATAL("-h:invalid window pixel height %lu (1..%lu)\n", h_ul, U16_MAX); |
|
383 |
|
*h = (u16)h_ul; |
|
384 |
|
POUT("-h:using initial window height %lu pixels\n", h_ul); |
|
385 |
|
i += 2; |
|
386 |
|
} else if (strcmp("-w", args[i]) == 0) { |
|
387 |
|
unsigned long w_ul; |
|
388 |
|
|
|
389 |
|
if ((i + 1) == argc) |
|
390 |
|
FATAL("-w:initial window pixel width is missing\n"); |
|
391 |
|
|
|
392 |
|
w_ul = strtoul(args[i + 1], 0, 10); |
|
393 |
|
if (w_ul == 0 || w_ul > U16_MAX) |
|
394 |
|
FATAL("-w:invalid window pixel width %lu (1..%lu)\n", w_ul, U16_MAX); |
|
395 |
|
*w = (u16)w_ul; |
|
396 |
|
POUT("-w:using initial window width %lu pixels\n", w_ul); |
|
397 |
|
i += 2; |
|
398 |
|
} else if (strcmp("-help", args[i]) == 0) { |
|
399 |
|
usage(); |
|
400 |
|
exit(0); |
|
401 |
|
} else { |
|
402 |
|
url_idx = i; |
|
403 |
|
++i; |
|
404 |
|
} |
|
405 |
|
} |
|
406 |
|
if (url_idx == -1) |
|
407 |
|
FATAL("missing url\n"); |
|
408 |
|
*url = args[url_idx]; |
|
409 |
|
POUT("url-->####%s####\n", *url); |
|
410 |
|
} |
|
411 |
|
/*NSPC*/ |
|
412 |
|
#define WIDTH_NOT_DEFINED 0 |
|
413 |
|
#define HEIGHT_NOT_DEFINED 0 |
|
414 |
|
static void init_once(u8 *url, u16 win_width, u16 win_height, u8 *pcm_str) |
|
415 |
|
{ |
|
416 |
|
evt_init_once(); |
|
417 |
|
sigs_init_once(); |
|
418 |
|
npv_vk_init_once(); /* generic plumbing */ |
|
419 |
|
audio_filt_init_once(); |
|
420 |
|
audio_init_once(pcm_str); /* before audio_st_idx_p is actually used */ |
|
421 |
|
video_init_once(); /* before video_st_idx_p is actually used */ |
|
422 |
|
clk_init_once(); |
|
423 |
|
input_init_once(); |
|
424 |
|
|
|
425 |
|
fmt_init_once(url); |
|
426 |
|
/* we need something to start with */ |
|
427 |
|
fmt_probe_best_sts(&video_st_idx_p, &audio_st_idx_p); |
|
428 |
|
if (win_width == WIDTH_NOT_DEFINED) |
|
429 |
|
win_width = fmt_ctx_p->sts[video_st_idx_p]->codecpar->width; |
|
430 |
|
if (win_height == HEIGHT_NOT_DEFINED) |
|
431 |
|
win_height = fmt_ctx_p->sts[video_st_idx_p]->codecpar->height; |
|
432 |
|
npv_xcb_init_once(win_width, win_height); |
|
433 |
|
npv_vk_surf_init_once(npv_xcb_p.c, npv_xcb_p.win_id); |
|
434 |
|
} |
|
435 |
|
#undef WIDTH_NOT_DEFINED |
|
436 |
|
#undef HEIGHT_NOT_DEFINED |
|
437 |
|
/*NSPC*/ |
|
438 |
|
#define PRINT_INFO true |
|
439 |
|
static void prepare(double initial_vol) |
|
440 |
|
{ |
|
441 |
|
enum avutil_audio_fr_fmt_t dst_fmt; |
|
442 |
|
int dst_rate; |
|
443 |
|
int dst_chans_n; |
|
444 |
|
uint64_t dst_chans_layout; |
|
445 |
|
|
|
446 |
|
audio_dec_ctx_cfg(fmt_ctx_p->sts[audio_st_idx_p]->codecpar); |
|
447 |
|
video_dec_ctx_cfg(fmt_ctx_p->sts[video_st_idx_p]->codecpar); |
|
448 |
|
/* |
|
449 |
|
* do our best to match the pcm cfg to audio ff dec output, BUT we don't |
|
450 |
|
* expect to match it "exactly": see right below why |
|
451 |
|
*/ |
|
452 |
|
audio_pcm_cfg(audio_pcm_p, audio_dec_ctx_p->chans_n, |
|
453 |
|
audio_dec_ctx_p->fr_rate, audio_dec_ctx_p->fr_fmt); |
|
454 |
|
/* use a ff filt to fill in the gap between the pcm and audio ff dec */ |
|
455 |
|
audio_pcm2ff(audio_pcm_p, &dst_fmt, &dst_rate, &dst_chans_n, |
|
456 |
|
&dst_chans_layout, PRINT_INFO); |
|
457 |
|
audio_filt_cfg( |
|
458 |
|
audio_dec_ctx_p->fr_fmt, audio_dec_ctx_p->fr_rate, |
|
459 |
|
audio_dec_ctx_p->chans_n, audio_dec_ctx_p->chans_layout, |
|
460 |
|
false, initial_vol, |
|
461 |
|
dst_fmt, dst_rate, dst_chans_n, dst_chans_layout, |
|
462 |
|
PRINT_INFO); |
|
463 |
|
audio_pcm_silence_bufs_cfg(PRINT_INFO); |
|
464 |
|
|
|
465 |
|
input_bootstrap_audio_video(); |
|
466 |
|
|
|
467 |
|
evt_add_all_fds(); |
|
468 |
|
} |
|
469 |
|
#undef PRINT_INFO |
|
470 |
|
static void cmd_info(void) |
|
471 |
|
{ |
|
472 |
|
//TODO: info OSD toggle |
|
473 |
|
} |
|
474 |
|
|
|
475 |
|
static void cmd_quit(void) |
|
476 |
|
{ |
|
477 |
|
EXIT("quit command received\n"); |
|
478 |
|
} |
|
479 |
|
/*NSPC*/ |
|
480 |
|
#define TS_FROM_CLK_OK 0 |
|
481 |
|
static void seek_x(s64 delta) |
|
482 |
|
{ |
|
483 |
|
int ri; |
|
484 |
|
u8 r8; |
|
485 |
|
avformat_st_t *audio_st; |
|
486 |
|
avformat_st_t *video_st; |
|
487 |
|
s64 new_audio_ts; |
|
488 |
|
s64 new_video_ts; |
|
489 |
|
s64 audio_now; |
|
490 |
|
s64 video_now; |
|
491 |
|
|
|
492 |
|
if (audio_draining_p) { |
|
493 |
|
WARNING("seek:audio is draining, seeking disabled\n"); |
|
494 |
|
return; |
|
495 |
|
} |
|
496 |
|
if ((fmt_ctx_p->ctx_flags & AVFMTCTX_UNSEEKABLE) != 0) { |
|
497 |
|
WARNING("seek:format is flagged unseekable\n"); |
|
498 |
|
return; |
|
499 |
|
} |
|
500 |
|
|
|
501 |
|
thdsws_wait_for_idle(video_scaler_p.ctx); |
|
502 |
|
|
|
503 |
|
r8 = clk_get_audio_st_ts(&audio_now); |
|
504 |
|
if (r8 != TS_FROM_CLK_OK) { |
|
505 |
|
WARNING("seek:audio:clock timestamp unavailable, ignoring command\n"); |
|
506 |
|
return; |
|
507 |
|
} |
|
508 |
|
r8 = clk_get_video_st_ts(&video_now); |
|
509 |
|
if (r8 != TS_FROM_CLK_OK) { |
|
510 |
|
WARNING("seek:video:clock timestamp unavailable, ignoring command\n"); |
|
511 |
|
return; |
|
512 |
|
} |
|
513 |
|
/*--------------------------------------------------------------------*/ |
|
514 |
|
audio_st = fmt_ctx_p->sts[audio_st_idx_p]; |
|
515 |
|
video_st = fmt_ctx_p->sts[video_st_idx_p]; |
|
516 |
|
|
|
517 |
|
new_audio_ts = audio_now + delta |
|
518 |
|
* audio_st->time_base.den / audio_st->time_base.num; |
|
519 |
|
/* rewind capping if possible */ |
|
520 |
|
if (audio_st->start_time != AV_NOPTS_VALUE) |
|
521 |
|
if (new_audio_ts < audio_st->start_time) |
|
522 |
|
new_audio_ts = audio_st->start_time; |
|
523 |
|
POUT("trying to seek to %"PRId64" audio stream time base units\n", new_audio_ts); |
|
524 |
|
|
|
525 |
|
ri = avformat_seek_pkt(fmt_ctx_p, audio_st->id, new_audio_ts, 0); |
|
526 |
|
if (ri < 0) { |
|
527 |
|
WARNING("unable to seek to %"PRId64" audio stream time base units\n", new_audio_ts); |
|
528 |
|
goto try_restore_audio; |
|
529 |
|
} |
|
530 |
|
POUT("audio seek to %"PRId64" audio stream time base units\n", new_audio_ts); |
|
531 |
|
/*--------------------------------------------------------------------*/ |
|
532 |
|
new_video_ts = video_now + delta |
|
533 |
|
* video_st->time_base.den / video_st->time_base.num; |
|
534 |
|
/* rewind capping if possible */ |
|
535 |
|
if (video_st->start_time != AV_NOPTS_VALUE) |
|
536 |
|
if (new_video_ts < video_st->start_time) |
|
537 |
|
new_video_ts = video_st->start_time; |
|
538 |
|
ri = avformat_seek_pkt(fmt_ctx_p, video_st->id, new_video_ts, 0); |
|
539 |
|
if (ri < 0) { |
|
540 |
|
WARNING("unable to seek to %"PRId64" video stream time base units (but audio was seeked)\n", new_video_ts); |
|
541 |
|
goto try_restore_video; |
|
542 |
|
} |
|
543 |
|
POUT("video seek to %"PRId64" video stream time base units\n", new_video_ts); |
|
544 |
|
flush: |
|
545 |
|
video_dec_flush(); |
|
546 |
|
audio_dec_flush(); |
|
547 |
|
audio_filt_flush(); |
|
548 |
|
fmt_flush(); |
|
549 |
|
clk_invalidate(); |
|
550 |
|
input_bootstrap_audio_video(); |
|
551 |
|
return; |
|
552 |
|
|
|
553 |
|
try_restore_video: |
|
554 |
|
ri = avformat_seek_pkt(fmt_ctx_p, video_st->id, video_now, 0); |
|
555 |
|
if (ri < 0) /* we don't send an application error */ |
|
556 |
|
EXIT("unable to restore video to %"PRId64" video stream time base units\n", video_now); |
|
557 |
|
try_restore_audio: |
|
558 |
|
ri = avformat_seek_pkt(fmt_ctx_p, audio_st->id, audio_now, 0); |
|
559 |
|
if (ri < 0) /* we don't send an application error */ |
|
560 |
|
EXIT("unable to restore audio to %"PRId64" audio stream time base units\n", audio_now); |
|
561 |
|
goto flush; |
|
562 |
|
} |
|
563 |
|
#undef TS_FROM_CLK_OK |
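seek_x() receives its delta in seconds (that is what the time_base.den/num rescale implies; SEEK_DELTA and SEEK_DELTA_BIG live in the config header) and converts it into each stream's own time base before seeking. A worked example with assumed stream time bases:

/* worked example (assumed values): delta = +10 s,
 * audio time_base = 1/48000, video time_base = 1/90000:
 *   new_audio_ts = audio_now + 10 * 48000 / 1 = audio_now + 480000
 *   new_video_ts = video_now + 10 * 90000 / 1 = video_now + 900000
 * the same wall-clock jump expressed in each stream's units, then capped
 * to start_time when the stream advertises one. */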
|
564 |
|
static void cmd_rewind(void) |
|
565 |
|
{ |
|
566 |
|
POUT("COMMAND:rewind\n"); |
|
567 |
|
seek_x(-SEEK_DELTA); |
|
568 |
|
} |
|
569 |
|
|
|
570 |
|
static void cmd_rewind_big(void) |
|
571 |
|
{ |
|
572 |
|
POUT("COMMAND:rewind big\n"); |
|
573 |
|
seek_x(-SEEK_DELTA_BIG); |
|
574 |
|
} |
|
575 |
|
|
|
576 |
|
static void cmd_fastforward(void) |
|
577 |
|
{ |
|
578 |
|
POUT("COMMAND:fastforward\n"); |
|
579 |
|
seek_x(SEEK_DELTA); |
|
580 |
|
} |
|
581 |
|
|
|
582 |
|
static void cmd_fastforward_big(void) |
|
583 |
|
{ |
|
584 |
|
POUT("COMMAND:fastforward big\n"); |
|
585 |
|
seek_x(SEEK_DELTA_BIG); |
|
586 |
|
} |
|
587 |
|
|
|
588 |
|
static void cmd_pause(void) |
|
589 |
|
{ |
|
590 |
|
if (audio_draining_p) { |
|
591 |
|
WARNING("pause:audio is draining, toggling pause is disabled\n"); |
|
592 |
|
return; |
|
593 |
|
} |
|
594 |
|
if (paused_p) { |
|
595 |
|
POUT("COMMAND:unpause\n"); |
|
596 |
|
paused_p = false; |
|
597 |
|
} else { |
|
598 |
|
POUT("COMMAND:pause\n"); |
|
599 |
|
paused_p = true; |
|
600 |
|
} |
|
601 |
|
} |
|
602 |
|
|
|
603 |
|
static void cmd_vol_up(void) |
|
604 |
|
{ |
|
605 |
|
audio_filt_cmd_vol_up(); |
|
606 |
|
} |
|
607 |
|
|
|
608 |
|
static void cmd_vol_down(void) |
|
609 |
|
{ |
|
610 |
|
audio_filt_cmd_vol_down(); |
|
611 |
|
} |
|
612 |
|
|
|
613 |
|
static void cmd_mute(void) |
|
614 |
|
{ |
|
615 |
|
audio_filt_cmd_mute(); |
|
616 |
|
} |
|
617 |
|
#define WIDTH_NOT_DEFINED 0 |
|
618 |
|
#define HEIGHT_NOT_DEFINED 0 |
|
619 |
|
int main(int argc, u8 **args) |
|
620 |
|
{ |
|
621 |
|
u16 win_width; |
|
622 |
|
u16 win_height; |
|
623 |
|
u8 *pcm_str; |
|
624 |
|
u8 *url; |
|
625 |
|
double initial_vol; |
|
626 |
|
|
|
627 |
|
/* "turn on utf8" processing in used libs if any *AND* locale system */ |
|
628 |
|
setlocale(LC_ALL, ""); |
|
629 |
|
/* av_log_set_level(AV_LOG_VERBOSE); */ |
|
630 |
|
/* av_log_set_level(AV_LOG_DEBUG); */ |
|
631 |
|
|
|
632 |
|
win_width = WIDTH_NOT_DEFINED; |
|
633 |
|
win_height = HEIGHT_NOT_DEFINED; |
|
634 |
|
url = 0; |
|
635 |
|
pcm_str = "default"; |
|
636 |
|
url = 0; |
|
637 |
|
initial_vol = 1.; |
|
638 |
|
opts_parse(argc, args, &url, &win_width, &win_height, &pcm_str, &initial_vol); |
|
639 |
|
|
|
640 |
|
init_once(url, win_width, win_height, pcm_str); |
|
641 |
|
prepare(initial_vol); |
|
642 |
|
|
|
643 |
|
/* switch the ffmpeg log to stdout for metadata/etc dump */ |
|
644 |
|
avutil_log_set_callback(ff_log_stdout); |
|
645 |
|
avformat_dump_fmt(fmt_ctx_p, 0, url, 0); |
|
646 |
|
avutil_log_set_callback(avutil_log_default_callback); |
|
647 |
|
|
|
648 |
|
video_timer_start(); |
|
649 |
|
input_timer_start(); |
|
650 |
|
|
|
651 |
|
loop evts_loop(); |
|
652 |
|
/* unreachable */ |
|
653 |
|
} |
|
654 |
|
#undef WIDTH_NOT_DEFINED |
|
655 |
|
#undef HEIGHT_NOT_DEFINED |
File npv/nyanvk/consts.h added (mode: 100644) (index 0000000..e07c820) |
|
1 |
|
#ifndef NYANVK_CONSTS_H |
|
2 |
|
#define NYANVK_CONSTS_H |
|
3 |
|
/* |
|
4 |
|
* this is public domain without any warranties of any kind |
|
5 |
|
* Sylvain BERTRAND |
|
6 |
|
*/ |
|
7 |
|
/* XXX: KEEP AN EYE ON ABBREVIATIONS, ALWAYS */ |
|
8 |
|
/* |
|
9 |
|
* 64 bits platforms: enums do default to 32 bits, but can go up to 64 bits |
|
10 |
|
* based on the range of vals they hold. this is important for |
|
11 |
|
* vulkan ABI which we will fix. |
|
12 |
|
* _individually_, each val is defaulted to 32bits, if possible, and signed |
|
13 |
|
* or not. |
|
14 |
|
* XXX: All vulkan enums use 32 bits storage |
|
15 |
|
*/ |
|
16 |
|
enum { |
|
17 |
|
vk_err_fragmented_pool = -12, |
|
18 |
|
vk_err_fmt_not_supported = -11, |
|
19 |
|
vk_err_too_many_objs = -10, |
|
20 |
|
vk_err_incompatible_drv = -9, |
|
21 |
|
vk_err_feature_not_present = -8, |
|
22 |
|
vk_err_ext_not_present = -7, |
|
23 |
|
vk_err_layer_not_present = -6, |
|
24 |
|
vk_err_mmap_failed = -5, |
|
25 |
|
vk_err_dev_lost = -4, |
|
26 |
|
vk_err_ini_failed = -3, |
|
27 |
|
vk_err_out_of_dev_mem = -2, |
|
28 |
|
vk_err_out_of_host_mem = -1, |
|
29 |
|
/*--------------------------------------------------------------------*/ |
|
30 |
|
vk_success = 0, |
|
31 |
|
vk_not_ready = 1, |
|
32 |
|
vk_timeout = 2, |
|
33 |
|
vk_incomplete = 5, |
|
34 |
|
vk_r_enum_max = 0x7fffffff |
|
35 |
|
}; |
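Each of these enums ends with a 0x7fffffff sentinel so the enumerated range always spans a full 32-bit signed int, which keeps the storage pinned to 32 bits as the comment above requires. A minimal sketch of what the sentinel guards, assuming a C11 compiler; the names are illustrative:

#include <assert.h>
#include <stdint.h>

/* illustrative: the sentinel forces the enumerated range up to INT32_MAX,
 * so an ABI-conforming compiler cannot pick a narrower (or, with huge
 * enumerators, a wider) underlying type than a 32-bit int */
enum sketch_result {
	sketch_err_out_of_host_mem = -1,
	sketch_success = 0,
	sketch_enum_max = 0x7fffffff
};

static_assert(sizeof(enum sketch_result) == sizeof(int32_t),
	"vulkan-style enums must use 32-bit storage");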
|
36 |
|
enum { |
|
37 |
|
vk_struct_type_instance_create_info = 1, |
|
38 |
|
vk_struct_type_dev_q_create_info = 2, |
|
39 |
|
vk_struct_type_dev_create_info = 3, |
|
40 |
|
vk_struct_type_submit_info = 4, |
|
41 |
|
vk_struct_type_mem_alloc_info = 5, |
|
42 |
|
vk_struct_type_sem_create_info = 9, |
|
43 |
|
vk_struct_type_img_create_info = 14, |
|
44 |
|
vk_struct_type_imgview_create_info = 15, |
|
45 |
|
vk_struct_type_shmod_create_info = 16, |
|
46 |
|
vk_struct_type_pl_sh_stage_create_info = 18, |
|
47 |
|
vk_struct_type_pl_vtx_input_state_create_info = 19, |
|
48 |
|
vk_struct_type_pl_input_assembly_state_create_info = 20, |
|
49 |
|
vk_struct_type_pl_viewport_state_create_info = 22, |
|
50 |
|
vk_struct_type_pl_raster_state_create_info = 23, |
|
51 |
|
vk_struct_type_pl_multisample_state_create_info = 24, |
|
52 |
|
vk_struct_type_pl_color_blend_state_create_info = 26, |
|
53 |
|
vk_struct_type_gfx_pl_create_info = 28, |
|
54 |
|
vk_struct_type_pl_layout_create_info = 30, |
|
55 |
|
vk_struct_type_fb_create_info = 37, |
|
56 |
|
vk_struct_type_cp_create_info = 39, |
|
57 |
|
vk_struct_type_cb_alloc_info = 40, |
|
58 |
|
vk_struct_type_cb_begin_info = 42, |
|
59 |
|
vk_struct_type_rp_begin_info = 43, |
|
60 |
|
vk_struct_type_img_mem_barrier = 45, |
|
61 |
|
/* extension number 2 or index 1, offset 0 */ |
|
62 |
|
vk_struct_type_swpchn_create_info = 1000000000 + 1000 + 0, |
|
63 |
|
/* extension number 2 or index 1, offset 1 */ |
|
64 |
|
vk_struct_type_present_info = 1000000000 + 1000 + 1, |
|
65 |
|
/* extension number 6 or index 5, offset 0 */ |
|
66 |
|
vk_struct_type_xcb_surf_create_info = 1000000000 + 5000 + 0, |
|
67 |
|
/* extension number 60 or index 59, offset 1 */ |
|
68 |
|
vk_struct_type_phydev_props = 1000000000 + 59000 + 1, |
|
69 |
|
/* extension number 60 or index 59, offset 5 */ |
|
70 |
|
vk_struct_type_q_fam_props = 1000000000 + 59000 + 5, |
|
71 |
|
/* extension number 60 or index 59, offset 6 */ |
|
72 |
|
vk_struct_type_phydev_mem_props = 1000000000 + 59000 + 6, |
|
73 |
|
/* extension number 60 or index 59, offset 10 */ |
|
74 |
|
vk_struct_type_acquire_next_img_info = 1000000000 + 59000 + 10, |
|
75 |
|
/* extension number 110 or index 109, offset 0 */ |
|
76 |
|
vk_struct_type_at_desc = 1000000000 + 109000 + 0, |
|
77 |
|
/* extension number 110 or index 109, offset 1 */ |
|
78 |
|
vk_struct_type_at_ref = 1000000000 + 109000 + 1, |
|
79 |
|
/* extension number 110 or index 109, offset 2 */ |
|
80 |
|
vk_struct_type_sp_desc = 1000000000 + 109000 + 2, |
|
81 |
|
/* extension number 110 or index 109, offset 4 */ |
|
82 |
|
vk_struct_type_rp_create_info = 1000000000 + 109000 + 4, |
|
83 |
|
/* extension number 110 or index 109, offset 5 */ |
|
84 |
|
vk_struct_type_sp_begin_info = 1000000000 + 109000 + 5, |
|
85 |
|
/* extension number 110 or index 109, offset 6 */ |
|
86 |
|
vk_struct_type_sp_end_info = 1000000000 + 109000 + 6, |
|
87 |
|
/* extension number 120 or index 119, offset 0 */ |
|
88 |
|
vk_struct_type_phydev_surf_info = 1000000000 + 119000 + 0, |
|
89 |
|
/* extension number 120 or index 119, offset 1 */ |
|
90 |
|
vk_struct_type_surf_caps = 1000000000 + 119000 + 1, |
|
91 |
|
/* extension number 120 or index 119, offset 2 */ |
|
92 |
|
vk_struct_type_surf_texel_mem_blk_conf = 1000000000 + 119000 + 2, |
|
93 |
|
/* extension number 147 or index 146, offset 1 */ |
|
94 |
|
vk_struct_type_img_mem_rqmts_info = 1000000000 + 146000 + 1, |
|
95 |
|
/* extension number 147 or index 146, offset 3 */ |
|
96 |
|
vk_struct_type_mem_rqmts = 1000000000 + 146000 + 3, |
|
97 |
|
/* extension number 158 or index 157, offset 1 */ |
|
98 |
|
vk_struct_type_bind_img_mem_info = 1000000000 + 157000 + 1, |
|
99 |
|
vk_struct_type_enum_max = 0x7fffffff |
|
100 |
|
}; |
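The hard-coded 1000000000 + N*1000 + offset values follow vulkan's registered-extension numbering: a base of 1000000000 plus a 1000-value block per extension, indexed by the extension number minus one. An illustrative helper macro (the header above intentionally keeps the literals):

/* illustrative only, not used by this header */
#define VK_EXT_ENUM_SKETCH(ext_number, offset) \
	(1000000000 + ((ext_number) - 1) * 1000 + (offset))
/* e.g. VK_EXT_ENUM_SKETCH(2, 0) == 1000001000, which is
 * vk_struct_type_swpchn_create_info above */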
|
101 |
|
enum { |
|
102 |
|
vk_phydev_type_other = 0, |
|
103 |
|
vk_phydev_type_integrated_gpu = 1, |
|
104 |
|
vk_phydev_type_discrete_gpu = 2, |
|
105 |
|
vk_phydev_type_virtual_gpu = 3, |
|
106 |
|
vk_phydev_type_cpu = 4, |
|
107 |
|
vk_phydev_type_enum_max = 0x7fffffff |
|
108 |
|
}; |
|
109 |
|
enum { |
|
110 |
|
vk_q_gfx_bit = 0x00000001, |
|
111 |
|
vk_q_compute_bit = 0x00000002, |
|
112 |
|
vk_q_transfer_bit = 0x00000004, |
|
113 |
|
vk_q_sparse_binding_bit = 0x00000008, |
|
114 |
|
vk_q_protected_bit = 0x00000010, |
|
115 |
|
vk_q_flag_bits_enum_max = 0x7fffffff |
|
116 |
|
}; |
|
117 |
|
enum { |
|
118 |
|
vk_cp_create_transient_bit = 0x00000001, |
|
119 |
|
vk_cp_create_reset_cb_bit = 0x00000002, |
|
120 |
|
vk_cp_create_flag_bits_enum_max = 0x7fffffff |
|
121 |
|
}; |
|
122 |
|
enum { |
|
123 |
|
vk_texel_mem_blk_fmt_undefined = 0, |
|
124 |
|
vk_texel_mem_blk_fmt_b8g8r8a8_unorm = 44, |
|
125 |
|
vk_texel_mem_blk_fmt_b8g8r8a8_srgb = 50, |
|
126 |
|
vk_texel_mem_blk_fmt_enum_max = 0x7fffffff |
|
127 |
|
}; |
|
128 |
|
enum { |
|
129 |
|
vk_color_space_srgb_nonlinear = 0, |
|
130 |
|
vk_color_space_enum_max = 0x7fffffff |
|
131 |
|
}; |
|
132 |
|
enum { |
|
133 |
|
vk_mem_prop_dev_local_bit = 0x00000001, |
|
134 |
|
vk_mem_prop_host_visible_bit = 0x00000002, |
|
135 |
|
vk_mem_prop_host_cached_bit = 0x00000008, |
|
136 |
|
vk_mem_prop_bits_enum_max = 0x7fffffff |
|
137 |
|
}; |
|
138 |
|
enum { |
|
139 |
|
vk_mem_heap_dev_local_bit = 0x00000001, |
|
140 |
|
vk_mem_heap_multi_instance_bit = 0x00000002, |
|
141 |
|
vk_mem_heap_bits_enum_max = 0x7fffffff |
|
142 |
|
}; |
|
143 |
|
enum { |
|
144 |
|
vk_surf_transform_identity_bit = 0x00000001, |
|
145 |
|
vk_surf_transform_bits_enum_max = 0x7fffffff |
|
146 |
|
}; |
|
147 |
|
enum { |
|
148 |
|
vk_composite_alpha_opaque_bit = 0x00000001, |
|
149 |
|
vk_composite_alpha_bits_enum_max = 0x7fffffff |
|
150 |
|
}; |
|
151 |
|
enum { |
|
152 |
|
vk_img_usage_transfer_src_bit = 0x00000001, |
|
153 |
|
vk_img_usage_transfer_dst_bit = 0x00000002, |
|
154 |
|
vk_img_usage_color_attachment_bit = 0x00000010, |
|
155 |
|
vk_img_usage_bits_enum_max = 0x7fffffff |
|
156 |
|
}; |
|
157 |
|
enum { |
|
158 |
|
vk_sharing_mode_exclusive = 0, |
|
159 |
|
vk_sharing_mode_enum_max = 0x7fffffff |
|
160 |
|
}; |
|
161 |
|
enum { |
|
162 |
|
vk_present_mode_immediate = 0, |
|
163 |
|
vk_present_mode_mailbox = 1, |
|
164 |
|
vk_present_mode_fifo = 2, |
|
165 |
|
vk_present_mode_fifo_relaxed = 3, |
|
166 |
|
vk_present_mode_enum_max = 0x7fffffff |
|
167 |
|
}; |
|
168 |
|
enum { |
|
169 |
|
vk_img_type_2d = 1, |
|
170 |
|
vk_img_type_enum_max = 0x7fffffff |
|
171 |
|
}; |
|
172 |
|
enum { |
|
173 |
|
vk_samples_n_1_bit = 0x00000001, |
|
174 |
|
vk_samples_n_bits_enum_max = 0x7fffffff |
|
175 |
|
}; |
|
176 |
|
enum { |
|
177 |
|
vk_img_tiling_optimal = 0, |
|
178 |
|
vk_img_tiling_linear = 1, |
|
179 |
|
vk_img_tiling_enum_max = 0x7fffffff |
|
180 |
|
}; |
|
181 |
|
enum { |
|
182 |
|
vk_img_create_flag_2d_array_compatible_bit = 0x00000002, |
|
183 |
|
vk_img_create_flag_enum_max = 0x7fffffff |
|
184 |
|
}; |
|
185 |
|
enum { |
|
186 |
|
vk_img_layout_undefined = 0, |
|
187 |
|
vk_img_layout_general = 1, |
|
188 |
|
vk_img_layout_color_at_optimal = 2, |
|
189 |
|
/* extension number 2 or index 1, offset 2 */ |
|
190 |
|
vk_img_layout_present = 1000000000 + 1000 + 2, |
|
191 |
|
vk_img_layout_enum_n_max = 0x7fffffff |
|
192 |
|
}; |
|
193 |
|
enum { |
|
194 |
|
/* more */ |
|
195 |
|
vk_pl_stage_top_of_pipe_bit = (1 << 0), |
|
196 |
|
vk_pl_stage_bottom_of_pipe_bit = (1 << 13), |
|
197 |
|
vk_pl_stage_bits_enum_max = 0x7fffffff |
|
198 |
|
}; |
|
199 |
|
enum { |
|
200 |
|
vk_img_aspect_color_bit = 1, |
|
201 |
|
vk_img_aspect_bits_enum_max = 0x7fffffff |
|
202 |
|
}; |
|
203 |
|
enum { |
|
204 |
|
vk_cb_lvl_primary = 0, |
|
205 |
|
vk_cb_lvl_enum_max = 0x7fffffff |
|
206 |
|
}; |
|
207 |
|
enum { |
|
208 |
|
vk_cb_usage_one_time_submit_bit = 0x00000001, |
|
209 |
|
vk_cb_usage_enum_max = 0x7fffffff |
|
210 |
|
}; |
|
211 |
|
enum { |
|
212 |
|
vk_comp_swizzle_identity = 0, |
|
213 |
|
vk_comp_swizzle_enum_max = 0x7fffffff |
|
214 |
|
}; |
|
215 |
|
enum { |
|
216 |
|
vk_imgview_type_2d = 1, |
|
217 |
|
vk_imgview_type_enum_max = 0x7fffffff |
|
218 |
|
}; |
|
219 |
|
enum { |
|
220 |
|
vk_at_load_op_load = 0, |
|
221 |
|
vk_at_load_op_clr = 1, |
|
222 |
|
vk_at_load_op_dont_care = 2, |
|
223 |
|
vk_at_load_op_enum_max = 0x7fffffff |
|
224 |
|
}; |
|
225 |
|
enum { |
|
226 |
|
vk_at_store_op_store = 0, |
|
227 |
|
vk_at_store_op_dont_care = 1, |
|
228 |
|
vk_at_store_op_enum_max = 0x7fffffff |
|
229 |
|
}; |
|
230 |
|
enum { |
|
231 |
|
vk_pl_bind_point_gfx = 0, |
|
232 |
|
vk_pl_bind_point_compute = 1, |
|
233 |
|
vk_pl_bind_point_enum_max = 0x7fffffff |
|
234 |
|
}; |
|
235 |
|
enum { |
|
236 |
|
vk_sh_stage_vtx_bit = 0x00000001, |
|
237 |
|
vk_sh_stage_frag_bit = 0x00000010, |
|
238 |
|
/* meh */ |
|
239 |
|
vk_sh_stage_compute_bit = 0x00000020, |
|
240 |
|
vk_sh_stage_bits_enum_max = 0x7fffffff |
|
241 |
|
}; |
|
242 |
|
enum { |
|
243 |
|
vk_prim_topology_triangle_list = 3, |
|
244 |
|
/* more */ |
|
245 |
|
vk_prim_topology_enum_max = 0x7fffffff |
|
246 |
|
}; |
|
247 |
|
enum { |
|
248 |
|
vk_logic_op_copy = 3, |
|
249 |
|
vk_logic_op_enum_max = 0x7fffffff |
|
250 |
|
}; |
|
251 |
|
enum { |
|
252 |
|
vk_color_comp_r_bit = 0x00000001, |
|
253 |
|
vk_color_comp_p_bit = 0x00000002, |
|
254 |
|
vk_color_comp_b_bit = 0x00000004, |
|
255 |
|
vk_color_comp_a_bit = 0x00000008, |
|
256 |
|
vk_color_comp_bits_enum_max = 0x7fffffff |
|
257 |
|
}; |
|
258 |
|
enum { |
|
259 |
|
vk_blend_factor_zero = 0, |
|
260 |
|
vk_blend_factor_one = 1, |
|
261 |
|
vk_blend_factor_src_alpha = 6, |
|
262 |
|
vk_blend_factor_one_minus_src_alpha = 7, |
|
263 |
|
vk_blend_factor_enum_max = 0x7fffffff |
|
264 |
|
}; |
|
265 |
|
enum { |
|
266 |
|
vk_blend_op_add = 0, |
|
267 |
|
vk_blend_op_substract = 1, |
|
268 |
|
vk_blend_op_reverse_substract = 2, |
|
269 |
|
vk_blend_op_min = 3, |
|
270 |
|
vk_blend_op_max = 4, |
|
271 |
|
vk_blend_op_enum_max = 0x7fffffff |
|
272 |
|
}; |
|
273 |
|
enum { |
|
274 |
|
vk_sp_contents_inline = 0, |
|
275 |
|
/* more? */ |
|
276 |
|
vk_sp_contents_enum_max = 0x7fffffff |
|
277 |
|
}; |
|
278 |
|
enum { |
|
279 |
|
vk_polygon_mode_fill = 0, |
|
280 |
|
/* more */ |
|
281 |
|
vk_polygon_mode_enum_max = 0x7fffffff |
|
282 |
|
}; |
|
283 |
|
enum { |
|
284 |
|
vk_cull_mode_none = 0, |
|
285 |
|
vk_cull_mode_front_bit = 0x00000001, |
|
286 |
|
vk_cull_mode_back_bit = 0x00000002, |
|
287 |
|
vk_cull_mode_bits_enum_max = 0x7fffffff |
|
288 |
|
}; |
|
289 |
|
enum { |
|
290 |
|
vk_front_face_counter_clockwise = 0, |
|
291 |
|
vk_front_face_clockwise = 1, |
|
292 |
|
vk_front_face_enum_max = 0x7fffffff |
|
293 |
|
}; |
|
294 |
|
#endif |
File npv/nyanvk/types.h added (mode: 100644) (index 0000000..72da47b) |
|
1 |
|
#ifndef NYANVK_TYPES_H |
|
2 |
|
#define NYANVK_TYPES_H |
|
3 |
|
/* |
|
4 |
|
* this is public domain without any warranties of any kind |
|
5 |
|
* Sylvain BERTRAND |
|
6 |
|
*/ |
|
7 |
|
/* XXX: KEEP AN EYE ON ABBREVIATIONS, ALWAYS */ |
|
8 |
|
/*----------------------------------------------------------------------------*/ |
|
9 |
|
#define vk_true 1 |
|
10 |
|
#define vk_false 0 |
|
11 |
|
#define vk_whole_sz 0xffffffffffffffff |
|
12 |
|
#define vk_q_fam_ignored 0xffffffff |
|
13 |
|
/*----------------------------------------------------------------------------*/ |
|
14 |
|
#define VK_VERSION_MAJOR(x) ((x) >> 22) |
|
15 |
|
#define VK_VERSION_MINOR(x) (((x) >> 12) & 0x3ff) |
|
16 |
|
#define VK_VERSION_PATCH(x) ((x) & 0xfff) |
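These macros unpack the 10.10.12 bit layout vulkan uses for packed version numbers such as api_version in vk_phydev_props_core_t below. A worked example with an assumed raw value:

/* worked example (assumed raw value 0x00401000, i.e. 4198400):
 *   VK_VERSION_MAJOR(0x00401000) = 0x00401000 >> 22           = 1
 *   VK_VERSION_MINOR(0x00401000) = (0x00401000 >> 12) & 0x3ff = 1
 *   VK_VERSION_PATCH(0x00401000) = 0x00401000 & 0xfff         = 0
 * i.e. vulkan 1.1.0 */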
|
17 |
|
/*----------------------------------------------------------------------------*/ |
|
18 |
|
/* opaque or not defined */ |
|
19 |
|
struct vk_dev_t; |
|
20 |
|
struct vk_cp_t; |
|
21 |
|
struct vk_img_t; |
|
22 |
|
struct vk_dev_mem_t; |
|
23 |
|
struct vk_swpchn_t; |
|
24 |
|
struct vk_cb_t; |
|
25 |
|
struct vk_buf_barrier_t; |
|
26 |
|
struct vk_mem_barrier_t; |
|
27 |
|
struct vk_q_t; |
|
28 |
|
struct vk_fence_t; |
|
29 |
|
struct vk_sem_t; |
|
30 |
|
struct vk_imgview_t; |
|
31 |
|
struct vk_rp_t; |
|
32 |
|
struct vk_fb_t; |
|
33 |
|
struct vk_shmod_t; |
|
34 |
|
struct vk_pl_layout_t; |
|
35 |
|
struct vk_pl_cache_t; |
|
36 |
|
struct vk_pl_t; |
|
37 |
|
struct vk_surf_t; |
|
38 |
|
struct vk_specialization_info_t; |
|
39 |
|
struct vk_dset_layout_t; |
|
40 |
|
struct vk_instance_t; |
|
41 |
|
struct vk_phydev_t; |
|
42 |
|
/*----------------------------------------------------------------------------*/ |
|
43 |
|
struct vk_offset_2d_t { |
|
44 |
|
s32 x; |
|
45 |
|
s32 y; |
|
46 |
|
}; |
|
47 |
|
struct vk_offset_3d_t { |
|
48 |
|
s32 x; |
|
49 |
|
s32 y; |
|
50 |
|
s32 z; |
|
51 |
|
}; |
|
52 |
|
struct vk_extent_2d_t { |
|
53 |
|
u32 width; |
|
54 |
|
u32 height; |
|
55 |
|
}; |
|
56 |
|
struct vk_extent_3d_t { |
|
57 |
|
u32 width; |
|
58 |
|
u32 height; |
|
59 |
|
u32 depth; |
|
60 |
|
}; |
|
61 |
|
struct vk_rect_2d_t { |
|
62 |
|
struct vk_offset_2d_t offset; |
|
63 |
|
struct vk_extent_2d_t extent; |
|
64 |
|
}; |
|
65 |
|
struct vk_instance_create_info_t { |
|
66 |
|
u32 type; |
|
67 |
|
void *next; |
|
68 |
|
u32 flags; |
|
69 |
|
void *app_info; /* allow easy hidden driver optimizations: no! */ |
|
70 |
|
u32 enabled_layers_n; |
|
71 |
|
u8 **enabled_layer_names; |
|
72 |
|
u32 enabled_exts_n; |
|
73 |
|
u8 **enabled_ext_names; |
|
74 |
|
}; |
|
75 |
|
#define VK_MAX_EXT_NAME_SZ 256 |
|
76 |
|
struct vk_ext_props_t { |
|
77 |
|
u8 name[VK_MAX_EXT_NAME_SZ]; |
|
78 |
|
u32 spec_version; |
|
79 |
|
}; |
|
80 |
|
#define VK_MAX_DESC_SZ 256 |
|
81 |
|
struct vk_layer_props_t { |
|
82 |
|
u8 name[VK_MAX_EXT_NAME_SZ]; |
|
83 |
|
u32 spec_version; |
|
84 |
|
u32 implementation_version; |
|
85 |
|
u8 desc[VK_MAX_DESC_SZ]; |
|
86 |
|
}; |
|
87 |
|
struct vk_phydev_limits_t { |
|
88 |
|
u32 not_used_00[11]; |
|
89 |
|
u64 not_used_01[2]; |
|
90 |
|
u32 not_used_02[51]; |
|
91 |
|
float not_used_03[2]; |
|
92 |
|
u32 not_used_04[3]; |
|
93 |
|
float not_used_05[2]; |
|
94 |
|
u32 not_used_06; |
|
95 |
|
size_t not_used_07; |
|
96 |
|
u64 not_used_08[3]; |
|
97 |
|
u32 not_used_09[4]; |
|
98 |
|
float not_used_10[2]; |
|
99 |
|
u32 not_used_11[16]; |
|
100 |
|
float not_used_12; |
|
101 |
|
u32 not_used_13[4]; |
|
102 |
|
float not_used_14[6]; |
|
103 |
|
u32 not_used_15[2]; |
|
104 |
|
u64 not_used_16[3]; |
|
105 |
|
}; |
|
106 |
|
struct vk_phydev_sparse_props_t { |
|
107 |
|
u32 not_used[5]; |
|
108 |
|
}; |
|
109 |
|
#define VK_MAX_PHYDEV_NAME_SZ 256 |
|
110 |
|
#define VK_UUID_SZ 16 |
|
111 |
|
struct vk_phydev_props_core_t { |
|
112 |
|
u32 api_version; |
|
113 |
|
u32 driver_version; |
|
114 |
|
u32 vendor_id; |
|
115 |
|
u32 dev_id; |
|
116 |
|
u32 dev_type; |
|
117 |
|
u8 name[VK_MAX_PHYDEV_NAME_SZ]; |
|
118 |
|
u8 pl_cache_uuid[VK_UUID_SZ]; |
|
119 |
|
struct vk_phydev_limits_t limits; |
|
120 |
|
struct vk_phydev_sparse_props_t sparse_props; |
|
121 |
|
}; |
|
122 |
|
/* the vulkan 1.1 version */ |
|
123 |
|
struct vk_phydev_props_t { |
|
124 |
|
u32 type; |
|
125 |
|
void *next; |
|
126 |
|
struct vk_phydev_props_core_t core; |
|
127 |
|
}; |
|
128 |
|
struct vk_q_fam_props_core_t { |
|
129 |
|
u32 flags; |
|
130 |
|
u32 qs_n; |
|
131 |
|
u32 timestamp_valid_bits; |
|
132 |
|
struct vk_extent_3d_t min_img_transfer_granularity; |
|
133 |
|
}; |
|
134 |
|
struct vk_q_fam_props_t { |
|
135 |
|
u32 type; |
|
136 |
|
void *next; |
|
137 |
|
struct vk_q_fam_props_core_t core; |
|
138 |
|
}; |
|
139 |
|
struct vk_phydev_features_t { |
|
140 |
|
u32 not_used[55]; |
|
141 |
|
}; |
|
142 |
|
struct vk_dev_q_create_info_t { |
|
143 |
|
u32 type; |
|
144 |
|
void *next; |
|
145 |
|
u32 flags; |
|
146 |
|
u32 q_fam; |
|
147 |
|
u32 qs_n; |
|
148 |
|
float *q_prios; |
|
149 |
|
}; |
|
150 |
|
struct vk_dev_create_info_t { |
|
151 |
|
u32 type; |
|
152 |
|
void *next; |
|
153 |
|
u32 flags; |
|
154 |
|
u32 q_create_infos_n; |
|
155 |
|
struct vk_dev_q_create_info_t *q_create_infos; |
|
156 |
|
u32 do_not_use_0; |
|
157 |
|
void *do_not_use_1; |
|
158 |
|
u32 enabled_exts_n; |
|
159 |
|
u8 **enabled_ext_names; |
|
160 |
|
void *do_not_use_2; |
|
161 |
|
}; |
|
162 |
|
struct vk_cp_create_info_t { |
|
163 |
|
u32 type; |
|
164 |
|
void *next; |
|
165 |
|
u32 flags; |
|
166 |
|
u32 q_fam; |
|
167 |
|
}; |
|
168 |
|
struct vk_xcb_surf_create_info_t { |
|
169 |
|
u32 type; |
|
170 |
|
void *next; |
|
171 |
|
u32 flags; |
|
172 |
|
xcb_connection_t *c; |
|
173 |
|
xcb_window_t win; |
|
174 |
|
}; |
|
175 |
|
struct vk_phydev_surf_info_t { |
|
176 |
|
u32 type; |
|
177 |
|
void *next; |
|
178 |
|
void *surf; |
|
179 |
|
}; |
|
180 |
|
struct vk_surf_texel_mem_blk_conf_core_t { |
|
181 |
|
u32 fmt; |
|
182 |
|
u32 color_space; |
|
183 |
|
}; |
|
184 |
|
struct vk_surf_texel_mem_blk_conf_t { |
|
185 |
|
u32 type; |
|
186 |
|
void *next; |
|
187 |
|
struct vk_surf_texel_mem_blk_conf_core_t core; |
|
188 |
|
}; |
|
189 |
|
struct vk_mem_type_t { |
|
190 |
|
u32 prop_flags; |
|
191 |
|
u32 heap; |
|
192 |
|
}; |
|
193 |
|
struct vk_mem_heap_t { |
|
194 |
|
u64 sz; |
|
195 |
|
u32 flags; |
|
196 |
|
}; |
|
197 |
|
#define VK_MEM_TYPES_N_MAX 32 |
|
198 |
|
#define VK_MEM_HEAPS_N_MAX 16 |
|
199 |
|
struct vk_phydev_mem_props_core_t { |
|
200 |
|
u32 mem_types_n; |
|
201 |
|
struct vk_mem_type_t mem_types[VK_MEM_TYPES_N_MAX]; |
|
202 |
|
u32 mem_heaps_n; |
|
203 |
|
struct vk_mem_heap_t mem_heaps[VK_MEM_HEAPS_N_MAX]; |
|
204 |
|
}; |
|
205 |
|
struct vk_phydev_mem_props_t { |
|
206 |
|
u32 type; |
|
207 |
|
void *next; |
|
208 |
|
struct vk_phydev_mem_props_core_t core; |
|
209 |
|
}; |
|
210 |
|
struct vk_surf_caps_core_t { |
|
211 |
|
u32 imgs_n_min; |
|
212 |
|
u32 imgs_n_max; |
|
213 |
|
struct vk_extent_2d_t current_extent; |
|
214 |
|
struct vk_extent_2d_t img_extent_min; |
|
215 |
|
struct vk_extent_2d_t img_extent_max; |
|
216 |
|
u32 img_array_layers_n_max; |
|
217 |
|
u32 supported_transforms; |
|
218 |
|
u32 current_transform; |
|
219 |
|
u32 supported_composite_alpha; |
|
220 |
|
u32 supported_img_usage_flags; |
|
221 |
|
}; |
|
222 |
|
struct vk_surf_caps_t { |
|
223 |
|
u32 type; |
|
224 |
|
void *next; |
|
225 |
|
struct vk_surf_caps_core_t core; |
|
226 |
|
}; |
|
227 |
|
struct vk_swpchn_create_info_t { |
|
228 |
|
u32 type; |
|
229 |
|
void *next; |
|
230 |
|
u32 flags; |
|
231 |
|
struct vk_surf_t *surf; |
|
232 |
|
u32 imgs_n_min; |
|
233 |
|
u32 img_texel_mem_blk_fmt; |
|
234 |
|
u32 img_color_space; |
|
235 |
|
struct vk_extent_2d_t img_extent; |
|
236 |
|
u32 img_layers_n; |
|
237 |
|
u32 img_usage; |
|
238 |
|
u32 img_sharing_mode; |
|
239 |
|
u32 q_fams_n; |
|
240 |
|
u32 *q_fams; |
|
241 |
|
u32 pre_transform; |
|
242 |
|
u32 composite_alpha; |
|
243 |
|
u32 present_mode; |
|
244 |
|
u32 clipped; |
|
245 |
|
struct vk_swpchn_t *old_swpchn; |
|
246 |
|
}; |
|
247 |
|
struct vk_img_create_info_t { |
|
248 |
|
u32 type; |
|
249 |
|
void *next; |
|
250 |
|
u32 flags; |
|
251 |
|
u32 img_type; |
|
252 |
|
u32 texel_mem_blk_fmt; |
|
253 |
|
struct vk_extent_3d_t extent; |
|
254 |
|
u32 mip_lvls_n; |
|
255 |
|
u32 array_layers_n; |
|
256 |
|
u32 samples_n; /* flags */ |
|
257 |
|
u32 img_tiling; |
|
258 |
|
u32 usage; |
|
259 |
|
u32 sharing_mode; |
|
260 |
|
u32 q_fams_n; |
|
261 |
|
u32 *q_fams; |
|
262 |
|
u32 initial_layout; |
|
263 |
|
}; |
|
264 |
|
struct vk_img_mem_rqmts_info_t { |
|
265 |
|
u32 type; |
|
266 |
|
void *next; |
|
267 |
|
struct vk_img_t *img; |
|
268 |
|
}; |
|
269 |
|
struct vk_mem_rqmts_core_t { |
|
270 |
|
u64 sz; |
|
271 |
|
u64 alignment; |
|
272 |
|
/* idxs of bits are idxs in mem types of vk_phydev_mem_props_core_t */ |
|
273 |
|
u32 mem_type_bits; |
|
274 |
|
}; |
|
275 |
|
struct vk_mem_rqmts_t { |
|
276 |
|
u32 type; |
|
277 |
|
void *next; |
|
278 |
|
struct vk_mem_rqmts_core_t core; |
|
279 |
|
}; |
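mem_type_bits above is a bitmask over the indices of vk_phydev_mem_props_core_t.mem_types: bit i set means memory type i is acceptable for the image. Selecting a type is then a scan for the first allowed index whose prop_flags also carry what the caller wants. A minimal sketch using the structs declared in this header; the function name and the not-found return value are illustrative:

/* illustrative sketch: pick a memory type index allowed by the image's
 * requirements and carrying the wanted property flags (e.g.
 * vk_mem_prop_dev_local_bit); returns VK_MEM_TYPES_N_MAX when none fits */
static unsigned int mem_type_pick_sketch(
	struct vk_phydev_mem_props_core_t *props,
	struct vk_mem_rqmts_core_t *rqmts, unsigned int wanted_flags)
{
	unsigned int i = 0;

	while (i < props->mem_types_n) {
		if ((rqmts->mem_type_bits & (1u << i)) != 0
			&& (props->mem_types[i].prop_flags & wanted_flags)
							== wanted_flags)
			return i;
		++i;
	}
	return VK_MEM_TYPES_N_MAX;
}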
|
280 |
|
struct vk_mem_alloc_info_t { |
|
281 |
|
u32 type; |
|
282 |
|
void *next; |
|
283 |
|
u64 sz; |
|
284 |
|
u32 mem_type_idx; /* in the physical device array of memory types */ |
|
285 |
|
}; |
|
286 |
|
struct vk_bind_img_mem_info_t { |
|
287 |
|
u32 type; |
|
288 |
|
void *next; |
|
289 |
|
struct vk_img_t *img; |
|
290 |
|
struct vk_dev_mem_t *mem; |
|
291 |
|
u64 offset; |
|
292 |
|
}; |
|
293 |
|
struct vk_img_subrsrc_range_t { |
|
294 |
|
u32 aspect; |
|
295 |
|
u32 base_mip_lvl; |
|
296 |
|
u32 lvls_n; |
|
297 |
|
u32 base_array_layer; |
|
298 |
|
u32 array_layers_n; |
|
299 |
|
}; |
|
300 |
|
struct vk_img_mem_barrier_t { |
|
301 |
|
u32 type; |
|
302 |
|
void *next; |
|
303 |
|
u32 src_access; |
|
304 |
|
u32 dst_access; |
|
305 |
|
u32 old_layout; |
|
306 |
|
u32 new_layout; |
|
307 |
|
u32 src_q_fam; |
|
308 |
|
u32 dst_q_fam; |
|
309 |
|
struct vk_img_t *img; |
|
310 |
|
struct vk_img_subrsrc_range_t subrsrc_range; |
|
311 |
|
}; |
|
312 |
|
struct vk_cb_alloc_info_t { |
|
313 |
|
u32 type; |
|
314 |
|
void *next; |
|
315 |
|
struct vk_cp_t *cp; |
|
316 |
|
u32 lvl; |
|
317 |
|
u32 cbs_n; |
|
318 |
|
}; |
|
319 |
|
struct vk_cb_begin_info_t { |
|
320 |
|
u32 type; |
|
321 |
|
void *next; |
|
322 |
|
u32 flags; |
|
323 |
|
void *do_not_use; |
|
324 |
|
}; |
|
325 |
|
struct vk_submit_info_t { |
|
326 |
|
u32 type; |
|
327 |
|
void *next; |
|
328 |
|
u32 wait_sems_n; |
|
329 |
|
struct vk_sem_t **wait_sems; |
|
330 |
|
u32 *wait_dst_stages; |
|
331 |
|
u32 cbs_n; |
|
332 |
|
struct vk_cb_t **cbs; |
|
333 |
|
u32 signal_sems_n; |
|
334 |
|
struct vk_sem_t **signal_sems; |
|
335 |
|
}; |
|
336 |
|
struct vk_img_subrsrc_t { |
|
337 |
|
u32 aspect; |
|
338 |
|
u32 mip_lvl; |
|
339 |
|
u32 array_layer; |
|
340 |
|
}; |
|
341 |
|
struct vk_subrsrc_layout_t { |
|
342 |
|
u64 offset; |
|
343 |
|
u64 sz; |
|
344 |
|
u64 row_pitch; |
|
345 |
|
u64 array_pitch; |
|
346 |
|
u64 depth_pitch; |
|
347 |
|
}; |
|
348 |
|
struct vk_img_subrsrc_layers_t { |
|
349 |
|
u32 aspect; |
|
350 |
|
u32 mip_lvl; |
|
351 |
|
u32 base_array_layer; |
|
352 |
|
u32 array_layers_n; |
|
353 |
|
}; |
|
354 |
|
struct vk_acquire_next_img_info_t { |
|
355 |
|
u32 type; |
|
356 |
|
void *next; |
|
357 |
|
struct vk_swpchn_t *swpchn; |
|
358 |
|
u64 timeout; |
|
359 |
|
struct vk_sem_t *sem; |
|
360 |
|
struct vk_fence_t *fence; |
|
361 |
|
u32 devs; |
|
362 |
|
}; |
|
363 |
|
struct vk_img_blit_t { |
|
364 |
|
struct vk_img_subrsrc_layers_t src_subrsrc; |
|
365 |
|
struct vk_offset_3d_t src_offsets[2]; |
|
366 |
|
struct vk_img_subrsrc_layers_t dst_subrsrc; |
|
367 |
|
struct vk_offset_3d_t dst_offsets[2]; |
|
368 |
|
}; |
|
369 |
|
struct vk_present_info_t { |
|
370 |
|
u32 type; |
|
371 |
|
void *next; |
|
372 |
|
u32 wait_sems_n; |
|
373 |
|
struct vk_sem_t **wait_sems; |
|
374 |
|
u32 swpchns_n; |
|
375 |
|
struct vk_swpchn_t **swpchns; |
|
376 |
|
u32 *idxs; |
|
377 |
|
s32 *results; |
|
378 |
|
}; |
|
379 |
|
struct vk_sem_create_info_t { |
|
380 |
|
u32 type; |
|
381 |
|
void *next; |
|
382 |
|
u32 flags; |
|
383 |
|
}; |
|
384 |
|
struct vk_comp_map_t { |
|
385 |
|
s32 r; |
|
386 |
|
s32 g; |
|
387 |
|
s32 b; |
|
388 |
|
s32 a; |
|
389 |
|
}; |
|
390 |
|
struct vk_imgview_create_info_t { |
|
391 |
|
u32 type; |
|
392 |
|
void *next; |
|
393 |
|
u32 flags; |
|
394 |
|
struct vk_img_t *img; |
|
395 |
|
s32 view_type; |
|
396 |
|
s32 fmt; |
|
397 |
|
struct vk_comp_map_t comps; |
|
398 |
|
struct vk_img_subrsrc_range_t subrsrc_range; |
|
399 |
|
}; |
|
400 |
|
struct vk_at_desc_t { |
|
401 |
|
u32 type; |
|
402 |
|
void *next; |
|
403 |
|
u32 flags; |
|
404 |
|
u32 fmt; |
|
405 |
|
u32 samples_n; |
|
406 |
|
u32 load_op; |
|
407 |
|
u32 store_op; |
|
408 |
|
u32 stencil_load_op; |
|
409 |
|
u32 stencil_store_op; |
|
410 |
|
u32 initial_layout; |
|
411 |
|
u32 final_layout; |
|
412 |
|
}; |
|
413 |
|
struct vk_at_ref_t { |
|
414 |
|
u32 type; |
|
415 |
|
void *next; |
|
416 |
|
u32 at; |
|
417 |
|
u32 layout; |
|
418 |
|
}; |
|
419 |
|
struct vk_sp_desc_t { |
|
420 |
|
u32 type; |
|
421 |
|
void *next; |
|
422 |
|
u32 flags; |
|
423 |
|
u32 pl_bind_point; |
|
424 |
|
u32 viewmask; |
|
425 |
|
u32 input_ats_n; |
|
426 |
|
struct vk_at_ref_t *input_ats; |
|
427 |
|
u32 color_ats_n; |
|
428 |
|
struct vk_at_ref_t *color_ats; |
|
429 |
|
struct vk_at_ref_t *resolve_ats; |
|
430 |
|
struct vk_at_ref_t *depth_stencil_ats; |
|
431 |
|
u32 preserve_ats_n; |
|
432 |
|
u32 *preserve_ats; |
|
433 |
|
}; |
|
434 |
|
struct vk_sp_dep_t { |
|
435 |
|
u32 src_sp; |
|
436 |
|
u32 dst_sp; |
|
437 |
|
u32 src_stage_mask; |
|
438 |
|
u32 dst_stage_mask; |
|
439 |
|
u32 src_access_mask; |
|
440 |
|
u32 dst_access_mask; |
|
441 |
|
u32 dep; |
|
442 |
|
}; |
|
443 |
|
struct vk_rp_create_info_t { |
|
444 |
|
u32 type; |
|
445 |
|
void *next; |
|
446 |
|
u32 flags; |
|
447 |
|
u32 ats_n; |
|
448 |
|
struct vk_at_desc_t *ats; |
|
449 |
|
u32 sps_n; |
|
450 |
|
struct vk_sp_desc_t *sps; |
|
451 |
|
u32 deps_n; |
|
452 |
|
struct vk_sp_dep_t *deps; |
|
453 |
|
u32 correlated_viewmasks_n; |
|
454 |
|
u32 *correlated_viewmasks; |
|
455 |
|
}; |
|
456 |
|
struct vk_sp_begin_info_t { |
|
457 |
|
u32 type; |
|
458 |
|
void *next; |
|
459 |
|
u32 contents; |
|
460 |
|
}; |
|
461 |
|
struct vk_sp_end_info_t { |
|
462 |
|
u32 type; |
|
463 |
|
void *next; |
|
464 |
|
}; |
|
465 |
|
struct vk_fb_create_info_t { |
|
466 |
|
u32 type; |
|
467 |
|
void *next; |
|
468 |
|
u32 flags; |
|
469 |
|
struct vk_rp_t *rp; |
|
470 |
|
u32 ats_n; |
|
471 |
|
struct vk_imgview_t *ats; |
|
472 |
|
u32 width; |
|
473 |
|
u32 height; |
|
474 |
|
u32 layers_n; |
|
475 |
|
}; |
|
476 |
|
struct vk_shmod_create_info_t { |
|
477 |
|
u32 type; |
|
478 |
|
void *next; |
|
479 |
|
u32 flags; |
|
480 |
|
size_t code_sz; /* bytes_n */ |
|
481 |
|
u32 *code; |
|
482 |
|
}; |
|
483 |
|
struct vk_pl_sh_stage_create_info_t { |
|
484 |
|
u32 type; |
|
485 |
|
void *next; |
|
486 |
|
u32 flags; |
|
487 |
|
u32 stage; |
|
488 |
|
struct vk_shmod_t *shmod; |
|
489 |
|
u8 *name; |
|
490 |
|
struct vk_specialization_t *specialization_info; /* later */ |
|
491 |
|
}; |
|
492 |
|
struct vk_pl_vtx_input_state_create_info_t { |
|
493 |
|
u32 type; |
|
494 |
|
void *next; |
|
495 |
|
u32 flags; |
|
496 |
|
u32 not_used_0; |
|
497 |
|
void *not_used_1; |
|
498 |
|
u32 not_used_2; |
|
499 |
|
void *not_used_3; |
|
500 |
|
}; |
|
501 |
|
struct vk_pl_input_assembly_state_create_info_t { |
|
502 |
|
u32 type; |
|
503 |
|
void *next; |
|
504 |
|
u32 flags; |
|
505 |
|
u32 topology; |
|
506 |
|
u32 prim_restart_ena; |
|
507 |
|
}; |
|
508 |
|
/* XXX: this does define a _transformation_ from "normalized" coords ! */ |
|
509 |
|
struct vk_viewport_t { |
|
510 |
|
f32 x; |
|
511 |
|
f32 y; |
|
512 |
|
f32 width; |
|
513 |
|
f32 height; |
|
514 |
|
f32 depth_min; |
|
515 |
|
f32 depth_max; |
|
516 |
|
}; |
|
517 |
|
struct vk_pl_viewport_state_create_info_t { |
|
518 |
|
u32 type; |
|
519 |
|
void *next; |
|
520 |
|
u32 flags; |
|
521 |
|
u32 viewports_n; |
|
522 |
|
struct vk_viewport_t *viewports; |
|
523 |
|
u32 scissors_n; |
|
524 |
|
struct vk_rect_2d_t *scissors; |
|
525 |
|
}; |
|
526 |
|
struct vk_pl_raster_state_create_info_t { |
|
527 |
|
u32 type; |
|
528 |
|
void *next; |
|
529 |
|
u32 flags; |
|
530 |
|
u32 depth_clamp_ena; |
|
531 |
|
u32 raster_discard_ena; |
|
532 |
|
u32 polygon_mode; |
|
533 |
|
u32 cull_mode; |
|
534 |
|
u32 front_face; |
|
535 |
|
u32 depth_bias_ena; |
|
536 |
|
f32 depth_bias_constant_factor; |
|
537 |
|
f32 depth_bias_clamp; |
|
538 |
|
f32 depth_bias_slope_factor; |
|
539 |
|
f32 line_width; |
|
540 |
|
}; |
|
541 |
|
struct vk_pl_multisample_state_create_info_t { |
|
542 |
|
u32 type; |
|
543 |
|
void *next; |
|
544 |
|
u32 flags; |
|
545 |
|
u32 raster_samples_n; /* flag */ |
|
546 |
|
u32 sample_shading_ena; |
|
547 |
|
f32 sample_shading_min; |
|
548 |
|
u32 *sample_mask; |
|
549 |
|
u32 alpha_to_coverage_ena; |
|
550 |
|
u32 alpha_to_one_ena; |
|
551 |
|
}; |
|
552 |
|
struct vk_pl_color_blend_at_state_t { |
|
553 |
|
u32 blend_ena; |
|
554 |
|
|
|
555 |
|
u32 src_color_blend_factor; |
|
556 |
|
u32 dst_color_blend_factor; |
|
557 |
|
/* normalized integer */ |
|
558 |
|
u32 color_blend_op; |
|
559 |
|
|
|
560 |
|
u32 src_alpha_blend_factor; |
|
561 |
|
u32 dst_alpha_blend_factor; |
|
562 |
|
u32 alpha_blend_op; |
|
563 |
|
|
|
564 |
|
/* normalized integer */ |
|
565 |
|
/* XXX: must always be set, blending enabled or disabled */ |
|
566 |
|
u32 color_write_mask; |
|
567 |
|
}; |
|
568 |
|
struct vk_pl_color_blend_state_create_info_t { |
|
569 |
|
u32 type; |
|
570 |
|
void *next; |
|
571 |
|
u32 flags; |
|
572 |
|
u32 logic_op_ena; |
|
573 |
|
/* floats */ |
|
574 |
|
u32 logic_op; |
|
575 |
|
u32 ats_n; |
|
576 |
|
struct vk_pl_color_blend_at_state_t *ats; |
|
577 |
|
f32 blend_consts[4]; |
|
578 |
|
}; |
|
579 |
|
struct vk_pushconst_range_t { |
|
580 |
|
u32 shader_stages; |
|
581 |
|
u32 offset; |
|
582 |
|
u32 size; |
|
583 |
|
}; |
|
584 |
|
struct vk_pl_layout_create_info_t { |
|
585 |
|
u32 type; |
|
586 |
|
void *next; |
|
587 |
|
u32 flags; |
|
588 |
|
u32 layouts_n; |
|
589 |
|
struct vk_dset_layout_t **layouts; |
|
590 |
|
u32 pushconst_ranges_n; |
|
591 |
|
struct vk_pushconst_range_t *ranges; |
|
592 |
|
}; |
|
593 |
|
struct vk_gfx_pl_create_info_t { |
|
594 |
|
u32 type; |
|
595 |
|
void *next; |
|
596 |
|
u32 flags; |
|
597 |
|
u32 sh_stages_n; |
|
598 |
|
struct vk_pl_sh_stage_create_info_t *sh_stages; |
|
599 |
|
struct vk_pl_vtx_input_state_create_info_t *vtx_input_state; |
|
600 |
|
struct vk_pl_input_assembly_state_create_info_t *input_assembly_state; |
|
601 |
|
void *dont_use_0; |
|
602 |
|
struct vk_pl_viewport_state_create_info_t *viewport_state; |
|
603 |
|
struct vk_pl_raster_state_create_info_t *raster_state; |
|
604 |
|
struct vk_pl_multisample_state_create_info_t *multisample_state; |
|
605 |
|
void *not_used_0; |
|
606 |
|
struct vk_pl_color_blend_state_create_info_t *color_blend_state; |
|
607 |
|
void *not_used_1; |
|
608 |
|
struct vk_pl_layout_t *layout; |
|
609 |
|
struct vk_rp_t *rp; |
|
610 |
|
u32 sp; |
|
611 |
|
struct vk_pl_t *base_pl; |
|
612 |
|
u32 base_pl_idx; |
|
613 |
|
}; |
|
614 |
|
union vk_clr_color_val_t { |
|
615 |
|
f32 f32s[4]; |
|
616 |
|
u32 u32s[4]; |
|
617 |
|
s32 s32s[4]; |
|
618 |
|
}; |
|
619 |
|
struct vk_clr_depth_stencil_val_t { |
|
620 |
|
f32 depth; |
|
621 |
|
u32 stencil; |
|
622 |
|
}; |
|
623 |
|
union vk_clr_val_t { |
|
624 |
|
union vk_clr_color_val_t color; |
|
625 |
|
struct vk_clr_depth_stencil_val_t depth_stencil; |
|
626 |
|
}; |
|
627 |
|
struct vk_rp_begin_info_t { |
|
628 |
|
u32 type; |
|
629 |
|
void *next; |
|
630 |
|
struct vk_rp_t *rp; |
|
631 |
|
struct vk_fb_t *fb; |
|
632 |
|
struct vk_rect_2d_t render_area; |
|
633 |
|
u32 clr_vals_n; |
|
634 |
|
union vk_clr_val_t *clr_vals; |
|
635 |
|
}; |
|
636 |
|
#endif |
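Note: these abbreviated structs are handed straight to the Vulkan entry points resolved at run time, so they are expected to be byte-for-byte compatible with the corresponding Vulkan C API structs (vk_submit_info_t with VkSubmitInfo, vk_present_info_t with VkPresentInfoKHR, and so on). A minimal sketch of a compile-time check that could accompany them, assuming <vulkan/vulkan.h> is available at build time (it is not part of this patch):

/* hypothetical layout checks, not part of the patch */
#include <vulkan/vulkan.h>
_Static_assert(sizeof(struct vk_submit_info_t) == sizeof(VkSubmitInfo),
	"vk_submit_info_t is expected to alias VkSubmitInfo");
_Static_assert(sizeof(struct vk_present_info_t) == sizeof(VkPresentInfoKHR),
	"vk_present_info_t is expected to alias VkPresentInfoKHR");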
File npv/video/local/code.frag.c added (mode: 100644) (index 0000000..a827fe4) |
|
1 |
|
static void init_once_local(void) |
|
2 |
|
{ |
|
3 |
|
u8 i; |
|
4 |
|
|
|
5 |
|
dec_l = 0; |
|
6 |
|
memset(&tmp_mem_rqmts_l, 0, sizeof(tmp_mem_rqmts_l)); |
|
7 |
|
i = 0; |
|
8 |
|
loop { |
|
9 |
|
if (i == npv_vk_swpchn_imgs_n_max) |
|
10 |
|
break; |
|
11 |
|
blit_l[i].viewport.width = -1; |
|
12 |
|
blit_l[i].viewport.height = -1; |
|
13 |
|
++i; |
|
14 |
|
} |
|
15 |
|
} |
|
16 |
|
static void scaler_img_create(u8 fr) |
|
17 |
|
{ |
|
18 |
|
struct vk_img_create_info_t info; |
|
19 |
|
s32 r; |
|
20 |
|
|
|
21 |
|
memset(&info, 0, sizeof(info)); |
|
22 |
|
info.type = vk_struct_type_img_create_info; |
|
23 |
|
info.flags = vk_img_create_flag_2d_array_compatible_bit; |
|
24 |
|
info.img_type = vk_img_type_2d; |
|
25 |
|
info.texel_mem_blk_fmt = vk_texel_mem_blk_fmt_b8g8r8a8_srgb; |
|
26 |
|
info.extent.width = (u32)dec_frs_p.a[fr]->width; |
|
27 |
|
info.extent.height = (u32)dec_frs_p.a[fr]->height; |
|
28 |
|
info.extent.depth = 1; |
|
29 |
|
info.mip_lvls_n = 1; |
|
30 |
|
info.samples_n = vk_samples_n_1_bit; |
|
31 |
|
info.array_layers_n = 1; |
|
32 |
|
info.img_tiling = vk_img_tiling_linear; |
|
33 |
|
info.usage = vk_img_usage_transfer_src_bit; |
|
34 |
|
info.initial_layout = vk_img_layout_undefined; |
|
35 |
|
vk_create_img(&info, &scaler_p.img.vk); |
|
36 |
|
IF_FATALVVK("%d:device:%p:unable to create scaler frame image\n", r, npv_vk_surf_p.dev.vk); |
|
37 |
|
} |
|
38 |
|
static void img_mem_barrier_run_once(struct vk_img_mem_barrier_t *b) |
|
39 |
|
{ |
|
40 |
|
s32 r; |
|
41 |
|
struct vk_cb_begin_info_t begin_info; |
|
42 |
|
struct vk_submit_info_t submit_info; |
|
43 |
|
|
|
44 |
|
memset(&begin_info, 0, sizeof(begin_info)); |
|
45 |
|
begin_info.type = vk_struct_type_cb_begin_info; |
|
46 |
|
begin_info.flags = vk_cb_usage_one_time_submit_bit; |
|
47 |
|
/* we use the first cb which will be used for the swpchn */ |
|
48 |
|
vk_begin_cb(npv_vk_surf_p.dev.cbs[0], &begin_info); |
|
49 |
|
IF_FATALVVK("%d:unable to begin recording the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.cbs[0]); |
|
50 |
|
/*--------------------------------------------------------------------*/ |
|
51 |
|
vk_cmd_pl_barrier(npv_vk_surf_p.dev.cbs[0], b); |
|
52 |
|
/*--------------------------------------------------------------------*/ |
|
53 |
|
vk_end_cb(npv_vk_surf_p.dev.cbs[0]); |
|
54 |
|
IF_FATALVVK("%d:unable to end recording of the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.cbs[0]); |
|
55 |
|
/*--------------------------------------------------------------------*/ |
|
56 |
|
memset(&submit_info, 0, sizeof(submit_info)); |
|
57 |
|
submit_info.type = vk_struct_type_submit_info; |
|
58 |
|
submit_info.cbs_n = 1; |
|
59 |
|
submit_info.cbs = &npv_vk_surf_p.dev.cbs[0]; |
|
60 |
|
vk_q_submit(&submit_info); |
|
61 |
|
IF_FATALVVK("%d:queue:%p:unable to submit the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.q); |
|
62 |
|
/*--------------------------------------------------------------------*/ |
|
63 |
|
vk_q_wait_idle(); |
|
64 |
|
IF_FATALVVK("%d:queue:%p:unable to wait for idle or completion of initial layout transition command buffer\n", r, npv_vk_surf_p.dev.q); |
|
65 |
|
/*--------------------------------------------------------------------*/ |
|
66 |
|
/* |
|
67 |
|
* since it is tagged to run once, its state_p is invalid, so we need to
|
68 |
|
* reset it to the initial state_p |
|
69 |
|
*/ |
|
70 |
|
vk_reset_cb(npv_vk_surf_p.dev.cbs[0]); |
|
71 |
|
IF_FATALVVK("%d:command buffer:%p:unable to reset the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.cbs[0]); |
|
72 |
|
} |
|
73 |
|
/* once in general layout, the dev sees the img */ |
|
74 |
|
static void scaler_img_layout_to_general(void) |
|
75 |
|
{ |
|
76 |
|
struct vk_img_mem_barrier_t b; |
|
77 |
|
struct vk_img_subrsrc_range_t *r; |
|
78 |
|
|
|
79 |
|
memset(&b, 0, sizeof(b)); |
|
80 |
|
b.type = vk_struct_type_img_mem_barrier; |
|
81 |
|
b.old_layout = vk_img_layout_undefined; |
|
82 |
|
b.new_layout = vk_img_layout_general; |
|
83 |
|
b.src_q_fam = vk_q_fam_ignored; |
|
84 |
|
b.dst_q_fam = vk_q_fam_ignored; |
|
85 |
|
b.img = scaler_p.img.vk; |
|
86 |
|
r = &b.subrsrc_range; |
|
87 |
|
r->aspect = vk_img_aspect_color_bit; |
|
88 |
|
r->lvls_n = 1; |
|
89 |
|
r->array_layers_n = 1; |
|
90 |
|
img_mem_barrier_run_once(&b); |
|
91 |
|
//POUTVVK("scaler queue frame image:%p[%u]:transition to general layout successful\n", scaler_p.q_imgs[i].vk, i); |
|
92 |
|
} |
|
93 |
|
static void scaler_img_subrsrc_layout_get(void) |
|
94 |
|
{ |
|
95 |
|
struct vk_img_subrsrc_t s; |
|
96 |
|
|
|
97 |
|
memset(&s, 0, sizeof(s)); |
|
98 |
|
/* 1 subrsrc = uniq color plane of mip lvl 0 and array 0 */ |
|
99 |
|
s.aspect = vk_img_aspect_color_bit; |
|
100 |
|
vk_get_img_subrsrc_layout(scaler_p.img.vk, &s, &scaler_p.img.layout); |
|
101 |
|
} |
|
102 |
|
static void tmp_scaler_img_mem_rqmts_get(void) |
|
103 |
|
{ |
|
104 |
|
struct vk_img_mem_rqmts_info_t info; |
|
105 |
|
struct vk_mem_rqmts_t *rqmts; |
|
106 |
|
s32 r; |
|
107 |
|
|
|
108 |
|
memset(&info, 0, sizeof(info)); |
|
109 |
|
info.type = vk_struct_type_img_mem_rqmts_info; |
|
110 |
|
info.img = scaler_p.img.vk; |
|
111 |
|
rqmts = &tmp_mem_rqmts_l; |
|
112 |
|
memset(rqmts, 0, sizeof(*rqmts)); |
|
113 |
|
rqmts->type = vk_struct_type_mem_rqmts; |
|
114 |
|
vk_get_img_mem_rqmts(&info, rqmts); |
|
115 |
|
IF_FATALVVK("%d:device:%p:unable to get memory requirements for scaler image\n", r, npv_vk_surf_p.dev.vk); |
|
116 |
|
} |
|
117 |
|
#define WANTED_MEM_PROPS (vk_mem_prop_host_visible_bit \ |
|
118 |
|
| vk_mem_prop_host_cached_bit) |
|
119 |
|
#define IS_DEV_LOCAL(x) (((x)->prop_flags & vk_mem_prop_dev_local_bit) != 0) |
|
120 |
|
static bool match_mem_type(u8 mem_type_idx, |
|
121 |
|
struct vk_mem_rqmts_t *img_rqmts, bool ignore_gpu_is_discret) |
|
122 |
|
{ |
|
123 |
|
struct vk_mem_type_t *mem_type; |
|
124 |
|
|
|
125 |
|
/* first check this mem type is in our img rqmts */ |
|
126 |
|
if (((1 << mem_type_idx) & img_rqmts->core.mem_type_bits) == 0) |
|
127 |
|
return false; |
|
128 |
|
mem_type = &npv_vk_surf_p.dev.phydev.mem_types[mem_type_idx]; |
|
129 |
|
if (!ignore_gpu_is_discret) |
|
130 |
|
if (npv_vk_surf_p.dev.phydev.is_discret_gpu |
|
131 |
|
&& IS_DEV_LOCAL(mem_type)) |
|
132 |
|
return false; |
|
133 |
|
if ((mem_type->prop_flags & WANTED_MEM_PROPS) == WANTED_MEM_PROPS) |
|
134 |
|
return true; |
|
135 |
|
return false; |
|
136 |
|
} |
|
137 |
|
#undef WANTED_MEM_PROPS |
|
138 |
|
#undef IS_DEV_LOCAL |
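/*
 * worked example of match_mem_type()'s first test (illustration only, not
 * part of the patch): with img_rqmts->core.mem_type_bits == 0x0000000a,
 * only memory type indexes 1 and 3 are acceptable; (1 << 3) & 0x0a == 0x08
 * so index 3 passes, while (1 << 0) & 0x0a == 0 so index 0 is rejected
 * before the property flags are even looked at.
 */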
|
139 |
|
static bool try_alloc_scaler_img_dev_mem(struct vk_mem_rqmts_t *img_rqmts, |
|
140 |
|
u8 mem_type_idx) |
|
141 |
|
{ |
|
142 |
|
struct vk_mem_alloc_info_t info; |
|
143 |
|
s32 r; |
|
144 |
|
|
|
145 |
|
memset(&info, 0, sizeof(info)); |
|
146 |
|
info.type = vk_struct_type_mem_alloc_info; |
|
147 |
|
info.sz = img_rqmts->core.sz; |
|
148 |
|
info.mem_type_idx = mem_type_idx; |
|
149 |
|
vk_alloc_mem(&info, &scaler_p.img.dev_mem); |
|
150 |
|
if (r < 0) { |
|
151 |
|
WARNINGVVK("%d:device:%p:unable to allocate %lu bytes from physical dev %p memory type %u\n", r, npv_vk_surf_p.dev.vk, img_rqmts->core.sz, npv_vk_surf_p.dev.phydev.vk, mem_type_idx); |
|
152 |
|
return false; |
|
153 |
|
} |
|
154 |
|
//POUTVVK("device:%p:physical device:%p:scaler queue image:%u:%lu bytes allocated from memory type %u\n", npv_vk_surf_p.dev.vk, npv_vk_surf_p.dev.phydev.vk, i, img_rqmts->core.sz, mem_type_idx); |
|
155 |
|
return true; |
|
156 |
|
} |
|
157 |
|
/* |
|
158 |
|
* we are looking for host visible and host cached mem. on discret gpu we would |
|
159 |
|
* like non dev local mem in order to avoid wasting video ram. if we have
|
160 |
|
* a discret gpu but could not find a mem type without dev local mem, let's |
|
161 |
|
* retry with only host visible and host cached mem. |
|
162 |
|
*/ |
|
163 |
|
#define IGNORE_GPU_IS_DISCRET true |
|
164 |
|
static void scaler_img_dev_mem_alloc(void) |
|
165 |
|
{ |
|
166 |
|
struct vk_mem_rqmts_t *img_rqmts; |
|
167 |
|
u8 mem_type; |
|
168 |
|
|
|
169 |
|
img_rqmts = &tmp_mem_rqmts_l; |
|
170 |
|
mem_type = 0; |
|
171 |
|
loop { |
|
172 |
|
if (mem_type == npv_vk_surf_p.dev.phydev.mem_types_n) |
|
173 |
|
break; |
|
174 |
|
if (match_mem_type(mem_type, img_rqmts, !IGNORE_GPU_IS_DISCRET)) |
|
175 |
|
if (try_alloc_scaler_img_dev_mem(img_rqmts, mem_type)) |
|
176 |
|
return; |
|
177 |
|
++mem_type; |
|
178 |
|
} |
|
179 |
|
if (!npv_vk_surf_p.dev.phydev.is_discret_gpu) |
|
180 |
|
FATALVVK("physical device:%p:scaler image:unable to find proper memory type or to allocate memory\n", npv_vk_surf_p.dev.phydev.vk); |
|
181 |
|
/* |
|
182 |
|
* lookup again, but relax the match based on discret gpu constraint for |
|
183 |
|
* gpu |
|
184 |
|
*/ |
|
185 |
|
mem_type = 0; |
|
186 |
|
loop { |
|
187 |
|
if (mem_type == npv_vk_surf_p.dev.phydev.mem_types_n) |
|
188 |
|
break; |
|
189 |
|
if (match_mem_type(mem_type, img_rqmts, IGNORE_GPU_IS_DISCRET) |
|
190 |
|
&& try_alloc_scaler_img_dev_mem(img_rqmts, mem_type)) |
|
191 |
|
return; |
|
192 |
|
++mem_type; |
|
193 |
|
} |
|
194 |
|
FATALVVK("physical device:%p:unable to find proper memory type or to allocate memory\n", npv_vk_surf_p.dev.phydev.vk); |
|
195 |
|
} |
|
196 |
|
#undef IGNORE_GPU_IS_DISCRET |
|
197 |
|
static void scaler_img_dev_mem_bind(void) |
|
198 |
|
{ |
|
199 |
|
struct vk_bind_img_mem_info_t info; |
|
200 |
|
s32 r; |
|
201 |
|
|
|
202 |
|
memset(&info, 0, sizeof(info) * 1); |
|
203 |
|
info.type = vk_struct_type_bind_img_mem_info; |
|
204 |
|
info.img = scaler_p.img.vk; |
|
205 |
|
info.mem = scaler_p.img.dev_mem; |
|
206 |
|
/* |
|
207 |
|
* TODO: switch to vkBindImageMemory2 if extension in vk 1.1 for |
|
208 |
|
* consistency |
|
209 |
|
*/ |
|
210 |
|
vk_bind_img_mem(&info); |
|
211 |
|
IF_FATALVVK("%d:device:%p:scaler image:unable to bind device memory to image\n", r, npv_vk_surf_p.dev.vk); |
|
212 |
|
} |
|
213 |
|
static void scaler_img_dev_mem_map(void) |
|
214 |
|
{ |
|
215 |
|
s32 r; |
|
216 |
|
|
|
217 |
|
vk_map_mem(scaler_p.img.dev_mem, &scaler_p.img.data); |
|
218 |
|
IF_FATALVVK("%d:device:%p:scaler image:unable to map image memory\n", r, npv_vk_surf_p.dev.vk); |
|
219 |
|
} |
|
220 |
|
static void dec_a_grow(void) |
|
221 |
|
{ |
|
222 |
|
u16 new_idx; |
|
223 |
|
|
|
224 |
|
new_idx = dec_frs_p.n_max; |
|
225 |
|
dec_frs_p.a = realloc(dec_frs_p.a, sizeof(*dec_frs_p.a) |
|
226 |
|
* (dec_frs_p.n_max + 1)); |
|
227 |
|
if (dec_frs_p.a == 0) |
|
228 |
|
FATALV("unable to allocate memory for an additional pointer on a decoded frame reference\n"); |
|
229 |
|
dec_frs_p.priv_a = realloc(dec_frs_p.priv_a, |
|
230 |
|
sizeof(*dec_frs_p.priv_a) * (dec_frs_p.n_max + 1)); |
|
231 |
|
if (dec_frs_p.priv_a == 0) |
|
232 |
|
FATALV("unable to allocate memory for an additional private data for decoded frames\n"); |
|
233 |
|
dec_frs_p.a[new_idx] = avutil_video_fr_ref_alloc(); |
|
234 |
|
if (dec_frs_p.a[new_idx] == 0) |
|
235 |
|
FATALV("ffmpeg:unable to allocate a decoded frame reference\n"); |
|
236 |
|
++dec_frs_p.n_max; |
|
237 |
|
} |
|
238 |
|
/* extract a fr ref, shift the a, push it back at the e, and unref its bufs */ |
|
239 |
|
static void fr_drop(u16 fr) |
|
240 |
|
{ |
|
241 |
|
avutil_video_fr_ref_t *save; |
|
242 |
|
|
|
243 |
|
save = dec_frs_p.a[fr]; |
|
244 |
|
avutil_video_fr_unref(save); |
|
245 |
|
if (dec_frs_p.n > 1) { |
|
246 |
|
u16 e; |
|
247 |
|
|
|
248 |
|
e = dec_frs_p.n; |
|
249 |
|
memmove(&dec_frs_p.a[fr], &dec_frs_p.a[fr + 1], |
|
250 |
|
sizeof(*dec_frs_p.a) * (e - (fr + 1))); |
|
251 |
|
dec_frs_p.a[e - 1] = save; |
|
252 |
|
|
|
253 |
|
memmove(&dec_frs_p.priv_a[fr], &dec_frs_p.priv_a[fr + 1], |
|
254 |
|
sizeof(dec_frs_p.priv_a[fr]) * (e - (fr + 1))); |
|
255 |
|
memset(&dec_frs_p.priv_a[e - 1], 0, |
|
256 |
|
sizeof(dec_frs_p.priv_a[e - 1])); |
|
257 |
|
} |
|
258 |
|
dec_frs_p.n--; |
|
259 |
|
} |
|
260 |
|
static void frs_drop(s64 now) |
|
261 |
|
{ |
|
262 |
|
s64 low; |
|
263 |
|
s64 threshold; |
|
264 |
|
u16 fr; |
|
265 |
|
avformat_st_t *st; |
|
266 |
|
|
|
267 |
|
st = fmt_ctx_p->sts[st_idx_p]; |
|
268 |
|
|
|
269 |
|
/* audio can be late by 0.25s, and audio is 'now' */
|
270 |
|
threshold = (300 * 1000 * st->tb.num) / st->tb.den; |
|
271 |
|
low = now - threshold; |
|
272 |
|
fr = 0; |
|
273 |
|
loop { |
|
274 |
|
s64 pts; |
|
275 |
|
struct dec_fr_priv_t *fr_priv; |
|
276 |
|
|
|
277 |
|
if (dec_frs_p.n == fr) |
|
278 |
|
break; |
|
279 |
|
|
|
280 |
|
pts = dec_frs_p.a[fr]->pts; |
|
281 |
|
fr_priv = dec_frs_p.priv_a + fr; |
|
282 |
|
|
|
283 |
|
if ((fr != dec_frs_p.fr_being_scaled) && (pts < low)) |
|
284 |
|
fr_drop(fr); /* do not advance */ |
|
285 |
|
else |
|
286 |
|
++fr; |
|
287 |
|
} |
|
288 |
|
} |
|
289 |
|
#define NO_FR U16_MAX |
|
290 |
|
static u16 select_fr(s64 now) |
|
291 |
|
{ |
|
292 |
|
u16 fr; |
|
293 |
|
u16 selected_fr; |
|
294 |
|
u64 selected_fr_delta; |
|
295 |
|
|
|
296 |
|
fr = 0; |
|
297 |
|
selected_fr = NO_FR; |
|
298 |
|
selected_fr_delta = S64_MAX; |
|
299 |
|
loop { |
|
300 |
|
u64 delta; |
|
301 |
|
|
|
302 |
|
if (fr == dec_frs_p.n) |
|
303 |
|
break; |
|
304 |
|
delta = s64_abs(now - (s64)dec_frs_p.a[fr]->pts); |
|
305 |
|
if (delta < selected_fr_delta) { |
|
306 |
|
selected_fr = fr; |
|
307 |
|
selected_fr_delta = delta; |
|
308 |
|
} |
|
309 |
|
++fr; |
|
310 |
|
} |
|
311 |
|
return selected_fr; |
|
312 |
|
} |
|
313 |
|
#undef NO_FR |
|
314 |
|
static void frs_clear_last_qed_to_pe(void) |
|
315 |
|
{ |
|
316 |
|
u16 fr; |
|
317 |
|
|
|
318 |
|
fr = 0; |
|
319 |
|
loop { |
|
320 |
|
struct dec_fr_priv_t *fr_priv; |
|
321 |
|
|
|
322 |
|
if (fr == dec_frs_p.n) |
|
323 |
|
break; |
|
324 |
|
fr_priv = dec_frs_p.priv_a + fr; |
|
325 |
|
if (fr_priv->is_last_qed_to_pe) { |
|
326 |
|
fr_priv->is_last_qed_to_pe = false; |
|
327 |
|
break; |
|
328 |
|
} |
|
329 |
|
++fr; |
|
330 |
|
} |
|
331 |
|
} |
|
332 |
|
static void frs_reset(void) |
|
333 |
|
{ |
|
334 |
|
u16 fr; |
|
335 |
|
|
|
336 |
|
fr = 0; |
|
337 |
|
loop { |
|
338 |
|
if (fr == dec_frs_p.n) |
|
339 |
|
break; |
|
340 |
|
avutil_video_fr_unref(dec_frs_p.a[fr]); |
|
341 |
|
++fr; |
|
342 |
|
} |
|
343 |
|
memset(dec_frs_p.priv_a, 0, sizeof(*dec_frs_p.priv_a) * dec_frs_p.n); |
|
344 |
|
dec_frs_p.n = 0; |
|
345 |
|
} |
|
346 |
|
static void scaler_img_destroy(void) |
|
347 |
|
{ |
|
348 |
|
vk_destroy_img(scaler_p.img.vk); |
|
349 |
|
scaler_p.img.vk = 0; |
|
350 |
|
vk_unmap_mem(scaler_p.img.dev_mem); |
|
351 |
|
scaler_p.img.data = 0; |
|
352 |
|
vk_free_mem(scaler_p.img.dev_mem); |
|
353 |
|
scaler_p.img.dev_mem = 0; |
|
354 |
|
} |
|
355 |
|
static void blit_setup(u8 swpchn_img, bool scaler_dims_changed) |
|
356 |
|
{ |
|
357 |
|
s32 r; |
|
358 |
|
struct vk_cb_begin_info_t begin_info; |
|
359 |
|
struct vk_img_mem_barrier_t b; |
|
360 |
|
struct vk_img_blit_t region; |
|
361 |
|
|
|
362 |
|
if (!scaler_dims_changed |
|
363 |
|
&& blit_l[swpchn_img].viewport.width == npv_xcb_p.width |
|
364 |
|
&& blit_l[swpchn_img].viewport.height == npv_xcb_p.height) |
|
365 |
|
return; |
|
366 |
|
/* sync: may be in pending state? */ |
|
367 |
|
vk_reset_cb(npv_vk_surf_p.dev.cbs[swpchn_img]); |
|
368 |
|
IF_FATALVVK("%d:swapchain img:%u:command buffer:%p:unable reset\n", r, swpchn_img, npv_vk_surf_p.dev.cbs[swpchn_img]); |
|
369 |
|
/*--------------------------------------------------------------------*/ |
|
370 |
|
memset(&begin_info, 0, sizeof(begin_info)); |
|
371 |
|
begin_info.type = vk_struct_type_cb_begin_info; |
|
372 |
|
vk_begin_cb(npv_vk_surf_p.dev.cbs[swpchn_img], &begin_info); |
|
373 |
|
IF_FATALVVK("%d:swapchain img:%u:command buffer:%p:unable to begin recording\n", r, swpchn_img, npv_vk_surf_p.dev.cbs[swpchn_img]); |
|
374 |
|
/*--------------------------------------------------------------------*/ |
|
375 |
|
/* acquired img (undefined layout) to presentation layout */ |
|
376 |
|
memset(&b, 0, sizeof(b)); |
|
377 |
|
b.type = vk_struct_type_img_mem_barrier; |
|
378 |
|
b.old_layout = vk_img_layout_undefined; |
|
379 |
|
b.new_layout = vk_img_layout_present; |
|
380 |
|
b.src_q_fam = vk_q_fam_ignored; |
|
381 |
|
b.dst_q_fam = vk_q_fam_ignored; |
|
382 |
|
b.img = npv_vk_surf_p.dev.swpchn.imgs[swpchn_img]; |
|
383 |
|
b.subrsrc_range.aspect = vk_img_aspect_color_bit; |
|
384 |
|
b.subrsrc_range.lvls_n = 1; |
|
385 |
|
b.subrsrc_range.array_layers_n = 1; |
|
386 |
|
vk_cmd_pl_barrier(npv_vk_surf_p.dev.cbs[swpchn_img], &b); |
|
387 |
|
/*--------------------------------------------------------------------*/ |
|
388 |
|
/* blit from cpu img to pe img */ |
|
389 |
|
memset(®ion, 0, sizeof(region)); |
|
390 |
|
region.src_subrsrc.aspect = vk_img_aspect_color_bit; |
|
391 |
|
region.src_subrsrc.array_layers_n = 1; |
|
392 |
|
/* scaler */ |
|
393 |
|
region.src_offsets[1].x = scaler_p.ctx->cfg.width; |
|
394 |
|
region.src_offsets[1].y = scaler_p.ctx->cfg.height; |
|
395 |
|
region.dst_subrsrc.aspect = vk_img_aspect_color_bit; |
|
396 |
|
region.dst_subrsrc.array_layers_n = 1; |
|
397 |
|
/* xcb viewport */ |
|
398 |
|
region.dst_offsets[1].x = npv_xcb_p.width; |
|
399 |
|
region.dst_offsets[1].y = npv_xcb_p.height; |
|
400 |
|
vk_cmd_blit_img(npv_vk_surf_p.dev.cbs[swpchn_img], scaler_p.img.vk, |
|
401 |
|
npv_vk_surf_p.dev.swpchn.imgs[swpchn_img], ®ion); |
|
402 |
|
/*--------------------------------------------------------------------*/ |
|
403 |
|
vk_end_cb(npv_vk_surf_p.dev.cbs[swpchn_img]); |
|
404 |
|
IF_FATALVVK("%d:swapchain img:%u:command buffer:%p:unable to end recording\n", r, swpchn_img, npv_vk_surf_p.dev.cbs[swpchn_img]); |
|
405 |
|
/*--------------------------------------------------------------------*/ |
|
406 |
|
/* keep track in order to detect change */ |
|
407 |
|
blit_l[swpchn_img].viewport.width = npv_xcb_p.width; 
|
408 |
|
blit_l[swpchn_img].viewport.height = npv_xcb_p.height; 
|
409 |
|
} |
|
410 |
|
#define READY 0 |
|
411 |
|
#define NOT_READY 1 |
|
412 |
|
static u8 swpchn_next_img(u32 *swpchn_img) |
|
413 |
|
{ |
|
414 |
|
struct vk_acquire_next_img_info_t info; |
|
415 |
|
s32 r; |
|
416 |
|
|
|
417 |
|
memset(&info, 0, sizeof(info)); |
|
418 |
|
info.type = vk_struct_type_acquire_next_img_info; |
|
419 |
|
info.swpchn = npv_vk_surf_p.dev.swpchn.vk; |
|
420 |
|
info.timeout = 0; |
|
421 |
|
info.devs = 0x00000001; /* no device group, so the device mask is just 1 */ 
|
422 |
|
info.sem = npv_vk_surf_p.dev.sems[npv_vk_sem_acquire_img_done]; |
|
423 |
|
vk_acquire_next_img(&info, swpchn_img); |
|
424 |
|
if (r == vk_not_ready) |
|
425 |
|
return NOT_READY; |
|
426 |
|
IF_FATALVVK("%d:device:%p:unable to acquire next image from swapchain %p\n", r, npv_vk_surf_p.dev.vk, npv_vk_surf_p.dev.swpchn.vk); |
|
427 |
|
return READY; |
|
428 |
|
} |
|
429 |
|
#undef READY |
|
430 |
|
#undef NOT_READY |
|
431 |
|
static void send_to_pe(u32 swpchn_img) |
|
432 |
|
{ |
|
433 |
|
struct vk_submit_info_t submit_info; |
|
434 |
|
struct vk_present_info_t present_info; |
|
435 |
|
u32 wait_dst_stage; |
|
436 |
|
s32 r; |
|
437 |
|
u32 idxs[1]; |
|
438 |
|
/* run the command buffer and do present queue */ |
|
439 |
|
/*--------------------------------------------------------------------*/ |
|
440 |
|
memset(&submit_info, 0, sizeof(submit_info)); |
|
441 |
|
submit_info.type = vk_struct_type_submit_info; |
|
442 |
|
submit_info.wait_sems_n = 1; |
|
443 |
|
submit_info.wait_sems = |
|
444 |
|
&npv_vk_surf_p.dev.sems[npv_vk_sem_acquire_img_done]; |
|
445 |
|
wait_dst_stage = vk_pl_stage_bottom_of_pipe_bit; |
|
446 |
|
submit_info.wait_dst_stages = &wait_dst_stage; |
|
447 |
|
submit_info.cbs_n = 1; |
|
448 |
|
submit_info.cbs = &npv_vk_surf_p.dev.cbs[swpchn_img]; |
|
449 |
|
submit_info.signal_sems_n = 1; |
|
450 |
|
submit_info.signal_sems = &npv_vk_surf_p.dev.sems[npv_vk_sem_blit_done]; |
|
451 |
|
vk_q_submit(&submit_info); |
|
452 |
|
IF_FATALVVK("%d:queue:%p:unable to submit the image pre-recorded command buffer\n", r, npv_vk_surf_p.dev.q); |
|
453 |
|
/*--------------------------------------------------------------------*/ |
|
454 |
|
idxs[0] = swpchn_img; |
|
455 |
|
memset(&present_info, 0, sizeof(present_info)); |
|
456 |
|
present_info.type = vk_struct_type_present_info; |
|
457 |
|
present_info.wait_sems_n = 1; |
|
458 |
|
present_info.wait_sems = &npv_vk_surf_p.dev.sems[npv_vk_sem_blit_done]; |
|
459 |
|
present_info.swpchns_n = 1; |
|
460 |
|
present_info.swpchns = &npv_vk_surf_p.dev.swpchn.vk; |
|
461 |
|
present_info.idxs = idxs; |
|
462 |
|
present_info.results = 0; |
|
463 |
|
vk_q_present(&present_info); |
|
464 |
|
IF_FATALVVK("%d:queue:%p:unable to submit the image %u to the presentation engine\n", r, npv_vk_surf_p.dev.q, swpchn_img); |
|
465 |
|
/*--------------------------------------------------------------------*/ |
|
466 |
|
} |
|
467 |
|
static void start_scaling(u8 fr, bool *scaler_dims_changed) |
|
468 |
|
{ |
|
469 |
|
u32 scaled_line_bytes_n; |
|
470 |
|
|
|
471 |
|
if (scaler_p.ctx->cfg.width != dec_frs_p.a[fr]->width |
|
472 |
|
|| scaler_p.ctx->cfg.height != dec_frs_p.a[fr]->height) { |
|
473 |
|
if (scaler_p.img.vk != 0) |
|
474 |
|
scaler_img_destroy(); |
|
475 |
|
scaler_img_create(fr); |
|
476 |
|
scaler_img_layout_to_general(); |
|
477 |
|
scaler_img_subrsrc_layout_get(); |
|
478 |
|
tmp_scaler_img_mem_rqmts_get(); |
|
479 |
|
scaler_img_dev_mem_alloc(); |
|
480 |
|
scaler_img_dev_mem_bind(); |
|
481 |
|
scaler_img_dev_mem_map(); |
|
482 |
|
|
|
483 |
|
*scaler_dims_changed = true; |
|
484 |
|
scaler_p.ctx->cfg.width = dec_frs_p.a[fr]->width; |
|
485 |
|
scaler_p.ctx->cfg.height = dec_frs_p.a[fr]->height; |
|
486 |
|
} else |
|
487 |
|
*scaler_dims_changed = false; |
|
488 |
|
scaler_p.ctx->cfg.src_fmt = dec_frs_p.a[fr]->fmt; |
|
489 |
|
scaler_p.ctx->cfg.dst_fmt = AVUTIL_PIX_FMT_RGB32; |
|
490 |
|
scaler_p.ctx->cfg.flags = SWS_POINT; /* | SWS_PRINT_INFO */ |
|
491 |
|
|
|
492 |
|
scaled_line_bytes_n = (u32)scaler_p.img.layout.row_pitch; |
|
493 |
|
scaler_p.ctx->scale.src_slices = dec_frs_p.a[fr]->data; |
|
494 |
|
scaler_p.ctx->scale.src_strides = dec_frs_p.a[fr]->linesize; |
|
495 |
|
scaler_p.ctx->scale.dst_slice = scaler_p.img.data; |
|
496 |
|
scaler_p.ctx->scale.dst_stride = scaled_line_bytes_n; |
|
497 |
|
thdsws_run(scaler_p.ctx); |
|
498 |
|
dec_frs_p.fr_being_scaled = fr; |
|
499 |
|
} |
|
500 |
|
static void timer_ack(void) |
|
501 |
|
{ |
|
502 |
|
int r; |
|
503 |
|
uint64_t exps_n; |
|
504 |
|
|
|
505 |
|
exps_n = 0; |
|
506 |
|
r = read(timer_fd_p, &exps_n, sizeof(exps_n)); |
|
507 |
|
if (r == -1) |
|
508 |
|
FATALV("unable to read the number of timer expirations\n"); |
|
509 |
|
} |
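As a side note, the selection rule implemented by select_fr() above (take the decoded frame whose pts is closest to 'now') can be summarized by the following standalone sketch; it uses plain <stdint.h> types in place of the u16/s64 helpers of the patch:

/* standalone sketch of the select_fr() rule: the frame with the pts
 * closest to 'now' wins; UINT16_MAX means "no frame" */
#include <stdint.h>
static uint16_t nearest_fr(const int64_t *pts, uint16_t frs_n, int64_t now)
{
	uint16_t fr, selected_fr = UINT16_MAX;
	uint64_t selected_fr_delta = UINT64_MAX;

	for (fr = 0; fr < frs_n; ++fr) {
		uint64_t delta = (uint64_t)(now >= pts[fr] ? now - pts[fr]
							   : pts[fr] - now);
		if (delta < selected_fr_delta) {
			selected_fr = fr;
			selected_fr_delta = delta;
		}
	}
	return selected_fr;
}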
File npv/vk/api_usage.h added (mode: 100644) (index 0000000..78723c6) |
|
1 |
|
#ifndef NPV_VK_API_USAGE_H |
|
2 |
|
#define NPV_VK_API_USAGE_H |
|
3 |
|
/* |
|
4 |
|
* this is public domain without any warranties of any kind |
|
5 |
|
* Sylvain BERTRAND |
|
6 |
|
*/ |
|
7 |
|
/* |
|
8 |
|
* this is the simplification and tailoring of the vk api for the specific
|
9 |
|
* uses of npv |
|
10 |
|
*/ |
|
11 |
|
#define FATALVK(fmt, ...) FATAL("vulkan:" fmt, ##__VA_ARGS__) |
|
12 |
|
#define WARNINGVK(fmt, ...) WARNING("vulkan:" fmt, ##__VA_ARGS__) |
|
13 |
|
#define POUTVK(fmt, ...) POUT("vulkan:" fmt, ##__VA_ARGS__) |
|
14 |
|
|
|
15 |
|
#define IF_FATALVK(fmt, ...) \ |
|
16 |
|
if (r < 0) \ |
|
17 |
|
FATALVK(fmt, ##__VA_ARGS__) |
|
18 |
|
/*----------------------------------------------------------------------------*/ |
|
19 |
|
#define vk_get_dev_q() \ |
|
20 |
|
npv_vk_surf_p.dev.dl_vk_get_dev_q(npv_vk_surf_p.dev.vk, npv_vk_surf_p.dev.phydev.q_fam, 0, &npv_vk_surf_p.dev.q) |
|
21 |
|
|
|
22 |
|
#define vk_create_cp(info) \ |
|
23 |
|
r = npv_vk_surf_p.dev.dl_vk_create_cp(npv_vk_surf_p.dev.vk, info, 0, &npv_vk_surf_p.dev.cp) |
|
24 |
|
|
|
25 |
|
#define vk_create_swpchn(info) \ |
|
26 |
|
r = npv_vk_surf_p.dev.dl_vk_create_swpchn(npv_vk_surf_p.dev.vk, info, 0, &npv_vk_surf_p.dev.swpchn.vk) |
|
27 |
|
|
|
28 |
|
#define vk_get_swpchn_imgs() \ |
|
29 |
|
r = npv_vk_surf_p.dev.dl_vk_get_swpchn_imgs(npv_vk_surf_p.dev.vk, npv_vk_surf_p.dev.swpchn.vk, &npv_vk_surf_p.dev.swpchn.imgs_n, npv_vk_surf_p.dev.swpchn.imgs) |
|
30 |
|
|
|
31 |
|
#define vk_create_img(info, img) \ |
|
32 |
|
r = npv_vk_surf_p.dev.dl_vk_create_img(npv_vk_surf_p.dev.vk, info, 0, img) |
|
33 |
|
|
|
34 |
|
#define vk_destroy_img(img) \ |
|
35 |
|
npv_vk_surf_p.dev.dl_vk_destroy_img(npv_vk_surf_p.dev.vk, img, 0) |
|
36 |
|
|
|
37 |
|
#define vk_get_img_mem_rqmts(...) \ |
|
38 |
|
r = npv_vk_surf_p.dev.dl_vk_get_img_mem_rqmts(npv_vk_surf_p.dev.vk,##__VA_ARGS__) |
|
39 |
|
|
|
40 |
|
#define vk_alloc_mem(info, dev_mem) \ |
|
41 |
|
r = npv_vk_surf_p.dev.dl_vk_alloc_mem(npv_vk_surf_p.dev.vk, info, 0, dev_mem) |
|
42 |
|
|
|
43 |
|
#define vk_free_mem(dev_mem) \ |
|
44 |
|
npv_vk_surf_p.dev.dl_vk_free_mem(npv_vk_surf_p.dev.vk, dev_mem, 0) |
|
45 |
|
|
|
46 |
|
#define vk_bind_img_mem(infos) \ |
|
47 |
|
r = npv_vk_surf_p.dev.dl_vk_bind_img_mem(npv_vk_surf_p.dev.vk, 1, infos) |
|
48 |
|
|
|
49 |
|
#define vk_map_mem(dev_mem, data) \ |
|
50 |
|
r = npv_vk_surf_p.dev.dl_vk_map_mem(npv_vk_surf_p.dev.vk, dev_mem, 0, vk_whole_sz, 0, data) |
|
51 |
|
|
|
52 |
|
#define vk_unmap_mem(dev_mem) \ |
|
53 |
|
npv_vk_surf_p.dev.dl_vk_unmap_mem(npv_vk_surf_p.dev.vk, dev_mem) |
|
54 |
|
|
|
55 |
|
#define vk_alloc_cbs(info) \ |
|
56 |
|
r = npv_vk_surf_p.dev.dl_vk_alloc_cbs(npv_vk_surf_p.dev.vk, info, npv_vk_surf_p.dev.cbs) |
|
57 |
|
|
|
58 |
|
#define vk_begin_cb(...) \ |
|
59 |
|
r = npv_vk_surf_p.dev.dl_vk_begin_cb(__VA_ARGS__) |
|
60 |
|
|
|
61 |
|
#define vk_end_cb(...) \ |
|
62 |
|
r = npv_vk_surf_p.dev.dl_vk_end_cb(__VA_ARGS__) |
|
63 |
|
|
|
64 |
|
#define vk_cmd_pl_barrier(cb, b) \ |
|
65 |
|
npv_vk_surf_p.dev.dl_vk_cmd_pl_barrier(cb, vk_pl_stage_top_of_pipe_bit, vk_pl_stage_top_of_pipe_bit, 0, 0, 0, 0, 0, 1, b) |
|
66 |
|
|
|
67 |
|
#define vk_q_submit(info) \ |
|
68 |
|
r = npv_vk_surf_p.dev.dl_vk_q_submit(npv_vk_surf_p.dev.q, 1, info, 0) |
|
69 |
|
|
|
70 |
|
#define vk_q_wait_idle() \ |
|
71 |
|
r = npv_vk_surf_p.dev.dl_vk_q_wait_idle(npv_vk_surf_p.dev.q) |
|
72 |
|
|
|
73 |
|
#define vk_get_img_subrsrc_layout(...) \ |
|
74 |
|
npv_vk_surf_p.dev.dl_vk_get_img_subrsrc_layout(npv_vk_surf_p.dev.vk, ##__VA_ARGS__) |
|
75 |
|
|
|
76 |
|
#define vk_acquire_next_img(...) \ |
|
77 |
|
r = npv_vk_surf_p.dev.dl_vk_acquire_next_img(npv_vk_surf_p.dev.vk,##__VA_ARGS__) |
|
78 |
|
|
|
79 |
|
#define vk_reset_cb(cb) \ |
|
80 |
|
r = npv_vk_surf_p.dev.dl_vk_reset_cb(cb, 0) |
|
81 |
|
|
|
82 |
|
#define vk_cmd_blit_img(cb, src_img, dst_img, region) \ |
|
83 |
|
npv_vk_surf_p.dev.dl_vk_cmd_blit_img(cb, src_img, vk_img_layout_general, dst_img, vk_img_layout_present, 1, region, 0) |
|
84 |
|
|
|
85 |
|
#define vk_q_present(info) \ |
|
86 |
|
r = npv_vk_surf_p.dev.dl_vk_q_present(npv_vk_surf_p.dev.q, info) |
|
87 |
|
|
|
88 |
|
#define vk_create_sem(info, sem) \ |
|
89 |
|
r = npv_vk_surf_p.dev.dl_vk_create_sem(npv_vk_surf_p.dev.vk, info, 0, sem) |
|
90 |
|
/******************************************************************************/ |
|
91 |
|
/* cherry picked from nyanvk/syms_global.h */ |
|
92 |
|
#define VK_GLOBAL_SYMS \ |
|
93 |
|
static void *(*dl_vk_get_instance_proc_addr)(struct vk_instance_t *instance, u8 *name); \ |
|
94 |
|
static void *(*dl_vk_get_dev_proc_addr)(struct vk_dev_t *dev, u8 *name); \ |
|
95 |
|
static s32 (*dl_vk_enumerate_instance_version)(u32 *version); \ |
|
96 |
|
static s32 (*dl_vk_enumerate_instance_layer_props)( \ |
|
97 |
|
u32 *props_n, \ |
|
98 |
|
struct vk_layer_props_t *props); \ |
|
99 |
|
static s32 (*dl_vk_enumerate_instance_ext_props)( \ |
|
100 |
|
u8 *layer_name, \ |
|
101 |
|
u32 *props_n, \ |
|
102 |
|
struct vk_ext_props_t *props); \ |
|
103 |
|
static s32 (*dl_vk_create_instance)( \ |
|
104 |
|
struct vk_instance_create_info_t *info, \ |
|
105 |
|
void *allocator, \ |
|
106 |
|
struct vk_instance_t **instance); \ |
|
107 |
|
static s32 (*dl_vk_enumerate_phydevs)( \ |
|
108 |
|
struct vk_instance_t *instance, \ |
|
109 |
|
u32 *phydevs_n, \ |
|
110 |
|
struct vk_phydev_t **phydevs); \ |
|
111 |
|
static s32 (*dl_vk_enumerate_dev_ext_props)( \ |
|
112 |
|
struct vk_phydev_t *phydev, \ |
|
113 |
|
u8 *layer_name, \ |
|
114 |
|
u32 *props_n, \ |
|
115 |
|
struct vk_ext_props_t *props); \ |
|
116 |
|
static void (*dl_vk_get_phydev_props)( \ |
|
117 |
|
struct vk_phydev_t *phydev, \ |
|
118 |
|
struct vk_phydev_props_t *props); \ |
|
119 |
|
static s32 (*dl_vk_create_dev)( \ |
|
120 |
|
struct vk_phydev_t *phydev, \ |
|
121 |
|
struct vk_dev_create_info_t *create_info, \ |
|
122 |
|
void *allocator, \ |
|
123 |
|
struct vk_dev_t **dev); \ |
|
124 |
|
static void (*dl_vk_get_phydev_q_fam_props)( \ |
|
125 |
|
struct vk_phydev_t *phydev, \ |
|
126 |
|
u32 *q_fam_props_n, \ |
|
127 |
|
struct vk_q_fam_props_t *props); \ |
|
128 |
|
static s32 (*dl_vk_create_xcb_surf)( \ |
|
129 |
|
struct vk_instance_t *instance, \ |
|
130 |
|
struct vk_xcb_surf_create_info_t *info, \ |
|
131 |
|
void *allocator, \ |
|
132 |
|
struct vk_surf_t **surf); \ |
|
133 |
|
static void (*dl_vk_destroy_surf)(\ |
|
134 |
|
struct vk_instance_t *instance,\ |
|
135 |
|
struct vk_surf_t *surf,\ |
|
136 |
|
void *allocator); \ |
|
137 |
|
static s32 (*dl_vk_get_phydev_surf_support)( \ |
|
138 |
|
struct vk_phydev_t *phydev, \ |
|
139 |
|
u32 q_fam, \ |
|
140 |
|
struct vk_surf_t *surf, \ |
|
141 |
|
u32 *supported); \ |
|
142 |
|
static s32 (*dl_vk_get_phydev_surf_texel_mem_blk_confs)( \ |
|
143 |
|
struct vk_phydev_t *phydev, \ |
|
144 |
|
struct vk_phydev_surf_info_t *info, \ |
|
145 |
|
u32 *confs_n, \ |
|
146 |
|
struct vk_surf_texel_mem_blk_conf_t *confs); \ |
|
147 |
|
static void (*dl_vk_get_phydev_mem_props)( \ |
|
148 |
|
struct vk_phydev_t *phydev, \ |
|
149 |
|
struct vk_phydev_mem_props_t *props); \ |
|
150 |
|
static s32 (*dl_vk_get_phydev_surf_caps)( \ |
|
151 |
|
struct vk_phydev_t *phydev, \ |
|
152 |
|
struct vk_phydev_surf_info_t *info, \ |
|
153 |
|
struct vk_surf_caps_t *caps); \ |
|
154 |
|
static s32 (*dl_vk_get_phydev_surf_present_modes)( \ |
|
155 |
|
struct vk_phydev_t *phydev, \ |
|
156 |
|
struct vk_surf_t *surf, \ |
|
157 |
|
u32 *modes_n, \ |
|
158 |
|
u32 *modes); |
|
159 |
|
/******************************************************************************/ |
|
160 |
|
#define vk_get_instance_proc_addr dl_vk_get_instance_proc_addr |
|
161 |
|
|
|
162 |
|
#define vk_get_dev_proc_addr dl_vk_get_dev_proc_addr |
|
163 |
|
|
|
164 |
|
#define vk_enumerate_instance_version \ |
|
165 |
|
r = dl_vk_enumerate_instance_version |
|
166 |
|
|
|
167 |
|
#define vk_enumerate_instance_layer_props \ |
|
168 |
|
r = dl_vk_enumerate_instance_layer_props |
|
169 |
|
|
|
170 |
|
#define vk_enumerate_instance_ext_props(...) \ |
|
171 |
|
r = dl_vk_enumerate_instance_ext_props(0,##__VA_ARGS__) |
|
172 |
|
|
|
173 |
|
#define vk_create_instance(info) \ |
|
174 |
|
r = dl_vk_create_instance(info, 0, &npv_vk_instance_l) |
|
175 |
|
|
|
176 |
|
#define vk_enumerate_phydevs(...) \ |
|
177 |
|
r = dl_vk_enumerate_phydevs(npv_vk_instance_l,##__VA_ARGS__) |
|
178 |
|
|
|
179 |
|
#define vk_enumerate_dev_ext_props(phydev, props_n, props) \ |
|
180 |
|
r = dl_vk_enumerate_dev_ext_props(phydev, 0, props_n, props) |
|
181 |
|
|
|
182 |
|
#define vk_get_phydev_props dl_vk_get_phydev_props |
|
183 |
|
|
|
184 |
|
#define vk_create_dev(info) \ |
|
185 |
|
r = dl_vk_create_dev(npv_vk_surf_p.dev.phydev.vk, info, 0, &npv_vk_surf_p.dev.vk) |
|
186 |
|
|
|
187 |
|
#define vk_get_phydev_q_fam_props dl_vk_get_phydev_q_fam_props |
|
188 |
|
|
|
189 |
|
#define vk_create_xcb_surf(info) \ |
|
190 |
|
r = dl_vk_create_xcb_surf(npv_vk_instance_l, info, 0, &npv_vk_surf_p.vk) |
|
191 |
|
|
|
192 |
|
#define vk_get_phydev_surf_support(phydev, q_fam, supported) \ |
|
193 |
|
r = dl_vk_get_phydev_surf_support(phydev, q_fam, npv_vk_surf_p.vk, supported) |
|
194 |
|
|
|
195 |
|
#define vk_get_phydev_surf_texel_mem_blk_confs(info, ...) \ |
|
196 |
|
r = dl_vk_get_phydev_surf_texel_mem_blk_confs(npv_vk_surf_p.dev.phydev.vk, info, ##__VA_ARGS__) |
|
197 |
|
|
|
198 |
|
#define vk_get_phydev_mem_props dl_vk_get_phydev_mem_props |
|
199 |
|
|
|
200 |
|
#define vk_get_phydev_surf_caps(info, caps) \ |
|
201 |
|
r = dl_vk_get_phydev_surf_caps(npv_vk_surf_p.dev.phydev.vk, info, caps) |
|
202 |
|
|
|
203 |
|
#define vk_get_phydev_surf_present_modes() \ |
|
204 |
|
r = dl_vk_get_phydev_surf_present_modes(npv_vk_surf_p.dev.phydev.vk, npv_vk_surf_p.vk, &npv_vk_tmp_present_modes_n_l, npv_vk_tmp_present_modes_l) |
|
205 |
|
#endif |
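To make the call sites elsewhere in the patch easier to read: each fallible wrapper above assigns the result of the dl_* function pointer to a local r, which the caller then checks. For example, vk_create_img(&info, &scaler_p.img.vk) in the video code expands to:

	r = npv_vk_surf_p.dev.dl_vk_create_img(npv_vk_surf_p.dev.vk, &info, 0,
		&scaler_p.img.vk);

which is why the call in scaler_img_create() is immediately followed by an IF_FATALVVK check on r.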
File npv/vk/local/code.frag.c added (mode: 100644) (index 0000000..b08fe08) |
|
1 |
|
#define INSTANCE_STATIC_SYM(x,y) \ |
|
2 |
|
dl_##y = vk_get_instance_proc_addr(0, #x); \ |
|
3 |
|
if (dl_##y == 0) \ |
|
4 |
|
FATALVK("unable to find vulkan " #x "\n"); |
|
5 |
|
static void instance_static_syms(void) |
|
6 |
|
{ |
|
7 |
|
INSTANCE_STATIC_SYM(vkEnumerateInstanceVersion, |
|
8 |
|
vk_enumerate_instance_version); |
|
9 |
|
INSTANCE_STATIC_SYM(vkEnumerateInstanceExtensionProperties, |
|
10 |
|
vk_enumerate_instance_ext_props); |
|
11 |
|
INSTANCE_STATIC_SYM(vkEnumerateInstanceLayerProperties, |
|
12 |
|
vk_enumerate_instance_layer_props); |
|
13 |
|
INSTANCE_STATIC_SYM(vkCreateInstance, vk_create_instance); |
|
14 |
|
} |
|
15 |
|
#undef INSTANCE_STATIC_SYM |
|
16 |
|
/*----------------------------------------------------------------------------*/ |
|
17 |
|
#define INSTANCE_SYM(x,y) \ |
|
18 |
|
dl_##y = vk_get_instance_proc_addr(instance_l, #x); \ |
|
19 |
|
if (dl_##y == 0) \ |
|
20 |
|
FATALVK("unable to find vulkan " #x "\n"); |
|
21 |
|
static void instance_syms(void) |
|
22 |
|
{ |
|
23 |
|
INSTANCE_SYM(vkEnumeratePhysicalDevices, vk_enumerate_phydevs); |
|
24 |
|
INSTANCE_SYM(vkEnumerateDeviceExtensionProperties, |
|
25 |
|
vk_enumerate_dev_ext_props); |
|
26 |
|
INSTANCE_SYM(vkGetPhysicalDeviceProperties2, vk_get_phydev_props); |
|
27 |
|
INSTANCE_SYM(vkGetPhysicalDeviceQueueFamilyProperties2, |
|
28 |
|
vk_get_phydev_q_fam_props); |
|
29 |
|
INSTANCE_SYM(vkCreateDevice, vk_create_dev); |
|
30 |
|
/* wsi related -------------------------------------------------------*/ |
|
31 |
|
INSTANCE_SYM(vkGetPhysicalDeviceSurfaceSupportKHR, |
|
32 |
|
vk_get_phydev_surf_support); |
|
33 |
|
INSTANCE_SYM(vkGetPhysicalDeviceSurfaceFormats2KHR, |
|
34 |
|
vk_get_phydev_surf_texel_mem_blk_confs); |
|
35 |
|
INSTANCE_SYM(vkCreateXcbSurfaceKHR, vk_create_xcb_surf); |
|
36 |
|
INSTANCE_SYM(vkGetPhysicalDeviceMemoryProperties2, |
|
37 |
|
vk_get_phydev_mem_props); |
|
38 |
|
INSTANCE_SYM(vkGetPhysicalDeviceSurfaceCapabilities2KHR, |
|
39 |
|
vk_get_phydev_surf_caps); |
|
40 |
|
INSTANCE_SYM(vkGetPhysicalDeviceSurfacePresentModesKHR, |
|
41 |
|
vk_get_phydev_surf_present_modes); |
|
42 |
|
/*--------------------------------------------------------------------*/ |
|
43 |
|
} |
|
44 |
|
#undef INSTANCE_SYM |
|
45 |
|
/*----------------------------------------------------------------------------*/ |
|
46 |
|
#define DEV_SYM(x,y) \ |
|
47 |
|
surf_p.dev.dl_##y = vk_get_dev_proc_addr(surf_p.dev.vk, #x); \ |
|
48 |
|
if (surf_p.dev.dl_##y == 0) \ |
|
49 |
|
FATALVK("unable to find vulkan device " #x "\n"); |
|
50 |
|
static void dev_syms(void) |
|
51 |
|
{ |
|
52 |
|
DEV_SYM(vkGetDeviceQueue, vk_get_dev_q); |
|
53 |
|
DEV_SYM(vkCreateCommandPool, vk_create_cp); |
|
54 |
|
DEV_SYM(vkCreateSwapchainKHR, vk_create_swpchn); |
|
55 |
|
DEV_SYM(vkGetSwapchainImagesKHR, vk_get_swpchn_imgs); |
|
56 |
|
DEV_SYM(vkCreateImage, vk_create_img); |
|
57 |
|
DEV_SYM(vkDestroyImage, vk_destroy_img); |
|
58 |
|
DEV_SYM(vkGetImageMemoryRequirements2KHR, vk_get_img_mem_rqmts); |
|
59 |
|
DEV_SYM(vkAllocateMemory, vk_alloc_mem); |
|
60 |
|
DEV_SYM(vkFreeMemory, vk_free_mem); |
|
61 |
|
DEV_SYM(vkBindImageMemory2KHR, vk_bind_img_mem); |
|
62 |
|
DEV_SYM(vkMapMemory, vk_map_mem); |
|
63 |
|
DEV_SYM(vkUnmapMemory, vk_unmap_mem); |
|
64 |
|
DEV_SYM(vkAllocateCommandBuffers, vk_alloc_cbs); |
|
65 |
|
DEV_SYM(vkBeginCommandBuffer, vk_begin_cb); |
|
66 |
|
DEV_SYM(vkEndCommandBuffer, vk_end_cb); |
|
67 |
|
DEV_SYM(vkCmdPipelineBarrier, vk_cmd_pl_barrier); |
|
68 |
|
DEV_SYM(vkQueueSubmit, vk_q_submit); |
|
69 |
|
DEV_SYM(vkQueueWaitIdle, vk_q_wait_idle); |
|
70 |
|
DEV_SYM(vkGetImageSubresourceLayout, vk_get_img_subrsrc_layout); |
|
71 |
|
DEV_SYM(vkAcquireNextImage2KHR, vk_acquire_next_img); |
|
72 |
|
DEV_SYM(vkResetCommandBuffer, vk_reset_cb); |
|
73 |
|
DEV_SYM(vkCmdBlitImage, vk_cmd_blit_img); |
|
74 |
|
DEV_SYM(vkQueuePresentKHR, vk_q_present); |
|
75 |
|
DEV_SYM(vkCreateSemaphore, vk_create_sem); |
|
76 |
|
} |
|
77 |
|
#undef DEV_SYM 
|
78 |
|
/*----------------------------------------------------------------------------*/ |
|
79 |
|
#define DLSYM(x, y) \ |
|
80 |
|
dl_##y = dlsym(loader_l, #x); \ |
|
81 |
|
if (dl_##y == 0) \ |
|
82 |
|
FATALVK("%s:unable to find " #x "\n", dlerror()); |
|
83 |
|
static void loader_syms(void) |
|
84 |
|
{ |
|
85 |
|
DLSYM(vkGetInstanceProcAddr, vk_get_instance_proc_addr); |
|
86 |
|
DLSYM(vkGetDeviceProcAddr, vk_get_dev_proc_addr); |
|
87 |
|
} |
|
88 |
|
#undef DLSYM |
|
89 |
|
/*----------------------------------------------------------------------------*/ |
|
90 |
|
/*NSPC*/ |
|
91 |
|
static void load_vk_loader(void) |
|
92 |
|
{ |
|
93 |
|
/* no '/' in the shared dynamic lib path name, so dlopen does the standard lookup */ 
|
94 |
|
loader_l = dlopen("libvulkan.so.1", RTLD_LAZY); |
|
95 |
|
if (loader_l == 0) |
|
96 |
|
FATALVK("%s:unable to load the vulkan loader dynamic shared library\n", dlerror()); |
|
97 |
|
} |
|
98 |
|
/*NSPC*/ |
|
99 |
|
static void check_vk_version(void) |
|
100 |
|
{ |
|
101 |
|
u32 api_version; |
|
102 |
|
s32 r; |
|
103 |
|
|
|
104 |
|
vk_enumerate_instance_version(&api_version); |
|
105 |
|
if (r != vk_success) |
|
106 |
|
FATALVK("%d:unable to enumerate instance version\n", r); |
|
107 |
|
POUTVK("vulkan instance version %#x = %u.%u.%u\n", api_version, VK_VERSION_MAJOR(api_version), VK_VERSION_MINOR(api_version), VK_VERSION_PATCH(api_version)); |
|
108 |
|
if (VK_VERSION_MAJOR(api_version) == 1 |
|
109 |
|
&& VK_VERSION_MINOR(api_version) == 0) |
|
110 |
|
FATALVK("instance version too old\n"); |
|
111 |
|
} |
|
112 |
|
#define EXTS_N_MAX 256 |
|
113 |
|
/*NSPC*/ |
|
114 |
|
/* in theory, this could change on the fly */ |
|
115 |
|
static void instance_exts_dump(void) |
|
116 |
|
{ |
|
117 |
|
struct vk_ext_props_t exts[EXTS_N_MAX]; |
|
118 |
|
u32 n; |
|
119 |
|
s32 r; |
|
120 |
|
|
|
121 |
|
memset(exts, 0, sizeof(exts)); |
|
122 |
|
n = EXTS_N_MAX; |
|
123 |
|
vk_enumerate_instance_ext_props(&n, exts); |
|
124 |
|
if (r != vk_success && r != vk_incomplete) { |
|
125 |
|
WARNINGVK("%d:unable to enumerate instance extension(s)\n", r); |
|
126 |
|
return; |
|
127 |
|
} |
|
128 |
|
if (r == vk_incomplete) { |
|
129 |
|
WARNINGVK("too many extensions (%u/%u), dumping disabled", n, EXTS_N_MAX); |
|
130 |
|
return; |
|
131 |
|
} |
|
132 |
|
/* vk_success */ |
|
133 |
|
POUTVK("have %u instance extension(s)\n", n); |
|
134 |
|
loop { |
|
135 |
|
if (n == 0) |
|
136 |
|
break; |
|
137 |
|
POUTVK("instance extension:name=%s:specification version=%u\n", exts[n - 1].name, exts[n - 1].spec_version); |
|
138 |
|
n--; |
|
139 |
|
} |
|
140 |
|
} |
|
141 |
|
#undef EXTS_N_MAX |
|
142 |
|
#define LAYERS_N_MAX 32 |
|
143 |
|
/*NSPC*/ |
|
144 |
|
/* in theory, this could change on the fly */ |
|
145 |
|
static void instance_layers_dump(void) |
|
146 |
|
{ |
|
147 |
|
struct vk_layer_props_t layers[LAYERS_N_MAX]; |
|
148 |
|
u32 n; |
|
149 |
|
s32 r; |
|
150 |
|
|
|
151 |
|
memset(layers, 0, sizeof(layers)); |
|
152 |
|
n = LAYERS_N_MAX; |
|
153 |
|
vk_enumerate_instance_layer_props(&n, layers); |
|
154 |
|
if (r != vk_success && r != vk_incomplete) { |
|
155 |
|
WARNINGVK("%d:unable to enumerate instance layer(s)\n", r); |
|
156 |
|
return; |
|
157 |
|
} |
|
158 |
|
if (r == vk_incomplete) { |
|
159 |
|
WARNINGVK("too many layers (%u/%u), dumping disabled", n, LAYERS_N_MAX); |
|
160 |
|
return; |
|
161 |
|
} |
|
162 |
|
/* vk_success */ |
|
163 |
|
POUTVK("have %u instance layer(s)\n", n); |
|
164 |
|
loop { |
|
165 |
|
if (n == 0) |
|
166 |
|
break; |
|
167 |
|
POUTVK("instance layer:%u:name=%s:specification version=%u:implementation version=%u:description=%s\n", n, layers[n].name, layers[n].spec_version, layers[n].implementation_version, layers[n].desc); |
|
168 |
|
n--; |
|
169 |
|
} |
|
170 |
|
} |
|
171 |
|
#undef LAYERS_N_MAX |
|
172 |
|
/*NSPC*/ |
|
173 |
|
static void instance_create(void) |
|
174 |
|
{ |
|
175 |
|
s32 r; |
|
176 |
|
struct vk_instance_create_info_t info; |
|
177 |
|
static u8 *exts[] = { |
|
178 |
|
/* |
|
179 |
|
* TODO: there is a shabby (because of the mess of pixel fmts),
|
180 |
|
* "expensive", promoted YUV extension |
|
181 |
|
*/ |
|
182 |
|
/* |
|
183 |
|
* XXX: not 1.1 promoted, should not use it, but it is fixing |
|
184 |
|
* some inconsistency from 1.0
|
185 |
|
*/ |
|
186 |
|
"VK_KHR_get_surface_capabilities2", |
|
187 |
|
/* 1.1 promoted */ |
|
188 |
|
"VK_KHR_get_physical_device_properties2", |
|
189 |
|
"VK_KHR_xcb_surface", |
|
190 |
|
"VK_KHR_surface"}; |
|
191 |
|
u32 i; |
|
192 |
|
|
|
193 |
|
i = 0; |
|
194 |
|
loop { |
|
195 |
|
if (i == ARRAY_N(exts)) |
|
196 |
|
break; |
|
197 |
|
POUTVK("will use instance extension %s\n", exts[i]); |
|
198 |
|
++i; |
|
199 |
|
} |
|
200 |
|
memset(&info, 0, sizeof(info)); |
|
201 |
|
info.type = vk_struct_type_instance_create_info; |
|
202 |
|
info.enabled_exts_n = ARRAY_N(exts); |
|
203 |
|
info.enabled_ext_names = exts; |
|
204 |
|
vk_create_instance(&info); |
|
205 |
|
IF_FATALVK("%d:unable to create an instance\n", r); |
|
206 |
|
POUTVK("instance handle %p\n", instance_l); |
|
207 |
|
} |
|
208 |
|
/*NSPC*/ |
|
209 |
|
static void tmp_phydevs_get(void) |
|
210 |
|
{ |
|
211 |
|
struct vk_phydev_t *phydevs[tmp_phydevs_n_max]; |
|
212 |
|
u32 n; |
|
213 |
|
s32 r; |
|
214 |
|
|
|
215 |
|
memset(phydevs, 0, sizeof(phydevs)); |
|
216 |
|
n = tmp_phydevs_n_max; |
|
217 |
|
vk_enumerate_phydevs(&n, phydevs); |
|
218 |
|
if (r != vk_success && r != vk_incomplete) |
|
219 |
|
FATALVK("%ld:unable to enumerate physical devices\n", r); |
|
220 |
|
if (r == vk_incomplete) |
|
221 |
|
FATALVK("too many vulkan physical devices %u/%u for our temporary storage\n", n, tmp_phydevs_n_max); |
|
222 |
|
/* vk_success */ |
|
223 |
|
POUTVK("detected %u physical devices\n", n); |
|
224 |
|
if (n == 0) |
|
225 |
|
FATALVK("no vulkan physical devices, exiting\n"); |
|
226 |
|
tmp_phydevs_n_l = n; |
|
227 |
|
memset(tmp_phydevs_l, 0, sizeof(tmp_phydevs_l)); |
|
228 |
|
n = 0; |
|
229 |
|
loop { |
|
230 |
|
if (n == tmp_phydevs_n_l) |
|
231 |
|
break; |
|
232 |
|
tmp_phydevs_l[n].vk = phydevs[n]; |
|
233 |
|
++n; |
|
234 |
|
}; |
|
235 |
|
} |
|
236 |
|
#define EXTS_N_MAX 512 |
|
237 |
|
/*NSPC*/ |
|
238 |
|
static void phydev_exts_dump(void *phydev) |
|
239 |
|
{ |
|
240 |
|
struct vk_ext_props_t exts[EXTS_N_MAX]; |
|
241 |
|
u32 n; |
|
242 |
|
s32 r; |
|
243 |
|
|
|
244 |
|
memset(exts, 0, sizeof(exts)); |
|
245 |
|
n = EXTS_N_MAX; |
|
246 |
|
vk_enumerate_dev_ext_props(phydev, &n, exts); |
|
247 |
|
if (r != vk_success && r != vk_incomplete) { |
|
248 |
|
WARNINGVK("physical device:%p:%d:unable to enumerate device extension(s)\n", phydev, r); |
|
249 |
|
return; |
|
250 |
|
} |
|
251 |
|
if (r == vk_incomplete) { |
|
252 |
|
WARNINGVK("physical device:%p:too many extensions (%u/%u), dumping disabled", phydev, n, EXTS_N_MAX); |
|
253 |
|
return; |
|
254 |
|
} |
|
255 |
|
/* vk_success */ |
|
256 |
|
POUTVK("physical device:%p:have %u device extension(s)\n", phydev, n); |
|
257 |
|
loop { |
|
258 |
|
if (n == 0) |
|
259 |
|
break; |
|
260 |
|
POUTVK("physical device:%p:device extension:name=%s:specification version=%u\n", phydev, exts[n - 1].name, exts[n - 1].spec_version); |
|
261 |
|
n--; |
|
262 |
|
} |
|
263 |
|
} |
|
264 |
|
#undef EXTS_N_MAX |
|
265 |
|
/*NSPC*/ |
|
266 |
|
static void tmp_phydevs_exts_dump(void) |
|
267 |
|
{ |
|
268 |
|
u8 i; |
|
269 |
|
|
|
270 |
|
i = 0; |
|
271 |
|
loop { |
|
272 |
|
if (i == tmp_phydevs_n_l) |
|
273 |
|
break; |
|
274 |
|
phydev_exts_dump(tmp_phydevs_l[i].vk); |
|
275 |
|
++i; |
|
276 |
|
} |
|
277 |
|
} |
|
278 |
|
/*NSPC*/ |
|
279 |
|
static u8 *dev_type_str(u32 type) |
|
280 |
|
{ |
|
281 |
|
switch (type) { |
|
282 |
|
case vk_phydev_type_other: |
|
283 |
|
return "other"; |
|
284 |
|
case vk_phydev_type_integrated_gpu: |
|
285 |
|
return "integrated gpu"; |
|
286 |
|
case vk_phydev_type_discrete_gpu: |
|
287 |
|
return "discrete gpu"; |
|
288 |
|
case vk_phydev_type_virtual_gpu: |
|
289 |
|
return "virtual gpu"; |
|
290 |
|
case vk_phydev_type_cpu: |
|
291 |
|
return "cpu"; |
|
292 |
|
default: |
|
293 |
|
return "UNKNOWN"; |
|
294 |
|
} |
|
295 |
|
} |
|
296 |
|
/*NSPC*/ |
|
297 |
|
static u8 *uuid_str(u8 *uuid) |
|
298 |
|
{ |
|
299 |
|
static u8 uuid_str[VK_UUID_SZ * 2 + 1]; |
|
300 |
|
u8 i; |
|
301 |
|
|
|
302 |
|
memset(uuid_str, 0, sizeof(uuid_str)); |
|
303 |
|
i = 0; |
|
304 |
|
loop { |
|
305 |
|
if (i == VK_UUID_SZ) |
|
306 |
|
break; |
|
307 |
|
/* XXX: always write a terminating 0, truncated or not */ |
|
308 |
|
snprintf(uuid_str + i * 2, 3, "%02x", uuid[i]); |
|
309 |
|
++i; |
|
310 |
|
} |
|
311 |
|
return uuid_str; |
|
312 |
|
} |
|
313 |
|
/*NSPC*/ |
|
314 |
|
static void tmp_phydevs_props_dump(void) |
|
315 |
|
{ |
|
316 |
|
u32 i; |
|
317 |
|
|
|
318 |
|
i = 0; |
|
319 |
|
loop { |
|
320 |
|
struct vk_phydev_props_t props; |
|
321 |
|
struct tmp_phydev_t *p; |
|
322 |
|
|
|
323 |
|
if (i == tmp_phydevs_n_l) |
|
324 |
|
break; |
|
325 |
|
p = &tmp_phydevs_l[i]; |
|
326 |
|
memset(&props, 0, sizeof(props)); |
|
327 |
|
props.type = vk_struct_type_phydev_props; |
|
328 |
|
vk_get_phydev_props(p->vk, &props); |
|
329 |
|
POUTVK("physical device:%p:properties:api version=%#x=%u.%u.%u\n", p->vk, props.core.api_version, VK_VERSION_MAJOR(props.core.api_version), VK_VERSION_MINOR(props.core.api_version), VK_VERSION_PATCH(props.core.api_version)); |
|
330 |
|
POUTVK("physical device:%p:properties:driver version=%#x=%u.%u.%u\n", p->vk, props.core.driver_version, VK_VERSION_MAJOR(props.core.driver_version), VK_VERSION_MINOR(props.core.driver_version), VK_VERSION_PATCH(props.core.driver_version)); |
|
331 |
|
POUTVK("physical device:%p:properties:vendor id=%#x\n", p->vk, props.core.vendor_id); |
|
332 |
|
POUTVK("physical device:%p:properties:device id=%#x\n", p->vk, props.core.dev_id); |
|
333 |
|
POUTVK("physical device:%p:properties:type=%s\n", p->vk, dev_type_str(props.core.dev_type)); |
|
334 |
|
if (props.core.dev_type == vk_phydev_type_discrete_gpu) |
|
335 |
|
p->is_discret_gpu = true; |
|
336 |
|
else |
|
337 |
|
p->is_discret_gpu = false; |
|
338 |
|
POUTVK("physical device:%p:properties:name=%s\n", p->vk, props.core.name); |
|
339 |
|
POUTVK("physical device:%p:properties:pipeline cache uuid=%s\n", p->vk, uuid_str(props.core.pl_cache_uuid)); |
|
340 |
|
/* disp the limits and sparse props at "higher log lvl", if needed in the end */ |
|
341 |
|
++i; |
|
342 |
|
} |
|
343 |
|
} |
|
344 |
|
/*NSPC*/ |
|
345 |
|
static void tmp_phydev_mem_props_get(struct tmp_phydev_t *p) |
|
346 |
|
{ |
|
347 |
|
memset(&p->mem_props, 0, sizeof(p->mem_props)); |
|
348 |
|
p->mem_props.type = vk_struct_type_phydev_mem_props; |
|
349 |
|
vk_get_phydev_mem_props(p->vk, &p->mem_props); |
|
350 |
|
} |
|
351 |
|
/*NSPC*/ |
|
352 |
|
static void tmp_phydevs_mem_props_get(void) |
|
353 |
|
{ |
|
354 |
|
u8 i; |
|
355 |
|
|
|
356 |
|
i = 0; |
|
357 |
|
loop { |
|
358 |
|
if (i == tmp_phydevs_n_l) |
|
359 |
|
break; |
|
360 |
|
tmp_phydev_mem_props_get(&tmp_phydevs_l[i]); |
|
361 |
|
++i; |
|
362 |
|
} |
|
363 |
|
} |
|
364 |
|
/*NSPC*/ |
|
365 |
|
static void phydev_mem_type_dump(void *phydev, u8 i, |
|
366 |
|
struct vk_mem_type_t *type) |
|
367 |
|
{ |
|
368 |
|
POUTVK("physical device:%p:memory type:%u:heap:%u\n", phydev, i, type->heap); |
|
369 |
|
POUTVK("physical device:%p:memory type:%u:flags:%#08x\n", phydev, i, type->prop_flags); |
|
370 |
|
if ((type->prop_flags & vk_mem_prop_dev_local_bit) != 0) |
|
371 |
|
POUTVK("physical device:%p:memory type:%u:device local\n", phydev, i); |
|
372 |
|
if ((type->prop_flags & vk_mem_prop_host_visible_bit) != 0) |
|
373 |
|
POUTVK("physical device:%p:memory type:%u:host visible\n", phydev, i); |
|
374 |
|
if ((type->prop_flags & vk_mem_prop_host_cached_bit) != 0) |
|
375 |
|
POUTVK("physical device:%p:memory type:%u:host cached\n", phydev, i); |
|
376 |
|
} |
|
377 |
|
/*NSPC*/ |
|
378 |
|
static void tmp_phydev_mem_types_dump(struct tmp_phydev_t *p) |
|
379 |
|
{ |
|
380 |
|
u8 i; |
|
381 |
|
|
|
382 |
|
POUTVK("physical device:%p:%u memory types\n", p->vk, p->mem_props.core.mem_types_n); |
|
383 |
|
i = 0; |
|
384 |
|
loop { |
|
385 |
|
if (i == p->mem_props.core.mem_types_n) |
|
386 |
|
break; |
|
387 |
|
phydev_mem_type_dump(p->vk, i, |
|
388 |
|
&p->mem_props.core.mem_types[i]); |
|
389 |
|
++i; |
|
390 |
|
} |
|
391 |
|
} |
|
392 |
|
/*NSPC*/ |
|
393 |
|
static void phydev_mem_heap_dump(void *phydev, u8 i, |
|
394 |
|
struct vk_mem_heap_t *heap) |
|
395 |
|
{ |
|
396 |
|
POUTVK("physical device:%p:memory heap:%u:size:%u bytes\n", phydev, i, heap->sz); |
|
397 |
|
POUTVK("physical device:%p:memory heap:%u:flags:%#08x\n", phydev, i, heap->flags); |
|
398 |
|
if ((heap->flags & vk_mem_heap_dev_local_bit) != 0) |
|
399 |
|
POUTVK("physical device:%p:memory heap:%u:device local\n", phydev, i); |
|
400 |
|
if ((heap->flags & vk_mem_heap_multi_instance_bit) != 0) |
|
401 |
|
POUTVK("physical device:%p:memory type:%u:multi instance\n", phydev, i); |
}
/*NSPC*/
static void tmp_phydev_mem_heaps_dump(struct tmp_phydev_t *p)
{
	u8 i;

	POUTVK("physical device:%p:%u memory heaps\n", p->vk, p->mem_props.core.mem_heaps_n);
	i = 0;
	loop {
		if (i == p->mem_props.core.mem_heaps_n)
			break;
		phydev_mem_heap_dump(p->vk, i,
					&p->mem_props.core.mem_heaps[i]);
		++i;
	}

}
/*NSPC*/
static void tmp_phydev_mem_props_dump(struct tmp_phydev_t *p)
{
	tmp_phydev_mem_types_dump(p);
	tmp_phydev_mem_heaps_dump(p);
}
/*NSPC*/
static void tmp_phydevs_mem_props_dump(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_l)
			break;
		tmp_phydev_mem_props_dump(&tmp_phydevs_l[i]);
		++i;
	}
}
/*NSPC*/
static void tmp_phydev_q_fams_get(struct tmp_phydev_t *p)
{
	u8 i;
	u32 n;

	n = 0;
	vk_get_phydev_q_fam_props(p->vk, &n, 0);
	if (n > tmp_phydev_q_fams_n_max)
		FATALVK("physical device:%p:too many queue families %u/%u\n", p->vk, n, tmp_phydev_q_fams_n_max);
	memset(p->q_fams, 0, sizeof(p->q_fams));
	i = 0;
	loop {
		if (i == tmp_phydev_q_fams_n_max)
			break;
		p->q_fams[i].type = vk_struct_type_q_fam_props;
		++i;
	}
	vk_get_phydev_q_fam_props(p->vk, &n, p->q_fams);
	p->q_fams_n = n;
	POUTVK("physical device:%p:have %u queue families\n", p->vk, p->q_fams_n);
}
/*NSPC*/
static void tmp_phydevs_q_fams_get(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_l)
			break;
		tmp_phydev_q_fams_get(&tmp_phydevs_l[i]);
		++i;
	}
}
/*NSPC*/
static void tmp_phydev_q_fams_dump(struct tmp_phydev_t *p)
{
	u8 i;

	i = 0;
	loop {
		if (i == p->q_fams_n)
			break;
		if ((p->q_fams[i].core.flags & vk_q_gfx_bit) != 0)
			POUTVK("physical device:%p:queue family:%u:flags:graphics\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_compute_bit) != 0)
			POUTVK("physical device:%p:queue family:%u:flags:compute\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_transfer_bit) != 0)
			POUTVK("physical device:%p:queue family:%u:flags:transfer\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_sparse_binding_bit) != 0)
			POUTVK("physical device:%p:queue family:%u:flags:sparse binding\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_protected_bit) != 0)
			POUTVK("physical device:%p:queue family:%u:flags:protected\n", p->vk, i);
		POUTVK("physical device:%p:queue family:%u:%u queues\n", p->vk, i, p->q_fams[i].core.qs_n);
		POUTVK("physical device:%p:queue family:%u:%u bits timestamps\n", p->vk, i, p->q_fams[i].core.timestamp_valid_bits);
		POUTVK("physical device:%p:queue family:%u:(width=%u,height=%u,depth=%u) minimum image transfer granularity\n", p->vk, i, p->q_fams[i].core.min_img_transfer_granularity.width, p->q_fams[i].core.min_img_transfer_granularity.height, p->q_fams[i].core.min_img_transfer_granularity.depth);
		++i;
	}
}
/*NSPC*/
static void tmp_phydevs_q_fams_dump(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_l)
			break;
		tmp_phydev_q_fams_dump(&tmp_phydevs_l[i]);
		++i;
	}
}
/*
 * the major obj to use in the vk abstraction of gfx hardware is the q. In
 * this abstraction, many core objs like bufs/imgs are "owned" by a specific
 * q, and transfer of such ownership to other qs can be expensive. we know
 * it's not really the case on AMD hardware, but if the vk abstraction
 * insists on this, it probably means it is important on some hardware of
 * other vendors.
 */
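A minimal sketch of what such an ownership transfer looks like in standard Vulkan terms (Khronos names, not this file's wrappers; cb, img, transfer_q_fam and present_q_fam are hypothetical), for the release half of the hand-over:

	/* sketch only: release an image from one queue family to another */
	VkImageMemoryBarrier release_barrier = {
		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
		.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
		.dstAccessMask = 0,
		.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
		.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
		.srcQueueFamilyIndex = transfer_q_fam,	/* hypothetical indexes */
		.dstQueueFamilyIndex = present_q_fam,
		.image = img,
		.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }
	};
	vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TRANSFER_BIT,
		VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0, 0, 0,
		1, &release_barrier);

A matching "acquire" barrier has to be recorded on the destination queue family; picking a single queue family that can do everything, as the code below does, avoids the whole dance.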
/*NSPC*/
static void tmp_phydevs_q_fams_surf_support_get(void)
{
	u8 i;

	i = 0;
	loop {
		struct tmp_phydev_t *p;
		u8 j;

		if (i == tmp_phydevs_n_l)
			break;
		p = &tmp_phydevs_l[i];
		j = 0;
		loop {
			s32 r;
			u32 supported;

			if (j == p->q_fams_n)
				break;
			supported = vk_false;
			vk_get_phydev_surf_support(p->vk, j, &supported);
			IF_FATALVK("%d:physical device:%p:queue family:%u:surface:%p:unable to query queue family wsi/(image presentation to our surface) support\n", r, p->vk, j, surf_p.vk);
			if (supported == vk_true) {
				POUTVK("physical device:%p:queue family:%u:surface:%p:does support wsi/(image presentation to our surface)\n", p->vk, j, surf_p.vk);
				p->q_fams_surf_support[j] = true;
			} else {
				POUTVK("physical device:%p:queue family:%u:surface:%p:does not support wsi/(image presentation to our surface)\n", p->vk, j, surf_p.vk);
				p->q_fams_surf_support[j] = false;
			}
			++j;
		}
		++i;
	}
}
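For reference, the per-queue-family query above presumably wraps vkGetPhysicalDeviceSurfaceSupportKHR; a minimal sketch in standard Vulkan names (phydev, surf and q_fams_n are hypothetical stand-ins):

	/* sketch: which queue families can present to this surface */
	uint32_t j = 0;
	while (j < q_fams_n) {
		VkBool32 supported = VK_FALSE;
		VkResult r = vkGetPhysicalDeviceSurfaceSupportKHR(phydev, j,
							surf, &supported);
		if (r == VK_SUCCESS && supported == VK_TRUE)
			printf("queue family %u can present to the surface\n", j);
		++j;
	}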
/*NSPC*/
static void tmp_selected_phydev_cherry_pick(u8 i)
{
	struct tmp_phydev_t *p;

	p = &tmp_phydevs_l[i];
	surf_p.dev.phydev.vk = p->vk;
	surf_p.dev.phydev.is_discret_gpu = p->is_discret_gpu;
	surf_p.dev.phydev.mem_types_n = p->mem_props.core.mem_types_n;
	memcpy(surf_p.dev.phydev.mem_types, p->mem_props.core.mem_types,
					sizeof(surf_p.dev.phydev.mem_types));
}
/*
 * we ask qs of phydevs which one is able to present imgs to the
 * external pe surf. Additionally we require this q to support gfx. we
 * select basically the first q from the first phydev fitting what we are
 * looking for.
 */
/*NSPC*/
static void tmp_phydev_and_q_fam_select(void)
{
	u8 i;

	i = 0;
	loop {
		u8 j;
		struct tmp_phydev_t *p;

		if (i == tmp_phydevs_n_l)
			break;
		p = &tmp_phydevs_l[i];
		j = 0;
		loop {
			if (j == p->q_fams_n)
				break;
			/*
			 * we are looking for a q fam with:
			 * - img presentation to our surf
			 * - gfx
			 * - transfer (implicit with gfx)
			 */
			if (p->q_fams_surf_support[j]
				&& (p->q_fams[j].core.flags & vk_q_gfx_bit)
									!= 0) {
				surf_p.dev.phydev.q_fam = j;
				tmp_selected_phydev_cherry_pick(i);
				POUTVK("physical device %p selected for (wsi/image presentation to our surface %p) using its queue family %u\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.q_fam);
				return;
			}
			++j;
		}
		++i;
	}
}
/*NSPC*/
static void texel_mem_blk_confs_dump(u32 confs_n,
				struct vk_surf_texel_mem_blk_conf_t *confs)
{
	u32 i;

	i = 0;
	loop {
		if (i == confs_n)
			break;
		POUTVK("physical device:%p:surface:%p:texel memory block configuration:format=%u color_space=%u\n", surf_p.dev.phydev.vk, surf_p.vk, confs[i].core.fmt, confs[i].core.color_space);
		++i;
	}
}
/*
 * we only know this phydev/q is "able to present imgs" to the external
 * pe surf. Here we choose the cfg of the texel blk
 */
#define CONFS_N_MAX 1024
/*NSPC*/
static void phydev_surf_texel_mem_blk_conf_select(void)
{
	struct vk_phydev_surf_info_t info;
	struct vk_surf_texel_mem_blk_conf_t confs[CONFS_N_MAX];
	struct vk_surf_texel_mem_blk_conf_core_t *cc;
	s32 r;
	u32 confs_n;
	u32 i;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_phydev_surf_info;
	info.surf = surf_p.vk;
	vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, 0);
	IF_FATALVK("%d:physical device:%p:surface:%p:unable to get the count of valid surface texel memory block configurations\n", r, surf_p.dev.phydev.vk, surf_p.vk);
	if (confs_n > CONFS_N_MAX)
		FATALVK("physical device:%p:surface:%p:too many surface texel memory block configurations %u/%u\n", surf_p.dev.phydev.vk, surf_p.vk, confs_n, CONFS_N_MAX);

	memset(confs, 0, sizeof(confs[0]) * confs_n);
	i = 0;
	loop {
		if (i == confs_n)
			break;
		confs[i].type = vk_struct_type_surf_texel_mem_blk_conf;
		++i;
	}
	vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, confs);
	IF_FATALVK("%d:physical device:%p:surface:%p:unable to get the valid surface texel memory block configurations\n", r, surf_p.dev.phydev.vk, surf_p.vk);
	if (confs_n == 0)
		FATALVK("physical device:%p:surface:%p:no valid surface texel memory block configuration\n", surf_p.dev.phydev.vk, surf_p.vk);
	texel_mem_blk_confs_dump(confs_n, confs);

	cc = &surf_p.dev.phydev.selected_texel_mem_blk_conf_core;
	/*
	 * the following texel cfg is guaranteed to exist, and this is what we
	 * get from ff scaler
	 */
	cc->fmt = vk_texel_mem_blk_fmt_b8g8r8a8_srgb;
	POUTVK("physical device:%p:surface:%p:using our surface texel memory block format %u\n", surf_p.dev.phydev.vk, surf_p.vk, cc->fmt);
	cc->color_space = vk_color_space_srgb_nonlinear;
	POUTVK("physical device:%p:surface:%p:using preferred surface texel memory block color space %u\n", surf_p.dev.phydev.vk, surf_p.vk, cc->color_space);
}
#undef CONFS_N_MAX
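The format pair is hard-coded rather than picked from the enumerated configurations; a defensive sketch, in standard Vulkan names and with hypothetical fmts/fmts_n obtained from vkGetPhysicalDeviceSurfaceFormatsKHR, would confirm the assumption before using it:

	/* sketch: confirm the hard-coded format/color space pair was enumerated */
	uint32_t i = 0;
	int found = 0;
	while (i < fmts_n) {
		if (fmts[i].format == VK_FORMAT_B8G8R8A8_SRGB
				&& fmts[i].colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
			found = 1;
			break;
		}
		++i;
	}
	if (!found)
		fprintf(stderr, "fallback needed: preferred surface format not offered\n");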
/*NSPC*/
static void tmp_phydev_surf_caps_get(void)
{
	s32 r;
	struct vk_phydev_surf_info_t info;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_phydev_surf_info;
	info.surf = surf_p.vk;
	memset(&tmp_surf_caps_l, 0, sizeof(tmp_surf_caps_l));
	tmp_surf_caps_l.type = vk_struct_type_surf_caps;
	vk_get_phydev_surf_caps(&info, &tmp_surf_caps_l);
	IF_FATALVK("%d:physical device:%p:surface:%p:unable to get our surface capabilities in the context of the selected physical device\n", r, surf_p.dev.phydev.vk, surf_p.vk);
	/* we have room for a maximum of 3 images per swapchain */
	if (tmp_surf_caps_l.core.imgs_n_min > swpchn_imgs_n_max)
		FATALVK("physical device:%p:surface:%p:we have room for %u images per swapchain, but this swapchain requires a minimum of %u images\n", surf_p.dev.phydev.vk, surf_p.vk, swpchn_imgs_n_max, tmp_surf_caps_l.core.imgs_n_min);
}
/*NSPC*/
static void tmp_phydev_surf_caps_dump(void)
{
	POUTVK("physical device:%p:surface:%p:imgs_n_min=%u\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_surf_caps_l.core.imgs_n_min);
	POUTVK("physical device:%p:surface:%p:imgs_n_max=%u\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_surf_caps_l.core.imgs_n_max);
	POUTVK("physical device:%p:surface:%p:current extent=(width=%u, height=%u)\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_surf_caps_l.core.current_extent.width, tmp_surf_caps_l.core.current_extent.height);
	POUTVK("physical device:%p:surface:%p:minimal extent=(width=%u, height=%u)\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_surf_caps_l.core.img_extent_min.width, tmp_surf_caps_l.core.img_extent_min.height);
	POUTVK("physical device:%p:surface:%p:maximal extent=(width=%u, height=%u)\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_surf_caps_l.core.img_extent_max.width, tmp_surf_caps_l.core.img_extent_max.height);
	POUTVK("physical device:%p:surface:%p:img_array_layers_n_max=%u\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_surf_caps_l.core.img_array_layers_n_max);
	POUTVK("physical device:%p:surface:%p:supported_transforms=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_surf_caps_l.core.supported_transforms);
	POUTVK("physical device:%p:surface:%p:current_transform=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_surf_caps_l.core.current_transform);
	POUTVK("physical device:%p:surface:%p:supported_composite_alpha=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_surf_caps_l.core.supported_composite_alpha);
	POUTVK("physical device:%p:surface:%p:supported_img_usage_flags=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_surf_caps_l.core.supported_img_usage_flags);
}
/*NSPC*/
static void tmp_phydev_surf_present_modes_get(void)
{
	s32 r;

	tmp_present_modes_n_l = tmp_present_modes_n_max;
	vk_get_phydev_surf_present_modes();
	IF_FATALVK("%d:physical device:%p:surface:%p:unable to get the physical device present mode for our surface\n", r, surf_p.dev.phydev.vk, surf_p.vk);
}
/*NSPC*/
static u8 *present_mode_to_str(u32 mode)
{
	switch (mode) {
	case vk_present_mode_immediate:
		return "immediate";
	case vk_present_mode_mailbox:
		return "mailbox";
	case vk_present_mode_fifo:
		return "fifo";
	case vk_present_mode_fifo_relaxed:
		return "fifo relaxed";
	default:
		return "unknown";
	}
}
/*NSPC*/
static void tmp_phydev_surf_present_modes_dump(void)
{
	u8 i;

	i = 0;
	POUTVK("physical device:%p:surface:%p:%u present modes\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_present_modes_n_l);
	loop {
		if (i == (u8)tmp_present_modes_n_l)
			break;
		POUTVK("physical device:%p:surface:%p:present mode=%s\n", surf_p.dev.phydev.vk, surf_p.vk, present_mode_to_str(tmp_present_modes_l[i]));
		++i;
	}
}
/*NSPC*/
static void phydev_init(void)
{
	tmp_phydevs_get();
	/*--------------------------------------------------------------------*/
	tmp_phydevs_exts_dump();
	tmp_phydevs_props_dump();
	tmp_phydevs_mem_props_get();
	tmp_phydevs_mem_props_dump();
	/*--------------------------------------------------------------------*/
	tmp_phydevs_q_fams_get();
	tmp_phydevs_q_fams_dump();
	/*====================================================================*/
	/* from here our surf is involved */
	/*--------------------------------------------------------------------*/
	/* select the phydev and its q family which can work with our surf */
	tmp_phydevs_q_fams_surf_support_get();
	tmp_phydev_and_q_fam_select();
	/*--------------------------------------------------------------------*/
	phydev_surf_texel_mem_blk_conf_select();
	/*--------------------------------------------------------------------*/
	tmp_phydev_surf_caps_get();
	tmp_phydev_surf_caps_dump();
	/*--------------------------------------------------------------------*/
	tmp_phydev_surf_present_modes_get();
	tmp_phydev_surf_present_modes_dump();
}
/* the phydev q fam selected */
/*NSPC*/
static void dev_create(void)
{
	struct vk_dev_create_info_t info;
	struct vk_dev_q_create_info_t q_info;
	float q_prio;
	static u8 *exts[] = {
		/* 1.1 promoted */
		"VK_KHR_bind_memory2",
		/* 1.1 promoted */
		"VK_KHR_get_memory_requirements2",
		"VK_KHR_swapchain"};

	s32 r;

	memset(&info, 0, sizeof(info));
	memset(&q_info, 0, sizeof(q_info));
	/*--------------------------------------------------------------------*/
	q_info.type = vk_struct_type_dev_q_create_info;
	q_info.q_fam = surf_p.dev.phydev.q_fam;
	q_info.qs_n = 1;
	q_info.q_prios = &q_prio;
	q_prio = 1.0f;
	/*--------------------------------------------------------------------*/
	info.type = vk_struct_type_dev_create_info;
	info.q_create_infos_n = 1;
	info.q_create_infos = &q_info;
	info.enabled_exts_n = ARRAY_N(exts);
	info.enabled_ext_names = exts;
	vk_create_dev(&info);
	IF_FATALVK("%d:physical device:%p:unable to create a vulkan device\n", r, surf_p.dev.phydev.vk);
	POUTVK("physical device:%p:vulkan device created with one proper queue:%p\n", surf_p.dev.phydev.vk, surf_p.dev.vk);
}
/*NSPC*/
static void q_get(void)
{
	POUTVK("device:%p:getting queue:family=%u queue=0\n", surf_p.dev.vk, surf_p.dev.phydev.q_fam);
	vk_get_dev_q();
	POUTVK("device:%p:got queue:%p\n", surf_p.dev.vk, surf_p.dev.q);
}
/*NSPC*/
static void cp_create(void)
{
	s32 r;
	struct vk_cp_create_info_t info;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_cp_create_info;
	info.flags = vk_cp_create_reset_cb_bit;
	info.q_fam = surf_p.dev.phydev.q_fam;
	vk_create_cp(&info);
	IF_FATALVK("%d:unable to create the command pool\n", r);
	POUTVK("device:%p:queue family:%u:created command pool %p\n", surf_p.dev.vk, surf_p.dev.phydev.q_fam, surf_p.dev.cp);
}
/*NSPC*/
static void dev_init(void)
{
	phydev_init();
	/*--------------------------------------------------------------------*/
	dev_create();
	dev_syms();
	q_get();
	cp_create();
}
/* XXX: the surf is an obj at the instance lvl, NOT THE [PHYSICAL] DEV LVL */
/*NSPC*/
static void surf_create(xcb_connection_t *c, u32 win_id)
{
	struct vk_xcb_surf_create_info_t vk_xcb_info;
	s32 r;

	memset(&surf_p, 0, sizeof(surf_p));
	memset(&vk_xcb_info, 0, sizeof(vk_xcb_info));
	vk_xcb_info.type = vk_struct_type_xcb_surf_create_info;
	vk_xcb_info.c = c;
	vk_xcb_info.win = win_id;
	vk_create_xcb_surf(&vk_xcb_info);
	IF_FATALVK("%d:xcb:%p:window id:%#x:unable to create a vulkan surface from this x11 window\n", r, c, win_id);
	POUTVK("xcb:%p:window id:%#x:created vk_surface=%p\n", c, win_id, surf_p.vk);
}
/*NSPC*/
static void swpchn_init(void)
{
	struct vk_swpchn_create_info_t info;
	struct phydev_t *p;
	s32 r;

	memset(&info, 0, sizeof(info));
	p = &surf_p.dev.phydev;
	info.type = vk_struct_type_swpchn_create_info;
	info.surf = surf_p.vk;
	info.imgs_n_min = tmp_surf_caps_l.core.imgs_n_min;
	info.img_texel_mem_blk_fmt = p->selected_texel_mem_blk_conf_core.fmt;
	info.img_color_space = p->selected_texel_mem_blk_conf_core.color_space;
	memcpy(&info.img_extent, &tmp_surf_caps_l.core.current_extent,
						sizeof(info.img_extent));
	info.img_layers_n = 1;
	info.img_usage = vk_img_usage_color_attachment_bit
					| vk_img_usage_transfer_dst_bit;
	info.img_sharing_mode = vk_sharing_mode_exclusive;
	info.pre_transform = vk_surf_transform_identity_bit;
	info.composite_alpha = vk_composite_alpha_opaque_bit;
	info.present_mode = vk_present_mode_fifo;
	info.clipped = vk_true;
	vk_create_swpchn(&info);
	IF_FATALVK("%d:device:%p:surface:%p:unable to create the initial swapchain\n", r, surf_p.dev.vk, surf_p.vk);
	POUTVK("device:%p:surface:%p:swapchain created %p\n", surf_p.dev.vk, surf_p.vk, surf_p.dev.swpchn.vk);
}
/*NSPC*/
static void swpchn_imgs_get(void)
{
	s32 r;
	u8 target_imgs_n;

	/*
	 * TODO: should try to figure out how to favor double buf over
	 * everything else
	 */
	surf_p.dev.swpchn.imgs_n = swpchn_imgs_n_max;
	vk_get_swpchn_imgs();
	IF_FATALVK("%d:device:%p:surface:%p:swapchain:%p:unable to get the swapchain images\n", r, surf_p.dev.vk, surf_p.vk, surf_p.dev.swpchn.vk);
	POUTVK("device:%p:surface:%p:swapchain:%p:got %u swapchain images\n", surf_p.dev.vk, surf_p.vk, surf_p.dev.swpchn.vk, surf_p.dev.swpchn.imgs_n);
}
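One possible shape for the TODO above, sketched against the capabilities already stored in tmp_surf_caps_l (the clamping policy is an assumption, not something this code does yet); it would go where swpchn_init fills imgs_n_min:

	/* sketch: prefer double buffering when the surface capabilities allow it */
	u32 wanted = 2;
	if (wanted < tmp_surf_caps_l.core.imgs_n_min)
		wanted = tmp_surf_caps_l.core.imgs_n_min;
	if (tmp_surf_caps_l.core.imgs_n_max != 0	/* 0 means "no upper limit" */
			&& wanted > tmp_surf_caps_l.core.imgs_n_max)
		wanted = tmp_surf_caps_l.core.imgs_n_max;
	info.imgs_n_min = wanted;	/* instead of the raw minimum used above */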
/*NSPC*/
static void sems_create(void)
{
	s32 r;
	struct vk_sem_create_info_t info;
	u8 sem;

	sem = 0;
	loop {
		if (sem == sems_n)
			break;
		memset(&info, 0, sizeof(info));
		info.type = vk_struct_type_sem_create_info;
		vk_create_sem(&info, &surf_p.dev.sems[sem]);
		IF_FATALVK("%d:device:%p:unable to create a semaphore %u for the synchronization of the swapchain\n", r, surf_p.dev.vk, sem);
		POUTVK("device:%p:semaphore %u for the synchronization of the swapchain created %p\n", surf_p.dev.vk, sem, surf_p.dev.sems[sem]);
		++sem;
	}
}
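The semaphores created above only become meaningful in the acquire/present loop, which is outside this fragment; in standard Vulkan names (hypothetical dev, swpchn, q, sem_acquire and sem_done handles) the intended pattern is roughly:

	/* sketch: how swapchain synchronization semaphores are typically used */
	uint32_t img_idx;
	vkAcquireNextImageKHR(dev, swpchn, UINT64_MAX, sem_acquire,
						VK_NULL_HANDLE, &img_idx);
	/* ... submit work waiting on sem_acquire, signaling sem_done ... */
	VkPresentInfoKHR present = {
		.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
		.waitSemaphoreCount = 1,
		.pWaitSemaphores = &sem_done,
		.swapchainCount = 1,
		.pSwapchains = &swpchn,
		.pImageIndices = &img_idx
	};
	vkQueuePresentKHR(q, &present);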
/*NSPC*/
static void swpchn_imgs_cbs_create(void)
{
	s32 r;
	struct vk_cb_alloc_info_t alloc_info;

	memset(&alloc_info, 0, sizeof(alloc_info));
	alloc_info.type = vk_struct_type_cb_alloc_info;
	alloc_info.cp = surf_p.dev.cp;
	alloc_info.lvl = vk_cb_lvl_primary;
	alloc_info.cbs_n = surf_p.dev.swpchn.imgs_n;
	vk_alloc_cbs(&alloc_info);
	IF_FATALVK("%d:device:%p:unable to allocate command buffers for our swapchain images from %p command pool\n", r, surf_p.dev.vk, surf_p.dev.cp);
	POUTVK("device:%p:allocated %u command buffers for our swapchain images from %p command pool\n", surf_p.dev.vk, surf_p.dev.swpchn.imgs_n, surf_p.dev.cp);
}
File npv/xcb/local/code.frag.c added (mode: 100644) (index 0000000..4c8e0bc)
static void npv_xcb_client_lib_load(void)
{
	npv_xcb_client_lib_l = dlopen("libxcb.so.1", RTLD_LAZY);
	if (npv_xcb_client_lib_l == 0)
		FATALX("%s:unable to load the xcb dynamic shared library\n", dlerror());
}
static void npv_xcb_client_lib_close(void)
{
	int r;

	r = dlclose(npv_xcb_client_lib_l);
	if (r != 0)
		FATALX("%d:%s:unable to close the xcb dynamic shared library\n", r, dlerror());
	npv_xcb_client_lib_l = 0;
}
#define XCB_DLSYM(x) \
	dl_##x = dlsym(npv_xcb_client_lib_l, #x); \
	if (dl_##x == 0) \
		FATALX("%s:unable to find " #x "\n", dlerror());
static void npv_xcb_syms(void)
{
	XCB_DLSYM(xcb_connect);
	XCB_DLSYM(xcb_get_file_descriptor);
	XCB_DLSYM(xcb_generate_id);
	XCB_DLSYM(xcb_connection_has_error);
	XCB_DLSYM(xcb_get_setup);
	XCB_DLSYM(xcb_setup_roots_length);
	XCB_DLSYM(xcb_setup_roots_iterator);
	XCB_DLSYM(xcb_screen_next);
	XCB_DLSYM(xcb_create_window);
	XCB_DLSYM(xcb_map_window);
	XCB_DLSYM(xcb_map_window_checked);
	XCB_DLSYM(xcb_request_check);
	XCB_DLSYM(xcb_flush);
	XCB_DLSYM(xcb_wait_for_event);
	XCB_DLSYM(xcb_poll_for_event);
	XCB_DLSYM(xcb_change_property);
	XCB_DLSYM(xcb_disconnect);
}
#undef XCB_DLSYM
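The dl_xcb_* pointers filled by XCB_DLSYM are declared elsewhere in the tree; presumably they mirror the libxcb prototypes, e.g. for two of the symbols (an assumption about the missing declarations, not code from this commit):

	/* hypothetical declarations matching the libxcb prototypes */
	static xcb_connection_t *(*dl_xcb_connect)(const char *displayname,
								int *screenp);
	static int (*dl_xcb_get_file_descriptor)(xcb_connection_t *c);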
static void npv_xcb_win_create(void)
{
	u32 value_mask;
	u32 value_list[2];
	xcb_void_cookie_t cookie;
	xcb_generic_error_t *e;

	npv_xcb_p.win_id = dl_xcb_generate_id(npv_xcb_p.c);
	POUTX("'%s':connection:%p:screen:%d:root window id:%#x:window id=%#x\n", npv_xcb_p.disp_env, npv_xcb_p.c, npv_xcb_p.scr_idx, npv_xcb_p.scr->root, npv_xcb_p.win_id);

	value_mask = XCB_CW_BACK_PIXEL | XCB_CW_EVENT_MASK;
	value_list[0] = npv_xcb_p.scr->black_pixel;
	value_list[1] = XCB_EVENT_MASK_KEY_RELEASE
					| XCB_EVENT_MASK_RESIZE_REDIRECT;

	dl_xcb_create_window(npv_xcb_p.c, XCB_COPY_FROM_PARENT,
						npv_xcb_p.win_id,
						npv_xcb_p.scr->root, 0, 0,
						npv_xcb_p.width,
						npv_xcb_p.height,
						0,
						XCB_WINDOW_CLASS_INPUT_OUTPUT,
						npv_xcb_p.scr->root_visual,
						value_mask, value_list);
	cookie = dl_xcb_map_window_checked(npv_xcb_p.c,
						npv_xcb_p.win_id);
	POUTX("'%s':connection:%p:screen:%d:root window id:%#x:window id:%#x:map window request cookie=%#x\n", npv_xcb_p.disp_env, npv_xcb_p.c, npv_xcb_p.scr_idx, npv_xcb_p.scr->root, npv_xcb_p.win_id, cookie);

	e = dl_xcb_request_check(npv_xcb_p.c, cookie);
	if (e != 0)
		FATALX("'%s':connection:%p:screen:%d:root window id:%#x:window id:%#x:unable to map window\n", npv_xcb_p.disp_env, npv_xcb_p.c, npv_xcb_p.scr_idx, npv_xcb_p.scr->root, npv_xcb_p.win_id);
	POUTX("'%s':connection:%p:screen:%d:root window id:%#x:window id:%#x:window mapped\n", npv_xcb_p.disp_env, npv_xcb_p.c, npv_xcb_p.scr_idx, npv_xcb_p.scr->root, npv_xcb_p.win_id);
}
/*
 * a disp is n scrs and 1 [set of] keyboard[s] and 1 [set of] mouse[s]
 * 1 scr could be n monitors
 * nowadays: usually 1 scr per display
 * 1 scr has 1 root win
 */
static void npv_xcb_connect(void)
{
	int r;

	npv_xcb_p.disp_env = getenv("DISPLAY");
	if (npv_xcb_p.disp_env == 0 || npv_xcb_p.disp_env[0] == 0)
		FATALX("no x11 DISPLAY environment variable, exiting\n");

	npv_xcb_p.scr_idx = 0;
	npv_xcb_p.c = dl_xcb_connect(0, &npv_xcb_p.scr_idx); /* should be 0 though */
	r = dl_xcb_connection_has_error(npv_xcb_p.c);
	if (r > 0)
		FATALX("%d:%s:error while connecting to the x11 server\n", r, npv_xcb_p.disp_env);
	POUTX("'%s':connection=%p, default screen index is %d (should be 0)\n", npv_xcb_p.disp_env, npv_xcb_p.c, npv_xcb_p.scr_idx);

	npv_xcb_p.fd = dl_xcb_get_file_descriptor(npv_xcb_p.c);
	if (npv_xcb_p.fd == -1)
		FATALX("'%s':unable to get the connection file descriptor for epoll\n", npv_xcb_p.disp_env);
	POUTX("'%s':connection:%p:file descriptor %d\n", npv_xcb_p.disp_env, npv_xcb_p.c, npv_xcb_p.fd);
}
static void npv_xcb_scr_get(void)
{
	xcb_screen_iterator_t iter;
	int scrs_n;
	int i;

	npv_xcb_p.setup = dl_xcb_get_setup(npv_xcb_p.c);

	scrs_n = dl_xcb_setup_roots_length(npv_xcb_p.setup);
	POUTX("'%s':connection:%p:has %d screens (should be 1)\n", npv_xcb_p.disp_env, npv_xcb_p.c, scrs_n);

	iter = dl_xcb_setup_roots_iterator(npv_xcb_p.setup);
	i = 0;
	npv_xcb_p.scr = 0;
	loop {
		if (iter.rem == 0)
			break; /* no more scr to iterate on */

		if (i == npv_xcb_p.scr_idx) {
			npv_xcb_p.scr = iter.data;
			break;
		}
		dl_xcb_screen_next(&iter);
	}
	POUTX("'%s':connection:%p:screen:%d:root window id:%#x:width=%d pixels\n", npv_xcb_p.disp_env, npv_xcb_p.c, npv_xcb_p.scr_idx, npv_xcb_p.scr->root, npv_xcb_p.scr->width_in_pixels);
	POUTX("'%s':connection:%p:screen:%d:root window id:%#x:height=%d pixels\n", npv_xcb_p.disp_env, npv_xcb_p.c, npv_xcb_p.scr_idx, npv_xcb_p.scr->root, npv_xcb_p.scr->height_in_pixels);
	POUTX("'%s':connection:%p:screen:%d:root window id:%#x:white pixel=0x%08x\n", npv_xcb_p.disp_env, npv_xcb_p.c, npv_xcb_p.scr_idx, npv_xcb_p.scr->root, npv_xcb_p.scr->white_pixel);
	POUTX("'%s':connection:%p:screen:%d:root window id:%#x:black pixel=0x%08x\n", npv_xcb_p.disp_env, npv_xcb_p.c, npv_xcb_p.scr_idx, npv_xcb_p.scr->root, npv_xcb_p.scr->black_pixel);
}
// TODO: not appropriate for the video window
//#define MIN_SZ_BIT (1 << 4)
//#define MAX_SZ_BIT (1 << 5)
//#define FLAGS 0
///* 4 padding dwords */
//#define MIN_WIDTH 5
//#define MIN_HEIGHT 6
//#define MAX_WIDTH 7
//#define MAX_HEIGHT 8
//#define DWORDS_N 18
//static void npv_xcb_p_wm_hints(void)
//{
//	u32 data[DWORDS_N];
//
//	memset(data, 0, sizeof(data));
//	data[FLAGS] = MIN_SZ_BIT | MAX_SZ_BIT;
//	data[MIN_WIDTH] = APP_WIN_WIDTH;
//	data[MIN_HEIGHT] = APP_WIN_HEIGHT;
//	data[MAX_WIDTH] = APP_WIN_WIDTH;
//	data[MAX_HEIGHT] = APP_WIN_HEIGHT;
//
//	dl_xcb_change_property(npv_xcb_p.c, XCB_PROP_MODE_REPLACE,
//		npv_xcb_p.win_id, XCB_ATOM_WM_NORMAL_HINTS,
//		XCB_ATOM_WM_SIZE_HINTS, 32, DWORDS_N, data);
//}
//#undef MIN_SZ_BIT
//#undef MAX_SZ_BIT
//#undef FLAGS
//#undef MIN_WIDTH
//#undef MIN_HEIGHT
//#undef MAX_WIDTH
//#undef MAX_HEIGHT
//#undef DWORDS_N
static void npv_xcb_evt_key_release(xcb_generic_event_t *evt)
{
	u8 b;
	xcb_key_release_event_t *key;

	key = (xcb_key_release_event_t*)evt;
	b = 0;
	loop {
		if (b == ARRAY_N(x11_binds))
			break;
		if (key->detail == x11_binds[b].keycode) {
			POUTX("'%s':connection:%p:event:key release:keycode:%#02x:running command for bind \"%s\"\n", npv_xcb_p.disp_env, npv_xcb_p.c, key->detail, x11_binds[b].name);
			x11_binds[b].cmd();
			return;
		}
		++b;
	}
	POUTX("'%s':connection:%p:event:key release:keycode:%#02x\n", npv_xcb_p.disp_env, npv_xcb_p.c, key->detail);
}
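x11_binds itself comes from the configuration header and is not part of this fragment; to make the dispatch above concrete, an entry presumably pairs an x11 keycode with a command, along these lines (hypothetical shape and values, not the project's actual table):

	/* hypothetical: what an x11_binds entry could look like */
	static struct {
		u8 keycode;
		u8 *name;
		void (*cmd)(void);
	} x11_binds[] = {
		{0x41, (u8*)"pause", cmd_pause},	/* 0x41 is space on common layouts */
	};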
static void npv_xcb_evt_resz_request(xcb_generic_event_t *evt)
{
	xcb_resize_request_event_t *rre;
	/*
	 * the x11 server is not resizing the win, it is asking us to
	 * actually do it
	 */
	rre = (xcb_resize_request_event_t*)evt;
	POUTX("'%s':connection:%p:event:resize request:window=%u width=%u,height=%u\n", npv_xcb_p.disp_env, npv_xcb_p.c, rre->window, rre->width, rre->height);
}
static void npv_xcb_evt_handle(xcb_generic_event_t *evt)
{
	u8 evt_code;
	/*
	 * do not discriminate evts generated by clients using sendevent
	 * requests (note: "client message" evts always have their most
	 * significant bit set)
	 */
	evt_code = evt->response_type & 0x7f;

	switch (evt_code) {
	case XCB_KEY_RELEASE:
		npv_xcb_evt_key_release(evt);
		break;
	case XCB_RESIZE_REQUEST:
		npv_xcb_evt_resz_request(evt);
		break;
	default:
		break;
	}
}