[WIP] More audio shader stuff

parent b1cd7b9d7e
commit 5577b6ec73
@@ -16,7 +16,7 @@ bus/1/name = "BGM"
 bus/1/solo = false
 bus/1/mute = false
 bus/1/bypass_fx = false
-bus/1/volume_db = 0.0
+bus/1/volume_db = -6.0
 bus/1/send = "Master"
 bus/2/name = "BGM0"
 bus/2/solo = false
@@ -125,11 +125,98 @@ float get_instrument_sample(float instrument_index, float pitch_scale, float t,
 	return rescale_int16(unpack_int16(get_inst_texel(sample_xy)));
 }
 
+const int NUM_CHANNELS = 8;
+const int MAX_CHANNEL_NOTE_EVENTS = 2048;
+const int NUM_CHANNEL_NOTE_PROBES = 11; // log2(MAX_CHANNEL_NOTE_EVENTS)
+uniform sampler2D midi_events;
+uniform vec2 midi_events_size = vec2(2048.0, 16.0);
+// SDR rendering only gives us [0.0, 1.0] from the sampler2D, so we need to rescale it.
+uniform float t_scale = 524.0; // Change this if we need longer than 8min44sec.
+// ^ Other things will also need changing, since 4096x4096 = 16.8M samples is barely over 524 seconds at 32kHz.
+vec4 get_midi_texel(float x, float y) {
+	return texture(midi_events, vec2(x, y)/midi_events_size).xyzw;
+}
+vec2 unpack_float(float f) {
+	// Unpack two 10bit values from a single channel (23bit mantissa)
+	float a = f * 1024.0;
+	float x = trunc(a) / 1023.0;
+	float y = fract(a) * 1024.0 / 1023.0;
+	return vec2(x, y);
+}
+vec4 render_song(float sample_progress) {
+	// Each texel rendered is a stereo S16LE frame representing 1/32000 of a second.
+	// BGM sequences should be relatively small, so it should be fine to use RGBAF (4x f32 per texel) as our data texture.
+	// 2048 is an established safe texture dimension, so we may as well go 2048 wide.
+	float t = sample_progress/output_mixrate;
+	vec2 downmixed_stereo = vec2(0.0);
+
+	// Binary search the channels
+	for (int channel = 0; channel < NUM_CHANNELS; channel++) {
+		float row = float(channel * 2);
+		float event_idx = 0.0;
+		for (int i = 0; i < NUM_CHANNEL_NOTE_PROBES; i++) {
+			float step_size = exp2(float(NUM_CHANNEL_NOTE_PROBES - i - 1));
+			vec4 note_event = get_midi_texel(event_idx + step_size, row);
+			float t_start = note_event.x * t_scale; // rescale to seconds so the comparison against t is consistent
+			event_idx += (t >= t_start) ? step_size : 0.0;
+		}
+		vec4 note_event = get_midi_texel(event_idx, row);
+		vec4 note_event_supplement = get_midi_texel(event_idx, row+1.0);
+		float t_start = note_event.x * t_scale;
+		float t_end = note_event.y * t_scale;
+		vec2 instrument_and_pitch = unpack_float(note_event.z);
+		float instrument_idx = instrument_and_pitch.x * 1023.0;
+		float pitch_idx = instrument_and_pitch.y * 1023.0; // TODO: Maybe rescale this for fine tuning? Don't use it raw, because 2^(127-71) is MASSIVE; keep the power-of-2 calcs in the shader.
+		vec2 velocity_and_pan = unpack_float(note_event_supplement.w); // Can leave these as [0.0, 1.0] and then mix appropriately.
+		float velocity = velocity_and_pan.x;
+		float pan = velocity_and_pan.y;
+		vec2 attack_and_decay = unpack_float(note_event_supplement.x);
+		vec2 sustain_and_release = unpack_float(note_event_supplement.y);
+		// TBD = note_event_supplement.z; - tremolo/vibrato/noise/pan_lfo/pitchbend/echo remain
+
+		// For now, just branch this
+		if (t_end > t) {
+			float samp = get_instrument_sample(instrument_idx, get_pitch_scale(pitch_idx), t-t_start, t_end-t_start);
+			samp *= velocity;
+			// TODO: do some ADSR here?
+			downmixed_stereo += samp * vec2(1.0-pan, pan); // TODO: double it to maintain the mono level on each channel at center=0.5?
+		}
+	}
+	// Convert the stereo float audio to S16LE
+	return vec4(pack_float_to_int16(downmixed_stereo.x), pack_float_to_int16(downmixed_stereo.y));
+}
+
 void fragment() {
 	// GLES2
 	vec2 uv = vec2(UV.x, 1.0-UV.y);
 	uv = (trunc(uv*UV_QUANTIZE)+0.5)/UV_QUANTIZE;
 
-	COLOR.xyzw = test_writeback(uv);
+	// COLOR.xyzw = test_writeback(uv);
+	COLOR.xyzw = render_song(dot(uv, vec2(1.0, midi_events_size.x)));
 }
 
+// const int MAX_TEMPO_EVENTS = 256;
+// const int NUM_TEMPO_PROBES = 8; // log2(MAX_TEMPO_EVENTS)
+// Because tempo is dynamic, it will need to be encoded into a header in song_texture.
+// // Binary search the first row for tempo information
+// float tempo_idx = 0.0;
+// vec4 tempo_event;
+// float t_start;
+// for (int i = 0; i < NUM_TEMPO_PROBES; i++) {
+// 	float step_size = exp2(float(NUM_TEMPO_PROBES - i - 1));
+// 	tempo_event = get_midi_texel(tempo_idx + step_size, 0.0);
+// 	t_start = tempo_event.x;
+// 	tempo_idx += (t >= t_start) ? step_size : 0.0;
+// }
+// float beat_start = tempo_event.y;
+// float tempo_start = tempo_event.z;
+// float tempo_end = tempo_event.w; // For tempo slides
+// vec4 next_tempo_event = get_midi_texel(tempo_idx + 1.0, 0.0);
+// float t_end = next_tempo_event.x;
+// float beat_end = next_tempo_event.y;
+// // Use the tempo information to convert wall time to beat time
+// float t0 = t - t_start;
+// float t_length = t_end - t_start;
+// float tempo_section_progression = t0 / t_length;
+// float tempo_at_t = mix(tempo_start, tempo_end, tempo_section_progression);
+// float current_beat = beat_start + (t0 * (tempo_start+tempo_at_t) * 0.5); // Use the average tempo across the period to turn integration into the area of a rectangle.
+// Now that we have our position on the beatmap,