[WIP] Still messing with audio shader
commit ef970396f6 (parent 5577b6ec73)
@@ -166,24 +166,15 @@ func load_sfx_samples_data(snes_data: Dictionary, buffer: StreamPeerBuffer):
 # Called when the node enters the scene tree for the first time.
 func load_samples(snes_data: Dictionary, buffer: StreamPeerBuffer):
 	load_sfx_samples_data(snes_data, buffer)
-	# var largest_sample_idx := -1
-	# var largest_sample_sample_count := 0
-	# var total_frames := 0
 	# For some reason, this is a bit slow currently under certain editor conditions. Might optimize later.
 	for i in INST_NUM:
 		var samp := get_inst_sample_data(snes_data, buffer, i)
 		instrument_samples.append(samp)
-		# total_frames += samp.loop_end
-		# if largest_sample_sample_count < samp.loop_end:
-		# 	largest_sample_sample_count = samp.loop_end
-		# 	largest_sample_idx = i
 		# Workaround for Godot 3.x quirk where looping samples are interpolated as if they go to nothing instead of looping
 		instrument_samples_HACK_EXTENDED_LOOPS.append(HACK_EXTEND_LOOP_SAMPLE(samp))
 		# print('Instrument %02X has mix_rate %d Hz and %d samples'%[i, samp.mix_rate, len(samp.data)/2])
 		emit_signal('audio_inst_sample_loaded', i)
-		# samp.save_to_wav('output/instrument%02d(%dHz)(loop from %d).wav' % [i, samp.mix_rate, samp.loop_begin])
+		samp.save_to_wav('output/instrument%02d(%dHz)(loop from %d).wav' % [i, samp.mix_rate, samp.loop_begin])
-	# print('Largest sample is instrument %d with length %d and mix_rate %d'%[largest_sample_idx, largest_sample_sample_count, instrument_samples[largest_sample_idx].mix_rate])
-	# print('Total frames: %d'%total_frames)
 
 
 # We start the texture with a bunch of same-size headers
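
HACK_EXTEND_LOOP_SAMPLE is defined elsewhere in this file; per the comment above, it works around Godot 3.x interpolating the tail of a looping sample toward silence. A rough GDScript sketch of that idea (assumed helper, not the committed implementation; assumes 16-bit mono AudioStreamSample data):

func HACK_EXTEND_LOOP_SAMPLE_sketch(samp: AudioStreamSample) -> AudioStreamSample:
	# Append one extra copy of the loop region so interpolation at the loop seam
	# reads real frames instead of fading to nothing (2 bytes per 16-bit frame).
	var extended := AudioStreamSample.new()
	extended.data = samp.data + samp.data.subarray(samp.loop_begin * 2, samp.loop_end * 2 - 1)
	extended.mix_rate = samp.mix_rate
	extended.loop_mode = AudioStreamSample.LOOP_FORWARD
	extended.loop_begin = samp.loop_begin
	extended.loop_end = samp.loop_end
	return extended
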
@@ -201,12 +192,8 @@ func samples_to_texture():
 	var header_length := num_samples * 6
 
 	# Create header and unwrapped payload separately first
-	var header_data := PoolByteArray()
 	var header_buffer := StreamPeerBuffer.new()
-	header_buffer.data_array = header_data
-	var payload_data := PoolByteArray()
 	var payload_buffer := StreamPeerBuffer.new()
-	payload_buffer.data_array = payload_data
 
 	for sample in instrument_samples + sfx_samples:
 		var loop_end: int = sample.loop_end
@@ -215,7 +202,7 @@ func samples_to_texture():
 		if nonlooping:
 			loop_begin = loop_end
 			loop_end += 1
-		header_buffer.put_u16(header_length + (len(payload_data)/2) + FILTER_PAD) # sample_start
+		header_buffer.put_u16(header_length + (payload_buffer.get_position()/2) + FILTER_PAD) # sample_start
 		header_buffer.put_u16(sample.loop_end + FILTER_PAD) # sample_length
 		header_buffer.put_u16(sample.loop_begin + FILTER_PAD) # sample_loop_begin
 		header_buffer.put_u16(sample.mix_rate) # sample_mixrate
@@ -239,7 +226,7 @@ func samples_to_texture():
 			payload_buffer.seek(pos)
 			payload_buffer.put_16(frame)
 	# Combine the unwrapped arrays
-	var data := header_data + payload_data
+	var data := header_buffer.data_array + payload_buffer.data_array
 	# Now calculate wrapping and rowwise padding for the combined array
 	for row in TEX_WIDTH:
 		var row_end: int = (row + 1) * TEX_WIDTH * 2 # Remember: 8bit array, 16bit values
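
sample_start above is measured in 16-bit words: header_length counts the per-sample header words (num_samples * 6 implies six u16 fields per sample, four of which are visible in the hunk), payload_buffer.get_position()/2 converts the payload write position from bytes to words, and FILTER_PAD shifts everything past the padding frames. A small GDScript sketch of reading one header word back out of the combined data (illustrative reader, not part of this commit):

# put_u16 writes little-endian by default, so word k of sample i sits at byte offset (i*6 + k)*2.
func read_header_word(data: PoolByteArray, sample_idx: int, word_idx: int) -> int:
	var byte_offset := (sample_idx * 6 + word_idx) * 2
	return data[byte_offset] | (data[byte_offset + 1] << 8)
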
@@ -17,8 +17,6 @@ const float x10000 = float(0x10000); // 65536.0
 const vec2 INT16_DOT_BE = vec2(xFF00, x00FF);
 const vec2 INT16_DOT_LE = vec2(x00FF, xFF00);
 
-uniform sampler2D tex : hint_normal;
-
 float unpack_uint16(vec2 uint16) {
 	// Convert packed 2byte integer, sampled as two [0.0, 1.0] range floats, to the original int value [0, 65535] in float32
 	return dot(uint16, INT16_DOT_LE);
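
The dot-product trick above works because the sampler returns each 8-bit channel as byte/255.0: dotting (lo/255, hi/255) with INT16_DOT_LE = (x00FF, xFF00) = (255.0, 65280.0) gives lo + 256*hi, i.e. the original unsigned 16-bit value. The same arithmetic in GDScript, as a sanity check (not part of the commit):

func unpack_uint16_check(lo_byte: int, hi_byte: int) -> float:
	# Mirrors dot(vec2(lo/255.0, hi/255.0), vec2(255.0, 65280.0)) in the shader.
	return (lo_byte / 255.0) * 255.0 + (hi_byte / 255.0) * 65280.0  # == float(lo_byte + (hi_byte << 8))
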
@@ -45,7 +43,7 @@ vec2 pack_float_to_int16(float value) {
 	return vec2(LSB, MSB);
 }
 
-vec4 test_writeback(vec2 uv) {
+vec4 test_writeback(sampler2D tex, vec2 uv) {
 	// Test importing and exporting the samples,
 	// and exporting a value derived from the UV
 	vec4 output;
@@ -131,7 +129,8 @@ const int NUM_CHANNEL_NOTE_PROBES = 11; // log2(MAX_CHANNEL_NOTE_EVENTS)
 uniform sampler2D midi_events;
 uniform vec2 midi_events_size = vec2(2048.0, 16.0);
 // SDR rendering only gives us [0.0, 1.0] from the sampler2D so we need to rescale it.
-uniform float t_scale = 524.0; // Change this if we need longer than 8min44sec.
+uniform float sdr_scale = 128.0; //1024.0;
+// uniform float t_scale = 524.0; // Change this if we need longer than 8min44sec.
 // ^ Other things will also need changing, since 4096x4096 = 8MSamples is barely over 524 seconds at 32kHz.
 vec4 get_midi_texel(float x, float y) {
 	return texture(midi_events, vec2(x, y)/midi_events_size).xyzw;
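
sdr_scale undoes the rescaling done on the CPU side: since an SDR texture read only yields [0.0, 1.0] (per the comment above), the uploader divides times and indices by the same constant (the divisor = 128.0 used by test_rendering() later in this commit) before writing them into the float texture. A minimal GDScript round-trip example:

const SDR_SCALE := 128.0  # must match the sdr_scale uniform

func sdr_round_trip_example() -> void:
	var packed := 30.0 / SDR_SCALE      # value written into the midi_events texture for t_start = 30 s
	var restored := packed * SDR_SCALE  # what the shader recovers after multiplying by sdr_scale
	assert(is_equal_approx(restored, 30.0))
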
@@ -151,31 +150,38 @@ vec4 render_song(float sample_progress) {
 	vec2 downmixed_stereo = vec2(0.0);
 
 	// Binary search the channels
-	for (int channel = 0; channel < NUM_CHANNELS; channel++) {
+	for (int channel = 0; channel < 1; channel++) {
+	// for (int channel = 0; channel < NUM_CHANNELS; channel++) {
 		float row = float(channel * 2);
 		float event_idx = 0.0;
 		for (int i = 0; i < NUM_CHANNEL_NOTE_PROBES; i++) {
 			float step_size = exp2(float(NUM_CHANNEL_NOTE_PROBES - i - 1));
-			vec4 note_event = get_midi_texel(event_idx + step_size, row);
+			vec4 note_event = get_midi_texel(event_idx + step_size, row) * sdr_scale;
 			float t_start = note_event.x;
 			event_idx += (t >= t_start) ? step_size : 0.0;
 		}
-		vec4 note_event = get_midi_texel(event_idx, row);
-		vec4 note_event_supplement = get_midi_texel(event_idx, row+1.0);
-		float t_start = note_event.x * t_scale;
-		float t_end = note_event.y * t_scale;
-		vec2 instrument_and_pitch = unpack_float(note_event.z);
-		float instrument_idx = instrument_and_pitch.x * 1023.0;
-		float pitch_idx = instrument_and_pitch.y * 1023.0; // TODO: Maybe rescale this for fine tuning? Don't use it raw because 2^(127-71) is MASSIVE, keep the power-of-2 calcs in shader.
-		vec2 velocity_and_pan = unpack_float(note_event_supplement.w); // Can leave these as [0.0, 1.0] and then mix appropriately
-		float velocity = velocity_and_pan.x;
-		float pan = velocity_and_pan.y;
-		vec2 attack_and_decay = unpack_float(note_event_supplement.x);
-		vec2 sustain_and_release = unpack_float(note_event_supplement.y);
+		vec4 note_event = get_midi_texel(event_idx, row) * sdr_scale; // scaled to [0.0, 1024.0]
+		vec4 note_event_supplement = get_midi_texel(event_idx, row+1.0); // left as [0.0, 1.0]
+		float t_start = note_event.x;
+		float t_end = note_event.y;
+		// ====================At some point I'll look back into packing floats====================
+		// vec2 instrument_and_pitch = unpack_float(note_event.z);
+		// float instrument_idx = instrument_and_pitch.x * 1023.0;
+		// float pitch_idx = instrument_and_pitch.y * 1023.0; // TODO: Maybe rescale this for fine tuning? Don't use it raw because 2^(127-71) is MASSIVE, keep the power-of-2 calcs in shader.
+		// vec2 velocity_and_pan = unpack_float(note_event.w); // Can leave these as [0.0, 1.0] and then mix appropriately
+		// float velocity = velocity_and_pan.x;
+		// float pan = velocity_and_pan.y;
+		// vec2 attack_and_decay = unpack_float(note_event_supplement.x);
+		// vec2 sustain_and_release = unpack_float(note_event_supplement.y);
 		// TBD = note_event_supplement.zw; - tremolo/vibrato/noise/pan_lfo/pitchbend/echo remain
+		// ====================At some point I'll look back into packing floats====================
+		float instrument_idx = note_event.z;
+		float pitch_idx = note_event.w;
+		float velocity = note_event_supplement.x;
+		float pan = note_event_supplement.y;
 
 		// For now, just branch this
-		if (t_end > t) {
+		if (t < t_end) {
 			float samp = get_instrument_sample(instrument_idx, get_pitch_scale(pitch_idx), t-t_start, t_end-t_start);
 			samp *= velocity;
 			// TODO: do some ADSR here?
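
The probe loop above is a branch-free binary search: with up to 2048 note events per channel (2^11) sorted ascending by t_start, NUM_CHANNEL_NOTE_PROBES = 11 halving steps land event_idx on the last event whose t_start <= t. An equivalent GDScript sketch (illustrative event array, not part of the commit):

const NUM_CHANNEL_NOTE_PROBES := 11  # log2(MAX_CHANNEL_NOTE_EVENTS)

# events is an Array of Dictionaries sorted ascending by 't_start'.
func find_sounding_event(events: Array, t: float) -> int:
	var event_idx := 0
	for i in NUM_CHANNEL_NOTE_PROBES:
		var step_size := int(pow(2, NUM_CHANNEL_NOTE_PROBES - i - 1))
		var probe := event_idx + step_size
		if probe < len(events) and t >= events[probe]['t_start']:
			event_idx = probe
	return event_idx
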
@@ -183,15 +189,17 @@ vec4 render_song(float sample_progress) {
 		}
 	}
 	// Convert the stereo float audio to S16LE
-	return vec4(pack_float_to_int16(downmixed_stereo.x), pack_float_to_int16(downmixed_stereo.y));
+	// return vec4(pack_float_to_int16(downmixed_stereo.x), pack_float_to_int16(downmixed_stereo.y));
+	return vec4(pack_float_to_int16(downmixed_stereo.x), pack_float_to_int16(mod(t, 2.0) - 1.0));
+	// return vec4(pack_float_to_int16((t/10.0) - 1.0), pack_float_to_int16(mod(t, 2.0) - 1.0));
 }
 
 void fragment() {
 	// GLES2
 	vec2 uv = vec2(UV.x, 1.0-UV.y);
-	uv = (trunc(uv*UV_QUANTIZE)+0.5)/UV_QUANTIZE;
-	// COLOR.xyzw = test_writeback(uv);
-	COLOR.xyzw = render_song(dot(uv, vec2(1.0, midi_events_size.x)));
+	// uv = (trunc(uv*UV_QUANTIZE)+0.5)/UV_QUANTIZE;
+	// COLOR.xyzw = test_writeback(TEXTURE, uv);
+	COLOR.xyzw = render_song(dot(trunc(uv*TEX_SIZE), vec2(1.0, TEX_SIZE)));
 }
 
 // const int MAX_TEMPO_EVENTS = 256;
@@ -1,7 +1,11 @@
 extends Control
 
+const INPUT_TEX_WIDTH := 2048
+const INPUT_FORMAT := Image.FORMAT_RGBAF # Image.FORMAT_LA8
+const INPUT_BYTES_PER_TEXEL := 16 # 2
+const OUTPUT_WIDTH := 4096
 var viewport: Viewport
-var render_queue: Array # of PoolByteArrays
+var render_queue: Array # of Images
 var result_queue: Array # of PoolByteArrays
 var current_image: Image
 var current_tex: ImageTexture # Needed to prevent GC before draw
@@ -17,6 +21,18 @@ func _ready() -> void:
 	self.current_image = Image.new()
 	self.current_tex = ImageTexture.new()
 
+func push_image(img: Image) -> void:
+	self.render_queue.append(img)
+
+func push_bytes(data: PoolByteArray) -> void:
+	# print(data.subarray(0, 15))
+	var rows = int(pow(2, ceil(log((len(data)/INPUT_BYTES_PER_TEXEL) / INPUT_TEX_WIDTH)/log(2))))
+	var target_length = rows * INPUT_BYTES_PER_TEXEL * INPUT_FORMAT
+	while len(data) < target_length: # This is inefficient, but this function should be called with pre-padded data anyway
+		data.append(0)
+	self.current_image.create_from_data(INPUT_TEX_WIDTH, rows, false, INPUT_FORMAT, data)
+	self.render_queue.append(self.current_image)
+
 func _process(_delta) -> void:
 	update()
 
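
push_bytes() pads the incoming bytes out to a whole power-of-two number of 2048-texel RGBAF rows (16 bytes per texel) before wrapping them into an Image. For the test data later in this commit that works out to 2048 events x 8 floats x 4 bytes = 65536 bytes = 4096 texels = 2 rows. A sketch of the same row calculation using Godot's built-in power-of-two helper (alternative formulation, not the committed one):

const INPUT_TEX_WIDTH := 2048
const INPUT_BYTES_PER_TEXEL := 16  # RGBAF: 4 floats x 4 bytes

func rows_needed(byte_length: int) -> int:
	var texels := byte_length / INPUT_BYTES_PER_TEXEL
	# Round the row count up to the next power of two, like the pow/ceil/log expression above.
	return nearest_po2(int(ceil(texels / float(INPUT_TEX_WIDTH))))
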
@@ -35,12 +51,13 @@ func _draw() -> void:
 		return
 
 	# Draw the next ImageTexture
-	var data: PoolByteArray = self.render_queue.pop_front()
-	print(data.subarray(0, 15))
-	self.current_image.create_from_data(4096, 4096, false, Image.FORMAT_LA8, data)
-	self.current_tex.create_from_image(self.current_image, Texture.FLAG_FILTER)
-	self.material.set_shader_param('tex', self.current_tex)
-	draw_texture(self.current_tex, Vector2.ZERO)
+	self.current_image = self.render_queue.pop_front()
+	self.current_tex.create_from_image(self.current_image, 0)
+	self.material.set_shader_param('midi_events', self.current_tex)
+	self.material.set_shader_param('midi_events_size', self.current_tex.get_size())
+	# draw_texture(self.current_tex, Vector2.ZERO)
+	draw_texture(self.viewport.get_texture(), Vector2.ZERO)
+	# draw_rect(Rect2(0, 0, OUTPUT_WIDTH, OUTPUT_WIDTH), Color.white)
 	self.waiting_for_viewport = true # Grab the result next draw
 
 func get_result() -> void:
@@ -48,13 +65,13 @@ func get_result() -> void:
 	var result_image := result_texture.get_data()
 	var result_bytes := result_image.get_data()
 
-	# Debugging: compare a sequence of all the possible 16bit integers
-	print_debug('result_image format is %d and has size'%result_image.get_format(), result_image.get_size(), result_bytes.subarray(0, 11))
-	test_readback(result_bytes)
-
 	self.result_queue.append(result_bytes)
 	self.waiting_for_viewport = false
 
+	# # Debugging: compare a sequence of all the possible 16bit integers
+	# print_debug('result_image format is %d and has size'%result_image.get_format(), result_image.get_size(), result_bytes.subarray(0, 11))
+	# test_readback(result_bytes)
+
 func test_readback(result_bytes: PoolByteArray):
 	# Debugging: compare a sequence of all the possible 16bit integers
 	var buff := StreamPeerBuffer.new()
@@ -125,22 +125,71 @@ func _ready() -> void:
 	for i in len(RomLoader.snes_data.bgm_song_pointers):
 		var pointer = RomLoader.snes_data.bgm_song_pointers[i]
 		# print('BGM 0x%02X (%02d) at 0x%06X' % [i, i, pointer])
+	self.test_rendering()
+
+onready var audio_renderer := $'%audio_renderer'
+func test_rendering() -> void:
+	SoundLoader.samples_to_texture()
+	audio_renderer.material.set_shader_param('instrument_samples', SoundLoader.samples_tex)
+	audio_renderer.material.set_shader_param('instrument_samples_size', SoundLoader.samples_tex.get_size())
+
+	var midi_events_bytes := StreamPeerBuffer.new()
+	var midi_events_bytes2 := StreamPeerBuffer.new()
+	var divisor = 128.0 #1024.0 # See sdr_scale in audio_renderer.gdshader
+	for i in 2048:
+		var t = i * 10.0
+		midi_events_bytes.put_float(t/divisor) # t_start
+		midi_events_bytes.put_float((t+3.0)/divisor) # t_end
+		# Try repacking these later
+		midi_events_bytes.put_float((i%35)/divisor) # instrument
+		midi_events_bytes.put_float(71/divisor) # pitch_idx
+		# midi_events_bytes.put_float((35 + (i%40))/divisor) # pitch_idx
+		midi_events_bytes2.put_float(1.0) # velocity
+		midi_events_bytes2.put_float((i%101)/100.0) # pan
+		midi_events_bytes2.put_float(0.0) # TBD
+		midi_events_bytes2.put_float(0.0) # TBD
+		# midi_events_bytes.put_float(((i%35) + 71/1024.0)/1023.0) # instrument_and_pitch
+		# midi_events_bytes.put_float((1.0 + (i*4)/1024.0)/1023.0) # velocity_and_pan
+		# midi_events_bytes2.put_float(0.0) # attack_and_decay
+		# midi_events_bytes2.put_float(0.0) # sustain_and_relase
+		# midi_events_bytes2.put_float(0.0) # TBD
+		# midi_events_bytes2.put_float(0.0) # TBD
+	# for i in 2048-256:
+	# 	midi_events_bytes.put_float(0.0)
+	# 	midi_events_bytes.put_float(0.0)
+	# 	midi_events_bytes.put_float(0.0)
+	# 	midi_events_bytes.put_float(0.0)
+	# 	midi_events_bytes2.put_float(0.0)
+	# 	midi_events_bytes2.put_float(0.0)
+	# 	midi_events_bytes2.put_float(0.0)
+	# 	midi_events_bytes2.put_float(0.0)
+	var channel_data = midi_events_bytes.data_array + midi_events_bytes2.data_array
+	audio_renderer.push_bytes(channel_data) # + channel_data + channel_data + channel_data + channel_data + channel_data + channel_data + channel_data)
 
 	# var test_payload := PoolByteArray()
 	# test_payload.resize(4096*4096*2)
 	# # for i in 5:
 	# # 	test_payload.fill(i*2+10)
-	# # 	$'%audio_renderer'.render_queue.append(test_payload)
+	# # 	audio_renderer.render_queue.append(test_payload)
 	# test_payload.fill(0)
 	# for i in 65536:
 	# 	test_payload.set(i*2, i%256)
 	# 	test_payload.set(i*2+1, i/256)
-	# $'%audio_renderer'.render_queue.append(test_payload)
-	# # $'%audio_renderer'.render_queue.append(test_payload)
+	# audio_renderer.render_queue.append(test_payload)
+	# # audio_renderer.render_queue.append(test_payload)
 
-# func _process(_delta):
-# 	update()
+func _process(_delta):
+	update()
 
-# func _draw() -> void:
-# 	if $'%audio_renderer'.waiting_for_viewport:
-# 		$'%audio_renderer'.get_result()
+func _draw() -> void:
+	if audio_renderer.waiting_for_viewport:
+		audio_renderer.get_result()
+		var result = audio_renderer.result_queue[0]
+		var rendered_audio := AudioStreamSample.new()
+		rendered_audio.data = result
+		rendered_audio.stereo = true
+		rendered_audio.mix_rate = 32000
+		rendered_audio.format = AudioStreamSample.FORMAT_16_BITS
+		var error = rendered_audio.save_to_wav('output/rendered_audio.wav')
+		print(error)
+	pass
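
In the test data above, each note occupies one RGBAF texel on the channel's event row (t_start, t_end, instrument, pitch, all pre-divided by divisor so the shader's sdr_scale restores them) and one texel on the following supplement row (velocity, pan, two TBD slots left in [0.0, 1.0]). A compact GDScript sketch of that per-event packing (helper name illustrative, not part of the commit):

func put_test_event(event_row: StreamPeerBuffer, supplement_row: StreamPeerBuffer, t_start: float, t_end: float, instrument: int, pitch: int, velocity: float, pan: float, divisor: float = 128.0) -> void:
	event_row.put_float(t_start / divisor)
	event_row.put_float(t_end / divisor)
	event_row.put_float(instrument / divisor)
	event_row.put_float(pitch / divisor)
	supplement_row.put_float(velocity)  # left in [0.0, 1.0]
	supplement_row.put_float(pan)
	supplement_row.put_float(0.0)       # TBD
	supplement_row.put_float(0.0)       # TBD
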
@@ -7,6 +7,13 @@
 
 [sub_resource type="ShaderMaterial" id=2]
 shader = ExtResource( 4 )
+shader_param/instrument_samples_size = Vector2( 2048, 128 )
+shader_param/instrument_row_padding = 3.0
+shader_param/instrument_row_payload = 2042.0
+shader_param/reference_note = 71.0
+shader_param/output_mixrate = 32000.0
+shader_param/midi_events_size = Vector2( 2048, 16 )
+shader_param/t_scale = 524.0
 
 [node name="audio_system" type="Node2D"]
 script = ExtResource( 1 )