JAOT Audio rendering

parent 57d88e876a
commit 0b2a120b1d
@@ -3,8 +3,15 @@
// Unfortunately, this loses type-checking on [0.0, 1.0] vs [0,255] etc. so a lot of this will involve comments declaring ranges.
shader_type canvas_item;
render_mode blend_premul_alpha;
uniform sampler2D instrument_samples;
uniform vec2 instrument_samples_size = vec2(2048.0, 128.0);
uniform int INT_OUTPUT_WIDTH = 4096;
uniform vec2 OUTPUT_FRAMEBUFFER_SIZE = vec2(4096.0, 4096.0);
uniform float reference_note = 71.0; // [0, 255], possibly [0, 127]
uniform float output_mixrate = 32000.0; // SNES SPC output is 32kHz
uniform vec2 midi_events_size = vec2(2048.0, 32.0);
uniform int tempo_scale_thousandths = 1000;
const int TEMPO_SCALE_MULTIPLIER = 1000;
// I feel like these magic numbers are a bit more intuitive in hex
const float x00FF = float(0x00FF); // 255.0
const float x0100 = float(0x0100); // 256.0
@@ -94,11 +101,7 @@ vec2 pack_float_to_int16(float value) {
// leaving 261121 texels for the sample data.

const float HEADER_LENGTH_TEXELS = 5.0;
uniform sampler2D instrument_samples;
uniform vec2 instrument_samples_size = vec2(2048.0, 128.0);
const int INSTRUMENT_SAMPLES_WIDTH = 2048;
uniform float reference_note = 71.0; // [0, 255], possibly [0, 127]
uniform float output_mixrate = 32000.0; // SNES SPC output is 32kHz
float sinc(float x) {
    x = abs(x) + 0.00000000000001; // Avoid division by zero
    return min(sin(x)/x, 1.0);
@@ -152,10 +155,17 @@ float get_instrument_sample(float instrument_index, float note, float t) {
const int NUM_CHANNELS = 8;
const int MAX_CHANNEL_NOTE_EVENTS = 2048;
const int NUM_CHANNEL_NOTE_PROBES = 11; // log2(MAX_CHANNEL_NOTE_EVENTS)
uniform vec2 midi_events_size = vec2(2048.0, 32.0);
vec4 get_midi_texel(sampler2D tex, float x, float y) {
    return texture(tex, vec2(x, y)/midi_events_size).xyzw;
}
int retime_smp(int smp) {
    // Overflow safety is important as our input values can go up to 2^24, and we multiply by around 2^10
    int factor = smp / tempo_scale_thousandths;
    int residue = smp % tempo_scale_thousandths;
    int a = (residue * TEMPO_SCALE_MULTIPLIER) / tempo_scale_thousandths;
    int b = factor * TEMPO_SCALE_MULTIPLIER;
    return a + b;
}
vec4 render_song(sampler2D tex, int smp) {
    // Each output texel rendered is a stereo S16LE frame representing 1/32000 of a second
    // 2048 is an established safe texture dimension so may as well go 2048 wide
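As a cross-check of the fixed-point arithmetic above, here is a minimal GDScript mirror of retime_smp() (a hypothetical helper, not part of this commit). Shader ints are 32-bit, so a naive smp * TEMPO_SCALE_MULTIPLIER can overflow (2^24 * 1000 > 2^31); the quotient/remainder split keeps every intermediate below 2^31.

const TEMPO_SCALE_MULTIPLIER = 1000

func retime_smp_cpu(smp: int, tempo_scale_thousandths: int) -> int:
    var factor := smp / tempo_scale_thousandths    # integer division, like GLSL int /
    var residue := smp % tempo_scale_thousandths
    var a := (residue * TEMPO_SCALE_MULTIPLIER) / tempo_scale_thousandths
    var b := factor * TEMPO_SCALE_MULTIPLIER
    return a + b

# Example: retime_smp_cpu(16777216, 500) -> factor = 33554, residue = 216,
# a = 432, b = 33554000, result = 33554432: half tempo doubles the sample index.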
@@ -170,11 +180,12 @@ vec4 render_song(sampler2D tex, int smp) {
    int smp_start;
    for (int i = 0; i < NUM_CHANNEL_NOTE_PROBES; i++) {
        float step_size = exp2(float(NUM_CHANNEL_NOTE_PROBES - i - 1));
        smp_start = int(unpack_int32(get_midi_texel(tex, event_idx + step_size, row)));
        smp_start = retime_smp(int(unpack_int32(get_midi_texel(tex, event_idx + step_size, row))));
        event_idx += (smp >= smp_start) ? step_size : 0.0;
    }
    smp_start = int(unpack_int32(get_midi_texel(tex, event_idx, row)));
    int smp_end = int(unpack_int32(get_midi_texel(tex, event_idx, row+1.0)));
    smp_start = retime_smp(int(unpack_int32(get_midi_texel(tex, event_idx, row))));
    int smp_end = retime_smp(int(unpack_int32(get_midi_texel(tex, event_idx, row+1.0))));

    vec4 note_event_supplement = get_midi_texel(tex, event_idx, row+2.0); // left as [0.0, 1.0]
    float instrument_idx = trunc(note_event_supplement.x * 255.0);
    float pitch_idx = note_event_supplement.y * 255.0;
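The probe loop above is effectively a branch-free binary search over each channel's (up to 2048) note events, keeping the last event whose retimed start is at or before the requested sample. A rough GDScript equivalent, assuming a sorted array of retimed start samples:

# Assumption: starts is sorted ascending and starts[0] <= smp.
func find_event_index(starts: Array, smp: int) -> int:
    var event_idx := 0
    for i in range(11):                  # NUM_CHANNEL_NOTE_PROBES = log2(2048)
        var step_size := 1 << (10 - i)   # 1024, 512, ..., 1
        if event_idx + step_size < starts.size() and smp >= starts[event_idx + step_size]:
            event_idx += step_size
    return event_idx                     # index of the event playing at smp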
@@ -3,6 +3,9 @@ extends Control
signal render_initial_ready(key) # A small chunk at the start has been rendered and is ready to play
signal render_complete(key) # The full track has been rendered and is ready to pop-in

enum RENDER_MODE {BATCH, DYNAMIC, JAOT, TEST_READBACK}
var render_mode = RENDER_MODE.JAOT

const INPUT_TEX_WIDTH := 2048
const INPUT_FORMAT := Image.FORMAT_RGBA8 # Image.FORMAT_LA8
const INPUT_BYTES_PER_TEXEL := 4 # 2
@@ -12,15 +15,19 @@ var OUTPUT_FRAMEBUFFER_SIZE: Vector2
var OUTPUT_WIDTH: int
var OUTPUT_HEIGHT: int
onready var viewport: Viewport = self.get_parent()
onready var render_queue: Array = [] # of [desc key, remaining_samples]
onready var cached_midis: Dictionary = {} # desc: [target_samples, ImageTexture]
onready var cached_renders: Dictionary = {} # desc: [remaining_samples, PoolByteArray]
onready var render_queue: Array = [] # of [key, remaining_samples]
onready var cached_midis: Dictionary = {} # key: [target_samples, ImageTexture]
onready var cached_renders: Dictionary = {} # key: [remaining_samples, PoolByteArray]
onready var current_textures: Array = [] # of ImageTextures - Needed to prevent GC before draw
onready var waiting_for_viewport: Array = []
onready var done_first_draw: bool = false

func _ready() -> void:
    self._update_viewport(4096, 4096)
    match self.render_mode:
        RENDER_MODE.BATCH, RENDER_MODE.DYNAMIC:
            self._update_viewport(4096, 4096)
        RENDER_MODE.JAOT:
            self._update_viewport(256, 2048)

func _process(_delta) -> void:
    update()
@@ -34,11 +41,17 @@ func _draw() -> void:
    if self.waiting_for_viewport:
        # Another node later in the draw sequence can call this within the same frame,
        # otherwise, this picks it up the following frame
        get_result()
        self.get_result()

    # self.waiting_for_viewport is cleared at the end of get_result()
    if self.render_queue:
        self._render_in_batch()
        # self._render_one_at_a_time()
        match self.render_mode:
            RENDER_MODE.BATCH:
                self._render_in_batch()
            RENDER_MODE.DYNAMIC:
                self._render_dynamic_one_at_a_time()
            RENDER_MODE.JAOT:
                self._render_just_ahead_of_time()


func _update_viewport(width: int, height: int) -> void:
@@ -50,21 +63,35 @@ func _update_viewport(width: int, height: int) -> void:
    self.material.set_shader_param('INT_OUTPUT_WIDTH', OUTPUT_WIDTH)


func _get_cached_midi(key: String) -> Array: # [target_samples: int, tex: ImageTexture, tempo_scale_thousandths: int]
    if not('-' in key):
        return self.cached_midis[key] + [1000]
    var split := key.split('-')
    var tempo_scale_thousandths := int(split[1])
    var target_samples_and_tex = self.cached_midis[split[0]]
    var new_target_samples: int = (target_samples_and_tex[0]*1000)/tempo_scale_thousandths
    return [new_target_samples, target_samples_and_tex[1], tempo_scale_thousandths]
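To illustrate the key convention handled by _get_cached_midi() (the cache contents here are hypothetical): a bare key returns the entry at normal tempo, while a 'BASE-thousandths' key rescales the target length by 1000/tempo_scale_thousandths.

# Assuming cached_midis['BGM07'] = [3200000, tex]  (100 s at 32 kHz):
# _get_cached_midi('BGM07')      -> [3200000, tex, 1000]
# _get_cached_midi('BGM07-1500') -> [2133333, tex, 1500]  (1.5x tempo, shorter render)
# _get_cached_midi('BGM07-0500') -> [6400000, tex, 500]   (half tempo, twice as long)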

func _render_midi(key: String, output_rows_drawn_including_this: int, rows_to_draw: int) -> void:
    var target_samples_and_tex = self.cached_midis[key]
    var target_samples: int = target_samples_and_tex[0]
    var tex: ImageTexture = target_samples_and_tex[1]
    var y_top: int = OUTPUT_HEIGHT - output_rows_drawn_including_this
    var y_bot: int = y_top + rows_to_draw
    var uv_inv_v: float = 1 - (rows_to_draw / OUTPUT_FRAMEBUFFER_SIZE.y)
    var uvs := PoolVector2Array([Vector2(0, uv_inv_v), Vector2(1, uv_inv_v), Vector2(1, 1), Vector2(0, 1)])
    var points := PoolVector2Array([Vector2(0, y_top), Vector2(OUTPUT_WIDTH, y_top), Vector2(OUTPUT_WIDTH, y_bot), Vector2(0, y_bot)])
    draw_primitive(points, QUAD_COLOR, uvs, tex)
    self.waiting_for_viewport.append([rows_to_draw, key]) # Grab the result next draw
    var target_samples_and_tex_and_tempo = self._get_cached_midi(key)
    var target_samples: int = target_samples_and_tex_and_tempo[0]
    var tex: ImageTexture = target_samples_and_tex_and_tempo[1]
    var tempo_scale_thousandths: int = target_samples_and_tex_and_tempo[2]
    self.material.set_shader_param('tempo_scale_thousandths', tempo_scale_thousandths)
    if rows_to_draw <= OUTPUT_HEIGHT: # Full draw
        var y_top: int = OUTPUT_HEIGHT - output_rows_drawn_including_this
        var y_bot: int = y_top + rows_to_draw
        var uv_inv_v: float = 1 - (rows_to_draw / OUTPUT_FRAMEBUFFER_SIZE.y)
        var uvs := PoolVector2Array([Vector2(0, uv_inv_v), Vector2(1, uv_inv_v), Vector2(1, 1), Vector2(0, 1)])
        var points := PoolVector2Array([Vector2(0, y_top), Vector2(OUTPUT_WIDTH, y_top), Vector2(OUTPUT_WIDTH, y_bot), Vector2(0, y_bot)])
        draw_primitive(points, QUAD_COLOR, uvs, tex)
        self.waiting_for_viewport.append([rows_to_draw, key]) # Grab the result next draw
    else:
        print('Could not fit %s into %dx%d output framebuffer, it needs %d rows'%[key, OUTPUT_WIDTH, OUTPUT_HEIGHT, rows_to_draw])


func _render_in_batch() -> void:
    # self._update_viewport(4096, 4096)
    self.waiting_for_viewport = []
    var rows_drawn := 0
    while self.render_queue:
@@ -73,75 +100,147 @@ func _render_in_batch() -> void:
        rows_drawn += rows_to_draw
        if rows_drawn > OUTPUT_HEIGHT:
            if self.waiting_for_viewport.empty():
                print('Could not fit %s into %dx%d output framebuffer, it needs %d rows'%[self.render_queue[0][2], OUTPUT_WIDTH, OUTPUT_HEIGHT, rows_to_draw])
                print('Could not fit %s into %dx%d output framebuffer, it needs %d rows'%[self.render_queue[0][0], OUTPUT_WIDTH, OUTPUT_HEIGHT, rows_to_draw])
                self.render_queue.pop_front()
            break
        # Draw the next ImageTexture
        self._render_midi(self.render_queue.pop_front()[0], rows_drawn, rows_to_draw)


func _render_one_at_a_time() -> void: # Non power-of-two dimensioned textures should be restricted to GLES3
func _render_dynamic_one_at_a_time() -> void: # Non power-of-two dimensioned textures should be restricted to GLES3
    self.waiting_for_viewport = []
    var rows_drawn := 0
    if self.render_queue:
        var target_samples: int = self.render_queue[0][1]
        var rows_to_draw := int(ceil(target_samples/float(OUTPUT_WIDTH)))
        self._update_viewport(4096, rows_to_draw)
        rows_drawn += rows_to_draw
        # Draw the next ImageTexture
        self._render_midi(self.render_queue.pop_front()[0], rows_drawn, rows_to_draw)
        var entry = self.render_queue.pop_front()
        var key: String = entry[0]
        var target_samples: int = entry[1]
        var rows_to_draw := int(ceil(target_samples/float(OUTPUT_WIDTH)))
        self._update_viewport(4096, rows_to_draw)
        # Draw the next ImageTexture
        self._render_midi(key, rows_to_draw, rows_to_draw)

func push_image(image: Image, target_samples: int, desc: String) -> void:
func _render_just_ahead_of_time() -> void: # Optimized for latency
    var entry = self.render_queue[0]
    var key: String = entry[0]
    var remaining_samples: int = entry[1]

    var target_samples_and_tex_and_tempo = self._get_cached_midi(key)
    var total_target_samples: int = target_samples_and_tex_and_tempo[0]
    var tex: ImageTexture = target_samples_and_tex_and_tempo[1]
    var tempo_scale_thousandths: int = target_samples_and_tex_and_tempo[2]

    var total_rows := int(ceil(total_target_samples/float(OUTPUT_WIDTH)))
    var remaining_rows := int(ceil(remaining_samples/float(OUTPUT_WIDTH)))
    var rows_to_draw := remaining_rows
    # Draw the next ImageTexture
    self.material.set_shader_param('tempo_scale_thousandths', tempo_scale_thousandths)
    var y_top: int = 0
    if remaining_rows <= OUTPUT_HEIGHT:
        y_top = OUTPUT_HEIGHT - rows_to_draw
        self.render_queue.pop_front()
    else:
        rows_to_draw = OUTPUT_HEIGHT
        self.render_queue[0] = [key, remaining_samples - (rows_to_draw*OUTPUT_WIDTH)]
    # Draw the next ImageTexture
    var starting_row := total_rows - remaining_rows # The start of the unrendered rows
    var ending_row := starting_row + rows_to_draw # The end of this chunk of unrendered rows
    var uv_top: float = 1 - (ending_row / OUTPUT_FRAMEBUFFER_SIZE.y)
    var uv_bot: float = 1 - (starting_row / OUTPUT_FRAMEBUFFER_SIZE.y)
    var uvs := PoolVector2Array([Vector2(0, uv_top), Vector2(1, uv_top), Vector2(1, uv_bot), Vector2(0, uv_bot)])
    var points := PoolVector2Array([Vector2(0, y_top), Vector2(OUTPUT_WIDTH, y_top), Vector2(OUTPUT_WIDTH, OUTPUT_HEIGHT), Vector2(0, OUTPUT_HEIGHT)])
    draw_primitive(points, QUAD_COLOR, uvs, tex)
    self.waiting_for_viewport.append([rows_to_draw, key]) # Grab the result next draw

func push_image(image: Image, target_samples: int, key: String, enqueue: bool = true) -> void:
    var tex := ImageTexture.new()
    tex.create_from_image(image, 0)
    self.cached_midis[desc] = [target_samples, tex]
    self.cached_midis[key] = [target_samples, tex]
    self.material.set_shader_param('midi_events_size', tex.get_size()) # Should all be the same size for now, revisit if we need mixed sizes.
    self.render_queue.append([desc, target_samples])
    if enqueue:
        self.render_queue.append([key, target_samples])

func push_bytes(data: PoolByteArray, target_samples: int, desc: String) -> void:
func push_bytes(data: PoolByteArray, target_samples: int, key: String, enqueue: bool = true) -> void:
    var rows = int(pow(2, ceil(log((len(data)/INPUT_BYTES_PER_TEXEL) / INPUT_TEX_WIDTH)/log(2))))
    var target_length = rows * INPUT_BYTES_PER_TEXEL * INPUT_FORMAT
    while len(data) < target_length: # This is inefficient, but this function should be called with pre-padded data anyway
        data.append(0)
    var image := Image.new()
    image.create_from_data(INPUT_TEX_WIDTH, rows, false, INPUT_FORMAT, data)
    self.push_image(image, target_samples, desc)
    self.push_image(image, target_samples, key, enqueue)
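A worked example of the row calculation in push_bytes() (the byte count is illustrative): with RGBA8 input, each texel carries 4 bytes, and the event data is padded up to a power-of-two number of 2048-texel rows.

# len(data) = 3000000 bytes -> 750000 texels at 4 bytes per texel
# 750000 / 2048 = 366 rows (integer), rounded up to the next power of two = 512
# so the input ImageTexture is created as 2048 x 512 RGBA8.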

func queue_cached_bgm(key: String) -> void:
    var new_target_samples_etc := self._get_cached_midi(key)
    self.render_queue.append([key, new_target_samples_etc[0]])

func get_result() -> void:
    var result_texture := self.viewport.get_texture()
    var result_image := result_texture.get_data()
    var result_bytes := result_image.get_data()

    var retrieved_rows := 0
    for rows_and_desc in self.waiting_for_viewport:
        var entry_rows: int = rows_and_desc[0]
        var key: String = rows_and_desc[1]
        var bytes_start := retrieved_rows * OUTPUT_WIDTH * OUTPUT_BYTES_PER_TEXEL
        var bytes_end := (retrieved_rows + entry_rows) * OUTPUT_WIDTH * OUTPUT_BYTES_PER_TEXEL
        var entry_bytes := result_bytes.subarray(bytes_start, bytes_end-1)
        self.cached_renders[key] = [0, entry_bytes]
        emit_signal('render_initial_ready', key)
        emit_signal('render_complete', key)
        retrieved_rows += entry_rows
    # result_bytes.resize(result_byte_count)
    match self.render_mode:
        RENDER_MODE.BATCH: # Multiple complete songs in a fixed-size framebuffer
            var retrieved_rows := 0
            for rows_and_key in self.waiting_for_viewport:
                var entry_rows: int = rows_and_key[0]
                var key: String = rows_and_key[1]
                var bytes_start := retrieved_rows * OUTPUT_WIDTH * OUTPUT_BYTES_PER_TEXEL
                var bytes_end := (retrieved_rows + entry_rows) * OUTPUT_WIDTH * OUTPUT_BYTES_PER_TEXEL
                var entry_bytes := result_bytes.subarray(bytes_start, bytes_end-1)
                self.cached_renders[key] = [0, entry_bytes]
                emit_signal('render_initial_ready', key)
                emit_signal('render_complete', key)
                retrieved_rows += entry_rows
        RENDER_MODE.DYNAMIC: # One complete song in a framebuffer sized to fit it
            var rows_and_key = self.waiting_for_viewport[0]
            var key: String = rows_and_key[1]
            self.cached_renders[key] = [0, result_bytes]
            emit_signal('render_initial_ready', key)
            emit_signal('render_complete', key)
        RENDER_MODE.JAOT: # One partial song in a small framebuffer
            var rows_and_key = self.waiting_for_viewport[0]
            var key: String = rows_and_key[1]
            var samples_just_rendered: int = len(result_bytes) / OUTPUT_BYTES_PER_TEXEL
            if not (key in self.cached_renders):
                var samples_and_tex = _get_cached_midi(key)
                var target_samples: int = samples_and_tex[0]
                if samples_just_rendered >= target_samples:
                    result_bytes.resize(target_samples * OUTPUT_BYTES_PER_TEXEL)
                    self.cached_renders[key] = [0, result_bytes]
                    emit_signal('render_initial_ready', key)
                    emit_signal('render_complete', key)
                else:
                    var remaining_samples: int = target_samples - samples_just_rendered
                    self.cached_renders[key] = [remaining_samples, result_bytes]
                    emit_signal('render_initial_ready', key)
            else:
                var old_remaining_samples_and_data = self.cached_renders[key]
                var target_samples: int = old_remaining_samples_and_data[0]
                var existing_data: PoolByteArray = old_remaining_samples_and_data[1]
                if samples_just_rendered >= target_samples:
                    result_bytes.resize(target_samples * OUTPUT_BYTES_PER_TEXEL)
                    self.cached_renders[key] = [0, existing_data + result_bytes]
                    emit_signal('render_complete', key)
                else:
                    self.cached_renders[key] = [target_samples - samples_just_rendered, existing_data + result_bytes]
        RENDER_MODE.TEST_READBACK:
            pass # This has bitrotted
            # # Debugging: compare a sequence of all the possible 16bit integers
            # print_debug('result_image format is %d and has size'%result_image.get_format(), result_image.get_size(), result_bytes.subarray(0, 11))
            # test_readback(result_bytes)
    self.waiting_for_viewport = []

    # # Debugging: compare a sequence of all the possible 16bit integers
    # print_debug('result_image format is %d and has size'%result_image.get_format(), result_image.get_size(), result_bytes.subarray(0, 11))
    # test_readback(result_bytes)

func test_readback(result_bytes: PoolByteArray):
    # Debugging: compare a sequence of all the possible 16bit integers
    var buff := StreamPeerBuffer.new()
    buff.set_data_array(result_bytes)
    var tex_readback = 0
    var uv_readback = 0
    for i in 0x1000:
        tex_readback = buff.get_u16()
        uv_readback = buff.get_u16()
        if tex_readback != i:
            print('tex readback %d (0x%04x) was instead %d (0x%04x)'%[i, i, tex_readback, tex_readback])
        if uv_readback != i:
            print('uv readback %d (0x%04x) was instead %d (0x%04x)'%[i, i, uv_readback, uv_readback])
# This has bitrotted
# func test_readback(result_bytes: PoolByteArray):
# # Debugging: compare a sequence of all the possible 16bit integers
# var buff := StreamPeerBuffer.new()
# buff.set_data_array(result_bytes)
# var tex_readback = 0
# var uv_readback = 0
# for i in 0x1000:
# tex_readback = buff.get_u16()
# uv_readback = buff.get_u16()
# if tex_readback != i:
# print('tex readback %d (0x%04x) was instead %d (0x%04x)'%[i, i, tex_readback, tex_readback])
# if uv_readback != i:
# print('uv readback %d (0x%04x) was instead %d (0x%04x)'%[i, i, uv_readback, uv_readback])
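Taken together, a caller drives the JAOT path roughly as follows (a sketch only: packed_events, target_samples, and the 'BGM07' keys are placeholders, not names from this commit).

# 1. Cache the packed MIDI data once, without queueing a render:
audio_renderer.push_bytes(packed_events, target_samples, 'BGM07', false)
# 2. Later, request a render at a specific tempo scale ('BGM07-1500' = 1.5x tempo):
audio_renderer.queue_cached_bgm('BGM07-1500')
# 3. render_initial_ready fires after the first 256x2048 framebuffer is read back
#    (256 * 2048 = 524288 samples, about 16.4 s at 32 kHz), so playback can start early;
#    render_complete fires once cached_renders[key] holds the whole track.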
@@ -13,8 +13,7 @@ var inst_buttons = []
var sfx_buttons = []
var initialized_instrument_texture := false
var queued_bgm_playback := ''

# TODO: Add a tempo slider, a uniform in the shader for tempo scale, and use these to demo JAOT rendering
var current_bgm_playback := ''

const NUM_CHANNELS := 8
var music_player = null
@@ -94,9 +93,29 @@ func evaluate_bgm(id: int):
        tracks.append(MusicLoader.unroll_track(buffer.duplicate(), bgm_song_ptr, track_ptr, end_ptr, '%02d:%02d'%[id, i]))
    bgm_tracksets[id] = tracks

func _play_bgm_jaot(key: String) -> void:
    var target_time := 0.0
    var bgm_key_and_tempo_thou := key.split('-')
    var bgm_key := bgm_key_and_tempo_thou[0]
    var tempo_thou := int(bgm_key_and_tempo_thou[1])
    if audio_player.playing and self.current_bgm_playback.begins_with(bgm_key):
        var old_tempo_thou := int(self.current_bgm_playback.split('-')[1])
        var old_playback_pos: float = audio_player.get_playback_position()
        target_time = old_playback_pos * old_tempo_thou / tempo_thou
        print('Old temposcale %d, New temposcale %d, Old pos %.2f, New pos %.2f' % [old_tempo_thou, tempo_thou, old_playback_pos, target_time])
    else:
        print('audioplayer was not playing same bgm')
    self.audio_player.stream = self.prerendered_bgms[key]
    self.audio_player.play(target_time)
    self.prerendered_bgms.erase(self.current_bgm_playback) # purge previous stream from cache
    self.current_bgm_playback = key
    self.queued_bgm_playback = ''

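The target_time conversion above keeps the musical position when hopping between tempo-scaled renders of the same track. For example (illustrative numbers): playing 'BGM07-1000' at 30.0 s and switching to 'BGM07-1500' gives target_time = 30.0 * 1000 / 1500 = 20.0 s, which is the same beat in the 1.5x-tempo render.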
func play_bgm(id: int, live: bool) -> void:
    self._stop_all()
    if live:
        print('@%dms - Playing BGM%02d' % [get_ms(), id])
        var inst_indices = RomLoader.snes_data.bgm_instrument_indices[id]
        for i in 16:
            var inst_idx: int = inst_indices[i]-1
@@ -112,13 +131,16 @@ func play_bgm(id: int, live: bool) -> void:
    else:
        # Play prerendered
        var bgm_key = 'BGM%02d'%id
        if bgm_key in self.prerendered_bgms:
            self.audio_player.stream = self.prerendered_bgms[bgm_key]
            self.audio_player.play()
        else:
        var tempo_thou = $slider_tempo.value * 10
        var new_key = '%s-%04d' % [bgm_key, tempo_thou]
        print('@%dms - Playing %s' % [get_ms(), new_key])
        if not (bgm_key in self.prerendered_bgms):
            self.queue_prerender_bgm(id)
            self.queued_bgm_playback = bgm_key
            print('Playing BGM%02d' % id)
        if new_key in self.prerendered_bgms:
            self._play_bgm_jaot(new_key)
        else:
            audio_renderer.queue_cached_bgm(new_key)
            self.queued_bgm_playback = new_key

func _play_bgm_live() -> void:
    self.play_bgm($sb_bgm.value, true)
@@ -155,6 +177,20 @@ func _update_bgm_label(id = 0) -> void:
        $lbl_bgm_title.text = ''


func _update_tempo(tempo := 100) -> void:
    $lbl_tempo.text = 'Tempo scale: %d' % tempo
    var tempo_thou = tempo * 10
    if audio_player.is_playing():
        var bgm_key = 'BGM%02d-%04d' % [$sb_bgm.value, tempo_thou]
        # TODO: render a tempo-scaled version
        audio_renderer.queue_cached_bgm(bgm_key)
        self.queued_bgm_playback = bgm_key

func _reset_tempo() -> void:
    self._update_tempo()
    $slider_tempo.value = 100


func _exit() -> void:
    self.emit_signal('exit')
@@ -162,6 +198,8 @@ func _ready() -> void:
    self._create_sfx_buttons()
    self._create_bgm_playback()
    $btn_stop.connect('pressed', self, '_stop_all')
    $slider_tempo.connect('value_changed', self, '_update_tempo')
    $btn_reset_tempo.connect('pressed', self, '_reset_tempo')
    $btn_exit.connect('pressed', self, '_exit')
    audio_renderer.connect('render_initial_ready', self, '_on_render_initial_ready')
    audio_renderer.connect('render_complete', self, '_on_render_complete')
@@ -206,7 +244,7 @@ func queue_prerender_bgm(bgm_id: int) -> void:
    var target_time = data_and_target_time_and_loops[1]
    var target_samples = target_time * 32000
    var bgm_key := 'BGM%02d'%bgm_id
    audio_renderer.push_bytes(data, target_samples, bgm_key)
    audio_renderer.push_bytes(data, target_samples, bgm_key, false)
    self.prerendered_bgm_start_and_end_loops[bgm_key] = data_and_target_time_and_loops[2]

@@ -226,9 +264,11 @@ var print_batch_results := ''
func _get_prerendered_audio():
    self.print_batch_results = ''
    audio_renderer.get_result()
    print('@%dms - Rendered %s without saving' % [get_ms(), self.print_batch_results.right(2)])
    if self.print_batch_results:
        print('@%dms - Rendered %s without saving' % [get_ms(), self.print_batch_results.right(2)])

func _on_render_initial_ready(key: String):
    print('@%dms - _on_render_initial_ready("%s")' % [get_ms(), key])
    # Used for JAOT playback
    var remaining_samples_and_data = audio_renderer.cached_renders[key]
@@ -237,33 +277,43 @@ func _on_render_initial_ready(key: String):
    rendered_audio.stereo = true
    rendered_audio.mix_rate = 32000
    rendered_audio.format = AudioStreamSample.FORMAT_16_BITS
    if prerendered_bgm_start_and_end_loops[key][0] >= 0:
        rendered_audio.loop_begin = int(round(prerendered_bgm_start_and_end_loops[key][0]))
        rendered_audio.loop_end = int(round(prerendered_bgm_start_and_end_loops[key][1]))
        rendered_audio.loop_mode = AudioStreamSample.LOOP_FORWARD
    var tempo_thou := 1000
    if '-' in key:
        var bgm_key_and_tempo := key.split('-')
        tempo_thou = int(bgm_key_and_tempo[1])
        var bgm_key = bgm_key_and_tempo[0]
        if prerendered_bgm_start_and_end_loops[bgm_key][0] >= 0:
            rendered_audio.loop_begin = int(round(prerendered_bgm_start_and_end_loops[bgm_key][0] * 1000 / tempo_thou))
            rendered_audio.loop_end = int(round(prerendered_bgm_start_and_end_loops[bgm_key][1] * 1000 / tempo_thou))
            rendered_audio.loop_mode = AudioStreamSample.LOOP_FORWARD
    else:
        if prerendered_bgm_start_and_end_loops[key][0] >= 0:
            rendered_audio.loop_begin = int(round(prerendered_bgm_start_and_end_loops[key][0]))
            rendered_audio.loop_end = int(round(prerendered_bgm_start_and_end_loops[key][1]))
            rendered_audio.loop_mode = AudioStreamSample.LOOP_FORWARD
    self.prerendered_bgms[key] = rendered_audio

    if self.queued_bgm_playback == key:
        print('@%dms - Rendered initial chunk of %s for immediate playback' % [get_ms(), key])
        self.audio_player.stream = rendered_audio
        self.audio_player.play()
        self.queued_bgm_playback = ''
        print('@%dms - Rendered initial chunk of %s for immediate playback (%d samples at 32kHz = %.2fs)' % [get_ms(), key, len(remaining_samples_and_data[1])/4, rendered_audio.get_length()])
        self._play_bgm_jaot(key)

func _on_render_complete(key: String):
    print('@%dms - _on_render_complete("%s")' % [get_ms(), key])
    # Used for JAOT playback
    var remaining_samples_and_data = audio_renderer.cached_renders[key]
    audio_renderer.cached_renders.erase(key)
    var data: PoolByteArray = remaining_samples_and_data[1]

    if remaining_samples_and_data[0] != 0: # Should be 0
        print_debug('render_completed signal for incomplete render! %s has %d remaining samples, should be 0'%[key, remaining_samples_and_data[0]])
    # Assume _on_render_initial_ready was already called and AudioStreamSample has already been created
    self.prerendered_bgms[key].data = remaining_samples_and_data[1]
    self.prerendered_bgms[key].data = data
    if save_prerendered_audio:
        var error = self.prerendered_bgms[key].save_to_wav('output/rendered_%s.wav'%key)
        print('@%dms - Saved render of %s (error code %s)' % [get_ms(), key, globals.ERROR_CODE_STRINGS[error]])
    else:
        # print('@%dms - Rendered %s without saving' % [get_ms(), key])
        self.print_batch_results = '%s, %s'%[self.print_batch_results, key]
        self.print_batch_results = '%s, %s (%.2fs %.2fMiB)'%[self.print_batch_results, key, self.prerendered_bgms[key].get_length(), len(data)/0x100000]


func get_shader_test_pattern() -> PoolByteArray:
@@ -6,12 +6,13 @@

[sub_resource type="ShaderMaterial" id=2]
shader = ExtResource( 4 )
shader_param/instrument_samples_size = Vector2( 2048, 128 )
shader_param/INT_OUTPUT_WIDTH = 4096
shader_param/OUTPUT_FRAMEBUFFER_SIZE = Vector2( 4096, 4096 )
shader_param/instrument_samples_size = Vector2( 2048, 128 )
shader_param/reference_note = 71.0
shader_param/output_mixrate = 32000.0
shader_param/midi_events_size = Vector2( 2048, 16 )
shader_param/tempo_scale_thousandths = 1000

[node name="audio_system" type="Node2D"]
script = ExtResource( 1 )
@@ -50,28 +51,29 @@ max_value = 69.0
align = 2

[node name="btn_bgm_live" type="Button" parent="."]
margin_top = 216.0
margin_right = 86.0
margin_bottom = 240.0
text = "Play BGM live"
margin_left = 190.0
margin_top = 150.0
margin_right = 384.0
margin_bottom = 174.0
text = "Play BGM (live sample playback)"

[node name="btn_bgm_prerendered" type="Button" parent="."]
margin_top = 164.0
margin_right = 140.0
margin_bottom = 188.0
text = "Play BGM prerendered"
text = "Play BGM (JAOT rendered)"

[node name="btn_render" type="Button" parent="."]
margin_top = 190.0
margin_top = 218.0
margin_right = 96.0
margin_bottom = 214.0
margin_bottom = 240.0
text = "Render All BGM"

[node name="btn_stop" type="Button" parent="."]
margin_left = 278.0
margin_top = 164.0
margin_top = 176.0
margin_right = 384.0
margin_bottom = 188.0
margin_bottom = 200.0
text = "Stop All Playback"

[node name="lbl_bgm_title" type="Label" parent="."]
@@ -95,3 +97,28 @@ margin_top = 218.0
margin_right = 384.0
margin_bottom = 240.0
text = "Return to Debug Menu"

[node name="lbl_tempo" type="Label" parent="."]
margin_left = 120.0
margin_top = 192.0
margin_right = 218.0
margin_bottom = 206.0
text = "Tempo scale: 100"

[node name="btn_reset_tempo" type="Button" parent="."]
margin_left = 120.0
margin_top = 218.0
margin_right = 204.0
margin_bottom = 240.0
text = "Reset Tempo"

[node name="slider_tempo" type="HSlider" parent="."]
margin_left = 4.0
margin_top = 205.0
margin_right = 380.0
margin_bottom = 215.0
min_value = 10.0
max_value = 300.0
step = 2.0
value = 100.0
rounded = true