ChocolateBird/scripts/audio_renderer.gd

extends Control
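
# Renders audio on the GPU: cached MIDI-event textures are drawn through
# res://shaders/audio_renderer.gdshader into the parent Viewport, and the framebuffer
# is read back as raw sample bytes in get_result().
# BATCH packs several whole tracks into one 4096x4096 pass, DYNAMIC resizes the
# viewport to fit a single track, and JAOT renders one track in 256x2048 chunks so
# playback can start from the first chunk (render_initial_ready / render_complete).
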
signal render_initial_ready(key) # A small chunk at the start has been rendered and is ready to play
signal render_complete(key) # The full track has been rendered and is ready to pop in

enum RENDER_MODE {BATCH, DYNAMIC, JAOT, TEST_READBACK}
var render_mode = RENDER_MODE.JAOT

const INPUT_TEX_WIDTH := 4096
const INPUT_FORMAT := Image.FORMAT_RGBA8 # Image.FORMAT_LA8
const INPUT_BYTES_PER_TEXEL := 4 # 2
const OUTPUT_BYTES_PER_TEXEL := 4
const QUAD_COLOR := PoolColorArray([Color.white, Color.white, Color.white, Color.white])

var OUTPUT_FRAMEBUFFER_SIZE: Vector2
var OUTPUT_WIDTH: int
var OUTPUT_HEIGHT: int

onready var viewport: Viewport = self.get_parent()
onready var render_queue: Array = [] # of [key, remaining_samples]
onready var cached_midis: Dictionary = {} # key: [target_samples, ImageTexture]
onready var cached_renders: Dictionary = {} # key: [remaining_samples, PoolByteArray]
onready var current_textures: Array = [] # of ImageTextures - Needed to prevent GC before draw
onready var waiting_for_viewport: Array = []
onready var done_first_draw: bool = false
var initialized_instrument_texture := false

func _ready() -> void:
    self.material = ShaderMaterial.new()
    self.material.shader = preload('res://shaders/audio_renderer.gdshader')
    match self.render_mode:
        RENDER_MODE.BATCH, RENDER_MODE.DYNAMIC:
            self._update_viewport(4096, 4096)
        RENDER_MODE.JAOT:
            self._update_viewport(256, 2048)

func _process(_delta) -> void:
    update()

func _draw() -> void:
    # The first draw always seems to fail, so skip it
    if not self.done_first_draw:
        self.done_first_draw = true
        return
    if self.waiting_for_viewport:
        # Another node later in the draw sequence can call this within the same frame;
        # otherwise, this picks it up the following frame.
        self.get_result()
        # self.waiting_for_viewport is cleared at the end of get_result()
    if self.render_queue:
        match self.render_mode:
            RENDER_MODE.BATCH:
                self._render_in_batch()
            RENDER_MODE.DYNAMIC:
                self._render_dynamic_one_at_a_time()
            RENDER_MODE.JAOT:
                self._render_just_ahead_of_time()

func _update_viewport(width: int, height: int) -> void:
    self.OUTPUT_WIDTH = width
    self.OUTPUT_HEIGHT = height
    self.OUTPUT_FRAMEBUFFER_SIZE = Vector2(width, height)
    self.viewport.size = OUTPUT_FRAMEBUFFER_SIZE
    self.material.set_shader_param('OUTPUT_FRAMEBUFFER_SIZE', OUTPUT_FRAMEBUFFER_SIZE)
    self.material.set_shader_param('INT_OUTPUT_WIDTH', OUTPUT_WIDTH)

func initialize_instrument_texture(reinitialize := false) -> void:
    if not reinitialize and self.initialized_instrument_texture:
        return
    SoundLoader.samples_to_texture()
    self.material.set_shader_param('instrument_samples', SoundLoader.samples_tex)
    self.material.set_shader_param('instrument_samples_size', SoundLoader.samples_tex.get_size())
    self.initialized_instrument_texture = true

func _get_cached_midi(key: String) -> Array: # [target_samples: int, tex: ImageTexture, tempo_scale_thousandths: int]
    if not ('-' in key):
        return self.cached_midis[key] + [1000]
    var split := key.split('-')
    var tempo_scale_thousandths := int(split[1])
    var target_time_and_tex = self.cached_midis[split[0]]
    var new_target_samples: int = (target_time_and_tex[0] * 32000 * 1000) / tempo_scale_thousandths
    return [new_target_samples, target_time_and_tex[1], tempo_scale_thousandths]

func _render_midi(key: String, output_rows_drawn_including_this: int, rows_to_draw: int) -> void:
    var target_samples_and_tex_and_tempo = self._get_cached_midi(key)
    var target_samples: int = target_samples_and_tex_and_tempo[0]
    var tex: ImageTexture = target_samples_and_tex_and_tempo[1]
    var tempo_scale_thousandths: int = target_samples_and_tex_and_tempo[2]
    self.material.set_shader_param('tempo_scale_thousandths', tempo_scale_thousandths)
    if rows_to_draw <= OUTPUT_HEIGHT: # Full draw
        var y_top: int = OUTPUT_HEIGHT - output_rows_drawn_including_this
        var y_bot: int = y_top + rows_to_draw
        var uv_inv_v: float = 1 - (rows_to_draw / OUTPUT_FRAMEBUFFER_SIZE.y)
        var uvs := PoolVector2Array([Vector2(0, uv_inv_v), Vector2(1, uv_inv_v), Vector2(1, 1), Vector2(0, 1)])
        var points := PoolVector2Array([Vector2(0, y_top), Vector2(OUTPUT_WIDTH, y_top), Vector2(OUTPUT_WIDTH, y_bot), Vector2(0, y_bot)])
        draw_primitive(points, QUAD_COLOR, uvs, tex)
        self.waiting_for_viewport.append([rows_to_draw, key]) # Grab the result next draw
    else:
        print('Could not fit %s into %dx%d output framebuffer, it needs %d rows' % [key, OUTPUT_WIDTH, OUTPUT_HEIGHT, rows_to_draw])

func _render_in_batch() -> void:
    self.waiting_for_viewport = []
    var rows_drawn := 0
    while self.render_queue:
        var target_samples: int = self.render_queue[0][1]
        var rows_to_draw := int(ceil(target_samples / float(OUTPUT_WIDTH)))
        rows_drawn += rows_to_draw
        if rows_drawn > OUTPUT_HEIGHT:
            if self.waiting_for_viewport.empty():
                print('Could not fit %s into %dx%d output framebuffer, it needs %d rows' % [self.render_queue[0][0], OUTPUT_WIDTH, OUTPUT_HEIGHT, rows_to_draw])
                self.render_queue.pop_front()
            break
        # Draw the next ImageTexture
        self._render_midi(self.render_queue.pop_front()[0], rows_drawn, rows_to_draw)
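# Example for _render_in_batch: a 1,000,000-sample track occupies ceil(1000000/4096) = 245
# of the 4096 rows; the full 4096x4096 framebuffer holds 16,777,216 samples per pass.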

func _render_dynamic_one_at_a_time() -> void: # Non-power-of-two texture dimensions should be restricted to GLES3
    self.waiting_for_viewport = []
    var entry = self.render_queue.pop_front()
    var key: String = entry[0]
    var target_samples: int = entry[1]
    var rows_to_draw := int(ceil(target_samples / float(OUTPUT_WIDTH)))
    self._update_viewport(4096, rows_to_draw)
    # Draw the next ImageTexture
    self._render_midi(key, rows_to_draw, rows_to_draw)

func _render_just_ahead_of_time() -> void: # Optimized for latency
    var entry = self.render_queue[0]
    var key: String = entry[0]
    var remaining_samples: int = entry[1]
    var target_samples_and_tex_and_tempo = self._get_cached_midi(key)
    var total_target_samples: int = target_samples_and_tex_and_tempo[0]
    var tex: ImageTexture = target_samples_and_tex_and_tempo[1]
    var tempo_scale_thousandths: int = target_samples_and_tex_and_tempo[2]
    var total_rows := int(ceil(total_target_samples / float(OUTPUT_WIDTH)))
    var remaining_rows := int(ceil(remaining_samples / float(OUTPUT_WIDTH)))
    var rows_to_draw := remaining_rows
    self.material.set_shader_param('tempo_scale_thousandths', tempo_scale_thousandths)
    var y_top: int = 0
    if remaining_rows <= OUTPUT_HEIGHT: # Final chunk: draw it at the bottom of the framebuffer and dequeue
        y_top = OUTPUT_HEIGHT - rows_to_draw
        self.render_queue.pop_front()
    else: # More chunks to come: fill the whole framebuffer and record what is still left
        rows_to_draw = OUTPUT_HEIGHT
        self.render_queue[0] = [key, remaining_samples - (rows_to_draw * OUTPUT_WIDTH)]
    # Draw the next chunk of this ImageTexture
    var starting_row := total_rows - remaining_rows # The start of the unrendered rows
    var ending_row := starting_row + rows_to_draw # The end of this chunk of unrendered rows
    var uv_top: float = 1 - (ending_row / OUTPUT_FRAMEBUFFER_SIZE.y)
    var uv_bot: float = 1 - (starting_row / OUTPUT_FRAMEBUFFER_SIZE.y)
    var uvs := PoolVector2Array([Vector2(0, uv_top), Vector2(1, uv_top), Vector2(1, uv_bot), Vector2(0, uv_bot)])
    var points := PoolVector2Array([Vector2(0, y_top), Vector2(OUTPUT_WIDTH, y_top), Vector2(OUTPUT_WIDTH, OUTPUT_HEIGHT), Vector2(0, OUTPUT_HEIGHT)])
    draw_primitive(points, QUAD_COLOR, uvs, tex)
    self.waiting_for_viewport.append([rows_to_draw, key]) # Grab the result next draw
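# Example for _render_just_ahead_of_time: in the 256x2048 framebuffer a 600,000-sample
# track needs ceil(600000/256) = 2344 rows, so it renders over two draws: 2048 rows, then 296.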

func push_image(image: Image, target_time: int, key: String, enqueue: bool = true) -> void:
    if not self.initialized_instrument_texture:
        self.initialize_instrument_texture()
    var tex := ImageTexture.new()
    tex.create_from_image(image, 0)
    self.cached_midis[key] = [target_time, tex]
    self.material.set_shader_param('midi_events_size', tex.get_size()) # Should all be the same size for now, revisit if we need mixed sizes.
    if enqueue:
        self.render_queue.append([key, target_time])

func push_bytes(data: PoolByteArray, target_time: int, key: String, enqueue: bool = true) -> void:
    # Pack the event bytes into a texture INPUT_TEX_WIDTH texels wide with a power-of-two row count
    var texel_count := len(data) / INPUT_BYTES_PER_TEXEL
    var rows := nearest_po2(int(ceil(texel_count / float(INPUT_TEX_WIDTH))))
    var target_length := rows * INPUT_BYTES_PER_TEXEL * INPUT_TEX_WIDTH
    while len(data) < target_length: # This is inefficient, but this function should be called with pre-padded data anyway
        data.append(0)
    var image := Image.new()
    image.create_from_data(INPUT_TEX_WIDTH, rows, false, INPUT_FORMAT, data)
    self.push_image(image, target_time, key, enqueue)
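# Example for push_bytes: 6,000,000 bytes of event data is 1,500,000 RGBA8 texels,
# i.e. 367 rows of 4096 texels, which rounds up to a 4096x512 texture padded to 8,388,608 bytes.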

func queue_cached_bgm(key: String) -> void:
    var new_target_time_etc := self._get_cached_midi(key)
    self.render_queue.append([key, new_target_time_etc[0]])

func get_result() -> void:
    var result_texture := self.viewport.get_texture()
    var result_image := result_texture.get_data()
    var result_bytes := result_image.get_data()
    match self.render_mode:
        RENDER_MODE.BATCH: # Multiple complete songs in a fixed-size framebuffer
            var retrieved_rows := 0
            for rows_and_key in self.waiting_for_viewport:
                var entry_rows: int = rows_and_key[0]
                var key: String = rows_and_key[1]
                var bytes_start := retrieved_rows * OUTPUT_WIDTH * OUTPUT_BYTES_PER_TEXEL
                var bytes_end := (retrieved_rows + entry_rows) * OUTPUT_WIDTH * OUTPUT_BYTES_PER_TEXEL
                var entry_bytes := result_bytes.subarray(bytes_start, bytes_end - 1)
                self.cached_renders[key] = [0, entry_bytes]
                emit_signal('render_initial_ready', key)
                emit_signal('render_complete', key)
                retrieved_rows += entry_rows
        RENDER_MODE.DYNAMIC: # One complete song in a framebuffer sized to fit it
            var rows_and_key = self.waiting_for_viewport[0]
            var key: String = rows_and_key[1]
            self.cached_renders[key] = [0, result_bytes]
            emit_signal('render_initial_ready', key)
            emit_signal('render_complete', key)
        RENDER_MODE.JAOT: # One partial song in a small framebuffer
            var rows_and_key = self.waiting_for_viewport[0]
            var key: String = rows_and_key[1]
            var samples_just_rendered: int = len(result_bytes) / OUTPUT_BYTES_PER_TEXEL
            if not (key in self.cached_renders):
                # First chunk of this key
                var samples_and_tex = _get_cached_midi(key)
                var target_samples: int = samples_and_tex[0]
                if samples_just_rendered >= target_samples:
                    result_bytes.resize(target_samples * OUTPUT_BYTES_PER_TEXEL)
                    self.cached_renders[key] = [0, result_bytes]
                    emit_signal('render_initial_ready', key)
                    emit_signal('render_complete', key)
                else:
                    var remaining_samples: int = target_samples - samples_just_rendered
                    self.cached_renders[key] = [remaining_samples, result_bytes]
                    emit_signal('render_initial_ready', key)
            else:
                # Later chunk: append to what has already been read back
                var old_remaining_samples_and_data = self.cached_renders[key]
                var target_samples: int = old_remaining_samples_and_data[0] # Samples still outstanding before this chunk
                var existing_data: PoolByteArray = old_remaining_samples_and_data[1]
                if samples_just_rendered >= target_samples:
                    result_bytes.resize(target_samples * OUTPUT_BYTES_PER_TEXEL)
                    self.cached_renders[key] = [0, existing_data + result_bytes]
                    emit_signal('render_complete', key)
                else:
                    self.cached_renders[key] = [target_samples - samples_just_rendered, existing_data + result_bytes]
        RENDER_MODE.TEST_READBACK:
            pass # This has bitrotted
            # # Debugging: compare a sequence of all the possible 16bit integers
            # print_debug('result_image format is %d and has size'%result_image.get_format(), result_image.get_size(), result_bytes.subarray(0, 11))
            # test_readback(result_bytes)
    self.waiting_for_viewport = []

# This has bitrotted
# func test_readback(result_bytes: PoolByteArray):
#     # Debugging: compare a sequence of all the possible 16bit integers
#     var buff := StreamPeerBuffer.new()
#     buff.set_data_array(result_bytes)
#     var tex_readback = 0
#     var uv_readback = 0
#     for i in 0x1000:
#         tex_readback = buff.get_u16()
#         uv_readback = buff.get_u16()
#         if tex_readback != i:
#             print('tex readback %d (0x%04x) was instead %d (0x%04x)'%[i, i, tex_readback, tex_readback])
#         if uv_readback != i:
#             print('uv readback %d (0x%04x) was instead %d (0x%04x)'%[i, i, uv_readback, uv_readback])
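
# Usage sketch (illustrative only, not part of this script): a hypothetical caller that
# pushes a prepared MIDI event buffer and listens for the readback signals. The node path,
# the 'bgm_overworld' key, and the `midi_bytes` / `length_in_samples` arguments are
# assumptions for the example; `length_in_samples` assumes the `target_time` argument is
# a sample count, which is how the render queue treats it.
#
# onready var audio_renderer = $Viewport/AudioRenderer
#
# func _ready() -> void:
#     audio_renderer.connect('render_initial_ready', self, '_on_render_initial_ready')
#     audio_renderer.connect('render_complete', self, '_on_render_complete')
#
# func _queue_bgm(midi_bytes: PoolByteArray, length_in_samples: int) -> void:
#     audio_renderer.push_bytes(midi_bytes, length_in_samples, 'bgm_overworld')
#
# func _on_render_initial_ready(key: String) -> void:
#     # cached_renders[key] is [remaining_samples, PoolByteArray]; the bytes read back so
#     # far can be handed to the audio playback path as soon as this fires.
#     pass
#
# func _on_render_complete(key: String) -> void:
#     pass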