diff --git a/scripts/MusicPlayer.gd b/scripts/MusicPlayer.gd
index 49ad2b1..187b209 100644
--- a/scripts/MusicPlayer.gd
+++ b/scripts/MusicPlayer.gd
@@ -245,155 +245,362 @@ func _process(delta: float) -> void:
 			print('BGM finished playing')
 
-func render_channel(channel: int, t_start: float, t_end: float, inst_map: Array) -> PoolByteArray:
-	var midi_events_bytes_t_start := StreamPeerBuffer.new()
-	var midi_events_bytes_t_end := StreamPeerBuffer.new()
-	var midi_events_bytes3 := StreamPeerBuffer.new()
-	var midi_events_bytes_adsr := StreamPeerBuffer.new()
+# TODO: need to interleave channels for tempo and master volume!
+const MAX_NOTE_EVENTS := 2048
+class NoteEvent:
+	var p_start: int # In pulse space
+	var p_end: int
+	var instrument: int
+	var pitch: int
+	var velocity: float
+	var adsr_attack: int
+	var adsr_decay: int
+	var adsr_sustain: int
+	var adsr_release: int
 
-	var track: Array = self.tracks[channel]
-	var l := len(track)
-	var t := t_start
-	var num_notes: int = 0
-	var current_instrument := 0
-	while (t < t_end) and (num_notes < 2048):
-		var ptr: int = self.channel_pointer[channel]
-		if ptr >= l:
-			break
-		var event = track[ptr]
-		self.channel_pointer[channel] += 1
-		match event[0]: # Control codes
-			EventType.NOTE:
-				var note = event[1]
-				var duration = event[2]
-				if note >= 0: # Don't shift or play rests
-					note += (12 * self.channel_octave[channel]) + self.channel_transpose[channel]
-					midi_events_bytes_t_start.put_32(int(t * 32000)) # t_start
-					midi_events_bytes_t_end.put_32(int((t + duration * self.seconds_per_pulse) * 32000)) # t_end
-					midi_events_bytes3.put_u8(current_instrument) # instrument
-					midi_events_bytes3.put_u8(note) # pitch_idx #* self.channel_fine_tuning[channel]
-					midi_events_bytes3.put_u8((self.channel_velocity[channel]*self.master_volume)/255) # velocity
-					midi_events_bytes3.put_u8(self.channel_pan[channel]*255/127) # pan
-					midi_events_bytes_adsr.put_32(0) # ADSR
-					num_notes += 1
-				t += duration * self.seconds_per_pulse
-				# TODO: Confirm tempo scaling
-				# return duration # Pulses to next instruction
-			EventType.VOLUME:
-				self.channel_velocity[channel] = event[1]
-			EventType.VOLUME_SLIDE: # TODO: implement slides
-				var slide_duration: int = event[1]
-				self.channel_velocity[channel] = event[2]
-			EventType.PAN: # TODO: implement slides
-				self.channel_pan[channel] = event[1]
-				# AudioServer.get_bus_effect(channel+2, 0).set_pan(1.0 - event[1]/127.0)
-			EventType.PAN_SLIDE: # TODO: implement slides
-				var slide_duration: int = event[1]
-				self.channel_pan[channel] = event[2]
-				# AudioServer.get_bus_effect(channel+2, 0).set_pan(1.0 - event[2]/127.0)
-			EventType.PITCH_SLIDE: # TODO: implement slides
-				var slide_duration: int = event[1]
-				var target_pitch: int = event[2] # Signed
-			EventType.VIBRATO_ON:
-				self.channel_vibrato_delay[channel] = event[1]
-				self.channel_vibrato_rate[channel] = event[2]
-				self.channel_vibrato_depth[channel] = event[3]
-				self.channel_vibrato_on[channel] = 1
-			EventType.VIBRATO_OFF:
-				self.channel_vibrato_on[channel] = 0
-			EventType.TREMOLO_ON:
-				self.channel_tremolo_delay[channel] = event[1]
-				self.channel_tremolo_rate[channel] = event[2]
-				self.channel_tremolo_depth[channel] = event[3]
-				self.channel_tremolo_on[channel] = 1
-			EventType.TREMOLO_OFF:
-				self.channel_tremolo_on[channel] = 0
-			EventType.PAN_LFO_ON:
-				self.channel_pan_lfo_depth[channel] = event[1]
-				self.channel_pan_lfo_rate[channel] = event[2]
-				self.channel_pan_lfo_on[channel] = 1
-			EventType.PAN_LFO_OFF:
-				self.channel_pan_lfo_on[channel] = 0
-			EventType.NOISE_FREQ:
-				self.channel_noise_freq[channel] = event[1]
-			EventType.NOISE_ON:
-				self.channel_noise_on[channel] = 1
-			EventType.NOISE_OFF:
-				self.channel_noise_on[channel] = 0
-			EventType.PITCHMOD_ON:
-				self.channel_pitchmod_on[channel] = 1
-			EventType.PITCHMOD_OFF:
-				self.channel_pitchmod_on[channel] = 0
-			EventType.ECHO_ON:
-				self.channel_echo_on[channel] = 1
-			EventType.ECHO_OFF:
-				self.channel_echo_on[channel] = 0
-			EventType.OCTAVE:
-				self.channel_octave[channel] = event[1]
-			EventType.OCTAVE_UP:
-				self.channel_octave[channel] += 1
-			EventType.OCTAVE_DOWN:
-				self.channel_octave[channel] -= 1
-			EventType.TRANSPOSE_ABS:
-				self.channel_transpose[channel] = event[1]
-			EventType.TRANSPOSE_REL:
-				self.channel_transpose[channel] += event[1]
-			EventType.TUNING:
-				var fine_tune: int = event[1]
-				var scale: float
-				if fine_tune < 0x80:
-					scale = 1.0 + fine_tune/255.0
+
+class TrackCurve: # built-in Curve class is too restrictive for this
+	var default: float
+	var entries: PoolVector3Array
+	var baked_integrals: PoolRealArray
+	func _init(default: float = 0.0):
+		self.default = default
+		self.entries = PoolVector3Array()
+		self.baked_integrals = PoolRealArray()
+
+	func add_point(pulse: int, value: float, ramp_to_next: bool) -> void:
+		var l := len(self.entries)
+		var entry := Vector3(float(pulse), value, float(ramp_to_next))
+		if l == 0 or self.entries[-1].x <= pulse: # Also append on an equal pulse, so late points aren't silently dropped
+			self.entries.append(entry)
+		else: # Find the first entry bigger than pulse, and insert before it
+			for i in l:
+				if self.entries[i].x > pulse:
+					self.entries.insert(i, entry)
+					break
+
+	var last_pulse_block_get: int = -1 # Cache previous position for sequential lookups (not used yet)
+	func get_pulse(pulse: float) -> float:
+		var l := len(self.entries)
+		if l == 0 or pulse < self.entries[0].x: # Before the first point, fall back to the default
+			return self.default
+		if pulse >= self.entries[-1].x: # At or after the last point, hold its value
+			return self.entries[-1].y
+		for i in l-1:
+			# Find the first entry beyond the requested pulse
+			if pulse < self.entries[i+1].x:
+				if self.entries[i].z > 0: # ramp_to_next
+					return range_lerp(pulse, self.entries[i].x, self.entries[i+1].x, self.entries[i].y, self.entries[i+1].y)
 				else:
-					scale = fine_tune/255.0
-				self.channel_fine_tuning[channel] = scale
-			EventType.PROGCHANGE:
-				current_instrument = inst_map[event[1]-0x20] - 1
-			EventType.ADSR_ATTACK:
-				self.channel_adsr_attack[channel] = event[1]
-			EventType.ADSR_DECAY:
-				self.channel_adsr_decay[channel] = event[1]
-			EventType.ADSR_SUSTAIN:
-				self.channel_adsr_sustain[channel] = event[1]
-			EventType.ADSR_RELEASE:
-				self.channel_adsr_release[channel] = event[1]
-			EventType.ADSR_DEFAULT: # TODO - grab instrument envelope
-				pass
-			EventType.TEMPO:
-				self.set_tempo(music.tempo_to_bpm(event[1]))
-			EventType.TEMPO_SLIDE:
-				self.set_tempo(music.tempo_to_bpm(event[2]))
-				var slide_duration: int = event[1]
-			EventType.ECHO_VOLUME:
-				self.channel_echo_volume[channel] = event[1]
-			EventType.ECHO_VOLUME_SLIDE: # TODO: implement slides
-				self.channel_echo_volume[channel] = event[2]
-				var slide_duration: int = event[1]
-			EventType.ECHO_FEEDBACK_FIR: # TODO
-				var feedback: int = event[1]
-				var filterIndex: int = event[2]
-			EventType.MASTER_VOLUME:
-				self.master_volume = event[1]
-			EventType.GOTO:
-				self.channel_pointer[channel] = event[1]
-			EventType.END:
-				break
-			_:
-				break
-	# End of track
-	# Fill up end of notes array with dummies
-	for i in range(num_notes, 2048):
-		midi_events_bytes_t_start.put_32(int(t_end*2*32000))
-		midi_events_bytes_t_end.put_32(int(t_end*2*32000))
-		midi_events_bytes3.put_u8(0) # instrument
-		midi_events_bytes3.put_u8(0) # pitch_idx
-		midi_events_bytes3.put_u8(0) # velocity
-		midi_events_bytes3.put_u8(0) # pan
-		midi_events_bytes_adsr.put_32(0) # ADSR
-	return midi_events_bytes_t_start.data_array + midi_events_bytes_t_end.data_array + midi_events_bytes3.data_array + midi_events_bytes_adsr.data_array
-	# audio_renderer.push_bytes(channel_data)
+				return self.entries[i].y
+		return self.default # Should be unreachable
 
-func render_channels(t_start: float, t_end: float, inst_map: Array) -> PoolByteArray:
+	func bake_integrals():
+		# Store the starting integrated value (i.e. time, for the tempo curve) at each entry's pulse
+		self.baked_integrals.clear()
+		var last_pulse := 0.0
+		var last_value := self.default
+		var last_integral := 0.0
+		var last_ramp := false
+		for entry in self.entries:
+			var step_pulse = entry.x - last_pulse
+			var integral := last_integral
+			if last_ramp:
+				# Trapezoid rule: a rectangle whose height is the average of the slanted top
+				integral += step_pulse * (last_value + entry.y)/2.0
+			else:
+				integral += step_pulse * last_value
+			self.baked_integrals.append(integral)
+			last_pulse = entry.x
+			last_value = entry.y
+			last_integral = integral
+			last_ramp = entry.z > 0
+
+	var last_integral_block_get: int = -1 # Cache previous position for sequential lookups (not used yet)
+	func get_integral(pulse: float) -> float:
+		# This is for tempo -> time. It has to be baked to have any hope of efficiency.
+		if self.baked_integrals.empty():
+			self.bake_integrals()
+		# Find the last entry at or before the requested pulse
+		for i in range(len(self.entries)-1, -1, -1):
+			var entry = self.entries[i]
+			if pulse >= entry.x:
+				var integral = self.baked_integrals[i]
+				var step_pulse = pulse - entry.x
+				if entry.z > 0: # Ramp to next
+					# Trapezoid rule again. If the last entry somehow has ramp-to-next (it shouldn't), this will out-of-range error
+					integral += step_pulse * (entry.y + entries[i+1].y)/2.0
+				else:
+					integral += step_pulse * entry.y
+				return integral
+		return 0.0
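+
+# Usage sketch (illustrative values only, not part of the engine API):
+#	var curve := TrackCurve.new(0.5)
+#	curve.add_point(0, 0.25, true)   # ramp from pulse 0...
+#	curve.add_point(96, 1.0, false)  # ...to pulse 96, then hold
+#	curve.get_pulse(48)     # -> 0.625, halfway up the ramp
+#	curve.get_pulse(-1.0)   # -> 0.5, the default before the first point
+#	curve.get_integral(96)  # -> 60.0, the trapezoid 96 * (0.25 + 1.0) / 2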
+
+
+func render_channels(_t_start: float, _t_end: float, inst_map: Array) -> Array: # Returns [data: PoolByteArray, target_time_length: float in seconds]
+	# Since some channels contain global events (tempo and master volume for now),
+	# the strategy is to preprocess each channel in a global-state-agnostic way,
+	# then, once all the global curves and the longest unlooped length are known,
+	# do a second pass to generate the final events.
+	var all_note_events = []
+
+	var curve_master_volume := TrackCurve.new(100.0/255.0) # [0.0, 1.0] for now
+	var curve_master_tempo := TrackCurve.new(120.0) # bpm is too big a unit; the stored values are seconds per pulse
+
+	var curve_channel_pans := []
+
+	for channel in self.num_tracks:
+		var curve_velocity := TrackCurve.new(100.0/255.0) # [0.0, 1.0] for now
+		var curve_pan := TrackCurve.new() # [-1.0, 1.0] for now
+		var channel_note_events = []
+		var track: Array = self.tracks[channel]
+		var l := len(track)
+		var p := 0 # current pulse
+
+		if l == 0: # Empty channel, move on
+			all_note_events.append(channel_note_events)
+			curve_channel_pans.append(curve_pan)
+			continue
+
+		# var num_notes: int = 0
+		var current_instrument := 0
+		var current_octave := 5
+		var current_transpose := 0
+		# var current_velocity := 100
+		var current_adsr_attack := 0
+		var current_adsr_decay := 0
+		var current_adsr_sustain := 0
+		var current_adsr_release := 0
+
+		# First, check whether the track ends in a GOTO, and store the program counter of its destination
+		var infinite_loop_target_program_counter = -1
+		var infinite_loop_target_pulse = -1
+		if track[-1][0] == EventType.GOTO:
+			infinite_loop_target_program_counter = track[-1][1]
+
+		var program_counter := 0
+		while true: # num_notes < MAX_NOTE_EVENTS:
+			if program_counter >= l:
+				break
+			if program_counter == infinite_loop_target_program_counter:
+				infinite_loop_target_pulse = p
+			var event = track[program_counter]
+			program_counter += 1
+			match event[0]: # Control codes
+				EventType.GOTO: # This is a preprocessed event list, so GOTO is a final infinite-loop marker
+					var note_event = NoteEvent.new()
+					note_event.p_start = p
+					note_event.p_end = infinite_loop_target_pulse # Fake final note event: p_start > p_end encodes the infinite jump-back loop.
+					# Note that event[1] points to an Event, not a NoteEvent or a pulse, so we looked the target pulse up earlier
+					channel_note_events.append(note_event)
+					break
+				EventType.MASTER_VOLUME:
+					curve_master_volume.add_point(p, event[1]/255.0, false)
+				EventType.TEMPO:
+					var new_tempo = music.tempo_to_seconds_per_pulse(event[1])
+					curve_master_tempo.add_point(p, new_tempo, false)
+				EventType.TEMPO_SLIDE:
+					var old_tempo = curve_master_tempo.get_pulse(p)
+					var new_tempo = music.tempo_to_seconds_per_pulse(event[2])
+					var slide_duration: int = event[1] # TODO: work out how this is scaled
+					curve_master_tempo.add_point(p, old_tempo, true)
+					curve_master_tempo.add_point(p + slide_duration, new_tempo, false)
+				EventType.NOTE:
+					var note = event[1]
+					var duration = event[2]
+					if note >= 0: # Don't shift or play rests
+						note += (12 * current_octave) + current_transpose
+						var note_event = NoteEvent.new()
+						note_event.p_start = p
+						note_event.p_end = p + duration
+						note_event.instrument = current_instrument
+						note_event.pitch = note # pitch_idx #* self.channel_fine_tuning[channel]
+						note_event.velocity = curve_velocity.get_pulse(p) # current_velocity
+						note_event.adsr_attack = current_adsr_attack
+						note_event.adsr_decay = current_adsr_decay
+						note_event.adsr_sustain = current_adsr_sustain
+						note_event.adsr_release = current_adsr_release
+						channel_note_events.append(note_event)
+						# num_notes += 1
+					p += duration
+				EventType.VOLUME:
+					var new_velocity: float = event[1]/255.0
+					curve_velocity.add_point(p, new_velocity, false)
+				EventType.VOLUME_SLIDE: # Ramp to the target velocity; TODO: confirm how slide_duration is scaled
+					var old_velocity = curve_velocity.get_pulse(p)
+					var slide_duration: int = event[1]
+					var new_velocity: float = event[2]/255.0
+					curve_velocity.add_point(p, old_velocity, true)
+					curve_velocity.add_point(p + slide_duration, new_velocity, false)
+				EventType.PAN:
+					var new_pan = 1.0 - event[1]/127.5 # Map 0..255 onto [+1.0, -1.0]
+					curve_pan.add_point(p, new_pan, false)
+				EventType.PAN_SLIDE:
+					var old_pan = curve_pan.get_pulse(p)
+					var new_pan = 1.0 - event[2]/127.5
+					var slide_duration: int = event[1] # TODO: work out how slides are scaled
+					curve_pan.add_point(p, old_pan, true)
+					curve_pan.add_point(p + slide_duration, new_pan, false)
+				EventType.PITCH_SLIDE: # TODO: implement slides
+					var slide_duration: int = event[1]
+					var target_pitch: int = event[2] # Signed
+				EventType.OCTAVE:
+					current_octave = event[1]
+				EventType.OCTAVE_UP:
+					current_octave += 1
+				EventType.OCTAVE_DOWN:
+					current_octave -= 1
+				EventType.TRANSPOSE_ABS:
+					current_transpose = event[1]
+				EventType.TRANSPOSE_REL:
+					current_transpose += event[1]
+				EventType.TUNING:
+					var fine_tune: int = event[1]
+					var scale: float
+					if fine_tune < 0x80:
+						scale = 1.0 + fine_tune/255.0
+					else:
+						scale = fine_tune/255.0
+					self.channel_fine_tuning[channel] = scale
+				EventType.PROGCHANGE:
+					var event_idx = event[1]-0x20
+					if event_idx >= 0:
+						current_instrument = inst_map[event_idx] - 1
+				EventType.ADSR_ATTACK:
+					current_adsr_attack = event[1]
+				EventType.ADSR_DECAY:
+					current_adsr_decay = event[1]
+				EventType.ADSR_SUSTAIN:
+					current_adsr_sustain = event[1]
+				EventType.ADSR_RELEASE:
+					current_adsr_release = event[1]
+				EventType.ADSR_DEFAULT: # TODO - grab instrument envelope
+					current_adsr_attack = 0
+					current_adsr_decay = 0
+					current_adsr_sustain = 0
+					current_adsr_release = 0
+				EventType.VIBRATO_ON:
+					self.channel_vibrato_delay[channel] = event[1]
+					self.channel_vibrato_rate[channel] = event[2]
+					self.channel_vibrato_depth[channel] = event[3]
+					self.channel_vibrato_on[channel] = 1
+				EventType.VIBRATO_OFF:
+					self.channel_vibrato_on[channel] = 0
+				EventType.TREMOLO_ON:
+					self.channel_tremolo_delay[channel] = event[1]
+					self.channel_tremolo_rate[channel] = event[2]
+					self.channel_tremolo_depth[channel] = event[3]
+					self.channel_tremolo_on[channel] = 1
+				EventType.TREMOLO_OFF:
+					self.channel_tremolo_on[channel] = 0
+				EventType.PAN_LFO_ON:
+					self.channel_pan_lfo_depth[channel] = event[1]
+					self.channel_pan_lfo_rate[channel] = event[2]
+					self.channel_pan_lfo_on[channel] = 1
+				EventType.PAN_LFO_OFF:
+					self.channel_pan_lfo_on[channel] = 0
+				EventType.NOISE_FREQ:
+					self.channel_noise_freq[channel] = event[1]
+				EventType.NOISE_ON:
+					self.channel_noise_on[channel] = 1
+				EventType.NOISE_OFF:
+					self.channel_noise_on[channel] = 0
+				EventType.PITCHMOD_ON:
+					self.channel_pitchmod_on[channel] = 1
+				EventType.PITCHMOD_OFF:
+					self.channel_pitchmod_on[channel] = 0
+				EventType.ECHO_ON:
+					self.channel_echo_on[channel] = 1
+				EventType.ECHO_OFF:
+					self.channel_echo_on[channel] = 0
+				EventType.ECHO_VOLUME:
+					self.channel_echo_volume[channel] = event[1]
+				EventType.ECHO_VOLUME_SLIDE: # TODO: implement slides
+					self.channel_echo_volume[channel] = event[2]
+					var slide_duration: int = event[1]
+				EventType.ECHO_FEEDBACK_FIR: # TODO
+					var feedback: int = event[1]
+					var filterIndex: int = event[2]
+				EventType.END:
+					break
+				_:
+					break
+		# End of track
+		all_note_events.append(channel_note_events)
+		curve_channel_pans.append(curve_pan)
+
+	# Integrate the tempo curve so we can map pulses to seconds
+	curve_master_tempo.bake_integrals()
+	# Find the longest channel
+	var channel_loop_p_returns = PoolIntArray()
+	var channel_loop_p_lengths = PoolIntArray()
+	var longest_channel_idx = 0
+	var longest_channel_p_end = 0
+	for channel in self.num_tracks:
+		if all_note_events[channel].empty():
+			channel_loop_p_returns.append(-1)
+			channel_loop_p_lengths.append(0) # Keep both arrays channel-aligned
+			continue
+		var note_event: NoteEvent = all_note_events[channel][-1]
+		var p_end = note_event.p_end
+		if p_end < note_event.p_start:
+			# Ends on an infinite loop
+			channel_loop_p_returns.append(p_end)
+			channel_loop_p_lengths.append(note_event.p_start - p_end)
+			p_end = note_event.p_start
+		else:
+			channel_loop_p_returns.append(-1)
+			channel_loop_p_lengths.append(0) # Keep both arrays channel-aligned
+
+		if p_end > longest_channel_p_end:
+			longest_channel_p_end = p_end
+			longest_channel_idx = channel
+
+	var target_pulse_length = longest_channel_p_end + 200
+	var target_time_length = curve_master_tempo.get_integral(target_pulse_length)
+
+	# Second pass - encode the notes with the now-known global tempo and volume curves
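+	# Layout of each channel's packed block (a sketch of what the loop below writes):
+	#	MAX_NOTE_EVENTS * i32  - note-on time, in samples at 32000 Hz
+	#	MAX_NOTE_EVENTS * i32  - note-off time, in samples at 32000 Hz
+	#	MAX_NOTE_EVENTS * 4*u8 - instrument, pitch, velocity, pan
+	#	MAX_NOTE_EVENTS * 4*u8 - ADSR attack, decay, sustain, release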
 	var data := PoolByteArray()
 	for channel in self.num_tracks:
-		data += self.render_channel(channel, t_start, t_end, inst_map)
-	return data
+		var events = all_note_events[channel]
+		var loop_return_note_event_idx = -1
+		var loop_return_p = channel_loop_p_returns[channel]
+		var curve_pan: TrackCurve = curve_channel_pans[channel]
+
+		var midi_events_bytes_t_start := StreamPeerBuffer.new()
+		var midi_events_bytes_t_end := StreamPeerBuffer.new()
+		var midi_events_bytes3 := StreamPeerBuffer.new()
+		var midi_events_bytes_adsr := StreamPeerBuffer.new()
+
+		var num_notes: int = 0
+		var event_ptr := 0
+		var l_events := len(events)
+		var loop_p_offset := 0
+		for i in MAX_NOTE_EVENTS:
+			if event_ptr >= l_events:
+				break
+			if (loop_return_p >= 0) and event_ptr == l_events-1:
+				# Hit the loop marker: jump back and keep unrolling until the note budget runs out
+				event_ptr = loop_return_note_event_idx
+				loop_p_offset += channel_loop_p_lengths[channel]
+			var event: NoteEvent = events[event_ptr]
+			var p = event.p_start
+			if loop_return_note_event_idx < 0 and p >= loop_return_p:
+				loop_return_note_event_idx = event_ptr
+			midi_events_bytes_t_start.put_32(int(curve_master_tempo.get_integral(p + loop_p_offset) * 32000)) # t_start
+			midi_events_bytes_t_end.put_32(int(curve_master_tempo.get_integral(event.p_end + loop_p_offset) * 32000)) # t_end
+			midi_events_bytes3.put_u8(event.instrument)
+			midi_events_bytes3.put_u8(event.pitch)
+			midi_events_bytes3.put_u8(int(event.velocity * curve_master_volume.get_pulse(p) * 255.0)) # velocity
+			midi_events_bytes3.put_u8(int((curve_pan.get_pulse(p)+1.0) * 127.5)) # pan
+			midi_events_bytes_adsr.put_u8(event.adsr_attack)
+			midi_events_bytes_adsr.put_u8(event.adsr_decay)
+			midi_events_bytes_adsr.put_u8(event.adsr_sustain)
+			midi_events_bytes_adsr.put_u8(event.adsr_release)
+
+			event_ptr += 1
+			num_notes += 1
+		# Fill up the end of the notes array with dummies
+		for i in range(num_notes, MAX_NOTE_EVENTS):
+			midi_events_bytes_t_start.put_32(0x0FFFFFFF) # Far-future sample time, so unused slots never trigger
+			midi_events_bytes_t_end.put_32(0x0FFFFFFF)
+			midi_events_bytes3.put_32(0)
+			midi_events_bytes_adsr.put_32(0)
+		data += midi_events_bytes_t_start.data_array + midi_events_bytes_t_end.data_array + midi_events_bytes3.data_array + midi_events_bytes_adsr.data_array
+	return [data, target_time_length]
diff --git a/scripts/loaders/snes/music.gd b/scripts/loaders/snes/music.gd
index 01ceaa3..6fca066 100644
--- a/scripts/loaders/snes/music.gd
+++ b/scripts/loaders/snes/music.gd
@@ -108,6 +108,13 @@ static func tempo_to_bpm(tempo_byte: int) -> float:
 		return 1.0
 	return (tempo_byte / 255.0) * 60000000.0 / 216000.0 # VGMTrans uses /256.0 but I don't trust that
 
+# bpm * ppqn = ppm (pulses per minute)
+# ppm / 60 = pps (pulses per second)
+# 1/pps = seconds per pulse
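+# Worked example: tempo_byte = 255 gives 4500/1000000 = 0.0045 s per pulse,
+# i.e. ~222.2 pulses per second; tempo_to_bpm(255) above gives ~277.8 bpm,
+# and the two agree at 48 pulses per quarter note (277.8 * 48 / 60 ~= 222.2).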
+static func tempo_to_seconds_per_pulse(tempo_byte: int) -> float:
+	# 125 * TIMER0_FREQUENCY = 4500
+	return 4500.0 / (1000000.0 * tempo_byte / 255.0)
+
 static func get_int_array(size: int) -> PoolIntArray:
 	var array := PoolIntArray()
 	array.resize(size)
diff --git a/shaders/audio_renderer.gdshader b/shaders/audio_renderer.gdshader
index 8df9967..72013c9 100644
--- a/shaders/audio_renderer.gdshader
+++ b/shaders/audio_renderer.gdshader
@@ -190,21 +190,22 @@ vec4 render_song(int smp) {
 		int smp_attack = int(attack) * 2; // Max value is 131072 samples = 4.096 seconds
 		// For now, just branch this
-		int smp_overrun = smp - smp_end; // 256 samples of linear decay to 0 after note_off
-		smp_overrun = (smp_overrun < 0) ? 0 : smp_overrun;
-		if (smp_overrun < 256) {
-			float t_start = float(smp_start)/output_mixrate;
-			float attack_factor = min(float(smp - smp_start)/float(smp_attack), 1.0);
-			float release_factor = float(255-smp_overrun)/255.0; // 256 samples of linear decay to 0 after note_off
-			float samp = get_instrument_sample(instrument_idx, pitch_idx, t-t_start);
-			samp *= velocity * attack_factor * release_factor;
-			// TODO: proper decay and sustain, revisit release
-			downmixed_stereo += samp * vec2(1.0-pan, pan) * 0.5; // TODO: double it to maintain the mono level on each channel at center=0.5?
+		if (smp_start < smp) { // First sample may not start at zero!
+			int smp_overrun = smp - smp_end; // 256 samples of linear decay to 0 after note_off
+			smp_overrun = (smp_overrun < 0) ? 0 : smp_overrun;
+			if (smp_overrun < 256) {
+				float t_start = float(smp_start)/output_mixrate;
+				float attack_factor = min(float(smp - smp_start)/float(smp_attack), 1.0);
+				float release_factor = float(255-smp_overrun)/255.0; // 256 samples of linear decay to 0 after note_off
+				float samp = get_instrument_sample(instrument_idx, pitch_idx, t-t_start);
+				samp *= velocity * attack_factor * release_factor;
+				// TODO: proper decay and sustain, revisit release
+				downmixed_stereo += samp * vec2(1.0-pan, pan) * 0.5; // TODO: double it to maintain the mono level on each channel at center=0.5?
+			}
 		}
 	}
 	// Convert the stereo float audio to S16LE
 	return vec4(pack_float_to_int16(downmixed_stereo.x), pack_float_to_int16(downmixed_stereo.y));
-	// return vec4(pack_float_to_int16(downmixed_stereo.x), pack_float_to_int16(mod(t, 2.0)-1.0));
 }
 
 void fragment() {
@@ -215,30 +216,3 @@ void fragment() {
 	ivec2 xy = ivec2(trunc(uv*TEX_SIZE));
 	COLOR.xyzw = render_song(xy.x + (xy.y*INT_TEX_SIZE));
 }
-
-// const int MAX_TEMPO_EVENTS = 256;
-// const int NUM_TEMPO_PROBES = 8; // log2(MAX_TEMPO_EVENTS)
-	// Because tempo is dynamic, it will need to be encoded into a header in song_texture
-	// // Binary search the first row for tempo information
-	// float tempo_idx = 0.0;
-	// vec4 tempo_event;
-	// float t_start;
-	// for (int i = 0; i < NUM_TEMPO_PROBES; i++) {
-	//	float step_size = exp2(float(NUM_TEMPO_PROBES - i - 1));
-	//	tempo_event = get_midi_texel(tempo_idx + step_size, 0.0);
-	//	t_start = tempo_event.x;
-	//	tempo_idx += (t >= t_start) ? step_size : 0.0;
-	// }
-	// float beat_start = tempo_event.y;
-	// float tempo_start = tempo_event.z;
-	// float tempo_end = tempo_event.w; // For tempo slides
-	// vec4 next_tempo_event = get_midi_texel(tempo_idx + 1.0, 0.0);
-	// float t_end = next_tempo_event.x;
-	// float beat_end = next_tempo_event.y;
-	// // Use the tempo information to convert wall time to beat time
-	// float t0 = t - t_start;
-	// float t_length = t_end - t_start;
-	// float tempo_section_progression = t0 / t_length;
-	// float tempo_at_t = mix(tempo_start, tempo_end, tempo_section_progression);
-	// float current_beat = beat_start + (t0 * (tempo_start+tempo_at_t) * 0.5); // Use the average tempo across the period to turn integration into area of a rectangle
-	// Now that we have our position on the beatmap,
diff --git a/test/audio_renderer.gd b/test/audio_renderer.gd
index 713a49c..f2468a2 100644
--- a/test/audio_renderer.gd
+++ b/test/audio_renderer.gd
@@ -3,35 +3,36 @@ extends Control
 const INPUT_TEX_WIDTH := 2048
 const INPUT_FORMAT := Image.FORMAT_RGBA8 # Image.FORMAT_LA8
 const INPUT_BYTES_PER_TEXEL := 4 # 2
+const OUTPUT_BYTES_PER_TEXEL := 4
 const OUTPUT_WIDTH := 4096
+const QUAD_COLOR := PoolColorArray([Color.white, Color.white, Color.white, Color.white])
 var viewport: Viewport
 var render_queue: Array # of Images
 var result_queue: Array # of PoolByteArrays
-var current_image: Image
 var current_tex: ImageTexture # Needed to prevent GC before draw
-var waiting_for_viewport: bool
+var waiting_for_viewport: int
 var done_first_draw: bool
 
 func _ready() -> void:
 	self.viewport = get_parent()
 	self.render_queue = []
 	self.result_queue = []
-	self.waiting_for_viewport = false
+	self.waiting_for_viewport = 0
 	self.done_first_draw = false
-	self.current_image = Image.new()
 	self.current_tex = ImageTexture.new()
 
-func push_image(img: Image) -> void:
-	self.render_queue.append(img)
+func push_image(img: Image, uv_rows: int = 4096) -> void:
+	self.render_queue.append([img, uv_rows])
 
-func push_bytes(data: PoolByteArray) -> void:
+func push_bytes(data: PoolByteArray, uv_rows: int = 4096) -> void:
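+	# Rough size check (assuming 8 channels, which this sketch does not verify):
+	# 2048 note slots * 16 bytes * 8 channels = 262144 bytes = 65536 RGBA8 texels
+	# = 32 rows of 2048, already a power of two, so no padding gets appended.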
 	# print(data.subarray(0, 15))
 	var rows = int(pow(2, ceil(log((len(data)/INPUT_BYTES_PER_TEXEL) / INPUT_TEX_WIDTH)/log(2))))
 	var target_length = rows * INPUT_BYTES_PER_TEXEL * INPUT_FORMAT
 	while len(data) < target_length: # This is inefficient, but this function should be called with pre-padded data anyway
 		data.append(0)
-	self.current_image.create_from_data(INPUT_TEX_WIDTH, rows, false, INPUT_FORMAT, data)
-	self.render_queue.append(self.current_image)
+	var image := Image.new()
+	image.create_from_data(INPUT_TEX_WIDTH, rows, false, INPUT_FORMAT, data)
+	self.render_queue.append([image, uv_rows])
 
 func _process(_delta) -> void:
 	update()
@@ -51,22 +52,27 @@
 		return
 
 	# Draw the next ImageTexture
-	self.current_image = self.render_queue.pop_front()
-	self.current_tex.create_from_image(self.current_image, 0)
+	var image_and_uv_rows = self.render_queue.pop_front()
+	self.current_tex.create_from_image(image_and_uv_rows[0], 0)
 	self.material.set_shader_param('midi_events', self.current_tex)
 	self.material.set_shader_param('midi_events_size', self.current_tex.get_size())
-	# draw_texture(self.current_tex, Vector2.ZERO)
-	draw_texture(self.viewport.get_texture(), Vector2.ZERO)
-	# draw_rect(Rect2(0, 0, OUTPUT_WIDTH, OUTPUT_WIDTH), Color.white)
-	self.waiting_for_viewport = true # Grab the result next draw
+	var uv_rows: int = image_and_uv_rows[1]
+	var uv_rows_inv: int = 4096 - uv_rows
+	var uv_v: float = uv_rows / float(OUTPUT_WIDTH)
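+	# e.g. uv_rows = 512: the quad below spans y 3584..4096 and samples UV v
+	# 0.875..1.0, so only 512 of the 4096 output rows are rendered (numbers illustrative)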
+	var points := PoolVector2Array([Vector2(0, uv_rows_inv), Vector2(OUTPUT_WIDTH, uv_rows_inv), Vector2(OUTPUT_WIDTH, OUTPUT_WIDTH), Vector2(0, OUTPUT_WIDTH)])
+	var uvs := PoolVector2Array([Vector2(0, 1-uv_v), Vector2(1, 1-uv_v), Vector2(1, 1), Vector2(0, 1)])
+	draw_primitive(points, QUAD_COLOR, uvs, self.current_tex)
+	self.waiting_for_viewport = uv_rows # Grab the result next draw
 
 func get_result() -> void:
 	var result_texture := self.viewport.get_texture()
 	var result_image := result_texture.get_data()
 	var result_bytes := result_image.get_data()
+	var result_byte_count := waiting_for_viewport * OUTPUT_WIDTH * OUTPUT_BYTES_PER_TEXEL
+	result_bytes.resize(result_byte_count) # Keep only the rows that were actually rendered
 	self.result_queue.append(result_bytes)
-	self.waiting_for_viewport = false
+	self.waiting_for_viewport = 0
 	# # Debugging: compare a sequence of all the possible 16bit integers
 	# print_debug('result_image format is %d and has size'%result_image.get_format(), result_image.get_size(), result_bytes.subarray(0, 11))
diff --git a/test/audio_system.gd b/test/audio_system.gd
index 2d8dc8a..86da3e5 100644
--- a/test/audio_system.gd
+++ b/test/audio_system.gd
@@ -128,6 +128,7 @@ func _ready() -> void:
 	self.test_rendering()
 
 onready var audio_renderer := $'%audio_renderer'
+onready var load_start_tick := Time.get_ticks_msec()
 func test_rendering() -> void:
 	SoundLoader.samples_to_texture()
 	audio_renderer.material.set_shader_param('instrument_samples', SoundLoader.samples_tex)
@@ -149,10 +150,14 @@ func test_rendering() -> void:
 		# midi_events_bytes3.put_u8(i%256) # pan
 		midi_events_bytes4.put_32(0) # ADSR
 	var channel_data = midi_events_bytes.data_array + midi_events_bytes2.data_array + midi_events_bytes3.data_array + midi_events_bytes4.data_array
-	var bgm_id := 0
-	var mp = MusicPlayer.new(bgm_tracksets[bgm_id], self.inst_sample_map)
-	channel_data = mp.render_channels(0, 540, RomLoader.snes_data.bgm_instrument_indices[bgm_id])
-	audio_renderer.push_bytes(channel_data) # + channel_data + channel_data + channel_data + channel_data + channel_data + channel_data + channel_data)
+	for bgm_id in 64:
+		var mp = MusicPlayer.new(bgm_tracksets[bgm_id], self.inst_sample_map)
+		var data_and_target_time = mp.render_channels(0, 540, RomLoader.snes_data.bgm_instrument_indices[bgm_id])
+		channel_data = data_and_target_time[0]
+		var target_time = data_and_target_time[1]
+		var target_samples = target_time * 32000
+		var target_rows = int(ceil(target_samples/4096.0)) # One output row is 4096 samples
+		audio_renderer.push_bytes(channel_data, target_rows)
 
 	# var test_payload := PoolByteArray()
 	# test_payload.resize(4096*4096*2)
@@ -169,15 +174,18 @@ func test_rendering() -> void:
 
 func _process(_delta):
 	update()
 
+var current_rendered_bgm := 0
 func _draw() -> void:
 	if audio_renderer.waiting_for_viewport:
 		audio_renderer.get_result()
-		var result = audio_renderer.result_queue[0]
+		var result = audio_renderer.result_queue.pop_back()
 		var rendered_audio := AudioStreamSample.new()
 		rendered_audio.data = result #.subarray(0, (4*120*32000) - 1)
 		rendered_audio.stereo = true
 		rendered_audio.mix_rate = 32000
 		rendered_audio.format = AudioStreamSample.FORMAT_16_BITS
-		var error = rendered_audio.save_to_wav('output/rendered_audio.wav')
-		print(error)
+		var error = rendered_audio.save_to_wav('output/rendered_bgm_%02d.wav' % current_rendered_bgm)
+		print('@%dms - Saved render of BGM%02d (error code %s)' % [Time.get_ticks_msec() - load_start_tick, current_rendered_bgm, globals.ERROR_CODE_STRINGS[error]])
+		# print('@%dms - Rendered BGM%02d without saving' % [Time.get_ticks_msec() - load_start_tick, current_rendered_bgm])
+		current_rendered_bgm += 1
 	pass
diff --git a/test/audio_system.tscn b/test/audio_system.tscn
index 2fa24e3..18e458b 100644
--- a/test/audio_system.tscn
+++ b/test/audio_system.tscn
@@ -1,21 +1,23 @@
-[gd_scene load_steps=6 format=2]
+[gd_scene load_steps=7 format=2]
 
 [ext_resource path="res://test/audio_system.gd" type="Script" id=1]
 [ext_resource path="res://theme/menu_theme.tres" type="Theme" id=2]
 [ext_resource path="res://test/audio_renderer.gd" type="Script" id=3]
 [ext_resource path="res://shaders/audio_renderer.gdshader" type="Shader" id=4]
 
+[sub_resource type="Curve" id=3]
+_data = [ Vector2( 0, 0 ), 0.0, 2.46705, 0, 1, Vector2( 0.335329, 0.827273 ), 2.46705, -1.6562, 1, 1, Vector2( 0.631737, 0.336364 ), -1.6562, 1.80207, 1, 1, Vector2( 1, 1 ), 1.80207, 0.0, 1, 0 ]
+
 [sub_resource type="ShaderMaterial" id=2]
 shader = ExtResource( 4 )
 shader_param/instrument_samples_size = Vector2( 2048, 128 )
-shader_param/instrument_row_padding = 3.0
-shader_param/instrument_row_payload = 2042.0
 shader_param/reference_note = 71.0
 shader_param/output_mixrate = 32000.0
 shader_param/midi_events_size = Vector2( 2048, 16 )
 
 [node name="audio_system" type="Node2D"]
 script = ExtResource( 1 )
+curve = SubResource( 3 )
 
 [node name="viewport_audio_renderer" type="Viewport" parent="."]
 size = Vector2( 4096, 4096 )