#warning-ignore-all:shadowed_variable
extends Node
const music := preload('res://scripts/loaders/snes/music_ff5.gd')
const EventType := music.EventType
var MUSIC := music.new()

var inst_map: Dictionary # int keys, AudioStreamSample values
var tracks: Array
var num_tracks: int
var players: Array
var tempo: float # Stored as BPM
var seconds_per_pulse: float
var master_volume := 255
var channel_pointer := PoolIntArray()
var channel_instrument_idx := PoolByteArray()
var channel_next_pulse := PoolIntArray()
var channel_current_note := PoolByteArray()
var channel_velocity := PoolByteArray()
var channel_pan := PoolByteArray() # Reversed from MIDI
var channel_octave := PoolByteArray()
var channel_transpose := PoolByteArray()
var channel_fine_tuning := PoolRealArray()
var channel_adsr_attack := PoolByteArray()
var channel_adsr_decay := PoolByteArray()
var channel_adsr_sustain := PoolByteArray()
var channel_adsr_release := PoolByteArray()
var channel_noise_freq := PoolByteArray()
var channel_noise_on := PoolByteArray()
var channel_pan_lfo_rate := PoolByteArray()
var channel_pan_lfo_depth := PoolByteArray()
var channel_pan_lfo_on := PoolByteArray()
var channel_tremolo_delay := PoolByteArray()
var channel_tremolo_rate := PoolByteArray()
var channel_tremolo_depth := PoolByteArray()
var channel_tremolo_on := PoolByteArray()
var channel_vibrato_delay := PoolByteArray()
var channel_vibrato_rate := PoolByteArray()
var channel_vibrato_depth := PoolByteArray()
var channel_vibrato_on := PoolByteArray()
var channel_pitchmod_on := PoolByteArray()
var channel_echo_on := PoolByteArray()
var channel_echo_volume := PoolByteArray()

func set_tempo(tempo: float):
    self.tempo = tempo
    self.seconds_per_pulse = 60.0 / (tempo * music.PPQN)
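    # Worked example (the PPQN value is assumed from music_ff5.gd, not restated here):
    # at 120 BPM and PPQN = 48, seconds_per_pulse = 60.0 / (120 * 48) ~= 0.0104,
    # i.e. roughly 10.4 ms per pulse.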

func _init(tracks: Array, instrument_map: Dictionary):
    self.tracks = tracks
    self.num_tracks = len(self.tracks)
    self.inst_map = instrument_map
    self.players = []
    self.set_tempo(120.0)
    for i in num_tracks:
        self.players.append(AudioStreamPlayer.new())
        add_child(self.players[-1])
        self.channel_pointer.append(0)
        self.channel_instrument_idx.append(0)
        self.channel_next_pulse.append(0)
        self.channel_current_note.append(0)
        self.channel_velocity.append(100)
        self.channel_pan.append(0)
        self.channel_octave.append(5)
        self.channel_transpose.append(0)
        self.channel_fine_tuning.append(1.0)
        self.channel_adsr_attack.append(0)
        self.channel_adsr_decay.append(0)
        self.channel_adsr_sustain.append(0)
        self.channel_adsr_release.append(0)
        self.channel_noise_freq.append(0)
        self.channel_noise_on.append(0)
        self.channel_pan_lfo_rate.append(0)
        self.channel_pan_lfo_depth.append(0)
        self.channel_pan_lfo_on.append(0)
        self.channel_tremolo_delay.append(0)
        self.channel_tremolo_rate.append(0)
        self.channel_tremolo_depth.append(0)
        self.channel_tremolo_on.append(0)
        self.channel_vibrato_delay.append(0)
        self.channel_vibrato_rate.append(0)
        self.channel_vibrato_depth.append(0)
        self.channel_vibrato_on.append(0)
        self.channel_pitchmod_on.append(0)
        self.channel_echo_on.append(0)
        self.channel_echo_volume.append(0)

func play_channel(channel: int, time_offset: float = 0.0) -> int:
    # Executes track events until it hits a note or rest, returning the pulse count
    # until the next action, or -1 when the end of the event list is reached.
    self.players[channel].stop()
    var track: Array = self.tracks[channel]
    var l := len(track)
    var player: AudioStreamPlayer = self.players[channel]
    while true:
        var ptr: int = self.channel_pointer[channel]
        if ptr >= l:
            break
        var event = track[ptr]
        self.channel_pointer[channel] += 1
        match event[0]: # Control codes
            EventType.NOTE:
                var note = event[1]
                var duration = event[2]
                if note >= 0: # Don't shift or play rests
                    note += (12 * self.channel_octave[channel]) + self.channel_transpose[channel]
                    player.pitch_scale = pow(2.0, (note - MUSIC.REFERENCE_NOTE)/12.0) #* self.channel_fine_tuning[channel]
                    player.volume_db = linear2db((self.channel_velocity[channel]/255.0) * (self.master_volume/255.0))
                    player.play(max((SoundLoader.PLAY_START - time_offset)/player.pitch_scale, 0))
                    self.channel_current_note[channel] = note
                else:
                    self.channel_current_note[channel] = -1
                # TODO: Confirm tempo scaling
                return duration # Pulses to next instruction
            EventType.VOLUME:
                self.channel_velocity[channel] = event[1]
            EventType.VOLUME_SLIDE: # TODO: implement slides
                var slide_duration: int = event[1]
                self.channel_velocity[channel] = event[2]
            EventType.PAN: # TODO: implement slides
                self.channel_pan[channel] = event[1]
            EventType.PAN_SLIDE: # TODO: implement slides
                var slide_duration: int = event[1]
                self.channel_pan[channel] = event[2]
            EventType.PITCH_SLIDE: # TODO: implement slides
                var slide_duration: int = event[1]
                var target_pitch: int = event[2] # Signed
            EventType.VIBRATO_ON:
                self.channel_vibrato_delay[channel] = event[1]
                self.channel_vibrato_rate[channel] = event[2]
                self.channel_vibrato_depth[channel] = event[3]
                self.channel_vibrato_on[channel] = 1
            EventType.VIBRATO_OFF:
                self.channel_vibrato_on[channel] = 0
            EventType.TREMOLO_ON:
                self.channel_tremolo_delay[channel] = event[1]
                self.channel_tremolo_rate[channel] = event[2]
                self.channel_tremolo_depth[channel] = event[3]
                self.channel_tremolo_on[channel] = 1
            EventType.TREMOLO_OFF:
                self.channel_tremolo_on[channel] = 0
            EventType.PAN_LFO_ON:
                self.channel_pan_lfo_depth[channel] = event[1]
                self.channel_pan_lfo_rate[channel] = event[2]
                self.channel_pan_lfo_on[channel] = 1
            EventType.PAN_LFO_OFF:
                self.channel_pan_lfo_on[channel] = 0
            EventType.NOISE_FREQ:
                self.channel_noise_freq[channel] = event[1]
            EventType.NOISE_ON:
                self.channel_noise_on[channel] = 1
            EventType.NOISE_OFF:
                self.channel_noise_on[channel] = 0
            EventType.PITCHMOD_ON:
                self.channel_pitchmod_on[channel] = 1
            EventType.PITCHMOD_OFF:
                self.channel_pitchmod_on[channel] = 0
            EventType.ECHO_ON:
                self.channel_echo_on[channel] = 1
            EventType.ECHO_OFF:
                self.channel_echo_on[channel] = 0
            EventType.OCTAVE:
                self.channel_octave[channel] = event[1]
            EventType.OCTAVE_UP:
                self.channel_octave[channel] += 1
            EventType.OCTAVE_DOWN:
                self.channel_octave[channel] -= 1
            EventType.TRANSPOSE_ABS:
                self.channel_transpose[channel] = event[1]
            EventType.TRANSPOSE_REL:
                self.channel_transpose[channel] += event[1]
            EventType.TUNING:
                var fine_tune: int = event[1]
                var scale: float
                if fine_tune < 0x80:
                    scale = 1.0 + fine_tune/255.0
                else:
                    scale = fine_tune/255.0
                self.channel_fine_tuning[channel] = scale
            EventType.PROGCHANGE:
                self.channel_instrument_idx[channel] = event[1]
                player.stream = self.inst_map[self.channel_instrument_idx[channel]]
                # TODO - grab instrument envelope
            EventType.ADSR_ATTACK:
                self.channel_adsr_attack[channel] = event[1]
            EventType.ADSR_DECAY:
                self.channel_adsr_decay[channel] = event[1]
            EventType.ADSR_SUSTAIN:
                self.channel_adsr_sustain[channel] = event[1]
            EventType.ADSR_RELEASE:
                self.channel_adsr_release[channel] = event[1]
            EventType.ADSR_DEFAULT: # TODO - grab instrument envelope
                pass
            EventType.TEMPO:
                self.set_tempo(music.tempo_to_bpm(event[1]))
            EventType.TEMPO_SLIDE:
                self.set_tempo(music.tempo_to_bpm(event[2]))
                var slide_duration: int = event[1]
            EventType.ECHO_VOLUME:
                self.channel_echo_volume[channel] = event[1]
            EventType.ECHO_VOLUME_SLIDE: # TODO: implement slides
                self.channel_echo_volume[channel] = event[2]
                var slide_duration: int = event[1]
            EventType.ECHO_FEEDBACK_FIR: # TODO
                var feedback: int = event[1]
                var filterIndex: int = event[2]
            EventType.MASTER_VOLUME:
                self.master_volume = event[1]
            EventType.GOTO:
                self.channel_pointer[channel] = event[1]
            EventType.END:
                break
            _:
                break
    return -1 # End of track

func play_pulse(time_offset := 0.0) -> bool: # Return true if any channel played
    var active_channels := 0
    for channel in self.num_tracks:
        if self.channel_next_pulse[channel] < 0:
            continue # Channel not playing
        active_channels += 1
        if self.channel_next_pulse[channel] == 0:
            self.channel_next_pulse[channel] = self.play_channel(channel, time_offset)
        self.channel_next_pulse[channel] -= 1
    return active_channels > 0
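# channel_next_pulse[channel] counts down once per pulse; when it reaches 0 the channel's
# event list is advanced again via play_channel(), and a negative value (play_channel
# returned -1) marks that channel as finished.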

var is_playing := false
var bgm_timestamp := 0.0 # Note this will be behind by the maximum delay
func _process(delta: float) -> void:
    if self.is_playing:
        bgm_timestamp += delta
        while bgm_timestamp > seconds_per_pulse:
            bgm_timestamp -= seconds_per_pulse
            self.is_playing = play_pulse(bgm_timestamp)
        if not self.is_playing:
            print('BGM finished playing')
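

# Illustrative usage sketch (not taken from this project's scenes): a parent node could
# drive playback roughly like this, where `loaded_tracks` and `loaded_samples` stand in
# for the preprocessed event lists and AudioStreamSample map produced by the loaders:
#
#     var bgm = load('res://path/to/this_script.gd').new(loaded_tracks, loaded_samples)
#     add_child(bgm)
#     bgm.is_playing = true  # _process() then steps play_pulse() once per elapsed pulse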


# TODO: need to interleave channels for tempo and master volume!
const MAX_NOTE_EVENTS := 2048
class NoteEvent:
    var p_start: int # In pulse space
    var p_end: int
    var instrument: int
    var pitch: int
    var velocity: float
    var adsr_attack: int
    var adsr_decay: int
    var adsr_sustain: int
    var adsr_release: int


class TrackCurve: # built-in Curve class is too restrictive for this
    var default: float
    var entries: PoolVector3Array
    var baked_integrals: PoolRealArray
    func _init(default: float = 0.0):
        self.default = default
        self.entries = PoolVector3Array()
        self.baked_integrals = PoolRealArray()

    func add_point(pulse: int, value: float, ramp_to_next: bool) -> void:
        var l := len(self.entries)
        var entry := Vector3(float(pulse), value, float(ramp_to_next))
        if l == 0 or self.entries[-1].x < pulse:
            self.entries.append(entry)
        else: # Find the first entry bigger than pulse, and insert before
            for i in l:
                if self.entries[i].x > pulse:
                    self.entries.insert(i, entry)
                    break

    var last_pulse_block_get: int = -1 # Cache previous position for sequential lookups
    func get_pulse(pulse: float) -> float:
        var l := len(self.entries)
        if l == 0 or pulse < self.entries[0].x:
            return self.default
        if pulse >= self.entries[-1].x:
            return self.entries[-1].y
        for i in l-1:
            # Find first entry beyond
            if pulse < self.entries[i+1].x:
                if self.entries[i].z > 0: # ramp_to_next
                    return range_lerp(pulse, self.entries[i].x, self.entries[i+1].x, self.entries[i].y, self.entries[i+1].y)
                else:
                    return self.entries[i].y
        return self.default # Should be unreachable

    func bake_integrals():
        # Store the starting integrated value (i.e. time for the tempo curve) of each pulse value
        self.baked_integrals.clear()
        var last_pulse := 0.0
        var last_value := self.default
        var last_integral := 0.0
        var last_ramp := false
        for entry in self.entries:
            var step_pulse = entry.x - last_pulse
            var integral := last_integral
            if last_ramp:
                # Treat it as a rectangle where the height is the average of the slanted top.
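                # For example, for the tempo curve a ramp from 0.010 s/pulse down to
                # 0.006 s/pulse over 100 pulses adds 100 * (0.010 + 0.006)/2 = 0.8 seconds
                # (trapezoid rule).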
                integral += step_pulse * (last_value + entry.y)/2.0
            else:
                integral += step_pulse * last_value
            self.baked_integrals.append(integral)
            last_pulse = entry.x
            last_value = entry.y
            last_integral = integral
            last_ramp = entry.z > 0

    var last_integral_block_get: int = -1 # Cache previous position for sequential lookups
    func get_integral(pulse: float) -> float:
        # This is for tempo -> time. Need to bake it to have any hope of efficiency.
        if self.baked_integrals.empty():
            self.bake_integrals()
        # Find first entry earlier than the pulse
        for i in range(len(self.entries)-1, -1, -1):
            var entry = self.entries[i]
            if pulse > entry.x:
                var integral = self.baked_integrals[i]
                var step_pulse = pulse - entry.x
                if entry.z: # Ramp to next
                    # Treat it as a rectangle where the height is the average of the slanted top.
                    integral += step_pulse * (entry.y + entries[i+1].y)/2.0 # If last entry somehow has ramp-to-next (it shouldn't), this will out-of-range error
                else:
                    integral += step_pulse * entry.y
                return integral
        return 0.0
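    # get_integral() converts a pulse position into the accumulated curve value; for the
    # tempo curve (seconds per pulse) that is elapsed seconds. render_channels() below
    # uses it as, e.g., int(curve_master_tempo.get_integral(p) * 32000) to place the note
    # at pulse p in sample frames at 32 kHz.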


func render_channels(_t_start: float, _t_end: float, inst_map: Array) -> Array: # Returns [data: PoolByteArray, target_time_length: float (seconds), [smp_loop_start, smp_loop_end] (sample frames)]
    # Since some channels contain global events (tempo and global volume for now),
    # the strategy is to preprocess each channel in a global-state-agnostic way,
    # then, once all the global curves and the longest unlooped length are known,
    # do a second pass to generate the final events.
    var instrument_adsrs = RomLoader.snes_data.bgm_instrument_adsrs # TODO: UNHARDCODE THIS
    var all_note_events = []

    var curve_master_volume := TrackCurve.new(100.0/255.0) # [0.0, 1.0] for now
    var curve_master_tempo := TrackCurve.new(120.0) # values are seconds per pulse (via music.tempo_to_seconds_per_pulse), not BPM

    var curve_channel_pans := []

    for channel in self.num_tracks:
        var curve_velocity := TrackCurve.new(100.0/255.0) # [0.0, 1.0] for now
        var curve_pan := TrackCurve.new() # [-1.0, 1.0] for now
        var channel_note_events = []
        var track: Array = self.tracks[channel]
        var l := len(track)
        var p := 0 # current pulse

        if l == 0: # Empty channel, move on
            all_note_events.append(channel_note_events)
            curve_channel_pans.append(curve_pan)
            continue

        # var num_notes: int = 0
        var current_instrument := 0
        var current_octave := 5
        var current_transpose := 0
        # var current_velocity := 100
        var current_adsr_attack := 0
        var current_adsr_decay := 0
        var current_adsr_sustain := 0
        var current_adsr_release := 0

        # First, check if it ends in a GOTO, then store the program counter of the destination
        var infinite_loop_target_program_counter = -1
        var infinite_loop_target_pulse = -1
        if track[-1][0] == EventType.GOTO:
            infinite_loop_target_program_counter = track[-1][1]

        var program_counter := 0
        while true: #num_notes < MAX_NOTE_EVENTS:
            if program_counter >= l:
                break
            if program_counter == infinite_loop_target_program_counter:
                infinite_loop_target_pulse = p
            var event = track[program_counter]
            program_counter += 1
            match event[0]: # Control codes
                EventType.GOTO: # This is a preprocessed event list, so GOTO is a final infinite loop marker
                    var note_event = NoteEvent.new()
                    note_event.p_start = p
                    note_event.p_end = infinite_loop_target_pulse # Fake final note event using p_start > p_end to encode the infinite jump back loop.
                    # Note that event[1] points to an Event, not a NoteEvent or a pulse, so we looked up the target pulse earlier
                    channel_note_events.append(note_event)
                    break
                EventType.MASTER_VOLUME:
                    curve_master_volume.add_point(p, event[1]/255.0, false)
                EventType.TEMPO:
                    var new_tempo = music.tempo_to_seconds_per_pulse(event[1])
                    curve_master_tempo.add_point(p, new_tempo, false)
                EventType.TEMPO_SLIDE:
                    var old_tempo = curve_master_tempo.get_pulse(p)
                    var new_tempo = music.tempo_to_seconds_per_pulse(event[2])
                    var slide_duration: int = event[1] # TODO: work out how this is scaled
                    curve_master_tempo.add_point(p, old_tempo, true)
                    curve_master_tempo.add_point(p + slide_duration, new_tempo, false)
                EventType.NOTE:
                    var note = event[1]
                    var duration = event[2]
                    if note >= 0: # Don't shift or play rests
                        note += (12 * current_octave) + current_transpose
                        var note_event = NoteEvent.new()
                        note_event.p_start = p
                        note_event.p_end = p + duration
                        note_event.instrument = current_instrument
                        note_event.pitch = note # pitch_idx #* self.channel_fine_tuning[channel]
                        note_event.velocity = curve_velocity.get_pulse(p) # current_velocity
                        note_event.adsr_attack = current_adsr_attack
                        note_event.adsr_decay = current_adsr_decay
                        note_event.adsr_sustain = current_adsr_sustain
                        note_event.adsr_release = current_adsr_release
                        channel_note_events.append(note_event)
                        # num_notes += 1
                    p += duration
                EventType.VOLUME:
                    var new_velocity: float = event[1]/255.0
                    curve_velocity.add_point(p, new_velocity, false)
                EventType.VOLUME_SLIDE:
                    var old_velocity = curve_velocity.get_pulse(p)
                    var slide_duration: int = event[1]
                    var new_velocity: float = event[2]/255.0
                    curve_velocity.add_point(p, old_velocity, true)
                    curve_velocity.add_point(p + slide_duration, new_velocity, false)
                EventType.PAN:
                    var new_pan = 1.0 - event[1]/127.5 # 0..255 maps to +1..-1 (reversed from MIDI)
                    curve_pan.add_point(p, new_pan, false)
                EventType.PAN_SLIDE:
                    var old_pan = curve_pan.get_pulse(p)
                    var new_pan = 1.0 - event[2]/127.5
                    var slide_duration: int = event[1] # TODO: work out how slides are scaled
                    curve_pan.add_point(p, old_pan, true)
                    curve_pan.add_point(p + slide_duration, new_pan, false)
                EventType.PITCH_SLIDE: # TODO: implement slides
                    var slide_duration: int = event[1]
                    var target_pitch: int = event[2] # Signed
                EventType.OCTAVE:
                    current_octave = event[1]
                EventType.OCTAVE_UP:
                    current_octave += 1
                EventType.OCTAVE_DOWN:
                    current_octave -= 1
                EventType.TRANSPOSE_ABS:
                    current_transpose = event[1]
                EventType.TRANSPOSE_REL:
                    current_transpose += event[1]
                EventType.TUNING:
                    var fine_tune: int = event[1]
                    var scale: float
                    if fine_tune < 0x80:
                        scale = 1.0 + fine_tune/255.0
                    else:
                        scale = fine_tune/255.0
                    self.channel_fine_tuning[channel] = scale
                EventType.PROGCHANGE:
                    var event_idx = event[1]-0x20
                    if event_idx >= 0:
                        current_instrument = inst_map[event_idx] - 1
                        if current_instrument < len(instrument_adsrs) and current_instrument > 0:
                            var adsr = instrument_adsrs[current_instrument]
                            current_adsr_attack = adsr[2]
                            current_adsr_decay = adsr[3]
                            current_adsr_sustain = adsr[0]
                            current_adsr_release = adsr[1]
                EventType.ADSR_DEFAULT: # TODO - Investigate actual scaling and order
                    if current_instrument < len(instrument_adsrs) and current_instrument > 0:
                        var adsr = instrument_adsrs[current_instrument]
                        current_adsr_attack = adsr[2]
                        current_adsr_decay = adsr[3]
                        current_adsr_sustain = adsr[0]
                        current_adsr_release = adsr[1]
                EventType.ADSR_ATTACK:
                    current_adsr_attack = event[1]
                EventType.ADSR_DECAY:
                    current_adsr_decay = event[1]
                EventType.ADSR_SUSTAIN:
                    current_adsr_sustain = event[1]
                EventType.ADSR_RELEASE:
                    current_adsr_release = event[1]
                EventType.VIBRATO_ON:
                    self.channel_vibrato_delay[channel] = event[1]
                    self.channel_vibrato_rate[channel] = event[2]
                    self.channel_vibrato_depth[channel] = event[3]
                    self.channel_vibrato_on[channel] = 1
                EventType.VIBRATO_OFF:
                    self.channel_vibrato_on[channel] = 0
                EventType.TREMOLO_ON:
                    self.channel_tremolo_delay[channel] = event[1]
                    self.channel_tremolo_rate[channel] = event[2]
                    self.channel_tremolo_depth[channel] = event[3]
                    self.channel_tremolo_on[channel] = 1
                EventType.TREMOLO_OFF:
                    self.channel_tremolo_on[channel] = 0
                EventType.PAN_LFO_ON:
                    self.channel_pan_lfo_depth[channel] = event[1]
                    self.channel_pan_lfo_rate[channel] = event[2]
                    self.channel_pan_lfo_on[channel] = 1
                EventType.PAN_LFO_OFF:
                    self.channel_pan_lfo_on[channel] = 0
                EventType.NOISE_FREQ:
                    self.channel_noise_freq[channel] = event[1]
                EventType.NOISE_ON:
                    self.channel_noise_on[channel] = 1
                EventType.NOISE_OFF:
                    self.channel_noise_on[channel] = 0
                EventType.PITCHMOD_ON:
                    self.channel_pitchmod_on[channel] = 1
                EventType.PITCHMOD_OFF:
                    self.channel_pitchmod_on[channel] = 0
                EventType.ECHO_ON:
                    self.channel_echo_on[channel] = 1
                EventType.ECHO_OFF:
                    self.channel_echo_on[channel] = 0
                EventType.ECHO_VOLUME:
                    self.channel_echo_volume[channel] = event[1]
                EventType.ECHO_VOLUME_SLIDE: # TODO: implement slides
                    self.channel_echo_volume[channel] = event[2]
                    var slide_duration: int = event[1]
                EventType.ECHO_FEEDBACK_FIR: # TODO
                    var feedback: int = event[1]
                    var filterIndex: int = event[2]
                EventType.END:
                    break
                _:
                    break
        # End of track
        all_note_events.append(channel_note_events)
        curve_channel_pans.append(curve_pan)

    # Integrate tempo so we can get a pulse->time mapping
    curve_master_tempo.bake_integrals()
    # Find the longest channel
    var channel_loop_p_returns = PoolIntArray()
    var channel_loop_p_lengths = PoolIntArray()
    var longest_channel_idx = 0
    var longest_channel_p_end = 0
    var highest_channel_p_return = -1
    for channel in self.num_tracks:
        if all_note_events[channel].empty():
            channel_loop_p_returns.append(-1)
            channel_loop_p_lengths.append(0) # Keep the per-channel loop arrays aligned
            continue
        var note_event: NoteEvent = all_note_events[channel][-1]
        var p_end = note_event.p_end
        if p_end < note_event.p_start:
            # Ends on infinite loop
            channel_loop_p_returns.append(p_end)
            channel_loop_p_lengths.append(note_event.p_start - p_end)
            if p_end > highest_channel_p_return:
                highest_channel_p_return = p_end
            p_end = note_event.p_start
        else:
            channel_loop_p_returns.append(-1)
            channel_loop_p_lengths.append(0) # Keep the per-channel loop arrays aligned

        if p_end > longest_channel_p_end:
            longest_channel_p_end = p_end
            longest_channel_idx = channel

    var target_pulse_length = longest_channel_p_end + 200
    var target_time_length = curve_master_tempo.get_integral(target_pulse_length)

    # Second pass - encode the notes with the now-known global tempo and volume curves
    var data := PoolByteArray()
    for channel in self.num_tracks:
        var events = all_note_events[channel]
        var loop_return_note_event_idx = -1
        var loop_return_p = channel_loop_p_returns[channel]
        var curve_pan: TrackCurve = curve_channel_pans[channel]

        var midi_events_bytes_t_start := StreamPeerBuffer.new()
        var midi_events_bytes_t_end := StreamPeerBuffer.new()
        var midi_events_bytes3 := StreamPeerBuffer.new()
        var midi_events_bytes_adsr := StreamPeerBuffer.new()

        var num_notes: int = 0
        var event_ptr := 0
        var l_events := len(events)
        var loop_p_offset := 0
        for i in MAX_NOTE_EVENTS:
            if event_ptr >= l_events:
                break
            if (loop_return_p >= 0) and event_ptr == l_events-1:
                event_ptr = loop_return_note_event_idx
                loop_p_offset += channel_loop_p_lengths[channel]
            var event: NoteEvent = events[event_ptr]
            var p = event.p_start
            if loop_return_note_event_idx < 0 and p >= loop_return_p:
                loop_return_note_event_idx = event_ptr
            midi_events_bytes_t_start.put_32(int(curve_master_tempo.get_integral(p + loop_p_offset) * 32000))
            midi_events_bytes_t_end.put_32(int(curve_master_tempo.get_integral(event.p_end + loop_p_offset) * 32000)) # t_end
            midi_events_bytes3.put_u8(event.instrument)
            midi_events_bytes3.put_u8(event.pitch)
            midi_events_bytes3.put_u8(int(event.velocity * curve_master_volume.get_pulse(p) * 255.0)) # velocity
            midi_events_bytes3.put_u8(int((curve_pan.get_pulse(p)+1.0) * 127.5)) # pan
            midi_events_bytes_adsr.put_u8(event.adsr_attack)
            midi_events_bytes_adsr.put_u8(event.adsr_decay)
            midi_events_bytes_adsr.put_u8(event.adsr_sustain)
            midi_events_bytes_adsr.put_u8(event.adsr_release)

            event_ptr += 1
            num_notes += 1
        # Fill up end of notes array with dummies
        for i in range(num_notes, MAX_NOTE_EVENTS):
            midi_events_bytes_t_start.put_32(0x0FFFFFFF)
            midi_events_bytes_t_end.put_32(0x0FFFFFFF)
            midi_events_bytes3.put_32(0)
            midi_events_bytes_adsr.put_32(0)
        data += midi_events_bytes_t_start.data_array + midi_events_bytes_t_end.data_array + midi_events_bytes3.data_array + midi_events_bytes_adsr.data_array
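        # Layout note: each channel contributes four fixed-size blocks of MAX_NOTE_EVENTS
        # entries each - int32 start times, int32 end times, 4 bytes of
        # instrument/pitch/velocity/pan, and 4 ADSR bytes - with times in sample frames at
        # 32 kHz and unused slots padded with the 0x0FFFFFFF / zero dummies above.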
    var smp_loop_start = -1
    var smp_loop_end = -1
    if highest_channel_p_return > 0:
        smp_loop_start = curve_master_tempo.get_integral(highest_channel_p_return + 100) * 32000
        smp_loop_end = curve_master_tempo.get_integral(longest_channel_p_end + 100) * 32000
    return [data, target_time_length, [smp_loop_start, smp_loop_end]]