AudioMeasurement/Measurement.gd

extends Spatial
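
# Measurement rig: records a sample from the microphone bus, then replays it
# through the 'Analysis' bus and streams spectrum frames to a Waveform node.
# Expected bus layout: buses named 'Analysis', 'Mic A' and 'Mic B', with a
# record effect in slot 0 of each mic bus and a spectrum analyzer in slot 0
# of the Analysis bus.
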
var bus_index_analysis: int
var bus_index_mic_a : int
var bus_index_mic_b : int
var record_mic_a : AudioEffectRecord
var record_mic_b : AudioEffectRecord
var analyzer : AudioEffectSpectrumAnalyzerInstance
var recording_time = 0
var recording_running = false
var analysis_running = false
var analysis_player : AudioStreamPlayer
export var sample_time = 1 # seconds to record
var recording_mic_a : AudioStreamSample
var recording_mic_b : AudioStreamSample
var waveform : Spatial
var color_inactive = Color(1,1,1, 0.4)
var color_recording = Color(1, 0, 0.5, 0.8)
var color_analysis = Color(0.5, 1, 0, 0.8)
var ui : Control
var ui_light : Control
var ui_text : Control
var ui_select_mic_a : Control
var ui_select_mic_b : Control
var ui_button_record : Control
var ui_button_analyse : Control
#var sweep = load('res://pinknoise.wav')
# Called when the node enters the scene tree for the first time.
func _ready():
    self.bus_index_analysis = AudioServer.get_bus_index('Analysis')
    self.bus_index_mic_a = AudioServer.get_bus_index('Mic A')
    self.bus_index_mic_b = AudioServer.get_bus_index('Mic B')
    self.record_mic_a = AudioServer.get_bus_effect(self.bus_index_mic_a, 0)
    self.record_mic_b = AudioServer.get_bus_effect(self.bus_index_mic_b, 0)
    self.analyzer = AudioServer.get_bus_effect_instance(self.bus_index_analysis, 0)
    self.analysis_player = self.get_node('AnalysisPlayer')
    # hook up UI
    self.waveform = self.get_node('Waveform')
    self.ui = self.get_node('UI')
    self.ui_light = self.ui.find_node('StatusLight')
    self.ui_text = self.ui.find_node('StatusText')
    self.ui_select_mic_a = self.ui.find_node('Source Mic A')
    self.ui_select_mic_b = self.ui.find_node('Source Mic B')
    self.ui_button_record = self.ui.find_node('Record')
    self.ui_button_analyse = self.ui.find_node('Analyse')
    self.ui_text.text = 'Idle'
    #self.ui_button.pressed.connect(self.measure) # Godot 4 signal syntax
    self.ui_button_record.connect('pressed', self, 'recording_take')
    self.ui_button_analyse.connect('pressed', self, 'analysis_start')
    for device in AudioServer.capture_get_device_list():
        self.ui_select_mic_a.add_item(device)
        self.ui_select_mic_b.add_item(device)
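
# Start a take: arm the record effect on the mic bus; _process() ends the
# take automatically after sample_time seconds.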
func recording_take():
    self.ui_light.color = self.color_recording
    self.ui_text.text = 'Recording'
    self.record_mic_a.set_recording_active(true)
    #self.record_mic_b.set_recording_active(true)
    self.recording_running = true
    self.recording_time = 0
    self.ui_button_analyse.disabled = true
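
# End the take and keep the captured sample for playback during analysis.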
func recording_finish():
    self.recording_running = false
    self.ui_light.color = self.color_inactive
    self.ui_text.text = 'Idle'
    self.ui_button_analyse.disabled = false
    self.record_mic_a.set_recording_active(false)
    #self.record_mic_b.set_recording_active(false)
    self.recording_mic_a = self.record_mic_a.get_recording()
    #self.recording_mic_b = self.record_mic_b.get_recording()
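
# Replay the captured sample; the AnalysisPlayer node is expected to output
# to the 'Analysis' bus so the spectrum analyzer can be polled while it plays.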
func analysis_start():
    print("ANALYSIS START")
    self.ui_light.color = self.color_analysis
    self.ui_text.text = 'Analysing'
    self.ui_button_record.disabled = true
    self.analysis_running = true
    self.analysis_player.stream = self.recording_mic_a
    #self.analysis_player.stream = self.sweep
    self.analysis_player.play()

func analysis_finish():
    self.analysis_running = false
    self.ui_light.color = self.color_inactive
    self.ui_text.text = 'Idle'
    self.ui_button_record.disabled = false
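
# Per-frame driver: while the analysis clip plays, poll the analyzer and push
# magnitude frames to the Waveform node; while recording, stop the take once
# sample_time seconds have elapsed.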
# Called every frame. 'delta' is the elapsed time since the previous frame.
func _process(delta):
    if self.analysis_running:
        if self.analysis_player.playing:
            var frame = [self.analyzer.get_magnitude_for_frequency_range(0, 20000)]
            self.waveform.prepare()
            self.waveform.add_frame(frame)
            self.waveform.end()
        else:
            self.analysis_finish()
    if self.recording_running:
        self.recording_time += delta
        if self.recording_time >= self.sample_time:
            self.recording_finish()
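
# The source selectors are filled in _ready() but not yet hooked up to anything.
# One possible wiring (hypothetical handler; Godot 3.x exposes a single global
# capture device): connect the selector in _ready(), e.g.
#   self.ui_select_mic_a.connect('item_selected', self, '_on_mic_a_selected')
# and switch the capture device when an item is picked:
func _on_mic_a_selected(index):
    AudioServer.capture_set_device(self.ui_select_mic_a.get_item_text(index))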