diff --git a/modem/audio.py b/modem/audio.py
index 736ef44f..859475b2 100644
--- a/modem/audio.py
+++ b/modem/audio.py
@@ -210,6 +210,36 @@ def set_audio_volume(datalist: np.ndarray, dB: float) -> np.ndarray:
 RMS_COUNTER = 0
 CHANNEL_BUSY_DELAY = 0
 
+
+def prepare_data_for_fft(data, target_length_samples=400):
+    """
+    Prepare data array for FFT by padding if necessary to match the target length.
+    Center the data if it's shorter than the target length.
+
+    Parameters:
+    - data: numpy array of np.int16, representing the input data.
+    - target_length_samples: int, the target length of the data in samples.
+
+    Returns:
+    - numpy array of np.int16, padded and/or centered if necessary.
+    """
+    # Calculate the current length in samples
+    current_length_samples = data.size
+
+    # Check if padding is needed
+    if current_length_samples < target_length_samples:
+        # Calculate total padding needed
+        total_pad_length = target_length_samples - current_length_samples
+        # Calculate padding on each side
+        pad_before = total_pad_length // 2
+        pad_after = total_pad_length - pad_before
+        # Pad the data to center it
+        data_padded = np.pad(data, (pad_before, pad_after), 'constant', constant_values=(0,))
+        return data_padded
+    else:
+        # No padding needed, return original data
+        return data
+
 def calculate_fft(data, fft_queue, states) -> None:
     """
     Calculate an average signal strength of the channel to assess
@@ -225,6 +255,7 @@ def calculate_fft(data, fft_queue, states) -> None:
     global RMS_COUNTER, CHANNEL_BUSY_DELAY
 
     try:
+        data = prepare_data_for_fft(data, target_length_samples=800)
         fftarray = np.fft.rfft(data)
 
         # Set value 0 to 1 to avoid division by zero
diff --git a/modem/modem.py b/modem/modem.py
index 7c16c3ce..3327ee19 100644
--- a/modem/modem.py
+++ b/modem/modem.py
@@ -280,7 +280,8 @@ class RF:
         try:
             if not self.audio_out_queue.empty() and not self.enqueuing_audio:
                 chunk = self.audio_out_queue.get_nowait()
-                audio.calculate_fft(chunk, self.fft_queue, self.states)
+                audio_8k = self.resampler.resample48_to_8(chunk)
+                audio.calculate_fft(audio_8k, self.fft_queue, self.states)
                 outdata[:] = chunk.reshape(outdata.shape)
             else:
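
As a quick standalone illustration of the padding behaviour introduced in audio.py (not part of the patch; the 300-sample chunk length is an arbitrary value chosen for this example), the sketch below shows how a short chunk ends up zero-padded and centered in an 800-sample window, matching the target_length_samples=800 passed from calculate_fft:

import numpy as np

# Example input: a 300-sample chunk of np.int16 audio (length chosen arbitrarily).
chunk = np.ones(300, dtype=np.int16)

target = 800
total_pad = target - chunk.size            # 500 samples of padding needed
pad_before = total_pad // 2                # 250 zeros in front
pad_after = total_pad - pad_before         # 250 zeros behind
padded = np.pad(chunk, (pad_before, pad_after), 'constant', constant_values=(0,))

assert padded.size == target               # chunk now fills the 800-sample window
assert not padded[:pad_before].any()       # leading zeros
assert not padded[-pad_after:].any()       # trailing zeros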