This commit is contained in:
dj2ls 2024-04-01 09:03:58 +02:00
parent 05bff2ff70
commit 6fe921aabc
4 changed files with 10 additions and 6 deletions

View file

@ -454,7 +454,7 @@ export function Spectrum(id, options) {
this.centerHz = options && options.centerHz ? options.centerHz : 1500;
this.spanHz = options && options.spanHz ? options.spanHz : 0;
this.wf_size = options && options.wf_size ? options.wf_size : 0;
this.wf_rows = options && options.wf_rows ? options.wf_rows : 1024;
this.wf_rows = options && options.wf_rows ? options.wf_rows : 512;
this.spectrumPercent =
options && options.spectrumPercent ? options.spectrumPercent : 0;
this.spectrumPercentStep =

View file

@ -225,6 +225,8 @@ def calculate_fft(data, fft_queue, states) -> None:
global RMS_COUNTER, CHANNEL_BUSY_DELAY
try:
data = bytearray()
fftarray = np.fft.rfft(data)
# Set value 0 to 1 to avoid division by zero
@ -321,6 +323,8 @@ def calculate_fft(data, fft_queue, states) -> None:
# clear the queue if it already holds an entry, so the consumer only sees the latest FFT
if fft_queue.qsize() >= 1:
fft_queue = queue.Queue()
fft_queue.put(dfftlist[:315]) # 315 --> bandwidth 3200
#fft_queue.put(dfftlist[:315]) # 315 --> bandwidth 3200
fft_queue.put(dfftlist) # full FFT list — no longer sliced to 315 bins (3200 Hz bandwidth)
except Exception as err:
print(f"[MDM] calculate_fft: Exception: {err}")

View file

@ -11,7 +11,7 @@ class EventManager:
def broadcast(self, data):
for q in self.queues:
#self.logger.debug(f"Event: ", ev=data)
self.logger.debug(f"Event: ", ev=data)
if q.qsize() > 10:
q.queue.clear()
q.put(data)

View file

@ -152,7 +152,7 @@ class RF:
callback=self.sd_input_audio_callback,
device=in_dev_index,
samplerate=self.AUDIO_SAMPLE_RATE,
blocksize=1200,
blocksize=4800,
)
self.sd_input_stream.start()
@ -162,7 +162,7 @@ class RF:
callback=self.sd_output_audio_callback,
device=out_dev_index,
samplerate=self.AUDIO_SAMPLE_RATE,
blocksize=256,
blocksize=512,
)
self.sd_output_stream.start()
@ -259,7 +259,7 @@ class RF:
self.tci_module.wait_until_transmitted(audio_48k)
else:
# slice audio data to needed blocklength
block_size = 256
block_size = 512
pad_length = -len(audio_48k) % block_size
padded_data = np.pad(audio_48k, (0, pad_length), mode='constant')
sliced_audio_data = padded_data.reshape(-1, block_size)