Merge pull request #686 from DJ2LS/develop

Author: DJ2LS (committed by GitHub)
Date: 2024-03-16 10:07:28 -07:00
Commit: 6178454250
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)

16 changed files with 352 additions and 273 deletions

View file

@@ -2,7 +2,7 @@
"name": "FreeDATA",
"description": "FreeDATA Client application for connecting to FreeDATA server",
"private": true,
"version": "0.14.3-alpha",
"version": "0.14.4-alpha",
"main": "dist-electron/main/index.js",
"scripts": {
"start": "vite",
@@ -63,7 +63,7 @@
"@types/nconf": "^0.10.6",
"@typescript-eslint/eslint-plugin": "6.21.0",
"@vitejs/plugin-vue": "5.0.4",
"electron": "29.1.1",
"electron": "28.2.6",
"electron-builder": "24.9.1",
"eslint": "8.56.0",
"eslint-config-prettier": "9.1.0",

View file

@@ -832,7 +832,7 @@ function quickfill() {
<h6>15m</h6>
</div>
</a>
<a href="#" class="list-group-item list-group-item-action" @click="updateFrequencyAndApply(14093000)">
<a href="#" class="list-group-item list-group-item-action" @click="updateFrequencyAndApply(18106000)">
<div class="d-flex w-100 justify-content-between">
<h5 class="mb-1">18.106 MHz</h5>
<small>EU / US</small>

View file

@@ -21,6 +21,14 @@ function startStopBeacon() {
}
}
var dxcallPing = ref("");
window.addEventListener(
"stationSelected",
function (eventdata) {
let evt = <CustomEvent>eventdata;
dxcallPing.value = evt.detail;
},
false,
);
</script>
<template>
<div class="card h-100">

View file

@@ -21,6 +21,14 @@ function startStopBeacon() {
}
}
var dxcallPing = ref("");
window.addEventListener(
"stationSelected",
function (eventdata) {
let evt = <CustomEvent>eventdata;
dxcallPing.value = evt.detail;
},
false,
);
</script>
<template>
<div class="card h-100">

View file

@@ -34,6 +34,10 @@ function getMaidenheadDistance(dxGrid) {
//
}
}
function pushToPing(origin)
{
window.dispatchEvent(new CustomEvent("stationSelected", {bubbles:true, detail: origin }));
}
</script>
<template>
<div class="card h-100">
@@ -61,7 +65,7 @@ function getMaidenheadDistance(dxGrid) {
</thead>
<tbody id="gridHeardStations">
<!--https://vuejs.org/guide/essentials/list.html-->
<tr v-for="item in state.heard_stations" :key="item.origin">
<tr v-for="item in state.heard_stations" :key="item.origin" @click="pushToPing(item.origin)">
<td>
{{ getDateTime(item.timestamp) }}
</td>

View file

@@ -34,6 +34,10 @@ function getMaidenheadDistance(dxGrid) {
//
}
}
function pushToPing(origin)
{
window.dispatchEvent(new CustomEvent("stationSelected", {bubbles:true, detail: origin }));
}
</script>
<template>
<div class="card h-100">
@@ -54,7 +58,7 @@ function getMaidenheadDistance(dxGrid) {
</thead>
<tbody id="miniHeardStations">
<!--https://vuejs.org/guide/essentials/list.html-->
<tr v-for="item in state.heard_stations" :key="item.origin">
<tr v-for="item in state.heard_stations" :key="item.origin" @click="pushToPing(item.origin)">
<td>
<span class="fs-6">{{ getDateTime(item.timestamp) }}</span>
</td>

View file

@@ -12,6 +12,15 @@ function transmitPing() {
sendModemPing(dxcallPing.value.toUpperCase());
}
var dxcallPing = ref("");
window.addEventListener(
"stationSelected",
function (eventdata) {
let evt = <CustomEvent>eventdata;
dxcallPing.value = evt.detail;
},
false,
);
</script>
<template>
<div class="input-group" style="width: calc(100% - 24px)">

View file

@@ -152,6 +152,12 @@ class ARQSession():
self.snr_histogram.append(self.snr)
self.bpm_histogram.append(stats['bytes_per_minute'])
self.time_histogram.append(datetime.datetime.now().isoformat())
# Limit the size of each histogram to the last 20 entries
self.snr_histogram = self.snr_histogram[-20:]
self.bpm_histogram = self.bpm_histogram[-20:]
self.time_histogram = self.time_histogram[-20:]
return stats
def get_appropriate_speed_level(self, snr):
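
The hunk above keeps the per-session statistics bounded: after every update each histogram is truncated to its last 20 entries, so long ARQ sessions cannot grow these lists indefinitely. A minimal sketch of that rolling-window pattern (helper name and constant are illustrative, not FreeDATA identifiers):

# Keep only the most recent N samples of a statistics series.
MAX_HISTOGRAM_ENTRIES = 20

def append_bounded(history: list, value) -> list:
    history.append(value)
    return history[-MAX_HISTOGRAM_ENTRIES:]   # negative slice keeps the tail, drops the rest

snr_histogram = []
for snr in (3.0, 5.5, 4.2):
    snr_histogram = append_bounded(snr_histogram, snr)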

View file

@@ -1,16 +1,12 @@
"""
Gather information about audio devices.
"""
import atexit
import multiprocessing
import crcengine
import sounddevice as sd
import structlog
import numpy as np
import queue
import threading
atexit.register(sd._terminate)
log = structlog.get_logger("audio")

View file

@@ -15,7 +15,6 @@ input_device = 5a1c
output_device = bd6c
rx_audio_level = 0
tx_audio_level = 0
enable_auto_tune = False
[RIGCTLD]
ip = 127.0.0.1

View file

@@ -26,7 +26,6 @@ class CONFIG:
'output_device': str,
'rx_audio_level': int,
'tx_audio_level': int,
'enable_auto_tune': bool,
},
'RADIO': {
'control': str,

View file

@@ -4,10 +4,7 @@ import ctypes
import structlog
import threading
import audio
import os
from modem_frametypes import FRAME_TYPE
import itertools
from time import sleep
TESTMODE = False
@@ -28,11 +25,10 @@ class Demodulator():
'decoding_thread': None
}
def __init__(self, config, audio_rx_q, data_q_rx, states, event_manager, fft_queue):
def __init__(self, config, audio_rx_q, data_q_rx, states, event_manager, service_queue, fft_queue):
self.log = structlog.get_logger("Demodulator")
self.rx_audio_level = config['AUDIO']['rx_audio_level']
self.service_queue = service_queue
self.AUDIO_FRAMES_PER_BUFFER_RX = 4800
self.buffer_overflow_counter = [0, 0, 0, 0, 0, 0, 0, 0]
self.is_codec2_traffic_counter = 0
@@ -126,35 +122,6 @@ class Demodulator():
)
self.MODE_DICT[mode]['decoding_thread'].start()
def sd_input_audio_callback(self, indata: np.ndarray, frames: int, time, status) -> None:
if status:
self.log.warning("[AUDIO STATUS]", status=status, time=time, frames=frames)
return
try:
audio_48k = np.frombuffer(indata, dtype=np.int16)
audio_8k = self.resampler.resample48_to_8(audio_48k)
audio_8k_level_adjusted = audio.set_audio_volume(audio_8k, self.rx_audio_level)
audio.calculate_fft(audio_8k_level_adjusted, self.fft_queue, self.states)
length_audio_8k_level_adjusted = len(audio_8k_level_adjusted)
# Avoid buffer overflow by filling only if buffer for
# selected datachannel mode is not full
index = 0
for mode in self.MODE_DICT:
mode_data = self.MODE_DICT[mode]
audiobuffer = mode_data['audio_buffer']
decode = mode_data['decode']
index += 1
if audiobuffer:
if (audiobuffer.nbuffer + length_audio_8k_level_adjusted) > audiobuffer.size:
self.buffer_overflow_counter[index] += 1
self.event_manager.send_buffer_overflow(self.buffer_overflow_counter)
elif decode:
audiobuffer.push(audio_8k_level_adjusted)
except Exception as e:
self.log.warning("[AUDIO EXCEPTION]", status=status, time=time, frames=frames, e=e)
def get_frequency_offset(self, freedv: ctypes.c_void_p) -> float:
"""

View file

@@ -12,6 +12,8 @@ class EventManager:
def broadcast(self, data):
for q in self.queues:
self.logger.debug(f"Event: ", ev=data)
if q.qsize() > 10:
q.queue.clear()
q.put(data)
def send_ptt_change(self, on:bool = False):
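
The broadcast change above protects the event fan-out from slow consumers: if a subscriber queue holds more than 10 unread events, its backlog is cleared before the new event is enqueued. A hedged sketch of that behaviour with plain queue.Queue objects (clearing the internal deque is a pragmatic shortcut that bypasses the queue's own locking):

import queue

def broadcast(queues, data, max_backlog=10):
    for q in queues:
        if q.qsize() > max_backlog:
            q.queue.clear()   # drop stale events instead of letting the backlog grow
        q.put(data)

listener = queue.Queue()
broadcast([listener], {"ptt": True})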

View file

@@ -9,10 +9,7 @@ Created on Wed Dec 23 07:04:24 2020
# pylint: disable=invalid-name, line-too-long, c-extension-no-member
# pylint: disable=import-outside-toplevel
import atexit
import ctypes
import queue
import threading
import time
import codec2
import numpy as np
@@ -20,9 +17,9 @@ import sounddevice as sd
import structlog
import tci
import cw
from queues import RIGCTLD_COMMAND_QUEUE
import audio
import demodulator
import modulator
TESTMODE = False
@@ -44,35 +41,36 @@ class RF:
self.audio_input_device = config['AUDIO']['input_device']
self.audio_output_device = config['AUDIO']['output_device']
self.tx_audio_level = config['AUDIO']['tx_audio_level']
self.enable_audio_auto_tune = config['AUDIO']['enable_auto_tune']
self.tx_delay = config['MODEM']['tx_delay']
self.radiocontrol = config['RADIO']['control']
self.rigctld_ip = config['RIGCTLD']['ip']
self.rigctld_port = config['RIGCTLD']['port']
self.states.setTransmitting(False)
self.ptt_state = False
self.radio_alc = 0.0
self.tci_ip = config['TCI']['tci_ip']
self.tci_port = config['TCI']['tci_port']
self.tx_audio_level = config['AUDIO']['tx_audio_level']
self.rx_audio_level = config['AUDIO']['rx_audio_level']
self.ptt_state = False
self.enqueuing_audio = False # set to True, while we are processing audio
self.AUDIO_SAMPLE_RATE = 48000
self.MODEM_SAMPLE_RATE = codec2.api.FREEDV_FS_8000
self.modem_sample_rate = codec2.api.FREEDV_FS_8000
# 8192 Let's do some tests with very small chunks for TX
self.AUDIO_FRAMES_PER_BUFFER_TX = 1200 if self.radiocontrol in ["tci"] else 2400 * 2
# 8 * (self.AUDIO_SAMPLE_RATE/self.MODEM_SAMPLE_RATE) == 48
#self.AUDIO_FRAMES_PER_BUFFER_TX = 1200 if self.radiocontrol in ["tci"] else 2400 * 2
# 8 * (self.AUDIO_SAMPLE_RATE/self.modem_sample_rate) == 48
self.AUDIO_CHANNELS = 1
self.MODE = 0
self.rms_counter = 0
self.audio_out_queue = queue.Queue()
# Make sure our resampler will work
assert (self.AUDIO_SAMPLE_RATE / self.MODEM_SAMPLE_RATE) == codec2.api.FDMDV_OS_48 # type: ignore
assert (self.AUDIO_SAMPLE_RATE / self.modem_sample_rate) == codec2.api.FDMDV_OS_48 # type: ignore
self.audio_received_queue = queue.Queue()
self.data_queue_received = queue.Queue()
@@ -83,9 +81,12 @@ class RF:
self.data_queue_received,
self.states,
self.event_manager,
self.service_queue,
self.fft_queue
)
self.modulator = modulator.Modulator(self.config)
def tci_tx_callback(self, audio_48k) -> None:
@@ -103,10 +104,6 @@ class RF:
if not self.init_audio():
raise RuntimeError("Unable to init audio devices")
self.demodulator.start(self.sd_input_stream)
atexit.register(self.sd_input_stream.stop)
# Initialize codec2, rig control, and data threads
self.init_codec2()
return True
@@ -152,17 +149,25 @@ class RF:
self.sd_input_stream = sd.InputStream(
channels=1,
dtype="int16",
callback=self.demodulator.sd_input_audio_callback,
callback=self.sd_input_audio_callback,
device=in_dev_index,
samplerate=self.AUDIO_SAMPLE_RATE,
blocksize=4800,
)
self.sd_input_stream.start()
self.sd_output_stream = sd.OutputStream(
channels=1,
dtype="int16",
callback=self.sd_output_audio_callback,
device=out_dev_index,
samplerate=self.AUDIO_SAMPLE_RATE,
blocksize=4800,
)
self.sd_output_stream.start()
return True
except Exception as audioerr:
self.log.error("[MDM] init: starting pyaudio callback failed", e=audioerr)
self.stop_modem()
@@ -185,191 +190,7 @@ class RF:
return True
def audio_auto_tune(self):
# enable / disable AUDIO TUNE Feature / ALC correction
if self.enable_audio_auto_tune:
if self.radio_alc == 0.0:
self.tx_audio_level = self.tx_audio_level + 20
elif 0.0 < self.radio_alc <= 0.1:
print("0.0 < self.radio_alc <= 0.1")
self.tx_audio_level = self.tx_audio_level + 2
self.log.debug("[MDM] AUDIO TUNE", audio_level=str(self.tx_audio_level),
alc_level=str(self.radio_alc))
elif 0.1 < self.radio_alc < 0.2:
print("0.1 < self.radio_alc < 0.2")
self.tx_audio_level = self.tx_audio_level
self.log.debug("[MDM] AUDIO TUNE", audio_level=str(self.tx_audio_level),
alc_level=str(self.radio_alc))
elif 0.2 < self.radio_alc < 0.99:
print("0.2 < self.radio_alc < 0.99")
self.tx_audio_level = self.tx_audio_level - 20
self.log.debug("[MDM] AUDIO TUNE", audio_level=str(self.tx_audio_level),
alc_level=str(self.radio_alc))
elif 1.0 >= self.radio_alc:
print("1.0 >= self.radio_alc")
self.tx_audio_level = self.tx_audio_level - 40
self.log.debug("[MDM] AUDIO TUNE", audio_level=str(self.tx_audio_level),
alc_level=str(self.radio_alc))
else:
self.log.debug("[MDM] AUDIO TUNE", audio_level=str(self.tx_audio_level),
alc_level=str(self.radio_alc))
def transmit(
self, mode, repeats: int, repeat_delay: int, frames: bytearray
) -> bool:
"""
Args:
mode:
repeats:
repeat_delay:
frames:
"""
if TESTMODE:
return
self.demodulator.reset_data_sync()
# get freedv instance by mode
mode_transition = {
codec2.FREEDV_MODE.signalling: self.freedv_datac13_tx,
codec2.FREEDV_MODE.datac0: self.freedv_datac0_tx,
codec2.FREEDV_MODE.datac1: self.freedv_datac1_tx,
codec2.FREEDV_MODE.datac3: self.freedv_datac3_tx,
codec2.FREEDV_MODE.datac4: self.freedv_datac4_tx,
codec2.FREEDV_MODE.datac13: self.freedv_datac13_tx,
}
if mode in mode_transition:
freedv = mode_transition[mode]
else:
print("wrong mode.................")
print(mode)
return False
# Wait for some other thread that might be transmitting
self.states.waitForTransmission()
self.states.setTransmitting(True)
#self.states.channel_busy_event.wait()
start_of_transmission = time.time()
# Open codec2 instance
self.MODE = mode
txbuffer = bytes()
# Add empty data to handle ptt toggle time
if self.tx_delay > 0:
self.transmit_add_silence(txbuffer, self.tx_delay)
self.log.debug(
"[MDM] TRANSMIT", mode=self.MODE.name, delay=self.tx_delay
)
if not isinstance(frames, list): frames = [frames]
for _ in range(repeats):
# Create modulation for all frames in the list
for frame in frames:
txbuffer = self.transmit_add_preamble(txbuffer, freedv)
txbuffer = self.transmit_create_frame(txbuffer, freedv, frame)
txbuffer = self.transmit_add_postamble(txbuffer, freedv)
# Add delay to end of frames
self.transmit_add_silence(txbuffer, repeat_delay)
# Re-sample back up to 48k (resampler works on np.int16)
x = np.frombuffer(txbuffer, dtype=np.int16)
self.audio_auto_tune()
x = audio.set_audio_volume(x, self.tx_audio_level)
if self.radiocontrol not in ["tci"]:
txbuffer_out = self.resampler.resample8_to_48(x)
else:
txbuffer_out = x
# transmit audio
self.transmit_audio(txbuffer_out)
self.radio.set_ptt(False)
self.event_manager.send_ptt_change(False)
self.states.setTransmitting(False)
end_of_transmission = time.time()
transmission_time = end_of_transmission - start_of_transmission
self.log.debug("[MDM] ON AIR TIME", time=transmission_time)
return True
def transmit_add_preamble(self, buffer, freedv):
# Init buffer for preample
n_tx_preamble_modem_samples = codec2.api.freedv_get_n_tx_preamble_modem_samples(
freedv
)
mod_out_preamble = ctypes.create_string_buffer(n_tx_preamble_modem_samples * 2)
# Write preamble to txbuffer
codec2.api.freedv_rawdatapreambletx(freedv, mod_out_preamble)
buffer += bytes(mod_out_preamble)
return buffer
def transmit_add_postamble(self, buffer, freedv):
# Init buffer for postamble
n_tx_postamble_modem_samples = (
codec2.api.freedv_get_n_tx_postamble_modem_samples(freedv)
)
mod_out_postamble = ctypes.create_string_buffer(
n_tx_postamble_modem_samples * 2
)
# Write postamble to txbuffer
codec2.api.freedv_rawdatapostambletx(freedv, mod_out_postamble)
# Append postamble to txbuffer
buffer += bytes(mod_out_postamble)
return buffer
def transmit_add_silence(self, buffer, duration):
data_delay = int(self.MODEM_SAMPLE_RATE * (duration / 1000)) # type: ignore
mod_out_silence = ctypes.create_string_buffer(data_delay * 2)
buffer += bytes(mod_out_silence)
return buffer
def transmit_create_frame(self, txbuffer, freedv, frame):
# Get number of bytes per frame for mode
bytes_per_frame = int(codec2.api.freedv_get_bits_per_modem_frame(freedv) / 8)
payload_bytes_per_frame = bytes_per_frame - 2
# Init buffer for data
n_tx_modem_samples = codec2.api.freedv_get_n_tx_modem_samples(freedv)
mod_out = ctypes.create_string_buffer(n_tx_modem_samples * 2)
# Create buffer for data
# Use this if CRC16 checksum is required (DATAc1-3)
buffer = bytearray(payload_bytes_per_frame)
# Set buffersize to length of data which will be send
buffer[: len(frame)] = frame # type: ignore
# Create crc for data frame -
# Use the crc function shipped with codec2
# to avoid CRC algorithm incompatibilities
# Generate CRC16
crc = ctypes.c_ushort(
codec2.api.freedv_gen_crc16(bytes(buffer), payload_bytes_per_frame)
)
# Convert crc to 2-byte (16-bit) hex string
crc = crc.value.to_bytes(2, byteorder="big")
# Append CRC to data buffer
buffer += crc
assert (bytes_per_frame == len(buffer))
data = (ctypes.c_ubyte * bytes_per_frame).from_buffer_copy(buffer)
# modulate DATA and save it into mod_out pointer
codec2.api.freedv_rawdatatx(freedv, mod_out, data)
txbuffer += bytes(mod_out)
return txbuffer
def transmit_morse(self, repeats, repeat_delay, frames):
self.states.waitForTransmission()
@@ -380,31 +201,54 @@ class RF:
)
start_of_transmission = time.time()
txbuffer_out = cw.MorseCodePlayer().text_to_signal("DJ2LS-1")
txbuffer_out = cw.MorseCodePlayer().text_to_signal(self.config['STATION'].mycall)
self.transmit_audio(txbuffer_out)
self.radio.set_ptt(False)
self.event_manager.send_ptt_change(False)
self.states.setTransmitting(False)
# transmit audio
self.enqueue_audio_out(txbuffer_out)
end_of_transmission = time.time()
transmission_time = end_of_transmission - start_of_transmission
self.log.debug("[MDM] ON AIR TIME", time=transmission_time)
def init_codec2(self):
# Open codec2 instances
# INIT TX MODES - here we need all modes.
self.freedv_datac0_tx = codec2.open_instance(codec2.FREEDV_MODE.datac0.value)
self.freedv_datac1_tx = codec2.open_instance(codec2.FREEDV_MODE.datac1.value)
self.freedv_datac3_tx = codec2.open_instance(codec2.FREEDV_MODE.datac3.value)
self.freedv_datac4_tx = codec2.open_instance(codec2.FREEDV_MODE.datac4.value)
self.freedv_datac13_tx = codec2.open_instance(codec2.FREEDV_MODE.datac13.value)
def transmit(
self, mode, repeats: int, repeat_delay: int, frames: bytearray
) -> bool:
self.demodulator.reset_data_sync()
# Wait for some other thread that might be transmitting
self.states.waitForTransmission()
self.states.setTransmitting(True)
# self.states.channel_busy_event.wait()
start_of_transmission = time.time()
txbuffer = self.modulator.create_burst(mode, repeats, repeat_delay, frames)
# Re-sample back up to 48k (resampler works on np.int16)
x = np.frombuffer(txbuffer, dtype=np.int16)
x = audio.set_audio_volume(x, self.tx_audio_level)
if self.radiocontrol not in ["tci"]:
txbuffer_out = self.resampler.resample8_to_48(x)
else:
txbuffer_out = x
# transmit audio
self.enqueue_audio_out(txbuffer_out)
end_of_transmission = time.time()
transmission_time = end_of_transmission - start_of_transmission
self.log.debug("[MDM] ON AIR TIME", time=transmission_time)
# Low level modem audio transmit
def transmit_audio(self, audio_48k) -> None:
def enqueue_audio_out(self, audio_48k) -> None:
self.enqueuing_audio = True
if not self.states.isTransmitting():
self.states.setTransmitting(True)
self.radio.set_ptt(True)
self.event_manager.send_ptt_change(True)
@@ -413,5 +257,72 @@ class RF:
# we need to wait manually for tci processing
self.tci_module.wait_until_transmitted(audio_48k)
else:
sd.play(audio_48k, blocksize=4096, blocking=True)
# slice audio data to needed blocklength
block_size = 4800
pad_length = -len(audio_48k) % block_size
padded_data = np.pad(audio_48k, (0, pad_length), mode='constant')
sliced_audio_data = padded_data.reshape(-1, block_size)
# add each block to audio out queue
for block in sliced_audio_data:
self.audio_out_queue.put(block)
self.enqueuing_audio = False
self.states.transmitting_event.wait()
self.radio.set_ptt(False)
self.event_manager.send_ptt_change(False)
return
def sd_output_audio_callback(self, outdata: np.ndarray, frames: int, time, status) -> None:
try:
if not self.audio_out_queue.empty():
chunk = self.audio_out_queue.get_nowait()
audio.calculate_fft(chunk, self.fft_queue, self.states)
outdata[:] = chunk.reshape(outdata.shape)
else:
# reset transmitting state only, if we are not actively processing audio
# for avoiding a ptt toggle state bug
if not self.enqueuing_audio:
self.states.setTransmitting(False)
# Fill with zeros if the queue is empty
outdata.fill(0)
except Exception as e:
self.log.warning("[AUDIO STATUS]", status=status, time=time, frames=frames, e=e)
outdata.fill(0)
def sd_input_audio_callback(self, indata: np.ndarray, frames: int, time, status) -> None:
if status:
self.log.warning("[AUDIO STATUS]", status=status, time=time, frames=frames)
# FIXME on windows input overflows crashing the rx audio stream. Lets restart the server then
#if status.input_overflow:
# self.service_queue.put("restart")
return
try:
audio_48k = np.frombuffer(indata, dtype=np.int16)
audio_8k = self.resampler.resample48_to_8(audio_48k)
audio_8k_level_adjusted = audio.set_audio_volume(audio_8k, self.rx_audio_level)
if not self.states.isTransmitting():
audio.calculate_fft(audio_8k_level_adjusted, self.fft_queue, self.states)
length_audio_8k_level_adjusted = len(audio_8k_level_adjusted)
# Avoid buffer overflow by filling only if buffer for
# selected datachannel mode is not full
index = 0
for mode in self.demodulator.MODE_DICT:
mode_data = self.demodulator.MODE_DICT[mode]
audiobuffer = mode_data['audio_buffer']
decode = mode_data['decode']
index += 1
if audiobuffer:
if (audiobuffer.nbuffer + length_audio_8k_level_adjusted) > audiobuffer.size:
self.demodulator.buffer_overflow_counter[index] += 1
self.event_manager.send_buffer_overflow(self.demodulator.buffer_overflow_counter)
elif decode:
audiobuffer.push(audio_8k_level_adjusted)
except Exception as e:
self.log.warning("[AUDIO EXCEPTION]", status=status, time=time, frames=frames, e=e)
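
Taken together, the modem.py changes move both audio directions onto persistent sounddevice streams: the input callback now lives on RF (with the FFT skipped while transmitting), and TX audio is no longer played with blocking sd.play() calls but queued for sd_output_audio_callback in fixed 4800-sample blocks. A standalone sketch of the pad-and-slice step used by enqueue_audio_out (function name is illustrative):

import queue
import numpy as np

def slice_into_blocks(audio_48k: np.ndarray, block_size: int = 4800) -> np.ndarray:
    pad_length = -len(audio_48k) % block_size            # samples needed to reach the next multiple
    padded = np.pad(audio_48k, (0, pad_length), mode="constant")
    return padded.reshape(-1, block_size)                # one row per output block

audio_out_queue = queue.Queue()
for block in slice_into_blocks(np.zeros(10_000, dtype=np.int16)):
    audio_out_queue.put(block)                           # consumed block-by-block by the output callback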

modem/modulator.py (new file, 150 lines)
View file

@@ -0,0 +1,150 @@
import ctypes
import codec2
import structlog
class Modulator:
log = structlog.get_logger("RF")
def __init__(self, config):
self.config = config
self.tx_delay = config['MODEM']['tx_delay']
self.modem_sample_rate = codec2.api.FREEDV_FS_8000
# Initialize codec2, rig control, and data threads
self.init_codec2()
def init_codec2(self):
# Open codec2 instances
# INIT TX MODES - here we need all modes.
self.freedv_datac0_tx = codec2.open_instance(codec2.FREEDV_MODE.datac0.value)
self.freedv_datac1_tx = codec2.open_instance(codec2.FREEDV_MODE.datac1.value)
self.freedv_datac3_tx = codec2.open_instance(codec2.FREEDV_MODE.datac3.value)
self.freedv_datac4_tx = codec2.open_instance(codec2.FREEDV_MODE.datac4.value)
self.freedv_datac13_tx = codec2.open_instance(codec2.FREEDV_MODE.datac13.value)
def transmit_add_preamble(self, buffer, freedv):
# Init buffer for preample
n_tx_preamble_modem_samples = codec2.api.freedv_get_n_tx_preamble_modem_samples(
freedv
)
mod_out_preamble = ctypes.create_string_buffer(n_tx_preamble_modem_samples * 2)
# Write preamble to txbuffer
codec2.api.freedv_rawdatapreambletx(freedv, mod_out_preamble)
buffer += bytes(mod_out_preamble)
return buffer
def transmit_add_postamble(self, buffer, freedv):
# Init buffer for postamble
n_tx_postamble_modem_samples = (
codec2.api.freedv_get_n_tx_postamble_modem_samples(freedv)
)
mod_out_postamble = ctypes.create_string_buffer(
n_tx_postamble_modem_samples * 2
)
# Write postamble to txbuffer
codec2.api.freedv_rawdatapostambletx(freedv, mod_out_postamble)
# Append postamble to txbuffer
buffer += bytes(mod_out_postamble)
return buffer
def transmit_add_silence(self, buffer, duration):
data_delay = int(self.modem_sample_rate * (duration / 1000)) # type: ignore
mod_out_silence = ctypes.create_string_buffer(data_delay * 2)
buffer += bytes(mod_out_silence)
return buffer
def transmit_create_frame(self, txbuffer, freedv, frame):
# Get number of bytes per frame for mode
bytes_per_frame = int(codec2.api.freedv_get_bits_per_modem_frame(freedv) / 8)
payload_bytes_per_frame = bytes_per_frame - 2
# Init buffer for data
n_tx_modem_samples = codec2.api.freedv_get_n_tx_modem_samples(freedv)
mod_out = ctypes.create_string_buffer(n_tx_modem_samples * 2)
# Create buffer for data
# Use this if CRC16 checksum is required (DATAc1-3)
buffer = bytearray(payload_bytes_per_frame)
# Set buffersize to length of data which will be send
buffer[: len(frame)] = frame # type: ignore
# Create crc for data frame -
# Use the crc function shipped with codec2
# to avoid CRC algorithm incompatibilities
# Generate CRC16
crc = ctypes.c_ushort(
codec2.api.freedv_gen_crc16(bytes(buffer), payload_bytes_per_frame)
)
# Convert crc to 2-byte (16-bit) hex string
crc = crc.value.to_bytes(2, byteorder="big")
# Append CRC to data buffer
buffer += crc
assert (bytes_per_frame == len(buffer))
data = (ctypes.c_ubyte * bytes_per_frame).from_buffer_copy(buffer)
# modulate DATA and save it into mod_out pointer
codec2.api.freedv_rawdatatx(freedv, mod_out, data)
txbuffer += bytes(mod_out)
return txbuffer
def create_burst(
self, mode, repeats: int, repeat_delay: int, frames: bytearray
) -> bool:
"""
Args:
mode:
repeats:
repeat_delay:
frames:
"""
# get freedv instance by mode
mode_transition = {
codec2.FREEDV_MODE.signalling: self.freedv_datac13_tx,
codec2.FREEDV_MODE.datac0: self.freedv_datac0_tx,
codec2.FREEDV_MODE.datac1: self.freedv_datac1_tx,
codec2.FREEDV_MODE.datac3: self.freedv_datac3_tx,
codec2.FREEDV_MODE.datac4: self.freedv_datac4_tx,
codec2.FREEDV_MODE.datac13: self.freedv_datac13_tx,
}
if mode in mode_transition:
freedv = mode_transition[mode]
else:
print("wrong mode.................")
print(mode)
return False
# Open codec2 instance
self.MODE = mode
self.log.debug(
"[MDM] TRANSMIT", mode=self.MODE.name, delay=self.tx_delay
)
txbuffer = bytes()
# Add empty data to handle ptt toggle time
if self.tx_delay > 0:
txbuffer = self.transmit_add_silence(txbuffer, self.tx_delay)
if not isinstance(frames, list): frames = [frames]
for _ in range(repeats):
# Create modulation for all frames in the list
for frame in frames:
txbuffer = self.transmit_add_preamble(txbuffer, freedv)
txbuffer = self.transmit_create_frame(txbuffer, freedv, frame)
txbuffer = self.transmit_add_postamble(txbuffer, freedv)
# Add delay to end of frames
txbuffer = self.transmit_add_silence(txbuffer, repeat_delay)
return txbuffer
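
With modem/modulator.py in place, everything that only builds modulated audio (preamble, frame plus CRC16, postamble, inter-frame silence) lives in the Modulator class, while RF keeps the radio-facing work: audio level, 8 kHz to 48 kHz resampling, PTT and the audio-out queue. A hedged usage sketch of the new split (the config dict is a trimmed stand-in for the real configuration, and the payload size is a placeholder):

import codec2
import modulator

config = {"MODEM": {"tx_delay": 50}}
mod = modulator.Modulator(config)

frame = bytes(14)   # placeholder; real callers size this to the mode's payload_bytes_per_frame
txbuffer = mod.create_burst(codec2.FREEDV_MODE.datac13, repeats=1, repeat_delay=100, frames=frame)
# RF.transmit() then volume-adjusts, resamples the buffer to 48 kHz and enqueues it for playback.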

View file

@@ -1,3 +1,5 @@
import time
from flask import Flask, request, jsonify, make_response, abort, Response
from flask_sock import Sock
from flask_cors import CORS
@@ -20,6 +22,8 @@ import command_test
import command_arq_raw
import command_message_send
import event_manager
import atexit
from message_system_db_manager import DatabaseManager
from message_system_db_messages import DatabaseManagerMessages
from message_system_db_attachments import DatabaseManagerAttachments
@@ -29,7 +33,7 @@ from schedule_manager import ScheduleManager
app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": "*"}})
sock = Sock(app)
MODEM_VERSION = "0.14.3-alpha"
MODEM_VERSION = "0.14.4-alpha"
# set config file to use
def set_config():
@@ -320,7 +324,18 @@ def sock_fft(sock):
def sock_states(sock):
wsm.handle_connection(sock, wsm.states_client_list, app.state_queue)
@atexit.register
def stop_server():
try:
app.service_manager.stop_modem()
if app.service_manager.modem:
app.service_manager.modem.sd_input_stream.stop
audio.sd._terminate()
except Exception as e:
print("Error stopping modem")
time.sleep(1)
print("------------------------------------------")
print('Server shutdown...')
if __name__ == "__main__":
app.config['SOCK_SERVER_OPTIONS'] = {'ping_interval': 10}
@@ -353,7 +368,8 @@ if __name__ == "__main__":
modemport = conf['NETWORK']['modemport']
if not modemaddress:
modemaddress = '0.0.0.0'
modemaddress = '127.0.0.1'
if not modemport:
modemport = 5000
app.run(modemaddress, modemport)
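
The last hunk also tightens the fallback bind address: when NETWORK/modemaddress is left empty, the API server now listens on loopback only rather than on every interface, so remote access has to be configured explicitly. A minimal sketch of that fallback (values are illustrative; the real ones come from the config file):

modemaddress = ""            # e.g. an unset NETWORK/modemaddress entry
modemport = 0                # e.g. an unset NETWORK/modemport entry

if not modemaddress:
    modemaddress = "127.0.0.1"   # previously fell back to "0.0.0.0" (all interfaces)
if not modemport:
    modemport = 5000

# app.run(modemaddress, modemport)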