From 6da24539122e58513cebf6e9ec9106ebba065f62 Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Mon, 27 Mar 2023 10:35:42 +0200 Subject: [PATCH 01/28] check for tx chrono --- tnc/tci.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/tnc/tci.py b/tnc/tci.py index d1ee0699..029aac48 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -32,6 +32,11 @@ class TCI: ) tci_thread.start() + # flag if we're receiving a tx_chrono + self.tx_chrono = False + + + def connect(self): self.log.info( "[TCI] Starting TCI thread!", ip=self.hostname, port=self.port @@ -49,13 +54,23 @@ class TCI: #rel.dispatch() def on_message(self, ws, message): + + # ready message + # we need to wait until radio is ready before we can push commands if message == "ready;": self.ws.send('audio_samplerate:8000;') self.ws.send('audio_stream_channels:1;') - self.ws.send('AUDIO_STREAM_SAMPLE_TYPE:int16;') - self.ws.send('AUDIO_STREAM_SAMPLES:1200;') + self.ws.send('audio_stream_sample_type:int16;') + self.ws.send('audio_stream_samples:1200;') self.ws.send('audio_start:0;') + # tx chrono frame + if len(message) in {64}: + type = int.from_bytes(message[24:28], "little") + if type == 3: + self.tx_chrono = True + + # audio frame if len(message) in {576, 2464, 4160}: # audio received receiver = message[:4] From 94772a72710175dfb931be8eb1afae8577421516 Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Mon, 27 Mar 2023 10:48:14 +0200 Subject: [PATCH 02/28] add tx frame format - broken --- tnc/modem.py | 4 ++-- tnc/tci.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index d8338d47..cd10eb7d 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -335,8 +335,8 @@ class RF: # -----write if len(self.modoutqueue) > 0 and not self.mod_out_locked: - data_out48k = self.modoutqueue.popleft() - self.tci_module.push_audio(data_out48k) + data_out = self.modoutqueue.popleft() + self.tci_module.push_audio(data_out) def tci_rx_callback(self) -> None: """ diff --git a/tnc/tci.py b/tnc/tci.py index 029aac48..605f4c8e 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -103,6 +103,7 @@ class TCI: ) def on_open(self, ws): + self.ws = ws self.log.info( "[TCI] Connected FreeDATA to TCI rig!", ip=self.hostname, port=self.port ) @@ -111,3 +112,32 @@ class TCI: self.log.info( "[TCI] Init...", ip=self.hostname, port=self.port ) + + def push_audio(self, data_out): + + audio = bytearray(4096 + 64) + """ + # audio[:4] = receiver.to_bytes(4,byteorder='little', signed=False) + audio[4:8] = sample_rate.to_bytes(4, byteorder='little', signed=False) + audio[8:12] = format.to_bytes(4, byteorder='little', signed=False) + audio[12:16] = codec.to_bytes(4, byteorder='little', signed=False) + audio[16:20] = crc.to_bytes(4, byteorder='little', signed=False) + audio[20:24] = audio_length.to_bytes(4, byteorder='little', signed=False) + audio[24:28] = int(2).to_bytes(4, byteorder='little', signed=True) + audio[28:32] = channel.to_bytes(4, byteorder='little', signed=False) + audio[32:36] = reserved1.to_bytes(4, byteorder='little', signed=False) + audio[36:40] = reserved2.to_bytes(4, byteorder='little', signed=False) + audio[40:44] = reserved3.to_bytes(4, byteorder='little', signed=False) + audio[44:48] = reserved4.to_bytes(4, byteorder='little', signed=False) + audio[48:52] = reserved5.to_bytes(4, byteorder='little', signed=False) + audio[52:56] = reserved6.to_bytes(4, byteorder='little', signed=False) + audio[56:60] 
= reserved7.to_bytes(4, byteorder='little', signed=False) + audio[60:64] = reserved8.to_bytes(4, byteorder='little', signed=False) + """ + self.ws.send(audio, websocket.ABNF.OPCODE_BINARY) + + def set_ptt(self, state): + if state: + self.ws.send('trx:0,true,tci;') + else: + self.ws.send('trx:0,false;') \ No newline at end of file From 4d86315b307bc45494b22b9fdc09a774c366baec Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 07:47:31 +0200 Subject: [PATCH 03/28] changed hamlib naming to radio --- tnc/modem.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index cd10eb7d..a443dfe6 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -254,11 +254,13 @@ class RF: sys.exit(1) elif static.HAMLIB_RADIOCONTROL == "rigctld": import rigctld as rig + elif static.AUDIO_ENABLE_TCI: + rig = self.tci_module else: import rigdummy as rig - self.hamlib = rig.radio() - self.hamlib.open_rig( + self.radio = rig.radio() + self.radio.open_rig( rigctld_ip=static.HAMLIB_RIGCTLD_IP, rigctld_port=static.HAMLIB_RIGCTLD_PORT, ) @@ -469,7 +471,7 @@ class RF: if not static.PTT_STATE: # TODO: Moved to this place for testing # Maybe we can avoid moments of silence before transmitting - static.PTT_STATE = self.hamlib.set_ptt(True) + static.PTT_STATE = self.radio.set_ptt(True) jsondata = {"ptt": "True"} data_out = json.dumps(jsondata) sock.SOCKET_QUEUE.put(data_out) @@ -528,7 +530,7 @@ class RF: start_of_transmission = time.time() # TODO: Moved ptt toggle some steps before audio is ready for testing # Toggle ptt early to save some time and send ptt state via socket - # static.PTT_STATE = self.hamlib.set_ptt(True) + # static.PTT_STATE = self.radio.set_ptt(True) # jsondata = {"ptt": "True"} # data_out = json.dumps(jsondata) # sock.SOCKET_QUEUE.put(data_out) @@ -681,7 +683,7 @@ class RF: # if we're transmitting FreeDATA signals, reset channel busy state static.CHANNEL_BUSY = False - static.PTT_STATE = self.hamlib.set_ptt(False) + static.PTT_STATE = self.radio.set_ptt(False) # Push ptt state to socket stream jsondata = {"ptt": "False"} @@ -1051,10 +1053,10 @@ class RF: cmd = RIGCTLD_COMMAND_QUEUE.get() if cmd[0] == "set_frequency": # [1] = Frequency - self.hamlib.set_frequency(cmd[1]) + self.radio.set_frequency(cmd[1]) if cmd[0] == "set_mode": # [1] = Mode - self.hamlib.set_mode(cmd[1]) + self.radio.set_mode(cmd[1]) def update_rig_data(self) -> None: """ @@ -1067,20 +1069,20 @@ class RF: while True: # this looks weird, but is necessary for avoiding rigctld packet colission sock threading.Event().wait(0.25) - static.HAMLIB_FREQUENCY = self.hamlib.get_frequency() + static.HAMLIB_FREQUENCY = self.radio.get_frequency() threading.Event().wait(0.1) - static.HAMLIB_MODE = self.hamlib.get_mode() + static.HAMLIB_MODE = self.radio.get_mode() threading.Event().wait(0.1) - static.HAMLIB_BANDWIDTH = self.hamlib.get_bandwidth() + static.HAMLIB_BANDWIDTH = self.radio.get_bandwidth() threading.Event().wait(0.1) - static.HAMLIB_STATUS = self.hamlib.get_status() + static.HAMLIB_STATUS = self.radio.get_status() threading.Event().wait(0.1) if static.TRANSMITTING: - static.HAMLIB_ALC = self.hamlib.get_alc() + static.HAMLIB_ALC = self.radio.get_alc() threading.Event().wait(0.1) - #static.HAMLIB_RF = self.hamlib.get_level() + #static.HAMLIB_RF = self.radio.get_level() #threading.Event().wait(0.1) - static.HAMLIB_STRENGTH = self.hamlib.get_strength() + static.HAMLIB_STRENGTH = self.radio.get_strength() #print(f"ALC: 
{static.HAMLIB_ALC}, RF: {static.HAMLIB_RF}, STRENGTH: {static.HAMLIB_STRENGTH}") From 790068d2d13cb6ae55f8c90b571c6010d5fc0c24 Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 07:50:05 +0200 Subject: [PATCH 04/28] added needed functions to tci from rigdummy --- tnc/tci.py | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/tnc/tci.py b/tnc/tci.py index 605f4c8e..849e696c 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -140,4 +140,77 @@ class TCI: if state: self.ws.send('trx:0,true,tci;') else: - self.ws.send('trx:0,false;') \ No newline at end of file + self.ws.send('trx:0,false;') + + def get_frequency(self): + """ """ + return None + + def get_mode(self): + """ """ + return None + + def get_level(self): + """ """ + return None + + def get_alc(self): + """ """ + return None + + def get_meter(self): + """ """ + return None + + def get_bandwidth(self): + """ """ + return None + + def get_strength(self): + """ """ + return None + + def set_bandwidth(self): + """ """ + return None + + def set_mode(self, mode): + """ + + Args: + mode: + + Returns: + + """ + return None + + def set_frequency(self, frequency): + """ + + Args: + frequency: + + Returns: + + """ + return None + + def get_status(self): + """ + + Args: + mode: + + Returns: + + """ + return "connected" + + def get_ptt(self): + """ """ + return None + + def close_rig(self): + """ """ + return From d0f6038dbc8cfe5735836a0a938205b66e88393d Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 08:22:12 +0200 Subject: [PATCH 05/28] preparing for small test with dummy data --- tnc/modem.py | 2 -- tnc/tci.py | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 51 insertions(+), 4 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index a443dfe6..e696680a 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -334,8 +334,6 @@ class RF: while True: threading.Event().wait(0.01) - - # -----write if len(self.modoutqueue) > 0 and not self.mod_out_locked: data_out = self.modoutqueue.popleft() self.tci_module.push_audio(data_out) diff --git a/tnc/tci.py b/tnc/tci.py index 849e696c..424c8f3f 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -4,6 +4,7 @@ import structlog import threading import websocket +import numpy as np from queues import AUDIO_TRANSMIT_QUEUE, AUDIO_RECEIVED_QUEUE """ @@ -35,6 +36,11 @@ class TCI: # flag if we're receiving a tx_chrono self.tx_chrono = False + # audio related parameters, will be updated by tx chrono + self.sample_rate = None + self.format = None + self.codec = None + self.audio_length = None def connect(self): @@ -66,10 +72,30 @@ class TCI: # tx chrono frame if len(message) in {64}: + receiver = message[:4] + sample_rate = int.from_bytes(message[4:8], "little") + format = int.from_bytes(message[8:12], "little") + codec = int.from_bytes(message[12:16], "little") + crc = int.from_bytes(message[16:20], "little") + audio_length = int.from_bytes(message[20:24], "little") type = int.from_bytes(message[24:28], "little") + channel = int.from_bytes(message[28:32], "little") + reserved1 = int.from_bytes(message[32:36], "little") + reserved2 = int.from_bytes(message[36:40], "little") + reserved3 = int.from_bytes(message[40:44], "little") + reserved4 = int.from_bytes(message[44:48], "little") + reserved5 = int.from_bytes(message[48:52], "little") + reserved6 = int.from_bytes(message[52:56], "little") + reserved7 = int.from_bytes(message[56:60], 
"little") + reserved8 = int.from_bytes(message[60:64], "little") if type == 3: self.tx_chrono = True + self.sample_rate = sample_rate + self.format = format + self.codec = codec + self.audio_length = audio_length + # audio frame if len(message) in {576, 2464, 4160}: # audio received @@ -114,8 +140,8 @@ class TCI: ) def push_audio(self, data_out): + print(data_out) - audio = bytearray(4096 + 64) """ # audio[:4] = receiver.to_bytes(4,byteorder='little', signed=False) audio[4:8] = sample_rate.to_bytes(4, byteorder='little', signed=False) @@ -134,7 +160,30 @@ class TCI: audio[56:60] = reserved7.to_bytes(4, byteorder='little', signed=False) audio[60:64] = reserved8.to_bytes(4, byteorder='little', signed=False) """ - self.ws.send(audio, websocket.ABNF.OPCODE_BINARY) + + print(self.audio_length) + print(self.tx_chrono) + print(self.format) + print(self.codec) + + if self.tx_chrono: + # dummy for now ... + audio = bytearray(4096 + 64) + + # generate sine wave + rate = 8000 # samples per second + T = 3 # sample duration (seconds) + # n = int(rate*T) # number of samples + n = 1200 + t = np.arange(n) / rate # grid of time values + + f = 440.0 # sound frequency (Hz) + x = np.sin(2 * np.pi * f * t) + + # print(len(x)) + audio[64:] = bytes(x) + audio[24:28] = int(2).to_bytes(4, byteorder='little', signed=True) + self.ws.send(audio, websocket.ABNF.OPCODE_BINARY) def set_ptt(self, state): if state: From 5d6fcccbb9246602f594af669f9b13c0e96c0368 Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 08:24:57 +0200 Subject: [PATCH 06/28] fixing hamlib vs tci --- tnc/modem.py | 11 ++++++----- tnc/tci.py | 1 - 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index e696680a..3c387fd0 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -259,11 +259,12 @@ class RF: else: import rigdummy as rig - self.radio = rig.radio() - self.radio.open_rig( - rigctld_ip=static.HAMLIB_RIGCTLD_IP, - rigctld_port=static.HAMLIB_RIGCTLD_PORT, - ) + if not static.AUDIO_ENABLE_TCI: + self.radio = rig.radio() + self.radio.open_rig( + rigctld_ip=static.HAMLIB_RIGCTLD_IP, + rigctld_port=static.HAMLIB_RIGCTLD_PORT, + ) # --------------------------------------------START DECODER THREAD if static.ENABLE_FFT: diff --git a/tnc/tci.py b/tnc/tci.py index 424c8f3f..59530bfb 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -42,7 +42,6 @@ class TCI: self.codec = None self.audio_length = None - def connect(self): self.log.info( "[TCI] Starting TCI thread!", ip=self.hostname, port=self.port From 66c36c9e39861864690dbe639dd46e28db7cb1ba Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 08:37:43 +0200 Subject: [PATCH 07/28] fixing hamlib vs tci --- tnc/modem.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tnc/modem.py b/tnc/modem.py index 3c387fd0..5f4cbd3b 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -255,7 +255,7 @@ class RF: elif static.HAMLIB_RADIOCONTROL == "rigctld": import rigctld as rig elif static.AUDIO_ENABLE_TCI: - rig = self.tci_module + self.radio = self.tci_module else: import rigdummy as rig From 7ee409c4ac94d6dfccca68a61fa99a15d575383e Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 08:41:18 +0200 Subject: [PATCH 08/28] adjusted ptt trigger for tci --- tnc/modem.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tnc/modem.py b/tnc/modem.py index 5f4cbd3b..59b3e0b6 100644 --- a/tnc/modem.py +++ 
b/tnc/modem.py @@ -335,7 +335,12 @@ class RF: while True: threading.Event().wait(0.01) - if len(self.modoutqueue) > 0 and not self.mod_out_locked: + if len(self.modoutqustatic.PTT_STATEeue) > 0 and not self.mod_out_locked: + = self.radio.set_ptt(True) + jsondata = {"ptt": "True"} + data_out = json.dumps(jsondata) + sock.SOCKET_QUEUE.put(data_out) + data_out = self.modoutqueue.popleft() self.tci_module.push_audio(data_out) From a8e4e1ee2f74c0cad239399d7d6d189dcbb45dbc Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 08:45:23 +0200 Subject: [PATCH 09/28] adjusted ptt trigger for tci --- tnc/modem.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index 59b3e0b6..9e3438c8 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -335,8 +335,8 @@ class RF: while True: threading.Event().wait(0.01) - if len(self.modoutqustatic.PTT_STATEeue) > 0 and not self.mod_out_locked: - = self.radio.set_ptt(True) + if len(self.modoutqueue) > 0 and not self.mod_out_locked: + static.PTT_STATE = self.radio.set_ptt(True) jsondata = {"ptt": "True"} data_out = json.dumps(jsondata) sock.SOCKET_QUEUE.put(data_out) From 263b23f916b50499ad2629ca4c30885c7aa54496 Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 11:21:58 +0200 Subject: [PATCH 10/28] fixed TX - not working yet because of too early interruption of transmission --- tnc/modem.py | 6 +++++- tnc/tci.py | 58 +++++++++++++++++++++++++++++++++++----------------- 2 files changed, 44 insertions(+), 20 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index 9e3438c8..661821a9 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -62,9 +62,13 @@ class RF: self.AUDIO_SAMPLE_RATE_RX = 48000 self.AUDIO_SAMPLE_RATE_TX = 48000 self.MODEM_SAMPLE_RATE = codec2.api.FREEDV_FS_8000 + self.AUDIO_FRAMES_PER_BUFFER_RX = 2400 * 2 # 8192 # 8192 Let's do some tests with very small chunks for TX - self.AUDIO_FRAMES_PER_BUFFER_TX = 2400 * 2 + if not static.AUDIO_ENABLE_TCI: + self.AUDIO_FRAMES_PER_BUFFER_TX = 2400 * 2 + else: + self.AUDIO_FRAMES_PER_BUFFER_TX = 1200 # 8 * (self.AUDIO_SAMPLE_RATE_RX/self.MODEM_SAMPLE_RATE) == 48 self.AUDIO_CHANNELS = 1 diff --git a/tnc/tci.py b/tnc/tci.py index 59530bfb..2b88cccf 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -5,6 +5,7 @@ import structlog import threading import websocket import numpy as np +import time from queues import AUDIO_TRANSMIT_QUEUE, AUDIO_RECEIVED_QUEUE """ @@ -13,6 +14,7 @@ trx:0,false; """ + class TCI: def __init__(self, hostname='127.0.0.1', port=50001): # websocket.enableTrace(True) @@ -41,6 +43,8 @@ class TCI: self.format = None self.codec = None self.audio_length = None + self.crc = None + self.channel = None def connect(self): self.log.info( @@ -55,8 +59,8 @@ class TCI: ) self.ws.run_forever(reconnect=5) # Set dispatcher to automatic reconnection, 5 second reconnect delay if con> - #rel.signal(2, rel.abort) # Keyboard Interrupt - #rel.dispatch() + # rel.signal(2, rel.abort) # Keyboard Interrupt + # rel.dispatch() def on_message(self, ws, message): @@ -94,6 +98,8 @@ class TCI: self.format = format self.codec = codec self.audio_length = audio_length + self.channel = channel + self.crc = crc # audio frame if len(message) in {576, 2464, 4160}: @@ -124,7 +130,8 @@ class TCI: def on_close(self, ws, close_status_code, close_msg): self.log.warning( - "[TCI] Closed FreeDATA to TCI connection!", ip=self.hostname, port=self.port, statu=close_status_code, msg=close_msg + "[TCI] Closed 
FreeDATA to TCI connection!", ip=self.hostname, port=self.port, statu=close_status_code, + msg=close_msg ) def on_open(self, ws): @@ -133,7 +140,6 @@ class TCI: "[TCI] Connected FreeDATA to TCI rig!", ip=self.hostname, port=self.port ) - self.log.info( "[TCI] Init...", ip=self.hostname, port=self.port ) @@ -160,35 +166,49 @@ class TCI: audio[60:64] = reserved8.to_bytes(4, byteorder='little', signed=False) """ + while not self.tx_chrono: + time.sleep(0.01) + + print(len(data_out)) + print(self.sample_rate) print(self.audio_length) - print(self.tx_chrono) - print(self.format) + print(self.channel) + print(self.crc) print(self.codec) + print(self.tx_chrono) if self.tx_chrono: - # dummy for now ... + print("#############") + print(len(data_out)) + print(len(bytes(data_out))) + print("-------------") audio = bytearray(4096 + 64) - # generate sine wave - rate = 8000 # samples per second - T = 3 # sample duration (seconds) - # n = int(rate*T) # number of samples - n = 1200 - t = np.arange(n) / rate # grid of time values + audio[64:64 + len(bytes(data_out))] = bytes(data_out) + audio[4:8] = self.sample_rate.to_bytes(4, byteorder='little', signed=False) + # audio[8:12] = format.to_bytes(4,byteorder='little', signed=False) + audio[12:16] = self.codec.to_bytes(4, byteorder='little', signed=False) + audio[16:20] = self.crc.to_bytes(4, byteorder='little', signed=False) + audio[20:24] = self.audio_length.to_bytes(4, byteorder='little', signed=False) + audio[24:28] = int(2).to_bytes(4, byteorder='little', signed=False) + audio[28:32] = self.channel.to_bytes(4, byteorder='little', signed=False) + # audio[32:36] = reserved1.to_bytes(4,byteorder='little', signed=False) + # audio[36:40] = reserved2.to_bytes(4,byteorder='little', signed=False) + # audio[40:44] = reserved3.to_bytes(4,byteorder='little', signed=False) + # audio[44:48] = reserved4.to_bytes(4,byteorder='little', signed=False) + # audio[48:52] = reserved5.to_bytes(4,byteorder='little', signed=False) + # audio[52:56] = reserved6.to_bytes(4,byteorder='little', signed=False) + # audio[56:60] = reserved7.to_bytes(4,byteorder='little', signed=False) - f = 440.0 # sound frequency (Hz) - x = np.sin(2 * np.pi * f * t) - - # print(len(x)) - audio[64:] = bytes(x) - audio[24:28] = int(2).to_bytes(4, byteorder='little', signed=True) self.ws.send(audio, websocket.ABNF.OPCODE_BINARY) def set_ptt(self, state): if state: self.ws.send('trx:0,true,tci;') else: + self.ws.send('trx:0,false;') + self.tx_chrono = False def get_frequency(self): """ """ From d8aba44f4e7b95771baf040a49d53c6bd98ddc86 Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 12:01:36 +0200 Subject: [PATCH 11/28] wait manually as workaroung for missing information from radio --- tnc/tci.py | 1485 ++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 1257 insertions(+), 228 deletions(-) diff --git a/tnc/tci.py b/tnc/tci.py index 2b88cccf..06dded35 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -1,284 +1,1313 @@ #!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Wed Dec 23 07:04:24 2020 +@author: DJ2LS +""" -import structlog +# pylint: disable=invalid-name, line-too-long, c-extension-no-member +# pylint: disable=import-outside-toplevel + +import atexit +import ctypes +import os +import sys import threading -import websocket -import numpy as np import time -from queues import AUDIO_TRANSMIT_QUEUE, AUDIO_RECEIVED_QUEUE +from collections import deque +import wave +import codec2 +import itertools +import numpy as np +import sock 
+import sounddevice as sd +import static +import structlog +import ujson as json +import tci +from queues import DATA_QUEUE_RECEIVED, MODEM_RECEIVED_QUEUE, MODEM_TRANSMIT_QUEUE, RIGCTLD_COMMAND_QUEUE, AUDIO_RECEIVED_QUEUE, AUDIO_TRANSMIT_QUEUE -""" -trx:0,true; -trx:0,false; +TESTMODE = False +RXCHANNEL = "" +TXCHANNEL = "" -""" +static.TRANSMITTING = False + +# Receive only specific modes to reduce CPU load +RECEIVE_SIG0 = True +RECEIVE_SIG1 = False +RECEIVE_DATAC1 = False +RECEIVE_DATAC3 = False -class TCI: - def __init__(self, hostname='127.0.0.1', port=50001): - # websocket.enableTrace(True) - self.log = structlog.get_logger("TCI") +# state buffer +SIG0_DATAC0_STATE = [] +SIG1_DATAC0_STATE = [] +DAT0_DATAC1_STATE = [] +DAT0_DATAC3_STATE = [] +FSK_LDPC0_STATE = [] +FSK_LDPC1_STATE = [] + +class RF: + """Class to encapsulate interactions between the audio device and codec2""" + + log = structlog.get_logger("RF") + + def __init__(self) -> None: + """ """ + self.sampler_avg = 0 + self.buffer_avg = 0 + + self.AUDIO_SAMPLE_RATE_RX = 48000 + self.AUDIO_SAMPLE_RATE_TX = 48000 + self.MODEM_SAMPLE_RATE = codec2.api.FREEDV_FS_8000 + + self.AUDIO_FRAMES_PER_BUFFER_RX = 2400 * 2 # 8192 + # 8192 Let's do some tests with very small chunks for TX + if not static.AUDIO_ENABLE_TCI: + self.AUDIO_FRAMES_PER_BUFFER_TX = 2400 * 2 + else: + self.AUDIO_FRAMES_PER_BUFFER_TX = 1200 + + # 8 * (self.AUDIO_SAMPLE_RATE_RX/self.MODEM_SAMPLE_RATE) == 48 + self.AUDIO_CHANNELS = 1 + self.MODE = 0 + + # Locking state for mod out so buffer will be filled before we can use it + # https://github.com/DJ2LS/FreeDATA/issues/127 + # https://github.com/DJ2LS/FreeDATA/issues/99 + self.mod_out_locked = True + + # Make sure our resampler will work + assert (self.AUDIO_SAMPLE_RATE_RX / self.MODEM_SAMPLE_RATE) == codec2.api.FDMDV_OS_48 # type: ignore + + # Small hack for initializing codec2 via codec2.py module + # TODO: Need to change the entire modem module to integrate codec2 module + self.c_lib = codec2.api + self.resampler = codec2.resampler() + + self.modem_transmit_queue = MODEM_TRANSMIT_QUEUE + self.modem_received_queue = MODEM_RECEIVED_QUEUE self.audio_received_queue = AUDIO_RECEIVED_QUEUE self.audio_transmit_queue = AUDIO_TRANSMIT_QUEUE - self.hostname = str(hostname) - self.port = str(port) - self.ws = '' + # Init FIFO queue to store modulation out in + self.modoutqueue = deque() - tci_thread = threading.Thread( - target=self.connect, - name="TCI THREAD", - daemon=True, - ) - tci_thread.start() + # Define fft_data buffer + self.fft_data = bytes() - # flag if we're receiving a tx_chrono - self.tx_chrono = False + # Open codec2 instances - # audio related parameters, will be updated by tx chrono - self.sample_rate = None - self.format = None - self.codec = None - self.audio_length = None - self.crc = None - self.channel = None + # DATAC0 + # SIGNALLING MODE 0 - Used for Connecting - Payload 14 Bytes + self.sig0_datac0_freedv, \ + self.sig0_datac0_bytes_per_frame, \ + self.sig0_datac0_bytes_out, \ + self.sig0_datac0_buffer, \ + self.sig0_datac0_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) - def connect(self): - self.log.info( - "[TCI] Starting TCI thread!", ip=self.hostname, port=self.port - ) - self.ws = websocket.WebSocketApp( - f"ws://{self.hostname}:{self.port}", - on_open=self.on_open, - on_message=self.on_message, - on_error=self.on_error, - on_close=self.on_close, - ) + # DATAC0 + # SIGNALLING MODE 1 - Used for ACK/NACK - Payload 5 Bytes + self.sig1_datac0_freedv, \ + 
self.sig1_datac0_bytes_per_frame, \ + self.sig1_datac0_bytes_out, \ + self.sig1_datac0_buffer, \ + self.sig1_datac0_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) - self.ws.run_forever(reconnect=5) # Set dispatcher to automatic reconnection, 5 second reconnect delay if con> - # rel.signal(2, rel.abort) # Keyboard Interrupt - # rel.dispatch() + # DATAC1 + self.dat0_datac1_freedv, \ + self.dat0_datac1_bytes_per_frame, \ + self.dat0_datac1_bytes_out, \ + self.dat0_datac1_buffer, \ + self.dat0_datac1_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC1, None) - def on_message(self, ws, message): + # DATAC3 + self.dat0_datac3_freedv, \ + self.dat0_datac3_bytes_per_frame, \ + self.dat0_datac3_bytes_out, \ + self.dat0_datac3_buffer, \ + self.dat0_datac3_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC3, None) - # ready message - # we need to wait until radio is ready before we can push commands - if message == "ready;": - self.ws.send('audio_samplerate:8000;') - self.ws.send('audio_stream_channels:1;') - self.ws.send('audio_stream_sample_type:int16;') - self.ws.send('audio_stream_samples:1200;') - self.ws.send('audio_start:0;') + # FSK LDPC - 0 + self.fsk_ldpc_freedv_0, \ + self.fsk_ldpc_bytes_per_frame_0, \ + self.fsk_ldpc_bytes_out_0, \ + self.fsk_ldpc_buffer_0, \ + self.fsk_ldpc_nin_0 = \ + self.init_codec2_mode( + codec2.api.FREEDV_MODE_FSK_LDPC, + codec2.api.FREEDV_MODE_FSK_LDPC_0_ADV + ) - # tx chrono frame - if len(message) in {64}: - receiver = message[:4] - sample_rate = int.from_bytes(message[4:8], "little") - format = int.from_bytes(message[8:12], "little") - codec = int.from_bytes(message[12:16], "little") - crc = int.from_bytes(message[16:20], "little") - audio_length = int.from_bytes(message[20:24], "little") - type = int.from_bytes(message[24:28], "little") - channel = int.from_bytes(message[28:32], "little") - reserved1 = int.from_bytes(message[32:36], "little") - reserved2 = int.from_bytes(message[36:40], "little") - reserved3 = int.from_bytes(message[40:44], "little") - reserved4 = int.from_bytes(message[44:48], "little") - reserved5 = int.from_bytes(message[48:52], "little") - reserved6 = int.from_bytes(message[52:56], "little") - reserved7 = int.from_bytes(message[56:60], "little") - reserved8 = int.from_bytes(message[60:64], "little") - if type == 3: - self.tx_chrono = True + # FSK LDPC - 1 + self.fsk_ldpc_freedv_1, \ + self.fsk_ldpc_bytes_per_frame_1, \ + self.fsk_ldpc_bytes_out_1, \ + self.fsk_ldpc_buffer_1, \ + self.fsk_ldpc_nin_1 = \ + self.init_codec2_mode( + codec2.api.FREEDV_MODE_FSK_LDPC, + codec2.api.FREEDV_MODE_FSK_LDPC_1_ADV + ) - self.sample_rate = sample_rate - self.format = format - self.codec = codec - self.audio_length = audio_length - self.channel = channel - self.crc = crc + # INIT TX MODES + self.freedv_datac0_tx = open_codec2_instance(14) + self.freedv_datac1_tx = open_codec2_instance(10) + self.freedv_datac3_tx = open_codec2_instance(12) + self.freedv_ldpc0_tx = open_codec2_instance(200) + self.freedv_ldpc1_tx = open_codec2_instance(201) + # --------------------------------------------CREATE PYAUDIO INSTANCE + if not TESTMODE and not static.AUDIO_ENABLE_TCI: + try: + self.stream = sd.RawStream( + channels=1, + dtype="int16", + callback=self.callback, + device=(static.AUDIO_INPUT_DEVICE, static.AUDIO_OUTPUT_DEVICE), + samplerate=self.AUDIO_SAMPLE_RATE_RX, + blocksize=4800, + ) + atexit.register(self.stream.stop) + self.log.info("[MDM] init: opened audio devices") + except Exception as err: + self.log.error("[MDM] 
init: can't open audio device. Exit", e=err) + sys.exit(1) - # audio frame - if len(message) in {576, 2464, 4160}: - # audio received - receiver = message[:4] - sample_rate = int.from_bytes(message[4:8], "little") - format = int.from_bytes(message[8:12], "little") - codec = int.from_bytes(message[12:16], "little") - crc = int.from_bytes(message[16:20], "little") - audio_length = int.from_bytes(message[20:24], "little") - type = int.from_bytes(message[24:28], "little") - channel = int.from_bytes(message[28:32], "little") - reserved1 = int.from_bytes(message[32:36], "little") - reserved2 = int.from_bytes(message[36:40], "little") - reserved3 = int.from_bytes(message[40:44], "little") - reserved4 = int.from_bytes(message[44:48], "little") - reserved5 = int.from_bytes(message[48:52], "little") - reserved6 = int.from_bytes(message[52:56], "little") - reserved7 = int.from_bytes(message[56:60], "little") - reserved8 = int.from_bytes(message[60:64], "little") - audio_data = message[64:] - self.audio_received_queue.put(audio_data) + try: + self.log.debug("[MDM] init: starting pyaudio callback") + # self.audio_stream.start_stream() + self.stream.start() + except Exception as err: + self.log.error("[MDM] init: starting pyaudio callback failed", e=err) - def on_error(self, error): - self.log.error( - "[TCI] Error FreeDATA to TCI rig!", ip=self.hostname, port=self.port, e=error - ) + elif not TESTMODE: + # placeholder area for processing audio via TCI + # https://github.com/maksimus1210/TCI + self.log.warning("[MDM] [TCI] Not yet fully implemented", ip=static.TCI_IP, port=static.TCI_PORT) + # we are trying this by simulating an audio stream Object like with mkfifo + class Object: + """An object for simulating audio stream""" + active = True + self.stream = Object() - def on_close(self, ws, close_status_code, close_msg): - self.log.warning( - "[TCI] Closed FreeDATA to TCI connection!", ip=self.hostname, port=self.port, statu=close_status_code, - msg=close_msg - ) + # lets init TCI module + self.tci_module = tci.TCI() - def on_open(self, ws): - self.ws = ws - self.log.info( - "[TCI] Connected FreeDATA to TCI rig!", ip=self.hostname, port=self.port - ) + tci_rx_callback_thread = threading.Thread( + target=self.tci_rx_callback, + name="TCI RX CALLBACK THREAD", + daemon=True, + ) + tci_rx_callback_thread.start() - self.log.info( - "[TCI] Init...", ip=self.hostname, port=self.port - ) + # let's start the audio tx callback + self.log.debug("[MDM] Starting tci tx callback thread") + tci_tx_callback_thread = threading.Thread( + target=self.tci_tx_callback, + name="TCI TX CALLBACK THREAD", + daemon=True, + ) + tci_tx_callback_thread.start() - def push_audio(self, data_out): - print(data_out) - - """ - # audio[:4] = receiver.to_bytes(4,byteorder='little', signed=False) - audio[4:8] = sample_rate.to_bytes(4, byteorder='little', signed=False) - audio[8:12] = format.to_bytes(4, byteorder='little', signed=False) - audio[12:16] = codec.to_bytes(4, byteorder='little', signed=False) - audio[16:20] = crc.to_bytes(4, byteorder='little', signed=False) - audio[20:24] = audio_length.to_bytes(4, byteorder='little', signed=False) - audio[24:28] = int(2).to_bytes(4, byteorder='little', signed=True) - audio[28:32] = channel.to_bytes(4, byteorder='little', signed=False) - audio[32:36] = reserved1.to_bytes(4, byteorder='little', signed=False) - audio[36:40] = reserved2.to_bytes(4, byteorder='little', signed=False) - audio[40:44] = reserved3.to_bytes(4, byteorder='little', signed=False) - audio[44:48] = reserved4.to_bytes(4, 
byteorder='little', signed=False) - audio[48:52] = reserved5.to_bytes(4, byteorder='little', signed=False) - audio[52:56] = reserved6.to_bytes(4, byteorder='little', signed=False) - audio[56:60] = reserved7.to_bytes(4, byteorder='little', signed=False) - audio[60:64] = reserved8.to_bytes(4, byteorder='little', signed=False) - """ - - while not self.tx_chrono: - time.sleep(0.01) - - print(len(data_out)) - print(self.sample_rate) - print(self.audio_length) - print(self.channel) - print(self.crc) - print(self.codec) - print(self.tx_chrono) - - if self.tx_chrono: - print("#############") - print(len(data_out)) - print(len(bytes(data_out))) - print("-------------") - audio = bytearray(4096 + 64) - - audio[64:64 + len(bytes(data_out))] = bytes(data_out) - audio[4:8] = self.sample_rate.to_bytes(4, byteorder='little', signed=False) - # audio[8:12] = format.to_bytes(4,byteorder='little', signed=False) - audio[12:16] = self.codec.to_bytes(4, byteorder='little', signed=False) - audio[16:20] = self.crc.to_bytes(4, byteorder='little', signed=False) - audio[20:24] = self.audio_length.to_bytes(4, byteorder='little', signed=False) - audio[24:28] = int(2).to_bytes(4, byteorder='little', signed=False) - audio[28:32] = self.channel.to_bytes(4, byteorder='little', signed=False) - # audio[32:36] = reserved1.to_bytes(4,byteorder='little', signed=False) - # audio[36:40] = reserved2.to_bytes(4,byteorder='little', signed=False) - # audio[40:44] = reserved3.to_bytes(4,byteorder='little', signed=False) - # audio[44:48] = reserved4.to_bytes(4,byteorder='little', signed=False) - # audio[48:52] = reserved5.to_bytes(4,byteorder='little', signed=False) - # audio[52:56] = reserved6.to_bytes(4,byteorder='little', signed=False) - # audio[56:60] = reserved7.to_bytes(4,byteorder='little', signed=False) - - self.ws.send(audio, websocket.ABNF.OPCODE_BINARY) - - def set_ptt(self, state): - if state: - self.ws.send('trx:0,true,tci;') else: - self.ws.send('trx:0,false;') - self.tx_chrono = False + class Object: + """An object for simulating audio stream""" + active = True - def get_frequency(self): - """ """ - return None + self.stream = Object() - def get_mode(self): - """ """ - return None + # Create mkfifo buffers + try: + os.mkfifo(RXCHANNEL) + os.mkfifo(TXCHANNEL) + except Exception as err: + self.log.info(f"[MDM] init:mkfifo: Exception: {err}") - def get_level(self): - """ """ - return None + mkfifo_write_callback_thread = threading.Thread( + target=self.mkfifo_write_callback, + name="MKFIFO WRITE CALLBACK THREAD", + daemon=True, + ) + mkfifo_write_callback_thread.start() - def get_alc(self): - """ """ - return None + self.log.debug("[MDM] Starting mkfifo_read_callback") + mkfifo_read_callback_thread = threading.Thread( + target=self.mkfifo_read_callback, + name="MKFIFO READ CALLBACK THREAD", + daemon=True, + ) + mkfifo_read_callback_thread.start() - def get_meter(self): - """ """ - return None + # --------------------------------------------INIT AND OPEN HAMLIB + # Check how we want to control the radio + # TODO: deprecated feature - we can remove this possibly + if static.HAMLIB_RADIOCONTROL == "direct": + print("direct hamlib support deprecated - not usable anymore") + sys.exit(1) + elif static.HAMLIB_RADIOCONTROL == "rigctl": + print("rigctl support deprecated - not usable anymore") + sys.exit(1) + elif static.HAMLIB_RADIOCONTROL == "rigctld": + import rigctld as rig + elif static.AUDIO_ENABLE_TCI: + self.radio = self.tci_module + else: + import rigdummy as rig - def get_bandwidth(self): - """ """ - return None + if not 
static.AUDIO_ENABLE_TCI: + self.radio = rig.radio() + self.radio.open_rig( + rigctld_ip=static.HAMLIB_RIGCTLD_IP, + rigctld_port=static.HAMLIB_RIGCTLD_PORT, + ) - def get_strength(self): - """ """ - return None + # --------------------------------------------START DECODER THREAD + if static.ENABLE_FFT: + fft_thread = threading.Thread( + target=self.calculate_fft, name="FFT_THREAD", daemon=True + ) + fft_thread.start() - def set_bandwidth(self): - """ """ - return None + if static.ENABLE_FSK: + audio_thread_fsk_ldpc0 = threading.Thread( + target=self.audio_fsk_ldpc_0, name="AUDIO_THREAD FSK LDPC0", daemon=True + ) + audio_thread_fsk_ldpc0.start() - def set_mode(self, mode): + audio_thread_fsk_ldpc1 = threading.Thread( + target=self.audio_fsk_ldpc_1, name="AUDIO_THREAD FSK LDPC1", daemon=True + ) + audio_thread_fsk_ldpc1.start() + + else: + audio_thread_sig0_datac0 = threading.Thread( + target=self.audio_sig0_datac0, name="AUDIO_THREAD DATAC0 - 0", daemon=True + ) + audio_thread_sig0_datac0.start() + + audio_thread_sig1_datac0 = threading.Thread( + target=self.audio_sig1_datac0, name="AUDIO_THREAD DATAC0 - 1", daemon=True + ) + audio_thread_sig1_datac0.start() + + audio_thread_dat0_datac1 = threading.Thread( + target=self.audio_dat0_datac1, name="AUDIO_THREAD DATAC1", daemon=True + ) + audio_thread_dat0_datac1.start() + + audio_thread_dat0_datac3 = threading.Thread( + target=self.audio_dat0_datac3, name="AUDIO_THREAD DATAC3", daemon=True + ) + audio_thread_dat0_datac3.start() + + + hamlib_thread = threading.Thread( + target=self.update_rig_data, name="HAMLIB_THREAD", daemon=True + ) + hamlib_thread.start() + + hamlib_set_thread = threading.Thread( + target=self.set_rig_data, name="HAMLIB_SET_THREAD", daemon=True + ) + hamlib_set_thread.start() + + # self.log.debug("[MDM] Starting worker_receive") + worker_received = threading.Thread( + target=self.worker_received, name="WORKER_THREAD", daemon=True + ) + worker_received.start() + + worker_transmit = threading.Thread( + target=self.worker_transmit, name="WORKER_THREAD", daemon=True + ) + worker_transmit.start() + + # -------------------------------------------------------------------------------------------------------- + def tci_tx_callback(self) -> None: + """ + Callback for TCI TX + """ + while True: + threading.Event().wait(0.01) + + if len(self.modoutqueue) > 0 and not self.mod_out_locked: + static.PTT_STATE = self.radio.set_ptt(True) + jsondata = {"ptt": "True"} + data_out = json.dumps(jsondata) + sock.SOCKET_QUEUE.put(data_out) + + data_out = self.modoutqueue.popleft() + self.tci_module.push_audio(data_out) + + def tci_rx_callback(self) -> None: + """ + Callback for TCI RX + + data_in48k must be filled with 48000Hz audio raw data + + """ + + while True: + threading.Event().wait(0.01) + + x = self.audio_received_queue.get() + x = np.frombuffer(x, dtype=np.int16) + #x = self.resampler.resample48_to_8(x) + + self.fft_data = x + + length_x = len(x) + for data_buffer, receive in [ + (self.sig0_datac0_buffer, RECEIVE_SIG0), + (self.sig1_datac0_buffer, RECEIVE_SIG1), + (self.dat0_datac1_buffer, RECEIVE_DATAC1), + (self.dat0_datac3_buffer, RECEIVE_DATAC3), + (self.fsk_ldpc_buffer_0, static.ENABLE_FSK), + (self.fsk_ldpc_buffer_1, static.ENABLE_FSK), + ]: + if ( + not (data_buffer.nbuffer + length_x) > data_buffer.size + and receive + ): + data_buffer.push(x) + + + + def mkfifo_read_callback(self) -> None: + """ + Support testing by reading the audio data from a pipe and + depositing the data into the codec data buffers. 
+ """ + while True: + threading.Event().wait(0.01) + # -----read + data_in48k = bytes() + with open(RXCHANNEL, "rb") as fifo: + for line in fifo: + data_in48k += line + + while len(data_in48k) >= 48: + x = np.frombuffer(data_in48k[:48], dtype=np.int16) + x = self.resampler.resample48_to_8(x) + data_in48k = data_in48k[48:] + + length_x = len(x) + for data_buffer, receive in [ + (self.sig0_datac0_buffer, RECEIVE_SIG0), + (self.sig1_datac0_buffer, RECEIVE_SIG1), + (self.dat0_datac1_buffer, RECEIVE_DATAC1), + (self.dat0_datac3_buffer, RECEIVE_DATAC3), + (self.fsk_ldpc_buffer_0, static.ENABLE_FSK), + (self.fsk_ldpc_buffer_1, static.ENABLE_FSK), + ]: + if ( + not (data_buffer.nbuffer + length_x) > data_buffer.size + and receive + ): + data_buffer.push(x) + + def mkfifo_write_callback(self) -> None: + """Support testing by writing the audio data to a pipe.""" + while True: + threading.Event().wait(0.01) + + # -----write + if len(self.modoutqueue) > 0 and not self.mod_out_locked: + data_out48k = self.modoutqueue.popleft() + # print(len(data_out48k)) + + with open(TXCHANNEL, "wb") as fifo_write: + fifo_write.write(data_out48k) + fifo_write.flush() + fifo_write.flush() + + # -------------------------------------------------------------------- + def callback(self, data_in48k, outdata, frames, time, status) -> None: + """ + Receive data into appropriate queue. + + Args: + data_in48k: Incoming data received + outdata: Container for the data returned + frames: Number of frames + time: + status: + + """ + # self.log.debug("[MDM] callback") + x = np.frombuffer(data_in48k, dtype=np.int16) + x = self.resampler.resample48_to_8(x) + + # audio recording for debugging purposes + if static.AUDIO_RECORD: + # static.AUDIO_RECORD_FILE.write(x) + static.AUDIO_RECORD_FILE.writeframes(x) + + # Avoid decoding when transmitting to reduce CPU + # TODO: Overriding this for testing purposes + # if not static.TRANSMITTING: + length_x = len(x) + # Avoid buffer overflow by filling only if buffer for + # selected datachannel mode is not full + for audiobuffer, receive, index in [ + (self.sig0_datac0_buffer, RECEIVE_SIG0, 0), + (self.sig1_datac0_buffer, RECEIVE_SIG1, 1), + (self.dat0_datac1_buffer, RECEIVE_DATAC1, 2), + (self.dat0_datac3_buffer, RECEIVE_DATAC3, 3), + (self.fsk_ldpc_buffer_0, static.ENABLE_FSK, 4), + (self.fsk_ldpc_buffer_1, static.ENABLE_FSK, 5), + ]: + if (audiobuffer.nbuffer + length_x) > audiobuffer.size: + static.BUFFER_OVERFLOW_COUNTER[index] += 1 + elif receive: + audiobuffer.push(x) + # end of "not static.TRANSMITTING" if block + + if not self.modoutqueue or self.mod_out_locked: + data_out48k = np.zeros(frames, dtype=np.int16) + self.fft_data = x + else: + if not static.PTT_STATE: + # TODO: Moved to this place for testing + # Maybe we can avoid moments of silence before transmitting + static.PTT_STATE = self.radio.set_ptt(True) + jsondata = {"ptt": "True"} + data_out = json.dumps(jsondata) + sock.SOCKET_QUEUE.put(data_out) + + data_out48k = self.modoutqueue.popleft() + self.fft_data = data_out48k + + try: + outdata[:] = data_out48k[:frames] + except IndexError as err: + self.log.debug(f"[MDM] callback: IndexError: {err}") + + # return (data_out48k, audio.pyaudio.paContinue) + + # -------------------------------------------------------------------- + def transmit( + self, mode, repeats: int, repeat_delay: int, frames: bytearray + ) -> None: """ Args: mode: - - Returns: + repeats: + repeat_delay: + frames: """ - return None - def set_frequency(self, frequency): - """ - - Args: - frequency: - - Returns: - - 
""" - return None - - def get_status(self): """ + sig0 = 14 + sig1 = 14 + datac0 = 14 + datac1 = 10 + datac3 = 12 + fsk_ldpc = 9 + fsk_ldpc_0 = 200 + fsk_ldpc_1 = 201 + """ + if mode == 14: + freedv = self.freedv_datac0_tx + elif mode == 10: + freedv = self.freedv_datac1_tx + elif mode == 12: + freedv = self.freedv_datac3_tx + elif mode == 200: + freedv = self.freedv_ldpc0_tx + elif mode == 201: + freedv = self.freedv_ldpc1_tx + else: + return False + + static.TRANSMITTING = True + # if we're transmitting FreeDATA signals, reset channel busy state + static.CHANNEL_BUSY = False + + start_of_transmission = time.time() + # TODO: Moved ptt toggle some steps before audio is ready for testing + # Toggle ptt early to save some time and send ptt state via socket + # static.PTT_STATE = self.radio.set_ptt(True) + # jsondata = {"ptt": "True"} + # data_out = json.dumps(jsondata) + # sock.SOCKET_QUEUE.put(data_out) + + # Open codec2 instance + self.MODE = mode + + # Get number of bytes per frame for mode + bytes_per_frame = int(codec2.api.freedv_get_bits_per_modem_frame(freedv) / 8) + payload_bytes_per_frame = bytes_per_frame - 2 + + # Init buffer for data + n_tx_modem_samples = codec2.api.freedv_get_n_tx_modem_samples(freedv) + mod_out = ctypes.create_string_buffer(n_tx_modem_samples * 2) + + # Init buffer for preample + n_tx_preamble_modem_samples = codec2.api.freedv_get_n_tx_preamble_modem_samples( + freedv + ) + mod_out_preamble = ctypes.create_string_buffer(n_tx_preamble_modem_samples * 2) + + # Init buffer for postamble + n_tx_postamble_modem_samples = ( + codec2.api.freedv_get_n_tx_postamble_modem_samples(freedv) + ) + mod_out_postamble = ctypes.create_string_buffer( + n_tx_postamble_modem_samples * 2 + ) + + # Add empty data to handle ptt toggle time + if static.TX_DELAY > 0: + data_delay = int(self.MODEM_SAMPLE_RATE * (static.TX_DELAY / 1000)) # type: ignore + mod_out_silence = ctypes.create_string_buffer(data_delay * 2) + txbuffer = bytes(mod_out_silence) + else: + txbuffer = bytes() + + self.log.debug( + "[MDM] TRANSMIT", mode=self.MODE, payload=payload_bytes_per_frame, delay=static.TX_DELAY + ) + + for _ in range(repeats): + # codec2 fsk preamble may be broken - + # at least it sounds like that, so we are disabling it for testing + if self.MODE not in [ + codec2.FREEDV_MODE.fsk_ldpc_0.value, + codec2.FREEDV_MODE.fsk_ldpc_1.value, + ]: + # Write preamble to txbuffer + codec2.api.freedv_rawdatapreambletx(freedv, mod_out_preamble) + txbuffer += bytes(mod_out_preamble) + + # Create modulaton for all frames in the list + for frame in frames: + # Create buffer for data + # Use this if CRC16 checksum is required (DATAc1-3) + buffer = bytearray(payload_bytes_per_frame) + # Set buffersize to length of data which will be send + buffer[: len(frame)] = frame # type: ignore + + # Create crc for data frame - + # Use the crc function shipped with codec2 + # to avoid CRC algorithm incompatibilities + # Generate CRC16 + crc = ctypes.c_ushort( + codec2.api.freedv_gen_crc16(bytes(buffer), payload_bytes_per_frame) + ) + # Convert crc to 2-byte (16-bit) hex string + crc = crc.value.to_bytes(2, byteorder="big") + # Append CRC to data buffer + buffer += crc + + data = (ctypes.c_ubyte * bytes_per_frame).from_buffer_copy(buffer) + # modulate DATA and save it into mod_out pointer + codec2.api.freedv_rawdatatx(freedv, mod_out, data) + txbuffer += bytes(mod_out) + + # codec2 fsk postamble may be broken - + # at least it sounds like that, so we are disabling it for testing + if self.MODE not in [ + 
codec2.FREEDV_MODE.fsk_ldpc_0.value, + codec2.FREEDV_MODE.fsk_ldpc_1.value, + ]: + # Write postamble to txbuffer + codec2.api.freedv_rawdatapostambletx(freedv, mod_out_postamble) + # Append postamble to txbuffer + txbuffer += bytes(mod_out_postamble) + + # Add delay to end of frames + samples_delay = int(self.MODEM_SAMPLE_RATE * (repeat_delay / 1000)) # type: ignore + mod_out_silence = ctypes.create_string_buffer(samples_delay * 2) + txbuffer += bytes(mod_out_silence) + + # Re-sample back up to 48k (resampler works on np.int16) + print(len(txbuffer)) + x = np.frombuffer(txbuffer, dtype=np.int16) + + # enable / disable AUDIO TUNE Feature / ALC correction + if static.AUDIO_AUTO_TUNE: + if static.HAMLIB_ALC == 0.0: + static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL + 20 + elif 0.0 < static.HAMLIB_ALC <= 0.1: + print("0.0 < static.HAMLIB_ALC <= 0.1") + static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL + 2 + self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) + elif 0.1 < static.HAMLIB_ALC < 0.2: + print("0.1 < static.HAMLIB_ALC < 0.2") + static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL + self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) + elif 0.2 < static.HAMLIB_ALC < 0.99: + print("0.2 < static.HAMLIB_ALC < 0.99") + static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL - 20 + self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) + elif 1.0 >=static.HAMLIB_ALC: + print("1.0 >= static.HAMLIB_ALC") + static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL - 40 + self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) + else: + self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) + x = set_audio_volume(x, static.TX_AUDIO_LEVEL) + + txbuffer_48k = self.resampler.resample8_to_48(x) + + # Explicitly lock our usage of mod_out_queue if needed + # This could avoid audio problems on slower CPU + # we will fill our modout list with all data, then start + # processing it in audio callback + self.mod_out_locked = True + + # ------------------------------- + chunk_length = self.AUDIO_FRAMES_PER_BUFFER_TX # 4800 + chunk = [ + txbuffer_48k[i: i + chunk_length] + for i in range(0, len(txbuffer_48k), chunk_length) + ] + for c in chunk: + # Pad the chunk, if needed + if len(c) < chunk_length: + delta = chunk_length - len(c) + delta_zeros = np.zeros(delta, dtype=np.int16) + c = np.append(c, delta_zeros) + # self.log.debug("[MDM] mod out shorter than audio buffer", delta=delta) + + self.modoutqueue.append(c) + + # Release our mod_out_lock, so we can use the queue + self.mod_out_locked = False + + while self.modoutqueue: + threading.Event().wait(0.01) + # if we're transmitting FreeDATA signals, reset channel busy state + static.CHANNEL_BUSY = False + + # we need to wait manually for tci processing + if static.AUDIO_ENABLE_TCI: + # + duration = len(txbuffer) / 8000 + timestamp_to_sleep = time.time() + duration + self.log.debug("[MDM] TCI calculated duration", duration=duration) + while time.time() < timestamp_to_sleep: + threading.Event().wait(0.01) + + static.PTT_STATE = self.radio.set_ptt(False) + + # Push ptt state to socket stream + jsondata = {"ptt": "False"} + data_out = json.dumps(jsondata) + sock.SOCKET_QUEUE.put(data_out) + + # After processing, set the locking state back to true to be prepared for next transmission + self.mod_out_locked = True + + 
self.modem_transmit_queue.task_done() + static.TRANSMITTING = False + threading.Event().set() + + end_of_transmission = time.time() + transmission_time = end_of_transmission - start_of_transmission + self.log.debug("[MDM] ON AIR TIME", time=transmission_time) + + def demodulate_audio( + self, + audiobuffer: codec2.audio_buffer, + nin: int, + freedv: ctypes.c_void_p, + bytes_out, + bytes_per_frame, + state_buffer, + mode_name, + ) -> int: + """ + De-modulate supplied audio stream with supplied codec2 instance. + Decoded audio is placed into `bytes_out`. + + :param audiobuffer: Incoming audio + :type audiobuffer: codec2.audio_buffer + :param nin: Number of frames codec2 is expecting + :type nin: int + :param freedv: codec2 instance + :type freedv: ctypes.c_void_p + :param bytes_out: Demodulated audio + :type bytes_out: _type_ + :param bytes_per_frame: Number of bytes per frame + :type bytes_per_frame: int + :param state_buffer: modem states + :type state_buffer: int + :param mode_name: mode name + :type mode_name: str + :return: NIN from freedv instance + :rtype: int + """ + nbytes = 0 + try: + while self.stream.active: + threading.Event().wait(0.01) + while audiobuffer.nbuffer >= nin: + # demodulate audio + nbytes = codec2.api.freedv_rawdatarx( + freedv, bytes_out, audiobuffer.buffer.ctypes + ) + # get current modem states and write to list + # 1 trial + # 2 sync + # 3 trial sync + # 6 decoded + # 10 error decoding == NACK + rx_status = codec2.api.freedv_get_rx_status(freedv) + + if rx_status != 0: + # we need to disable this if in testmode as its causing problems with FIFO it seems + if not TESTMODE: + static.IS_CODEC2_TRAFFIC = True + + self.log.debug( + "[MDM] [demod_audio] modem state", mode=mode_name, rx_status=rx_status, + sync_flag=codec2.api.rx_sync_flags_to_text[rx_status] + ) + else: + static.IS_CODEC2_TRAFFIC = False + + if rx_status == 10: + state_buffer.append(rx_status) + + audiobuffer.pop(nin) + nin = codec2.api.freedv_nin(freedv) + if nbytes == bytes_per_frame: + # process commands only if static.LISTEN = True + if static.LISTEN: + self.log.debug( + "[MDM] [demod_audio] Pushing received data to received_queue", nbytes=nbytes + ) + self.modem_received_queue.put([bytes_out, freedv, bytes_per_frame]) + self.get_scatter(freedv) + self.calculate_snr(freedv) + state_buffer = [] + else: + self.log.warning( + "[MDM] [demod_audio] received frame but ignored processing", + listen=static.LISTEN + ) + except Exception as e: + self.log.warning("[MDM] [demod_audio] Stream not active anymore", e=e) + return nin + + def init_codec2_mode(self, mode, adv): + """ + Init codec2 and return some important parameters Args: + self: mode: + adv: Returns: - + c2instance, bytes_per_frame, bytes_out, audio_buffer, nin """ - return "connected" + if adv: + # FSK Long-distance Parity Code 1 - data frames + c2instance = ctypes.cast( + codec2.api.freedv_open_advanced( + codec2.api.FREEDV_MODE_FSK_LDPC, + ctypes.byref(adv), + ), + ctypes.c_void_p, + ) + else: - def get_ptt(self): - """ """ - return None + # create codec2 instance + c2instance = ctypes.cast( + codec2.api.freedv_open(mode), ctypes.c_void_p + ) - def close_rig(self): - """ """ - return + # set tuning range + self.c_lib.freedv_set_tuning_range( + c2instance, + ctypes.c_float(static.TUNING_RANGE_FMIN), + ctypes.c_float(static.TUNING_RANGE_FMAX), + ) + + # get bytes per frame + bytes_per_frame = int( + codec2.api.freedv_get_bits_per_modem_frame(c2instance) / 8 + ) + + # create byte out buffer + bytes_out = 
ctypes.create_string_buffer(bytes_per_frame) + + # set initial frames per burst + codec2.api.freedv_set_frames_per_burst(c2instance, 1) + + # init audio buffer + audio_buffer = codec2.audio_buffer(2 * self.AUDIO_FRAMES_PER_BUFFER_RX) + + # get initial nin + nin = codec2.api.freedv_nin(c2instance) + + # Additional Datac0-specific information - these are not referenced anywhere else. + # self.sig0_datac0_payload_per_frame = self.sig0_datac0_bytes_per_frame - 2 + # self.sig0_datac0_n_nom_modem_samples = self.c_lib.freedv_get_n_nom_modem_samples( + # self.sig0_datac0_freedv + # ) + # self.sig0_datac0_n_tx_modem_samples = self.c_lib.freedv_get_n_tx_modem_samples( + # self.sig0_datac0_freedv + # ) + # self.sig0_datac0_n_tx_preamble_modem_samples = ( + # self.c_lib.freedv_get_n_tx_preamble_modem_samples(self.sig0_datac0_freedv) + # ) + # self.sig0_datac0_n_tx_postamble_modem_samples = ( + # self.c_lib.freedv_get_n_tx_postamble_modem_samples(self.sig0_datac0_freedv) + # ) + + # return values + return c2instance, bytes_per_frame, bytes_out, audio_buffer, nin + + def audio_sig0_datac0(self) -> None: + """Receive data encoded with datac0 - 0""" + self.sig0_datac0_nin = self.demodulate_audio( + self.sig0_datac0_buffer, + self.sig0_datac0_nin, + self.sig0_datac0_freedv, + self.sig0_datac0_bytes_out, + self.sig0_datac0_bytes_per_frame, + SIG0_DATAC0_STATE, + "sig0-datac0" + ) + + def audio_sig1_datac0(self) -> None: + """Receive data encoded with datac0 - 1""" + self.sig1_datac0_nin = self.demodulate_audio( + self.sig1_datac0_buffer, + self.sig1_datac0_nin, + self.sig1_datac0_freedv, + self.sig1_datac0_bytes_out, + self.sig1_datac0_bytes_per_frame, + SIG1_DATAC0_STATE, + "sig1-datac0" + ) + + def audio_dat0_datac1(self) -> None: + """Receive data encoded with datac1""" + self.dat0_datac1_nin = self.demodulate_audio( + self.dat0_datac1_buffer, + self.dat0_datac1_nin, + self.dat0_datac1_freedv, + self.dat0_datac1_bytes_out, + self.dat0_datac1_bytes_per_frame, + DAT0_DATAC1_STATE, + "dat0-datac1" + ) + + def audio_dat0_datac3(self) -> None: + """Receive data encoded with datac3""" + self.dat0_datac3_nin = self.demodulate_audio( + self.dat0_datac3_buffer, + self.dat0_datac3_nin, + self.dat0_datac3_freedv, + self.dat0_datac3_bytes_out, + self.dat0_datac3_bytes_per_frame, + DAT0_DATAC3_STATE, + "dat0-datac3" + ) + + def audio_fsk_ldpc_0(self) -> None: + """Receive data encoded with FSK + LDPC0""" + self.fsk_ldpc_nin_0 = self.demodulate_audio( + self.fsk_ldpc_buffer_0, + self.fsk_ldpc_nin_0, + self.fsk_ldpc_freedv_0, + self.fsk_ldpc_bytes_out_0, + self.fsk_ldpc_bytes_per_frame_0, + FSK_LDPC0_STATE, + "fsk_ldpc0", + ) + + def audio_fsk_ldpc_1(self) -> None: + """Receive data encoded with FSK + LDPC1""" + self.fsk_ldpc_nin_1 = self.demodulate_audio( + self.fsk_ldpc_buffer_1, + self.fsk_ldpc_nin_1, + self.fsk_ldpc_freedv_1, + self.fsk_ldpc_bytes_out_1, + self.fsk_ldpc_bytes_per_frame_1, + FSK_LDPC1_STATE, + "fsk_ldpc1", + ) + + def worker_transmit(self) -> None: + """Worker for FIFO queue for processing frames to be transmitted""" + while True: + # print queue size for debugging purposes + # TODO: Lets check why we have several frames in our transmit queue which causes sometimes a double transmission + # we could do a cleanup after a transmission so theres no reason sending twice + queuesize = self.modem_transmit_queue.qsize() + self.log.debug("[MDM] self.modem_transmit_queue", qsize=queuesize) + data = self.modem_transmit_queue.get() + + # self.log.debug("[MDM] worker_transmit", mode=data[0]) + self.transmit( + 
mode=data[0], repeats=data[1], repeat_delay=data[2], frames=data[3] + ) + # self.modem_transmit_queue.task_done() + + def worker_received(self) -> None: + """Worker for FIFO queue for processing received frames""" + while True: + data = self.modem_received_queue.get() + self.log.debug("[MDM] worker_received: received data!") + # data[0] = bytes_out + # data[1] = freedv session + # data[2] = bytes_per_frame + DATA_QUEUE_RECEIVED.put([data[0], data[1], data[2]]) + self.modem_received_queue.task_done() + + def get_frequency_offset(self, freedv: ctypes.c_void_p) -> float: + """ + Ask codec2 for the calculated (audio) frequency offset of the received signal. + Side-effect: sets static.FREQ_OFFSET + + :param freedv: codec2 instance to query + :type freedv: ctypes.c_void_p + :return: Offset of audio frequency in Hz + :rtype: float + """ + modemStats = codec2.MODEMSTATS() + self.c_lib.freedv_get_modem_extended_stats(freedv, ctypes.byref(modemStats)) + offset = round(modemStats.foff) * (-1) + static.FREQ_OFFSET = offset + return offset + + def get_scatter(self, freedv: ctypes.c_void_p) -> None: + """ + Ask codec2 for data about the received signal and calculate the scatter plot. + Side-effect: sets static.SCATTER + + :param freedv: codec2 instance to query + :type freedv: ctypes.c_void_p + """ + if not static.ENABLE_SCATTER: + return + + modemStats = codec2.MODEMSTATS() + ctypes.cast( + self.c_lib.freedv_get_modem_extended_stats(freedv, ctypes.byref(modemStats)), + ctypes.c_void_p, + ) + + scatterdata = [] + # original function before itertool + # for i in range(codec2.MODEM_STATS_NC_MAX): + # for j in range(1, codec2.MODEM_STATS_NR_MAX, 2): + # # print(f"{modemStats.rx_symbols[i][j]} - {modemStats.rx_symbols[i][j]}") + # xsymbols = round(modemStats.rx_symbols[i][j - 1] // 1000) + # ysymbols = round(modemStats.rx_symbols[i][j] // 1000) + # if xsymbols != 0.0 and ysymbols != 0.0: + # scatterdata.append({"x": str(xsymbols), "y": str(ysymbols)}) + + for i, j in itertools.product(range(codec2.MODEM_STATS_NC_MAX), range(1, codec2.MODEM_STATS_NR_MAX, 2)): + # print(f"{modemStats.rx_symbols[i][j]} - {modemStats.rx_symbols[i][j]}") + xsymbols = round(modemStats.rx_symbols[i][j - 1] // 1000) + ysymbols = round(modemStats.rx_symbols[i][j] // 1000) + if xsymbols != 0.0 and ysymbols != 0.0: + scatterdata.append({"x": str(xsymbols), "y": str(ysymbols)}) + + # Send all the data if we have too-few samples, otherwise send a sampling + if 150 > len(scatterdata) > 0: + static.SCATTER = scatterdata + else: + # only take every tenth data point + static.SCATTER = scatterdata[::10] + + def calculate_snr(self, freedv: ctypes.c_void_p) -> float: + """ + Ask codec2 for data about the received signal and calculate + the signal-to-noise ratio. 
+ Side-effect: sets static.SNR + + :param freedv: codec2 instance to query + :type freedv: ctypes.c_void_p + :return: Signal-to-noise ratio of the decoded data + :rtype: float + """ + try: + modem_stats_snr = ctypes.c_float() + modem_stats_sync = ctypes.c_int() + + self.c_lib.freedv_get_modem_stats( + freedv, ctypes.byref(modem_stats_sync), ctypes.byref(modem_stats_snr) + ) + modem_stats_snr = modem_stats_snr.value + modem_stats_sync = modem_stats_sync.value + + snr = round(modem_stats_snr, 1) + self.log.info("[MDM] calculate_snr: ", snr=snr) + static.SNR = snr + # static.SNR = np.clip( + # snr, -127, 127 + # ) # limit to max value of -128/128 as a possible fix of #188 + return static.SNR + except Exception as err: + self.log.error(f"[MDM] calculate_snr: Exception: {err}") + static.SNR = 0 + return static.SNR + + def set_rig_data(self) -> None: + """ + Set rigctld parameters like frequency, mode + THis needs to be processed in a queue + """ + while True: + cmd = RIGCTLD_COMMAND_QUEUE.get() + if cmd[0] == "set_frequency": + # [1] = Frequency + self.radio.set_frequency(cmd[1]) + if cmd[0] == "set_mode": + # [1] = Mode + self.radio.set_mode(cmd[1]) + + def update_rig_data(self) -> None: + """ + Request information about the current state of the radio via hamlib + Side-effect: sets + - static.HAMLIB_FREQUENCY + - static.HAMLIB_MODE + - static.HAMLIB_BANDWIDTH + """ + while True: + # this looks weird, but is necessary for avoiding rigctld packet colission sock + threading.Event().wait(0.25) + static.HAMLIB_FREQUENCY = self.radio.get_frequency() + threading.Event().wait(0.1) + static.HAMLIB_MODE = self.radio.get_mode() + threading.Event().wait(0.1) + static.HAMLIB_BANDWIDTH = self.radio.get_bandwidth() + threading.Event().wait(0.1) + static.HAMLIB_STATUS = self.radio.get_status() + threading.Event().wait(0.1) + if static.TRANSMITTING: + static.HAMLIB_ALC = self.radio.get_alc() + threading.Event().wait(0.1) + #static.HAMLIB_RF = self.radio.get_level() + #threading.Event().wait(0.1) + static.HAMLIB_STRENGTH = self.radio.get_strength() + + #print(f"ALC: {static.HAMLIB_ALC}, RF: {static.HAMLIB_RF}, STRENGTH: {static.HAMLIB_STRENGTH}") + + def calculate_fft(self) -> None: + """ + Calculate an average signal strength of the channel to assess + whether the channel is "busy." + """ + # Initialize channel_busy_delay counter + channel_busy_delay = 0 + + # Initialize dbfs counter + rms_counter = 0 + + while True: + # threading.Event().wait(0.01) + threading.Event().wait(0.01) + # WE NEED TO OPTIMIZE THIS! + + # Start calculating the FFT once enough samples are captured. + if len(self.fft_data) >= 128: + # https://gist.github.com/ZWMiller/53232427efc5088007cab6feee7c6e4c + # Fast Fourier Transform, 10*log10(abs) is to scale it to dB + # and make sure it's not imaginary + try: + fftarray = np.fft.rfft(self.fft_data) + + # Set value 0 to 1 to avoid division by zero + fftarray[fftarray == 0] = 1 + dfft = 10.0 * np.log10(abs(fftarray)) + + # get average of dfft + avg = np.mean(dfft) + + # Detect signals which are higher than the + # average + 10 (+10 smoothes the output). + # Data higher than the average must be a signal. 
+ # Therefore we are setting it to 100 so it will be highlighted + # Have to do this when we are not transmitting so our + # own sending data will not affect this too much + if not static.TRANSMITTING: + dfft[dfft > avg + 15] = 100 + + # Calculate audio dbfs + # https://stackoverflow.com/a/9763652 + # calculate dbfs every 50 cycles for reducing CPU load + rms_counter += 1 + if rms_counter > 50: + d = np.frombuffer(self.fft_data, np.int16).astype(np.float32) + # calculate RMS and then dBFS + # TODO: Need to change static.AUDIO_RMS to AUDIO_DBFS somewhen + # https://dsp.stackexchange.com/questions/8785/how-to-compute-dbfs + # try except for avoiding runtime errors by division/0 + try: + rms = int(np.sqrt(np.max(d ** 2))) + if rms == 0: + raise ZeroDivisionError + static.AUDIO_DBFS = 20 * np.log10(rms / 32768) + except Exception as e: + self.log.warning( + "[MDM] fft calculation error - please check your audio setup", + e=e, + ) + static.AUDIO_DBFS = -100 + + rms_counter = 0 + + # Convert data to int to decrease size + dfft = dfft.astype(int) + + # Create list of dfft for later pushing to static.FFT + dfftlist = dfft.tolist() + + # Reduce area where the busy detection is enabled + # We want to have this in correlation with mode bandwidth + # TODO: This is not correctly and needs to be checked for correct maths + # dfftlist[0:1] = 10,15Hz + # Bandwidth[Hz] / 10,15 + # narrowband = 563Hz = 56 + # wideband = 1700Hz = 167 + # 1500Hz = 148 + # 2700Hz = 266 + # 3200Hz = 315 + + # define the area, we are detecting busy state + dfft = dfft[120:176] if static.LOW_BANDWIDTH_MODE else dfft[65:231] + + # Check for signals higher than average by checking for "100" + # If we have a signal, increment our channel_busy delay counter + # so we have a smoother state toggle + if np.sum(dfft[dfft > avg + 15]) >= 400 and not static.TRANSMITTING: + static.CHANNEL_BUSY = True + # Limit delay counter to a maximum of 200. The higher this value, + # the longer we will wait until releasing state + channel_busy_delay = min(channel_busy_delay + 10, 200) + else: + # Decrement channel busy counter if no signal has been detected. + channel_busy_delay = max(channel_busy_delay - 1, 0) + # When our channel busy counter reaches 0, toggle state to False + if channel_busy_delay == 0: + static.CHANNEL_BUSY = False + + static.FFT = dfftlist[:315] # 315 --> bandwidth 3200 + except Exception as err: + self.log.error(f"[MDM] calculate_fft: Exception: {err}") + self.log.debug("[MDM] Setting fft=0") + # else 0 + static.FFT = [0] + + def set_frames_per_burst(self, frames_per_burst: int) -> None: + """ + Configure codec2 to send the configured number of frames per burst. 
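# --- Editor's note: illustrative sketch, not part of the patch series ---
# The clamp in set_frames_per_burst() just below applies min(value, 1) followed by
# max(value, 5), which always resolves to 5. If the intent is to keep the requested
# value inside 1..5, a standalone helper could look like this (the helper name and
# the 1..5 bounds are assumptions taken from the surrounding code):
def clamp_frames_per_burst(requested: int, low: int = 1, high: int = 5) -> int:
    """Constrain a requested frames-per-burst value to the allowed range."""
    return max(low, min(requested, high))

assert clamp_frames_per_burst(0) == 1    # too small -> lower bound
assert clamp_frames_per_burst(3) == 3    # in range -> unchanged
assert clamp_frames_per_burst(99) == 5   # too large -> upper bound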
+ + :param frames_per_burst: Number of frames per burst requested + :type frames_per_burst: int + """ + # Limit frames per burst to acceptable values + frames_per_burst = min(frames_per_burst, 1) + frames_per_burst = max(frames_per_burst, 5) + + codec2.api.freedv_set_frames_per_burst(self.dat0_datac1_freedv, frames_per_burst) + codec2.api.freedv_set_frames_per_burst(self.dat0_datac3_freedv, frames_per_burst) + codec2.api.freedv_set_frames_per_burst(self.fsk_ldpc_freedv_0, frames_per_burst) + + +def open_codec2_instance(mode: int) -> ctypes.c_void_p: + """ + Return a codec2 instance of the type `mode` + + :param mode: Type of codec2 instance to return + :type mode: Union[int, str] + :return: C-function of the requested codec2 instance + :rtype: ctypes.c_void_p + """ + if mode in [codec2.FREEDV_MODE.fsk_ldpc_0.value]: + return ctypes.cast( + codec2.api.freedv_open_advanced( + codec2.api.FREEDV_MODE_FSK_LDPC, + ctypes.byref(codec2.api.FREEDV_MODE_FSK_LDPC_0_ADV), + ), + ctypes.c_void_p, + ) + + if mode in [codec2.FREEDV_MODE.fsk_ldpc_1.value]: + return ctypes.cast( + codec2.api.freedv_open_advanced( + codec2.api.FREEDV_MODE_FSK_LDPC, + ctypes.byref(codec2.api.FREEDV_MODE_FSK_LDPC_1_ADV), + ), + ctypes.c_void_p, + ) + + return ctypes.cast(codec2.api.freedv_open(mode), ctypes.c_void_p) + + +def get_bytes_per_frame(mode: int) -> int: + """ + Provide bytes per frame information for accessing from data handler + + :param mode: Codec2 mode to query + :type mode: int or str + :return: Bytes per frame of the supplied codec2 data mode + :rtype: int + """ + freedv = open_codec2_instance(mode) + + # get number of bytes per frame for mode + return int(codec2.api.freedv_get_bits_per_modem_frame(freedv) / 8) + + +def set_audio_volume(datalist, volume: float) -> np.int16: + """ + Scale values for the provided audio samples by volume, + `volume` is clipped to the range of 0-200 + + :param datalist: Audio samples to scale + :type datalist: NDArray[np.int16] + :param volume: "Percentage" (0-200) to scale samples + :type volume: float + :return: Scaled audio samples + :rtype: np.int16 + """ + # make sure we have float as data type to avoid crash + try: + volume = float(volume) + except Exception as e: + print(f"[MDM] changing audio volume failed with error: {e}") + volume = 100.0 + + # Clip volume provided to acceptable values + volume = np.clip(volume, 0, 200) # limit to max value of 255 + # Scale samples by the ratio of volume / 100.0 + data = np.fromstring(datalist, np.int16) * (volume / 100.0) # type: ignore + return data.astype(np.int16) + + +def get_modem_error_state(): + """ + get current state buffer and return True of contains 10 + + """ + + if RECEIVE_DATAC1 and 10 in DAT0_DATAC1_STATE: + DAT0_DATAC1_STATE.clear() + return True + if RECEIVE_DATAC3 and 10 in DAT0_DATAC3_STATE: + DAT0_DATAC3_STATE.clear() + return True + + return False \ No newline at end of file From ed6476a839004d5ca627810d6a6de886b537935a Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 12:05:13 +0200 Subject: [PATCH 12/28] wait manually as workaroung for missing information from radio --- tnc/modem.py | 9 + tnc/tci.py | 1485 ++++++++------------------------------------------ 2 files changed, 237 insertions(+), 1257 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index 661821a9..c9a73aba 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -691,6 +691,15 @@ class RF: # if we're transmitting FreeDATA signals, reset channel busy state static.CHANNEL_BUSY = False + # we need to wait 
manually for tci processing + if static.AUDIO_ENABLE_TCI: + # + duration = len(txbuffer) / 8000 + timestamp_to_sleep = time.time() + duration + self.log.debug("[MDM] TCI calculated duration", duration=duration) + while time.time() < timestamp_to_sleep: + threading.Event().wait(0.01) + static.PTT_STATE = self.radio.set_ptt(False) # Push ptt state to socket stream diff --git a/tnc/tci.py b/tnc/tci.py index 06dded35..2b88cccf 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -1,1313 +1,284 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Dec 23 07:04:24 2020 -@author: DJ2LS -""" -# pylint: disable=invalid-name, line-too-long, c-extension-no-member -# pylint: disable=import-outside-toplevel - -import atexit -import ctypes -import os -import sys -import threading -import time -from collections import deque -import wave -import codec2 -import itertools -import numpy as np -import sock -import sounddevice as sd -import static import structlog -import ujson as json -import tci -from queues import DATA_QUEUE_RECEIVED, MODEM_RECEIVED_QUEUE, MODEM_TRANSMIT_QUEUE, RIGCTLD_COMMAND_QUEUE, AUDIO_RECEIVED_QUEUE, AUDIO_TRANSMIT_QUEUE +import threading +import websocket +import numpy as np +import time +from queues import AUDIO_TRANSMIT_QUEUE, AUDIO_RECEIVED_QUEUE -TESTMODE = False -RXCHANNEL = "" -TXCHANNEL = "" +""" +trx:0,true; +trx:0,false; -static.TRANSMITTING = False - -# Receive only specific modes to reduce CPU load -RECEIVE_SIG0 = True -RECEIVE_SIG1 = False -RECEIVE_DATAC1 = False -RECEIVE_DATAC3 = False +""" -# state buffer -SIG0_DATAC0_STATE = [] -SIG1_DATAC0_STATE = [] -DAT0_DATAC1_STATE = [] -DAT0_DATAC3_STATE = [] -FSK_LDPC0_STATE = [] -FSK_LDPC1_STATE = [] - -class RF: - """Class to encapsulate interactions between the audio device and codec2""" - - log = structlog.get_logger("RF") - - def __init__(self) -> None: - """ """ - self.sampler_avg = 0 - self.buffer_avg = 0 - - self.AUDIO_SAMPLE_RATE_RX = 48000 - self.AUDIO_SAMPLE_RATE_TX = 48000 - self.MODEM_SAMPLE_RATE = codec2.api.FREEDV_FS_8000 - - self.AUDIO_FRAMES_PER_BUFFER_RX = 2400 * 2 # 8192 - # 8192 Let's do some tests with very small chunks for TX - if not static.AUDIO_ENABLE_TCI: - self.AUDIO_FRAMES_PER_BUFFER_TX = 2400 * 2 - else: - self.AUDIO_FRAMES_PER_BUFFER_TX = 1200 - - # 8 * (self.AUDIO_SAMPLE_RATE_RX/self.MODEM_SAMPLE_RATE) == 48 - self.AUDIO_CHANNELS = 1 - self.MODE = 0 - - # Locking state for mod out so buffer will be filled before we can use it - # https://github.com/DJ2LS/FreeDATA/issues/127 - # https://github.com/DJ2LS/FreeDATA/issues/99 - self.mod_out_locked = True - - # Make sure our resampler will work - assert (self.AUDIO_SAMPLE_RATE_RX / self.MODEM_SAMPLE_RATE) == codec2.api.FDMDV_OS_48 # type: ignore - - # Small hack for initializing codec2 via codec2.py module - # TODO: Need to change the entire modem module to integrate codec2 module - self.c_lib = codec2.api - self.resampler = codec2.resampler() - - self.modem_transmit_queue = MODEM_TRANSMIT_QUEUE - self.modem_received_queue = MODEM_RECEIVED_QUEUE +class TCI: + def __init__(self, hostname='127.0.0.1', port=50001): + # websocket.enableTrace(True) + self.log = structlog.get_logger("TCI") self.audio_received_queue = AUDIO_RECEIVED_QUEUE self.audio_transmit_queue = AUDIO_TRANSMIT_QUEUE + self.hostname = str(hostname) + self.port = str(port) - # Init FIFO queue to store modulation out in - self.modoutqueue = deque() + self.ws = '' - # Define fft_data buffer - self.fft_data = bytes() + tci_thread = threading.Thread( + target=self.connect, + name="TCI 
THREAD", + daemon=True, + ) + tci_thread.start() - # Open codec2 instances + # flag if we're receiving a tx_chrono + self.tx_chrono = False - # DATAC0 - # SIGNALLING MODE 0 - Used for Connecting - Payload 14 Bytes - self.sig0_datac0_freedv, \ - self.sig0_datac0_bytes_per_frame, \ - self.sig0_datac0_bytes_out, \ - self.sig0_datac0_buffer, \ - self.sig0_datac0_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) + # audio related parameters, will be updated by tx chrono + self.sample_rate = None + self.format = None + self.codec = None + self.audio_length = None + self.crc = None + self.channel = None - # DATAC0 - # SIGNALLING MODE 1 - Used for ACK/NACK - Payload 5 Bytes - self.sig1_datac0_freedv, \ - self.sig1_datac0_bytes_per_frame, \ - self.sig1_datac0_bytes_out, \ - self.sig1_datac0_buffer, \ - self.sig1_datac0_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) + def connect(self): + self.log.info( + "[TCI] Starting TCI thread!", ip=self.hostname, port=self.port + ) + self.ws = websocket.WebSocketApp( + f"ws://{self.hostname}:{self.port}", + on_open=self.on_open, + on_message=self.on_message, + on_error=self.on_error, + on_close=self.on_close, + ) - # DATAC1 - self.dat0_datac1_freedv, \ - self.dat0_datac1_bytes_per_frame, \ - self.dat0_datac1_bytes_out, \ - self.dat0_datac1_buffer, \ - self.dat0_datac1_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC1, None) + self.ws.run_forever(reconnect=5) # Set dispatcher to automatic reconnection, 5 second reconnect delay if con> + # rel.signal(2, rel.abort) # Keyboard Interrupt + # rel.dispatch() - # DATAC3 - self.dat0_datac3_freedv, \ - self.dat0_datac3_bytes_per_frame, \ - self.dat0_datac3_bytes_out, \ - self.dat0_datac3_buffer, \ - self.dat0_datac3_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC3, None) + def on_message(self, ws, message): - # FSK LDPC - 0 - self.fsk_ldpc_freedv_0, \ - self.fsk_ldpc_bytes_per_frame_0, \ - self.fsk_ldpc_bytes_out_0, \ - self.fsk_ldpc_buffer_0, \ - self.fsk_ldpc_nin_0 = \ - self.init_codec2_mode( - codec2.api.FREEDV_MODE_FSK_LDPC, - codec2.api.FREEDV_MODE_FSK_LDPC_0_ADV - ) + # ready message + # we need to wait until radio is ready before we can push commands + if message == "ready;": + self.ws.send('audio_samplerate:8000;') + self.ws.send('audio_stream_channels:1;') + self.ws.send('audio_stream_sample_type:int16;') + self.ws.send('audio_stream_samples:1200;') + self.ws.send('audio_start:0;') - # FSK LDPC - 1 - self.fsk_ldpc_freedv_1, \ - self.fsk_ldpc_bytes_per_frame_1, \ - self.fsk_ldpc_bytes_out_1, \ - self.fsk_ldpc_buffer_1, \ - self.fsk_ldpc_nin_1 = \ - self.init_codec2_mode( - codec2.api.FREEDV_MODE_FSK_LDPC, - codec2.api.FREEDV_MODE_FSK_LDPC_1_ADV - ) + # tx chrono frame + if len(message) in {64}: + receiver = message[:4] + sample_rate = int.from_bytes(message[4:8], "little") + format = int.from_bytes(message[8:12], "little") + codec = int.from_bytes(message[12:16], "little") + crc = int.from_bytes(message[16:20], "little") + audio_length = int.from_bytes(message[20:24], "little") + type = int.from_bytes(message[24:28], "little") + channel = int.from_bytes(message[28:32], "little") + reserved1 = int.from_bytes(message[32:36], "little") + reserved2 = int.from_bytes(message[36:40], "little") + reserved3 = int.from_bytes(message[40:44], "little") + reserved4 = int.from_bytes(message[44:48], "little") + reserved5 = int.from_bytes(message[48:52], "little") + reserved6 = int.from_bytes(message[52:56], "little") + reserved7 = int.from_bytes(message[56:60], 
"little") + reserved8 = int.from_bytes(message[60:64], "little") + if type == 3: + self.tx_chrono = True - # INIT TX MODES - self.freedv_datac0_tx = open_codec2_instance(14) - self.freedv_datac1_tx = open_codec2_instance(10) - self.freedv_datac3_tx = open_codec2_instance(12) - self.freedv_ldpc0_tx = open_codec2_instance(200) - self.freedv_ldpc1_tx = open_codec2_instance(201) - # --------------------------------------------CREATE PYAUDIO INSTANCE - if not TESTMODE and not static.AUDIO_ENABLE_TCI: - try: - self.stream = sd.RawStream( - channels=1, - dtype="int16", - callback=self.callback, - device=(static.AUDIO_INPUT_DEVICE, static.AUDIO_OUTPUT_DEVICE), - samplerate=self.AUDIO_SAMPLE_RATE_RX, - blocksize=4800, - ) - atexit.register(self.stream.stop) - self.log.info("[MDM] init: opened audio devices") - except Exception as err: - self.log.error("[MDM] init: can't open audio device. Exit", e=err) - sys.exit(1) + self.sample_rate = sample_rate + self.format = format + self.codec = codec + self.audio_length = audio_length + self.channel = channel + self.crc = crc - try: - self.log.debug("[MDM] init: starting pyaudio callback") - # self.audio_stream.start_stream() - self.stream.start() - except Exception as err: - self.log.error("[MDM] init: starting pyaudio callback failed", e=err) + # audio frame + if len(message) in {576, 2464, 4160}: + # audio received + receiver = message[:4] + sample_rate = int.from_bytes(message[4:8], "little") + format = int.from_bytes(message[8:12], "little") + codec = int.from_bytes(message[12:16], "little") + crc = int.from_bytes(message[16:20], "little") + audio_length = int.from_bytes(message[20:24], "little") + type = int.from_bytes(message[24:28], "little") + channel = int.from_bytes(message[28:32], "little") + reserved1 = int.from_bytes(message[32:36], "little") + reserved2 = int.from_bytes(message[36:40], "little") + reserved3 = int.from_bytes(message[40:44], "little") + reserved4 = int.from_bytes(message[44:48], "little") + reserved5 = int.from_bytes(message[48:52], "little") + reserved6 = int.from_bytes(message[52:56], "little") + reserved7 = int.from_bytes(message[56:60], "little") + reserved8 = int.from_bytes(message[60:64], "little") + audio_data = message[64:] + self.audio_received_queue.put(audio_data) - elif not TESTMODE: - # placeholder area for processing audio via TCI - # https://github.com/maksimus1210/TCI - self.log.warning("[MDM] [TCI] Not yet fully implemented", ip=static.TCI_IP, port=static.TCI_PORT) - # we are trying this by simulating an audio stream Object like with mkfifo - class Object: - """An object for simulating audio stream""" - active = True - self.stream = Object() + def on_error(self, error): + self.log.error( + "[TCI] Error FreeDATA to TCI rig!", ip=self.hostname, port=self.port, e=error + ) - # lets init TCI module - self.tci_module = tci.TCI() + def on_close(self, ws, close_status_code, close_msg): + self.log.warning( + "[TCI] Closed FreeDATA to TCI connection!", ip=self.hostname, port=self.port, statu=close_status_code, + msg=close_msg + ) - tci_rx_callback_thread = threading.Thread( - target=self.tci_rx_callback, - name="TCI RX CALLBACK THREAD", - daemon=True, - ) - tci_rx_callback_thread.start() + def on_open(self, ws): + self.ws = ws + self.log.info( + "[TCI] Connected FreeDATA to TCI rig!", ip=self.hostname, port=self.port + ) - # let's start the audio tx callback - self.log.debug("[MDM] Starting tci tx callback thread") - tci_tx_callback_thread = threading.Thread( - target=self.tci_tx_callback, - name="TCI TX CALLBACK 
THREAD", - daemon=True, - ) - tci_tx_callback_thread.start() + self.log.info( + "[TCI] Init...", ip=self.hostname, port=self.port + ) + def push_audio(self, data_out): + print(data_out) + + """ + # audio[:4] = receiver.to_bytes(4,byteorder='little', signed=False) + audio[4:8] = sample_rate.to_bytes(4, byteorder='little', signed=False) + audio[8:12] = format.to_bytes(4, byteorder='little', signed=False) + audio[12:16] = codec.to_bytes(4, byteorder='little', signed=False) + audio[16:20] = crc.to_bytes(4, byteorder='little', signed=False) + audio[20:24] = audio_length.to_bytes(4, byteorder='little', signed=False) + audio[24:28] = int(2).to_bytes(4, byteorder='little', signed=True) + audio[28:32] = channel.to_bytes(4, byteorder='little', signed=False) + audio[32:36] = reserved1.to_bytes(4, byteorder='little', signed=False) + audio[36:40] = reserved2.to_bytes(4, byteorder='little', signed=False) + audio[40:44] = reserved3.to_bytes(4, byteorder='little', signed=False) + audio[44:48] = reserved4.to_bytes(4, byteorder='little', signed=False) + audio[48:52] = reserved5.to_bytes(4, byteorder='little', signed=False) + audio[52:56] = reserved6.to_bytes(4, byteorder='little', signed=False) + audio[56:60] = reserved7.to_bytes(4, byteorder='little', signed=False) + audio[60:64] = reserved8.to_bytes(4, byteorder='little', signed=False) + """ + + while not self.tx_chrono: + time.sleep(0.01) + + print(len(data_out)) + print(self.sample_rate) + print(self.audio_length) + print(self.channel) + print(self.crc) + print(self.codec) + print(self.tx_chrono) + + if self.tx_chrono: + print("#############") + print(len(data_out)) + print(len(bytes(data_out))) + print("-------------") + audio = bytearray(4096 + 64) + + audio[64:64 + len(bytes(data_out))] = bytes(data_out) + audio[4:8] = self.sample_rate.to_bytes(4, byteorder='little', signed=False) + # audio[8:12] = format.to_bytes(4,byteorder='little', signed=False) + audio[12:16] = self.codec.to_bytes(4, byteorder='little', signed=False) + audio[16:20] = self.crc.to_bytes(4, byteorder='little', signed=False) + audio[20:24] = self.audio_length.to_bytes(4, byteorder='little', signed=False) + audio[24:28] = int(2).to_bytes(4, byteorder='little', signed=False) + audio[28:32] = self.channel.to_bytes(4, byteorder='little', signed=False) + # audio[32:36] = reserved1.to_bytes(4,byteorder='little', signed=False) + # audio[36:40] = reserved2.to_bytes(4,byteorder='little', signed=False) + # audio[40:44] = reserved3.to_bytes(4,byteorder='little', signed=False) + # audio[44:48] = reserved4.to_bytes(4,byteorder='little', signed=False) + # audio[48:52] = reserved5.to_bytes(4,byteorder='little', signed=False) + # audio[52:56] = reserved6.to_bytes(4,byteorder='little', signed=False) + # audio[56:60] = reserved7.to_bytes(4,byteorder='little', signed=False) + + self.ws.send(audio, websocket.ABNF.OPCODE_BINARY) + + def set_ptt(self, state): + if state: + self.ws.send('trx:0,true,tci;') else: - class Object: - """An object for simulating audio stream""" - active = True + self.ws.send('trx:0,false;') + self.tx_chrono = False - self.stream = Object() + def get_frequency(self): + """ """ + return None - # Create mkfifo buffers - try: - os.mkfifo(RXCHANNEL) - os.mkfifo(TXCHANNEL) - except Exception as err: - self.log.info(f"[MDM] init:mkfifo: Exception: {err}") + def get_mode(self): + """ """ + return None - mkfifo_write_callback_thread = threading.Thread( - target=self.mkfifo_write_callback, - name="MKFIFO WRITE CALLBACK THREAD", - daemon=True, - ) - mkfifo_write_callback_thread.start() + 
def get_level(self): + """ """ + return None - self.log.debug("[MDM] Starting mkfifo_read_callback") - mkfifo_read_callback_thread = threading.Thread( - target=self.mkfifo_read_callback, - name="MKFIFO READ CALLBACK THREAD", - daemon=True, - ) - mkfifo_read_callback_thread.start() + def get_alc(self): + """ """ + return None - # --------------------------------------------INIT AND OPEN HAMLIB - # Check how we want to control the radio - # TODO: deprecated feature - we can remove this possibly - if static.HAMLIB_RADIOCONTROL == "direct": - print("direct hamlib support deprecated - not usable anymore") - sys.exit(1) - elif static.HAMLIB_RADIOCONTROL == "rigctl": - print("rigctl support deprecated - not usable anymore") - sys.exit(1) - elif static.HAMLIB_RADIOCONTROL == "rigctld": - import rigctld as rig - elif static.AUDIO_ENABLE_TCI: - self.radio = self.tci_module - else: - import rigdummy as rig + def get_meter(self): + """ """ + return None - if not static.AUDIO_ENABLE_TCI: - self.radio = rig.radio() - self.radio.open_rig( - rigctld_ip=static.HAMLIB_RIGCTLD_IP, - rigctld_port=static.HAMLIB_RIGCTLD_PORT, - ) + def get_bandwidth(self): + """ """ + return None - # --------------------------------------------START DECODER THREAD - if static.ENABLE_FFT: - fft_thread = threading.Thread( - target=self.calculate_fft, name="FFT_THREAD", daemon=True - ) - fft_thread.start() + def get_strength(self): + """ """ + return None - if static.ENABLE_FSK: - audio_thread_fsk_ldpc0 = threading.Thread( - target=self.audio_fsk_ldpc_0, name="AUDIO_THREAD FSK LDPC0", daemon=True - ) - audio_thread_fsk_ldpc0.start() + def set_bandwidth(self): + """ """ + return None - audio_thread_fsk_ldpc1 = threading.Thread( - target=self.audio_fsk_ldpc_1, name="AUDIO_THREAD FSK LDPC1", daemon=True - ) - audio_thread_fsk_ldpc1.start() - - else: - audio_thread_sig0_datac0 = threading.Thread( - target=self.audio_sig0_datac0, name="AUDIO_THREAD DATAC0 - 0", daemon=True - ) - audio_thread_sig0_datac0.start() - - audio_thread_sig1_datac0 = threading.Thread( - target=self.audio_sig1_datac0, name="AUDIO_THREAD DATAC0 - 1", daemon=True - ) - audio_thread_sig1_datac0.start() - - audio_thread_dat0_datac1 = threading.Thread( - target=self.audio_dat0_datac1, name="AUDIO_THREAD DATAC1", daemon=True - ) - audio_thread_dat0_datac1.start() - - audio_thread_dat0_datac3 = threading.Thread( - target=self.audio_dat0_datac3, name="AUDIO_THREAD DATAC3", daemon=True - ) - audio_thread_dat0_datac3.start() - - - hamlib_thread = threading.Thread( - target=self.update_rig_data, name="HAMLIB_THREAD", daemon=True - ) - hamlib_thread.start() - - hamlib_set_thread = threading.Thread( - target=self.set_rig_data, name="HAMLIB_SET_THREAD", daemon=True - ) - hamlib_set_thread.start() - - # self.log.debug("[MDM] Starting worker_receive") - worker_received = threading.Thread( - target=self.worker_received, name="WORKER_THREAD", daemon=True - ) - worker_received.start() - - worker_transmit = threading.Thread( - target=self.worker_transmit, name="WORKER_THREAD", daemon=True - ) - worker_transmit.start() - - # -------------------------------------------------------------------------------------------------------- - def tci_tx_callback(self) -> None: - """ - Callback for TCI TX - """ - while True: - threading.Event().wait(0.01) - - if len(self.modoutqueue) > 0 and not self.mod_out_locked: - static.PTT_STATE = self.radio.set_ptt(True) - jsondata = {"ptt": "True"} - data_out = json.dumps(jsondata) - sock.SOCKET_QUEUE.put(data_out) - - data_out = 
self.modoutqueue.popleft() - self.tci_module.push_audio(data_out) - - def tci_rx_callback(self) -> None: - """ - Callback for TCI RX - - data_in48k must be filled with 48000Hz audio raw data - - """ - - while True: - threading.Event().wait(0.01) - - x = self.audio_received_queue.get() - x = np.frombuffer(x, dtype=np.int16) - #x = self.resampler.resample48_to_8(x) - - self.fft_data = x - - length_x = len(x) - for data_buffer, receive in [ - (self.sig0_datac0_buffer, RECEIVE_SIG0), - (self.sig1_datac0_buffer, RECEIVE_SIG1), - (self.dat0_datac1_buffer, RECEIVE_DATAC1), - (self.dat0_datac3_buffer, RECEIVE_DATAC3), - (self.fsk_ldpc_buffer_0, static.ENABLE_FSK), - (self.fsk_ldpc_buffer_1, static.ENABLE_FSK), - ]: - if ( - not (data_buffer.nbuffer + length_x) > data_buffer.size - and receive - ): - data_buffer.push(x) - - - - def mkfifo_read_callback(self) -> None: - """ - Support testing by reading the audio data from a pipe and - depositing the data into the codec data buffers. - """ - while True: - threading.Event().wait(0.01) - # -----read - data_in48k = bytes() - with open(RXCHANNEL, "rb") as fifo: - for line in fifo: - data_in48k += line - - while len(data_in48k) >= 48: - x = np.frombuffer(data_in48k[:48], dtype=np.int16) - x = self.resampler.resample48_to_8(x) - data_in48k = data_in48k[48:] - - length_x = len(x) - for data_buffer, receive in [ - (self.sig0_datac0_buffer, RECEIVE_SIG0), - (self.sig1_datac0_buffer, RECEIVE_SIG1), - (self.dat0_datac1_buffer, RECEIVE_DATAC1), - (self.dat0_datac3_buffer, RECEIVE_DATAC3), - (self.fsk_ldpc_buffer_0, static.ENABLE_FSK), - (self.fsk_ldpc_buffer_1, static.ENABLE_FSK), - ]: - if ( - not (data_buffer.nbuffer + length_x) > data_buffer.size - and receive - ): - data_buffer.push(x) - - def mkfifo_write_callback(self) -> None: - """Support testing by writing the audio data to a pipe.""" - while True: - threading.Event().wait(0.01) - - # -----write - if len(self.modoutqueue) > 0 and not self.mod_out_locked: - data_out48k = self.modoutqueue.popleft() - # print(len(data_out48k)) - - with open(TXCHANNEL, "wb") as fifo_write: - fifo_write.write(data_out48k) - fifo_write.flush() - fifo_write.flush() - - # -------------------------------------------------------------------- - def callback(self, data_in48k, outdata, frames, time, status) -> None: - """ - Receive data into appropriate queue. 
- - Args: - data_in48k: Incoming data received - outdata: Container for the data returned - frames: Number of frames - time: - status: - - """ - # self.log.debug("[MDM] callback") - x = np.frombuffer(data_in48k, dtype=np.int16) - x = self.resampler.resample48_to_8(x) - - # audio recording for debugging purposes - if static.AUDIO_RECORD: - # static.AUDIO_RECORD_FILE.write(x) - static.AUDIO_RECORD_FILE.writeframes(x) - - # Avoid decoding when transmitting to reduce CPU - # TODO: Overriding this for testing purposes - # if not static.TRANSMITTING: - length_x = len(x) - # Avoid buffer overflow by filling only if buffer for - # selected datachannel mode is not full - for audiobuffer, receive, index in [ - (self.sig0_datac0_buffer, RECEIVE_SIG0, 0), - (self.sig1_datac0_buffer, RECEIVE_SIG1, 1), - (self.dat0_datac1_buffer, RECEIVE_DATAC1, 2), - (self.dat0_datac3_buffer, RECEIVE_DATAC3, 3), - (self.fsk_ldpc_buffer_0, static.ENABLE_FSK, 4), - (self.fsk_ldpc_buffer_1, static.ENABLE_FSK, 5), - ]: - if (audiobuffer.nbuffer + length_x) > audiobuffer.size: - static.BUFFER_OVERFLOW_COUNTER[index] += 1 - elif receive: - audiobuffer.push(x) - # end of "not static.TRANSMITTING" if block - - if not self.modoutqueue or self.mod_out_locked: - data_out48k = np.zeros(frames, dtype=np.int16) - self.fft_data = x - else: - if not static.PTT_STATE: - # TODO: Moved to this place for testing - # Maybe we can avoid moments of silence before transmitting - static.PTT_STATE = self.radio.set_ptt(True) - jsondata = {"ptt": "True"} - data_out = json.dumps(jsondata) - sock.SOCKET_QUEUE.put(data_out) - - data_out48k = self.modoutqueue.popleft() - self.fft_data = data_out48k - - try: - outdata[:] = data_out48k[:frames] - except IndexError as err: - self.log.debug(f"[MDM] callback: IndexError: {err}") - - # return (data_out48k, audio.pyaudio.paContinue) - - # -------------------------------------------------------------------- - def transmit( - self, mode, repeats: int, repeat_delay: int, frames: bytearray - ) -> None: + def set_mode(self, mode): """ Args: mode: - repeats: - repeat_delay: - frames: - - """ - - """ - sig0 = 14 - sig1 = 14 - datac0 = 14 - datac1 = 10 - datac3 = 12 - fsk_ldpc = 9 - fsk_ldpc_0 = 200 - fsk_ldpc_1 = 201 - """ - if mode == 14: - freedv = self.freedv_datac0_tx - elif mode == 10: - freedv = self.freedv_datac1_tx - elif mode == 12: - freedv = self.freedv_datac3_tx - elif mode == 200: - freedv = self.freedv_ldpc0_tx - elif mode == 201: - freedv = self.freedv_ldpc1_tx - else: - return False - - static.TRANSMITTING = True - # if we're transmitting FreeDATA signals, reset channel busy state - static.CHANNEL_BUSY = False - - start_of_transmission = time.time() - # TODO: Moved ptt toggle some steps before audio is ready for testing - # Toggle ptt early to save some time and send ptt state via socket - # static.PTT_STATE = self.radio.set_ptt(True) - # jsondata = {"ptt": "True"} - # data_out = json.dumps(jsondata) - # sock.SOCKET_QUEUE.put(data_out) - - # Open codec2 instance - self.MODE = mode - - # Get number of bytes per frame for mode - bytes_per_frame = int(codec2.api.freedv_get_bits_per_modem_frame(freedv) / 8) - payload_bytes_per_frame = bytes_per_frame - 2 - - # Init buffer for data - n_tx_modem_samples = codec2.api.freedv_get_n_tx_modem_samples(freedv) - mod_out = ctypes.create_string_buffer(n_tx_modem_samples * 2) - - # Init buffer for preample - n_tx_preamble_modem_samples = codec2.api.freedv_get_n_tx_preamble_modem_samples( - freedv - ) - mod_out_preamble = 
ctypes.create_string_buffer(n_tx_preamble_modem_samples * 2) - - # Init buffer for postamble - n_tx_postamble_modem_samples = ( - codec2.api.freedv_get_n_tx_postamble_modem_samples(freedv) - ) - mod_out_postamble = ctypes.create_string_buffer( - n_tx_postamble_modem_samples * 2 - ) - - # Add empty data to handle ptt toggle time - if static.TX_DELAY > 0: - data_delay = int(self.MODEM_SAMPLE_RATE * (static.TX_DELAY / 1000)) # type: ignore - mod_out_silence = ctypes.create_string_buffer(data_delay * 2) - txbuffer = bytes(mod_out_silence) - else: - txbuffer = bytes() - - self.log.debug( - "[MDM] TRANSMIT", mode=self.MODE, payload=payload_bytes_per_frame, delay=static.TX_DELAY - ) - - for _ in range(repeats): - # codec2 fsk preamble may be broken - - # at least it sounds like that, so we are disabling it for testing - if self.MODE not in [ - codec2.FREEDV_MODE.fsk_ldpc_0.value, - codec2.FREEDV_MODE.fsk_ldpc_1.value, - ]: - # Write preamble to txbuffer - codec2.api.freedv_rawdatapreambletx(freedv, mod_out_preamble) - txbuffer += bytes(mod_out_preamble) - - # Create modulaton for all frames in the list - for frame in frames: - # Create buffer for data - # Use this if CRC16 checksum is required (DATAc1-3) - buffer = bytearray(payload_bytes_per_frame) - # Set buffersize to length of data which will be send - buffer[: len(frame)] = frame # type: ignore - - # Create crc for data frame - - # Use the crc function shipped with codec2 - # to avoid CRC algorithm incompatibilities - # Generate CRC16 - crc = ctypes.c_ushort( - codec2.api.freedv_gen_crc16(bytes(buffer), payload_bytes_per_frame) - ) - # Convert crc to 2-byte (16-bit) hex string - crc = crc.value.to_bytes(2, byteorder="big") - # Append CRC to data buffer - buffer += crc - - data = (ctypes.c_ubyte * bytes_per_frame).from_buffer_copy(buffer) - # modulate DATA and save it into mod_out pointer - codec2.api.freedv_rawdatatx(freedv, mod_out, data) - txbuffer += bytes(mod_out) - - # codec2 fsk postamble may be broken - - # at least it sounds like that, so we are disabling it for testing - if self.MODE not in [ - codec2.FREEDV_MODE.fsk_ldpc_0.value, - codec2.FREEDV_MODE.fsk_ldpc_1.value, - ]: - # Write postamble to txbuffer - codec2.api.freedv_rawdatapostambletx(freedv, mod_out_postamble) - # Append postamble to txbuffer - txbuffer += bytes(mod_out_postamble) - - # Add delay to end of frames - samples_delay = int(self.MODEM_SAMPLE_RATE * (repeat_delay / 1000)) # type: ignore - mod_out_silence = ctypes.create_string_buffer(samples_delay * 2) - txbuffer += bytes(mod_out_silence) - - # Re-sample back up to 48k (resampler works on np.int16) - print(len(txbuffer)) - x = np.frombuffer(txbuffer, dtype=np.int16) - - # enable / disable AUDIO TUNE Feature / ALC correction - if static.AUDIO_AUTO_TUNE: - if static.HAMLIB_ALC == 0.0: - static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL + 20 - elif 0.0 < static.HAMLIB_ALC <= 0.1: - print("0.0 < static.HAMLIB_ALC <= 0.1") - static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL + 2 - self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) - elif 0.1 < static.HAMLIB_ALC < 0.2: - print("0.1 < static.HAMLIB_ALC < 0.2") - static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL - self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) - elif 0.2 < static.HAMLIB_ALC < 0.99: - print("0.2 < static.HAMLIB_ALC < 0.99") - static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL - 20 - self.log.debug("[MDM] AUDIO TUNE", 
audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) - elif 1.0 >=static.HAMLIB_ALC: - print("1.0 >= static.HAMLIB_ALC") - static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL - 40 - self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) - else: - self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) - x = set_audio_volume(x, static.TX_AUDIO_LEVEL) - - txbuffer_48k = self.resampler.resample8_to_48(x) - - # Explicitly lock our usage of mod_out_queue if needed - # This could avoid audio problems on slower CPU - # we will fill our modout list with all data, then start - # processing it in audio callback - self.mod_out_locked = True - - # ------------------------------- - chunk_length = self.AUDIO_FRAMES_PER_BUFFER_TX # 4800 - chunk = [ - txbuffer_48k[i: i + chunk_length] - for i in range(0, len(txbuffer_48k), chunk_length) - ] - for c in chunk: - # Pad the chunk, if needed - if len(c) < chunk_length: - delta = chunk_length - len(c) - delta_zeros = np.zeros(delta, dtype=np.int16) - c = np.append(c, delta_zeros) - # self.log.debug("[MDM] mod out shorter than audio buffer", delta=delta) - - self.modoutqueue.append(c) - - # Release our mod_out_lock, so we can use the queue - self.mod_out_locked = False - - while self.modoutqueue: - threading.Event().wait(0.01) - # if we're transmitting FreeDATA signals, reset channel busy state - static.CHANNEL_BUSY = False - - # we need to wait manually for tci processing - if static.AUDIO_ENABLE_TCI: - # - duration = len(txbuffer) / 8000 - timestamp_to_sleep = time.time() + duration - self.log.debug("[MDM] TCI calculated duration", duration=duration) - while time.time() < timestamp_to_sleep: - threading.Event().wait(0.01) - - static.PTT_STATE = self.radio.set_ptt(False) - - # Push ptt state to socket stream - jsondata = {"ptt": "False"} - data_out = json.dumps(jsondata) - sock.SOCKET_QUEUE.put(data_out) - - # After processing, set the locking state back to true to be prepared for next transmission - self.mod_out_locked = True - - self.modem_transmit_queue.task_done() - static.TRANSMITTING = False - threading.Event().set() - - end_of_transmission = time.time() - transmission_time = end_of_transmission - start_of_transmission - self.log.debug("[MDM] ON AIR TIME", time=transmission_time) - - def demodulate_audio( - self, - audiobuffer: codec2.audio_buffer, - nin: int, - freedv: ctypes.c_void_p, - bytes_out, - bytes_per_frame, - state_buffer, - mode_name, - ) -> int: - """ - De-modulate supplied audio stream with supplied codec2 instance. - Decoded audio is placed into `bytes_out`. 
- - :param audiobuffer: Incoming audio - :type audiobuffer: codec2.audio_buffer - :param nin: Number of frames codec2 is expecting - :type nin: int - :param freedv: codec2 instance - :type freedv: ctypes.c_void_p - :param bytes_out: Demodulated audio - :type bytes_out: _type_ - :param bytes_per_frame: Number of bytes per frame - :type bytes_per_frame: int - :param state_buffer: modem states - :type state_buffer: int - :param mode_name: mode name - :type mode_name: str - :return: NIN from freedv instance - :rtype: int - """ - nbytes = 0 - try: - while self.stream.active: - threading.Event().wait(0.01) - while audiobuffer.nbuffer >= nin: - # demodulate audio - nbytes = codec2.api.freedv_rawdatarx( - freedv, bytes_out, audiobuffer.buffer.ctypes - ) - # get current modem states and write to list - # 1 trial - # 2 sync - # 3 trial sync - # 6 decoded - # 10 error decoding == NACK - rx_status = codec2.api.freedv_get_rx_status(freedv) - - if rx_status != 0: - # we need to disable this if in testmode as its causing problems with FIFO it seems - if not TESTMODE: - static.IS_CODEC2_TRAFFIC = True - - self.log.debug( - "[MDM] [demod_audio] modem state", mode=mode_name, rx_status=rx_status, - sync_flag=codec2.api.rx_sync_flags_to_text[rx_status] - ) - else: - static.IS_CODEC2_TRAFFIC = False - - if rx_status == 10: - state_buffer.append(rx_status) - - audiobuffer.pop(nin) - nin = codec2.api.freedv_nin(freedv) - if nbytes == bytes_per_frame: - # process commands only if static.LISTEN = True - if static.LISTEN: - self.log.debug( - "[MDM] [demod_audio] Pushing received data to received_queue", nbytes=nbytes - ) - self.modem_received_queue.put([bytes_out, freedv, bytes_per_frame]) - self.get_scatter(freedv) - self.calculate_snr(freedv) - state_buffer = [] - else: - self.log.warning( - "[MDM] [demod_audio] received frame but ignored processing", - listen=static.LISTEN - ) - except Exception as e: - self.log.warning("[MDM] [demod_audio] Stream not active anymore", e=e) - return nin - - def init_codec2_mode(self, mode, adv): - """ - Init codec2 and return some important parameters - - Args: - self: - mode: - adv: Returns: - c2instance, bytes_per_frame, bytes_out, audio_buffer, nin + """ - if adv: - # FSK Long-distance Parity Code 1 - data frames - c2instance = ctypes.cast( - codec2.api.freedv_open_advanced( - codec2.api.FREEDV_MODE_FSK_LDPC, - ctypes.byref(adv), - ), - ctypes.c_void_p, - ) - else: + return None - # create codec2 instance - c2instance = ctypes.cast( - codec2.api.freedv_open(mode), ctypes.c_void_p - ) - - # set tuning range - self.c_lib.freedv_set_tuning_range( - c2instance, - ctypes.c_float(static.TUNING_RANGE_FMIN), - ctypes.c_float(static.TUNING_RANGE_FMAX), - ) - - # get bytes per frame - bytes_per_frame = int( - codec2.api.freedv_get_bits_per_modem_frame(c2instance) / 8 - ) - - # create byte out buffer - bytes_out = ctypes.create_string_buffer(bytes_per_frame) - - # set initial frames per burst - codec2.api.freedv_set_frames_per_burst(c2instance, 1) - - # init audio buffer - audio_buffer = codec2.audio_buffer(2 * self.AUDIO_FRAMES_PER_BUFFER_RX) - - # get initial nin - nin = codec2.api.freedv_nin(c2instance) - - # Additional Datac0-specific information - these are not referenced anywhere else. 
- # self.sig0_datac0_payload_per_frame = self.sig0_datac0_bytes_per_frame - 2 - # self.sig0_datac0_n_nom_modem_samples = self.c_lib.freedv_get_n_nom_modem_samples( - # self.sig0_datac0_freedv - # ) - # self.sig0_datac0_n_tx_modem_samples = self.c_lib.freedv_get_n_tx_modem_samples( - # self.sig0_datac0_freedv - # ) - # self.sig0_datac0_n_tx_preamble_modem_samples = ( - # self.c_lib.freedv_get_n_tx_preamble_modem_samples(self.sig0_datac0_freedv) - # ) - # self.sig0_datac0_n_tx_postamble_modem_samples = ( - # self.c_lib.freedv_get_n_tx_postamble_modem_samples(self.sig0_datac0_freedv) - # ) - - # return values - return c2instance, bytes_per_frame, bytes_out, audio_buffer, nin - - def audio_sig0_datac0(self) -> None: - """Receive data encoded with datac0 - 0""" - self.sig0_datac0_nin = self.demodulate_audio( - self.sig0_datac0_buffer, - self.sig0_datac0_nin, - self.sig0_datac0_freedv, - self.sig0_datac0_bytes_out, - self.sig0_datac0_bytes_per_frame, - SIG0_DATAC0_STATE, - "sig0-datac0" - ) - - def audio_sig1_datac0(self) -> None: - """Receive data encoded with datac0 - 1""" - self.sig1_datac0_nin = self.demodulate_audio( - self.sig1_datac0_buffer, - self.sig1_datac0_nin, - self.sig1_datac0_freedv, - self.sig1_datac0_bytes_out, - self.sig1_datac0_bytes_per_frame, - SIG1_DATAC0_STATE, - "sig1-datac0" - ) - - def audio_dat0_datac1(self) -> None: - """Receive data encoded with datac1""" - self.dat0_datac1_nin = self.demodulate_audio( - self.dat0_datac1_buffer, - self.dat0_datac1_nin, - self.dat0_datac1_freedv, - self.dat0_datac1_bytes_out, - self.dat0_datac1_bytes_per_frame, - DAT0_DATAC1_STATE, - "dat0-datac1" - ) - - def audio_dat0_datac3(self) -> None: - """Receive data encoded with datac3""" - self.dat0_datac3_nin = self.demodulate_audio( - self.dat0_datac3_buffer, - self.dat0_datac3_nin, - self.dat0_datac3_freedv, - self.dat0_datac3_bytes_out, - self.dat0_datac3_bytes_per_frame, - DAT0_DATAC3_STATE, - "dat0-datac3" - ) - - def audio_fsk_ldpc_0(self) -> None: - """Receive data encoded with FSK + LDPC0""" - self.fsk_ldpc_nin_0 = self.demodulate_audio( - self.fsk_ldpc_buffer_0, - self.fsk_ldpc_nin_0, - self.fsk_ldpc_freedv_0, - self.fsk_ldpc_bytes_out_0, - self.fsk_ldpc_bytes_per_frame_0, - FSK_LDPC0_STATE, - "fsk_ldpc0", - ) - - def audio_fsk_ldpc_1(self) -> None: - """Receive data encoded with FSK + LDPC1""" - self.fsk_ldpc_nin_1 = self.demodulate_audio( - self.fsk_ldpc_buffer_1, - self.fsk_ldpc_nin_1, - self.fsk_ldpc_freedv_1, - self.fsk_ldpc_bytes_out_1, - self.fsk_ldpc_bytes_per_frame_1, - FSK_LDPC1_STATE, - "fsk_ldpc1", - ) - - def worker_transmit(self) -> None: - """Worker for FIFO queue for processing frames to be transmitted""" - while True: - # print queue size for debugging purposes - # TODO: Lets check why we have several frames in our transmit queue which causes sometimes a double transmission - # we could do a cleanup after a transmission so theres no reason sending twice - queuesize = self.modem_transmit_queue.qsize() - self.log.debug("[MDM] self.modem_transmit_queue", qsize=queuesize) - data = self.modem_transmit_queue.get() - - # self.log.debug("[MDM] worker_transmit", mode=data[0]) - self.transmit( - mode=data[0], repeats=data[1], repeat_delay=data[2], frames=data[3] - ) - # self.modem_transmit_queue.task_done() - - def worker_received(self) -> None: - """Worker for FIFO queue for processing received frames""" - while True: - data = self.modem_received_queue.get() - self.log.debug("[MDM] worker_received: received data!") - # data[0] = bytes_out - # data[1] = freedv session - # 
data[2] = bytes_per_frame - DATA_QUEUE_RECEIVED.put([data[0], data[1], data[2]]) - self.modem_received_queue.task_done() - - def get_frequency_offset(self, freedv: ctypes.c_void_p) -> float: + def set_frequency(self, frequency): """ - Ask codec2 for the calculated (audio) frequency offset of the received signal. - Side-effect: sets static.FREQ_OFFSET - :param freedv: codec2 instance to query - :type freedv: ctypes.c_void_p - :return: Offset of audio frequency in Hz - :rtype: float + Args: + frequency: + + Returns: + """ - modemStats = codec2.MODEMSTATS() - self.c_lib.freedv_get_modem_extended_stats(freedv, ctypes.byref(modemStats)) - offset = round(modemStats.foff) * (-1) - static.FREQ_OFFSET = offset - return offset + return None - def get_scatter(self, freedv: ctypes.c_void_p) -> None: + def get_status(self): """ - Ask codec2 for data about the received signal and calculate the scatter plot. - Side-effect: sets static.SCATTER - :param freedv: codec2 instance to query - :type freedv: ctypes.c_void_p + Args: + mode: + + Returns: + """ - if not static.ENABLE_SCATTER: - return + return "connected" - modemStats = codec2.MODEMSTATS() - ctypes.cast( - self.c_lib.freedv_get_modem_extended_stats(freedv, ctypes.byref(modemStats)), - ctypes.c_void_p, - ) + def get_ptt(self): + """ """ + return None - scatterdata = [] - # original function before itertool - # for i in range(codec2.MODEM_STATS_NC_MAX): - # for j in range(1, codec2.MODEM_STATS_NR_MAX, 2): - # # print(f"{modemStats.rx_symbols[i][j]} - {modemStats.rx_symbols[i][j]}") - # xsymbols = round(modemStats.rx_symbols[i][j - 1] // 1000) - # ysymbols = round(modemStats.rx_symbols[i][j] // 1000) - # if xsymbols != 0.0 and ysymbols != 0.0: - # scatterdata.append({"x": str(xsymbols), "y": str(ysymbols)}) - - for i, j in itertools.product(range(codec2.MODEM_STATS_NC_MAX), range(1, codec2.MODEM_STATS_NR_MAX, 2)): - # print(f"{modemStats.rx_symbols[i][j]} - {modemStats.rx_symbols[i][j]}") - xsymbols = round(modemStats.rx_symbols[i][j - 1] // 1000) - ysymbols = round(modemStats.rx_symbols[i][j] // 1000) - if xsymbols != 0.0 and ysymbols != 0.0: - scatterdata.append({"x": str(xsymbols), "y": str(ysymbols)}) - - # Send all the data if we have too-few samples, otherwise send a sampling - if 150 > len(scatterdata) > 0: - static.SCATTER = scatterdata - else: - # only take every tenth data point - static.SCATTER = scatterdata[::10] - - def calculate_snr(self, freedv: ctypes.c_void_p) -> float: - """ - Ask codec2 for data about the received signal and calculate - the signal-to-noise ratio. 
- Side-effect: sets static.SNR - - :param freedv: codec2 instance to query - :type freedv: ctypes.c_void_p - :return: Signal-to-noise ratio of the decoded data - :rtype: float - """ - try: - modem_stats_snr = ctypes.c_float() - modem_stats_sync = ctypes.c_int() - - self.c_lib.freedv_get_modem_stats( - freedv, ctypes.byref(modem_stats_sync), ctypes.byref(modem_stats_snr) - ) - modem_stats_snr = modem_stats_snr.value - modem_stats_sync = modem_stats_sync.value - - snr = round(modem_stats_snr, 1) - self.log.info("[MDM] calculate_snr: ", snr=snr) - static.SNR = snr - # static.SNR = np.clip( - # snr, -127, 127 - # ) # limit to max value of -128/128 as a possible fix of #188 - return static.SNR - except Exception as err: - self.log.error(f"[MDM] calculate_snr: Exception: {err}") - static.SNR = 0 - return static.SNR - - def set_rig_data(self) -> None: - """ - Set rigctld parameters like frequency, mode - THis needs to be processed in a queue - """ - while True: - cmd = RIGCTLD_COMMAND_QUEUE.get() - if cmd[0] == "set_frequency": - # [1] = Frequency - self.radio.set_frequency(cmd[1]) - if cmd[0] == "set_mode": - # [1] = Mode - self.radio.set_mode(cmd[1]) - - def update_rig_data(self) -> None: - """ - Request information about the current state of the radio via hamlib - Side-effect: sets - - static.HAMLIB_FREQUENCY - - static.HAMLIB_MODE - - static.HAMLIB_BANDWIDTH - """ - while True: - # this looks weird, but is necessary for avoiding rigctld packet colission sock - threading.Event().wait(0.25) - static.HAMLIB_FREQUENCY = self.radio.get_frequency() - threading.Event().wait(0.1) - static.HAMLIB_MODE = self.radio.get_mode() - threading.Event().wait(0.1) - static.HAMLIB_BANDWIDTH = self.radio.get_bandwidth() - threading.Event().wait(0.1) - static.HAMLIB_STATUS = self.radio.get_status() - threading.Event().wait(0.1) - if static.TRANSMITTING: - static.HAMLIB_ALC = self.radio.get_alc() - threading.Event().wait(0.1) - #static.HAMLIB_RF = self.radio.get_level() - #threading.Event().wait(0.1) - static.HAMLIB_STRENGTH = self.radio.get_strength() - - #print(f"ALC: {static.HAMLIB_ALC}, RF: {static.HAMLIB_RF}, STRENGTH: {static.HAMLIB_STRENGTH}") - - def calculate_fft(self) -> None: - """ - Calculate an average signal strength of the channel to assess - whether the channel is "busy." - """ - # Initialize channel_busy_delay counter - channel_busy_delay = 0 - - # Initialize dbfs counter - rms_counter = 0 - - while True: - # threading.Event().wait(0.01) - threading.Event().wait(0.01) - # WE NEED TO OPTIMIZE THIS! - - # Start calculating the FFT once enough samples are captured. - if len(self.fft_data) >= 128: - # https://gist.github.com/ZWMiller/53232427efc5088007cab6feee7c6e4c - # Fast Fourier Transform, 10*log10(abs) is to scale it to dB - # and make sure it's not imaginary - try: - fftarray = np.fft.rfft(self.fft_data) - - # Set value 0 to 1 to avoid division by zero - fftarray[fftarray == 0] = 1 - dfft = 10.0 * np.log10(abs(fftarray)) - - # get average of dfft - avg = np.mean(dfft) - - # Detect signals which are higher than the - # average + 10 (+10 smoothes the output). - # Data higher than the average must be a signal. 
- # Therefore we are setting it to 100 so it will be highlighted - # Have to do this when we are not transmitting so our - # own sending data will not affect this too much - if not static.TRANSMITTING: - dfft[dfft > avg + 15] = 100 - - # Calculate audio dbfs - # https://stackoverflow.com/a/9763652 - # calculate dbfs every 50 cycles for reducing CPU load - rms_counter += 1 - if rms_counter > 50: - d = np.frombuffer(self.fft_data, np.int16).astype(np.float32) - # calculate RMS and then dBFS - # TODO: Need to change static.AUDIO_RMS to AUDIO_DBFS somewhen - # https://dsp.stackexchange.com/questions/8785/how-to-compute-dbfs - # try except for avoiding runtime errors by division/0 - try: - rms = int(np.sqrt(np.max(d ** 2))) - if rms == 0: - raise ZeroDivisionError - static.AUDIO_DBFS = 20 * np.log10(rms / 32768) - except Exception as e: - self.log.warning( - "[MDM] fft calculation error - please check your audio setup", - e=e, - ) - static.AUDIO_DBFS = -100 - - rms_counter = 0 - - # Convert data to int to decrease size - dfft = dfft.astype(int) - - # Create list of dfft for later pushing to static.FFT - dfftlist = dfft.tolist() - - # Reduce area where the busy detection is enabled - # We want to have this in correlation with mode bandwidth - # TODO: This is not correctly and needs to be checked for correct maths - # dfftlist[0:1] = 10,15Hz - # Bandwidth[Hz] / 10,15 - # narrowband = 563Hz = 56 - # wideband = 1700Hz = 167 - # 1500Hz = 148 - # 2700Hz = 266 - # 3200Hz = 315 - - # define the area, we are detecting busy state - dfft = dfft[120:176] if static.LOW_BANDWIDTH_MODE else dfft[65:231] - - # Check for signals higher than average by checking for "100" - # If we have a signal, increment our channel_busy delay counter - # so we have a smoother state toggle - if np.sum(dfft[dfft > avg + 15]) >= 400 and not static.TRANSMITTING: - static.CHANNEL_BUSY = True - # Limit delay counter to a maximum of 200. The higher this value, - # the longer we will wait until releasing state - channel_busy_delay = min(channel_busy_delay + 10, 200) - else: - # Decrement channel busy counter if no signal has been detected. - channel_busy_delay = max(channel_busy_delay - 1, 0) - # When our channel busy counter reaches 0, toggle state to False - if channel_busy_delay == 0: - static.CHANNEL_BUSY = False - - static.FFT = dfftlist[:315] # 315 --> bandwidth 3200 - except Exception as err: - self.log.error(f"[MDM] calculate_fft: Exception: {err}") - self.log.debug("[MDM] Setting fft=0") - # else 0 - static.FFT = [0] - - def set_frames_per_burst(self, frames_per_burst: int) -> None: - """ - Configure codec2 to send the configured number of frames per burst. 
- - :param frames_per_burst: Number of frames per burst requested - :type frames_per_burst: int - """ - # Limit frames per burst to acceptable values - frames_per_burst = min(frames_per_burst, 1) - frames_per_burst = max(frames_per_burst, 5) - - codec2.api.freedv_set_frames_per_burst(self.dat0_datac1_freedv, frames_per_burst) - codec2.api.freedv_set_frames_per_burst(self.dat0_datac3_freedv, frames_per_burst) - codec2.api.freedv_set_frames_per_burst(self.fsk_ldpc_freedv_0, frames_per_burst) - - -def open_codec2_instance(mode: int) -> ctypes.c_void_p: - """ - Return a codec2 instance of the type `mode` - - :param mode: Type of codec2 instance to return - :type mode: Union[int, str] - :return: C-function of the requested codec2 instance - :rtype: ctypes.c_void_p - """ - if mode in [codec2.FREEDV_MODE.fsk_ldpc_0.value]: - return ctypes.cast( - codec2.api.freedv_open_advanced( - codec2.api.FREEDV_MODE_FSK_LDPC, - ctypes.byref(codec2.api.FREEDV_MODE_FSK_LDPC_0_ADV), - ), - ctypes.c_void_p, - ) - - if mode in [codec2.FREEDV_MODE.fsk_ldpc_1.value]: - return ctypes.cast( - codec2.api.freedv_open_advanced( - codec2.api.FREEDV_MODE_FSK_LDPC, - ctypes.byref(codec2.api.FREEDV_MODE_FSK_LDPC_1_ADV), - ), - ctypes.c_void_p, - ) - - return ctypes.cast(codec2.api.freedv_open(mode), ctypes.c_void_p) - - -def get_bytes_per_frame(mode: int) -> int: - """ - Provide bytes per frame information for accessing from data handler - - :param mode: Codec2 mode to query - :type mode: int or str - :return: Bytes per frame of the supplied codec2 data mode - :rtype: int - """ - freedv = open_codec2_instance(mode) - - # get number of bytes per frame for mode - return int(codec2.api.freedv_get_bits_per_modem_frame(freedv) / 8) - - -def set_audio_volume(datalist, volume: float) -> np.int16: - """ - Scale values for the provided audio samples by volume, - `volume` is clipped to the range of 0-200 - - :param datalist: Audio samples to scale - :type datalist: NDArray[np.int16] - :param volume: "Percentage" (0-200) to scale samples - :type volume: float - :return: Scaled audio samples - :rtype: np.int16 - """ - # make sure we have float as data type to avoid crash - try: - volume = float(volume) - except Exception as e: - print(f"[MDM] changing audio volume failed with error: {e}") - volume = 100.0 - - # Clip volume provided to acceptable values - volume = np.clip(volume, 0, 200) # limit to max value of 255 - # Scale samples by the ratio of volume / 100.0 - data = np.fromstring(datalist, np.int16) * (volume / 100.0) # type: ignore - return data.astype(np.int16) - - -def get_modem_error_state(): - """ - get current state buffer and return True of contains 10 - - """ - - if RECEIVE_DATAC1 and 10 in DAT0_DATAC1_STATE: - DAT0_DATAC1_STATE.clear() - return True - if RECEIVE_DATAC3 and 10 in DAT0_DATAC3_STATE: - DAT0_DATAC3_STATE.clear() - return True - - return False \ No newline at end of file + def close_rig(self): + """ """ + return From 9422965df263d1bb5c7cefec5cbc120aecab1aac Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 12:20:06 +0200 Subject: [PATCH 13/28] timeout improvements --- tnc/modem.py | 123 +++++++++++++++++++++++++++------------------------ tnc/tci.py | 24 +++++----- 2 files changed, 78 insertions(+), 69 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index c9a73aba..dbf0e75e 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -26,7 +26,8 @@ import static import structlog import ujson as json import tci -from queues import DATA_QUEUE_RECEIVED, 
MODEM_RECEIVED_QUEUE, MODEM_TRANSMIT_QUEUE, RIGCTLD_COMMAND_QUEUE, AUDIO_RECEIVED_QUEUE, AUDIO_TRANSMIT_QUEUE +from queues import DATA_QUEUE_RECEIVED, MODEM_RECEIVED_QUEUE, MODEM_TRANSMIT_QUEUE, RIGCTLD_COMMAND_QUEUE, \ + AUDIO_RECEIVED_QUEUE, AUDIO_TRANSMIT_QUEUE TESTMODE = False RXCHANNEL = "" @@ -40,7 +41,6 @@ RECEIVE_SIG1 = False RECEIVE_DATAC1 = False RECEIVE_DATAC3 = False - # state buffer SIG0_DATAC0_STATE = [] SIG1_DATAC0_STATE = [] @@ -49,6 +49,7 @@ DAT0_DATAC3_STATE = [] FSK_LDPC0_STATE = [] FSK_LDPC1_STATE = [] + class RF: """Class to encapsulate interactions between the audio device and codec2""" @@ -93,7 +94,6 @@ class RF: self.audio_received_queue = AUDIO_RECEIVED_QUEUE self.audio_transmit_queue = AUDIO_TRANSMIT_QUEUE - # Init FIFO queue to store modulation out in self.modoutqueue = deque() @@ -105,55 +105,55 @@ class RF: # DATAC0 # SIGNALLING MODE 0 - Used for Connecting - Payload 14 Bytes self.sig0_datac0_freedv, \ - self.sig0_datac0_bytes_per_frame, \ - self.sig0_datac0_bytes_out, \ - self.sig0_datac0_buffer, \ - self.sig0_datac0_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) + self.sig0_datac0_bytes_per_frame, \ + self.sig0_datac0_bytes_out, \ + self.sig0_datac0_buffer, \ + self.sig0_datac0_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) # DATAC0 # SIGNALLING MODE 1 - Used for ACK/NACK - Payload 5 Bytes self.sig1_datac0_freedv, \ - self.sig1_datac0_bytes_per_frame, \ - self.sig1_datac0_bytes_out, \ - self.sig1_datac0_buffer, \ - self.sig1_datac0_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) + self.sig1_datac0_bytes_per_frame, \ + self.sig1_datac0_bytes_out, \ + self.sig1_datac0_buffer, \ + self.sig1_datac0_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) # DATAC1 self.dat0_datac1_freedv, \ - self.dat0_datac1_bytes_per_frame, \ - self.dat0_datac1_bytes_out, \ - self.dat0_datac1_buffer, \ - self.dat0_datac1_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC1, None) + self.dat0_datac1_bytes_per_frame, \ + self.dat0_datac1_bytes_out, \ + self.dat0_datac1_buffer, \ + self.dat0_datac1_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC1, None) # DATAC3 self.dat0_datac3_freedv, \ - self.dat0_datac3_bytes_per_frame, \ - self.dat0_datac3_bytes_out, \ - self.dat0_datac3_buffer, \ - self.dat0_datac3_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC3, None) + self.dat0_datac3_bytes_per_frame, \ + self.dat0_datac3_bytes_out, \ + self.dat0_datac3_buffer, \ + self.dat0_datac3_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC3, None) # FSK LDPC - 0 self.fsk_ldpc_freedv_0, \ - self.fsk_ldpc_bytes_per_frame_0, \ - self.fsk_ldpc_bytes_out_0, \ - self.fsk_ldpc_buffer_0, \ - self.fsk_ldpc_nin_0 = \ - self.init_codec2_mode( + self.fsk_ldpc_bytes_per_frame_0, \ + self.fsk_ldpc_bytes_out_0, \ + self.fsk_ldpc_buffer_0, \ + self.fsk_ldpc_nin_0 = \ + self.init_codec2_mode( codec2.api.FREEDV_MODE_FSK_LDPC, codec2.api.FREEDV_MODE_FSK_LDPC_0_ADV ) # FSK LDPC - 1 self.fsk_ldpc_freedv_1, \ - self.fsk_ldpc_bytes_per_frame_1, \ - self.fsk_ldpc_bytes_out_1, \ - self.fsk_ldpc_buffer_1, \ - self.fsk_ldpc_nin_1 = \ - self.init_codec2_mode( + self.fsk_ldpc_bytes_per_frame_1, \ + self.fsk_ldpc_bytes_out_1, \ + self.fsk_ldpc_buffer_1, \ + self.fsk_ldpc_nin_1 = \ + self.init_codec2_mode( codec2.api.FREEDV_MODE_FSK_LDPC, codec2.api.FREEDV_MODE_FSK_LDPC_1_ADV ) @@ -192,10 +192,12 @@ class RF: # placeholder area for processing audio via TCI # https://github.com/maksimus1210/TCI 
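# Illustrative sketch (assumed stand-ins, not part of the patch): with TCI the
# sound card is replaced by queue hand-offs fed from the WebSocket client.
# RX audio arrives as binary frames whose first 64 bytes are a header; the
# payload after byte 64 is the PCM that gets queued for the modem, as in
# TCI.on_message() shown further below.
import queue

AUDIO_RECEIVED_QUEUE = queue.Queue()   # RX: the modem thread reads from here
AUDIO_TRANSMIT_QUEUE = queue.Queue()   # TX: drained towards TCI.push_audio()

def on_binary_audio_frame(message: bytes) -> None:
    """Assumed shape of the RX hand-off: strip the 64-byte header, queue the PCM."""
    AUDIO_RECEIVED_QUEUE.put(message[64:])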
self.log.warning("[MDM] [TCI] Not yet fully implemented", ip=static.TCI_IP, port=static.TCI_PORT) + # we are trying this by simulating an audio stream Object like with mkfifo class Object: """An object for simulating audio stream""" active = True + self.stream = Object() # lets init TCI module @@ -309,7 +311,6 @@ class RF: ) audio_thread_dat0_datac3.start() - hamlib_thread = threading.Thread( target=self.update_rig_data, name="HAMLIB_THREAD", daemon=True ) @@ -361,7 +362,7 @@ class RF: x = self.audio_received_queue.get() x = np.frombuffer(x, dtype=np.int16) - #x = self.resampler.resample48_to_8(x) + # x = self.resampler.resample48_to_8(x) self.fft_data = x @@ -380,8 +381,6 @@ class RF: ): data_buffer.push(x) - - def mkfifo_read_callback(self) -> None: """ Support testing by reading the audio data from a pipe and @@ -642,24 +641,32 @@ class RF: elif 0.0 < static.HAMLIB_ALC <= 0.1: print("0.0 < static.HAMLIB_ALC <= 0.1") static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL + 2 - self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) + self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), + alc_level=str(static.HAMLIB_ALC)) elif 0.1 < static.HAMLIB_ALC < 0.2: print("0.1 < static.HAMLIB_ALC < 0.2") static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL - self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) + self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), + alc_level=str(static.HAMLIB_ALC)) elif 0.2 < static.HAMLIB_ALC < 0.99: print("0.2 < static.HAMLIB_ALC < 0.99") static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL - 20 - self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) - elif 1.0 >=static.HAMLIB_ALC: + self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), + alc_level=str(static.HAMLIB_ALC)) + elif 1.0 >= static.HAMLIB_ALC: print("1.0 >= static.HAMLIB_ALC") static.TX_AUDIO_LEVEL = static.TX_AUDIO_LEVEL - 40 - self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) + self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), + alc_level=str(static.HAMLIB_ALC)) else: - self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), alc_level=str(static.HAMLIB_ALC)) + self.log.debug("[MDM] AUDIO TUNE", audio_level=str(static.TX_AUDIO_LEVEL), + alc_level=str(static.HAMLIB_ALC)) x = set_audio_volume(x, static.TX_AUDIO_LEVEL) - txbuffer_48k = self.resampler.resample8_to_48(x) + if not static.AUDIO_ENABLE_TCI: + txbuffer_out = self.resampler.resample8_to_48(x) + else: + txbuffer_out = x # Explicitly lock our usage of mod_out_queue if needed # This could avoid audio problems on slower CPU @@ -670,8 +677,8 @@ class RF: # ------------------------------- chunk_length = self.AUDIO_FRAMES_PER_BUFFER_TX # 4800 chunk = [ - txbuffer_48k[i: i + chunk_length] - for i in range(0, len(txbuffer_48k), chunk_length) + txbuffer_out[i: i + chunk_length] + for i in range(0, len(txbuffer_out), chunk_length) ] for c in chunk: # Pad the chunk, if needed @@ -686,19 +693,21 @@ class RF: # Release our mod_out_lock, so we can use the queue self.mod_out_locked = False - while self.modoutqueue: - threading.Event().wait(0.01) - # if we're transmitting FreeDATA signals, reset channel busy state - static.CHANNEL_BUSY = False - # we need to wait manually for tci processing if static.AUDIO_ENABLE_TCI: - # - duration = len(txbuffer) / 8000 
- timestamp_to_sleep = time.time() + duration + + duration = len(txbuffer_out) / 8000 + timestamp_to_sleep = time.time() + (duration) self.log.debug("[MDM] TCI calculated duration", duration=duration) while time.time() < timestamp_to_sleep: threading.Event().wait(0.01) + else: + timestamp_to_sleep = time.time() + + while self.modoutqueue and time.time() < timestamp_to_sleep: + threading.Event().wait(0.01) + # if we're transmitting FreeDATA signals, reset channel busy state + static.CHANNEL_BUSY = False static.PTT_STATE = self.radio.set_ptt(False) @@ -1097,11 +1106,11 @@ class RF: if static.TRANSMITTING: static.HAMLIB_ALC = self.radio.get_alc() threading.Event().wait(0.1) - #static.HAMLIB_RF = self.radio.get_level() - #threading.Event().wait(0.1) + # static.HAMLIB_RF = self.radio.get_level() + # threading.Event().wait(0.1) static.HAMLIB_STRENGTH = self.radio.get_strength() - #print(f"ALC: {static.HAMLIB_ALC}, RF: {static.HAMLIB_RF}, STRENGTH: {static.HAMLIB_STRENGTH}") + # print(f"ALC: {static.HAMLIB_ALC}, RF: {static.HAMLIB_RF}, STRENGTH: {static.HAMLIB_STRENGTH}") def calculate_fft(self) -> None: """ diff --git a/tnc/tci.py b/tnc/tci.py index 2b88cccf..c8d513ba 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -145,7 +145,7 @@ class TCI: ) def push_audio(self, data_out): - print(data_out) + #print(data_out) """ # audio[:4] = receiver.to_bytes(4,byteorder='little', signed=False) @@ -169,19 +169,19 @@ class TCI: while not self.tx_chrono: time.sleep(0.01) - print(len(data_out)) - print(self.sample_rate) - print(self.audio_length) - print(self.channel) - print(self.crc) - print(self.codec) - print(self.tx_chrono) + #print(len(data_out)) + #print(self.sample_rate) + #print(self.audio_length) + #print(self.channel) + #print(self.crc) + #print(self.codec) + #print(self.tx_chrono) if self.tx_chrono: - print("#############") - print(len(data_out)) - print(len(bytes(data_out))) - print("-------------") + #print("#############") + #print(len(data_out)) + #print(len(bytes(data_out))) + #print("-------------") audio = bytearray(4096 + 64) audio[64:64 + len(bytes(data_out))] = bytes(data_out) From 7031077dabe30fd4d91983b87a9fd2d435c43b3c Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 12:39:34 +0200 Subject: [PATCH 14/28] attempt getting frequency --- tnc/tci.py | 38 ++++++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/tnc/tci.py b/tnc/tci.py index c8d513ba..f3938a54 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -46,6 +46,13 @@ class TCI: self.crc = None self.channel = None + self.frequency = None + self.bandwidth = None + self.mode = None + self.alc = None + self.meter = None + self.level = None + def connect(self): self.log.info( "[TCI] Starting TCI thread!", ip=self.hostname, port=self.port @@ -123,6 +130,24 @@ class TCI: audio_data = message[64:] self.audio_received_queue.put(audio_data) + # find frequency + if message.startswith("TX_FREQUENCY:"): + splitted_message = message.split("TX_FREQUENCY:") + self.frequency = splitted_message[1][:-1] + + # find bandwidth + #if message.startswith("rx_filter_band:0,"): + # splitted_message = message.split("rx_filter_band:0,") + # bandwidths = splitted_message[1] + # splitted_bandwidths = bandwidths.split(",") + # lower_bandwidth = int(splitted_bandwidths[0]) + # upper_bandwidth = int(splitted_bandwidths[1][:-1]) + # self.bandwidth = upper_bandwidth - lower_bandwidth + + + + + def on_error(self, error): self.log.error( "[TCI] Error FreeDATA to TCI rig!", 
ip=self.hostname, port=self.port, e=error @@ -212,27 +237,28 @@ class TCI: def get_frequency(self): """ """ - return None + self.ws.send('TX_FREQUENCY;') + return self.frequency def get_mode(self): """ """ - return None + return self.mode def get_level(self): """ """ - return None + return self.level def get_alc(self): """ """ - return None + return self.alc def get_meter(self): """ """ - return None + return self.meter def get_bandwidth(self): """ """ - return None + return self.bandwidth def get_strength(self): """ """ From fb3fcf0c1ddf64f41208e6ee267f404c3991c19a Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 12:53:56 +0200 Subject: [PATCH 15/28] attempt getting frequency --- tnc/tci.py | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/tnc/tci.py b/tnc/tci.py index f3938a54..df663f0a 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -130,19 +130,26 @@ class TCI: audio_data = message[64:] self.audio_received_queue.put(audio_data) - # find frequency - if message.startswith("TX_FREQUENCY:"): - splitted_message = message.split("TX_FREQUENCY:") - self.frequency = splitted_message[1][:-1] - # find bandwidth - #if message.startswith("rx_filter_band:0,"): - # splitted_message = message.split("rx_filter_band:0,") - # bandwidths = splitted_message[1] - # splitted_bandwidths = bandwidths.split(",") - # lower_bandwidth = int(splitted_bandwidths[0]) - # upper_bandwidth = int(splitted_bandwidths[1][:-1]) - # self.bandwidth = upper_bandwidth - lower_bandwidth + if len(message)< 64: + # find frequency + if bytes(message, "utf-8").startswith(b"vfo:0,0,"): + splitted_message = message.split("vfo:0,0,") + self.frequency = splitted_message[1][:-1] + + # find mode + if bytes(message, "utf-8").startswith(b"modulation:0,"): + splitted_message = message.split("modulation:0,") + self.mode = splitted_message[1][:-1] + + # find bandwidth + #if message.startswith("rx_filter_band:0,"): + # splitted_message = message.split("rx_filter_band:0,") + # bandwidths = splitted_message[1] + # splitted_bandwidths = bandwidths.split(",") + # lower_bandwidth = int(splitted_bandwidths[0]) + # upper_bandwidth = int(splitted_bandwidths[1][:-1]) + # self.bandwidth = upper_bandwidth - lower_bandwidth @@ -237,11 +244,12 @@ class TCI: def get_frequency(self): """ """ - self.ws.send('TX_FREQUENCY;') + self.ws.send('VFO:0,0;') return self.frequency def get_mode(self): """ """ + self.ws.send('MODULATION:0;') return self.mode def get_level(self): @@ -277,6 +285,7 @@ class TCI: Returns: """ + self.ws.send('MODULATION:0,' + mode + ';') return None def set_frequency(self, frequency): @@ -288,6 +297,7 @@ class TCI: Returns: """ + self.ws.send('VFO:0,0' + frequency + ';') return None def get_status(self): From 0b0ae5507aeb31d5740fa3c95ba695f081b38a38 Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 13:02:40 +0200 Subject: [PATCH 16/28] adding some support for paramters --- tnc/tci.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tnc/tci.py b/tnc/tci.py index df663f0a..e4cd09d3 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -8,13 +8,6 @@ import numpy as np import time from queues import AUDIO_TRANSMIT_QUEUE, AUDIO_RECEIVED_QUEUE -""" -trx:0,true; -trx:0,false; - -""" - - class TCI: def __init__(self, hostname='127.0.0.1', port=50001): # websocket.enableTrace(True) @@ -52,6 +45,7 @@ class TCI: self.alc = None self.meter = None self.level = None + 
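# Illustrative sketch (assumed example replies, not part of the patch): TCI text
# replies are ASCII commands terminated by ';', and the handlers above recover
# the argument by splitting on the command prefix and dropping the trailing ';'.
def parse_tci_reply(message: str, prefix: str):
    """Return the argument of a reply such as 'vfo:0,0,14093000;', else None."""
    if message.startswith(prefix):
        return message.split(prefix)[1][:-1]    # same split/strip as in on_message()
    return None

print(parse_tci_reply("vfo:0,0,14093000;", "vfo:0,0,"))       # -> '14093000'
print(parse_tci_reply("modulation:0,usb;", "modulation:0,"))  # -> 'usb'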
self.ptt def connect(self): self.log.info( @@ -142,6 +136,11 @@ class TCI: splitted_message = message.split("modulation:0,") self.mode = splitted_message[1][:-1] + # find ptt + if bytes(message, "utf-8").startswith(b"trx:0,"): + splitted_message = message.split("trx:0,") + self.ptt = splitted_message[1][:-1] + # find bandwidth #if message.startswith("rx_filter_band:0,"): # splitted_message = message.split("rx_filter_band:0,") @@ -285,7 +284,7 @@ class TCI: Returns: """ - self.ws.send('MODULATION:0,' + mode + ';') + self.ws.send(f'MODULATION:0,{str(mode)};') return None def set_frequency(self, frequency): @@ -297,7 +296,7 @@ class TCI: Returns: """ - self.ws.send('VFO:0,0' + frequency + ';') + self.ws.send(f'VFO:0,0,{str(frequency)};') return None def get_status(self): @@ -313,7 +312,8 @@ class TCI: def get_ptt(self): """ """ - return None + self.ws.send(f'trx:0;') + return self.ptt def close_rig(self): """ """ From e496945dd961e9530c6656cce6b47c6d99767dc3 Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 13:03:19 +0200 Subject: [PATCH 17/28] adding some support for paramters --- tnc/tci.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tnc/tci.py b/tnc/tci.py index e4cd09d3..eb5da6e2 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -45,7 +45,7 @@ class TCI: self.alc = None self.meter = None self.level = None - self.ptt + self.ptt = None def connect(self): self.log.info( From d9349f4742a790f226bb021e2a559730ac978d4d Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 13:15:57 +0200 Subject: [PATCH 18/28] using tx_enable instead of calcualted tx time --- tnc/modem.py | 18 ++++++------------ tnc/tci.py | 9 +++++++++ 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index dbf0e75e..eab2d6bf 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -693,22 +693,16 @@ class RF: # Release our mod_out_lock, so we can use the queue self.mod_out_locked = False - # we need to wait manually for tci processing - if static.AUDIO_ENABLE_TCI: - - duration = len(txbuffer_out) / 8000 - timestamp_to_sleep = time.time() + (duration) - self.log.debug("[MDM] TCI calculated duration", duration=duration) - while time.time() < timestamp_to_sleep: - threading.Event().wait(0.01) - else: - timestamp_to_sleep = time.time() - - while self.modoutqueue and time.time() < timestamp_to_sleep: + while self.modoutqueue: threading.Event().wait(0.01) # if we're transmitting FreeDATA signals, reset channel busy state static.CHANNEL_BUSY = False + # wait until tci protocol released tx state + if static.AUDIO_ENABLE_TCI: + while not self.tci_module.get_tx_enable(): + threading.Event().wait(0.01) + static.PTT_STATE = self.radio.set_ptt(False) # Push ptt state to socket stream diff --git a/tnc/tci.py b/tnc/tci.py index eb5da6e2..d9326d80 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -46,6 +46,8 @@ class TCI: self.meter = None self.level = None self.ptt = None + self.tx_enable = False + def connect(self): self.log.info( @@ -74,6 +76,10 @@ class TCI: self.ws.send('audio_stream_samples:1200;') self.ws.send('audio_start:0;') + if message == "tx_enable:1,true": + self.tx_enable = True + self.set_ptt(False) + # tx chrono frame if len(message) in {64}: receiver = message[:4] @@ -315,6 +321,9 @@ class TCI: self.ws.send(f'trx:0;') return self.ptt + def get_tx_enable(self): + """ """ + return self.tx_enable def close_rig(self): """ """ return From 3a45ce4e05f3e76085927d27962bfe2bdd53c193 Mon Sep 17 
00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 13:24:58 +0200 Subject: [PATCH 19/28] revert using tx_enable instead of calcualted tx time --- tnc/modem.py | 18 ++++++++++++------ tnc/tci.py | 16 ++++------------ 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index eab2d6bf..dbf0e75e 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -693,16 +693,22 @@ class RF: # Release our mod_out_lock, so we can use the queue self.mod_out_locked = False - while self.modoutqueue: + # we need to wait manually for tci processing + if static.AUDIO_ENABLE_TCI: + + duration = len(txbuffer_out) / 8000 + timestamp_to_sleep = time.time() + (duration) + self.log.debug("[MDM] TCI calculated duration", duration=duration) + while time.time() < timestamp_to_sleep: + threading.Event().wait(0.01) + else: + timestamp_to_sleep = time.time() + + while self.modoutqueue and time.time() < timestamp_to_sleep: threading.Event().wait(0.01) # if we're transmitting FreeDATA signals, reset channel busy state static.CHANNEL_BUSY = False - # wait until tci protocol released tx state - if static.AUDIO_ENABLE_TCI: - while not self.tci_module.get_tx_enable(): - threading.Event().wait(0.01) - static.PTT_STATE = self.radio.set_ptt(False) # Push ptt state to socket stream diff --git a/tnc/tci.py b/tnc/tci.py index d9326d80..3d9402f7 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -46,8 +46,6 @@ class TCI: self.meter = None self.level = None self.ptt = None - self.tx_enable = False - def connect(self): self.log.info( @@ -76,10 +74,7 @@ class TCI: self.ws.send('audio_stream_samples:1200;') self.ws.send('audio_start:0;') - if message == "tx_enable:1,true": - self.tx_enable = True - self.set_ptt(False) - + if message == "ready;": # tx chrono frame if len(message) in {64}: receiver = message[:4] @@ -143,9 +138,9 @@ class TCI: self.mode = splitted_message[1][:-1] # find ptt - if bytes(message, "utf-8").startswith(b"trx:0,"): - splitted_message = message.split("trx:0,") - self.ptt = splitted_message[1][:-1] + #if bytes(message, "utf-8").startswith(b"trx:0,"): + # splitted_message = message.split("trx:0,") + # self.ptt = splitted_message[1][:-1] # find bandwidth #if message.startswith("rx_filter_band:0,"): @@ -321,9 +316,6 @@ class TCI: self.ws.send(f'trx:0;') return self.ptt - def get_tx_enable(self): - """ """ - return self.tx_enable def close_rig(self): """ """ return From ea8f29ce093ca8d9119ea23e5a565e4543da1e72 Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 13:25:32 +0200 Subject: [PATCH 20/28] revert using tx_enable instead of calcualted tx time --- tnc/tci.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tnc/tci.py b/tnc/tci.py index 3d9402f7..4876729d 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -74,7 +74,6 @@ class TCI: self.ws.send('audio_stream_samples:1200;') self.ws.send('audio_start:0;') - if message == "ready;": # tx chrono frame if len(message) in {64}: receiver = message[:4] From 90f6178c46f7b1b9599e467c40dc054eeb505f0d Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 13:33:43 +0200 Subject: [PATCH 21/28] revert using tx_enable instead of calcualted tx time --- tnc/modem.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index dbf0e75e..bdc2b3d7 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -695,9 +695,8 @@ class RF: # we need to wait manually for tci processing if static.AUDIO_ENABLE_TCI: - 
duration = len(txbuffer_out) / 8000 - timestamp_to_sleep = time.time() + (duration) + timestamp_to_sleep = time.time() + duration self.log.debug("[MDM] TCI calculated duration", duration=duration) while time.time() < timestamp_to_sleep: threading.Event().wait(0.01) From 10d73e91952b6aabb8db29df56fec0c483de67e3 Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 13:34:08 +0200 Subject: [PATCH 22/28] updated version --- tnc/static.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tnc/static.py b/tnc/static.py index 5d1af6cb..7d48fc98 100644 --- a/tnc/static.py +++ b/tnc/static.py @@ -11,7 +11,7 @@ Not nice, suggestions are appreciated :-) import subprocess from enum import Enum -VERSION = "0.8.0-alpha.4" +VERSION = "0.8.0-alpha.4-TCI-exp" ENABLE_EXPLORER = False ENABLE_STATS = False From 318cdf4bdc44382bc7b5950316d69d5be85277d6 Mon Sep 17 00:00:00 2001 From: Sourcery AI <> Date: Wed, 29 Mar 2023 11:35:04 +0000 Subject: [PATCH 23/28] 'Refactored by Sourcery' --- tnc/modem.py | 66 ++++++++++++++++++++++++---------------------------- tnc/tci.py | 2 +- 2 files changed, 32 insertions(+), 36 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index bdc2b3d7..b8a1d2ed 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -66,11 +66,7 @@ class RF: self.AUDIO_FRAMES_PER_BUFFER_RX = 2400 * 2 # 8192 # 8192 Let's do some tests with very small chunks for TX - if not static.AUDIO_ENABLE_TCI: - self.AUDIO_FRAMES_PER_BUFFER_TX = 2400 * 2 - else: - self.AUDIO_FRAMES_PER_BUFFER_TX = 1200 - + self.AUDIO_FRAMES_PER_BUFFER_TX = 1200 if static.AUDIO_ENABLE_TCI else 2400 * 2 # 8 * (self.AUDIO_SAMPLE_RATE_RX/self.MODEM_SAMPLE_RATE) == 48 self.AUDIO_CHANNELS = 1 self.MODE = 0 @@ -105,55 +101,55 @@ class RF: # DATAC0 # SIGNALLING MODE 0 - Used for Connecting - Payload 14 Bytes self.sig0_datac0_freedv, \ - self.sig0_datac0_bytes_per_frame, \ - self.sig0_datac0_bytes_out, \ - self.sig0_datac0_buffer, \ - self.sig0_datac0_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) + self.sig0_datac0_bytes_per_frame, \ + self.sig0_datac0_bytes_out, \ + self.sig0_datac0_buffer, \ + self.sig0_datac0_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) # DATAC0 # SIGNALLING MODE 1 - Used for ACK/NACK - Payload 5 Bytes self.sig1_datac0_freedv, \ - self.sig1_datac0_bytes_per_frame, \ - self.sig1_datac0_bytes_out, \ - self.sig1_datac0_buffer, \ - self.sig1_datac0_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) + self.sig1_datac0_bytes_per_frame, \ + self.sig1_datac0_bytes_out, \ + self.sig1_datac0_buffer, \ + self.sig1_datac0_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC0, None) # DATAC1 self.dat0_datac1_freedv, \ - self.dat0_datac1_bytes_per_frame, \ - self.dat0_datac1_bytes_out, \ - self.dat0_datac1_buffer, \ - self.dat0_datac1_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC1, None) + self.dat0_datac1_bytes_per_frame, \ + self.dat0_datac1_bytes_out, \ + self.dat0_datac1_buffer, \ + self.dat0_datac1_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC1, None) # DATAC3 self.dat0_datac3_freedv, \ - self.dat0_datac3_bytes_per_frame, \ - self.dat0_datac3_bytes_out, \ - self.dat0_datac3_buffer, \ - self.dat0_datac3_nin = \ - self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC3, None) + self.dat0_datac3_bytes_per_frame, \ + self.dat0_datac3_bytes_out, \ + self.dat0_datac3_buffer, \ + self.dat0_datac3_nin = \ + self.init_codec2_mode(codec2.api.FREEDV_MODE_DATAC3, None) # FSK LDPC - 0 
self.fsk_ldpc_freedv_0, \ - self.fsk_ldpc_bytes_per_frame_0, \ - self.fsk_ldpc_bytes_out_0, \ - self.fsk_ldpc_buffer_0, \ - self.fsk_ldpc_nin_0 = \ - self.init_codec2_mode( + self.fsk_ldpc_bytes_per_frame_0, \ + self.fsk_ldpc_bytes_out_0, \ + self.fsk_ldpc_buffer_0, \ + self.fsk_ldpc_nin_0 = \ + self.init_codec2_mode( codec2.api.FREEDV_MODE_FSK_LDPC, codec2.api.FREEDV_MODE_FSK_LDPC_0_ADV ) # FSK LDPC - 1 self.fsk_ldpc_freedv_1, \ - self.fsk_ldpc_bytes_per_frame_1, \ - self.fsk_ldpc_bytes_out_1, \ - self.fsk_ldpc_buffer_1, \ - self.fsk_ldpc_nin_1 = \ - self.init_codec2_mode( + self.fsk_ldpc_bytes_per_frame_1, \ + self.fsk_ldpc_bytes_out_1, \ + self.fsk_ldpc_buffer_1, \ + self.fsk_ldpc_nin_1 = \ + self.init_codec2_mode( codec2.api.FREEDV_MODE_FSK_LDPC, codec2.api.FREEDV_MODE_FSK_LDPC_1_ADV ) diff --git a/tnc/tci.py b/tnc/tci.py index 4876729d..6739d9a1 100644 --- a/tnc/tci.py +++ b/tnc/tci.py @@ -312,7 +312,7 @@ class TCI: def get_ptt(self): """ """ - self.ws.send(f'trx:0;') + self.ws.send('trx:0;') return self.ptt def close_rig(self): From 0758dc53fdc461762568198354804dda3935f99c Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Wed, 29 Mar 2023 13:46:20 +0200 Subject: [PATCH 24/28] attempt fixing ctests --- tnc/modem.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tnc/modem.py b/tnc/modem.py index b8a1d2ed..fa231295 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -699,7 +699,7 @@ class RF: else: timestamp_to_sleep = time.time() - while self.modoutqueue and time.time() < timestamp_to_sleep: + while self.modoutqueue and time.time() < timestamp_to_sleep and not TESTMODE: threading.Event().wait(0.01) # if we're transmitting FreeDATA signals, reset channel busy state static.CHANNEL_BUSY = False From ddf0f09480d952dea48ec908918fa044f7b217ec Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Thu, 30 Mar 2023 08:00:23 +0200 Subject: [PATCH 25/28] attempt fixing ctests --- tnc/modem.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index fa231295..d8787a23 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -683,7 +683,6 @@ class RF: delta_zeros = np.zeros(delta, dtype=np.int16) c = np.append(c, delta_zeros) # self.log.debug("[MDM] mod out shorter than audio buffer", delta=delta) - self.modoutqueue.append(c) # Release our mod_out_lock, so we can use the queue @@ -699,7 +698,15 @@ class RF: else: timestamp_to_sleep = time.time() - while self.modoutqueue and time.time() < timestamp_to_sleep and not TESTMODE: + tci_timeout_reached = False + while self.modoutqueue and not TESTMODE or not tci_timeout_reached: + if static.AUDIO_ENABLE_TCI: + if time.time() < timestamp_to_sleep: + tci_timeout_reached = False + else: + tci_timeout_reached = True + + threading.Event().wait(0.01) # if we're transmitting FreeDATA signals, reset channel busy state static.CHANNEL_BUSY = False From f9dad47c997c88106542b854dce9dc4970edbcb0 Mon Sep 17 00:00:00 2001 From: DJ2LS <75909252+DJ2LS@users.noreply.github.com> Date: Sat, 1 Apr 2023 10:23:39 +0200 Subject: [PATCH 26/28] and another attempt fixing ctests --- tnc/modem.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tnc/modem.py b/tnc/modem.py index d8787a23..d1fcd328 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -623,6 +623,9 @@ class RF: txbuffer += bytes(mod_out_postamble) # Add delay to end of frames + # for TESTMODE we need some additional delay for making ctests stable because of MKFIFO related problems + if TESTMODE: 
+ repeat_delay = 100 samples_delay = int(self.MODEM_SAMPLE_RATE * (repeat_delay / 1000)) # type: ignore mod_out_silence = ctypes.create_string_buffer(samples_delay * 2) txbuffer += bytes(mod_out_silence) From 396e923a8f475422a7d72f4dc673ca4d14479ead Mon Sep 17 00:00:00 2001 From: DJ2LS Date: Sat, 1 Apr 2023 19:40:57 +0200 Subject: [PATCH 27/28] fixed failing ctest bug --- tnc/modem.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index d8787a23..13a11a16 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -693,24 +693,26 @@ class RF: duration = len(txbuffer_out) / 8000 timestamp_to_sleep = time.time() + duration self.log.debug("[MDM] TCI calculated duration", duration=duration) - while time.time() < timestamp_to_sleep: - threading.Event().wait(0.01) + tci_timeout_reached = False + #while time.time() < timestamp_to_sleep: + # threading.Event().wait(0.01) else: timestamp_to_sleep = time.time() + # set tci timeout reached to True for overriding if not used + tci_timeout_reached = True - tci_timeout_reached = False - while self.modoutqueue and not TESTMODE or not tci_timeout_reached: + while self.modoutqueue or not tci_timeout_reached: if static.AUDIO_ENABLE_TCI: if time.time() < timestamp_to_sleep: tci_timeout_reached = False else: tci_timeout_reached = True - threading.Event().wait(0.01) # if we're transmitting FreeDATA signals, reset channel busy state static.CHANNEL_BUSY = False + print("ENDE GELÄNDE") static.PTT_STATE = self.radio.set_ptt(False) # Push ptt state to socket stream From e0b80c050c45c12f06f879281b0a43d467a856ad Mon Sep 17 00:00:00 2001 From: DJ2LS Date: Sat, 1 Apr 2023 19:57:14 +0200 Subject: [PATCH 28/28] cleanup --- tnc/modem.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tnc/modem.py b/tnc/modem.py index 940e0e95..996bcd1b 100644 --- a/tnc/modem.py +++ b/tnc/modem.py @@ -623,9 +623,6 @@ class RF: txbuffer += bytes(mod_out_postamble) # Add delay to end of frames - # for TESTMODE we need some additional delay for making ctests stable because of MKFIFO related problems - if TESTMODE: - repeat_delay = 100 samples_delay = int(self.MODEM_SAMPLE_RATE * (repeat_delay / 1000)) # type: ignore mod_out_silence = ctypes.create_string_buffer(samples_delay * 2) txbuffer += bytes(mod_out_silence) @@ -715,7 +712,6 @@ class RF: # if we're transmitting FreeDATA signals, reset channel busy state static.CHANNEL_BUSY = False - print("ENDE GELÄNDE") static.PTT_STATE = self.radio.set_ptt(False) # Push ptt state to socket stream
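# Illustrative sketch (assumed stand-ins, not part of the patch series): the final
# TX wait combines draining the modulator queue with the TCI time budget, so PTT
# is only released once both conditions are satisfied.
import time
import threading
from collections import deque

modoutqueue = deque()                      # assumed stand-in for self.modoutqueue
AUDIO_ENABLE_TCI = True                    # assumed stand-in for static.AUDIO_ENABLE_TCI
timestamp_to_sleep = time.time() + 0.5     # e.g. 0.5 s of TCI audio still playing

# True right away when TCI is not used, so only the queue condition matters then
tci_timeout_reached = not AUDIO_ENABLE_TCI
while modoutqueue or not tci_timeout_reached:
    if AUDIO_ENABLE_TCI:
        tci_timeout_reached = time.time() >= timestamp_to_sleep
    threading.Event().wait(0.01)
# after the loop: CHANNEL_BUSY is cleared and set_ptt(False) is sent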