From 52b1999eb4046bf8cbcf1f0d1f05b64f38cce7c2 Mon Sep 17 00:00:00 2001
From: Nick Piggott
Date: Tue, 7 Jul 2020 15:57:02 +0100
Subject: [PATCH 01/17] Revert "Merge pull request #1 from dbrignoli/dxb"

This reverts commit 5d8182fcda64650d42ca164e86ff7e13350d89c2, reversing
changes made to 0d74d8f65171cdda2299cd6f2222b435a700ee2b.
---
 src/msc/__init__.py            |  2 -
 src/msc/datagroups/__init__.py | 94 ++++++++++++----------------------
 2 files changed, 34 insertions(+), 62 deletions(-)

diff --git a/src/msc/__init__.py b/src/msc/__init__.py
index 273cf34..da00f87 100644
--- a/src/msc/__init__.py
+++ b/src/msc/__init__.py
@@ -4,8 +4,6 @@
 
 logger = logging.getLogger('msc')
 
-# See Annex E in EN 300 401
-crc16_11021 = crcmod.mkCrcFun(0x11021, 0x0, False, 0xFFFF)
 crcfun = crcmod.predefined.mkPredefinedCrcFun('x25')
 def calculate_crc(data):
     return crcfun(data)
diff --git a/src/msc/datagroups/__init__.py b/src/msc/datagroups/__init__.py
index 961968b..202a0a2 100644
--- a/src/msc/datagroups/__init__.py
+++ b/src/msc/datagroups/__init__.py
@@ -1,4 +1,4 @@
-from msc import bitarray_to_hex, int_to_bitarray, crc16_11021, InvalidCrcError, generate_transport_id
+from msc import bitarray_to_hex, int_to_bitarray, calculate_crc, InvalidCrcError, generate_transport_id
 from mot import DirectoryEncoder, SortedHeaderInformation
 from bitarray import bitarray
 import logging
@@ -341,24 +341,22 @@ def tobytes(self):
         # datagroup header
         bits += bitarray('0')                              # (0): ExtensionFlag - 0=no extension
         bits += bitarray('1' if self.crc_enabled else '0') # (1): CrcFlag - true if there is a CRC at the end of the datagroup
-        bits += bitarray('0' if self.segment_index is None else '1') # (2): SegmentFlag - 1=segment header included
-        bits += bitarray('0' if self.transport_id is None else '1')  # (3): UserAccessFlag - true
+        bits += bitarray('1')                              # (2): SegmentFlag - 1=segment header included
+        bits += bitarray('1')                              # (3): UserAccessFlag - true
         bits += int_to_bitarray(self._type, 4)             # (4-7): DataGroupType
         bits += int_to_bitarray(self.continuity % 16, 4)   # (8-11): ContinuityIndex
         bits += int_to_bitarray(self.repetition, 4)        # (12-15): RepetitionIndex - remaining = 0 (only this once)
 
         # session header
         # segment field
-        if self.segment_index is not None:
-            bits += bitarray('1' if self.last else '0')    # (16): Last - true if the last segment
-            bits += int_to_bitarray(self.segment_index, 15) # (17-32): SegmentNumber
+        bits += bitarray('1' if self.last else '0')        # (16): Last - true if the last segment
+        bits += int_to_bitarray(self.segment_index, 15)    # (17-32): SegmentNumber
 
         # user access field
-        if self.transport_id is not None:
-            bits += bitarray('000')                        # (33-35): RFA
-            bits += bitarray('1')                          # (36): TransportId - true to include Transport ID
-            bits += int_to_bitarray(2, 4)                  # (37-40): LengthIndicator - length of transport Id and End user address fields (will be 2 bytes as only transport ID defined)
-            bits += int_to_bitarray(self._transport_id, 16) # (41-56) transport ID
+        bits += bitarray('000')                            # (33-35): RFA
+        bits += bitarray('1')                              # (36): TransportId - true to include Transport ID
+        bits += int_to_bitarray(2, 4)                      # (37-40): LengthIndicator - length of transport Id and End user address fields (will be 2 bytes as only transport ID defined)
+        bits += int_to_bitarray(self._transport_id, 16)    # (41-56) transport ID
 
         # data field
         tmp = bitarray()
@@ -367,7 +365,7 @@ def tobytes(self):
 
         # CRC
         crc = 0;
-        if self.crc_enabled: crc = crc16_11021(bits.tobytes())
+        if self.crc_enabled: crc = calculate_crc(bits.tobytes())
         bits += int_to_bitarray(crc, 16)
 
         return bits.tobytes()
@@ -375,32 +373,10 @@ def tobytes(self):
     @staticmethod
     def frombits(bits, i=0, check_crc=True):
         """Parse a datagroup from a bitarray, with an optional offset"""
-
-        # use only the slice indicated by the offset
-        bits = bits[i:]
-        ext_flag = bits[0]
-        crc_flag = bits[1]
-        seg_flag = bits[2]
-        uaf_flag = bits[3]
-        header_size = 16
-        if ext_flag:
-            header_size += 16
-        if seg_flag:
-            header_size += 16
-        if uaf_flag:
-            tid_present = bits[header_size+3]
-            uaf_sz = int(bits[header_size+4:header_size+8].to01(), 2)
-            header_size += 8+8*uaf_sz
-        else:
-            tid_present = False
-
-        min_size = header_size
-        if crc_flag:
-            min_size += 16
-        if bits.length() < min_size:
-            raise IncompleteDatagroupError
-
+        # check we have enough header first
+        if (bits.length() - i) < ((9 + 2) * 8): raise IncompleteDatagroupError
+
         # datagroup header
         type = int(bits[4:8].to01(), 2)
         continuity = int(bits[8:12].to01(), 2)
@@ -408,31 +384,29 @@ def frombits(bits, i=0, check_crc=True):
 
         # session header
         # segment field
-        if seg_flag:
-            last = bits[16]
-            segment_index = int(bits[17:32].to01(), 2)
-        else:
-            last = False
-            segment_index = None
-
+        last = bits[16]
+        segment_index = int(bits[17:32].to01(), 2)
+
         # user access field
-        if tid_present:
-            transport_id = int(bits[40:56].to01(), 2)
-        else:
-            transport_id = None
-
-        # extract data and compute CRC
-        if crc_flag:
-            hdr_plus_data = bits[:-16]
+        transport_id = int(bits[40:56].to01(), 2)
+
+        # data segment header
+        size = int(bits[59:72].to01(), 2) # get size to check we have a complete datagroup
+        if bits.length() < 72 + size * 8 + 16: raise IncompleteDatagroupError
+        data = bits[72 : 72 + (size*8)]
+        if check_crc:
+<<<<<<< HEAD
+            crc = int(bits[72 + data.length() : 72 + data.length() + 16].to01(), 2)
+            calculated = calculate_crc(bits[:72+data.length()].tobytes())
+            if crc != calculated: raise InvalidCrcError(crc, bits[:72+data.length() + 16].tobytes())
+=======
+            crc_slice = bits[:bits.length()-16]
             crc = int(bits[bits.length()-16:].to01(), 2)
-        if check_crc and crc != crc16_11021(hdr_plus_data.tobytes()):
-            raise InvalidCrcError(crc, crc_slice.tobytes())
-        else:
-            hdr_plus_data = bits
+            calculated = crc16_11021(crc_slice.tobytes())
+            if crc != calculated: raise InvalidCrcError(crc, crc_slice.tobytes())
+>>>>>>> parent of 8fc21cb... Add support for variable datagroup header size
 
-        datagroup = Datagroup(transport_id, type,
-            hdr_plus_data[header_size:].tobytes(), segment_index, continuity,
-            True, repetition, last)
+        datagroup = Datagroup(transport_id, type, data.tobytes(), segment_index, continuity, True, repetition, last)
         logger.debug('parsed datagroup: %s', datagroup)
 
         return datagroup
@@ -443,7 +417,7 @@ def __str__(self):
         elif self._type == 6: type_description = 'MOT Directory (uncompressed)'
         elif self._type == 7: type_description = 'MOT Directory (compressed)'
         else: type_description = 'unknown'
-        return '[segment=%d bytes], type=%d [%s], transportid=%s, segmentindex=%s, continuity=%d, last=%s' % (len(self._data), self._type, type_description, self._transport_id, self.segment_index, self.continuity, self.last)
+        return '[segment=%d bytes], type=%d [%s], transportid=%d, segmentindex=%d, continuity=%d, last=%s' % (len(self._data), self._type, type_description, self._transport_id, self.segment_index, self.continuity, self.last)
 
     def __repr__(self):
         return '' % str(self)
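
With the variable-size header support reverted, frombits() is back to assuming the fixed layout the rest of the library writes: a 2-byte datagroup header, a 2-byte segment field, a 3-byte user access field ending in the 16-bit transport ID, a 2-byte segment-size field, then the data and a 2-byte CRC (the 7 + 2 + len(data) + 2 sizing used by the Datagroup constructor). As a standalone sketch — not part of the patch series — the same fixed offsets can be read off plain bytes without bitarray:

    def parse_fixed_datagroup_header(raw):
        """Illustrative only: pull the fixed-position fields out of an MSC
        datagroup, mirroring the bit offsets frombits() relies on."""
        dg_type    = raw[0] & 0x0f                     # bits 4-7: DataGroupType
        continuity = (raw[1] >> 4) & 0x0f              # bits 8-11: ContinuityIndex
        last       = bool(raw[2] & 0x80)               # bit 16: last-segment flag
        segment    = ((raw[2] & 0x7f) << 8) | raw[3]   # bits 17-31: SegmentNumber
        transport  = (raw[5] << 8) | raw[6]            # bits 40-55: transport ID
        size       = ((raw[7] & 0x1f) << 8) | raw[8]   # bits 59-71: SegmentSize
        return dg_type, continuity, last, segment, transport, size

These are the same nine header bytes that the (9 + 2) * 8 minimum-length check in frombits() guards before parsing begins.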
From 46373c8f53042f95d9e0d2df6c535bff1118d698 Mon Sep 17 00:00:00 2001
From: Nick Piggott
Date: Tue, 7 Jul 2020 16:03:39 +0100
Subject: [PATCH 02/17] Update __init__.py

Removed the leftover merge-conflict markers (<<<<<<< / ======= /
>>>>>>>) that were accidentally committed as part of the revert in the
previous patch.
---
 src/msc/datagroups/__init__.py | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/src/msc/datagroups/__init__.py b/src/msc/datagroups/__init__.py
index 202a0a2..ce309cb 100644
--- a/src/msc/datagroups/__init__.py
+++ b/src/msc/datagroups/__init__.py
@@ -395,17 +395,10 @@ def frombits(bits, i=0, check_crc=True):
         if bits.length() < 72 + size * 8 + 16: raise IncompleteDatagroupError
         data = bits[72 : 72 + (size*8)]
         if check_crc:
-<<<<<<< HEAD
             crc = int(bits[72 + data.length() : 72 + data.length() + 16].to01(), 2)
             calculated = calculate_crc(bits[:72+data.length()].tobytes())
-            if crc != calculated: raise InvalidCrcError(crc, bits[:72+data.length() + 16].tobytes())
-=======
-            crc_slice = bits[:bits.length()-16]
-            crc = int(bits[bits.length()-16:].to01(), 2)
-            calculated = crc16_11021(crc_slice.tobytes())
-            if crc != calculated: raise InvalidCrcError(crc, crc_slice.tobytes())
->>>>>>> parent of 8fc21cb... Add support for variable datagroup header size
-
+            if crc != calculated: raise InvalidCrcError(crc, bits[:72+data.length() + 16].tobytes())
+
         datagroup = Datagroup(transport_id, type, data.tobytes(), segment_index, continuity, True, repetition, last)
 
         logger.debug('parsed datagroup: %s', datagroup)
From 816c3a07213e7b258004cd5092e10ab5cef95ae3 Mon Sep 17 00:00:00 2001
From: Nick Piggott
Date: Tue, 7 Jul 2020 17:22:04 +0100
Subject: [PATCH 03/17] Update CRC Algorithm

---
 src/msc/__init__.py            | 2 +-
 src/msc/datagroups/__init__.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/msc/__init__.py b/src/msc/__init__.py
index da00f87..dee9dce 100644
--- a/src/msc/__init__.py
+++ b/src/msc/__init__.py
@@ -4,7 +4,7 @@
 
 logger = logging.getLogger('msc')
 
-crcfun = crcmod.predefined.mkPredefinedCrcFun('x25')
+crcfun = crcmod.mkCrcFun(0x11021, 0x0, False, 0xFFFF)
 def calculate_crc(data):
     return crcfun(data)
 
diff --git a/src/msc/datagroups/__init__.py b/src/msc/datagroups/__init__.py
index ce309cb..35bde6b 100644
--- a/src/msc/datagroups/__init__.py
+++ b/src/msc/datagroups/__init__.py
@@ -397,8 +397,8 @@ def frombits(bits, i=0, check_crc=True):
         if check_crc:
             crc = int(bits[72 + data.length() : 72 + data.length() + 16].to01(), 2)
             calculated = calculate_crc(bits[:72+data.length()].tobytes())
-            if crc != calculated: raise InvalidCrcError(crc, bits[:72+data.length() + 16].tobytes()) 
-
+            if crc != calculated: raise InvalidCrcError(crc, bits[:72+data.length() + 16].tobytes())
+
         datagroup = Datagroup(transport_id, type, data.tobytes(), segment_index, continuity, True, repetition, last)
 
         logger.debug('parsed datagroup: %s', datagroup)
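
The arguments to mkCrcFun here describe the CCITT generator polynomial (0x11021, i.e. x^16 + x^12 + x^5 + 1), a zero initial value, no bit reversal, and a final XOR with 0xFFFF; the reverted code in patch 01 carried the comment "See Annex E in EN 300 401" against the same construction, while the 'x25' preset it replaces is the reflected variant with an all-ones initial value, so the two disagree on most inputs. A quick, self-contained way to exercise the function (the input bytes are arbitrary, not taken from the patch):

    import crcmod

    # same construction as the patch: CCITT polynomial, zero init,
    # non-reflected, remainder inverted on output
    crcfun = crcmod.mkCrcFun(0x11021, 0x0, False, 0xFFFF)

    def calculate_crc(data):
        return crcfun(data)

    # print the CRC of the conventional 9-byte check input
    print(hex(calculate_crc(b'123456789')))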
From fc20efa01b713e3a1d865fe484dc901452e1e2a5 Mon Sep 17 00:00:00 2001
From: Nick Piggott
Date: Mon, 7 Mar 2022 09:29:25 +0000
Subject: [PATCH 04/17] Update __init__.py

Amended the calculation of DirectorySize at line 186 to include the 13
bytes of the Directory header plus whatever bytes are used by the
Directory Parameters.
---
 src/msc/datagroups/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/msc/datagroups/__init__.py b/src/msc/datagroups/__init__.py
index 35bde6b..1a867d6 100644
--- a/src/msc/datagroups/__init__.py
+++ b/src/msc/datagroups/__init__.py
@@ -183,7 +183,7 @@ def encode_directorymode(objects, directory_parameters=None, segmenting_strategy
     bits = bitarray()
     bits += bitarray('0')                                # (0): CompressionFlag: This bit shall be set to 0
     bits += bitarray('0')                                # (1): RFU
-    bits += int_to_bitarray(len(entries.tobytes()), 30)  # (2-31): DirectorySize: total size of the MOT directory in bytes
+    bits += int_to_bitarray(len(entries.tobytes()) + 13 + len(directory_params.tobytes()), 30) # (2-31): DirectorySize: total size of the MOT directory in bytes, including the 13 header bytes and length of the directory parameter bytes
     bits += int_to_bitarray(len(objects), 16)            # (32-47): NumberOfObjects: Total number of objects described by the directory
     bits += int_to_bitarray(0, 24)                       # (48-71): DataCarouselPeriod: Max time in tenths of seconds for the data carousel to complete a cycle. Value of zero for undefined
     bits += bitarray('000')                              # (72-74): RFU
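
The 13 fixed bytes come straight from the field widths visible in the encoder: CompressionFlag + RFU + DirectorySize (4 bytes), NumberOfObjects (2), DataCarouselPeriod (3), RFU + SegmentSize (2) and DirectoryExtensionLength (2). A worked example of the corrected calculation, with assumed sizes for everything that is data-dependent:

    # all counts below are illustrative, not taken from the patch
    entry_bytes           = 250   # len(entries.tobytes())
    directory_param_bytes = 4     # len(directory_params.tobytes())
    header_bytes          = 13    # fixed MOT directory header, per the field widths above

    directory_size = entry_bytes + header_bytes + directory_param_bytes
    print(directory_size)         # 267 -> the value written to the 30-bit DirectorySize field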
From 14d2855bc442ee4d611e45c14f5759b8cce53ae4 Mon Sep 17 00:00:00 2001
From: Nick Piggott
Date: Mon, 14 Mar 2022 11:05:59 +0000
Subject: [PATCH 05/17] Amended to make python3 compatible

---
 src/msc/datagroups/__init__.py | 6 +++---
 src/msc/packets/__init__.py    | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/msc/datagroups/__init__.py b/src/msc/datagroups/__init__.py
index 1a867d6..1f5ac2f 100644
--- a/src/msc/datagroups/__init__.py
+++ b/src/msc/datagroups/__init__.py
@@ -268,7 +268,7 @@ def decode_datagroups(data, error_callback=None, check_crc=True, resync=True):
                     buf = buf[i:]
                 except IncompleteDatagroupError:
                     break
-                except InvalidCrcError, ice:
+                except InvalidCrcError as ice:
                     if error_callback: error_callback(ice)
                     buf = buf[8:] # attempt to resync?
                     #i += 8
@@ -292,9 +292,9 @@ def decode_datagroups(data, error_callback=None, check_crc=True, resync=True):
                     datagroup = Datagroup.frombits(buf, i=i, check_crc=check_crc)
                     logger.debug('yielding datagroup: %s', datagroup)
                     yield datagroup
-                except IncompleteDatagroupError, ide:
+                except IncompleteDatagroupError as ide:
                     if error_callback: error_callback(ide)
-                except InvalidCrcError, ice:
+                except InvalidCrcError as ice:
                     if error_callback: error_callback(ice)
                 del buf
                 buf = bitarray()
diff --git a/src/msc/packets/__init__.py b/src/msc/packets/__init__.py
index 9acd105..56bfa54 100644
--- a/src/msc/packets/__init__.py
+++ b/src/msc/packets/__init__.py
@@ -132,7 +132,7 @@ def decode_packets(data, error_callback=None, check_crc=True, resync=True):
                 packet = Packet.frombits(data, i=i, check_crc=check_crc)
                 yield packet
                 i += (size * 8)
-            except InvalidCrcError, ice:
+            except InvalidCrcError as ice:
                 if error_callback: error_callback(ice)
                 if resync: i += 8
                 else: i += (size * 8)
@@ -154,7 +154,7 @@ def decode_packets(data, error_callback=None, check_crc=True, resync=True):
                     i += (size * 8)
                 except IncompletePacketError:
                     break
-                except InvalidCrcError, ice:
+                except InvalidCrcError as ice:
                     if error_callback: error_callback(ice)
                     if resync: i += 8
                     else: i += (size * 8)
@@ -180,7 +180,7 @@ def decode_packets(data, error_callback=None, check_crc=True, resync=True):
                     yield packet
                     i += (size * 8)
                 except IncompletePacketError: break
-                except InvalidCrcError, ice:
+                except InvalidCrcError as ice:
                     if error_callback: error_callback(ice)
                     if resync: i += 8
                     else: i += (size * 8)
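
This patch and the next one address the same two flavours of Python 3 breakage: exception-binding syntax and true division. A compact, standalone illustration of both pitfalls (the values are stand-ins):

    class InvalidCrcError(Exception):
        pass

    # 1. "except InvalidCrcError, ice" was Python 2 only; Python 3 binds
    #    the exception with "as".
    try:
        raise InvalidCrcError('checksum mismatch')
    except InvalidCrcError as ice:
        print('caught:', ice)

    # 2. "/" became true division, so a byte-length expression such as
    #    extension_bits.length() / 8 + 7 now yields a float, which the
    #    '{0:0Nb}' format used by int_to_bitarray() rejects - hence the
    #    int() cast added in the next patch.
    length_bits = 72
    i = length_bits / 8 + 7           # 16.0 under Python 3, not 16
    print('{0:013b}'.format(int(i)))  # int() restores a formattable integer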
From 5654289039b05a2b159594c5073989c1d420a927 Mon Sep 17 00:00:00 2001
From: Nick Piggott
Date: Tue, 15 Mar 2022 10:44:50 +0000
Subject: [PATCH 06/17] Fixes to run properly under Python 3

---
 src/msc/__init__.py                |   6 +-
 src/msc/__init__.py.bak            |  81 +++++
 src/msc/datagroups/__init__.py     |   8 +-
 src/msc/datagroups/__init__.py.bak | 460 +++++++++++++++++++++++++++++
 src/msc/packets/__init__.py        |   2 +-
 src/msc/packets/__init__.py.bak    | 194 ++++++++++++
 6 files changed, 744 insertions(+), 7 deletions(-)
 create mode 100644 src/msc/__init__.py.bak
 create mode 100644 src/msc/datagroups/__init__.py.bak
 create mode 100644 src/msc/packets/__init__.py.bak

diff --git a/src/msc/__init__.py b/src/msc/__init__.py
index dee9dce..4bb9c5d 100644
--- a/src/msc/__init__.py
+++ b/src/msc/__init__.py
@@ -4,7 +4,7 @@
 
 logger = logging.getLogger('msc')
 
-crcfun = crcmod.mkCrcFun(0x11021, 0x0, False, 0xFFFF) 
+crcfun = crcmod.mkCrcFun(0x11021, 0x0, False, 0xFFFF)
 def calculate_crc(data):
     return crcfun(data)
 
@@ -15,7 +15,7 @@ def hex_to_bitarray(hex):
     return b
 
 def int_to_bitarray(i, n):
-    return bitarray(('{0:0%db}' % n).format(i))
+    return bitarray(('{0:0%db}' % n).format(int(i)))
 
 def bitarray_to_int(bits):
     return int(bits.to01(), 2)
@@ -61,7 +61,7 @@ def __init__(self):
 
     def next(self, name=None):
         # first check the cache
-        if name is not None and self.cache.has_key(name):
+        if name is not None and name in self.cache:
             return self.cache.get(name)
 
         # if we've run out then start recycling from the head
diff --git a/src/msc/__init__.py.bak b/src/msc/__init__.py.bak
new file mode 100644
index 0000000..dee9dce
--- /dev/null
+++ b/src/msc/__init__.py.bak
@@ -0,0 +1,81 @@
+import crcmod
+from bitarray import bitarray
+import logging
+
+logger = logging.getLogger('msc')
+
+crcfun = crcmod.mkCrcFun(0x11021, 0x0, False, 0xFFFF)
+def calculate_crc(data):
+    return crcfun(data)
+
+def hex_to_bitarray(hex):
+    b = bitarray()
+    for byte in hex.split(' '):
+        b.extend(int_to_bitarray(int('0x%s' % byte, 16), 8))
+    return b
+
+def int_to_bitarray(i, n):
+    return bitarray(('{0:0%db}' % n).format(i))
+
+def bitarray_to_int(bits):
+    return int(bits.to01(), 2)
+
+def bitarray_to_hex(bits, width=32):
+    if not isinstance(bits, bitarray): raise ValueError('object is not a bitarray')
+    rows = []
+    for i in range(0, len(bits), width*8):
+        rows.append(' '.join(["%02X" % ord(x) for x in bits[i:i+(width*8)].tobytes()]).strip())
+    return '\r\n'.join(rows)
+
+def bitarray_to_binary(bits, width=32):
+    if not isinstance(bits, bitarray): raise ValueError('object is not a bitarray')
+    rows = []
+    for i in range(0, len(bits), width*8):
+        bytes = []
+        for j in range(i, i+(width*8), 8):
+            bytes.append(bits[j:j+8].to01())
+        rows.append(' '.join(bytes))
+    return '\r\n'.join(rows)
+
+class InvalidCrcError(Exception):
+
+    def __init__(self, crc, data):
+        self.crc = crc
+        self.data = data
+
+class TransportIdGenerator():
+    '''interface for classes to generate transport IDs'''
+
+    def next(self, name=None):
+        pass
+
+    def exists(self, id):
+        pass
+
+class MemoryCachedTransportIdGenerator(TransportIdGenerator):
+    '''generates transport IDs cached in memory'''
+
+    def __init__(self):
+        self.ids = []
+        self.cache = {}
+
+    def next(self, name=None):
+        # first check the cache
+        if name is not None and self.cache.has_key(name):
+            return self.cache.get(name)
+
+        # if we've run out then start recycling from the head
+        if len(self.ids) >= (1 << 16) - 1: return self.ids.pop(0)
+        import random
+        id = None
+        while id is None or id in self.ids:
+            id = int(random.random() * (1 << 16))
+        self.ids.append(id)
+        if name is not None: self.cache[name] = id
+
+        return id
+
+# default transport ID generator
+transport_id_generator = MemoryCachedTransportIdGenerator()
+def generate_transport_id(name=None):
+    return transport_id_generator.next(name)
diff --git a/src/msc/datagroups/__init__.py b/src/msc/datagroups/__init__.py
index 1f5ac2f..edd6903 100644
--- a/src/msc/datagroups/__init__.py
+++ b/src/msc/datagroups/__init__.py
@@ -94,7 +94,7 @@ def _segment(data, strategy):
         bits += int_to_bitarray(0, 3)                  # (0-2): Repetition Count remaining (0 = only broadcast)
         bits += int_to_bitarray(len(segment_data), 13) # (3-16): SegmentSize
 
-        segments.append(bits.tobytes() + segment_data)
+        segments.append((bits.tobytes()) + segment_data)
 
         i += segment_size
 
@@ -195,12 +195,14 @@ def encode_directorymode(objects, directory_parameters=None, segmenting_strategy
 
     # add directory entries
     bits += entries
+    print(len(bits.tobytes()))
 
     # segment and add directory datagroups with a new transport ID
     directory_transport_id = generate_transport_id()
     segments = _segment(bits.tobytes(), segmenting_strategy)
     for i, segment in enumerate(segments):
         header_group = Datagroup(directory_transport_id, DIRECTORY_UNCOMPRESSED, segment, i, i%16, last=True if i == len(segments) - 1 else False)
+        print("header group length ",len(header_group.tobytes()))
         tmp = bitarray()
         tmp.frombytes(header_group.tobytes())
         tmp.frombytes(header_group.tobytes())
         datagroups.append(header_group)
@@ -456,5 +458,5 @@ def regenerate(self):
     def __iter__(self):
         return self.iterator
 
-    def next(self):
-        return self.iterator.next()
+    def __next__(self):
+        return next(self.iterator)
diff --git a/src/msc/datagroups/__init__.py.bak b/src/msc/datagroups/__init__.py.bak
new file mode 100644
index 0000000..1f5ac2f
--- /dev/null
+++ b/src/msc/datagroups/__init__.py.bak
@@ -0,0 +1,460 @@
+from msc import bitarray_to_hex, int_to_bitarray, calculate_crc, InvalidCrcError, generate_transport_id
+from mot import DirectoryEncoder, SortedHeaderInformation
+from bitarray import bitarray
+import logging
+import types
+import itertools
+
+logger = logging.getLogger('msc.datagroups')
+
+MAX_SEGMENT_SIZE=8189 # maximum data segment size in bytes
+
+# datagroup types
+HEADER = 3
+BODY = 4
+DIRECTORY_UNCOMPRESSED = 6
+DIRECTORY_COMPRESSED = 7
+
+class SegmentingStrategy:
+
+    def get_next_segment_size(self, data, position, segments):
+        """Returns the suggested maximum size of the next segment"""
+        raise NotImplementedError('strategy has not been implemented properly - expected method: get_next_segment_size(self, data, position, segments)')
+
+class ConstantSegmentSize(SegmentingStrategy):
+    """Strategy to ensure that each segment is the same size, apart
+    from the last one, which may be smaller"""
+
+    def __init__(self, maximum_segment_size=MAX_SEGMENT_SIZE):
+        self.maximum_segment_size = maximum_segment_size
+
+    def get_next_segment_size(self, data, position, segments):
+        return self.maximum_segment_size
+
+class CompletionTriggerSegmentingStrategy(SegmentingStrategy):
+    """Strategy to ensure the last datagroup is small enough to be held within a single packet
+    for triggering via the completion of the total set of datagroups.
+ This is to enable synchronised imagery""" + + def __init__(self, target_final_segment_size, maximum_segment_size=MAX_SEGMENT_SIZE, ): + if target_final_segment_size > maximum_segment_size: raise ValueError('target final segment size must be less than the maximum segment size') + self.maximum_segment_size = maximum_segment_size + +# # calculate the estimated final segment size from parameters +# estimated_final_segment_size = target_final_packet_size +# estimated_final_segment_size -= 2 # packet CRC +# estimated_final_segment_size -= 3 # packet header +# estimated_final_segment_size -= 2 # datagroup CRC +# estimated_final_segment_size -= 7 # datagroup header (typical minimal config) + self.target_final_segment_size = target_final_segment_size + + def calculate_segment_sizes(self, length): + + # need to try for the exact target final segment size, or less + # with equal sizes of the preceding segments - therefore they + # will need to be exactly fitting + X = self.maximum_segment_size + Y = self.target_final_segment_size + while Y > 0: + while X > 0: + if (length - Y + 2) % X == 0: + return X, Y + X -= 1 + Y -= 1 + + def get_next_segment_size(self, data, position, segments): + + if not len(segments): # no calculation done yet + X, Y = self.calculate_segment_sizes(len(data)) + else: + X = len(segments[0]) - 2 + n = 1 + Y = (len(data) / X) % n - 2 + while Y > self.target_final_segment_size: + n += 1 + + if len(data) - position > Y: return X + else: return Y + +def _segment(data, strategy): + + segments = [] + + # partition the segments up using the maximum segment size + i = 0 + if not data: return segments + while i < len(data): + segment_size = strategy.get_next_segment_size(data, i, segments) + + # get segment data + segment_data = data[i:i+segment_size if i+segment_size < len(data) else len(data)] + + # segment header + bits = bitarray() + bits += int_to_bitarray(0, 3) # (0-2): Repetition Count remaining (0 = only broadcast) + bits += int_to_bitarray(len(segment_data), 13) # (3-16): SegmentSize + + segments.append(bits.tobytes() + segment_data) + + i += segment_size + + return segments; + +def encode_headermode(objects, segmenting_strategy=None): + """ + Encode a set of MOT Objects into header mode segments + """ + + datagroups = [] + if not segmenting_strategy: segmenting_strategy=ConstantSegmentSize() + + # backward compatibility + if not isinstance(objects, list): objects = [objects] + logger.debug('encoding %d MOT objects to header mode datagroups', len(objects)) + + for object in objects: + if not object: raise ValueError('object returned is null') + + # split body data into segments + body_data = object.get_body() + body_segments = _segment(body_data, segmenting_strategy) + + # encode header extension parameters + extension_bits = bitarray() + for parameter in object.get_parameters(): + extension_bits += parameter.encode() + + # insert the core parameters into the header + bits = bitarray() + bits += int_to_bitarray(len(body_data) if body_data else 0, 28) # (0-27): BodySize in bytes + bits += int_to_bitarray(extension_bits.length() / 8 + 7, 13) # (28-40): HeaderSize in bytes (core=7 + extension) + bits += int_to_bitarray(object.get_type().type, 6) # (41-46): ContentType + bits += int_to_bitarray(object.get_type().subtype, 9) # (47-55): ContentSubType + bits += extension_bits # (56-n): Header extension data + header_segments = _segment(bits.tobytes(), segmenting_strategy) + + # add header datagroups + for i, segment in enumerate(header_segments): + header_group = 
Datagroup(object.get_transport_id(), HEADER, segment, i, i%16, last=True if i == len(header_segments) - 1 else False) + datagroups.append(header_group) + + # add body datagroups + for i, segment in enumerate(body_segments): + body_group = Datagroup(object.get_transport_id(), BODY, segment, i, i%16, last=True if i == len(body_segments) - 1 else False) + datagroups.append(body_group) + + return datagroups; + + +def encode_directorymode(objects, directory_parameters=None, segmenting_strategy=None): + """ + Encode a set of MOT objects into directory mode segments, along with a segmented + directory object + """ + + datagroups = [] + if not segmenting_strategy: segmenting_strategy=ConstantSegmentSize() + + # build the directory entries + entries = bitarray() + for object in objects: + # encode header extension parameters + extension_bits = bitarray() + for parameter in object.get_parameters(): + extension_bits += parameter.encode() + + # transport ID in first 2 bytes + entries += int_to_bitarray(object.get_transport_id(), 16) + + # add the core parameters into the header + entries += int_to_bitarray(len(object.get_body()), 28) # (0-27): BodySize in bytes + entries += int_to_bitarray(extension_bits.length() / 8 + 7, 13) # (28-40): HeaderSize in bytes (core=7 + extension) + entries += int_to_bitarray(object.get_type().type, 6) # (41-46): ContentType + entries += int_to_bitarray(object.get_type().subtype, 9) # (47-55): ContentSubType + entries += extension_bits # (56-n): Header extension data + + # build directory parameters + directory_params = bitarray() + if directory_parameters is not None: + for parameter in directory_parameters: + directory_params += parameter.encode() + + # build directory header + bits = bitarray() + bits += bitarray('0') # (0): CompressionFlag: This bit shall be set to 0 + bits += bitarray('0') # (1): RFU + bits += int_to_bitarray(len(entries.tobytes()) + 13 + len(directory_params.tobytes()), 30) # (2-31): DirectorySize: total size of the MOT directory in bytes, including the 13 header bytes and length of the directory parameter bytes + bits += int_to_bitarray(len(objects), 16) # (32-47): NumberOfObjects: Total number of objects described by the directory + bits += int_to_bitarray(0, 24) # (48-71): DataCarouselPeriod: Max time in tenths of seconds for the data carousel to complete a cycle. Value of zero for undefined + bits += bitarray('000') # (72-74): RFU + bits += int_to_bitarray(0, 13) # (75-87): SegmentSize: Size in bytes that will be used for the segmentation of objects within the MOT carousel. Value of zero indicates that objects can have different segmentation sizes. The last segment of an obect may be smaller than this size. 
+ bits += int_to_bitarray(len(directory_params.tobytes()), 16) # (88-103): DirectoryExtensionLength: Length of following directory extension bytes + + # add directory parameters + bits += directory_params + + # add directory entries + bits += entries + + # segment and add directory datagroups with a new transport ID + directory_transport_id = generate_transport_id() + segments = _segment(bits.tobytes(), segmenting_strategy) + for i, segment in enumerate(segments): + header_group = Datagroup(directory_transport_id, DIRECTORY_UNCOMPRESSED, segment, i, i%16, last=True if i == len(segments) - 1 else False) + tmp = bitarray() + tmp.frombytes(header_group.tobytes()) + tmp.frombytes(header_group.tobytes()) + datagroups.append(header_group) + + # add body datagroups + for object in objects: + segments = _segment(object.get_body(), segmenting_strategy) + for i, segment in enumerate(segments): + body_group = Datagroup(object.get_transport_id(), BODY, segment, i, i%16, last=True if i == len(segments) - 1 else False) + datagroups.append(body_group) + return datagroups + +import select +def read(fd, n = 1): + poll = select.poll() + poll.register(fd.fileno(), select.POLLIN or select.POLLPRI) + p = poll.poll() + if len(p): + f = p[0] + if f[1] > 0: + return fd.read(n) + +def decode_datagroups(data, error_callback=None, check_crc=True, resync=True): + """ + Generator function to decode datagroups from a bitstream + + The bitstream may be presented as either a bitarray, a file object or a generator + """ + + if isinstance(data, bitarray): + i = 0 + while i < data.length(): + datagroup = Datagroup.frombits(data, i=i, check_crc=check_crc) + yield datagroup + i += (datagroup.size * 8) + elif isinstance(data, file): + logger.debug('decoding datagroups from file: %s', data) + buf = bitarray() + reading = True + while reading: + try: + r = data.read(8) + buf.frombytes(r) + except: + reading = False + logger.exception("error") + if not buf.length(): + logger.debug('buffer is at zero length') + return + i = 0 + #logger.debug('chunking buffer of length %d bytes', buf.length()/8) + length = buf.length()/8 + if length < 9: + continue + size = int(buf[59:72].to01(), 2) + if length < size: + #logger.debug('buffer still not at right size for datagroup size of %d bytes', size) + continue + while i < buf.length(): + try: + datagroup = Datagroup.frombits(buf, i=i, check_crc=check_crc) + yield datagroup + i = (datagroup.size * 8) + buf = buf[i:] + except IncompleteDatagroupError: + break + except (InvalidCrcError, ice): + if error_callback: error_callback(ice) + buf = buf[8:] # attempt to resync? 
+ #i += 8 + + elif isinstance(data, types.GeneratorType): + logger.debug('decoding datagroups from generator: %s', data) + buf = bitarray() + + i = 0 + in_packet = False + for p in data: + if not in_packet and p.first: + in_packet = True + elif not in_packet: continue + + buf.frombytes(p.data) + + if p.last: + logger.debug('got packet %s - buffer now %d bytes', p, buf.length()/8) + try: + datagroup = Datagroup.frombits(buf, i=i, check_crc=check_crc) + logger.debug('yielding datagroup: %s', datagroup) + yield datagroup + except (IncompleteDatagroupError, ide): + if error_callback: error_callback(ide) + except (InvalidCrcError, ice): + if error_callback: error_callback(ice) + del buf + buf = bitarray() + in_packet = False + +class IncompleteDatagroupError(Exception): + pass + +class PaddingDatagroup: + + def __init__(self, delay=0): + self.delay = delay + +class Datagroup: + + def __init__(self, transport_id, type, data, segment_index, continuity, crc_enabled=True, repetition=0, last=False): + self._transport_id = transport_id + self._type = type + self._data = data + self.crc_enabled = crc_enabled + self.continuity = continuity + self.repetition = repetition + self.segment_index = segment_index + self.last = last + self.size = 7 + 2 + len(self._data) + 2 # encoded datagroup size for chunking = [dg header] + [segment header] + [data] + [crc] + + def __eq__(self, other): + if not isinstance(other, Datagroup): return False + return self.get_transport_id() == other.get_transport_id() and self.get_type() == other.get_type() and self.segment_index == other.segment_index + + def get_transport_id(self): + return self._transport_id + + def get_type(self): + return self._type + + def get_data(self): + return self._data + + def tobytes(self): + + bits = bitarray() + + # datagroup header + bits += bitarray('0') # (0): ExtensionFlag - 0=no extension + bits += bitarray('1' if self.crc_enabled else '0') # (1): CrcFlag - true if there is a CRC at the end of the datagroup + bits += bitarray('1') # (2): SegmentFlag - 1=segment header included + bits += bitarray('1') # (3): UserAccessFlag - true + bits += int_to_bitarray(self._type, 4) # (4-7): DataGroupType + bits += int_to_bitarray(self.continuity % 16, 4) # (8-11): ContinuityIndex + bits += int_to_bitarray(self.repetition, 4) # (12-15): RepetitionIndex - remaining = 0 (only this once) + + # session header + # segment field + bits += bitarray('1' if self.last else '0') # (16): Last - true if the last segment + bits += int_to_bitarray(self.segment_index, 15) # (17-32): SegmentNumber + + # user access field + bits += bitarray('000') # (33-35): RFA + bits += bitarray('1') # (36): TransportId - true to include Transport ID + bits += int_to_bitarray(2, 4) # (37-40): LengthIndicator - length of transport Id and End user address fields (will be 2 bytes as only transport ID defined) + bits += int_to_bitarray(self._transport_id, 16) # (41-56) transport ID + + # data field + tmp = bitarray() + tmp.frombytes(self._data) + bits += tmp + + # CRC + crc = 0; + if self.crc_enabled: crc = calculate_crc(bits.tobytes()) + bits += int_to_bitarray(crc, 16) + + return bits.tobytes() + + @staticmethod + def frombits(bits, i=0, check_crc=True): + """Parse a datagroup from a bitarray, with an optional offset""" + + # check we have enough header first + if (bits.length() - i) < ((9 + 2) * 8): raise IncompleteDatagroupError + + # datagroup header + type = int(bits[4:8].to01(), 2) + continuity = int(bits[8:12].to01(), 2) + repetition = int(bits[12:16].to01(), 2) + + # session header + 
# segment field + last = bits[16] + segment_index = int(bits[17:32].to01(), 2) + + # user access field + transport_id = int(bits[40:56].to01(), 2) + + # data segment header + size = int(bits[59:72].to01(), 2) # get size to check we have a complete datagroup + if bits.length() < 72 + size * 8 + 16: raise IncompleteDatagroupError + data = bits[72 : 72 + (size*8)] + if check_crc: + crc = int(bits[72 + data.length() : 72 + data.length() + 16].to01(), 2) + calculated = calculate_crc(bits[:72+data.length()].tobytes()) + if crc != calculated: raise InvalidCrcError(crc, bits[:72+data.length() + 16].tobytes()) + + datagroup = Datagroup(transport_id, type, data.tobytes(), segment_index, continuity, True, repetition, last) + logger.debug('parsed datagroup: %s', datagroup) + + return datagroup + + def __str__(self): + if self._type == 3: type_description = 'MOT Header' + elif self._type == 4: type_description = 'MOT Body' + elif self._type == 6: type_description = 'MOT Directory (uncompressed)' + elif self._type == 7: type_description = 'MOT Directory (compressed)' + else: type_description = 'unknown' + return '[segment=%d bytes], type=%d [%s], transportid=%d, segmentindex=%d, continuity=%d, last=%s' % (len(self._data), self._type, type_description, self._transport_id, self.segment_index, self.continuity, self.last) + + def __repr__(self): + return '' % str(self) + +class DirectoryDatagroupEncoder(DirectoryEncoder): + + def __init__(self, segmenting_strategy=None, single=False): + DirectoryEncoder.__init__(self) + self.segmenting_strategy = segmenting_strategy + self.single = single + self.datagroups = [] + self.regenerate() + + def add(self, object): + if object in self.objects: return False + self.objects.append(object) + self.regenerate() + return True + + def remove(self, object): + if object not in self.objects: return False + self.objects.remove(object) + self.regenerate() + return True + + def clear(self): + self.objects = [] + self.regenerate() + return True + + def set(self, objects): + if objects == self.objects: return False + self.objects = objects + self.regenerate() + return True + + def regenerate(self): + """called when the directory needs to regenerate""" + self.datagroups = encode_directorymode(self.objects, directory_parameters=[SortedHeaderInformation()], segmenting_strategy=self.segmenting_strategy) + if self.single: self.iterator = iter(self.datagroups) + else: self.iterator = itertools.cycle(self.datagroups) + + def __iter__(self): + return self.iterator + + def next(self): + return self.iterator.next() diff --git a/src/msc/packets/__init__.py b/src/msc/packets/__init__.py index 56bfa54..c2fb352 100644 --- a/src/msc/packets/__init__.py +++ b/src/msc/packets/__init__.py @@ -94,7 +94,7 @@ def encode_packets(datagroups, address=None, size=None, continuity=None): def get_continuity_index(address): index=0 - if continuity.has_key(address): + if address in continuity: index = continuity[address] index += 1 if index > 3: index = 0 diff --git a/src/msc/packets/__init__.py.bak b/src/msc/packets/__init__.py.bak new file mode 100644 index 0000000..56bfa54 --- /dev/null +++ b/src/msc/packets/__init__.py.bak @@ -0,0 +1,194 @@ +from bitarray import bitarray +from msc import bitarray_to_hex, int_to_bitarray, calculate_crc, InvalidCrcError +import logging + +logger = logging.getLogger('dabdata.packets') + +class IncompletePacketError(Exception): + pass + +class Packet: + + SIZE_96 = 96 + SIZE_72 = 72 + SIZE_48 = 48 + SIZE_24 = 24 + sizes = [SIZE_24, SIZE_48, SIZE_72, SIZE_96] + + def 
__init__(self, size, address, data, first, last, index): + self.size = size + self.address = address + self.data = data + self.first = first + self.last = last + self.index = index + + def tobytes(self): + + bits = bitarray() + + # build header + bits += int_to_bitarray((self.size / 24) - 1, 2) # (0-1): packet length + bits += int_to_bitarray(self.index, 2) # (2-3): continuity index + bits += bitarray('1' if self.first else '0') # (4): first packet of datagroup series + bits += bitarray('1' if self.last else '0') # (5): last packet of datagroup series + bits += int_to_bitarray(self.address, 10) # (6-15): packet address + bits += bitarray('0') # (16): Command flag = 0 (data) + bits += int_to_bitarray(len(self.data), 7) # (17-23): useful data length + + # add the packet data + tmp = bitarray() + tmp.frombytes(self.data) + bits += tmp # (24-n): packet data + + # add packet padding if needed + bits += bitarray('0'*(self.size - len(self.data) - 5)*8) + + # add CRC + bits += int_to_bitarray(calculate_crc(bits.tobytes()), 16) + + return bits.tobytes() + + @staticmethod + def frombits(bits, i=0, check_crc=True): + """Parse a packet from a bitarray, with an optional offset""" + + size = (int(bits[i+0:i+2].to01(), 2) + 1) * 24 + if (bits.length() - i) < (size * 8): raise IncompletePacketError('length of bitarray is less than passed data length %d bytes < %d bytes', bits.length() / 8, size) + index = int(bits[i+2:i+4].to01(), 2) + first = bits[i+4] + last = bits[i+5] + address = int(bits[i+6:i+16].to01(), 2) + data_length = int(bits[i+17:i+24].to01(), 2) + data = bits[i+24:i+24+(data_length*8)] + crc = int(bits[i + (size * 8) - 16 : i + (size * 8)].to01(), 2) + if check_crc: + calculated = calculate_crc(bits[i + 0 : i +(size * 8) - 16].tobytes()) + if crc != calculated: + raise InvalidCrcError(crc, bits[i + 0 : i +(size * 8)].tobytes()) + packet = Packet(size, address, data.tobytes(), first, last, index) + logger.debug('parsed packet: %s', packet) + + return packet + + def __str__(self): + return 'size=%d, address=%d, first=%s, last=%s, index=%d, data=%d bytes' % (self.size, self.address, self.first, self.last, self.index, len(self.data)) + + def __repr__(self): + return '' % str(self) + +def encode_packets(datagroups, address=None, size=None, continuity=None): + + """ + Encode a set of datagroups into packets + """ + + if not address: address = 1 + if not size: size = Packet.SIZE_96 + if not continuity: continuity = {} + + if address < 1 or address > 1024: raise ValueError('packet address must be greater than zero and less than 1024') + if size not in Packet.sizes: raise ValueError('packet size %d must be one of: %s' % (size, Packet.sizes)) + + packets = [] + + def get_continuity_index(address): + index=0 + if continuity.has_key(address): + index = continuity[address] + index += 1 + if index > 3: index = 0 + continuity[address] = index + return index + + # encode the datagroups into a continuous datastream + for datagroup in datagroups: + data = datagroup.tobytes() + chunk_size = size - 5 + for i in range(0, len(data), chunk_size): + chunk = data[i:i+chunk_size if i+chunk_size < len(data) else len(data)] + packet = Packet(size, address, chunk, True if i == 0 else False, True if i+chunk_size >= len(data) else False, get_continuity_index(address)) + packets.append(packet) + + return packets + +def decode_packets(data, error_callback=None, check_crc=True, resync=True): + + """ + Generator function to decode packets from a bitstream + + The bitstream may be presented as either a bitarray, a file object 
or a socket
+    """
+
+    if isinstance(data, bitarray):
+        logger.debug('decoding packets from bitarray')
+        i = 0
+        while i < data.length():
+            while i < data.length():
+                if data.length() < 2: break
+                size = (int(data[i:i+2].to01(), 2) + 1) * 24
+                if data.length() < (size * 8): break
+                try:
+                    packet = Packet.frombits(data, i=i, check_crc=check_crc)
+                    yield packet
+                    i += (size * 8)
+                except InvalidCrcError as ice:
+                    if error_callback: error_callback(ice)
+                    if resync: i += 8
+                    else: i += (size * 8)
+    elif hasattr(data, 'read'):
+        logger.debug('decoding packets from file: %s', data)
+        buf = bitarray()
+        r = data.read(1024)
+        while len(r):
+            buf.frombytes(r)
+            logger.debug('chunking buffer of length %d bytes', buf.length()/8)
+            i = 0
+            while i < buf.length():
+                if buf.length() < 2: break
+                size = (int(buf[i:i+2].to01(), 2) + 1) * 24
+                if buf.length() < (size * 8): break
+                try:
+                    packet = Packet.frombits(buf, i=i, check_crc=check_crc)
+                    yield packet
+                    i += (size * 8)
+                except IncompletePacketError:
+                    break
+                except InvalidCrcError as ice:
+                    if error_callback: error_callback(ice)
+                    if resync: i += 8
+                    else: i += (size * 8)
+            buf = buf[i:]
+            r = data.read(1024)
+    elif hasattr(data, 'recv'):
+        data.setblocking(True)
+        logger.debug('decoding packets from socket: %s', data)
+        buf = bitarray()
+        r = data.recv(1024)
+        b = bitarray()
+        b.frombytes(r)
+        while len(r):
+            buf.frombytes(r)
+            logger.debug('chunking buffer of length %d bytes', buf.length()/8)
+            i = 0
+            while i < buf.length():
+                if buf.length() < 2: break
+                size = (int(buf[i:i+2].to01(), 2) + 1) * 24
+                if buf.length() < (size * 8): break
+                try:
+                    packet = Packet.frombits(buf, i=i, check_crc=check_crc)
+                    yield packet
+                    i += (size * 8)
+                except IncompletePacketError: break
+                except InvalidCrcError as ice:
+                    if error_callback: error_callback(ice)
+                    if resync: i += 8
+                    else: i += (size * 8)
+            buf = buf[i:]
+            logger.debug('reading from socket')
+            r = data.recv(1024)
+            logger.debug('read %d bytes from socket', len(r))
+    else:
+        raise ValueError('unknown object to decode from: %s' % type(data))
+    logger.debug('finished')
+    return

From e7a986fb3216e0c000412d42bd2acecd5db49601 Mon Sep 17 00:00:00 2001
From: Nick Piggott
Date: Tue, 15 Mar 2022 11:59:13 +0000
Subject: [PATCH 07/17] Removed debugging

---
 src/msc/datagroups/__init__.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/msc/datagroups/__init__.py b/src/msc/datagroups/__init__.py
index edd6903..2202775 100644
--- a/src/msc/datagroups/__init__.py
+++ b/src/msc/datagroups/__init__.py
@@ -195,14 +195,12 @@ def encode_directorymode(objects, directory_parameters=None, segmenting_strategy
 
     # add directory entries
     bits += entries
-    print(len(bits.tobytes()))
 
     # segment and add directory datagroups with a new transport ID
     directory_transport_id = generate_transport_id()
     segments = _segment(bits.tobytes(), segmenting_strategy)
     for i, segment in enumerate(segments):
         header_group = Datagroup(directory_transport_id, DIRECTORY_UNCOMPRESSED, segment, i, i%16, last=True if i == len(segments) - 1 else False)
-        print("header group length ",len(header_group.tobytes()))
         tmp = bitarray()
         tmp.frombytes(header_group.tobytes())
         tmp.frombytes(header_group.tobytes())

From a94e688b9f56e6ecbf6eb8906fe76255359cf67c Mon Sep 17 00:00:00 2001
From: Nick Piggott
Date: Wed, 16 Mar 2022 09:50:57 +0000
Subject: [PATCH 08/17] Removed orphan .bak files

---
 src/msc/datagroups/__init__.py.bak | 460 -----------------------------
 src/msc/packets/__init__.py.bak    | 194 ------------
 2 files changed, 654 deletions(-)
 delete mode 100644 src/msc/datagroups/__init__.py.bak
 delete mode 100644 src/msc/packets/__init__.py.bak
src/msc/datagroups/__init__.py.bak delete mode 100644 src/msc/packets/__init__.py.bak diff --git a/src/msc/datagroups/__init__.py.bak b/src/msc/datagroups/__init__.py.bak deleted file mode 100644 index 1f5ac2f..0000000 --- a/src/msc/datagroups/__init__.py.bak +++ /dev/null @@ -1,460 +0,0 @@ -from msc import bitarray_to_hex, int_to_bitarray, calculate_crc, InvalidCrcError, generate_transport_id -from mot import DirectoryEncoder, SortedHeaderInformation -from bitarray import bitarray -import logging -import types -import itertools - -logger = logging.getLogger('msc.datagroups') - -MAX_SEGMENT_SIZE=8189 # maximum data segment size in bytes - -# datagroup types -HEADER = 3 -BODY = 4 -DIRECTORY_UNCOMPRESSED = 6 -DIRECTORY_COMPRESSED = 7 - -class SegmentingStrategy: - - def get_next_segment_size(self, data, position, segments): - """Returns the suggested maximum size of the next segment""" - raise NotImplementedError('strategy has not been implemented properly - expected method: get_next_segment_size(self, data, position, segments)') - -class ConstantSegmentSize(SegmentingStrategy): - """Strategy to ensure that each segment is the same size, apart - from the last one, which may be smaller""" - - def __init__(self, maximum_segment_size=MAX_SEGMENT_SIZE): - self.maximum_segment_size = maximum_segment_size - - def get_next_segment_size(self, data, position, segments): - return self.maximum_segment_size - -class CompletionTriggerSegmentingStrategy(SegmentingStrategy): - """Strategy to ensure the last datagroup is small enough to be held within a single packet - for triggering via the completion of the total set of datagroups. - This is to enable synchronised imagery""" - - def __init__(self, target_final_segment_size, maximum_segment_size=MAX_SEGMENT_SIZE, ): - if target_final_segment_size > maximum_segment_size: raise ValueError('target final segment size must be less than the maximum segment size') - self.maximum_segment_size = maximum_segment_size - -# # calculate the estimated final segment size from parameters -# estimated_final_segment_size = target_final_packet_size -# estimated_final_segment_size -= 2 # packet CRC -# estimated_final_segment_size -= 3 # packet header -# estimated_final_segment_size -= 2 # datagroup CRC -# estimated_final_segment_size -= 7 # datagroup header (typical minimal config) - self.target_final_segment_size = target_final_segment_size - - def calculate_segment_sizes(self, length): - - # need to try for the exact target final segment size, or less - # with equal sizes of the preceding segments - therefore they - # will need to be exactly fitting - X = self.maximum_segment_size - Y = self.target_final_segment_size - while Y > 0: - while X > 0: - if (length - Y + 2) % X == 0: - return X, Y - X -= 1 - Y -= 1 - - def get_next_segment_size(self, data, position, segments): - - if not len(segments): # no calculation done yet - X, Y = self.calculate_segment_sizes(len(data)) - else: - X = len(segments[0]) - 2 - n = 1 - Y = (len(data) / X) % n - 2 - while Y > self.target_final_segment_size: - n += 1 - - if len(data) - position > Y: return X - else: return Y - -def _segment(data, strategy): - - segments = [] - - # partition the segments up using the maximum segment size - i = 0 - if not data: return segments - while i < len(data): - segment_size = strategy.get_next_segment_size(data, i, segments) - - # get segment data - segment_data = data[i:i+segment_size if i+segment_size < len(data) else len(data)] - - # segment header - bits = bitarray() - bits += int_to_bitarray(0, 3) # 
(0-2): Repetition Count remaining (0 = only broadcast) - bits += int_to_bitarray(len(segment_data), 13) # (3-16): SegmentSize - - segments.append(bits.tobytes() + segment_data) - - i += segment_size - - return segments; - -def encode_headermode(objects, segmenting_strategy=None): - """ - Encode a set of MOT Objects into header mode segments - """ - - datagroups = [] - if not segmenting_strategy: segmenting_strategy=ConstantSegmentSize() - - # backward compatibility - if not isinstance(objects, list): objects = [objects] - logger.debug('encoding %d MOT objects to header mode datagroups', len(objects)) - - for object in objects: - if not object: raise ValueError('object returned is null') - - # split body data into segments - body_data = object.get_body() - body_segments = _segment(body_data, segmenting_strategy) - - # encode header extension parameters - extension_bits = bitarray() - for parameter in object.get_parameters(): - extension_bits += parameter.encode() - - # insert the core parameters into the header - bits = bitarray() - bits += int_to_bitarray(len(body_data) if body_data else 0, 28) # (0-27): BodySize in bytes - bits += int_to_bitarray(extension_bits.length() / 8 + 7, 13) # (28-40): HeaderSize in bytes (core=7 + extension) - bits += int_to_bitarray(object.get_type().type, 6) # (41-46): ContentType - bits += int_to_bitarray(object.get_type().subtype, 9) # (47-55): ContentSubType - bits += extension_bits # (56-n): Header extension data - header_segments = _segment(bits.tobytes(), segmenting_strategy) - - # add header datagroups - for i, segment in enumerate(header_segments): - header_group = Datagroup(object.get_transport_id(), HEADER, segment, i, i%16, last=True if i == len(header_segments) - 1 else False) - datagroups.append(header_group) - - # add body datagroups - for i, segment in enumerate(body_segments): - body_group = Datagroup(object.get_transport_id(), BODY, segment, i, i%16, last=True if i == len(body_segments) - 1 else False) - datagroups.append(body_group) - - return datagroups; - - -def encode_directorymode(objects, directory_parameters=None, segmenting_strategy=None): - """ - Encode a set of MOT objects into directory mode segments, along with a segmented - directory object - """ - - datagroups = [] - if not segmenting_strategy: segmenting_strategy=ConstantSegmentSize() - - # build the directory entries - entries = bitarray() - for object in objects: - # encode header extension parameters - extension_bits = bitarray() - for parameter in object.get_parameters(): - extension_bits += parameter.encode() - - # transport ID in first 2 bytes - entries += int_to_bitarray(object.get_transport_id(), 16) - - # add the core parameters into the header - entries += int_to_bitarray(len(object.get_body()), 28) # (0-27): BodySize in bytes - entries += int_to_bitarray(extension_bits.length() / 8 + 7, 13) # (28-40): HeaderSize in bytes (core=7 + extension) - entries += int_to_bitarray(object.get_type().type, 6) # (41-46): ContentType - entries += int_to_bitarray(object.get_type().subtype, 9) # (47-55): ContentSubType - entries += extension_bits # (56-n): Header extension data - - # build directory parameters - directory_params = bitarray() - if directory_parameters is not None: - for parameter in directory_parameters: - directory_params += parameter.encode() - - # build directory header - bits = bitarray() - bits += bitarray('0') # (0): CompressionFlag: This bit shall be set to 0 - bits += bitarray('0') # (1): RFU - bits += int_to_bitarray(len(entries.tobytes()) + 13 + 
len(directory_params.tobytes()), 30) # (2-31): DirectorySize: total size of the MOT directory in bytes, including the 13 header bytes and length of the directory parameter bytes - bits += int_to_bitarray(len(objects), 16) # (32-47): NumberOfObjects: Total number of objects described by the directory - bits += int_to_bitarray(0, 24) # (48-71): DataCarouselPeriod: Max time in tenths of seconds for the data carousel to complete a cycle. Value of zero for undefined - bits += bitarray('000') # (72-74): RFU - bits += int_to_bitarray(0, 13) # (75-87): SegmentSize: Size in bytes that will be used for the segmentation of objects within the MOT carousel. Value of zero indicates that objects can have different segmentation sizes. The last segment of an obect may be smaller than this size. - bits += int_to_bitarray(len(directory_params.tobytes()), 16) # (88-103): DirectoryExtensionLength: Length of following directory extension bytes - - # add directory parameters - bits += directory_params - - # add directory entries - bits += entries - - # segment and add directory datagroups with a new transport ID - directory_transport_id = generate_transport_id() - segments = _segment(bits.tobytes(), segmenting_strategy) - for i, segment in enumerate(segments): - header_group = Datagroup(directory_transport_id, DIRECTORY_UNCOMPRESSED, segment, i, i%16, last=True if i == len(segments) - 1 else False) - tmp = bitarray() - tmp.frombytes(header_group.tobytes()) - tmp.frombytes(header_group.tobytes()) - datagroups.append(header_group) - - # add body datagroups - for object in objects: - segments = _segment(object.get_body(), segmenting_strategy) - for i, segment in enumerate(segments): - body_group = Datagroup(object.get_transport_id(), BODY, segment, i, i%16, last=True if i == len(segments) - 1 else False) - datagroups.append(body_group) - return datagroups - -import select -def read(fd, n = 1): - poll = select.poll() - poll.register(fd.fileno(), select.POLLIN or select.POLLPRI) - p = poll.poll() - if len(p): - f = p[0] - if f[1] > 0: - return fd.read(n) - -def decode_datagroups(data, error_callback=None, check_crc=True, resync=True): - """ - Generator function to decode datagroups from a bitstream - - The bitstream may be presented as either a bitarray, a file object or a generator - """ - - if isinstance(data, bitarray): - i = 0 - while i < data.length(): - datagroup = Datagroup.frombits(data, i=i, check_crc=check_crc) - yield datagroup - i += (datagroup.size * 8) - elif isinstance(data, file): - logger.debug('decoding datagroups from file: %s', data) - buf = bitarray() - reading = True - while reading: - try: - r = data.read(8) - buf.frombytes(r) - except: - reading = False - logger.exception("error") - if not buf.length(): - logger.debug('buffer is at zero length') - return - i = 0 - #logger.debug('chunking buffer of length %d bytes', buf.length()/8) - length = buf.length()/8 - if length < 9: - continue - size = int(buf[59:72].to01(), 2) - if length < size: - #logger.debug('buffer still not at right size for datagroup size of %d bytes', size) - continue - while i < buf.length(): - try: - datagroup = Datagroup.frombits(buf, i=i, check_crc=check_crc) - yield datagroup - i = (datagroup.size * 8) - buf = buf[i:] - except IncompleteDatagroupError: - break - except (InvalidCrcError, ice): - if error_callback: error_callback(ice) - buf = buf[8:] # attempt to resync? 
- #i += 8 - - elif isinstance(data, types.GeneratorType): - logger.debug('decoding datagroups from generator: %s', data) - buf = bitarray() - - i = 0 - in_packet = False - for p in data: - if not in_packet and p.first: - in_packet = True - elif not in_packet: continue - - buf.frombytes(p.data) - - if p.last: - logger.debug('got packet %s - buffer now %d bytes', p, buf.length()/8) - try: - datagroup = Datagroup.frombits(buf, i=i, check_crc=check_crc) - logger.debug('yielding datagroup: %s', datagroup) - yield datagroup - except (IncompleteDatagroupError, ide): - if error_callback: error_callback(ide) - except (InvalidCrcError, ice): - if error_callback: error_callback(ice) - del buf - buf = bitarray() - in_packet = False - -class IncompleteDatagroupError(Exception): - pass - -class PaddingDatagroup: - - def __init__(self, delay=0): - self.delay = delay - -class Datagroup: - - def __init__(self, transport_id, type, data, segment_index, continuity, crc_enabled=True, repetition=0, last=False): - self._transport_id = transport_id - self._type = type - self._data = data - self.crc_enabled = crc_enabled - self.continuity = continuity - self.repetition = repetition - self.segment_index = segment_index - self.last = last - self.size = 7 + 2 + len(self._data) + 2 # encoded datagroup size for chunking = [dg header] + [segment header] + [data] + [crc] - - def __eq__(self, other): - if not isinstance(other, Datagroup): return False - return self.get_transport_id() == other.get_transport_id() and self.get_type() == other.get_type() and self.segment_index == other.segment_index - - def get_transport_id(self): - return self._transport_id - - def get_type(self): - return self._type - - def get_data(self): - return self._data - - def tobytes(self): - - bits = bitarray() - - # datagroup header - bits += bitarray('0') # (0): ExtensionFlag - 0=no extension - bits += bitarray('1' if self.crc_enabled else '0') # (1): CrcFlag - true if there is a CRC at the end of the datagroup - bits += bitarray('1') # (2): SegmentFlag - 1=segment header included - bits += bitarray('1') # (3): UserAccessFlag - true - bits += int_to_bitarray(self._type, 4) # (4-7): DataGroupType - bits += int_to_bitarray(self.continuity % 16, 4) # (8-11): ContinuityIndex - bits += int_to_bitarray(self.repetition, 4) # (12-15): RepetitionIndex - remaining = 0 (only this once) - - # session header - # segment field - bits += bitarray('1' if self.last else '0') # (16): Last - true if the last segment - bits += int_to_bitarray(self.segment_index, 15) # (17-32): SegmentNumber - - # user access field - bits += bitarray('000') # (33-35): RFA - bits += bitarray('1') # (36): TransportId - true to include Transport ID - bits += int_to_bitarray(2, 4) # (37-40): LengthIndicator - length of transport Id and End user address fields (will be 2 bytes as only transport ID defined) - bits += int_to_bitarray(self._transport_id, 16) # (41-56) transport ID - - # data field - tmp = bitarray() - tmp.frombytes(self._data) - bits += tmp - - # CRC - crc = 0; - if self.crc_enabled: crc = calculate_crc(bits.tobytes()) - bits += int_to_bitarray(crc, 16) - - return bits.tobytes() - - @staticmethod - def frombits(bits, i=0, check_crc=True): - """Parse a datagroup from a bitarray, with an optional offset""" - - # check we have enough header first - if (bits.length() - i) < ((9 + 2) * 8): raise IncompleteDatagroupError - - # datagroup header - type = int(bits[4:8].to01(), 2) - continuity = int(bits[8:12].to01(), 2) - repetition = int(bits[12:16].to01(), 2) - - # session header - 
# segment field - last = bits[16] - segment_index = int(bits[17:32].to01(), 2) - - # user access field - transport_id = int(bits[40:56].to01(), 2) - - # data segment header - size = int(bits[59:72].to01(), 2) # get size to check we have a complete datagroup - if bits.length() < 72 + size * 8 + 16: raise IncompleteDatagroupError - data = bits[72 : 72 + (size*8)] - if check_crc: - crc = int(bits[72 + data.length() : 72 + data.length() + 16].to01(), 2) - calculated = calculate_crc(bits[:72+data.length()].tobytes()) - if crc != calculated: raise InvalidCrcError(crc, bits[:72+data.length() + 16].tobytes()) - - datagroup = Datagroup(transport_id, type, data.tobytes(), segment_index, continuity, True, repetition, last) - logger.debug('parsed datagroup: %s', datagroup) - - return datagroup - - def __str__(self): - if self._type == 3: type_description = 'MOT Header' - elif self._type == 4: type_description = 'MOT Body' - elif self._type == 6: type_description = 'MOT Directory (uncompressed)' - elif self._type == 7: type_description = 'MOT Directory (compressed)' - else: type_description = 'unknown' - return '[segment=%d bytes], type=%d [%s], transportid=%d, segmentindex=%d, continuity=%d, last=%s' % (len(self._data), self._type, type_description, self._transport_id, self.segment_index, self.continuity, self.last) - - def __repr__(self): - return '<%s>' % str(self) - -class DirectoryDatagroupEncoder(DirectoryEncoder): - - def __init__(self, segmenting_strategy=None, single=False): - DirectoryEncoder.__init__(self) - self.segmenting_strategy = segmenting_strategy - self.single = single - self.datagroups = [] - self.regenerate() - - def add(self, object): - if object in self.objects: return False - self.objects.append(object) - self.regenerate() - return True - - def remove(self, object): - if object not in self.objects: return False - self.objects.remove(object) - self.regenerate() - return True - - def clear(self): - self.objects = [] - self.regenerate() - return True - - def set(self, objects): - if objects == self.objects: return False - self.objects = objects - self.regenerate() - return True - - def regenerate(self): - """called when the directory needs to regenerate""" - self.datagroups = encode_directorymode(self.objects, directory_parameters=[SortedHeaderInformation()], segmenting_strategy=self.segmenting_strategy) - if self.single: self.iterator = iter(self.datagroups) - else: self.iterator = itertools.cycle(self.datagroups) - - def __iter__(self): - return self.iterator - - def next(self): - return self.iterator.next() diff --git a/src/msc/packets/__init__.py.bak b/src/msc/packets/__init__.py.bak deleted file mode 100644 index 56bfa54..0000000 --- a/src/msc/packets/__init__.py.bak +++ /dev/null @@ -1,194 +0,0 @@ -from bitarray import bitarray -from msc import bitarray_to_hex, int_to_bitarray, calculate_crc, InvalidCrcError -import logging - -logger = logging.getLogger('dabdata.packets') - -class IncompletePacketError(Exception): - pass - -class Packet: - - SIZE_96 = 96 - SIZE_72 = 72 - SIZE_48 = 48 - SIZE_24 = 24 - sizes = [SIZE_24, SIZE_48, SIZE_72, SIZE_96] - - def __init__(self, size, address, data, first, last, index): - self.size = size - self.address = address - self.data = data - self.first = first - self.last = last - self.index = index - - def tobytes(self): - - bits = bitarray() - - # build header - bits += int_to_bitarray((self.size / 24) - 1, 2) # (0-1): packet length - bits += int_to_bitarray(self.index, 2) # (2-3): continuity index - bits += bitarray('1' if self.first else '0') # 
(4): first packet of datagroup series - bits += bitarray('1' if self.last else '0') # (5): last packet of datagroup series - bits += int_to_bitarray(self.address, 10) # (6-15): packet address - bits += bitarray('0') # (16): Command flag = 0 (data) - bits += int_to_bitarray(len(self.data), 7) # (17-23): useful data length - - # add the packet data - tmp = bitarray() - tmp.frombytes(self.data) - bits += tmp # (24-n): packet data - - # add packet padding if needed - bits += bitarray('0'*(self.size - len(self.data) - 5)*8) - - # add CRC - bits += int_to_bitarray(calculate_crc(bits.tobytes()), 16) - - return bits.tobytes() - - @staticmethod - def frombits(bits, i=0, check_crc=True): - """Parse a packet from a bitarray, with an optional offset""" - - size = (int(bits[i+0:i+2].to01(), 2) + 1) * 24 - if (bits.length() - i) < (size * 8): raise IncompletePacketError('length of bitarray is less than passed data length %d bytes < %d bytes', bits.length() / 8, size) - index = int(bits[i+2:i+4].to01(), 2) - first = bits[i+4] - last = bits[i+5] - address = int(bits[i+6:i+16].to01(), 2) - data_length = int(bits[i+17:i+24].to01(), 2) - data = bits[i+24:i+24+(data_length*8)] - crc = int(bits[i + (size * 8) - 16 : i + (size * 8)].to01(), 2) - if check_crc: - calculated = calculate_crc(bits[i + 0 : i +(size * 8) - 16].tobytes()) - if crc != calculated: - raise InvalidCrcError(crc, bits[i + 0 : i +(size * 8)].tobytes()) - packet = Packet(size, address, data.tobytes(), first, last, index) - logger.debug('parsed packet: %s', packet) - - return packet - - def __str__(self): - return 'size=%d, address=%d, first=%s, last=%s, index=%d, data=%d bytes' % (self.size, self.address, self.first, self.last, self.index, len(self.data)) - - def __repr__(self): - return '<%s>' % str(self) - -def encode_packets(datagroups, address=None, size=None, continuity=None): - - """ - Encode a set of datagroups into packets - """ - - if not address: address = 1 - if not size: size = Packet.SIZE_96 - if not continuity: continuity = {} - - if address < 1 or address > 1024: raise ValueError('packet address must be greater than zero and less than 1024') - if size not in Packet.sizes: raise ValueError('packet size %d must be one of: %s' % (size, Packet.sizes)) - - packets = [] - - def get_continuity_index(address): - index=0 - if continuity.has_key(address): - index = continuity[address] - index += 1 - if index > 3: index = 0 - continuity[address] = index - return index - - # encode the datagroups into a continuous datastream - for datagroup in datagroups: - data = datagroup.tobytes() - chunk_size = size - 5 - for i in range(0, len(data), chunk_size): - chunk = data[i:i+chunk_size if i+chunk_size < len(data) else len(data)] - packet = Packet(size, address, chunk, True if i == 0 else False, True if i+chunk_size >= len(data) else False, get_continuity_index(address)) - packets.append(packet) - - return packets - -def decode_packets(data, error_callback=None, check_crc=True, resync=True): - - """ - Generator function to decode packets from a bitstream - - The bitstream may be presented as either a bitarray, a file object or a socket - """ - - if isinstance(data, bitarray): - logger.debug('decoding packets from bitarray') - i = 0 - while i < data.length(): - while i < data.length(): - if data.length() < 2: break - size = (int(data[i:i+2].to01(), 2) + 1) * 24 - if data.length() < (size * 8): break - try: - packet = Packet.frombits(data, i=i, check_crc=check_crc) - yield packet - i += (size * 8) - except (InvalidCrcError, ice): - if error_callback: 
error_callback(ice) - if resync: i += 8 - else: i += (size * 8) - elif hasattr(data, 'read'): - logger.debug('decoding packets from file: %s', data) - buf = bitarray() - r = data.read(1024) - while len(r): - buf.frombytes(r) - logger.debug('chunking buffer of length %d bytes', buf.length()/8) - i = 0 - while i < buf.length(): - if buf.length() < 2: break - size = (int(buf[i:i+2].to01(), 2) + 1) * 24 - if buf.length() < (size * 8): break - try: - packet = Packet.frombits(buf, i=i, check_crc=check_crc) - yield packet - i += (size * 8) - except IncompletePacketError: - break - except (InvalidCrcError, ice): - if error_callback: error_callback(ice) - if resync: i += 8 - else: i += (size * 8) - buf = buf[i:] - r = data.read(1024) - elif hasattr(data, 'recv'): - data.setblocking(True) - logger.debug('decoding packets from socket: %s', data) - buf = bitarray() - r = data.recv(1024) - b = bitarray() - b.frombytes(r) - while len(r): - buf.frombytes(r) - logger.debug('chunking buffer of length %d bytes', buf.length()/8) - i = 0 - while i < buf.length(): - if buf.length() < 2: break - size = (int(buf[i:i+2].to01(), 2) + 1) * 24 - if buf.length() < (size * 8): break - try: - packet = Packet.frombits(buf, i=i, check_crc=check_crc) - yield packet - i += (size * 8) - except IncompletePacketError: break - except (InvalidCrcError, ice): - if error_callback: error_callback(ice) - if resync: i += 8 - else: i += (size * 8) - buf = buf[i:] - logger.debug('reading from socket') - r = data.recv(1024) - logger.debug('read %d bytes from socket', len(r)) - else: - raise ValueError('unknown object to decode from: %s' % type(data)) - logger.debug('finished') - return From a7dcccc3b0f9f6f88243ae87fb78896053f578ea Mon Sep 17 00:00:00 2001 From: Nick Piggott Date: Wed, 16 Mar 2022 09:59:14 +0000 Subject: [PATCH 09/17] Changes to support latest versions of bitarray --- setup.py | 5 +++-- src/msc/datagroups/__init__.py | 26 +++++++++++++------------- src/msc/packets/__init__.py | 26 +++++++++++++------------- src/msc/test/test_datagroups.py | 2 +- 4 files changed, 30 insertions(+), 29 deletions(-) diff --git a/setup.py b/setup.py index e1391a8..6d9f3e3 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -from distutils.core import setup +from setuptools import setup setup(name='dabmsc', version='1.0.1', @@ -11,5 +11,6 @@ download_url='https://github.com/GlobalRadio/python-dabmsc/tarball/1.0.1', packages=['msc', 'msc.datagroups', 'msc.packets'], package_dir = {'' : 'src'}, - keywords = ['dab', 'msc', 'radio'] + keywords = ['dab', 'msc', 'radio'], + install_requires = ['bitarray'] ) diff --git a/src/msc/datagroups/__init__.py b/src/msc/datagroups/__init__.py index 2202775..f71da18 100644 --- a/src/msc/datagroups/__init__.py +++ b/src/msc/datagroups/__init__.py @@ -127,7 +127,7 @@ def encode_headermode(objects, segmenting_strategy=None): # insert the core parameters into the header bits = bitarray() bits += int_to_bitarray(len(body_data) if body_data else 0, 28) # (0-27): BodySize in bytes - bits += int_to_bitarray(extension_bits.length() / 8 + 7, 13) # (28-40): HeaderSize in bytes (core=7 + extension) + bits += int_to_bitarray(len(extension_bits) / 8 + 7, 13) # (28-40): HeaderSize in bytes (core=7 + extension) bits += int_to_bitarray(object.get_type().type, 6) # (41-46): ContentType bits += int_to_bitarray(object.get_type().subtype, 9) # (47-55): ContentSubType bits += extension_bits # (56-n): Header extension data @@ -168,7 +168,7 @@ def encode_directorymode(objects, directory_parameters=None, 
segmenting_strategy # add the core parameters into the header entries += int_to_bitarray(len(object.get_body()), 28) # (0-27): BodySize in bytes - entries += int_to_bitarray(extension_bits.length() / 8 + 7, 13) # (28-40): HeaderSize in bytes (core=7 + extension) + entries += int_to_bitarray(len(extension_bits) / 8 + 7, 13) # (28-40): HeaderSize in bytes (core=7 + extension) entries += int_to_bitarray(object.get_type().type, 6) # (41-46): ContentType entries += int_to_bitarray(object.get_type().subtype, 9) # (47-55): ContentSubType entries += extension_bits # (56-n): Header extension data @@ -233,7 +233,7 @@ def decode_datagroups(data, error_callback=None, check_crc=True, resync=True): if isinstance(data, bitarray): i = 0 - while i < data.length(): + while i < len(data): datagroup = Datagroup.frombits(data, i=i, check_crc=check_crc) yield datagroup i += (datagroup.size * 8) @@ -248,19 +248,19 @@ def decode_datagroups(data, error_callback=None, check_crc=True, resync=True): except: reading = False logger.exception("error") - if not buf.length(): + if not len(buf): logger.debug('buffer is at zero length') return i = 0 - #logger.debug('chunking buffer of length %d bytes', buf.length()/8) - length = buf.length()/8 + #logger.debug('chunking buffer of length %d bytes', len(buf)/8) + length = len(buf)/8 if length < 9: continue size = int(buf[59:72].to01(), 2) if length < size: #logger.debug('buffer still not at right size for datagroup size of %d bytes', size) continue - while i < buf.length(): + while i < len(buf): try: datagroup = Datagroup.frombits(buf, i=i, check_crc=check_crc) yield datagroup @@ -287,7 +287,7 @@ def decode_datagroups(data, error_callback=None, check_crc=True, resync=True): buf.frombytes(p.data) if p.last: - logger.debug('got packet %s - buffer now %d bytes', p, buf.length()/8) + logger.debug('got packet %s - buffer now %d bytes', p, len(buf)/8) try: datagroup = Datagroup.frombits(buf, i=i, check_crc=check_crc) logger.debug('yielding datagroup: %s', datagroup) @@ -375,7 +375,7 @@ def frombits(bits, i=0, check_crc=True): """Parse a datagroup from a bitarray, with an optional offset""" # check we have enough header first - if (bits.length() - i) < ((9 + 2) * 8): raise IncompleteDatagroupError + if (len(bits) - i) < ((9 + 2) * 8): raise IncompleteDatagroupError # datagroup header type = int(bits[4:8].to01(), 2) @@ -392,12 +392,12 @@ def frombits(bits, i=0, check_crc=True): # data segment header size = int(bits[59:72].to01(), 2) # get size to check we have a complete datagroup - if bits.length() < 72 + size * 8 + 16: raise IncompleteDatagroupError + if len(bits) < 72 + size * 8 + 16: raise IncompleteDatagroupError data = bits[72 : 72 + (size*8)] if check_crc: - crc = int(bits[72 + data.length() : 72 + data.length() + 16].to01(), 2) - calculated = calculate_crc(bits[:72+data.length()].tobytes()) - if crc != calculated: raise InvalidCrcError(crc, bits[:72+data.length() + 16].tobytes()) + crc = int(bits[72 + len(data) : 72 + len(data) + 16].to01(), 2) + calculated = calculate_crc(bits[:72+len(data)].tobytes()) + if crc != calculated: raise InvalidCrcError(crc, bits[:72+len(data) + 16].tobytes()) datagroup = Datagroup(transport_id, type, data.tobytes(), segment_index, continuity, True, repetition, last) logger.debug('parsed datagroup: %s', datagroup) diff --git a/src/msc/packets/__init__.py b/src/msc/packets/__init__.py index c2fb352..634aeb2 100644 --- a/src/msc/packets/__init__.py +++ b/src/msc/packets/__init__.py @@ -54,7 +54,7 @@ def frombits(bits, i=0, check_crc=True): """Parse a 
packet from a bitarray, with an optional offset""" size = (int(bits[i+0:i+2].to01(), 2) + 1) * 24 - if (bits.length() - i) < (size * 8): raise IncompletePacketError('length of bitarray is less than passed data length %d bytes < %d bytes', bits.length() / 8, size) + if (len(bits) - i) < (size * 8): raise IncompletePacketError('length of bitarray is less than passed data length %d bytes < %d bytes', len(bits) / 8, size) index = int(bits[i+2:i+4].to01(), 2) first = bits[i+4] last = bits[i+5] @@ -123,11 +123,11 @@ def decode_packets(data, error_callback=None, check_crc=True, resync=True): if isinstance(data, bitarray): logger.debug('decoding packets from bitarray') i = 0 - while i < data.length(): - while i < data.length(): - if data.length() < 2: break + while i < len(data): + while i < len(data): + if len(data) < 2: break size = (int(data[i:i+2].to01(), 2) + 1) * 24 - if data.length() < (size * 8): break + if len(data) < (size * 8): break try: packet = Packet.frombits(data, i=i, check_crc=check_crc) yield packet @@ -142,12 +142,12 @@ def decode_packets(data, error_callback=None, check_crc=True, resync=True): r = data.read(1024) while len(r): buf.frombytes(r) - logger.debug('chunking buffer of length %d bytes', buf.length()/8) + logger.debug('chunking buffer of length %d bytes', len(buf)/8) i = 0 - while i < buf.length(): - if buf.length() < 2: break + while i < len(buf): + if len(buf) < 2: break size = (int(buf[i:i+2].to01(), 2) + 1) * 24 - if buf.length() < (size * 8): break + if len(buf) < (size * 8): break try: packet = Packet.frombits(buf, i=i, check_crc=check_crc) yield packet @@ -169,12 +169,12 @@ def decode_packets(data, error_callback=None, check_crc=True, resync=True): b.frombytes(r) while len(r): buf.frombytes(r) - logger.debug('chunking buffer of length %d bytes', buf.length()/8) + logger.debug('chunking buffer of length %d bytes', len(buf)/8) i = 0 - while i < buf.length(): - if buf.length() < 2: break + while i < len(buf): + if len(buf) < 2: break size = (int(buf[i:i+2].to01(), 2) + 1) * 24 - if buf.length() < (size * 8): break + if len(buf) < (size * 8): break try: packet = Packet.frombits(buf, i=i, check_crc=check_crc) yield packet diff --git a/src/msc/test/test_datagroups.py b/src/msc/test/test_datagroups.py index add34ea..c537c42 100644 --- a/src/msc/test/test_datagroups.py +++ b/src/msc/test/test_datagroups.py @@ -10,7 +10,7 @@ def test_blank_headermode(self): """testing header mode with blank image""" # create MOT object - print 'creating MOT object' + print('creating MOT object') object = MotObject("TestObject", "\x00" * 1024, ContentType.IMAGE_JFIF) # encode object From 85bf41a3d9735d37244c70b10f2bd950554eb19b Mon Sep 17 00:00:00 2001 From: Nick Piggott Date: Wed, 16 Mar 2022 10:02:05 +0000 Subject: [PATCH 10/17] Amend bin/decode to support python3 --- bin/decode | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/bin/decode b/bin/decode index d81a848..9c5f09e 100755 --- a/bin/decode +++ b/bin/decode @@ -20,7 +20,7 @@ parser.add_argument('-f', dest='output', help='outfile file directory') args = parser.parse_args() if args.filename: - print 'decoding from', args.filename + print('decoding from', args.filename) f = open(args.filename, 'rb') else: f = sys.stdin @@ -54,17 +54,17 @@ logger.debug("decoding function: %s", f); for o in f: if isinstance(o, Packet): - print 'packet:', o + print('packet:', o) elif isinstance(o, Datagroup): - print 'dataroup:', o + print('datagroup:', o) elif isinstance(o, MotObject): - print "=" * 48 - print 
'{name} {type} ({size} bytes)'.format(name=o.get_name(), type=o.get_type(), size=len(o.get_body())) - print "=" * 48 - print 'parameters:' + print("=" * 48) + print('{name} {type} ({size} bytes)'.format(name=o.get_name(), type=o.get_type(), size=len(o.get_body()))) + print("=" * 48) + print('parameters:') for p in o.get_parameters(): - print '\t', repr(p) - print + print('\t', repr(p)) + print() if args.output: import base64 file_output = open(os.path.join(args.output, base64.urlsafe_b64encode(o.get_name())), 'wb') From 16262597c9d249c243f54eae74b97130a25f52c2 Mon Sep 17 00:00:00 2001 From: Nick Piggott Date: Wed, 16 Mar 2022 10:04:23 +0000 Subject: [PATCH 11/17] Changes to support python3 --- src/msc/transports.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/msc/transports.py b/src/msc/transports.py index af400b2..e6e2c1b 100644 --- a/src/msc/transports.py +++ b/src/msc/transports.py @@ -20,7 +20,7 @@ def clock(self): raise NotImplementedError() class BlockingTransportMixin: """Defines a transport where the clock is set from the system clock elapsing""" - def clock(self): return elapsed_from_clock().next + def clock(self): return elapsed_from_clock().__next__ class UdpTransport(NonBlockingTransportMixin): """Send data over a UDP socket either as Datagroups or DAB Packets""" @@ -46,8 +46,8 @@ def fromurl(url, logger=logger): * bitrate: transport bitrate in bps (default 16kbps) """ - from urlparse import urlparse, parse_qsl - if isinstance(url, basestring): url = urlparse(url) + from urllib.parse import urlparse, parse_qsl + if isinstance(url, str): url = urlparse(url) if url.scheme != 'udp': raise ValueError('url must begin with the udp scheme') if url.path.find('?') >=0 : kwargs = dict(parse_qsl(url.path[url.path.index('?')+1:])) else: kwargs = dict(parse_qsl(url.query)) @@ -106,11 +106,11 @@ def __init__(self, transport): self.transport = transport def __iter__(self): return self - def next(self): + def __next__(self): r = self.transport.elapsed self.transport.elapsed = datetime.timedelta(0) return r - return Iter(self).next + return Iter(self).__next__ def __str__(self): return 'udp://{address}'.format(address=self.address) @@ -135,8 +135,8 @@ def fromurl(url, logger=logger): * bitrate: transport bitrate in bps (default 8kbps) """ - from urlparse import urlparse, parse_qsl - if isinstance(url, basestring): url = urlparse(url) + from urllib.parse import urlparse, parse_qsl + if isinstance(url, str): url = urlparse(url) if url.scheme != 'file': raise ValueError('url must begin with the file scheme') path = url.path[:url.path.index('?')] if url.path.find('?') >= 0 else url.path path = path.strip() @@ -184,12 +184,12 @@ def __init__(self, transport): self.transport = transport def __iter__(self): return self - def next(self): + def __next__(self): r = self.transport.elapsed self.transport.elapsed = datetime.timedelta(0) return r - return Iter(self).next + return Iter(self).__next__ def __str__(self): return 'file://{path}'.format(path=self.path) From 18115771622bfe7cb3c5d74edfad7f4da95fdfa9 Mon Sep 17 00:00:00 2001 From: Nick Piggott Date: Wed, 16 Mar 2022 10:05:55 +0000 Subject: [PATCH 12/17] changes to support python3 --- src/msc/test/test_transports.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/msc/test/test_transports.py b/src/msc/test/test_transports.py index 6362d57..1bcd1ed 100644 --- a/src/msc/test/test_transports.py +++ b/src/msc/test/test_transports.py @@ -1,5 +1,5 @@ import unittest -import 
urllib2 +import urllib.request, urllib.error, urllib.parse from msc import calculate_crc from mot import MotObject, ContentType @@ -15,8 +15,8 @@ def test_fromurl(self): def test_encode(self): - req = urllib2.Request(url) - response = urllib2.urlopen(req) + req = urllib.request.Request(url) + response = urllib.request.urlopen(req) data = response.read() type = ContentType.IMAGE_JFIF @@ -29,7 +29,7 @@ def test_encode(self): # define callback i = iter(datagroups) def callback(): - return i.next() + return next(i) transport = UdpTransport(address=('10.15.81.160', 5555)) transport.start(callback) @@ -43,8 +43,8 @@ def test_fromurl(self): def test_encode_slide_to_file(self): url = 'http://owdo.thisisglobal.com/2.0/id/25/logo/320x240.jpg' - req = urllib2.Request(url) - response = urllib2.urlopen(req) + req = urllib.request.Request(url) + response = urllib.request.urlopen(req) data = response.read() type = ContentType.IMAGE_JFIF @@ -57,10 +57,10 @@ def test_encode_slide_to_file(self): # define callback i = iter(datagroups) def callback(): - return i.next() + return next(i) - import StringIO - s = StringIO.StringIO() + import io + s = io.StringIO() transport = FileTransport(s) transport.start(callback) From 27efab2e858a8414c79cb60c7d904526993a9439 Mon Sep 17 00:00:00 2001 From: Nick Piggott Date: Wed, 16 Mar 2022 10:49:20 +0000 Subject: [PATCH 13/17] Various changes to support python3 / latest versions of bitarray --- src/msc/transports.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/msc/transports.py b/src/msc/transports.py index e6e2c1b..85a3138 100644 --- a/src/msc/transports.py +++ b/src/msc/transports.py @@ -169,10 +169,10 @@ def start(self, callback): for d in data: b = d.tobytes() if isinstance(d, Datagroup): - self.f.write(b) + self.f.write(b.decode('ascii')) self.elapsed += datetime.timedelta(milliseconds=(8 * float(len(b)) * 1000)/self.bitrate) elif isinstance(d, Packet): - self.f.write(b) + self.f.write(b.decode('ascii')) self.elapsed += datetime.timedelta(milliseconds=24) else: raise TypeError('yarrgh. 
neither a datagroup nor packet this be: %s', type(d)) self.f.flush() From 2d558a0154674e45e89e5bc47c7a6ba9fc940cd3 Mon Sep 17 00:00:00 2001 From: Nick Piggott Date: Sun, 6 Oct 2024 12:33:56 +0100 Subject: [PATCH 14/17] Fixed Packet Continuity Index Added the possibility to add padding packets to make sure the final packet continuity index value is always 3 --- .gitignore | 3 ++ src/msc/__init__.py.bak | 81 ------------------------------------- src/msc/packets/__init__.py | 17 ++++++-- 3 files changed, 16 insertions(+), 85 deletions(-) create mode 100644 .gitignore delete mode 100644 src/msc/__init__.py.bak diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a293b6b --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ + +/src/dabmsc.egg-info +/build diff --git a/src/msc/__init__.py.bak b/src/msc/__init__.py.bak deleted file mode 100644 index dee9dce..0000000 --- a/src/msc/__init__.py.bak +++ /dev/null @@ -1,81 +0,0 @@ -import crcmod -from bitarray import bitarray -import logging - -logger = logging.getLogger('msc') - -crcfun = crcmod.mkCrcFun(0x11021, 0x0, False, 0xFFFF) -def calculate_crc(data): - return crcfun(data) - -def hex_to_bitarray(hex): - b = bitarray() - for byte in hex.split(' '): - b.extend(int_to_bitarray(int('0x%s' % byte, 16), 8)) - return b - -def int_to_bitarray(i, n): - return bitarray(('{0:0%db}' % n).format(i)) - -def bitarray_to_int(bits): - return int(bits.to01(), 2) - -def bitarray_to_hex(bits, width=32): - if not isinstance(bits, bitarray): raise ValueError('object is not a bitarray') - rows = [] - for i in range(0, len(bits), width*8): - rows.append(' '.join(["%02X" % ord(x) for x in bits[i:i+(width*8)].tobytes()]).strip()) - return '\r\n'.join(rows) - -def bitarray_to_binary(bits, width=32): - if not isinstance(bits, bitarray): raise ValueError('object is not a bitarray') - rows = [] - for i in range(0, len(bits), width*8): - bytes = [] - for j in range(i, i+(width*8), 8): - bytes.append(bits[j:j+8].to01()) - rows.append(' '.join(bytes)) - return '\r\n'.join(rows) - -class InvalidCrcError(Exception): - - def __init__(self, crc, data): - self.crc = crc - self.data = data - -class TransportIdGenerator(): - '''interface for classes to generate transport IDs''' - - def next(self, name=None): - pass - - def exists(self, id): - pass - -class MemoryCachedTransportIdGenerator(TransportIdGenerator): - '''generates transport IDs cached in memory''' - - def __init__(self): - self.ids = [] - self.cache = {} - - def next(self, name=None): - # first check the cache - if name is not None and self.cache.has_key(name): - return self.cache.get(name) - - # if we've run out then start recycling from the head - if len(self.ids) >= (1 << 16) - 1: return self.ids.pop(0) - import random - id = None - while id is None or id in self.ids: - id = int(random.random() * (1 << 16)) - self.ids.append(id) - if name is not None: self.cache[name] = id - - return id - -# default transport ID generator -transport_id_generator = MemoryCachedTransportIdGenerator() -def generate_transport_id(name=None): - return transport_id_generator.next(name) diff --git a/src/msc/packets/__init__.py b/src/msc/packets/__init__.py index 634aeb2..0a119c3 100644 --- a/src/msc/packets/__init__.py +++ b/src/msc/packets/__init__.py @@ -38,8 +38,9 @@ def tobytes(self): # add the packet data tmp = bitarray() - tmp.frombytes(self.data) - bits += tmp # (24-n): packet data + if len(self.data) > 0: + tmp.frombytes(self.data) + bits += tmp # (24-n): packet data # add packet padding if needed bits += 
bitarray('0'*(self.size - len(self.data) - 5)*8) @@ -77,7 +78,7 @@ def __str__(self): def __repr__(self): return '<%s>' % str(self) -def encode_packets(datagroups, address=None, size=None, continuity=None): +def encode_packets(datagroups, address=None, size=None, continuity=None, padding=False): """ Encode a set of datagroups into packets @@ -86,6 +87,7 @@ def encode_packets(datagroups, address=None, size=None, continuity=None, padding=False): if not address: address = 1 if not size: size = Packet.SIZE_96 if not continuity: continuity = {} + if not padding: padding = False if address < 1 or address > 1024: raise ValueError('packet address must be greater than zero and less than 1024') if size not in Packet.sizes: raise ValueError('packet size %d must be one of: %s' % (size, Packet.sizes)) @@ -107,8 +109,15 @@ def get_continuity_index(address): chunk_size = size - 5 for i in range(0, len(data), chunk_size): chunk = data[i:i+chunk_size if i+chunk_size < len(data) else len(data)] - packet = Packet(size, address, chunk, True if i == 0 else False, True if i+chunk_size >= len(data) else False, get_continuity_index(address)) + continuity_index = get_continuity_index(address) + packet = Packet(size, address, chunk, True if i == 0 else False, True if i+chunk_size >= len(data) else False, continuity_index) packets.append(packet) + # add padding packets to make sure the Continuity Index ends with 3 + if padding and continuity_index != 3: + while continuity_index !=3: + continuity_index += 1 + packet = Packet(size, address, [], True, True, continuity_index) + packets.append(packet) return packets From 6320fcca0a9ea5c816202de4219e15077384f10a Mon Sep 17 00:00:00 2001 From: Nick Piggott Date: Sun, 6 Oct 2024 16:21:51 +0100 Subject: [PATCH 15/17] Fixing Packet Continuity Index This approach ensures the correct sequence of the packet continuity index by repeating the generation of the packet stream until it ends with Continuity Index = 3. That means it may produce a file two or four times bigger than normal if padding is enabled.
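For reference, the two-bit continuity index simply counts 1, 2, 3, 0, 1, ... per packet address, so where a pass ends depends only on the total packet count. A minimal sketch of the same arithmetic as get_continuity_index() (the standalone helper name is illustrative, not part of the patch):

    # Equivalent of get_continuity_index(): a per-address two-bit counter.
    # The first packet for an address gets index 1; values wrap modulo 4.
    def next_continuity_index(continuity, address):
        index = (continuity.get(address, 0) + 1) % 4
        continuity[address] = index
        return index

    c = {}
    # seven packets on one address end on index 3: 1, 2, 3, 0, 1, 2, 3
    assert [next_continuity_index(c, 1) for _ in range(7)][-1] == 3

This is also why the change below repeats the whole stream rather than padding it: the index advances once per packet, so only re-running complete passes can move the final value.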
--- src/msc/packets/__init__.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/src/msc/packets/__init__.py b/src/msc/packets/__init__.py index 0a119c3..30166e7 100644 --- a/src/msc/packets/__init__.py +++ b/src/msc/packets/__init__.py @@ -104,20 +104,25 @@ def get_continuity_index(address): return index # encode the datagroups into a continuous datastream - for datagroup in datagroups: - data = datagroup.tobytes() - chunk_size = size - 5 - for i in range(0, len(data), chunk_size): - chunk = data[i:i+chunk_size if i+chunk_size < len(data) else len(data)] - continuity_index = get_continuity_index(address) - packet = Packet(size, address, chunk, True if i == 0 else False, True if i+chunk_size >= len(data) else False, continuity_index) - packets.append(packet) - # add padding packets to make sure the Continuity Index ends with 3 - if padding and continuity_index != 3: - while continuity_index !=3: - continuity_index += 1 - packet = Packet(size, address, [], True, True, continuity_index) + # repeating sufficient times to make sure the final continuity index is 3 + # this could make the output filesize x2 or x4 the minimum size + while True: + for datagroup in datagroups: + data = datagroup.tobytes() + chunk_size = size - 5 + for i in range(0, len(data), chunk_size): + chunk = data[i:i+chunk_size if i+chunk_size < len(data) else len(data)] + continuity_index = get_continuity_index(address) + packet = Packet(size, address, chunk, True if i == 0 else False, True if i+chunk_size >= len(data) else False, continuity_index) packets.append(packet) + if padding == False or (padding == True and continuity_index == 3): + break + # add padding packets to make sure the Continuity Index ends with 3 + # if padding and continuity_index != 3: + # while continuity_index !=3: + # continuity_index += 1 + # packet = Packet(size, address, [], True, True, continuity_index) + # packets.append(packet) return packets From ef5b378570a150573c1599912b6da1c05cfe8083 Mon Sep 17 00:00:00 2001 From: Nick Piggott Date: Wed, 12 Nov 2025 15:02:07 +0000 Subject: [PATCH 16/17] Update __init__.py Adjusts the calculation of the continuity index to increment on every datagroup, not just within datagroups of the same object --- src/msc/datagroups/__init__.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/src/msc/datagroups/__init__.py b/src/msc/datagroups/__init__.py index f71da18..82bd570 100644 --- a/src/msc/datagroups/__init__.py +++ b/src/msc/datagroups/__init__.py @@ -197,21 +197,40 @@ def encode_directorymode(objects, directory_parameters=None, segmenting_strategy bits += entries # segment and add directory datagroups with a new transport ID + continuity_directory = 0 directory_transport_id = generate_transport_id() segments = _segment(bits.tobytes(), segmenting_strategy) for i, segment in enumerate(segments): - header_group = Datagroup(directory_transport_id, DIRECTORY_UNCOMPRESSED, segment, i, i%16, last=True if i == len(segments) - 1 else False) + header_group = Datagroup(directory_transport_id, DIRECTORY_UNCOMPRESSED, segment, i, continuity_directory, last=True if i == len(segments) - 1 else False) tmp = bitarray() tmp.frombytes(header_group.tobytes()) tmp.frombytes(header_group.tobytes()) datagroups.append(header_group) + continuity_directory = (continuity_directory + 1) % 16 # add body datagroups + continuity_body = 0 for object in objects: segments = _segment(object.get_body(), segmenting_strategy) for i, segment in 
enumerate(segments): - body_group = Datagroup(object.get_transport_id(), BODY, segment, i, i%16, last=True if i == len(segments) - 1 else False) + body_group = Datagroup(object.get_transport_id(), BODY, segment, i, continuity_body, last=True if i == len(segments) - 1 else False) datagroups.append(body_group) + continuity_body = (continuity_body + 1) % 16 + # add empty body datagroups to assure continuity + if continuity_body != 0: + # segment header + bits = bitarray() + bits += int_to_bitarray(0, 3) # (0-2): Repetition Count remaining (0 = only broadcast) + bits += int_to_bitarray(0, 13) # (3-16): SegmentSize + dummysegment = bits.tobytes() + body_group = Datagroup(generate_transport_id(), BODY, dummysegment, 0, continuity_body, last=True) + datagroups.append(body_group) + continuity_body = (continuity_body + 1) % 16 + if continuity_body != 0: + continuity_body = 15 + body_group = Datagroup(generate_transport_id(), BODY, dummysegment, 0, continuity_body, last=True) + datagroups.append(body_group) + return datagroups import select From d23c417e72217365e7cb04ad3b19e6249e21355f Mon Sep 17 00:00:00 2001 From: Nick Piggott Date: Thu, 13 Nov 2025 17:36:47 +0000 Subject: [PATCH 17/17] Update __init__.py --- src/msc/packets/__init__.py | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/src/msc/packets/__init__.py b/src/msc/packets/__init__.py index 30166e7..9717e2f 100644 --- a/src/msc/packets/__init__.py +++ b/src/msc/packets/__init__.py @@ -84,6 +84,27 @@ def encode_packets(datagroups, address=None, size=None, continuity=None, padding Encode a set of datagroups into packets """ + def get_continuity_index(address): + index=0 + if address in continuity: + index = continuity[address] + index += 1 + if index > 3: index = 0 + continuity[address] = index + return index + + def get_required_size(payload_size, max_packet_size): + if payload_size > (max_packet_size-5): + return max_packet_size + if payload_size > (72-5): + return Packet.SIZE_96 + elif payload_size > (48-5): + return Packet.SIZE_72 + elif payload_size > (24-5): + return Packet.SIZE_48 + else: + return Packet.SIZE_24 + if not address: address = 1 if not size: size = Packet.SIZE_96 if not continuity: continuity = {} @@ -94,15 +115,6 @@ def encode_packets(datagroups, address=None, size=None, continuity=None, padding packets = [] - def get_continuity_index(address): - index=0 - if address in continuity: - index = continuity[address] - index += 1 - if index > 3: index = 0 - continuity[address] = index - return index - # encode the datagroups into a continuous datastream # repeating sufficient times to make sure the final continuity index is 3 # this could make the output filesize x2 or x4 the minimum size @@ -113,7 +125,7 @@ def get_continuity_index(address): for i in range(0, len(data), chunk_size): chunk = data[i:i+chunk_size if i+chunk_size < len(data) else len(data)] continuity_index = get_continuity_index(address) - packet = Packet(size, address, chunk, True if i == 0 else False, True if i+chunk_size >= len(data) else False, continuity_index) + packet = Packet(get_required_size(len(chunk),size), address, chunk, True if i == 0 else False, True if i+chunk_size >= len(data) else False, continuity_index) packets.append(packet) if padding == False or (padding == True and continuity_index == 3): break
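Taken together, the last two patches mean encode_packets() right-sizes each packet to its payload and, with padding enabled, repeats the stream until it ends on continuity index 3. A minimal usage sketch under those assumptions (the transport ID and payload here are illustrative, not taken from the patches):

    # Encode one MOT body datagroup (type 4) into packets, padding enabled.
    from msc.datagroups import Datagroup
    from msc.packets import Packet, encode_packets

    dg = Datagroup(0x1234, 4, b'\x00' * 200, 0, 0, last=True)
    packets = encode_packets([dg], address=1, size=Packet.SIZE_96, padding=True)
    for p in packets:
        print(p.size, p.index, len(p.data))

With a 200-byte body the encoded datagroup is 209 bytes, which chunks into payloads of 91, 91 and 27 bytes: the first two packets stay at size 96, get_required_size() drops the last one to size 48, and the pass already ends on continuity index 3, so no repetition is needed.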