
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: software_public/passoft/sohstationviewer
Commits on Source (12)
Showing 789 additions and 122 deletions
......@@ -26,7 +26,7 @@ def get_chan_plot_info(org_chan_id: str, chan_info: Dict, data_type: str,
chan = 'MassPos?'
if org_chan_id.startswith('Event DS'):
chan = 'Event DS?'
if org_chan_id.startswith('DS'):
if org_chan_id.startswith('DS') and 'DSP' not in org_chan_id:
chan = 'DS?'
if org_chan_id.startswith('Disk Usage'):
chan = 'Disk Usage?'
......
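The added 'DSP' guard keeps DSP channels from matching the data-stream wildcard: a hypothetical 'DS2' still maps to the 'DS?' template, while 'DSP Clock Diff' (renamed later in this diff) no longer does.

    # Hypothetical channel IDs illustrating the new guard:
    assert 'DS2'.startswith('DS') and 'DSP' not in 'DS2'
    assert 'DSP Clock Diff'.startswith('DS') and 'DSP' in 'DSP Clock Diff'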
......@@ -31,7 +31,7 @@ class DataLoaderWorker(QtCore.QObject):
req_soh_chans: List[str] = [], read_start: float = 0,
read_end: float = constants.HIGHEST_INT,
include_mp123: bool = False, include_mp456: bool = False,
parent_thread=None):
rt130_waveform_data_req: bool = False, parent_thread=None):
super().__init__()
self.data_type = data_type
self.tracking_box = tracking_box
......@@ -43,6 +43,7 @@ class DataLoaderWorker(QtCore.QObject):
self.read_end = read_end
self.include_mp123 = include_mp123
self.include_mp456 = include_mp456
        self.rt130_waveform_data_req = rt130_waveform_data_req
self.parent_thread = parent_thread
        # display_tracking_info updates a QtWidget, which can only be done in
        # the main thread. Since self.run runs in a background thread, we need to use
......@@ -71,6 +72,7 @@ class DataLoaderWorker(QtCore.QObject):
read_start=self.read_start, read_end=self.read_end,
include_mp123zne=self.include_mp123,
include_mp456uvw=self.include_mp456,
rt130_waveform_data_req=self.rt130_waveform_data_req,
creator_thread=self.parent_thread,
notification_signal=self.notification,
pause_signal=self.button_dialog
......@@ -114,7 +116,8 @@ class DataLoader(QtCore.QObject):
req_soh_chans: List[str] = [], read_start: float = 0,
read_end: float = constants.HIGHEST_INT,
include_mp123: bool = False,
include_mp456: bool = False):
include_mp456: bool = False,
rt130_waveform_data_req: bool = False):
"""
Initialize the data loader. Construct the thread and worker and connect
them together. Separated from the actual loading of the data to allow
......@@ -150,6 +153,7 @@ class DataLoader(QtCore.QObject):
read_end=read_end,
include_mp123=include_mp123,
include_mp456=include_mp456,
rt130_waveform_data_req=rt130_waveform_data_req,
parent_thread=self.thread
)
......
......@@ -43,6 +43,7 @@ class DataTypeModel():
read_end: Optional[float] = UTCDateTime().timestamp,
include_mp123zne: bool = False,
include_mp456uvw: bool = False,
rt130_waveform_data_req: bool = False,
creator_thread: Optional[QtCore.QThread] = None,
notification_signal: Optional[QtCore.Signal] = None,
pause_signal: Optional[QtCore.Signal] = None,
......@@ -60,6 +61,7 @@ class DataTypeModel():
:param read_end: requested end time to read
:param include_mp123zne: if mass position channels 1,2,3 are requested
:param include_mp456uvw: if mass position channels 4,5,6 are requested
:param rt130_waveform_data_req: flag for RT130 to read waveform data
:param creator_thread: the thread the current DataTypeModel instance is
being created in. If None, the DataTypeModel instance is being
created in the main thread
......@@ -78,6 +80,7 @@ class DataTypeModel():
self.read_end = read_end
self.include_mp123zne = include_mp123zne
self.include_mp456uvw = include_mp456uvw
self.rt130_waveform_data_req = rt130_waveform_data_req
if creator_thread is None:
err_msg = (
'A signal is not None while running in main thread'
......@@ -357,7 +360,8 @@ class DataTypeModel():
list_of_rt130_paths,
req_wf_chans=[], req_soh_chans=[],
read_start=0, read_end=constants.HIGHEST_INT,
include_mp123=False, include_mp456=False):
include_mp123=False, include_mp456=False,
rt130_waveform_data_req=False):
"""
Create a DataTypeModel object, with the concrete class being based on
data_type. Run on the same thread as its caller, and so will block the
......@@ -383,7 +387,9 @@ class DataTypeModel():
data_type, tracking_box, folder, list_of_rt130_paths,
reqWFChans=req_wf_chans, reqSOHChans=req_soh_chans,
readStart=read_start, readEnd=read_end,
include_mp123=include_mp123, include_mp456=include_mp456)
include_mp123=include_mp123, include_mp456=include_mp456,
rt130_waveform_data_req=rt130_waveform_data_req
)
else:
from sohstationviewer.model.mseed.mseed import MSeed
data_object = MSeed(
......
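Taken together, the hunks above thread the new flag through the whole loading path; it originates in the GUI (see the MainWindow hunk further down) and ends up on the RT130 model. A sketch of the flow, with names abbreviated from the hunks:

    # MainWindow (RAW/TPS checkboxes)
    #     -> DataLoader(..., rt130_waveform_data_req=...)
    #     -> DataLoaderWorker(..., rt130_waveform_data_req=...)
    #     -> DataTypeModel factory (..., rt130_waveform_data_req=...)
    #     -> RT130, which stores it as self.rt130_waveform_data_req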
......@@ -47,7 +47,10 @@ def check_reftek_header(
if chan_id not in cur_data_dict:
cur_data_dict[chan_id] = {'tracesInfo': [],
'samplerate': samplerate}
if trace.stats.npts == 0:
            # skip an empty trace to prevent a bug when creating a memmap
            # with no data
continue
if (starttime <= trace.stats['starttime'] <= endtime or
starttime <= trace.stats['endtime'] <= endtime):
avail_trace_indexes.append(index)
......
def decode_int16(buffer, unpacker):
requested_bytes = buffer[:2]
return unpacker.unpack('h', requested_bytes)[0]
def decode_int24(buffer, unpacker):
requested_bytes = buffer[:3]
byte_order = 'big' if unpacker.byte_order_char == '>' else 'little'
# We delegate to int.from_bytes() because it takes a lot of work to make
    # struct.unpack() handle signed 24-bit integers.
# See: https://stackoverflow.com/questions/3783677/how-to-read-integers-from-a-file-that-are-24bit-and-little-endian-using-python # noqa
    return int.from_bytes(requested_bytes, byte_order, signed=True)
def decode_int32(buffer, unpacker):
requested_bytes = buffer[:4]
return unpacker.unpack('i', requested_bytes)[0]
def decode_ieee_float(buffer, unpacker):
requested_bytes = buffer[:4]
return unpacker.unpack('f', requested_bytes)[0]
def decode_ieee_double(buffer, unpacker):
requested_bytes = buffer[:8]
return unpacker.unpack('d', requested_bytes)[0]
def decode_steim(buffer, unpacker):
    # The first 4 bytes in a Steim frame are metadata of the record. Since we
    # aren't decompressing the data, we skip them. The next 4 bytes contain
    # the first data point of the MSEED data record, which is what we need.
requested_bytes = buffer[4:8]
return unpacker.unpack('i', requested_bytes)[0]
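A quick, hypothetical sanity check for the decoders above (not part of the changeset); it hand-builds big-endian byte strings and a minimal stand-in for the Unpacker defined below:

    import struct

    class _FixedBigEndianUnpacker:
        # Minimal stand-in with the interface the decoders expect.
        byte_order_char = '>'

        def unpack(self, fmt, buffer):
            return struct.unpack(self.byte_order_char + fmt, buffer)

    unpacker = _FixedBigEndianUnpacker()
    assert decode_int16(b'\xff\xfe', unpacker) == -2
    # 0xfffffe is -2 when read as a signed, big-endian 24-bit integer.
    assert decode_int24(b'\xff\xff\xfe', unpacker) == -2
    # A Steim frame: 4 metadata bytes, then the first data point (42).
    assert decode_steim(b'\x00\x00\x00\x00' + struct.pack('>i', 42),
                        unpacker) == 42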
from dataclasses import dataclass
import struct
from enum import Enum
from obspy import UTCDateTime
class Unpacker:
"""
    A wrapper around struct.unpack() that unpacks binary data without having
    to explicitly define the byte order in the format string. Also restricts
    the format to str and the buffer to bytes.
"""
def __init__(self, byte_order_char: str = '') -> None:
self.byte_order_char = byte_order_char
def unpack(self, format: str, buffer: bytes):
"""
Unpack a string of bytes into a tuple of values based on the given
format
:param format: the format used to unpack the byte string
:param buffer: the byte string
:return: a tuple containing the unpacked values.
"""
default_byte_order_chars = ('@', '=', '>', '<', '!')
if format.startswith(default_byte_order_chars):
            format = self.byte_order_char + format[1:]
else:
format = self.byte_order_char + format
return struct.unpack(format, buffer)
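Hypothetical usage of Unpacker (not in the changeset): the same two bytes decoded under different byte orders, including the case where a byte-order character already present in the format string is overridden:

    le_unpacker = Unpacker('<')
    be_unpacker = Unpacker('>')
    assert le_unpacker.unpack('H', b'\x01\x00') == (1,)
    assert be_unpacker.unpack('H', b'\x01\x00') == (256,)
    assert be_unpacker.unpack('<H', b'\x01\x00') == (256,)  # '<' is replaced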
@dataclass
class FixedHeader:
"""
The fixed portion of the header of a data record. All fields are stored as
bytes to minimize time wasted on decoding unused values.
"""
sequence_number: bytes
data_header_quality_indicator: bytes
reserved: bytes
station: bytes
location: bytes
channel: bytes
net_code: bytes
record_start_time: bytes
sample_count: bytes
sample_rate_factor: bytes
sample_rate_multiplier: bytes
activity_flags: bytes
io_and_clock_flags: bytes
data_quality_flags: bytes
blockette_count: bytes
time_correction: bytes
data_offset: bytes
first_blockette_offset: bytes
@dataclass
class Blockette1000:
"""
    Blockette 1000 of a data record. All fields are stored as bytes to minimize
time wasted on decoding unused values.
"""
blockette_type: bytes
next_blockette_offset: bytes
encoding_format: bytes
byte_order: bytes
record_length: bytes
reserved_byte: bytes
@dataclass
class RecordMetadata:
"""
The metadata of a data record.
"""
station: str
location: str
channel: str
network: str
start_time: UTCDateTime
sample_count: int
sample_rate: float
class EncodingFormat(Enum):
ASCII = 0
INT_16_BIT = 1
INT_24_BIT = 2
INT_32_BIT = 3
IEEE_FLOAT_32_BIT = 4
IEEE_FLOAT_64_BIT = 5
STEIM_1 = 10
STEIM_2 = 11
def get_header_endianness(header: FixedHeader):
"""
    Determine the endianness of the fixed header of a data record. Works by
    checking whether the decoded record start time has a sane value when the
    header is assumed to be big-endian.
WARNING: This check fails on three dates: 2056-1, 2056-256, and 2056-257.
2056 is a palindrome when encoded as a pair of octets, so endianness does
not affect it. Similarly, 257 is also 2-octet-palindromic. Meanwhile, 1 and
256 are counterparts when encoded as pairs of octets. Because they are both
valid values for day of year, it is impossible to make a conclusion about
endianness based on day of year if it is either 1 or 256 in big-endian.
    Together, these facts mean that we cannot determine the endianness of a
    header whose record starts on one of the aforementioned dates. The way
    this function is written, the endianness will be recorded as big in these
    cases. This problem is also recorded in libmseed.
:param header: the fixed header of the data record
    :return: either the string 'big' or 'little', depending on the extracted
        endianness of the header
"""
record_start_time_string = header.record_start_time
record_start_time_tuple = struct.unpack('>hhbbbbh',
record_start_time_string)
# libmseed uses 1900 to 2100 as the sane year range. We follow their
# example here.
year_is_good = (1900 <= record_start_time_tuple[0] <= 2100)
# The upper range is 366 to account for leap years.
day_is_good = (1 <= record_start_time_tuple[1] <= 366)
endianness = 'big' if year_is_good and day_is_good else 'little'
return endianness
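The warning above can be checked with plain struct arithmetic (illustration, not part of the changeset): 2056 and 257 encode identically in both byte orders, while 1 and 256 are each other's byte-swapped counterparts:

    import struct

    assert struct.pack('>h', 2056) == struct.pack('<h', 2056)  # b'\x08\x08'
    assert struct.pack('>h', 257) == struct.pack('<h', 257)    # b'\x01\x01'
    assert struct.pack('>h', 1) == struct.pack('<h', 256)      # b'\x00\x01'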
def get_data_endianness(blockette_1000: Blockette1000):
"""
Get endianness of a data record by examining blockette 1000.
    :param blockette_1000: the blockette 1000 of the data record
    :return: either the string 'big' or 'little', depending on the byte-order
        flag stored in blockette 1000
"""
# The byte order is only one byte so using big or little endian does not
# matter.
blockette_1000_endianness = int.from_bytes(
blockette_1000.byte_order, 'big'
)
if blockette_1000_endianness:
return 'big'
else:
return 'little'
def calculate_sample_rate(factor: int, multiplier: int) -> float:
"""
Calculate the sample rate using the sample rate factor and multiplier. This
algorithm is described around the start of Chapter 8 in the SEED manual.
:param factor: the sample rate factor
:param multiplier: the sample rate multiplier
:return: the nominal sample rate
"""
sample_rate = 0
if factor == 0:
sample_rate = 0
elif factor > 0 and multiplier > 0:
sample_rate = factor * multiplier
elif factor > 0 and multiplier < 0:
sample_rate = -(factor / multiplier)
elif factor < 0 and multiplier > 0:
sample_rate = -(multiplier / factor)
elif factor < 0 and multiplier < 0:
sample_rate = 1 / (factor * multiplier)
return sample_rate
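Worked examples covering the four sign combinations (illustrative values, not taken from the changeset):

    assert calculate_sample_rate(40, 1) == 40       # 40 samples/second
    assert calculate_sample_rate(100, -10) == 10.0  # 100/10 samples/second
    assert calculate_sample_rate(-10, 1) == 0.1     # one sample every 10 s
    assert calculate_sample_rate(-5, -4) == 0.05    # 1/(5*4) samples/second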
def get_record_metadata(header: FixedHeader, header_unpacker: Unpacker):
"""
Extract and parse the metadata of a data record from its fixed header.
:param header: the fixed header of the data record
:param header_unpacker: the unpacker corresponding to the data record;
needed so that the correct byte order can be used
    :return: the extracted record metadata
"""
station = header.station.decode('utf-8').rstrip()
location = header.location.decode('utf-8').rstrip()
channel = header.channel.decode('utf-8').rstrip()
network = header.net_code.decode('utf-8').rstrip()
record_start_time_string = header.record_start_time
record_start_time_tuple = header_unpacker.unpack('HHBBBBH',
record_start_time_string)
record_start_time = UTCDateTime(year=record_start_time_tuple[0],
julday=record_start_time_tuple[1],
hour=record_start_time_tuple[2],
minute=record_start_time_tuple[3],
second=record_start_time_tuple[4],
microsecond=record_start_time_tuple[
6] * 100)
sample_count = header_unpacker.unpack('H', header.sample_count)[0]
sample_rate_factor = header_unpacker.unpack(
'h', header.sample_rate_factor
)[0]
sample_rate_multiplier = header_unpacker.unpack(
'h', header.sample_rate_multiplier
)[0]
sample_rate = calculate_sample_rate(sample_rate_factor,
sample_rate_multiplier)
return RecordMetadata(station, location, channel, network,
record_start_time, sample_count, sample_rate)
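The record start time follows the SEED BTIME layout: year (2 bytes), day of year (2), hour (1), minute (1), second (1), one unused byte, then ten-thousandths of a second (2), which is why field 6 is multiplied by 100 to get microseconds. A hand-built illustration (not part of the changeset):

    import struct

    btime = struct.pack('>HHBBBBH', 2023, 32, 12, 30, 5, 0, 1234)
    fields = struct.unpack('>HHBBBBH', btime)
    assert fields[6] * 100 == 123400  # 0.1234 s expressed as microseconds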
from typing import BinaryIO
import obspy
from record_reader import RecordReader
class MSeedReader:
def __init__(self, file: BinaryIO) -> None:
self.file = file
def read(self):
trace = []
        while True:
            # We know that the end of the file is reached when read() returns
            # an empty bytes object.
            is_eof = (self.file.read(1) == b'')
if is_eof:
break
# We need to move the file pointer back to its position after we
# do the end of file check. Otherwise, we would be off by one
# byte for all the reads afterward.
self.file.seek(-1, 1)
# We save the start of the current record so that after we are
# done reading the record, we can move back. This makes moving
# to the next record a lot easier, seeing as we can simply move
# the file pointer a distance the size of the current record.
current_record_start = self.file.tell()
reader = RecordReader(self.file)
trace.append(reader.get_first_data_point())
# sample_count = reader.record_metadata.sample_count
# sample_rate = reader.record_metadata.sample_rate
# record_time_taken = sample_count / sample_rate
# record_end_time = (reader.record_metadata.start_time +
# record_time_taken)
            # MSEED stores the size of a data record as an exponent of a
            # power of two, so we have to convert that to the actual size
            # (e.g. an exponent of 12 means a 4096-byte record) before doing
            # anything else.
record_length_exp = reader.header_unpacker.unpack(
'B', reader.blockette_1000.record_length
)[0]
record_size = 2 ** record_length_exp
self.file.seek(current_record_start)
self.file.seek(record_size, 1)
if __name__ == '__main__':
# numpy.set_printoptions(threshold=sys.maxsize)
file_path = '/Users/ldam/Documents/GIT/sohstationviewer/tests/test_data/' \
'Q330_mixed_traces/XX-3203_4-20221222183011'
file = open(file_path, 'rb')
stream = obspy.read(file_path)
MSeedReader(file).read()
from numbers import Real
from typing import BinaryIO, Optional, List
from obspy import UTCDateTime
from decode_mseed import (
decode_ieee_float, decode_ieee_double, decode_steim, decode_int16,
decode_int24, decode_int32,
)
from mseed_helper import (
FixedHeader, Blockette1000, get_data_endianness, Unpacker,
get_record_metadata, get_header_endianness, RecordMetadata,
EncodingFormat,
)
class RecordReader:
"""
This class reads one data record from an MSEED file.
"""
def __init__(self, file: BinaryIO) -> None:
# The MSEED file object to read from. The file pointer needs to be
# located at the start of a data record.
self.file = file
self.fixed_header: Optional[FixedHeader] = None
self.blockette_1000: Optional[Blockette1000] = None
self.other_blockettes: List[str] = []
# Utility object that helps unpack byte strings in the header (the
# fixed header and the blockettes).
# Separate from the one for data in case the header has a different
# byte order.
# TODO: change blockettes to use this unpacker as well.
self.header_unpacker: Unpacker = Unpacker()
self.data_unpacker: Unpacker = Unpacker()
self.record_metadata: Optional[RecordMetadata] = None
self.read_header()
def read_header(self) -> None:
"""
Read the header of the current data record. The header includes the
fixed portion, blockette 1000, and any blockettes that follow.
"""
# Save the start of the record so that we can go back after reading the
# header.
record_start = self.file.tell()
self.read_fixed_header()
self.read_blockette_1000()
header_endianness = get_header_endianness(self.fixed_header)
if header_endianness == 'little':
self.header_unpacker.byte_order_char = '<'
else:
self.header_unpacker.byte_order_char = '>'
data_endianness = get_data_endianness(self.blockette_1000)
if data_endianness == 'little':
self.data_unpacker.byte_order_char = '<'
else:
self.data_unpacker.byte_order_char = '>'
self.record_metadata = get_record_metadata(self.fixed_header,
self.header_unpacker)
self.apply_time_correction()
self.read_blockettes()
self.file.seek(record_start)
def read_fixed_header(self) -> None:
"""
Read the fixed header of the current data record and store it in
self.fixed_header.
"""
byte_counts = [6, 1, 1, 5, 2, 3, 2, 10, 2, 2, 2, 1, 1, 1, 1, 4, 2, 2]
fixed_header_sections_values = []
for byte_count in byte_counts:
fixed_header_sections_values.append(self.file.read(byte_count))
self.fixed_header = FixedHeader(*fixed_header_sections_values)
def read_blockette_500(self) -> None:
"""
Read blockette 500 and format its content. The result is stored for
future uses. Assumes that the file pointer is at the start of the
blockette.
"""
blockette_content = {}
# Skip the first four bytes because they contain meta-information about
# the blockettes.
self.file.read(4)
vco_correction = self.file.read(4)
blockette_content['VCO correction'] = self.header_unpacker.unpack(
'f', vco_correction
)[0]
exception_time_bytes = self.file.read(10)
exception_time_tuple = self.header_unpacker.unpack(
'HHBBBBH', exception_time_bytes)
exception_time = UTCDateTime(year=exception_time_tuple[0],
julday=exception_time_tuple[1],
hour=exception_time_tuple[2],
minute=exception_time_tuple[3],
second=exception_time_tuple[4],
microsecond=exception_time_tuple[6] * 100)
blockette_content['Time of exception'] = exception_time.strftime(
'%Y:%j:%H:%M:%S:%f'
)
microsecond = self.file.read(1)
microsecond = self.header_unpacker.unpack('B', microsecond)[0]
start_time_adjustment = microsecond / (10 ** 6)
self.record_metadata.start_time += start_time_adjustment
blockette_content['Micro sec'] = microsecond
reception_quality = self.file.read(1)
blockette_content['Reception Quality'] = self.header_unpacker.unpack(
'B', reception_quality
)[0]
exception_count = self.file.read(4)
blockette_content['Exception Count'] = self.header_unpacker.unpack(
'I', exception_count
)[0]
exception_type = self.file.read(16)
blockette_content['Exception Type'] = self.header_unpacker.unpack(
'16s', exception_type
)[0].decode('utf-8').strip()
clock_model = self.file.read(32)
blockette_content['Clock Model'] = self.header_unpacker.unpack(
'32s', clock_model
)[0].decode('utf-8').strip()
clock_status = self.file.read(128)
blockette_content['Clock Status'] = self.header_unpacker.unpack(
'128s', clock_status
)[0].decode('utf-8').strip()
formatted_blockette = '\n'.join([f'{key}: {value}'
for key, value
in blockette_content.items()])
self.other_blockettes.append(formatted_blockette)
def read_blockette_1000(self) -> None:
"""
Read blockette 1000 of the current data record and store it in
self.blockette_1000.
"""
blockette_1000_section_lengths = [2, 2, 1, 1, 1, 1]
blockette_1000_values = []
for section_length in blockette_1000_section_lengths:
blockette_1000_values.append(self.file.read(section_length))
self.blockette_1000 = Blockette1000(*blockette_1000_values)
def read_blockette_1001(self) -> None:
"""
Read blockette 1001. The only valuable thing in this blockette is the
more precise start time. Assumes that the file pointer is at the start
of the blockette.
"""
self.file.read(5)
start_time_microsecond = self.file.read(1)
start_time_microsecond = self.header_unpacker.unpack(
'b', start_time_microsecond
)[0]
# Convert from microsecond to second so that UTCDateTime can handle it.
start_time_microsecond /= (10 ** 6)
self.record_metadata.start_time += start_time_microsecond
self.file.read(2)
def read_blockette_2000(self) -> None:
pass
def apply_time_correction(self) -> None:
"""
Apply the time correction found in the fixed header to the start time.
"""
# format() is used here instead of bin() because we need to pad the
# resulting bit string with 0 to the left.
activity_flags = format(
self.header_unpacker.unpack(
'B', self.fixed_header.activity_flags)[0],
'0>8b'
)
        # Bit 1 of the activity flags indicates whether the time correction
        # has been applied. The flag string above is MSB-first, so bit 1 is
        # at index 6.
        is_time_correction_applied = int(activity_flags[6])
        if is_time_correction_applied:
            return
time_correction = self.header_unpacker.unpack(
'L', self.fixed_header.time_correction
)[0]
# We need to convert the unit from 0.0001 seconds to seconds
time_correction *= 0.0001
self.record_metadata.start_time += time_correction
def read_blockettes(self) -> None:
"""
        Read all the blockettes in the current data record aside from
        blockette 1000, which has been read previously. Currently only
        handles blockettes 500, 1001, and 2000.
"""
blockette_count = self.header_unpacker.unpack(
'B', self.fixed_header.blockette_count
)[0]
for i in range(1, blockette_count):
# All blockettes store their type in the first two bytes, so we
# read that to determine what to do
next_blockette_type = self.file.read(2)
# Move file pointer back to start of blockette
self.file.seek(-2, 1)
next_blockette_type = self.header_unpacker.unpack(
'H', next_blockette_type
)[0]
            if next_blockette_type not in (500, 1000, 1001, 2000):
                print('We currently only handle blockettes 500, 1000, 1001, '
                      'and 2000.')
                continue
if next_blockette_type == 500:
self.read_blockette_500()
elif next_blockette_type == 1001:
self.read_blockette_1001()
elif next_blockette_type == 2000:
self.read_blockette_2000()
def get_first_data_point(self) -> Real:
"""
Get the first data point of the current data record.
:return: the first data point of the current data record, whose type is
determined based on the encoding type stored in blockette 1000.
"""
record_start = self.file.tell()
data_start = self.header_unpacker.unpack(
'H', self.fixed_header.data_offset
)[0]
# The data start byte is defined as an offset from the start of the
# data record. Seeing as we should be at the start of the data record
# by seeking there at the end of every major step, we can simply seek
# to the start of the data.
self.file.seek(data_start, 1)
encoding_format = self.blockette_1000.encoding_format
encoding_format = self.header_unpacker.unpack('b', encoding_format)[0]
encoding_format = EncodingFormat(encoding_format)
if encoding_format == EncodingFormat.ASCII:
# We want to read everything in the record if the encoding is
# ASCII.
record_length_exp = self.header_unpacker.unpack(
'B', self.blockette_1000.record_length
)[0]
record_size = 2 ** record_length_exp
# This name does not make much sense with what we are doing here,
# but it will have to do for now.
# The size of the record includes the header, so we have to account
# for that when grabbing the data.
first_data_point = self.file.read(record_size - data_start)
else:
# Currently, we are extracting only the first data point in each
# record. The smallest possible amount of bytes we can extract
# while guaranteeing that we get the first data point in the
# record is 8, with Steim encodings and IEEE double precision
# float needing to use the whole buffer.
buffer = self.file.read(8)
encoding_to_decoder = {
EncodingFormat.INT_16_BIT: decode_int16,
EncodingFormat.INT_24_BIT: decode_int24,
EncodingFormat.INT_32_BIT: decode_int32,
EncodingFormat.IEEE_FLOAT_32_BIT: decode_ieee_float,
EncodingFormat.IEEE_FLOAT_64_BIT: decode_ieee_double,
EncodingFormat.STEIM_1: decode_steim,
EncodingFormat.STEIM_2: decode_steim,
}
first_data_point = encoding_to_decoder[encoding_format](
buffer, self.data_unpacker
)
# Seek back to the start of the record so we can call this method again
# if needed.
self.file.seek(record_start)
return first_data_point
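A hypothetical usage sketch for the reader classes above (the file name is a placeholder):

    with open('example.mseed', 'rb') as mseed_file:
        record = RecordReader(mseed_file)
        print(record.record_metadata)
        print(record.get_first_data_point())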
......@@ -68,21 +68,23 @@ class LogInfo():
# TT =2001:253:15:13:59:768 NS: 144005 SPS: 40 ETO: 0
parts = line.split()
data_stream = int(parts[5])
if data_stream not in self.parent.req_data_streams:
return (0, 0)
try:
if parts[8].startswith("00:000"):
if parts[11].startswith("00:000"):
return -1, 0
epoch, _ = get_time_6(parts[11])
if (self.req_data_streams == ['*'] or
data_stream in self.req_data_streams):
try:
if parts[8].startswith("00:000"):
if parts[11].startswith("00:000"):
return -1, 0
epoch, _ = get_time_6(parts[11])
else:
epoch, _ = get_time_6(parts[8])
except AttributeError:
self.parent.processing_log.append((line, LogType.ERROR))
return False
if epoch > 0:
self.min_epoch = min(epoch, self.min_epoch)
self.max_epoch = max(epoch, self.max_epoch)
else:
epoch, _ = get_time_6(parts[8])
except AttributeError:
self.parent.processing_log.append(line, LogType.ERROR)
return False
if epoch > 0:
self.min_epoch = min(epoch, self.min_epoch)
self.max_epoch = max(epoch, self.max_epoch)
return 0, 0
else:
return 0, 0
return epoch, data_stream
......@@ -203,10 +205,10 @@ class LogInfo():
return False
return epoch, disk, val
def read_dps_clock_diff(self, line: str
def read_dsp_clock_diff(self, line: str
) -> Union[bool, Tuple[float, float]]:
"""
Read DPS clock difference
Read DSP clock difference
:param line: str - a line of evt message
:return epoch: float - time when info is recorded
:return total: float - total difference time in milliseconds
......@@ -347,18 +349,17 @@ class LogInfo():
line = line.upper()
if 'FST' in line:
ret = self.read_evt(line)
if ret:
if ret is not False:
epoch, data_stream = ret
if data_stream in self.req_data_streams:
if epoch > 0:
chan_name = 'Event DS%s' % data_stream
self.add_chan_info(chan_name, epoch, 1, idx)
elif epoch == 0:
self.parent.processing_log.append(
line, LogType.WARNING)
else:
self.parent.processing_log.append(
line, LogType.ERROR)
if epoch > 0:
chan_name = 'Event DS%s' % data_stream
self.add_chan_info(chan_name, epoch, 1, idx)
elif epoch == 0:
self.parent.processing_log.append(
(line, LogType.WARNING))
else:
self.parent.processing_log.append(
(line, LogType.ERROR))
elif line.startswith("STATE OF HEALTH"):
epoch = self.read_sh_header(line)
......@@ -415,11 +416,11 @@ class LogInfo():
if epoch:
self.add_chan_info('Jerks/DSP Sets', epoch, 0, idx)
elif "DPS clock diff" in line:
ret = self.read_dps_clock_diff()
elif "DSP CLOCK DIFFERENCE" in line:
ret = self.read_dsp_clock_diff(line)
if ret:
epoch, total = ret
self.add_chan_info('DPS Clock Diff', epoch, total, idx)
self.add_chan_info('DSP Clock Diff', epoch, total, idx)
elif "ACQUISITION STARTED" in line:
epoch = self.simple_read(line)[1]
......@@ -457,7 +458,7 @@ class LogInfo():
elif "EXTERNAL CLOCK IS UNLOCKED" in line:
epoch = self.simple_read(line)[1]
if epoch:
self.add_chan_info('GPS Lk/Unlk', epoch, 0, idx)
self.add_chan_info('GPS Lk/Unlk', epoch, -1, idx)
elif "EXTERNAL CLOCK IS LOCKED" in line:
epoch = self.simple_read(line)[1]
if epoch:
......
......@@ -4,7 +4,7 @@ RT130 object to hold and process RefTek data
import os
from pathlib import Path
from typing import Tuple, List, Union
import traceback
import numpy as np
from sohstationviewer.model.reftek.from_rt2ms import (
......@@ -35,6 +35,11 @@ class RT130(DataTypeModel):
"""
self.req_data_streams: List[Union[int, str]] = self.req_wf_chans
"""
        rt130_waveform_data_req: flag to read waveform data for the
            requested data streams
"""
self.rt130_waveform_data_req: bool = kwarg['rt130_waveform_data_req']
"""
found_data_streams: list of data streams found to help inform user
why the selected data streams don't show up
"""
......@@ -89,8 +94,15 @@ class RT130(DataTypeModel):
path2file = Path(path).joinpath(file_name)
if not validate_file(path2file, file_name):
continue
if not self.read_reftek_130(path2file):
read_text(path2file, file_name, self.log_data['TEXT'])
try:
if not self.read_reftek_130(path2file):
read_text(path2file, self.log_data['TEXT'])
except Exception:
fmt = traceback.format_exc()
self.track_info(f"Skip file {path2file} can't be read "
f"due to error: {str(fmt)}",
LogType.WARNING)
count += 1
if count % 50 == 0:
self.track_info(
......@@ -133,7 +145,13 @@ class RT130(DataTypeModel):
:param path2file: absolute path to file
"""
rt130 = core.Reftek130.from_file(path2file)
try:
rt130 = core.Reftek130.from_file(path2file)
except Exception:
fmt = traceback.format_exc()
self.track_info(f"Skip file {path2file} can't be read "
f"due to error: {str(fmt)}", LogType.WARNING)
return
unique, counts = np.unique(rt130._data["packet_type"],
return_counts=True)
nbr_packet_type = dict(zip(unique, counts))
......@@ -189,7 +207,9 @@ class RT130(DataTypeModel):
cur_key = (rt130._data[0]['unit_id'].decode(),
f"{rt130._data[0]['experiment_number']}")
self.populate_cur_key_for_all_data(cur_key)
self.get_ehet_in_log_data(rt130, cur_key)
if data_stream != 9:
# don't get event info for mass position
self.get_ehet_in_log_data(rt130, cur_key)
self.get_mass_pos_data_and_waveform_data(rt130, data_stream, cur_key)
def get_ehet_in_log_data(self, rt130: core.Reftek130,
......@@ -230,8 +250,10 @@ class RT130(DataTypeModel):
"""
if data_stream == 9:
cur_data_dict = self.mass_pos_data[cur_key]
else:
elif self.rt130_waveform_data_req:
cur_data_dict = self.waveform_data[cur_key]
else:
return
avail_trace_indexes = check_reftek_header(
rt130, cur_key, self.read_start, self.read_end,
......
......@@ -386,41 +386,31 @@ class MainWindow(QtWidgets.QMainWindow, UIMainWindow):
:rtype: List[str, int]
"""
req_wf_chans = []
if ((self.all_wf_chans_check_box.isChecked()
or [ds for ds in self.ds_check_boxes if ds.isChecked()] != []
or self.mseed_wildcard_edit.text().strip() != "")
and not self.tps_check_box.isChecked()
and not self.raw_check_box.isChecked()):
raise Exception(
"Waveform channels have been selected but there are none of "
"TPS or RAW checkboxes checked.\nPlease clear the "
"selection of waveform if you don't want to display the data.")
if self.tps_check_box.isChecked() or self.raw_check_box.isChecked():
if self.all_wf_chans_check_box.isChecked():
req_mseed_wildcards = ['*']
req_dss = ['*'] # all data stream
else:
req_dss = []
req_mseed_wildcards = []
for idx, ds_checkbox in enumerate(self.ds_check_boxes):
if ds_checkbox.isChecked():
req_dss.append(idx + 1)
if self.mseed_wildcard_edit.text().strip() != "":
req_mseed_wildcards = self.mseed_wildcard_edit.text(
).split(",")
if self.data_type == 'RT130':
req_wf_chans = req_dss
if req_dss != ['*'] and req_mseed_wildcards != []:
msg = 'MSeed Wildcards will be ignored for RT130.'
self.processing_log.append((msg, LogType.WARNING))
else:
req_wf_chans = req_mseed_wildcards
if req_mseed_wildcards != ['*'] and req_dss != []:
msg = ('Checked data streams will be ignored for '
                           'non-RT130 data type.')
self.processing_log.append((msg, LogType.WARNING))
if self.all_wf_chans_check_box.isChecked():
req_mseed_wildcards = ['*']
req_dss = ['*'] # all data stream
else:
req_dss = []
req_mseed_wildcards = []
for idx, ds_checkbox in enumerate(self.ds_check_boxes):
if ds_checkbox.isChecked():
req_dss.append(idx + 1)
if self.mseed_wildcard_edit.text().strip() != "":
req_mseed_wildcards = self.mseed_wildcard_edit.text(
).split(",")
if self.data_type == 'RT130':
req_wf_chans = req_dss
if req_dss != ['*'] and req_mseed_wildcards != []:
msg = 'MSeed Wildcards will be ignored for RT130.'
self.processing_log.append((msg, LogType.WARNING))
else:
req_wf_chans = req_mseed_wildcards
if req_mseed_wildcards != ['*'] and req_dss != []:
msg = ('Checked data streams will be ignored for '
                       'non-RT130 data type.')
self.processing_log.append((msg, LogType.WARNING))
return req_wf_chans
def get_requested_soh_chan(self):
......@@ -540,6 +530,13 @@ class MainWindow(QtWidgets.QMainWindow, UIMainWindow):
else:
self.min_gap = None
        # If waveform channels are selected, Event DS will be read from the
        # EH/ET header.
        # rt130_waveform_data_req indicates whether RT130 waveform data
        # should be read.
rt130_waveform_data_req = False
if self.raw_check_box.isChecked() or self.tps_check_box.isChecked():
rt130_waveform_data_req = True
if self.mseed_wildcard_edit.text().strip() != '':
try:
check_chan_wildcards_format(self.mseed_wildcard_edit.text())
......@@ -608,7 +605,8 @@ class MainWindow(QtWidgets.QMainWindow, UIMainWindow):
read_start=self.start_tm,
read_end=self.end_tm,
include_mp123=self.mass_pos_123zne_check_box.isChecked(),
include_mp456=self.mass_pos_456uvw_check_box.isChecked()
include_mp456=self.mass_pos_456uvw_check_box.isChecked(),
rt130_waveform_data_req=rt130_waveform_data_req
)
self.data_loader.worker.finished.connect(self.data_loaded)
......
# class with all plotting functions
import numpy as np
from sohstationviewer.controller.util import get_val
from sohstationviewer.controller.plotting_data import get_masspos_value_colors
......@@ -75,8 +77,10 @@ class Plotting:
if chan_db_info['valueColors'] in [None, 'None', '']:
chan_db_info['valueColors'] = '*:W'
value_colors = chan_db_info['valueColors'].split('|')
colors = []
for vc in value_colors:
v, c = vc.split(':')
colors.append(c)
val = get_val(v)
if c == '_':
prev_val = val
......@@ -104,9 +108,14 @@ class Plotting:
total_samples = len(x)
x = sorted(x)
if len(colors) != 1:
sample_no_colors = [clr['W']]
else:
sample_no_colors = [clr[colors[0]]]
self.plotting_axes.set_axes_info(
ax, [total_samples], chan_db_info=chan_db_info,
linked_ax=linked_ax)
ax, [total_samples], sample_no_colors=sample_no_colors,
chan_db_info=chan_db_info, linked_ax=linked_ax)
if linked_ax is None:
ax.x = x
else:
......@@ -168,6 +177,8 @@ class Plotting:
ax.set_ylim(-2, 2)
self.plotting_axes.set_axes_info(
ax, [len(points_list[0]), len(points_list[1])],
sample_no_colors=[clr[colors[0]], clr[colors[1]]],
sample_no_pos=[0.25, 0.75],
chan_db_info=chan_db_info, linked_ax=linked_ax)
if linked_ax is None:
ax.x = x
......@@ -203,7 +214,8 @@ class Plotting:
x_list = c_data['times']
total_x = sum([len(x) for x in x_list])
self.plotting_axes.set_axes_info(
ax, [total_x], chan_db_info=chan_db_info, linked_ax=linked_ax)
ax, [total_x], sample_no_colors=[clr[color]],
chan_db_info=chan_db_info, linked_ax=linked_ax)
for x in x_list:
ax.plot(x, [0] * len(x), marker='s', markersize=1.5,
......@@ -250,10 +262,7 @@ class Plotting:
self.parent.plotting_bot, plot_h)
x_list, y_list = c_data['times'], c_data['data']
total_x = sum([len(x) for x in x_list])
self.plotting_axes.set_axes_info(
ax, [total_x], chan_db_info=chan_db_info,
info=info, y_list=y_list, linked_ax=linked_ax)
colors = {}
if chan_db_info['valueColors'] not in [None, 'None', '']:
color_parts = chan_db_info['valueColors'].split('|')
......@@ -261,12 +270,27 @@ class Plotting:
obj, c = cStr.split(':')
colors[obj] = c
l_color = 'G'
d_color = 'W'
has_dot = False
if 'L' in colors:
l_color = colors['L']
if 'D' in colors:
d_color = colors['D']
has_dot = True
if chan_id == 'GPS Lk/Unlk':
sample_no_list = []
sample_no_list.append(np.where(y_list[0] == -1)[0].size)
sample_no_list.append(np.where(y_list[0] == 1)[0].size)
sample_no_colors = [clr[d_color], clr[d_color]]
else:
sample_no_list = [sum([len(x) for x in x_list])]
sample_no_colors = [clr[d_color]]
self.plotting_axes.set_axes_info(
ax, sample_no_list, sample_no_colors=sample_no_colors,
chan_db_info=chan_db_info,
info=info, y_list=y_list, linked_ax=linked_ax)
for x, y in zip(x_list, y_list):
if not has_dot:
# set marker to be able to click point for info
......
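The GPS Lk/Unlk branch above relies on the LogInfo change earlier in this diff, which now records -1 for 'EXTERNAL CLOCK IS UNLOCKED' and 1 for locked; np.where() can then count each state separately. A small illustration with made-up values:

    import numpy as np

    y = np.array([-1, 1, 1, -1, 1])
    assert np.where(y == -1)[0].size == 2  # unlocked samples
    assert np.where(y == 1)[0].size == 3   # locked samples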
from typing import List
from typing import List, Optional, Dict
import numpy as np
from matplotlib.axes import Axes
from matplotlib.patches import ConnectionPatch, Rectangle
from matplotlib.ticker import AutoMinorLocator
from matplotlib import pyplot as pl
......@@ -10,6 +12,7 @@ from sohstationviewer.controller.plotting_data import (
get_gaps, get_time_ticks, get_unit_bitweight)
from sohstationviewer.conf import constants
from sohstationviewer.view.util.color import clr
class PlottingAxes:
......@@ -148,24 +151,30 @@ class PlottingAxes:
ax.patch.set_alpha(0)
return ax
def set_axes_info(self, ax, sample_no_list,
label=None, info='', y_list=None, chan_db_info=None,
linked_ax=None):
def set_axes_info(self, ax: Axes,
sample_no_list: List[int],
sample_no_colors: List[str] = [clr['W'], clr['W']],
sample_no_pos: List[float] = [0.05, 0.95],
label: Optional[str] = None,
info: str = '',
y_list: Optional[np.ndarray] = None,
chan_db_info: Optional[Dict] = None,
linked_ax: Optional[Axes] = None):
"""
Draw plot's title, sub title, sample total label, center line, y labels
for a channel.
:param ax: matplotlib.axes.Axes - axes of a channel
:param sample_no_list: [int,] - list of totals of different sample
groups
:param label: str/None - title of the plot.
If None, show chan_db_info['label']
:param info: str - additional info to show in sub title which is
:param ax: axes of a channel
:param sample_no_list: list of totals of different sample groups
        :param sample_no_colors: list of colors used to display the sample
            numbers
        :param sample_no_pos: list of positions (top/bottom) at which to
            display the sample numbers
:param label: title of the plot. If None, show chan_db_info['label']
:param info: additional info to show in sub title which is
smaller and under title on the left side
:param y: numpy.array - y values of the channel, to show min/max labels
and min/max lines
:param chan_db_info: dict - info of channel from database
:param linked_ax: matplotlib.axes.Axes/None -
        :param y_list: y values of the channel for min/max labels, min/max lines
:param chan_db_info: info of channel from database
:param linked_ax:
if linked_ax is None, this is a main channel, label of channel will
be displayed with title's format, on top right of plot.
if linked_ax is not None, this is a channel using main channel's
......@@ -181,6 +190,7 @@ class PlottingAxes:
if label is None:
label = chan_db_info['label']
title_ver_alignment = 'center'
# set info in subtitle under title
if linked_ax is not None:
......@@ -223,7 +233,7 @@ class PlottingAxes:
verticalalignment='center',
rotation='horizontal',
transform=ax.transAxes,
color=self.parent.display_color['basic'],
color=sample_no_colors[0],
size=self.parent.font_size
)
else:
......@@ -233,30 +243,31 @@ class PlottingAxes:
# on data created in trim_downsample_chan_with_spr_less_or_equal_1
# and won't be changed in set_lim, then don't need to assign a
# variable for it.
# bottom
ax.text(
1.005, 0.25,
1.005, sample_no_pos[0],
sample_no_list[0],
horizontalalignment='left',
verticalalignment='center',
rotation='horizontal',
transform=ax.transAxes,
color=self.parent.display_color['basic'],
color=sample_no_colors[0],
size=self.parent.font_size
)
# top
ax.text(
1.005, 0.75,
1.005, sample_no_pos[1],
sample_no_list[1],
horizontalalignment='left',
verticalalignment='center',
rotation='horizontal',
transform=ax.transAxes,
color=self.parent.display_color['basic'],
color=sample_no_colors[1],
size=self.parent.font_size
)
if linked_ax is not None:
ax.set_yticks([])
return
if y_list is None:
# draw center line
ax.plot([self.parent.min_x, self.parent.max_x],
......@@ -341,17 +352,23 @@ class PlottingAxes:
)
)
def get_height(self, ratio, bw_plots_distance=0.0015):
def get_height(self, ratio: float, bw_plots_distance: float = 0.0015,
pixel_height: float = 19) -> float:
"""
Calculate new plot's bottom position and return plot's height.
:param ratio: float - ratio of the plot height on the BASIC_HEIGHT
:param bw_plots_distance: float - distance between plots
:return plot_h: float - height of the plot
:param ratio: ratio of the plot height on the BASIC_HEIGHT
:param bw_plots_distance: distance between plots
        :param pixel_height: height of the plot in pixels (for TPS/TPS
            legend, the height of each day row)
:return plot_h: height of the plot
"""
plot_h = constants.BASIC_HEIGHT * ratio # ratio with figure height
self.parent.plotting_bot -= plot_h + bw_plots_distance
self.parent.plotting_bot_pixel += 19 * ratio
bw_plots_distance_pixel = 3000 * bw_plots_distance
self.parent.plotting_bot_pixel += (pixel_height * ratio +
bw_plots_distance_pixel)
return plot_h
def add_ruler(self, color):
......
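To make the new pixel bookkeeping in get_height concrete, here is the arithmetic for the TPS call shown further down (illustrative numbers, not from the changeset): with ratio = total_days/2, pixel_height=12.1 and bw_plots_distance=0.003, plotting_bot_pixel grows by 12.1 * ratio + 3000 * 0.003 per plot.

    ratio = 10 / 2  # e.g. ten days of TPS data
    assert abs((12.1 * ratio + 3000 * 0.003) - 69.5) < 1e-9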
......@@ -89,8 +89,10 @@ class TimePowerSquaredWidget(plotting_widget.PlottingWidget):
self.is_working = True
self.set_key = key
self.plotting_data1 = d_obj.waveform_data[key]
self.plot_total = len(self.plotting_data1)
self.plotting_bot = const.BOTTOM
self.plotting_bot_pixel = const.BOTTOM_PX
self.processed_channels = []
self.channels = []
self.tps_processors = []
......@@ -111,7 +113,7 @@ class TimePowerSquaredWidget(plotting_widget.PlottingWidget):
title = get_title(key, self.min_x, self.max_x, self.date_mode)
self.timestamp_bar_top = self.plotting_axes.add_timestamp_bar(0.)
self.plotting_axes.set_title(title, y=0, v_align='bottom')
self.plotting_axes.set_title(title, y=5, v_align='bottom')
if self.plotting_data1 == {}:
self.is_working = False
......@@ -220,11 +222,12 @@ class TimePowerSquaredWidget(plotting_widget.PlottingWidget):
total_days = c_data['tps_data'].shape[0]
plot_h = self.plotting_axes.get_height(
1.5 * total_days, bw_plots_distance=0.003)
total_days/2, bw_plots_distance=0.003, pixel_height=12.1)
ax = self.create_axes(self.plotting_bot, plot_h)
ax.spines[['right', 'left', 'top', 'bottom']].set_visible(False)
ax.text(
-0.1, 1.2,
f"{get_chan_label(chan_id)} {c_data['samplerate']}",
-0.12, 1,
f"{get_chan_label(chan_id)} {c_data['samplerate']}sps",
horizontalalignment='left',
verticalalignment='top',
rotation='horizontal',
......@@ -234,17 +237,17 @@ class TimePowerSquaredWidget(plotting_widget.PlottingWidget):
)
zoom_marker1 = ax.plot(
[], [], marker='|', markersize=10,
[], [], marker='|', markersize=5,
markeredgecolor=self.display_color['zoom_marker'])[0]
self.zoom_marker1s.append(zoom_marker1)
zoom_marker2 = ax.plot(
[], [], marker='|', markersize=10,
[], [], marker='|', markersize=5,
markeredgecolor=self.display_color['zoom_marker'])[0]
self.zoom_marker2s.append(zoom_marker2)
ruler = ax.plot(
[], [], marker='s', markersize=5,
[], [], marker='s', markersize=4,
markeredgecolor=self.display_color['time_ruler'],
markerfacecolor='None')[0]
self.rulers.append(ruler)
......@@ -258,8 +261,8 @@ class TimePowerSquaredWidget(plotting_widget.PlottingWidget):
            # don't draw data outside the day range
color_set = self.get_color_set(y, square_counts, color_codes)
# (- dayIdx): each day is a line, increase from top to bottom
ax.scatter(x, [- dayIdx] * len(x), marker='|',
c=color_set, s=7, alpha=0.8)
ax.scatter(x, [- dayIdx] * len(x), marker='s',
c=color_set, s=3)
# extra to show highlight square
ax.set_ylim(-(c_data['tps_data'].shape[0] + 1), 1)
......@@ -274,11 +277,13 @@ class TimePowerSquaredWidget(plotting_widget.PlottingWidget):
ax.legend will create one label for each dot.
"""
# set height of legend and distance bw legend and upper ax
plot_h = self.plotting_axes.get_height(7, bw_plots_distance=0.003)
plot_h = self.plotting_axes.get_height(
21, bw_plots_distance=0.004, pixel_height=12)
ax = self.plotting_axes.canvas.figure.add_axes(
[self.plotting_l, self.plotting_bot, self.plotting_w, plot_h],
picker=True
)
ax.axis('off')
ax.patch.set_alpha(0)
c_labels = self.parent.sel_col_labels
clrs = self.parent.color_def # colordef
......