
trim trailing whitespaces

pull/1/head
Coin de Gamma committed 6 years ago
commit b65f1a552c
1. analytics/analytics/analytic_unit_worker.py (4 changed lines)
2. analytics/analytics/config.py (2 changed lines)
3. analytics/analytics/models/custom_model.py (4 changed lines)
4. analytics/analytics/models/general_model.py (11 changed lines)
5. analytics/analytics/models/jump_model.py (16 changed lines)
6. analytics/analytics/models/peak_model.py (10 changed lines)
7. analytics/analytics/services/server_service.py (12 changed lines)
8. analytics/analytics/models/trough_model.py (18 changed lines)
9. analytics/analytics/utils/common.py (16 changed lines)
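
The change itself is mechanical: every touched line loses only its trailing spaces or tabs, which is why most hunks below show pairs of visually identical lines (the removed line carried invisible trailing whitespace). A minimal Python stand-in for the cleanup that produced this commit (the actual tool used is not recorded):

import sys

def trim_trailing_whitespace(text: str) -> str:
    # rstrip() each line; line content is otherwise untouched
    return '\n'.join(line.rstrip() for line in text.split('\n'))

if __name__ == '__main__':
    # read a file name from argv and rewrite the file in place
    path = sys.argv[1]
    with open(path) as f:
        cleaned = trim_trailing_whitespace(f.read())
    with open(path, 'w') as f:
        f.write(cleaned)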

analytics/analytics/analytic_unit_worker.py (4 changed lines)

@@ -29,11 +29,11 @@ class AnalyticUnitWorker:
return new_cache
except CancelledError as e:
return cache
async def do_predict(self, data: pd.DataFrame, cache: Optional[AnalyticUnitCache]) -> dict:
return self._detector.predict(data, cache)
def cancel(self):
if self._training_feature is not None:
self._training_feature.cancel()
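
For context on the hunk above: training runs as an asyncio future, cancel() cancels it, and the CancelledError handler returns the cache the training started with. A minimal sketch of that pattern, with hypothetical names (do_train, old_cache) standing in for the worker's internals:

import asyncio
from asyncio import CancelledError

async def do_train(old_cache: dict) -> dict:
    try:
        await asyncio.sleep(10)        # stand-in for a long model fit
        return {'version': 2}          # new_cache on success
    except CancelledError:
        return old_cache               # cancelled: keep the previous cache

async def main():
    training_future = asyncio.ensure_future(do_train({'version': 1}))
    await asyncio.sleep(0.1)
    training_future.cancel()           # what AnalyticUnitWorker.cancel() does
    print(await training_future)       # -> {'version': 1}

asyncio.run(main())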

analytics/analytics/config.py (2 changed lines)

@@ -23,7 +23,7 @@ def get_config_field(field, default_val = None):
if default_val is not None:
return default_val
raise Exception('Please configure {}'.format(field))
ZMQ_DEV_PORT = get_config_field('ZMQ_DEV_PORT', '8002')
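
get_config_field (partially visible above) resolves a field with an optional default and raises when neither is available. A self-contained sketch, assuming fields come from environment variables (the real implementation may read them from a config file instead):

import os

def get_config_field(field, default_val=None):
    value = os.environ.get(field)
    if value is not None:
        return value
    if default_val is not None:
        return default_val
    raise Exception('Please configure {}'.format(field))

ZMQ_DEV_PORT = get_config_field('ZMQ_DEV_PORT', '8002')  # falls back to '8002'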

analytics/analytics/models/custom_model.py (4 changed lines)

@@ -3,9 +3,9 @@ import utils
import pandas as pd
-class CustomModel(Model):
+class CustomModel(Model):
def do_fit(self, dataframe: pd.DataFrame, segments: list) -> None:
pass
def do_predict(self, dataframe: pd.DataFrame) -> list:
return []

analytics/analytics/models/general_model.py (11 changed lines)

@@ -11,7 +11,6 @@ from scipy.stats import gaussian_kde
from scipy.stats import norm
class GeneralModel(Model):
def __init__(self):
@@ -43,10 +42,10 @@ class GeneralModel(Model):
segment_data = utils.get_interval(data, center_ind, self.state['WINDOW_SIZE'])
segment_data = utils.subtract_min_without_nan(segment_data)
patterns_list.append(segment_data)
self.model_gen = utils.get_av_model(patterns_list)
convolve_list = utils.get_convolve(self.ipats, self.model_gen, data, self.state['WINDOW_SIZE'])
del_conv_list = []
for segment in segments:
if segment['deleted']:
@@ -63,17 +62,17 @@ class GeneralModel(Model):
self.state['convolve_max'] = float(max(convolve_list))
else:
self.state['convolve_max'] = self.state['WINDOW_SIZE'] / 3
if len(convolve_list) > 0:
self.state['convolve_min'] = float(min(convolve_list))
else:
self.state['convolve_min'] = self.state['WINDOW_SIZE'] / 3
if len(del_conv_list) > 0:
self.state['conv_del_min'] = float(min(del_conv_list))
else:
self.state['conv_del_min'] = self.state['WINDOW_SIZE']
if len(del_conv_list) > 0:
self.state['conv_del_max'] = float(max(del_conv_list))
else:
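
The hunk above shows one instance of a pattern repeated across all the models: each state bound is the min or max of a score list when the list is non-empty, and a window-derived fallback otherwise. A small helper pair, hypothetical and not in the repo, makes the pattern explicit:

def min_or_default(values: list, default: float) -> float:
    # guard min() against an empty list, as the state-filling code does
    return float(min(values)) if len(values) > 0 else default

def max_or_default(values: list, default: float) -> float:
    return float(max(values)) if len(values) > 0 else default

state, window_size = {}, 240
convolve_list = [12.5, 40.0, 33.1]   # toy convolution scores
del_conv_list = []                   # no deleted segments in this example

state['convolve_max'] = max_or_default(convolve_list, window_size / 3)
state['convolve_min'] = min_or_default(convolve_list, window_size / 3)
state['conv_del_min'] = min_or_default(del_conv_list, window_size)
state['conv_del_max'] = max_or_default(del_conv_list, window_size)
print(state)  # convolve bounds come from data; conv_del_* fall back to window_size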

analytics/analytics/models/jump_model.py (16 changed lines)

@@ -41,7 +41,7 @@ class JumpModel(Model):
segment_from_index, segment_to_index, segment_data = parse_segment(segment, dataframe)
percent_of_nans = segment_data.isnull().sum() / len(segment_data)
if percent_of_nans > 0 or len(segment_data) == 0:
-continue
+continue
confidence = utils.find_confidence(segment_data)
confidences.append(confidence)
segment_cent_index, jump_height, jump_length = utils.find_jump_parameters(segment_data, segment_from_index)
@@ -51,10 +51,10 @@ class JumpModel(Model):
labeled_jump = utils.get_interval(data, segment_cent_index, self.state['WINDOW_SIZE'])
labeled_jump = utils.subtract_min_without_nan(labeled_jump)
patterns_list.append(labeled_jump)
self.model_jump = utils.get_av_model(patterns_list)
convolve_list = utils.get_convolve(self.ijumps, self.model_jump, data, self.state['WINDOW_SIZE'])
del_conv_list = []
for segment in segments:
if segment['deleted']:
@@ -65,7 +65,7 @@ class JumpModel(Model):
deleted_jump = utils.get_interval(data, segment_cent_index, self.state['WINDOW_SIZE'])
deleted_jump = utils.subtract_min_without_nan(labeled_jump)
del_conv_jump = scipy.signal.fftconvolve(deleted_jump, self.model_jump)
-del_conv_list.append(max(del_conv_jump))
+del_conv_list.append(max(del_conv_jump))
if len(confidences) > 0:
self.state['confidence'] = float(min(confidences))
@@ -76,7 +76,7 @@ class JumpModel(Model):
self.state['convolve_max'] = float(max(convolve_list))
else:
self.state['convolve_max'] = self.state['WINDOW_SIZE']
if len(convolve_list) > 0:
self.state['convolve_min'] = float(min(convolve_list))
else:
@@ -91,12 +91,12 @@ class JumpModel(Model):
self.state['JUMP_LENGTH'] = int(max(jump_length_list))
else:
self.state['JUMP_LENGTH'] = 1
if len(del_conv_list) > 0:
self.state['conv_del_min'] = float(min(del_conv_list))
else:
self.state['conv_del_min'] = self.state['WINDOW_SIZE']
if len(del_conv_list) > 0:
self.state['conv_del_max'] = float(max(del_conv_list))
else:
@@ -113,7 +113,7 @@ class JumpModel(Model):
variance_error = self.state['WINDOW_SIZE']
close_patterns = utils.close_filtering(segments, variance_error)
segments = utils.best_pat(close_patterns, data, 'max')
if len(segments) == 0 or len(self.ijumps) == 0 :
segments = []
return segments
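
The deleted-segment loop above scores each window by the peak of its FFT-based convolution with the averaged pattern, presumably as a shape-similarity measure. A toy, self-contained version of that scoring:

import numpy as np
import scipy.signal

# idealized step pattern vs. a candidate window (toy data; the real code
# slices windows out of the series with utils.get_interval)
model_jump = np.concatenate([np.zeros(5), np.ones(5)])
window = np.concatenate([np.zeros(5), 0.9 * np.ones(5)])

score = max(scipy.signal.fftconvolve(window, model_jump))
print(score)  # peak response: high when the window resembles the jump shape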

analytics/analytics/models/peak_model.py (10 changed lines)

@@ -48,7 +48,7 @@ class PeakModel(Model):
self.model_peak = utils.get_av_model(patterns_list)
convolve_list = utils.get_convolve(self.ipeaks, self.model_peak, data, self.state['WINDOW_SIZE'])
del_conv_list = []
for segment in segments:
if segment['deleted']:
@@ -59,7 +59,7 @@ class PeakModel(Model):
deleted_peak = utils.get_interval(data, del_max_index, self.state['WINDOW_SIZE'])
deleted_peak = utils.subtract_min_without_nan(deleted_peak)
del_conv_peak = scipy.signal.fftconvolve(deleted_peak, self.model_peak)
-del_conv_list.append(max(del_conv_peak))
+del_conv_list.append(max(del_conv_peak))
if len(confidences) > 0:
self.state['confidence'] = float(min(confidences))
@@ -75,12 +75,12 @@ class PeakModel(Model):
self.state['convolve_min'] = float(min(convolve_list))
else:
self.state['convolve_min'] = self.state['WINDOW_SIZE']
if len(del_conv_list) > 0:
self.state['conv_del_min'] = float(min(del_conv_list))
else:
self.state['conv_del_min'] = self.state['WINDOW_SIZE']
if len(del_conv_list) > 0:
self.state['conv_del_max'] = float(max(del_conv_list))
else:
@@ -107,7 +107,7 @@ class PeakModel(Model):
variance_error = self.state['WINDOW_SIZE']
close_patterns = utils.close_filtering(segments, variance_error)
segments = utils.best_pat(close_patterns, data, 'max')
if len(segments) == 0 or len(self.ipeaks) == 0:
return []
pattern_data = self.model_peak
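
Each model's do_predict ends with the same post-filter: utils.close_filtering groups detections that fall within variance_error of each other, and utils.best_pat keeps one representative per group ('max' for peaks). The repo's implementations are not shown in this diff; the sketch below reimplements only the grouping idea:

def group_close(indices: list, tolerance: int) -> list:
    # walk sorted detections; start a new group when the gap exceeds tolerance
    groups, current = [], [indices[0]]
    for idx in indices[1:]:
        if idx - current[-1] <= tolerance:
            current.append(idx)
        else:
            groups.append(current)
            current = [idx]
    groups.append(current)
    return groups

print(group_close([10, 12, 13, 90, 400, 404], tolerance=5))
# -> [[10, 12, 13], [90], [400, 404]]; a best_pat-style step would then
# keep the highest-valued index from each group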

analytics/analytics/models/trough_model.py (18 changed lines)

@@ -40,15 +40,15 @@ class TroughModel(Model):
continue
confidence = utils.find_confidence(segment_data)
confidences.append(confidence)
-segment_min_index = segment_data.idxmin()
+segment_min_index = segment_data.idxmin()
self.itroughs.append(segment_min_index)
labeled_trough = utils.get_interval(data, segment_min_index, self.state['WINDOW_SIZE'])
labeled_trough = utils.subtract_min_without_nan(labeled_trough)
patterns_list.append(labeled_trough)
self.model_trough = utils.get_av_model(patterns_list)
convolve_list = utils.get_convolve(self.itroughs, self.model_trough, data, self.state['WINDOW_SIZE'])
del_conv_list = []
for segment in segments:
if segment['deleted']:
@@ -60,7 +60,7 @@ class TroughModel(Model):
deleted_trough = utils.get_interval(data, del_min_index, self.state['WINDOW_SIZE'])
deleted_trough = utils.subtract_min_without_nan(deleted_trough)
del_conv_trough = scipy.signal.fftconvolve(deleted_trough, self.model_trough)
-del_conv_list.append(max(del_conv_trough))
+del_conv_list.append(max(del_conv_trough))
if len(confidences) > 0:
self.state['confidence'] = float(min(confidences))
@@ -71,17 +71,17 @@ class TroughModel(Model):
self.state['convolve_max'] = float(max(convolve_list))
else:
self.state['convolve_max'] = self.state['WINDOW_SIZE']
if len(convolve_list) > 0:
self.state['convolve_min'] = float(min(convolve_list))
else:
self.state['convolve_min'] = self.state['WINDOW_SIZE']
if len(del_conv_list) > 0:
self.state['conv_del_min'] = float(min(del_conv_list))
else:
self.state['conv_del_min'] = self.state['WINDOW_SIZE']
if len(del_conv_list) > 0:
self.state['conv_del_max'] = float(max(del_conv_list))
else:
@@ -91,7 +91,7 @@ class TroughModel(Model):
data = dataframe['value']
window_size = int(len(data)/SMOOTHING_COEFF) #test ws on flat data
all_mins = argrelextrema(np.array(data), np.less)[0]
extrema_list = []
for i in utils.exponential_smoothing(data - self.state['confidence'], EXP_SMOOTHING_FACTOR):
extrema_list.append(i)
@@ -110,7 +110,7 @@ class TroughModel(Model):
segments = utils.best_pat(close_patterns, data, 'min')
if len(segments) == 0 or len(self.itroughs) == 0 :
segments = []
-return segments
+return segments
pattern_data = self.model_trough
for segment in segments:
if segment > self.state['WINDOW_SIZE']:
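
The do_predict hunk above seeds the search with the local minima of the raw series; argrelextrema with np.less returns exactly those indices. A toy example:

import numpy as np
from scipy.signal import argrelextrema

data = np.array([5, 4, 1, 4, 5, 3, 0.5, 3, 5], dtype=float)
all_mins = argrelextrema(data, np.less)[0]   # indices of strict local minima
print(all_mins)  # -> [2 6]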

analytics/analytics/services/server_service.py (12 changed lines)

@@ -16,17 +16,17 @@ class ServerMessage:
self.method = method
self.payload = payload
self.request_id = request_id
def toJSON(self):
result = {
-'method': self.method
+'method': self.method
}
if self.payload is not None:
result['payload'] = self.payload
if self.request_id is not None:
result['requestId'] = self.request_id
return result
def fromJSON(json: dict):
method = json['method']
payload = None
@@ -35,7 +35,7 @@ class ServerMessage:
payload = json['payload']
if 'requestId' in json:
request_id = json['requestId']
return ServerMessage(method, payload, request_id)
class ServerService:
@@ -62,7 +62,7 @@ class ServerService:
async def send_message(self, message: ServerMessage):
await self.socket.send_string(json.dumps(message.toJSON()))
async def send_request(self, message: ServerMessage) -> object:
if message.request_id is not None:
raise ValueError('Message can`t have request_id before it is scheduled')
@@ -82,7 +82,7 @@ class ServerService:
try:
message_object = json.loads(text)
message = ServerMessage.fromJSON(message_object)
if message.request_id is not None:
self.responses[message_object['requestId']] = message.payload
return
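
Reading the toJSON and fromJSON halves above together, the wire format is a JSON object with a mandatory 'method' and optional 'payload' and 'requestId'. A round-trip sketch (the method name and payload values here are made up):

import json

outgoing = {'method': 'TASK_RESULT',
            'payload': {'status': 'SUCCESS'},
            'requestId': 1}
text = json.dumps(outgoing)             # what send_message() writes to the socket

incoming = json.loads(text)             # what message handling starts from
method = incoming['method']             # required, as in fromJSON
payload = incoming.get('payload')       # optional
request_id = incoming.get('requestId')  # optional; keys self.responses when present
print(method, payload, request_id)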

analytics/analytics/utils/common.py (16 changed lines)

@@ -73,7 +73,7 @@ def intersection_segment(data, median):
for i in range(1, len(cen_ind)):
if cen_ind[i] == cen_ind[i - 1] + 1:
del_ind.append(i - 1)
return [x for (idx, x) in enumerate(cen_ind) if idx not in del_ind]
def logistic_sigmoid_distribution(self, x1, x2, alpha, height):
@@ -123,7 +123,7 @@ def find_ind_median(median, segment_data):
for i in range(len(segment_data)):
f.append(median)
f = np.array(f)
-g = []
+g = []
for i in segment_data:
g.append(i)
g = np.array(g)
@@ -139,7 +139,7 @@ def find_jump_length(segment_data, min_line, max_line):
l.append(max_line)
f = np.array(f)
l = np.array(l)
-g = []
+g = []
for i in segment_data:
g.append(i)
g = np.array(g)
@@ -150,7 +150,7 @@ def find_jump_length(segment_data, min_line, max_line):
else:
print("retard alert!")
return 0
def find_jump(data, height, lenght):
j_list = []
for i in range(len(data)-lenght-1):
@@ -168,7 +168,7 @@ def find_drop_length(segment_data, min_line, max_line):
l.append(max_line)
f = np.array(f)
l = np.array(l)
-g = []
+g = []
for i in segment_data:
g.append(i)
g = np.array(g)
@@ -186,11 +186,11 @@ def drop_intersection(segment_data, median_line):
for i in range(len(segment_data)):
f.append(median_line)
f = np.array(f)
-g = []
+g = []
for i in segment_data:
g.append(i)
g = np.array(g)
-idx = np.argwhere(np.diff(np.sign(f - g)) != 0).reshape(-1) + 0
+idx = np.argwhere(np.diff(np.sign(f - g)) != 0).reshape(-1) + 0
return idx
def find_drop(data, height, length):
@@ -283,7 +283,7 @@ def nan_to_zero(segment, nan_list):
def find_confidence(segment: pd.Series) -> float:
segment_min = min(segment)
segment_max = max(segment)
-return 0.2 * (segment_max - segment_min)
+return 0.2 * (segment_max - segment_min)
def get_interval(data: pd.Series, center: int, window_size: int) -> pd.Series:
left_bound = center - window_size
