Browse Source

Incorrect work of analytics with nan filled dataset #247 (#248)

pull/1/head
Alexandr Velikiy 6 years ago committed by Alexey Velikiy
parent
commit
7ad0e5360f
  1. 4
      analytics/analytics/analytic_unit_manager.py
  2. 4
      analytics/analytics/models/drop_model.py
  3. 4
      analytics/analytics/models/general_model.py
  4. 4
      analytics/analytics/models/jump_model.py
  5. 4
      analytics/analytics/models/peak_model.py
  6. 4
      analytics/analytics/models/trough_model.py
  7. 4
      analytics/analytics/utils/common.py

4
analytics/analytics/analytic_unit_manager.py

@ -1,5 +1,6 @@
from typing import Dict
import pandas as pd
import numpy as np
import logging, traceback
import detectors
@ -61,6 +62,7 @@ def prepare_data(data: list):
data = pd.DataFrame(data, columns=['timestamp', 'value'])
data['timestamp'] = pd.to_datetime(data['timestamp'], unit='ms')
data['value'] = data['value'] - min(data['value'])
if not np.isnan(min(data['value'])):
data['value'] = data['value'] - min(data['value'])
return data

4
analytics/analytics/models/drop_model.py

@ -78,8 +78,8 @@ class DropModel(Model):
patterns_list.append(labeled_drop)
self.model_drop = utils.get_av_model(patterns_list)
for n in range(len(segments)):
labeled_drop = data[self.idrops[n] - self.state['WINDOW_SIZE']: self.idrops[n] + self.state['WINDOW_SIZE'] + 1]
for idrop in self.idrops:
labeled_drop = data[idrop - self.state['WINDOW_SIZE']: idrop + self.state['WINDOW_SIZE'] + 1]
labeled_drop = labeled_drop - min(labeled_drop)
auto_convolve = scipy.signal.fftconvolve(labeled_drop, labeled_drop)
convolve_drop = scipy.signal.fftconvolve(labeled_drop, self.model_drop)

4
analytics/analytics/models/general_model.py

@ -48,8 +48,8 @@ class GeneralModel(Model):
patterns_list.append(segment_data)
self.model_gen = utils.get_av_model(patterns_list)
for n in range(len(segments)): #labeled segments
labeled_data = data[self.ipats[n] - self.state['WINDOW_SIZE']: self.ipats[n] + self.state['WINDOW_SIZE'] + 1]
for ipat in self.ipats: #labeled segments
labeled_data = data[ipat - self.state['WINDOW_SIZE']: ipat + self.state['WINDOW_SIZE'] + 1]
labeled_data = labeled_data - min(labeled_data)
auto_convolve = scipy.signal.fftconvolve(labeled_data, labeled_data)
convolve_data = scipy.signal.fftconvolve(labeled_data, self.model_gen)

4
analytics/analytics/models/jump_model.py

@ -79,8 +79,8 @@ class JumpModel(Model):
patterns_list.append(labeled_jump)
self.model_jump = utils.get_av_model(patterns_list)
for n in range(len(segments)):
labeled_jump = data[self.ijumps[n] - self.state['WINDOW_SIZE']: self.ijumps[n] + self.state['WINDOW_SIZE'] + 1]
for ijump in self.ijumps:
labeled_jump = data[ijump - self.state['WINDOW_SIZE']: ijump + self.state['WINDOW_SIZE'] + 1]
labeled_jump = labeled_jump - min(labeled_jump)
auto_convolve = scipy.signal.fftconvolve(labeled_jump, labeled_jump)
convolve_jump = scipy.signal.fftconvolve(labeled_jump, self.model_jump)

4
analytics/analytics/models/peak_model.py

@ -50,8 +50,8 @@ class PeakModel(Model):
patterns_list.append(labeled_peak)
self.model_peak = utils.get_av_model(patterns_list)
for n in range(len(segments)): #labeled segments
labeled_peak = data[self.ipeaks[n] - self.state['WINDOW_SIZE']: self.ipeaks[n] + self.state['WINDOW_SIZE'] + 1]
for ipeak in self.ipeaks: #labeled segments
labeled_peak = data[ipeak - self.state['WINDOW_SIZE']: ipeak + self.state['WINDOW_SIZE'] + 1]
labeled_peak = labeled_peak - min(labeled_peak)
auto_convolve = scipy.signal.fftconvolve(labeled_peak, labeled_peak)
convolve_peak = scipy.signal.fftconvolve(labeled_peak, self.model_peak)

4
analytics/analytics/models/trough_model.py

@ -50,8 +50,8 @@ class TroughModel(Model):
patterns_list.append(labeled_trough)
self.model_trough = utils.get_av_model(patterns_list)
for n in range(len(segments)):
labeled_trough = data[self.itroughs[n] - self.state['WINDOW_SIZE']: self.itroughs[n] + self.state['WINDOW_SIZE'] + 1]
for itrough in self.itroughs:
labeled_trough = data[itrough - self.state['WINDOW_SIZE']: itrough + self.state['WINDOW_SIZE'] + 1]
labeled_trough = labeled_trough - min(labeled_trough)
auto_convolve = scipy.signal.fftconvolve(labeled_trough, labeled_trough)
convolve_trough = scipy.signal.fftconvolve(labeled_trough, self.model_trough)

4
analytics/analytics/utils/common.py

@ -4,7 +4,11 @@ import pandas as pd
def exponential_smoothing(series, alpha):
    """Return the exponentially smoothed copy of *series* as a list.

    Each output element is ``alpha * x + (1 - alpha) * previous`` where
    NaN samples are treated as 0 so a NaN-filled dataset cannot poison
    the running average (issue #247).

    Args:
        series: indexable sequence of numbers (list, array, pd.Series);
            may contain NaN values. The input is NOT modified.
        alpha: smoothing factor in [0, 1]; higher weights recent samples.

    Returns:
        list of smoothed values, same length as ``series`` (empty list
        for empty input instead of raising IndexError).
    """
    if len(series) == 0:
        return []
    # Test the scalar seed value, not the result list: np.isnan() on a
    # list raises once it holds more than one element.
    first = series[0]
    result = [0 if np.isnan(first) else first]
    for n in range(1, len(series)):
        # Substitute 0 for NaN locally instead of writing back into
        # the caller's sequence (the original mutated its argument).
        value = series[n]
        if np.isnan(value):
            value = 0
        result.append(alpha * value + (1 - alpha) * result[n - 1])
    return result

Loading…
Cancel
Save