diff --git a/analytics/models/drop_model.py b/analytics/models/drop_model.py
index 15cb130..065d6a3 100644
--- a/analytics/models/drop_model.py
+++ b/analytics/models/drop_model.py
@@ -101,8 +101,10 @@ class DropModel(Model):
     def do_predict(self, dataframe: pd.DataFrame):
         data = dataframe['value']
         possible_drops = utils.find_drop(data, self.state['DROP_HEIGHT'], self.state['DROP_LENGTH'] + 1)
+
         filtered = self.__filter_prediction(possible_drops, data)
-        return [(dataframe['timestamp'][x - 1].value, dataframe['timestamp'][x + 1].value) for x in filtered]
+        # TODO: convert from ns to ms more proper way (not dividing by 10^6)
+        return [(dataframe['timestamp'][x - 1].value / 1000000, dataframe['timestamp'][x + 1].value / 1000000) for x in filtered]
 
     def __filter_prediction(self, segments: list, data: list):
         delete_list = []
diff --git a/analytics/models/general_model.py b/analytics/models/general_model.py
index 33b5bf4..42ba460 100644
--- a/analytics/models/general_model.py
+++ b/analytics/models/general_model.py
@@ -79,7 +79,8 @@ class GeneralModel(Model):
 
         all_conv_peaks = utils.peak_finder(self.all_conv, WINDOW_SIZE * 2)
         filtered = self.__filter_prediction(all_conv_peaks, data)
-        return [(dataframe['timestamp'][x - 1].value, dataframe['timestamp'][x + 1].value) for x in filtered]
+        # TODO: convert from ns to ms more proper way (not dividing by 10^6)
+        return [(dataframe['timestamp'][x - 1].value / 1000000, dataframe['timestamp'][x + 1].value / 1000000) for x in filtered]
 
     def __filter_prediction(self, segments: list, data: list):
         if len(segments) == 0 or len(self.ipats) == 0:
diff --git a/analytics/models/jump_model.py b/analytics/models/jump_model.py
index 67a9929..e17d302 100644
--- a/analytics/models/jump_model.py
+++ b/analytics/models/jump_model.py
@@ -105,9 +105,10 @@ class JumpModel(Model):
 
     def do_predict(self, dataframe: pd.DataFrame):
         data = dataframe['value']
         possible_jumps = utils.find_jump(data, self.state['JUMP_HEIGHT'], self.state['JUMP_LENGTH'] + 1)
-        filtered = self.__filter_prediction(possible_jumps, data)
-        return [(dataframe['timestamp'][x - 1].value, dataframe['timestamp'][x + 1].value) for x in filtered]
+        filtered = self.__filter_prediction(possible_jumps, data)
+        # TODO: convert from ns to ms more proper way (not dividing by 10^6)
+        return [(dataframe['timestamp'][x - 1].value / 1000000, dataframe['timestamp'][x + 1].value / 1000000) for x in filtered]
 
     def __filter_prediction(self, segments, data):
         delete_list = []
diff --git a/analytics/models/peak_model.py b/analytics/models/peak_model.py
index 5b29cb7..799d86b 100644
--- a/analytics/models/peak_model.py
+++ b/analytics/models/peak_model.py
@@ -82,7 +82,8 @@ class PeakModel(Model):
                 segments.append(i)
 
         filtered = self.__filter_prediction(segments, data)
-        return [(dataframe['timestamp'][x - 1].value, dataframe['timestamp'][x + 1].value) for x in filtered]
+        # TODO: convert from ns to ms more proper way (not dividing by 10^6)
+        return [(dataframe['timestamp'][x - 1].value / 1000000, dataframe['timestamp'][x + 1].value / 1000000) for x in filtered]
 
     def __filter_prediction(self, segments: list, all_max_flatten_data: list):
         delete_list = []
diff --git a/analytics/models/trough_model.py b/analytics/models/trough_model.py
index 06ae826..f270edd 100644
--- a/analytics/models/trough_model.py
+++ b/analytics/models/trough_model.py
@@ -79,9 +79,10 @@ class TroughModel(Model):
         for i in all_mins:
             if all_max_flatten_data[i] < extrema_list[i]:
                 segments.append(i)
 
         filtered = self.__filter_prediction(segments, data)
-        return [(dataframe['timestamp'][x - 1].value, dataframe['timestamp'][x + 1].value) for x in filtered]
+        # TODO: convert from ns to ms more proper way (not dividing by 10^6)
+        return [(dataframe['timestamp'][x - 1].value / 1000000, dataframe['timestamp'][x + 1].value / 1000000) for x in filtered]
 
     def __filter_prediction(self, segments: list, all_max_flatten_data: list):
         delete_list = []
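
Note on the TODO repeated in each hunk: pandas Timestamp.value is nanoseconds since the Unix epoch, and dividing by 1000000 with "/" yields the millisecond timestamp as a float. One possible follow-up is sketched below under the assumption of a shared helper (the name timestamp_to_ms and where it would live are hypothetical, not part of this patch); it derives the divisor from pd.Timedelta rather than a hard-coded constant and uses integer division so the result stays a whole number of milliseconds.

    import pandas as pd

    # 1 millisecond expressed in nanoseconds (1000000), derived rather than hard-coded
    NS_PER_MS = pd.Timedelta(milliseconds=1).value

    def timestamp_to_ms(ts: pd.Timestamp) -> int:
        # Timestamp.value is nanoseconds since the Unix epoch;
        # integer division keeps the result an int count of milliseconds
        return ts.value // NS_PER_MS

Each do_predict could then return (timestamp_to_ms(dataframe['timestamp'][x - 1]), timestamp_to_ms(dataframe['timestamp'][x + 1])) for every filtered index, keeping the conversion consistent across the five models.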