@@ -2,56 +2,21 @@ from typing import Dict
 import pandas as pd
 import numpy as np
 import logging, traceback
+from concurrent.futures import Executor, ThreadPoolExecutor
 
 import detectors
 from analytic_unit_worker import AnalyticUnitWorker
 
 
 logger = logging.getLogger('AnalyticUnitManager')
 
+WORKERS_EXECUTORS = 20
+
 AnalyticUnitId = str
-analytic_workers: Dict[AnalyticUnitId, AnalyticUnitWorker] = dict()
 
 
 def get_detector_by_type(analytic_unit_type) -> detectors.Detector:
     return detectors.PatternDetector(analytic_unit_type)
 
-def ensure_worker(analytic_unit_id, analytic_unit_type) -> AnalyticUnitWorker:
-    if analytic_unit_id in analytic_workers:
-        # TODO: check that type is the same
-        return analytic_workers[analytic_unit_id]
-    detector = get_detector_by_type(analytic_unit_type)
-    worker = AnalyticUnitWorker(analytic_unit_id, detector)
-    analytic_workers[analytic_unit_id] = worker
-    return worker
-
-async def handle_analytic_task(task):
-    try:
-        payload = task['payload']
-
-        worker = ensure_worker(task['analyticUnitId'], payload['pattern'])
-
-        data = prepare_data(payload['data'])
-        result_payload = {}
-        if task['type'] == 'LEARN':
-            result_payload = await worker.do_learn(payload['segments'], data, payload['cache'])
-        elif task['type'] == 'PREDICT':
-            result_payload = await worker.do_predict(data, payload['cache'])
-        else:
-            raise ValueError('Unknown task type "%s"' % task['type'])
-        return {
-            'status': 'SUCCESS',
-            'payload': result_payload
-        }
-
-    except Exception as e:
-        error_text = traceback.format_exc()
-        logger.error("handle_analytic_task exception: '%s'" % error_text)
-        # TODO: move result to a class which renders to json for messaging to analytics
-        return {
-            'status': 'FAILED',
-            'error': str(e)
-        }
-
 def prepare_data(data: list):
     """
     Takes list
@@ -66,3 +31,47 @@ def prepare_data(data: list):
     data['value'] = data['value'] - min(data['value'])
 
     return data
+
+
+class AnalyticUnitManager:
+
+    def __init__(self):
+        self.analytic_workers: Dict[AnalyticUnitId, AnalyticUnitWorker] = dict()
+        self.workers_executor = ThreadPoolExecutor(max_workers=WORKERS_EXECUTORS)
+
+    def __ensure_worker(self, analytic_unit_id, analytic_unit_type) -> AnalyticUnitWorker:
+        if analytic_unit_id in self.analytic_workers:
+            # TODO: check that type is the same
+            return self.analytic_workers[analytic_unit_id]
+        detector = get_detector_by_type(analytic_unit_type)
+        worker = AnalyticUnitWorker(analytic_unit_id, detector, self.workers_executor)
+        self.analytic_workers[analytic_unit_id] = worker
+        return worker
+
+    async def handle_analytic_task(self, task):
+        try:
+            payload = task['payload']
+            worker = self.__ensure_worker(task['analyticUnitId'], payload['pattern'])
+            data = prepare_data(payload['data'])
+            result_payload = {}
+            if task['type'] == 'LEARN':
+                result_payload = await worker.do_learn(payload['segments'], data, payload['cache'])
+            elif task['type'] == 'PREDICT':
+                result_payload = await worker.do_predict(data, payload['cache'])
+            else:
+                raise ValueError('Unknown task type "%s"' % task['type'])
+            return {
+                'status': 'SUCCESS',
+                'payload': result_payload
+            }
+
+        except Exception as e:
+            error_text = traceback.format_exc()
+            logger.error("handle_analytic_task exception: '%s'" % error_text)
+            # TODO: move result to a class which renders to json for messaging to analytics
+            return {
+                'status': 'FAILED',
+                'error': str(e)
+            }
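
For context, a minimal usage sketch of the new class-based entry point follows. It is illustrative only and not part of this change: it assumes the hastic-server analytics modules (analytic_unit_manager and its dependencies) are importable, and the unit id, pattern name, sample points and the guess that a successful LEARN result payload can be fed back as the PREDICT cache are hypothetical placeholders.

# Illustrative sketch only: exercises AnalyticUnitManager.handle_analytic_task
# with a LEARN task followed by a PREDICT task. All ids, pattern names and
# points below are hypothetical placeholders.
import asyncio
from analytic_unit_manager import AnalyticUnitManager

async def main():
    manager = AnalyticUnitManager()

    # Hypothetical [timestamp_ms, value] points; prepare_data() shifts the
    # values by their minimum (see the diff above).
    points = [[1540000000000 + i * 1000, float(i % 10)] for i in range(100)]

    learn_task = {
        'analyticUnitId': 'example-unit-1',   # placeholder id
        'type': 'LEARN',
        'payload': {
            'pattern': 'GENERAL',             # placeholder pattern type
            'data': points,
            'segments': [],                   # labeled segments would normally go here
            'cache': None
        }
    }
    learn_result = await manager.handle_analytic_task(learn_task)
    print('LEARN:', learn_result['status'])

    # Assumption: a successful LEARN result payload holds the detector cache.
    cache = learn_result['payload'] if learn_result['status'] == 'SUCCESS' else None

    predict_task = {
        'analyticUnitId': 'example-unit-1',
        'type': 'PREDICT',
        'payload': {
            'pattern': 'GENERAL',
            'data': points,
            'cache': cache
        }
    }
    print('PREDICT:', await manager.handle_analytic_task(predict_task))

if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(main())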