Compare commits

..

No commits in common. 'main' and 'detection_runner_updade' have entirely different histories.

  1. 27
      Dockerfile
  2. 36
      README.md
  3. 6
      client/src/components/AnlyticsStatus.vue
  4. 1
      client/src/components/pods/pattern_pod.ts
  5. 31
      client/src/services/analytics.service.ts
  6. 17
      client/src/store/index.ts
  7. 88
      client/src/views/Home.vue
  8. 12
      docker-compose.yml
  9. 17
      server/config.example.toml
  10. 56
      server/src/config.rs
  11. 7
      server/src/main.rs
  12. 113
      server/src/services/analytic_service/analytic_service.rs
  13. 9
      server/src/services/analytic_service/analytic_unit/anomaly_analytic_unit.rs
  14. 1
      server/src/services/analytic_service/analytic_unit/mod.rs
  15. 4
      server/src/services/analytic_service/analytic_unit/pattern_analytic_unit.rs
  16. 3
      server/src/services/analytic_service/analytic_unit/threshold_analytic_unit.rs
  17. 74
      server/src/services/analytic_service/analytic_unit/types.rs
  18. 62
      server/src/services/analytic_service/detection_runner.rs
  19. 3
      server/src/services/analytic_service/types.rs
  20. 175
      server/src/services/analytic_unit_service.rs
  21. 23
      server/src/services/data_service.rs
  22. 7
      server/src/services/metric_service.rs
  23. 3
      server/src/services/mod.rs
  24. 16
      server/src/services/segments_service.rs

27
Dockerfile

@ -1,27 +0,0 @@
FROM rust:1.57.0-bullseye as builder
RUN curl -sL https://deb.nodesource.com/setup_16.x | bash -
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
nodejs \
gcc \
g++ \
make \
musl-tools \
&& rm -rf /var/lib/apt/lists/*
RUN npm install --global yarn
RUN rustup target add x86_64-unknown-linux-musl
ADD . ./
RUN make
FROM debian:bullseye-slim
COPY --from=builder /release/hastic /hastic
COPY --from=builder /release/config.toml /config.toml
COPY --from=builder /release/public /public
CMD ["./hastic"]

36
README.md

@ -17,42 +17,6 @@ instance for getting metrics.
make
```
### Configure
Hastic can be configured using config-file or environment variables.
First, choose which datasource you'll be using: `prometheus` or `influx`. Only one can be used at a time.
#### Config-file
- copy the config example to the release directory:
```bash
cp config.example.toml release/config.toml
```
- edit the config file, e.g. using `nano`:
```bash
nano release/config.toml
```
#### Environment variables
All config fields are also available as environment variables with the `HASTIC_` prefix.
Variable name structure:
- for high-level fields: `HASTIC_<field_name>`, e.g. `HASTIC_PORT`
- for nested fields: `HASTIC_<category_name>__<field_name>`, e.g. `HASTIC_PROMETHEUS__URL`
Environment variables can be set either by exporting them (they'll remain in effect until the bash session is closed):
```bash
export HASTIC_PORT=8000
export HASTIC_PROMETHEUS__URL=http://localhost:9090
export HASTIC_PROMETHEUS__QUERY=rate(go_memstats_alloc_bytes_total[5m])
```
or specifying them in the run command (they'll apply only to that run):
```bash
HASTIC_PORT=8000 HASTIC_PROMETHEUS__URL=http://localhost:9090 HASTIC_PROMETHEUS__QUERY=rate(go_memstats_alloc_bytes_total[5m]) ./release/hastic
```
### Run
```
cd release
./hastic

6
client/src/components/AnlyticsStatus.vue

@ -1,12 +1,10 @@
<template>
<div class="analytic-status">
analytic status: <strong> {{ status.message }} </strong>
analytic status: <strong> {{ status }} </strong>
</div>
</template>
<script lang="ts">
import { AnalyticStatus } from "@/store";
import { defineComponent } from 'vue';
@ -15,7 +13,7 @@ export default defineComponent({
components: {
},
computed: {
status(): AnalyticStatus {
status() {
return this.$store.state.analyticStatus;
}
}

1
client/src/components/pods/pattern_pod.ts

@ -79,6 +79,7 @@ export class PatternPod extends HasticPod<UpdateDataCallback> {
} else {
console.log('took from range from default');
}
console.log(from + " ---- " + to);
this.udc({ from, to })
.then(resp => {

31
client/src/services/analytics.service.ts

@ -5,33 +5,20 @@ import axios from 'axios';
import { getGenerator } from '@/utils';
import {
import _ from 'lodash';
import {
AnalyticUnitType, AnlyticUnitConfig,
PatternConfig, ThresholdConfig, AnomalyConfig
} from "@/types/analytic_units";
import { AnomalyHSR } from "@/types";
import { AnalyticStatus } from "@/store";
import _ from 'lodash';
const ANALYTICS_API_URL = API_URL + "analytics/";
export async function getStatus(): Promise<AnalyticStatus> {
export async function getStatus(): Promise<string> {
const uri = ANALYTICS_API_URL + `status`;
try {
const res = await axios.get<{ status: string }>(uri);
const data = res.data;
return {
available: true,
message: data.status
};
} catch (e) {
return {
available: false,
message: e.message
};
}
const res = await axios.get(uri);
const data = res['data'] as any;
return data.status;
}
export async function getConfig(): Promise<[AnalyticUnitType, AnlyticUnitConfig]> {
@ -67,8 +54,8 @@ export async function patchConfig(patchObj: any) {
await axios.patch(uri, patchObj);
}
export function getStatusGenerator(): AsyncIterableIterator<AnalyticStatus> {
return getGenerator<AnalyticStatus>(100, getStatus);
export function getStatusGenerator(): AsyncIterableIterator<string> {
return getGenerator<string>(100, getStatus);
}
@ -81,6 +68,6 @@ export async function getHSRAnomaly(from: number, to: number): Promise<AnomalyHS
const res = await axios.get(uri);
const values = res["data"]["AnomalyHSR"];
return values as AnomalyHSR;
}

17
client/src/store/index.ts

@ -12,30 +12,23 @@ const _SET_STATUS_GENERATOR = '_SET_STATUS_GENERATOR';
// TODO: consts for actions
export type AnalyticStatus = {
available: boolean,
message: string,
}
type State = {
analyticStatus: AnalyticStatus,
analyticStatus: string,
analyticUnitType?: AnalyticUnitType,
analyticUnitConfig?: AnlyticUnitConfig,
_statusGenerator: AsyncIterableIterator<AnalyticStatus>
_statusGenerator: AsyncIterableIterator<string>
}
const store = createStore<State>({
state: {
analyticStatus: {
available: false,
message: 'loading...',
},
analyticStatus: 'loading...',
analyticUnitType: null,
analyticUnitConfig: null,
_statusGenerator: null
},
mutations: {
[SET_ANALYTICS_STATUS](state, status: AnalyticStatus) {
[SET_ANALYTICS_STATUS](state, status: string) {
state.analyticStatus = status;
},
[SET_DETECTOR_CONFIG](state, { analyticUnitType, analyticUnitConfig }) {
@ -45,7 +38,7 @@ const store = createStore<State>({
// [PATCH_CONFIG](state, patchObj) {
// patchConfig(patchConfig)
// }
[_SET_STATUS_GENERATOR](state, generator: AsyncIterableIterator<AnalyticStatus>) {
[_SET_STATUS_GENERATOR](state, generator: AsyncIterableIterator<string>) {
state._statusGenerator = generator;
}
},

88
client/src/views/Home.vue

@ -2,53 +2,50 @@
<div class="home">
<img alt="Vue logo" src="../assets/logo.png">
<graph ref="graph" />
<analytic-status />
<template v-if="analyticStatus.available">
<div>
Analytic unit type:
<select :value="analyticUnitType" @change="changeAnalyticUnitType">
<option disabled value="">Please Select</option>
<option v-bind:key="option" v-for="option in analyticUnitTypes" :value="option">{{option}}</option>
</select> <br/><br/>
<div>
Analytic unit type:
<select :value="analyticUnitType" @change="changeAnalyticUnitType">
<option disabled value="">Please Select</option>
<option v-bind:key="option" v-for="option in analyticUnitTypes" :value="option">{{option}}</option>
</select> <br/><br/>
</div>
<div id="controls">
<div v-if="analyticUnitType == analyticUnitTypes[0]">
Threshold:
<input :value="analyticUnitConfig.threshold" @change="thresholdChange" /> <br/><br/>
</div>
<div id="controls">
<div v-if="analyticUnitType == analyticUnitTypes[0]">
Threshold:
<input v-model="analyticUnitConfig.threshold" @change="thresholdChange" /> <br/><br/>
</div>
<div v-if="analyticUnitType == analyticUnitTypes[1]">
Hold <pre>S</pre> to label patterns;
Hold <pre>A</pre> to label anti patterns <br/>
Hold <pre>D</pre> to delete patterns
<br/>
<hr/>
Correlation score:
<input v-model="analyticUnitConfig.correlation_score" @change="correlationScoreChange" /> <br/>
Anti correlation score:
<input v-model="analyticUnitConfig.anti_correlation_score" @change="antiCorrelationScoreChange" /> <br/>
Model score:
<input v-model="analyticUnitConfig.model_score" @change="modelScoreChange" /> <br/>
Threshold score:
<input v-model="analyticUnitConfig.threshold_score" @change="thresholdScoreChange" /> <br/><br/>
<button @click="clearAllLabeling"> clear all labeling </button>
</div>
<div v-if="analyticUnitType == analyticUnitTypes[2]">
Hold <pre>Z</pre> to set seasonality timespan
<hr/>
<!-- Alpha:
<input :value="analyticUnitConfig.alpha" @change="alphaChange" /> <br/> -->
Confidence:
<input v-model="analyticUnitConfig.confidence" @change="confidenceChange" /> <br/>
Seasonality:
<input v-model="analyticUnitConfig.seasonality" @change="seasonalityChange" /> <br/>
Seasonality iterations:
<input v-model="analyticUnitConfig.seasonality_iterations" @change="seasonalityIterationsChange" /> <br/>
<br/>
</div>
<div v-if="analyticUnitType == analyticUnitTypes[1]">
Hold <pre>S</pre> to label patterns;
Hold <pre>A</pre> to label anti patterns <br/>
Hold <pre>D</pre> to delete patterns
<br/>
<hr/>
Correlation score:
<input :value="analyticUnitConfig.correlation_score" @change="correlationScoreChange" /> <br/>
Anti correlation score:
<input :value="analyticUnitConfig.anti_correlation_score" @change="antiCorrelationScoreChange" /> <br/>
Model score:
<input :value="analyticUnitConfig.model_score" @change="modelScoreChange" /> <br/>
Threshold score:
<input :value="analyticUnitConfig.threshold_score" @change="thresholdScoreChange" /> <br/><br/>
<button @click="clearAllLabeling"> clear all labeling </button>
</div>
</template>
<div v-if="analyticUnitType == analyticUnitTypes[2]">
Hold <pre>Z</pre> to set seasonality timespan
<hr/>
<!-- Alpha:
<input :value="analyticUnitConfig.alpha" @change="alphaChange" /> <br/> -->
Confidence:
<input :value="analyticUnitConfig.confidence" @change="confidenceChange" /> <br/>
Seasonality:
<input :value="analyticUnitConfig.seasonality" @change="seasonalityChange" /> <br/>
Seasonality iterations:
<input :value="analyticUnitConfig.seasonality_iterations" @change="seasonalityIterationsChange" /> <br/>
<br/>
</div>
</div>
</div>
</template>
@ -143,9 +140,6 @@ export default defineComponent({
},
analyticUnitConfig() {
return this.$store.state.analyticUnitConfig;
},
analyticStatus() {
return this.$store.state.analyticStatus;
}
}
});

12
docker-compose.yml

@ -1,12 +0,0 @@
version: '3'
services:
app:
image: hastic/hastic:latest
restart: always
environment:
HASTIC_PORT: "4347"
HASTIC_PROMETHEUS__URL: "http://demo.robustperception.io:9090"
HASTIC_PROMETHEUS__QUERY: "rate(go_memstats_alloc_bytes_total[1m])"
ports:
- "4347:4347"

17
server/config.example.toml

@ -1,10 +1,8 @@
port = 4347
# one of datasource sections (prometheus / influx) should be uncommented and edited corresponding to your environment
# [prometheus]
# url = "http://localhost:9090"
# query = "rate(go_memstats_alloc_bytes_total[5m])"
[prometheus]
url = "http://localhost:9090"
query = "rate(go_memstats_alloc_bytes_total[5m])"
# [influx]
@ -18,7 +16,8 @@ port = 4347
# |> yield(name: "mean")
# """
# [alerting]
# type = "webhook"
# interval = 10 # in seconds
# endpoint = "http://localhost:9092"
[alerting]
type = "webhook"
interval = 10 # in seconds
endpoint = "http://localhost:9092"

56
server/src/config.rs

@ -1,7 +1,5 @@
use subbeat::types::{DatasourceConfig, InfluxConfig, PrometheusConfig};
use std::env;
#[derive(Clone)]
pub struct WebhookAlertingConfig {
pub endpoint: String,
@ -27,7 +25,6 @@ pub struct Config {
fn resolve_datasource(config: &config::Config) -> anyhow::Result<DatasourceConfig> {
if config.get::<String>("prometheus.url").is_ok() {
println!("using Prometheus");
return Ok(DatasourceConfig::Prometheus(PrometheusConfig {
url: config.get("prometheus.url")?,
query: config.get("prometheus.query")?,
@ -35,7 +32,6 @@ fn resolve_datasource(config: &config::Config) -> anyhow::Result<DatasourceConfi
}
if config.get::<String>("influx.url").is_ok() {
println!("using Influx");
return Ok(DatasourceConfig::Influx(InfluxConfig {
url: config.get("influx.url")?,
org_id: config.get("influx.org_id")?,
@ -44,7 +40,7 @@ fn resolve_datasource(config: &config::Config) -> anyhow::Result<DatasourceConfi
}));
}
return Err(anyhow::format_err!("please configure a datasource"));
return Err(anyhow::format_err!("no datasource found"));
}
fn resolve_alerting(config: &config::Config) -> anyhow::Result<Option<AlertingConfig>> {
@ -66,7 +62,7 @@ fn resolve_alerting(config: &config::Config) -> anyhow::Result<Option<AlertingCo
let analytic_type = config.get::<String>("alerting.type").unwrap();
if analytic_type != "webhook" {
return Err(anyhow::format_err!(
"unknown alerting type: {}",
"unknown alerting typy {}",
analytic_type
));
}
@ -79,46 +75,6 @@ fn resolve_alerting(config: &config::Config) -> anyhow::Result<Option<AlertingCo
}));
}
// config::Environment doesn't support nested configs, e.g. `alerting.type`,
// so I've copied this from:
// https://github.com/rust-lang/mdBook/blob/f3e5fce6bf5e290c713f4015947dc0f0ad172d20/src/config.rs#L132
// so that `__` can be used in env variables instead of `.`,
// e.g. `HASTIC_ALERTING__TYPE` -> alerting.type
pub fn update_from_env(config: &mut config::Config) {
let overrides =
env::vars().filter_map(|(key, value)| parse_env(&key).map(|index| (index, value)));
for (key, value) in overrides {
config.set(&key, value).unwrap();
}
}
pub fn print_config(config: config::Config) {
// TODO: support any nesting level
let sections = config.to_owned().cache.into_table().unwrap();
for (section_name, values) in sections {
match values.clone().into_table() {
Err(_) => println!("{} => {}", section_name, values),
Ok(section) => {
for (key, value) in section {
println!("{}.{} => {}", section_name, key, value);
}
}
}
}
}
fn parse_env(key: &str) -> Option<String> {
const PREFIX: &str = "HASTIC_";
if key.starts_with(PREFIX) {
let key = &key[PREFIX.len()..];
Some(key.to_lowercase().replace("__", "."))
} else {
None
}
}
impl Config {
pub fn new() -> anyhow::Result<Config> {
let mut config = config::Config::default();
@ -127,14 +83,14 @@ impl Config {
config.merge(config::File::with_name("config")).unwrap();
}
update_from_env(&mut config);
config
.merge(config::Environment::with_prefix("HASTIC"))
.unwrap();
if config.get::<u16>("port").is_err() {
config.set("port", 4347).unwrap();
config.set("port", "8000").unwrap();
}
print_config(config.clone());
Ok(Config {
port: config.get::<u16>("port").unwrap(),
datasource_config: resolve_datasource(&config)?,

7
server/src/main.rs

@ -1,6 +1,6 @@
mod api;
use hastic::services::{analytic_service, analytic_unit_service, metric_service, segments_service, data_service};
use hastic::services::{analytic_service, metric_service, segments_service, analytic_unit_service};
use anyhow;
@ -9,10 +9,9 @@ async fn main() -> anyhow::Result<()> {
let config = hastic::config::Config::new()?;
let cfg_clone = config.clone();
let data_service = data_service::DataService::new()?;
let analytic_unit_service = analytic_unit_service::AnalyticUnitService::new(&data_service)?;
let analytic_unit_service = analytic_unit_service::AnalyticUnitService::new()?;
let metric_service = metric_service::MetricService::new(&config.datasource_config);
let segments_service = segments_service::SegmentsService::new(&data_service)?;
let segments_service = segments_service::SegmentsService::new()?;
let mut analytic_service = analytic_service::AnalyticService::new(
analytic_unit_service.clone(),

113
server/src/services/analytic_service/analytic_service.rs

@ -23,7 +23,7 @@ use crate::services::analytic_service::analytic_unit::types::{AnalyticUnit, Lear
use anyhow;
use chrono::{DateTime, Utc};
use chrono::{DateTime, TimeZone, Utc};
use tokio::sync::{mpsc, oneshot};
// TODO: now it's basically single analytic unit, service will operate on many AU
@ -59,20 +59,18 @@ impl AnalyticService {
segments_service: segments_service::SegmentsService,
alerting: Option<AlertingConfig>,
) -> AnalyticService {
// TODO: move buffer size to config
let (tx, rx) = mpsc::channel::<AnalyticServiceMessage>(32);
let aus = analytic_unit_service.clone();
AnalyticService {
analytic_unit_service: aus,
analytic_unit_service,
metric_service,
segments_service,
alerting,
// TODO: get it from persistance
analytic_unit: None,
analytic_unit_config: analytic_unit_service.get_active_config().unwrap(),
analytic_unit_config: AnalyticUnitConfig::Pattern(Default::default()),
analytic_unit_learning_status: LearningStatus::Initialization,
tx,
@ -137,7 +135,7 @@ impl AnalyticService {
};
let tx = self.tx.clone();
let au = self.analytic_unit.as_ref().unwrap().clone();
let dr = DetectionRunner::new(tx,self.metric_service.clone(), drcfg, au);
let dr = DetectionRunner::new(tx, drcfg, au);
self.detection_runner = Some(dr);
self.detection_runner.as_mut().unwrap().run(from);
@ -241,12 +239,7 @@ impl AnalyticService {
println!("Detection runner started from {}", from)
}
ResponseType::DetectionRunnerUpdate(id, timestamp) => {
self.analytic_unit_service
.set_last_detection(id, timestamp)
.unwrap();
}
ResponseType::DetectionRunnerDetection(from, to) => {
println!("detection: {} {}", from, to);
self.analytic_unit_service.set_last_detection(id, timestamp).unwrap();
}
ResponseType::LearningStarted => {
self.analytic_unit_learning_status = LearningStatus::Learning
@ -270,84 +263,48 @@ impl AnalyticService {
}
}
// TODO: create custom DatasourceError error type
Err(err) => {
Err(_) => {
self.analytic_unit = None;
self.analytic_unit_learning_status = LearningStatus::Error(err.to_string());
self.analytic_unit_learning_status = LearningStatus::Error;
}
}
}
fn patch_config(&mut self, patch: PatchConfig, tx: oneshot::Sender<()>) {
let my_id = self
.analytic_unit_service
.get_config_id(&self.analytic_unit_config);
let patch_id = patch.get_type_id();
let same_type = my_id == patch_id;
// TODO: need_learning and same_type logic overlaps, there is a way to optimise this
let need_learning = self.analytic_unit_config.patch_needs_learning(&patch);
if same_type {
// TODO: check when learning should be started
let new_conf = patch.get_new_config();
self.analytic_unit_config = new_conf.clone();
self.analytic_unit_service
.update_config_by_id(&my_id, &new_conf)
.unwrap();
let (new_conf, need_learning) = self.analytic_unit_config.patch(patch);
self.analytic_unit_config = new_conf;
if need_learning {
self.consume_request(RequestType::RunLearning);
// TODO: it's not fullu correct: we need to wait when the learning starts
match tx.send(()) {
Ok(_) => {}
Err(_e) => {
println!("Can`t send patch config notification");
}
}
} else {
if self.analytic_unit.is_some() {
if need_learning {
self.consume_request(RequestType::RunLearning);
match tx.send(()) {
Ok(_) => {}
Err(e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
}
return;
} else {
tokio::spawn({
let au = self.analytic_unit.clone();
let cfg = self.analytic_unit_config.clone();
async move {
au.unwrap().write().await.set_config(cfg);
match tx.send(()) {
Ok(_) => {}
Err(e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
tokio::spawn({
let au = self.analytic_unit.clone();
let cfg = self.analytic_unit_config.clone();
async move {
au.unwrap().write().await.set_config(cfg);
match tx.send(()) {
Ok(_) => {}
Err(_e) => {
println!("Can`t send patch config notification");
}
}
});
}
}
});
} else {
// TODO: check if we need this else
match tx.send(()) {
Ok(_) => {}
Err(e) => {
Err(_e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
}
}
} else {
let new_conf = self
.analytic_unit_service
.get_config_by_id(&patch_id)
.unwrap();
self.analytic_unit_config = new_conf.clone();
self.consume_request(RequestType::RunLearning);
match tx.send(()) {
Ok(_) => {}
Err(e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
}
}
}
@ -376,11 +333,9 @@ impl AnalyticService {
ms: MetricService,
ss: SegmentsService,
) {
let mut au = match aus.resolve(&aucfg) {
let mut au = match aus.resolve(aucfg) {
Ok(a) => a,
Err(e) => {
panic!("{}", e);
}
Err(e) => { panic!("{}", e); }
};
match tx

9
server/src/services/analytic_service/analytic_unit/anomaly_analytic_unit.rs

@ -50,10 +50,7 @@ impl SARIMA {
// TODO: trend detection
if ts.len() < 2 {
return Err(anyhow::format_err!(
"too short timeserie to learn from, timeserie length: {}",
ts.len()
));
return Err(anyhow::format_err!("to short timeserie to learn from"));
}
// TODO: ensure capacity with seasonality size
let mut res_ts = Vec::<(u64, f64)>::new();
@ -167,10 +164,6 @@ impl AnalyticUnit for AnomalyAnalyticUnit {
fn get_id(&self) -> String {
return self.id.to_owned();
}
fn get_detection_window(&self) -> u64 {
// TODO: return window based on real petterns info
return DETECTION_STEP;
}
fn set_config(&mut self, config: AnalyticUnitConfig) {
if let AnalyticUnitConfig::Anomaly(cfg) = config {
self.config = cfg;

1
server/src/services/analytic_service/analytic_unit/mod.rs

@ -7,3 +7,4 @@ use self::{
anomaly_analytic_unit::AnomalyAnalyticUnit, pattern_analytic_unit::PatternAnalyticUnit,
threshold_analytic_unit::ThresholdAnalyticUnit, types::AnalyticUnitConfig,
};

4
server/src/services/analytic_service/analytic_unit/pattern_analytic_unit.rs

@ -219,10 +219,6 @@ impl AnalyticUnit for PatternAnalyticUnit {
fn get_id(&self) -> String {
return self.id.to_owned();
}
fn get_detection_window(&self) -> u64 {
let lr = self.learning_results.as_ref().unwrap();
return lr.avg_pattern_length as u64;
}
fn set_config(&mut self, config: AnalyticUnitConfig) {
if let AnalyticUnitConfig::Pattern(cfg) = config {
self.config = cfg;

3
server/src/services/analytic_service/analytic_unit/threshold_analytic_unit.rs

@ -25,9 +25,6 @@ impl AnalyticUnit for ThresholdAnalyticUnit {
fn get_id(&self) -> String {
return self.id.to_owned();
}
fn get_detection_window(&self) -> u64 {
return DETECTION_STEP;
}
async fn learn(
&mut self,
_ms: MetricService,

74
server/src/services/analytic_service/analytic_unit/types.rs

@ -57,61 +57,13 @@ impl Default for ThresholdConfig {
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum AnalyticUnitConfig {
Threshold(ThresholdConfig),
Pattern(PatternConfig),
Threshold(ThresholdConfig),
Anomaly(AnomalyConfig),
}
impl AnalyticUnitConfig {
pub fn get_default_by_id(id: &String) -> AnalyticUnitConfig {
let iid = id.as_str();
match iid {
"1" => AnalyticUnitConfig::Threshold(Default::default()),
"2" => AnalyticUnitConfig::Pattern(Default::default()),
"3" => AnalyticUnitConfig::Anomaly(Default::default()),
_ => panic!("bad id for getting get_default_by_id"),
}
}
pub fn patch_needs_learning(&self, patch: &PatchConfig) -> bool {
// TODO: maybe use type id's to optimise code
match patch {
PatchConfig::Pattern(tcfg) => match self.clone() {
AnalyticUnitConfig::Pattern(_) => {
return false;
}
_ => return true,
},
PatchConfig::Anomaly(tcfg) => match self.clone() {
AnalyticUnitConfig::Anomaly(scfg) => {
if tcfg.is_some() {
let t = tcfg.as_ref().unwrap();
let mut need_learning = t.seasonality != scfg.seasonality;
need_learning |= t.seasonality_iterations != scfg.seasonality_iterations;
return need_learning;
} else {
return false;
}
}
_ => {
return true;
}
},
PatchConfig::Threshold(tcfg) => match self.clone() {
AnalyticUnitConfig::Threshold(_) => {
return false;
}
_ => {
return true;
}
},
}
}
// TODO: maybe this method depricated
// return true if need needs relearning
// return true if patch is different type
pub fn patch(&self, patch: PatchConfig) -> (AnalyticUnitConfig, bool) {
match patch {
PatchConfig::Pattern(tcfg) => match self.clone() {
@ -119,7 +71,6 @@ impl AnalyticUnitConfig {
if tcfg.is_some() {
return (AnalyticUnitConfig::Pattern(tcfg.unwrap()), false);
} else {
// TODO: it should be extraced from db
return (AnalyticUnitConfig::Pattern(Default::default()), false);
}
}
@ -180,7 +131,6 @@ pub enum LearningResult {
#[async_trait]
pub trait AnalyticUnit {
fn get_id(&self) -> String;
fn get_detection_window(&self) -> u64;
async fn learn(
&mut self,
ms: MetricService,
@ -203,23 +153,3 @@ pub enum PatchConfig {
Threshold(Option<ThresholdConfig>),
Anomaly(Option<AnomalyConfig>),
}
impl PatchConfig {
pub fn get_type_id(&self) -> String {
match &self {
PatchConfig::Threshold(_) => "1".to_string(),
PatchConfig::Pattern(_) => "2".to_string(),
PatchConfig::Anomaly(_) => "3".to_string(),
}
}
pub fn get_new_config(&self) -> AnalyticUnitConfig {
match &self {
PatchConfig::Threshold(cfg) => {
AnalyticUnitConfig::Threshold(cfg.as_ref().unwrap().clone())
}
PatchConfig::Pattern(cfg) => AnalyticUnitConfig::Pattern(cfg.as_ref().unwrap().clone()),
PatchConfig::Anomaly(cfg) => AnalyticUnitConfig::Anomaly(cfg.as_ref().unwrap().clone()),
}
}
}

62
server/src/services/analytic_service/detection_runner.rs

@ -1,16 +1,12 @@
use chrono::{DateTime, Utc};
use chrono::{Utc, DateTime};
use tokio::sync::mpsc;
use crate::services::metric_service::MetricService;
use tokio::sync::{mpsc, RwLock};
use super::types::{AnalyticServiceMessage, AnalyticUnitRF, DetectionRunnerConfig, ResponseType};
use tokio::time::{sleep, Duration};
pub struct DetectionRunner {
tx: mpsc::Sender<AnalyticServiceMessage>,
metric_service: MetricService,
config: DetectionRunnerConfig,
analytic_unit: AnalyticUnitRF,
running_handler: Option<tokio::task::JoinHandle<()>>,
@ -19,12 +15,10 @@ pub struct DetectionRunner {
impl DetectionRunner {
pub fn new(
tx: mpsc::Sender<AnalyticServiceMessage>,
metric_service: MetricService,
config: DetectionRunnerConfig,
analytic_unit: AnalyticUnitRF,
) -> DetectionRunner {
DetectionRunner {
metric_service,
tx,
config,
analytic_unit,
@ -33,26 +27,22 @@ impl DetectionRunner {
}
pub fn run(&mut self, from: u64) {
// TODO: get last detection timestamp from persistance
// TODO: set last detection from "now"
if self.running_handler.is_some() {
self.running_handler.as_mut().unwrap().abort();
}
self.running_handler = Some(tokio::spawn({
// TODO: clone channel
let cfg = self.config.clone();
let ms = self.metric_service.clone();
let tx = self.tx.clone();
let au = self.analytic_unit.clone();
async move {
// TODO: run detection "from" for big timespan
// TODO: parse detections to webhooks
// TODO: define window for detection
// TODO: save last detection
// TODO: handle case when detection is in the end and continues after "now"
// it's better to make an issue on github
// TODO: find place to update analytic unit model
let window_size = au.as_ref().read().await.get_detection_window();
let detection_step = ms.get_detection_step();
let mut t_from = from - window_size;
let mut t_to = from;
match tx
.send(AnalyticServiceMessage::Response(Ok(
@ -65,39 +55,17 @@ impl DetectionRunner {
}
loop {
let a = au.as_ref().read().await;
let detections = a.detect(ms.clone(), t_from, t_to).await.unwrap();
for d in detections {
match tx
.send(AnalyticServiceMessage::Response(Ok(
ResponseType::DetectionRunnerDetection(d.0, d.1),
)))
.await
{
Ok(_) => {}
Err(_e) => println!("Fail to send detection runner detection notification"),
}
}
// TODO: send info about detections to tx
match tx
.send(AnalyticServiceMessage::Response(Ok(
ResponseType::DetectionRunnerUpdate(
au.as_ref().read().await.get_id(),
t_to,
),
)))
.await
{
Ok(_) => {}
// TODO: don't use DateTime, but count timestamp by steps
let now: DateTime<Utc> = Utc::now();
let to = now.timestamp() as u64;
// TODO: run detection periodically
sleep(Duration::from_secs(cfg.interval)).await;
match tx.send(AnalyticServiceMessage::Response(Ok(
ResponseType::DetectionRunnerUpdate(au.as_ref().read().await.get_id(), to)
))).await {
Ok(_) => {},
Err(_e) => println!("Fail to send detection runner started notification"),
}
sleep(Duration::from_secs(cfg.interval)).await;
t_from += detection_step;
t_to += detection_step;
}
}
}));

3
server/src/services/analytic_service/types.rs

@ -22,7 +22,7 @@ pub enum LearningStatus {
Initialization,
Starting,
Learning,
Error(String),
Error,
Ready,
}
@ -45,7 +45,6 @@ impl Default for LearningTrain {
pub enum ResponseType {
DetectionRunnerStarted(u64),
DetectionRunnerUpdate(String, u64), // analytic_unit id and timestamp
DetectionRunnerDetection(u64, u64), // TODO: add more into about analytic unit and more
LearningStarted,
LearningFinished(Box<dyn AnalyticUnit + Send + Sync>),
LearningFinishedEmpty,

175
server/src/services/analytic_unit_service.rs

@ -1,195 +1,76 @@
use std::sync::{Arc, Mutex};
use rusqlite::{params, Connection};
use crate::utils::get_random_str;
use super::analytic_service::analytic_unit::{
anomaly_analytic_unit::AnomalyAnalyticUnit,
pattern_analytic_unit::PatternAnalyticUnit,
threshold_analytic_unit::ThresholdAnalyticUnit,
types::{self, AnalyticUnitConfig},
};
use rusqlite::{params, Connection, Row};
use warp::hyper::rt::Executor;
use super::data_service::DataService;
use super::analytic_service::analytic_unit::{types::{AnalyticUnitConfig, self}, threshold_analytic_unit::ThresholdAnalyticUnit, pattern_analytic_unit::PatternAnalyticUnit, anomaly_analytic_unit::AnomalyAnalyticUnit};
#[derive(Clone)]
pub struct AnalyticUnitService {
connection: Arc<Mutex<Connection>>,
// TODO: resolve by setting id for 3 types
// TODO: create database
// TODO: update detection
connection: Arc<Mutex<Connection>>
}
// TODO: get DataService
impl AnalyticUnitService {
pub fn new(ds: &DataService) -> anyhow::Result<AnalyticUnitService> {
let conn = ds.analytic_units_connection.clone();
pub fn new() -> anyhow::Result<AnalyticUnitService> {
// TODO: remove repetitoin with segment_service
std::fs::create_dir_all("./data").unwrap();
let conn = Connection::open("./data/analytic_units.db")?;
// TODO: add learning results field
conn.lock().unwrap().execute(
conn.execute(
"CREATE TABLE IF NOT EXISTS analytic_unit (
id TEXT PRIMARY KEY,
last_detection INTEGER,
active BOOLEAN,
type INTEGER,
config TEXT
last_detection INTEGER
)",
[],
)?;
Ok(AnalyticUnitService {
connection: conn
connection: Arc::new(Mutex::new(conn)),
})
}
// TODO: optional id
pub fn resolve_au(
&self,
cfg: &AnalyticUnitConfig,
) -> Box<dyn types::AnalyticUnit + Send + Sync> {
pub fn resolve_au(&self, cfg: AnalyticUnitConfig) -> Box<dyn types::AnalyticUnit + Send + Sync> {
match cfg {
AnalyticUnitConfig::Threshold(c) => {
Box::new(ThresholdAnalyticUnit::new("1".to_string(), c.clone()))
}
AnalyticUnitConfig::Pattern(c) => {
Box::new(PatternAnalyticUnit::new("2".to_string(), c.clone()))
}
AnalyticUnitConfig::Anomaly(c) => {
Box::new(AnomalyAnalyticUnit::new("3".to_string(), c.clone()))
}
AnalyticUnitConfig::Threshold(c) => Box::new(ThresholdAnalyticUnit::new("1".to_string(), c.clone())),
AnalyticUnitConfig::Pattern(c) => Box::new(PatternAnalyticUnit::new("2".to_string(), c.clone())),
AnalyticUnitConfig::Anomaly(c) => Box::new(AnomalyAnalyticUnit::new("3".to_string(), c.clone())),
}
}
// TODO: get id of analytic_unit which be used also as it's type
pub fn resolve(
&self,
cfg: &AnalyticUnitConfig,
) -> anyhow::Result<Box<dyn types::AnalyticUnit + Send + Sync>> {
pub fn resolve(&self, cfg: AnalyticUnitConfig) -> anyhow::Result<Box<dyn types::AnalyticUnit + Send + Sync>> {
let au = self.resolve_au(cfg);
let id = au.as_ref().get_id();
let conn = self.connection.lock().unwrap();
let mut stmt = conn.prepare("SELECT id from analytic_unit WHERE id = ?1")?;
let mut stmt = conn.prepare(
"SELECT id from analytic_unit WHERE id = ?1",
)?;
let res = stmt.exists(params![id])?;
if res == false {
let cfg_json = serde_json::to_string(&cfg)?;
conn.execute(
"INSERT INTO analytic_unit (id, type, config) VALUES (?1, ?1, ?2)",
params![id, cfg_json],
"INSERT INTO analytic_unit (id) VALUES (?1)",
params![id]
)?;
}
conn.execute(
"UPDATE analytic_unit set active = FALSE where active = TRUE",
params![],
)?;
conn.execute(
"UPDATE analytic_unit set active = TRUE where id = ?1",
params![id],
)?;
return Ok(au);
}
// TODO: resolve with saving by id
/// Stores the timestamp of the latest detection for the unit `id`.
///
/// # Errors
/// Propagates SQLite errors from the UPDATE.
pub fn set_last_detection(&self, id: String, last_detection: u64) -> anyhow::Result<()> {
    let conn = self.connection.lock().unwrap();
    conn.execute(
        "UPDATE analytic_unit SET last_detection = ?1 WHERE id = ?2",
        params![last_detection, id],
    )?;
    Ok(())
}
/// Loads the active unit's stored config and instantiates it via `resolve`.
///
/// The config is read and the connection lock released *before* calling
/// `resolve`: the original code invoked `resolve` inside the `query_row`
/// closure while still holding `self.connection`, and `resolve` re-locks the
/// same (non-reentrant) mutex — a guaranteed deadlock.
///
/// # Errors
/// Fails when there is no active row or the stored JSON is invalid
/// (previously a malformed config caused a panic via `unwrap`).
pub fn get_active(&self) -> anyhow::Result<Box<dyn types::AnalyticUnit + Send + Sync>> {
    // TODO: return default when there is no active
    let raw_config = {
        let conn = self.connection.lock().unwrap();
        let mut stmt =
            conn.prepare("SELECT id, type, config from analytic_unit WHERE active = TRUE")?;
        stmt.query_row([], |row| row.get::<usize, String>(2))?
    }; // lock dropped here, before resolve() re-locks it
    let cfg: AnalyticUnitConfig = serde_json::from_str(&raw_config)?;
    self.resolve(&cfg)
}
/// Returns the active unit's config; when none is active, activates and
/// returns the default Pattern config.
///
/// Queries once instead of the previous exists-then-reread double round trip,
/// and propagates JSON errors instead of panicking (`unwrap`) inside the row
/// closure. The lock is released before calling `resolve`, which locks the
/// same mutex.
pub fn get_active_config(&self) -> anyhow::Result<AnalyticUnitConfig> {
    let raw_config = {
        let conn = self.connection.lock().unwrap();
        let mut stmt = conn.prepare("SELECT config from analytic_unit WHERE active = TRUE")?;
        let mut rows = stmt.query([])?;
        match rows.next()? {
            Some(row) => Some(row.get::<usize, String>(0)?),
            None => None,
        }
    };
    match raw_config {
        Some(c) => Ok(serde_json::from_str(&c)?),
        None => {
            // No active unit yet: persist + activate the default pattern config.
            let c = AnalyticUnitConfig::Pattern(Default::default());
            self.resolve(&c)?;
            Ok(c)
        }
    }
}
/// Returns the stored config for unit `id`; when the row does not exist yet,
/// activates and returns that unit's default config.
///
/// Single query instead of the previous exists-then-reread double round trip;
/// JSON errors are propagated instead of panicking (`unwrap`). The lock is
/// released before calling `resolve`, which locks the same mutex.
pub fn get_config_by_id(&self, id: &String) -> anyhow::Result<AnalyticUnitConfig> {
    let raw_config = {
        let conn = self.connection.lock().unwrap();
        let mut stmt = conn.prepare("SELECT config from analytic_unit WHERE id = ?1")?;
        let mut rows = stmt.query([id])?;
        match rows.next()? {
            Some(row) => Some(row.get::<usize, String>(0)?),
            None => None,
        }
    };
    match raw_config {
        Some(c) => Ok(serde_json::from_str(&c)?),
        None => {
            // Unknown id: fall back to that unit type's default and persist it.
            let c = AnalyticUnitConfig::get_default_by_id(id);
            self.resolve(&c)?;
            Ok(c)
        }
    }
}
/// Maps a config variant to its fixed unit id ("1"/"2"/"3") — the same ids
/// `resolve_au` assigns when instantiating units.
pub fn get_config_id(&self, cfg: &AnalyticUnitConfig) -> String {
    let id = match cfg {
        AnalyticUnitConfig::Threshold(_) => "1",
        AnalyticUnitConfig::Pattern(_) => "2",
        AnalyticUnitConfig::Anomaly(_) => "3",
    };
    id.to_string()
}
/// Serializes `cfg` to JSON and writes it into the row with the given `id`.
pub fn update_config_by_id(&self, id: &String, cfg: &AnalyticUnitConfig) -> anyhow::Result<()> {
    // TODO: it's possble that config doesn't exist, but we trying to update it
    let serialized = serde_json::to_string(cfg)?;
    let conn = self.connection.lock().unwrap();
    conn.execute(
        "UPDATE analytic_unit SET config = ?1 WHERE id = ?2",
        params![serialized, id],
    )?;
    Ok(())
}
/// Serializes `cfg` to JSON and writes it into whichever row is active.
pub fn update_active_config(&self, cfg: &AnalyticUnitConfig) -> anyhow::Result<()> {
    let serialized = serde_json::to_string(cfg)?;
    let conn = self.connection.lock().unwrap();
    conn.execute(
        "UPDATE analytic_unit SET config = ?1 WHERE active = TRUE",
        params![serialized],
    )?;
    Ok(())
}
}
}

23
server/src/services/data_service.rs

@ -1,23 +0,0 @@
use std::sync::{Arc, Mutex};
use rusqlite::{Connection};
// Owns the shared SQLite connections used by the persistence-backed services.
pub struct DataService {
    // Connection opened on ./data/analytic_units.db (see `new`)
    pub analytic_units_connection: Arc<Mutex<Connection>>,
    // Connection opened on ./data/segments.db (see `new`)
    pub segments_connection: Arc<Mutex<Connection>>
}
impl DataService {
    /// Ensures the `./data` directory exists and opens both SQLite databases,
    /// wrapping each connection for shared, synchronized access.
    pub fn new() -> anyhow::Result<DataService> {
        std::fs::create_dir_all("./data").unwrap();
        let analytic_units = Connection::open("./data/analytic_units.db")?;
        let segments = Connection::open("./data/segments.db")?;
        Ok(DataService {
            analytic_units_connection: Arc::new(Mutex::new(analytic_units)),
            segments_connection: Arc::new(Mutex::new(segments)),
        })
    }
}

7
server/src/services/metric_service.rs

@ -32,11 +32,4 @@ impl MetricService {
}
return Ok(mr);
}
// TODO: it a hack for DetectionRunner: it should vary for different analytic units
// and it's config
/// Fixed step (seconds — TODO confirm unit against DetectionRunner) used by
/// the detection runner for every analytic unit.
pub fn get_detection_step(&self) -> u64 {
    10
}
}

3
server/src/services/mod.rs

@ -1,6 +1,5 @@
pub mod analytic_service;
pub mod data_service;
pub mod analytic_unit_service;
pub mod metric_service;
pub mod segments_service;
pub mod user_service;
pub mod analytic_unit_service;

16
server/src/services/segments_service.rs

@ -6,13 +6,9 @@ use serde::{Deserialize, Serialize};
use std::sync::{Arc, Mutex};
use super::data_service::DataService;
// Length, in characters, of generated segment ids — TODO confirm against the id generator.
pub const ID_LENGTH: usize = 20;
// Segment ids are plain strings (see ID_LENGTH above).
pub type SegmentId = String;
// TODO: make logic with this enum shorter
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum SegmentType {
@ -62,19 +58,19 @@ impl Segment {
}
}
// TODO: get DataService
#[derive(Clone)]
pub struct SegmentsService {
connection: Arc<Mutex<Connection>>,
}
impl SegmentsService {
pub fn new(ds: &DataService) -> anyhow::Result<SegmentsService> {
pub fn new() -> anyhow::Result<SegmentsService> {
// TODO: move it to data service
std::fs::create_dir_all("./data").unwrap();
// TODO: add unilytic_unit id as a new field
let conn = ds.segments_connection.clone();
conn.lock().unwrap().execute(
let conn = Connection::open("./data/segments.db")?;
conn.execute(
"CREATE TABLE IF NOT EXISTS segment (
id TEXT PRIMARY KEY,
start INTEGER NOT NULL,
@ -85,7 +81,7 @@ impl SegmentsService {
)?;
Ok(SegmentsService {
connection: conn
connection: Arc::new(Mutex::new(conn)),
})
}

Loading…
Cancel
Save