Compare commits

...

71 Commits

Author SHA1 Message Date
rozetko 77c5d3d3ce Merge pull request 'hotfix: `configuration property "influx.org_id" not found`' (#111) from influx-docker into main 2 years ago
rozetko d099998562 fix `configuration property "influx.org_id" not found` 2 years ago
rozetko abbadbc155 Merge pull request 'Make influx work in Docker' (#110) from influx-docker into main 2 years ago
rozetko fce0f30e9e docker-compose: rm "network-mode: host" option 2 years ago
rozetko f4226a5245 upd config example 2 years ago
rozetko 57854909f9 print config on start 2 years ago
rozetko e16410d407 make influx work in docker 2 years ago
Alexey Velikiy 5f16e006a8 rm range print in cleint 2 years ago
Alexey Velikiy 55c0fca371 debug error in config patch notification 2 years ago
glitch4347 aa4008662a
Merge pull request #108 from hastic/get-notified-in-analytic-service-about-new-detections-#51 2 years ago
Alexey Velikiy a9d2995281 detection runner detection notification 2 years ago
glitch4347 eee437d779
Merge pull request #106 from hastic/data-folder-creation-single-place-#102 2 years ago
Alexey Velikiy b7fb951590 data_service 2 years ago
Alexey Velikiy ad95c1e49f rm console log 2 years ago
glitch4347 f684d8ee6a
Merge pull request #100 from hastic/ui-not-editable-confidence-param-#99 2 years ago
Alexey Velikiy 0d756d3f42 use v-model instead of :value 2 years ago
Alexey Velikiy a4da3b5ea3 todo++ 2 years ago
Alexey Velikiy cafb50cce3 small refactorign 2 years ago
Alexey Velikiy c7284f4305 todos++ 2 years ago
glitch4347 cb87a98e39
Merge pull request #96 from hastic/env-vars-docs-#90 2 years ago
glitch4347 49c2e30acc
Merge pull request #94 from rusdacent/main 2 years ago
rozetko 8e4b29508b minor fixes 2 years ago
rozetko ed28530955 readme: initial configuration docs 2 years ago
rusdacent e0e4d93ebb Set always restart 2 years ago
rusdacent 8121292450 Set ports for Hastic 2 years ago
rusdacent 2d5786d639 Set image for docker-compose 2 years ago
rusdacent 0733a5d002 Add docker-compose 2 years ago
rusdacent e20a5affcc Add Dockerfile 2 years ago
glitch4347 545c932f6b
Merge pull request #91 from hastic/show-error-if-server-is-down-#48 2 years ago
rozetko 936d207a74 change default port: 8000 -> 4347 2 years ago
rozetko 4ed8ba1193 do not display anything below the Analytic Status if analytic is not available 2 years ago
rozetko 70c49fe91b change analytic status type && handle GET /status errors 2 years ago
glitch4347 052e5a5987
Merge pull request #89 from hastic/env-variables-configuration-#55 2 years ago
glitch4347 8ab0718326
Update server/src/config.rs 2 years ago
glitch4347 4f66b59b7e
Merge pull request #85 from hastic/backet-detection-window-for-pattern-analytic-unit-#83 2 years ago
rozetko 7d190c426a env variables configuration #55 2 years ago
glitch4347 637eef2105
Update server/src/services/analytic_service/detection_runner.rs 2 years ago
Alexey Velikiy 7731e2f56b detection window for pattern detector 2 years ago
Alexey Velikiy 6b44428586 detection step in matric service and update of t_from \ t_to 2 years ago
Alexey Velikiy 6f6f946b5b DetectionRunner todos 2 years ago
glitch4347 278ea473f2
Merge pull request #72 from hastic/analytic-unit-type-and-meta-in-db-#65 2 years ago
Alexey Velikiy 81069e9782 code format 2 years ago
Alexey Velikiy 21f4b6eaaa need_learning on patch case 2 years ago
Alexey Velikiy a200e3d450 basic seving of new analytic unit config 2 years ago
Alexey Velikiy d7b283f1ff basic save of analytic_unit 2 years ago
Alexey Velikiy 1af18e1118 rm 'same_type' in pathc config and patching continue 2 years ago
Alexey Velikiy df3be270b8 better patch begin 2 years ago
glitch4347 f1d31b9637
Merge pull request #67 from hastic/active-analytic-unit-#66 2 years ago
glitch4347 2498c40bed
Update server/src/services/analytic_unit_service.rs 2 years ago
Alexey Velikiy b5f960a6ea save config for patched 2 years ago
Alexey Velikiy 3313a92039 update active config begin 2 years ago
Alexey Velikiy a982f8f1f5 update_active_config begin 2 years ago
Alexey Velikiy 1f2d6f551e patch config: same type param 2 years ago
Alexey Velikiy 5bf13b171b get_active_config++ 2 years ago
Alexey Velikiy 090b9bf42e active config++ 2 years ago
Alexey Velikiy 86a1f7e3a6 get active with config 2 years ago
Alexey Velikiy b26d36fad2 serialize config in analytic_unit 2 years ago
Alexey Velikiy 4b6e95e784 Merge branch 'main' into active-analytic-unit-#66 2 years ago
glitch4347 5fa9cf8ba2
Merge pull request #69 from hastic/analytic-is-stuck-at-learning-#42 2 years ago
rozetko ef572afcaa prettify "too short timeserie to learn from" error message 2 years ago
rozetko 4774f5d6b5 LearningStatus::Error : error message argument 2 years ago
Alexey Velikiy 311e5a8041 get_active++ 2 years ago
Alexey Velikiy 1565c156e0 TODO++ 2 years ago
Alexey Velikiy d23af0482b TODOS and type field 2 years ago
Alexey Velikiy 187ec15f1b active analytic unit begin 2 years ago
glitch4347 0eff1204f2
Merge pull request #63 from hastic/basic-periodic-update-of-detection-runner-#62 2 years ago
Alexey Velikiy c79993bc52 rm todos 2 years ago
Alexey Velikiy a338b3339b detectoin println fix 2 years ago
Alexey Velikiy 9904e4d449 metric_service to detections runner + basic detections 2 years ago
Alexey Velikiy 0761e4d9b2 detections window begin 2 years ago
glitch4347 1b89075ad0
Merge pull request #60 from hastic/detection_runner_updade 2 years ago
  1. 27
      Dockerfile
  2. 36
      README.md
  3. 6
      client/src/components/AnlyticsStatus.vue
  4. 1
      client/src/components/pods/pattern_pod.ts
  5. 31
      client/src/services/analytics.service.ts
  6. 17
      client/src/store/index.ts
  7. 88
      client/src/views/Home.vue
  8. 12
      docker-compose.yml
  9. 17
      server/config.example.toml
  10. 56
      server/src/config.rs
  11. 7
      server/src/main.rs
  12. 113
      server/src/services/analytic_service/analytic_service.rs
  13. 9
      server/src/services/analytic_service/analytic_unit/anomaly_analytic_unit.rs
  14. 1
      server/src/services/analytic_service/analytic_unit/mod.rs
  15. 4
      server/src/services/analytic_service/analytic_unit/pattern_analytic_unit.rs
  16. 3
      server/src/services/analytic_service/analytic_unit/threshold_analytic_unit.rs
  17. 74
      server/src/services/analytic_service/analytic_unit/types.rs
  18. 62
      server/src/services/analytic_service/detection_runner.rs
  19. 3
      server/src/services/analytic_service/types.rs
  20. 175
      server/src/services/analytic_unit_service.rs
  21. 23
      server/src/services/data_service.rs
  22. 7
      server/src/services/metric_service.rs
  23. 3
      server/src/services/mod.rs
  24. 16
      server/src/services/segments_service.rs

27
Dockerfile

@ -0,0 +1,27 @@
FROM rust:1.57.0-bullseye as builder
RUN curl -sL https://deb.nodesource.com/setup_16.x | bash -
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
nodejs \
gcc \
g++ \
make \
musl-tools \
&& rm -rf /var/lib/apt/lists/*
RUN npm install --global yarn
RUN rustup target add x86_64-unknown-linux-musl
ADD . ./
RUN make
FROM debian:bullseye-slim
COPY --from=builder /release/hastic /hastic
COPY --from=builder /release/config.toml /config.toml
COPY --from=builder /release/public /public
CMD ["./hastic"]

36
README.md

@ -17,6 +17,42 @@ instance for getting metrics.
make
```
### Configure
Hastic can be configured using config-file or environment variables.
At first, choose which datasource you'll be using: `prometheus` or `influx`. Only one can be used at a time.
#### Config-file
- copy the config example to the release directory:
```bash
cp config.example.toml release/config.toml
```
- edit the config file, e.g. using `nano`:
```bash
nano release/config.toml
```
#### Environment variables
All config fields are also available as environment variables with the `HASTIC_` prefix.
Variable name structure:
- for high-level fields: `HASTIC_<field_name>`, e.g. `HASTIC_PORT`
- for nested fields: `HASTIC_<category_name>__<field_name>`, e.g. `HASTIC_PROMETHEUS__URL`
Environment variables can be set either by exporting them (they'll stay in effect until the bash session is closed):
```bash
export HASTIC_PORT=8000
export HASTIC_PROMETHEUS__URL=http://localhost:9090
export HASTIC_PROMETHEUS__QUERY=rate(go_memstats_alloc_bytes_total[5m])
```
or specifying them in the run command (they'll be in effect only for that run):
```bash
HASTIC_PORT=8000 HASTIC_PROMETHEUS__URL=http://localhost:9090 HASTIC_PROMETHEUS__QUERY=rate(go_memstats_alloc_bytes_total[5m]) ./release/hastic
```
### Run
```
cd release
./hastic

6
client/src/components/AnlyticsStatus.vue

@ -1,10 +1,12 @@
<template>
<div class="analytic-status">
analytic status: <strong> {{ status }} </strong>
analytic status: <strong> {{ status.message }} </strong>
</div>
</template>
<script lang="ts">
import { AnalyticStatus } from "@/store";
import { defineComponent } from 'vue';
@ -13,7 +15,7 @@ export default defineComponent({
components: {
},
computed: {
status() {
status(): AnalyticStatus {
return this.$store.state.analyticStatus;
}
}

1
client/src/components/pods/pattern_pod.ts

@ -79,7 +79,6 @@ export class PatternPod extends HasticPod<UpdateDataCallback> {
} else {
console.log('took from range from default');
}
console.log(from + " ---- " + to);
this.udc({ from, to })
.then(resp => {

31
client/src/services/analytics.service.ts

@ -5,20 +5,33 @@ import axios from 'axios';
import { getGenerator } from '@/utils';
import _ from 'lodash';
import {
import {
AnalyticUnitType, AnlyticUnitConfig,
PatternConfig, ThresholdConfig, AnomalyConfig
} from "@/types/analytic_units";
import { AnomalyHSR } from "@/types";
import { AnalyticStatus } from "@/store";
import _ from 'lodash';
const ANALYTICS_API_URL = API_URL + "analytics/";
export async function getStatus(): Promise<string> {
export async function getStatus(): Promise<AnalyticStatus> {
const uri = ANALYTICS_API_URL + `status`;
const res = await axios.get(uri);
const data = res['data'] as any;
return data.status;
try {
const res = await axios.get<{ status: string }>(uri);
const data = res.data;
return {
available: true,
message: data.status
};
} catch (e) {
return {
available: false,
message: e.message
};
}
}
export async function getConfig(): Promise<[AnalyticUnitType, AnlyticUnitConfig]> {
@ -54,8 +67,8 @@ export async function patchConfig(patchObj: any) {
await axios.patch(uri, patchObj);
}
export function getStatusGenerator(): AsyncIterableIterator<string> {
return getGenerator<string>(100, getStatus);
export function getStatusGenerator(): AsyncIterableIterator<AnalyticStatus> {
return getGenerator<AnalyticStatus>(100, getStatus);
}
@ -68,6 +81,6 @@ export async function getHSRAnomaly(from: number, to: number): Promise<AnomalyHS
const res = await axios.get(uri);
const values = res["data"]["AnomalyHSR"];
return values as AnomalyHSR;
}

17
client/src/store/index.ts

@ -12,23 +12,30 @@ const _SET_STATUS_GENERATOR = '_SET_STATUS_GENERATOR';
// TODO: consts for actions
export type AnalyticStatus = {
available: boolean,
message: string,
}
type State = {
analyticStatus: string,
analyticStatus: AnalyticStatus,
analyticUnitType?: AnalyticUnitType,
analyticUnitConfig?: AnlyticUnitConfig,
_statusGenerator: AsyncIterableIterator<string>
_statusGenerator: AsyncIterableIterator<AnalyticStatus>
}
const store = createStore<State>({
state: {
analyticStatus: 'loading...',
analyticStatus: {
available: false,
message: 'loading...',
},
analyticUnitType: null,
analyticUnitConfig: null,
_statusGenerator: null
},
mutations: {
[SET_ANALYTICS_STATUS](state, status: string) {
[SET_ANALYTICS_STATUS](state, status: AnalyticStatus) {
state.analyticStatus = status;
},
[SET_DETECTOR_CONFIG](state, { analyticUnitType, analyticUnitConfig }) {
@ -38,7 +45,7 @@ const store = createStore<State>({
// [PATCH_CONFIG](state, patchObj) {
// patchConfig(patchConfig)
// }
[_SET_STATUS_GENERATOR](state, generator: AsyncIterableIterator<string>) {
[_SET_STATUS_GENERATOR](state, generator: AsyncIterableIterator<AnalyticStatus>) {
state._statusGenerator = generator;
}
},

88
client/src/views/Home.vue

@ -2,50 +2,53 @@
<div class="home">
<img alt="Vue logo" src="../assets/logo.png">
<graph ref="graph" />
<analytic-status />
<div>
Analytic unit type:
<select :value="analyticUnitType" @change="changeAnalyticUnitType">
<option disabled value="">Please Select</option>
<option v-bind:key="option" v-for="option in analyticUnitTypes" :value="option">{{option}}</option>
</select> <br/><br/>
</div>
<div id="controls">
<div v-if="analyticUnitType == analyticUnitTypes[0]">
Threshold:
<input :value="analyticUnitConfig.threshold" @change="thresholdChange" /> <br/><br/>
</div>
<div v-if="analyticUnitType == analyticUnitTypes[1]">
Hold <pre>S</pre> to label patterns;
Hold <pre>A</pre> to label anti patterns <br/>
Hold <pre>D</pre> to delete patterns
<br/>
<hr/>
Correlation score:
<input :value="analyticUnitConfig.correlation_score" @change="correlationScoreChange" /> <br/>
Anti correlation score:
<input :value="analyticUnitConfig.anti_correlation_score" @change="antiCorrelationScoreChange" /> <br/>
Model score:
<input :value="analyticUnitConfig.model_score" @change="modelScoreChange" /> <br/>
Threshold score:
<input :value="analyticUnitConfig.threshold_score" @change="thresholdScoreChange" /> <br/><br/>
<button @click="clearAllLabeling"> clear all labeling </button>
<template v-if="analyticStatus.available">
<div>
Analytic unit type:
<select :value="analyticUnitType" @change="changeAnalyticUnitType">
<option disabled value="">Please Select</option>
<option v-bind:key="option" v-for="option in analyticUnitTypes" :value="option">{{option}}</option>
</select> <br/><br/>
</div>
<div v-if="analyticUnitType == analyticUnitTypes[2]">
Hold <pre>Z</pre> to set seasonality timespan
<hr/>
<!-- Alpha:
<input :value="analyticUnitConfig.alpha" @change="alphaChange" /> <br/> -->
Confidence:
<input :value="analyticUnitConfig.confidence" @change="confidenceChange" /> <br/>
Seasonality:
<input :value="analyticUnitConfig.seasonality" @change="seasonalityChange" /> <br/>
Seasonality iterations:
<input :value="analyticUnitConfig.seasonality_iterations" @change="seasonalityIterationsChange" /> <br/>
<br/>
<div id="controls">
<div v-if="analyticUnitType == analyticUnitTypes[0]">
Threshold:
<input v-model="analyticUnitConfig.threshold" @change="thresholdChange" /> <br/><br/>
</div>
<div v-if="analyticUnitType == analyticUnitTypes[1]">
Hold <pre>S</pre> to label patterns;
Hold <pre>A</pre> to label anti patterns <br/>
Hold <pre>D</pre> to delete patterns
<br/>
<hr/>
Correlation score:
<input v-model="analyticUnitConfig.correlation_score" @change="correlationScoreChange" /> <br/>
Anti correlation score:
<input v-model="analyticUnitConfig.anti_correlation_score" @change="antiCorrelationScoreChange" /> <br/>
Model score:
<input v-model="analyticUnitConfig.model_score" @change="modelScoreChange" /> <br/>
Threshold score:
<input v-model="analyticUnitConfig.threshold_score" @change="thresholdScoreChange" /> <br/><br/>
<button @click="clearAllLabeling"> clear all labeling </button>
</div>
<div v-if="analyticUnitType == analyticUnitTypes[2]">
Hold <pre>Z</pre> to set seasonality timespan
<hr/>
<!-- Alpha:
<input :value="analyticUnitConfig.alpha" @change="alphaChange" /> <br/> -->
Confidence:
<input v-model="analyticUnitConfig.confidence" @change="confidenceChange" /> <br/>
Seasonality:
<input v-model="analyticUnitConfig.seasonality" @change="seasonalityChange" /> <br/>
Seasonality iterations:
<input v-model="analyticUnitConfig.seasonality_iterations" @change="seasonalityIterationsChange" /> <br/>
<br/>
</div>
</div>
</div>
</template>
</div>
</template>
@ -140,6 +143,9 @@ export default defineComponent({
},
analyticUnitConfig() {
return this.$store.state.analyticUnitConfig;
},
analyticStatus() {
return this.$store.state.analyticStatus;
}
}
});

12
docker-compose.yml

@ -0,0 +1,12 @@
version: '3'
services:
app:
image: hastic/hastic:latest
restart: always
environment:
HASTIC_PORT: "4347"
HASTIC_PROMETHEUS__URL: "http://demo.robustperception.io:9090"
HASTIC_PROMETHEUS__QUERY: "rate(go_memstats_alloc_bytes_total[1m])"
ports:
- "4347:4347"

17
server/config.example.toml

@ -1,8 +1,10 @@
port = 4347
[prometheus]
url = "http://localhost:9090"
query = "rate(go_memstats_alloc_bytes_total[5m])"
# one of datasource sections (prometheus / influx) should be uncommented and edited corresponding to your environment
# [prometheus]
# url = "http://localhost:9090"
# query = "rate(go_memstats_alloc_bytes_total[5m])"
# [influx]
@ -16,8 +18,7 @@ query = "rate(go_memstats_alloc_bytes_total[5m])"
# |> yield(name: "mean")
# """
[alerting]
type = "webhook"
interval = 10 # in seconds
endpoint = "http://localhost:9092"
# [alerting]
# type = "webhook"
# interval = 10 # in seconds
# endpoint = "http://localhost:9092"

56
server/src/config.rs

@ -1,5 +1,7 @@
use subbeat::types::{DatasourceConfig, InfluxConfig, PrometheusConfig};
use std::env;
#[derive(Clone)]
pub struct WebhookAlertingConfig {
pub endpoint: String,
@ -25,6 +27,7 @@ pub struct Config {
fn resolve_datasource(config: &config::Config) -> anyhow::Result<DatasourceConfig> {
if config.get::<String>("prometheus.url").is_ok() {
println!("using Prometheus");
return Ok(DatasourceConfig::Prometheus(PrometheusConfig {
url: config.get("prometheus.url")?,
query: config.get("prometheus.query")?,
@ -32,6 +35,7 @@ fn resolve_datasource(config: &config::Config) -> anyhow::Result<DatasourceConfi
}
if config.get::<String>("influx.url").is_ok() {
println!("using Influx");
return Ok(DatasourceConfig::Influx(InfluxConfig {
url: config.get("influx.url")?,
org_id: config.get("influx.org_id")?,
@ -40,7 +44,7 @@ fn resolve_datasource(config: &config::Config) -> anyhow::Result<DatasourceConfi
}));
}
return Err(anyhow::format_err!("no datasource found"));
return Err(anyhow::format_err!("please configure a datasource"));
}
fn resolve_alerting(config: &config::Config) -> anyhow::Result<Option<AlertingConfig>> {
@ -62,7 +66,7 @@ fn resolve_alerting(config: &config::Config) -> anyhow::Result<Option<AlertingCo
let analytic_type = config.get::<String>("alerting.type").unwrap();
if analytic_type != "webhook" {
return Err(anyhow::format_err!(
"unknown alerting typy {}",
"unknown alerting type: {}",
analytic_type
));
}
@ -75,6 +79,46 @@ fn resolve_alerting(config: &config::Config) -> anyhow::Result<Option<AlertingCo
}));
}
// config::Environment doesn't support nested configs, e.g. `alerting.type`,
// so I've copied this from:
// https://github.com/rust-lang/mdBook/blob/f3e5fce6bf5e290c713f4015947dc0f0ad172d20/src/config.rs#L132
// so that `__` can be used in env variables instead of `.`,
// e.g. `HASTIC_ALERTING__TYPE` -> alerting.type
pub fn update_from_env(config: &mut config::Config) {
let overrides =
env::vars().filter_map(|(key, value)| parse_env(&key).map(|index| (index, value)));
for (key, value) in overrides {
config.set(&key, value).unwrap();
}
}
pub fn print_config(config: config::Config) {
// TODO: support any nesting level
let sections = config.to_owned().cache.into_table().unwrap();
for (section_name, values) in sections {
match values.clone().into_table() {
Err(_) => println!("{} => {}", section_name, values),
Ok(section) => {
for (key, value) in section {
println!("{}.{} => {}", section_name, key, value);
}
}
}
}
}
fn parse_env(key: &str) -> Option<String> {
const PREFIX: &str = "HASTIC_";
if key.starts_with(PREFIX) {
let key = &key[PREFIX.len()..];
Some(key.to_lowercase().replace("__", "."))
} else {
None
}
}
impl Config {
pub fn new() -> anyhow::Result<Config> {
let mut config = config::Config::default();
@ -83,14 +127,14 @@ impl Config {
config.merge(config::File::with_name("config")).unwrap();
}
config
.merge(config::Environment::with_prefix("HASTIC"))
.unwrap();
update_from_env(&mut config);
if config.get::<u16>("port").is_err() {
config.set("port", "8000").unwrap();
config.set("port", 4347).unwrap();
}
print_config(config.clone());
Ok(Config {
port: config.get::<u16>("port").unwrap(),
datasource_config: resolve_datasource(&config)?,

7
server/src/main.rs

@ -1,6 +1,6 @@
mod api;
use hastic::services::{analytic_service, metric_service, segments_service, analytic_unit_service};
use hastic::services::{analytic_service, analytic_unit_service, metric_service, segments_service, data_service};
use anyhow;
@ -9,9 +9,10 @@ async fn main() -> anyhow::Result<()> {
let config = hastic::config::Config::new()?;
let cfg_clone = config.clone();
let analytic_unit_service = analytic_unit_service::AnalyticUnitService::new()?;
let data_service = data_service::DataService::new()?;
let analytic_unit_service = analytic_unit_service::AnalyticUnitService::new(&data_service)?;
let metric_service = metric_service::MetricService::new(&config.datasource_config);
let segments_service = segments_service::SegmentsService::new()?;
let segments_service = segments_service::SegmentsService::new(&data_service)?;
let mut analytic_service = analytic_service::AnalyticService::new(
analytic_unit_service.clone(),

113
server/src/services/analytic_service/analytic_service.rs

@ -23,7 +23,7 @@ use crate::services::analytic_service::analytic_unit::types::{AnalyticUnit, Lear
use anyhow;
use chrono::{DateTime, TimeZone, Utc};
use chrono::{DateTime, Utc};
use tokio::sync::{mpsc, oneshot};
// TODO: now it's basically single analytic unit, service will operate on many AU
@ -59,18 +59,20 @@ impl AnalyticService {
segments_service: segments_service::SegmentsService,
alerting: Option<AlertingConfig>,
) -> AnalyticService {
// TODO: move buffer size to config
let (tx, rx) = mpsc::channel::<AnalyticServiceMessage>(32);
let aus = analytic_unit_service.clone();
AnalyticService {
analytic_unit_service,
analytic_unit_service: aus,
metric_service,
segments_service,
alerting,
// TODO: get it from persistance
analytic_unit: None,
analytic_unit_config: AnalyticUnitConfig::Pattern(Default::default()),
analytic_unit_config: analytic_unit_service.get_active_config().unwrap(),
analytic_unit_learning_status: LearningStatus::Initialization,
tx,
@ -135,7 +137,7 @@ impl AnalyticService {
};
let tx = self.tx.clone();
let au = self.analytic_unit.as_ref().unwrap().clone();
let dr = DetectionRunner::new(tx, drcfg, au);
let dr = DetectionRunner::new(tx,self.metric_service.clone(), drcfg, au);
self.detection_runner = Some(dr);
self.detection_runner.as_mut().unwrap().run(from);
@ -239,7 +241,12 @@ impl AnalyticService {
println!("Detection runner started from {}", from)
}
ResponseType::DetectionRunnerUpdate(id, timestamp) => {
self.analytic_unit_service.set_last_detection(id, timestamp).unwrap();
self.analytic_unit_service
.set_last_detection(id, timestamp)
.unwrap();
}
ResponseType::DetectionRunnerDetection(from, to) => {
println!("detection: {} {}", from, to);
}
ResponseType::LearningStarted => {
self.analytic_unit_learning_status = LearningStatus::Learning
@ -263,48 +270,84 @@ impl AnalyticService {
}
}
// TODO: create custom DatasourceError error type
Err(_) => {
Err(err) => {
self.analytic_unit = None;
self.analytic_unit_learning_status = LearningStatus::Error;
self.analytic_unit_learning_status = LearningStatus::Error(err.to_string());
}
}
}
fn patch_config(&mut self, patch: PatchConfig, tx: oneshot::Sender<()>) {
let (new_conf, need_learning) = self.analytic_unit_config.patch(patch);
self.analytic_unit_config = new_conf;
if need_learning {
self.consume_request(RequestType::RunLearning);
// TODO: it's not fullu correct: we need to wait when the learning starts
match tx.send(()) {
Ok(_) => {}
Err(_e) => {
println!("Can`t send patch config notification");
}
}
} else {
let my_id = self
.analytic_unit_service
.get_config_id(&self.analytic_unit_config);
let patch_id = patch.get_type_id();
let same_type = my_id == patch_id;
// TODO: need_learning and same_type logic overlaps, there is a way to optimise this
let need_learning = self.analytic_unit_config.patch_needs_learning(&patch);
if same_type {
// TODO: check when learning should be started
let new_conf = patch.get_new_config();
self.analytic_unit_config = new_conf.clone();
self.analytic_unit_service
.update_config_by_id(&my_id, &new_conf)
.unwrap();
if self.analytic_unit.is_some() {
tokio::spawn({
let au = self.analytic_unit.clone();
let cfg = self.analytic_unit_config.clone();
async move {
au.unwrap().write().await.set_config(cfg);
match tx.send(()) {
Ok(_) => {}
Err(_e) => {
println!("Can`t send patch config notification");
}
if need_learning {
self.consume_request(RequestType::RunLearning);
match tx.send(()) {
Ok(_) => {}
Err(e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
}
});
return;
} else {
tokio::spawn({
let au = self.analytic_unit.clone();
let cfg = self.analytic_unit_config.clone();
async move {
au.unwrap().write().await.set_config(cfg);
match tx.send(()) {
Ok(_) => {}
Err(e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
}
}
});
}
} else {
// TODO: check if we need this else
match tx.send(()) {
Ok(_) => {}
Err(_e) => {
Err(e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
}
}
} else {
let new_conf = self
.analytic_unit_service
.get_config_by_id(&patch_id)
.unwrap();
self.analytic_unit_config = new_conf.clone();
self.consume_request(RequestType::RunLearning);
match tx.send(()) {
Ok(_) => {}
Err(e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
}
}
}
@ -333,9 +376,11 @@ impl AnalyticService {
ms: MetricService,
ss: SegmentsService,
) {
let mut au = match aus.resolve(aucfg) {
let mut au = match aus.resolve(&aucfg) {
Ok(a) => a,
Err(e) => { panic!("{}", e); }
Err(e) => {
panic!("{}", e);
}
};
match tx

9
server/src/services/analytic_service/analytic_unit/anomaly_analytic_unit.rs

@ -50,7 +50,10 @@ impl SARIMA {
// TODO: trend detection
if ts.len() < 2 {
return Err(anyhow::format_err!("to short timeserie to learn from"));
return Err(anyhow::format_err!(
"too short timeserie to learn from, timeserie length: {}",
ts.len()
));
}
// TODO: ensure capacity with seasonality size
let mut res_ts = Vec::<(u64, f64)>::new();
@ -164,6 +167,10 @@ impl AnalyticUnit for AnomalyAnalyticUnit {
fn get_id(&self) -> String {
return self.id.to_owned();
}
fn get_detection_window(&self) -> u64 {
// TODO: return window based on real petterns info
return DETECTION_STEP;
}
fn set_config(&mut self, config: AnalyticUnitConfig) {
if let AnalyticUnitConfig::Anomaly(cfg) = config {
self.config = cfg;

1
server/src/services/analytic_service/analytic_unit/mod.rs

@ -7,4 +7,3 @@ use self::{
anomaly_analytic_unit::AnomalyAnalyticUnit, pattern_analytic_unit::PatternAnalyticUnit,
threshold_analytic_unit::ThresholdAnalyticUnit, types::AnalyticUnitConfig,
};

4
server/src/services/analytic_service/analytic_unit/pattern_analytic_unit.rs

@ -219,6 +219,10 @@ impl AnalyticUnit for PatternAnalyticUnit {
fn get_id(&self) -> String {
return self.id.to_owned();
}
fn get_detection_window(&self) -> u64 {
let lr = self.learning_results.as_ref().unwrap();
return lr.avg_pattern_length as u64;
}
fn set_config(&mut self, config: AnalyticUnitConfig) {
if let AnalyticUnitConfig::Pattern(cfg) = config {
self.config = cfg;

3
server/src/services/analytic_service/analytic_unit/threshold_analytic_unit.rs

@ -25,6 +25,9 @@ impl AnalyticUnit for ThresholdAnalyticUnit {
fn get_id(&self) -> String {
return self.id.to_owned();
}
fn get_detection_window(&self) -> u64 {
return DETECTION_STEP;
}
async fn learn(
&mut self,
_ms: MetricService,

74
server/src/services/analytic_service/analytic_unit/types.rs

@ -57,13 +57,61 @@ impl Default for ThresholdConfig {
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum AnalyticUnitConfig {
Pattern(PatternConfig),
Threshold(ThresholdConfig),
Pattern(PatternConfig),
Anomaly(AnomalyConfig),
}
impl AnalyticUnitConfig {
// return true if patch is different type
pub fn get_default_by_id(id: &String) -> AnalyticUnitConfig {
let iid = id.as_str();
match iid {
"1" => AnalyticUnitConfig::Threshold(Default::default()),
"2" => AnalyticUnitConfig::Pattern(Default::default()),
"3" => AnalyticUnitConfig::Anomaly(Default::default()),
_ => panic!("bad id for getting get_default_by_id"),
}
}
pub fn patch_needs_learning(&self, patch: &PatchConfig) -> bool {
// TODO: maybe use type id's to optimise code
match patch {
PatchConfig::Pattern(tcfg) => match self.clone() {
AnalyticUnitConfig::Pattern(_) => {
return false;
}
_ => return true,
},
PatchConfig::Anomaly(tcfg) => match self.clone() {
AnalyticUnitConfig::Anomaly(scfg) => {
if tcfg.is_some() {
let t = tcfg.as_ref().unwrap();
let mut need_learning = t.seasonality != scfg.seasonality;
need_learning |= t.seasonality_iterations != scfg.seasonality_iterations;
return need_learning;
} else {
return false;
}
}
_ => {
return true;
}
},
PatchConfig::Threshold(tcfg) => match self.clone() {
AnalyticUnitConfig::Threshold(_) => {
return false;
}
_ => {
return true;
}
},
}
}
// TODO: maybe this method depricated
// return true if need needs relearning
pub fn patch(&self, patch: PatchConfig) -> (AnalyticUnitConfig, bool) {
match patch {
PatchConfig::Pattern(tcfg) => match self.clone() {
@ -71,6 +119,7 @@ impl AnalyticUnitConfig {
if tcfg.is_some() {
return (AnalyticUnitConfig::Pattern(tcfg.unwrap()), false);
} else {
// TODO: it should be extraced from db
return (AnalyticUnitConfig::Pattern(Default::default()), false);
}
}
@ -131,6 +180,7 @@ pub enum LearningResult {
#[async_trait]
pub trait AnalyticUnit {
fn get_id(&self) -> String;
fn get_detection_window(&self) -> u64;
async fn learn(
&mut self,
ms: MetricService,
@ -153,3 +203,23 @@ pub enum PatchConfig {
Threshold(Option<ThresholdConfig>),
Anomaly(Option<AnomalyConfig>),
}
impl PatchConfig {
pub fn get_type_id(&self) -> String {
match &self {
PatchConfig::Threshold(_) => "1".to_string(),
PatchConfig::Pattern(_) => "2".to_string(),
PatchConfig::Anomaly(_) => "3".to_string(),
}
}
pub fn get_new_config(&self) -> AnalyticUnitConfig {
match &self {
PatchConfig::Threshold(cfg) => {
AnalyticUnitConfig::Threshold(cfg.as_ref().unwrap().clone())
}
PatchConfig::Pattern(cfg) => AnalyticUnitConfig::Pattern(cfg.as_ref().unwrap().clone()),
PatchConfig::Anomaly(cfg) => AnalyticUnitConfig::Anomaly(cfg.as_ref().unwrap().clone()),
}
}
}

62
server/src/services/analytic_service/detection_runner.rs

@ -1,12 +1,16 @@
use chrono::{Utc, DateTime};
use chrono::{DateTime, Utc};
use tokio::sync::{mpsc, RwLock};
use tokio::sync::mpsc;
use crate::services::metric_service::MetricService;
use super::types::{AnalyticServiceMessage, AnalyticUnitRF, DetectionRunnerConfig, ResponseType};
use tokio::time::{sleep, Duration};
pub struct DetectionRunner {
tx: mpsc::Sender<AnalyticServiceMessage>,
metric_service: MetricService,
config: DetectionRunnerConfig,
analytic_unit: AnalyticUnitRF,
running_handler: Option<tokio::task::JoinHandle<()>>,
@ -15,10 +19,12 @@ pub struct DetectionRunner {
impl DetectionRunner {
pub fn new(
tx: mpsc::Sender<AnalyticServiceMessage>,
metric_service: MetricService,
config: DetectionRunnerConfig,
analytic_unit: AnalyticUnitRF,
) -> DetectionRunner {
DetectionRunner {
metric_service,
tx,
config,
analytic_unit,
@ -27,22 +33,26 @@ impl DetectionRunner {
}
pub fn run(&mut self, from: u64) {
// TODO: get last detection timestamp from persistance
// TODO: set last detection from "now"
if self.running_handler.is_some() {
self.running_handler.as_mut().unwrap().abort();
}
self.running_handler = Some(tokio::spawn({
// TODO: clone channel
let cfg = self.config.clone();
let ms = self.metric_service.clone();
let tx = self.tx.clone();
let au = self.analytic_unit.clone();
async move {
// TODO: run detection "from" for big timespan
// TODO: parse detections to webhooks
// TODO: define window for detection
// TODO: save last detection
// TODO: handle case when detection is in the end and continues after "now"
// it's better to make an issue on github
// TODO: find place to update analytic unit model
let window_size = au.as_ref().read().await.get_detection_window();
let detection_step = ms.get_detection_step();
let mut t_from = from - window_size;
let mut t_to = from;
match tx
.send(AnalyticServiceMessage::Response(Ok(
@ -55,17 +65,39 @@ impl DetectionRunner {
}
loop {
// TODO: don't use DateTime, but count timestamp by steps
let now: DateTime<Utc> = Utc::now();
let to = now.timestamp() as u64;
// TODO: run detection periodically
sleep(Duration::from_secs(cfg.interval)).await;
match tx.send(AnalyticServiceMessage::Response(Ok(
ResponseType::DetectionRunnerUpdate(au.as_ref().read().await.get_id(), to)
))).await {
Ok(_) => {},
let a = au.as_ref().read().await;
let detections = a.detect(ms.clone(), t_from, t_to).await.unwrap();
for d in detections {
match tx
.send(AnalyticServiceMessage::Response(Ok(
ResponseType::DetectionRunnerDetection(d.0, d.1),
)))
.await
{
Ok(_) => {}
Err(_e) => println!("Fail to send detection runner detection notification"),
}
}
// TODO: send info about detections to tx
match tx
.send(AnalyticServiceMessage::Response(Ok(
ResponseType::DetectionRunnerUpdate(
au.as_ref().read().await.get_id(),
t_to,
),
)))
.await
{
Ok(_) => {}
Err(_e) => println!("Fail to send detection runner started notification"),
}
sleep(Duration::from_secs(cfg.interval)).await;
t_from += detection_step;
t_to += detection_step;
}
}
}));

3
server/src/services/analytic_service/types.rs

@ -22,7 +22,7 @@ pub enum LearningStatus {
Initialization,
Starting,
Learning,
Error,
Error(String),
Ready,
}
@ -45,6 +45,7 @@ impl Default for LearningTrain {
pub enum ResponseType {
DetectionRunnerStarted(u64),
DetectionRunnerUpdate(String, u64), // analytic_unit id and timestamp
DetectionRunnerDetection(u64, u64), // TODO: add more info about the analytic unit and more
LearningStarted,
LearningFinished(Box<dyn AnalyticUnit + Send + Sync>),
LearningFinishedEmpty,

175
server/src/services/analytic_unit_service.rs

@ -1,76 +1,195 @@
use std::sync::{Arc, Mutex};
use crate::utils::get_random_str;
use rusqlite::{params, Connection};
use rusqlite::{params, Connection, Row};
use warp::hyper::rt::Executor;
use super::analytic_service::analytic_unit::{
anomaly_analytic_unit::AnomalyAnalyticUnit,
pattern_analytic_unit::PatternAnalyticUnit,
threshold_analytic_unit::ThresholdAnalyticUnit,
types::{self, AnalyticUnitConfig},
};
use super::analytic_service::analytic_unit::{types::{AnalyticUnitConfig, self}, threshold_analytic_unit::ThresholdAnalyticUnit, pattern_analytic_unit::PatternAnalyticUnit, anomaly_analytic_unit::AnomalyAnalyticUnit};
use super::data_service::DataService;
#[derive(Clone)]
pub struct AnalyticUnitService {
// TODO: resolve by setting id for 3 types
// TODO: create database
// TODO: update detection
connection: Arc<Mutex<Connection>>
connection: Arc<Mutex<Connection>>,
}
// TODO: get DataService
impl AnalyticUnitService {
pub fn new() -> anyhow::Result<AnalyticUnitService> {
// TODO: remove repetition with segment_service
std::fs::create_dir_all("./data").unwrap();
let conn = Connection::open("./data/analytic_units.db")?;
pub fn new(ds: &DataService) -> anyhow::Result<AnalyticUnitService> {
let conn = ds.analytic_units_connection.clone();
// TODO: add learning results field
conn.execute(
conn.lock().unwrap().execute(
"CREATE TABLE IF NOT EXISTS analytic_unit (
id TEXT PRIMARY KEY,
last_detection INTEGER
last_detection INTEGER,
active BOOLEAN,
type INTEGER,
config TEXT
)",
[],
)?;
Ok(AnalyticUnitService {
connection: Arc::new(Mutex::new(conn)),
connection: conn
})
}
// TODO: optional id
pub fn resolve_au(&self, cfg: AnalyticUnitConfig) -> Box<dyn types::AnalyticUnit + Send + Sync> {
pub fn resolve_au(
&self,
cfg: &AnalyticUnitConfig,
) -> Box<dyn types::AnalyticUnit + Send + Sync> {
match cfg {
AnalyticUnitConfig::Threshold(c) => Box::new(ThresholdAnalyticUnit::new("1".to_string(), c.clone())),
AnalyticUnitConfig::Pattern(c) => Box::new(PatternAnalyticUnit::new("2".to_string(), c.clone())),
AnalyticUnitConfig::Anomaly(c) => Box::new(AnomalyAnalyticUnit::new("3".to_string(), c.clone())),
AnalyticUnitConfig::Threshold(c) => {
Box::new(ThresholdAnalyticUnit::new("1".to_string(), c.clone()))
}
AnalyticUnitConfig::Pattern(c) => {
Box::new(PatternAnalyticUnit::new("2".to_string(), c.clone()))
}
AnalyticUnitConfig::Anomaly(c) => {
Box::new(AnomalyAnalyticUnit::new("3".to_string(), c.clone()))
}
}
}
pub fn resolve(&self, cfg: AnalyticUnitConfig) -> anyhow::Result<Box<dyn types::AnalyticUnit + Send + Sync>> {
// TODO: get id of analytic_unit which be used also as it's type
pub fn resolve(
&self,
cfg: &AnalyticUnitConfig,
) -> anyhow::Result<Box<dyn types::AnalyticUnit + Send + Sync>> {
let au = self.resolve_au(cfg);
let id = au.as_ref().get_id();
let conn = self.connection.lock().unwrap();
let mut stmt = conn.prepare(
"SELECT id from analytic_unit WHERE id = ?1",
)?;
let mut stmt = conn.prepare("SELECT id from analytic_unit WHERE id = ?1")?;
let res = stmt.exists(params![id])?;
if res == false {
let cfg_json = serde_json::to_string(&cfg)?;
conn.execute(
"INSERT INTO analytic_unit (id) VALUES (?1)",
params![id]
"INSERT INTO analytic_unit (id, type, config) VALUES (?1, ?1, ?2)",
params![id, cfg_json],
)?;
}
conn.execute(
"UPDATE analytic_unit set active = FALSE where active = TRUE",
params![],
)?;
conn.execute(
"UPDATE analytic_unit set active = TRUE where id = ?1",
params![id],
)?;
return Ok(au);
}
// TODO: resolve with saving by id
pub fn set_last_detection(&self, id: String, last_detection: u64) -> anyhow::Result<()> {
let conn = self.connection.lock().unwrap();
conn.execute(
"UPDATE analytic_unit SET last_detection = ?1 WHERE id = ?2",
params![last_detection, id]
params![last_detection, id],
)?;
Ok(())
}
}
/// Reconstructs the analytic unit currently flagged `active = TRUE`
/// from its persisted JSON config.
// TODO: return default when there is no active
pub fn get_active(&self) -> anyhow::Result<Box<dyn types::AnalyticUnit + Send + Sync>> {
    let connection = self.connection.lock().unwrap();
    let mut query =
        connection.prepare("SELECT id, type, config from analytic_unit WHERE active = TRUE")?;
    let unit = query.query_row([], |row| {
        let raw_cfg: String = row.get(2)?;
        let parsed: AnalyticUnitConfig = serde_json::from_str(&raw_cfg).unwrap();
        Ok(self.resolve(&parsed))
    })??;
    Ok(unit)
}
/// Returns the config of the active analytic unit, creating and
/// activating a default Pattern unit when none is active yet.
pub fn get_active_config(&self) -> anyhow::Result<AnalyticUnitConfig> {
    // Lock is scoped: `resolve` below re-takes the same (non-reentrant) mutex.
    let has_active = {
        let conn = self.connection.lock().unwrap();
        let mut stmt = conn.prepare("SELECT config from analytic_unit WHERE active = TRUE")?;
        stmt.exists([])?
    };

    if !has_active {
        let default_cfg = AnalyticUnitConfig::Pattern(Default::default());
        self.resolve(&default_cfg)?;
        return Ok(default_cfg);
    }

    let conn = self.connection.lock().unwrap();
    let mut stmt = conn.prepare("SELECT config from analytic_unit WHERE active = TRUE")?;
    let active_cfg = stmt.query_row([], |row| {
        let raw: String = row.get(0)?;
        Ok(serde_json::from_str(&raw).unwrap())
    })?;
    Ok(active_cfg)
}
/// Returns the config stored for analytic unit `id`, creating and
/// activating the default config for that id when none is stored yet.
pub fn get_config_by_id(&self, id: &String) -> anyhow::Result<AnalyticUnitConfig> {
    // Lock is scoped: `resolve` below re-takes the same (non-reentrant) mutex.
    let stored = {
        let conn = self.connection.lock().unwrap();
        let mut stmt = conn.prepare("SELECT config from analytic_unit WHERE id = ?1")?;
        stmt.exists([id])?
    };

    if !stored {
        let default_cfg = AnalyticUnitConfig::get_default_by_id(id);
        self.resolve(&default_cfg)?;
        return Ok(default_cfg);
    }

    let conn = self.connection.lock().unwrap();
    let mut stmt = conn.prepare("SELECT config from analytic_unit WHERE id = ?1")?;
    let cfg = stmt.query_row([id], |row| {
        let raw: String = row.get(0)?;
        Ok(serde_json::from_str(&raw).unwrap())
    })?;
    Ok(cfg)
}
/// Maps a config variant to its fixed analytic-unit id ("1"/"2"/"3").
pub fn get_config_id(&self, cfg: &AnalyticUnitConfig) -> String {
    let id = match cfg {
        AnalyticUnitConfig::Threshold(_) => "1",
        AnalyticUnitConfig::Pattern(_) => "2",
        AnalyticUnitConfig::Anomaly(_) => "3",
    };
    id.to_string()
}
/// Persists `cfg` as JSON for the analytic unit `id`.
// TODO: it's possible that the config doesn't exist, yet we try to update it
pub fn update_config_by_id(&self, id: &String, cfg: &AnalyticUnitConfig) -> anyhow::Result<()> {
    let serialized = serde_json::to_string(&cfg)?;
    self.connection.lock().unwrap().execute(
        "UPDATE analytic_unit SET config = ?1 WHERE id = ?2",
        params![serialized, id],
    )?;
    Ok(())
}
/// Persists `cfg` as JSON for whichever analytic unit is currently active.
pub fn update_active_config(&self, cfg: &AnalyticUnitConfig) -> anyhow::Result<()> {
    let serialized = serde_json::to_string(&cfg)?;
    self.connection.lock().unwrap().execute(
        "UPDATE analytic_unit SET config = ?1 WHERE active = TRUE",
        params![serialized],
    )?;
    Ok(())
}
}

23
server/src/services/data_service.rs

@ -0,0 +1,23 @@
use std::sync::{Arc, Mutex};
use rusqlite::{Connection};
/// Owns the SQLite connections shared by the other services, so the
/// "./data" folder and database files are created in a single place.
pub struct DataService {
    pub analytic_units_connection: Arc<Mutex<Connection>>,
    pub segments_connection: Arc<Mutex<Connection>>,
}

impl DataService {
    /// Creates the data directory (if missing) and opens both databases.
    ///
    /// # Errors
    /// Propagates filesystem and SQLite errors instead of panicking, so
    /// callers can report startup failures (e.g. read-only filesystem).
    pub fn new() -> anyhow::Result<DataService> {
        // was `.unwrap()`: new() already returns anyhow::Result, so a
        // failure to create "./data" should surface as an error, not a panic
        std::fs::create_dir_all("./data")?;
        let analytic_units_connection = Connection::open("./data/analytic_units.db")?;
        let segments_connection = Connection::open("./data/segments.db")?;
        Ok(DataService {
            analytic_units_connection: Arc::new(Mutex::new(analytic_units_connection)),
            segments_connection: Arc::new(Mutex::new(segments_connection)),
        })
    }
}

7
server/src/services/metric_service.rs

@ -32,4 +32,11 @@ impl MetricService {
}
return Ok(mr);
}
// TODO: this is a hack for DetectionRunner: the step should vary per
// analytic unit and its config
/// Fixed detection step used by the detection runner.
pub fn get_detection_step(&self) -> u64 {
    10
}
}

3
server/src/services/mod.rs

@ -1,5 +1,6 @@
pub mod analytic_service;
pub mod data_service;
pub mod analytic_unit_service;
pub mod metric_service;
pub mod segments_service;
pub mod user_service;
pub mod analytic_unit_service;

16
server/src/services/segments_service.rs

@ -6,9 +6,13 @@ use serde::{Deserialize, Serialize};
use std::sync::{Arc, Mutex};
use super::data_service::DataService;
pub const ID_LENGTH: usize = 20;
pub type SegmentId = String;
// TODO: make logic with this enum shorter
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum SegmentType {
@ -58,19 +62,19 @@ impl Segment {
}
}
// TODO: get DataService
#[derive(Clone)]
pub struct SegmentsService {
connection: Arc<Mutex<Connection>>,
}
impl SegmentsService {
pub fn new() -> anyhow::Result<SegmentsService> {
// TODO: move it to data service
std::fs::create_dir_all("./data").unwrap();
pub fn new(ds: &DataService) -> anyhow::Result<SegmentsService> {
// TODO: add analytic_unit id as a new field
let conn = Connection::open("./data/segments.db")?;
conn.execute(
let conn = ds.segments_connection.clone();
conn.lock().unwrap().execute(
"CREATE TABLE IF NOT EXISTS segment (
id TEXT PRIMARY KEY,
start INTEGER NOT NULL,
@ -81,7 +85,7 @@ impl SegmentsService {
)?;
Ok(SegmentsService {
connection: Arc::new(Mutex::new(conn)),
connection: conn
})
}

Loading…
Cancel
Save