Compare commits

...

26 Commits

Author SHA1 Message Date
rozetko 77c5d3d3ce Merge pull request 'hotfix: `configuration property "influx.org_id" not found`' (#111) from influx-docker into main 2 years ago
rozetko d099998562 fix `configuration property "influx.org_id" not found` 2 years ago
rozetko abbadbc155 Merge pull request 'Make influx work in Docker' (#110) from influx-docker into main 2 years ago
rozetko fce0f30e9e docker-compose: rm "network-mode: host" option 2 years ago
rozetko f4226a5245 upd config example 2 years ago
rozetko 57854909f9 print config on start 2 years ago
rozetko e16410d407 make influx work in docker 2 years ago
Alexey Velikiy 5f16e006a8 rm range print in client 2 years ago
Alexey Velikiy 55c0fca371 debug error in config patch notification 2 years ago
glitch4347 aa4008662a
Merge pull request #108 from hastic/get-notified-in-analytic-service-about-new-detections-#51 2 years ago
Alexey Velikiy a9d2995281 detection runner detection notification 2 years ago
glitch4347 eee437d779
Merge pull request #106 from hastic/data-folder-creation-single-place-#102 2 years ago
Alexey Velikiy b7fb951590 data_service 2 years ago
Alexey Velikiy ad95c1e49f rm console log 2 years ago
glitch4347 f684d8ee6a
Merge pull request #100 from hastic/ui-not-editable-confidence-param-#99 2 years ago
Alexey Velikiy 0d756d3f42 use v-model instead of :value 2 years ago
Alexey Velikiy a4da3b5ea3 todo++ 2 years ago
Alexey Velikiy cafb50cce3 small refactoring 2 years ago
Alexey Velikiy c7284f4305 todos++ 2 years ago
glitch4347 cb87a98e39
Merge pull request #96 from hastic/env-vars-docs-#90 2 years ago
glitch4347 49c2e30acc
Merge pull request #94 from rusdacent/main 2 years ago
rusdacent e0e4d93ebb Set always restart 2 years ago
rusdacent 8121292450 Set ports for Hastic 2 years ago
rusdacent 2d5786d639 Set image for docker-compose 2 years ago
rusdacent 0733a5d002 Add docker-compose 2 years ago
rusdacent e20a5affcc Add Dockerfile 2 years ago
  1. 27
      Dockerfile
  2. 1
      client/src/components/pods/pattern_pod.ts
  3. 16
      client/src/views/Home.vue
  4. 12
      docker-compose.yml
  5. 17
      server/config.example.toml
  6. 27
      server/src/config.rs
  7. 7
      server/src/main.rs
  8. 17
      server/src/services/analytic_service/analytic_service.rs
  9. 20
      server/src/services/analytic_service/detection_runner.rs
  10. 1
      server/src/services/analytic_service/types.rs
  11. 15
      server/src/services/analytic_unit_service.rs
  12. 23
      server/src/services/data_service.rs
  13. 1
      server/src/services/mod.rs
  14. 16
      server/src/services/segments_service.rs

27
Dockerfile

@ -0,0 +1,27 @@
FROM rust:1.57.0-bullseye as builder
RUN curl -sL https://deb.nodesource.com/setup_16.x | bash -
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
nodejs \
gcc \
g++ \
make \
musl-tools \
&& rm -rf /var/lib/apt/lists/*
RUN npm install --global yarn
RUN rustup target add x86_64-unknown-linux-musl
ADD . ./
RUN make
FROM debian:bullseye-slim
COPY --from=builder /release/hastic /hastic
COPY --from=builder /release/config.toml /config.toml
COPY --from=builder /release/public /public
CMD ["./hastic"]

1
client/src/components/pods/pattern_pod.ts

@ -79,7 +79,6 @@ export class PatternPod extends HasticPod<UpdateDataCallback> {
} else {
console.log('took from range from default');
}
console.log(from + " ---- " + to);
this.udc({ from, to })
.then(resp => {

16
client/src/views/Home.vue

@ -16,7 +16,7 @@
<div id="controls">
<div v-if="analyticUnitType == analyticUnitTypes[0]">
Threshold:
<input :value="analyticUnitConfig.threshold" @change="thresholdChange" /> <br/><br/>
<input v-model="analyticUnitConfig.threshold" @change="thresholdChange" /> <br/><br/>
</div>
<div v-if="analyticUnitType == analyticUnitTypes[1]">
Hold <pre>S</pre> to label patterns;
@ -25,13 +25,13 @@
<br/>
<hr/>
Correlation score:
<input :value="analyticUnitConfig.correlation_score" @change="correlationScoreChange" /> <br/>
<input v-model="analyticUnitConfig.correlation_score" @change="correlationScoreChange" /> <br/>
Anti correlation score:
<input :value="analyticUnitConfig.anti_correlation_score" @change="antiCorrelationScoreChange" /> <br/>
<input v-model="analyticUnitConfig.anti_correlation_score" @change="antiCorrelationScoreChange" /> <br/>
Model score:
<input :value="analyticUnitConfig.model_score" @change="modelScoreChange" /> <br/>
<input v-model="analyticUnitConfig.model_score" @change="modelScoreChange" /> <br/>
Threshold score:
<input :value="analyticUnitConfig.threshold_score" @change="thresholdScoreChange" /> <br/><br/>
<input v-model="analyticUnitConfig.threshold_score" @change="thresholdScoreChange" /> <br/><br/>
<button @click="clearAllLabeling"> clear all labeling </button>
</div>
<div v-if="analyticUnitType == analyticUnitTypes[2]">
@ -40,11 +40,11 @@
<!-- Alpha:
<input :value="analyticUnitConfig.alpha" @change="alphaChange" /> <br/> -->
Confidence:
<input :value="analyticUnitConfig.confidence" @change="confidenceChange" /> <br/>
<input v-model="analyticUnitConfig.confidence" @change="confidenceChange" /> <br/>
Seasonality:
<input :value="analyticUnitConfig.seasonality" @change="seasonalityChange" /> <br/>
<input v-model="analyticUnitConfig.seasonality" @change="seasonalityChange" /> <br/>
Seasonality iterations:
<input :value="analyticUnitConfig.seasonality_iterations" @change="seasonalityIterationsChange" /> <br/>
<input v-model="analyticUnitConfig.seasonality_iterations" @change="seasonalityIterationsChange" /> <br/>
<br/>
</div>
</div>

12
docker-compose.yml

@ -0,0 +1,12 @@
version: '3'
services:
app:
image: hastic/hastic:latest
restart: always
environment:
HASTIC_PORT: "4347"
HASTIC_PROMETHEUS__URL: "http://demo.robustperception.io:9090"
HASTIC_PROMETHEUS__QUERY: "rate(go_memstats_alloc_bytes_total[1m])"
ports:
- "4347:4347"

17
server/config.example.toml

@ -1,8 +1,10 @@
port = 4347
[prometheus]
url = "http://localhost:9090"
query = "rate(go_memstats_alloc_bytes_total[5m])"
# one of datasource sections (prometheus / influx) should be uncommented and edited corresponding to your environment
# [prometheus]
# url = "http://localhost:9090"
# query = "rate(go_memstats_alloc_bytes_total[5m])"
# [influx]
@ -16,8 +18,7 @@ query = "rate(go_memstats_alloc_bytes_total[5m])"
# |> yield(name: "mean")
# """
[alerting]
type = "webhook"
interval = 10 # in seconds
endpoint = "http://localhost:9092"
# [alerting]
# type = "webhook"
# interval = 10 # in seconds
# endpoint = "http://localhost:9092"

27
server/src/config.rs

@ -27,6 +27,7 @@ pub struct Config {
fn resolve_datasource(config: &config::Config) -> anyhow::Result<DatasourceConfig> {
if config.get::<String>("prometheus.url").is_ok() {
println!("using Prometheus");
return Ok(DatasourceConfig::Prometheus(PrometheusConfig {
url: config.get("prometheus.url")?,
query: config.get("prometheus.query")?,
@ -34,6 +35,7 @@ fn resolve_datasource(config: &config::Config) -> anyhow::Result<DatasourceConfi
}
if config.get::<String>("influx.url").is_ok() {
println!("using Influx");
return Ok(DatasourceConfig::Influx(InfluxConfig {
url: config.get("influx.url")?,
org_id: config.get("influx.org_id")?,
@ -42,7 +44,7 @@ fn resolve_datasource(config: &config::Config) -> anyhow::Result<DatasourceConfi
}));
}
return Err(anyhow::format_err!("no datasource found"));
return Err(anyhow::format_err!("please configure a datasource"));
}
fn resolve_alerting(config: &config::Config) -> anyhow::Result<Option<AlertingConfig>> {
@ -80,24 +82,38 @@ fn resolve_alerting(config: &config::Config) -> anyhow::Result<Option<AlertingCo
// config::Environment doesn't support nested configs, e.g. `alerting.type`,
// so I've copied this from:
// https://github.com/rust-lang/mdBook/blob/f3e5fce6bf5e290c713f4015947dc0f0ad172d20/src/config.rs#L132
// so that `__` can be used in env variables instead of `.`,
// so that `__` can be used in env variables instead of `.`,
// e.g. `HASTIC_ALERTING__TYPE` -> alerting.type
pub fn update_from_env(config: &mut config::Config) {
let overrides =
env::vars().filter_map(|(key, value)| parse_env(&key).map(|index| (index, value)));
for (key, value) in overrides {
println!("{} => {}", key, value);
config.set(&key, value).unwrap();
}
}
pub fn print_config(config: config::Config) {
// TODO: support any nesting level
let sections = config.to_owned().cache.into_table().unwrap();
for (section_name, values) in sections {
match values.clone().into_table() {
Err(_) => println!("{} => {}", section_name, values),
Ok(section) => {
for (key, value) in section {
println!("{}.{} => {}", section_name, key, value);
}
}
}
}
}
fn parse_env(key: &str) -> Option<String> {
const PREFIX: &str = "HASTIC_";
if key.starts_with(PREFIX) {
let key = &key[PREFIX.len()..];
Some(key.to_lowercase().replace("__", ".").replace("_", "-"))
Some(key.to_lowercase().replace("__", "."))
} else {
None
}
@ -117,7 +133,8 @@ impl Config {
config.set("port", 4347).unwrap();
}
// TODO: print resulted config (perfectly, it needs adding `derive(Debug)` in `subbeat`'s `DatasourceConfig`)
print_config(config.clone());
Ok(Config {
port: config.get::<u16>("port").unwrap(),
datasource_config: resolve_datasource(&config)?,

7
server/src/main.rs

@ -1,6 +1,6 @@
mod api;
use hastic::services::{analytic_service, analytic_unit_service, metric_service, segments_service};
use hastic::services::{analytic_service, analytic_unit_service, metric_service, segments_service, data_service};
use anyhow;
@ -9,9 +9,10 @@ async fn main() -> anyhow::Result<()> {
let config = hastic::config::Config::new()?;
let cfg_clone = config.clone();
let analytic_unit_service = analytic_unit_service::AnalyticUnitService::new()?;
let data_service = data_service::DataService::new()?;
let analytic_unit_service = analytic_unit_service::AnalyticUnitService::new(&data_service)?;
let metric_service = metric_service::MetricService::new(&config.datasource_config);
let segments_service = segments_service::SegmentsService::new()?;
let segments_service = segments_service::SegmentsService::new(&data_service)?;
let mut analytic_service = analytic_service::AnalyticService::new(
analytic_unit_service.clone(),

17
server/src/services/analytic_service/analytic_service.rs

@ -137,7 +137,7 @@ impl AnalyticService {
};
let tx = self.tx.clone();
let au = self.analytic_unit.as_ref().unwrap().clone();
let dr = DetectionRunner::new(self.metric_service.clone(), tx, drcfg, au);
let dr = DetectionRunner::new(tx,self.metric_service.clone(), drcfg, au);
self.detection_runner = Some(dr);
self.detection_runner.as_mut().unwrap().run(from);
@ -245,6 +245,9 @@ impl AnalyticService {
.set_last_detection(id, timestamp)
.unwrap();
}
ResponseType::DetectionRunnerDetection(from, to) => {
println!("detection: {} {}", from, to);
}
ResponseType::LearningStarted => {
self.analytic_unit_learning_status = LearningStatus::Learning
}
@ -299,8 +302,9 @@ impl AnalyticService {
self.consume_request(RequestType::RunLearning);
match tx.send(()) {
Ok(_) => {}
Err(_e) => {
Err(e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
}
return;
@ -312,8 +316,9 @@ impl AnalyticService {
au.unwrap().write().await.set_config(cfg);
match tx.send(()) {
Ok(_) => {}
Err(_e) => {
Err(e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
}
}
@ -323,8 +328,9 @@ impl AnalyticService {
// TODO: check if we need this else
match tx.send(()) {
Ok(_) => {}
Err(_e) => {
Err(e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
}
}
@ -337,8 +343,9 @@ impl AnalyticService {
self.consume_request(RequestType::RunLearning);
match tx.send(()) {
Ok(_) => {}
Err(_e) => {
Err(e) => {
println!("Can`t send patch config notification");
println!("{:?}", e);
}
}
}

20
server/src/services/analytic_service/detection_runner.rs

@ -9,8 +9,8 @@ use tokio::time::{sleep, Duration};
pub struct DetectionRunner {
metric_service: MetricService,
tx: mpsc::Sender<AnalyticServiceMessage>,
metric_service: MetricService,
config: DetectionRunnerConfig,
analytic_unit: AnalyticUnitRF,
running_handler: Option<tokio::task::JoinHandle<()>>,
@ -18,8 +18,8 @@ pub struct DetectionRunner {
impl DetectionRunner {
pub fn new(
metric_service: MetricService,
tx: mpsc::Sender<AnalyticServiceMessage>,
metric_service: MetricService,
config: DetectionRunnerConfig,
analytic_unit: AnalyticUnitRF,
) -> DetectionRunner {
@ -33,7 +33,6 @@ impl DetectionRunner {
}
pub fn run(&mut self, from: u64) {
// TODO: get last detection timestamp from persistence
// TODO: set last detection from "now"
if self.running_handler.is_some() {
self.running_handler.as_mut().unwrap().abort();
@ -46,8 +45,9 @@ impl DetectionRunner {
async move {
// TODO: run detection "from" for big timespan
// TODO: parse detections to webhooks
// TODO: define window for detection
// TODO: handle case when detection is in the end and continues after "now"
// it's better to make an issue on github
// TODO: find place to update analytic unit model
let window_size = au.as_ref().read().await.get_detection_window();
let detection_step = ms.get_detection_step();
@ -69,10 +69,18 @@ impl DetectionRunner {
let detections = a.detect(ms.clone(), t_from, t_to).await.unwrap();
for d in detections {
println!("detection: {} {}", d.0, d.1);
match tx
.send(AnalyticServiceMessage::Response(Ok(
ResponseType::DetectionRunnerDetection(d.0, d.1),
)))
.await
{
Ok(_) => {}
Err(_e) => println!("Fail to send detection runner detection notification"),
}
}
// TODO: set info about detections to tx
// TODO: send info about detections to tx
match tx
.send(AnalyticServiceMessage::Response(Ok(

1
server/src/services/analytic_service/types.rs

@ -45,6 +45,7 @@ impl Default for LearningTrain {
pub enum ResponseType {
DetectionRunnerStarted(u64),
DetectionRunnerUpdate(String, u64), // analytic_unit id and timestamp
DetectionRunnerDetection(u64, u64), // TODO: add more info about the analytic unit and more
LearningStarted,
LearningFinished(Box<dyn AnalyticUnit + Send + Sync>),
LearningFinishedEmpty,

15
server/src/services/analytic_unit_service.rs

@ -1,5 +1,3 @@
use serde::{Deserialize, Serialize};
use serde_json::{Result, Value};
use std::sync::{Arc, Mutex};
use rusqlite::{params, Connection};
@ -11,19 +9,20 @@ use super::analytic_service::analytic_unit::{
types::{self, AnalyticUnitConfig},
};
use super::data_service::DataService;
#[derive(Clone)]
pub struct AnalyticUnitService {
connection: Arc<Mutex<Connection>>,
}
// TODO: get DataService
impl AnalyticUnitService {
pub fn new() -> anyhow::Result<AnalyticUnitService> {
// TODO: remove repetitoin with segment_service
std::fs::create_dir_all("./data").unwrap();
let conn = Connection::open("./data/analytic_units.db")?;
pub fn new(ds: &DataService) -> anyhow::Result<AnalyticUnitService> {
let conn = ds.analytic_units_connection.clone();
// TODO: add learning results field
conn.execute(
conn.lock().unwrap().execute(
"CREATE TABLE IF NOT EXISTS analytic_unit (
id TEXT PRIMARY KEY,
last_detection INTEGER,
@ -35,7 +34,7 @@ impl AnalyticUnitService {
)?;
Ok(AnalyticUnitService {
connection: Arc::new(Mutex::new(conn)),
connection: conn
})
}

23
server/src/services/data_service.rs

@ -0,0 +1,23 @@
use std::sync::{Arc, Mutex};
use rusqlite::{Connection};
pub struct DataService {
pub analytic_units_connection: Arc<Mutex<Connection>>,
pub segments_connection: Arc<Mutex<Connection>>
}
impl DataService {
pub fn new() -> anyhow::Result<DataService> {
std::fs::create_dir_all("./data").unwrap();
let analytic_units_connection = Connection::open("./data/analytic_units.db")?;
let segments_connection = Connection::open("./data/segments.db")?;
Ok(DataService {
analytic_units_connection: Arc::new(Mutex::new(analytic_units_connection)),
segments_connection: Arc::new(Mutex::new(segments_connection))
})
}
}

1
server/src/services/mod.rs

@ -1,4 +1,5 @@
pub mod analytic_service;
pub mod data_service;
pub mod analytic_unit_service;
pub mod metric_service;
pub mod segments_service;

16
server/src/services/segments_service.rs

@ -6,9 +6,13 @@ use serde::{Deserialize, Serialize};
use std::sync::{Arc, Mutex};
use super::data_service::DataService;
pub const ID_LENGTH: usize = 20;
pub type SegmentId = String;
// TODO: make logic with this enum shorter
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum SegmentType {
@ -58,19 +62,19 @@ impl Segment {
}
}
// TODO: get DataService
#[derive(Clone)]
pub struct SegmentsService {
connection: Arc<Mutex<Connection>>,
}
impl SegmentsService {
pub fn new() -> anyhow::Result<SegmentsService> {
// TODO: move it to data service
std::fs::create_dir_all("./data").unwrap();
pub fn new(ds: &DataService) -> anyhow::Result<SegmentsService> {
// TODO: add analytic_unit id as a new field
let conn = Connection::open("./data/segments.db")?;
conn.execute(
let conn = ds.segments_connection.clone();
conn.lock().unwrap().execute(
"CREATE TABLE IF NOT EXISTS segment (
id TEXT PRIMARY KEY,
start INTEGER NOT NULL,
@ -81,7 +85,7 @@ impl SegmentsService {
)?;
Ok(SegmentsService {
connection: Arc::new(Mutex::new(conn)),
connection: conn
})
}

Loading…
Cancel
Save