Fix the Sin of Man

- Migrate to ClickHouse
- Simplify serde renaming
- Simplify backfill logic
- Compartmentalize database columns

Signed-off-by: Nikolaos Karaolidis <nick@karaolidis.com>
2024-01-15 23:51:53 +00:00
parent 63a9ca950f
commit de3989ec35
45 changed files with 1120 additions and 2718 deletions
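The bulk of the deletions below are sqlx's offline query-cache files under .sqlx/; they become obsolete once the compile-time-checked query_as! calls against a PgPool are replaced by the clickhouse crate's serde-driven Client. A minimal sketch of the new access pattern follows, with an illustrative AssetRow stand-in rather than the real types; only the clickhouse calls mirror the database module introduced in this commit.

use clickhouse::{Client, Row};
use serde::{Deserialize, Serialize};

// Illustrative stand-in for the real Asset row type.
#[derive(Debug, Row, Serialize, Deserialize)]
struct AssetRow {
    symbol: String,
    class: u8, // the real types store enums numerically via serde_repr
}

async fn example(client: &Client) -> Result<(), clickhouse::error::Error> {
    // Reads: ?fields expands to the Row's columns, ? binds positionally.
    let _asset: Option<AssetRow> = client
        .query("SELECT ?fields FROM assets WHERE symbol = ?")
        .bind("AAPL")
        .fetch_optional()
        .await?;

    // Writes: rows are streamed through a buffered inserter, replacing the
    // hand-written INSERT ... ON CONFLICT statements that were needed before.
    let mut insert = client.insert("assets")?;
    insert
        .write(&AssetRow { symbol: "AAPL".into(), class: 1 })
        .await?;
    insert.end().await?;
    Ok(())
}

Deduplication of repeated inserts is left to the table engine (presumably a ReplacingMergeTree-style table, given the FINAL modifier used in select_validity_where_symbol), which is why every upsert_* helper below is a plain insert.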


@@ -1,71 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM bars WHERE asset_symbol = $1 AND timestamp > $2 ORDER BY timestamp ASC",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "timestamp",
"type_info": "Timestamptz"
},
{
"ordinal": 1,
"name": "asset_symbol",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "open",
"type_info": "Float8"
},
{
"ordinal": 3,
"name": "high",
"type_info": "Float8"
},
{
"ordinal": 4,
"name": "low",
"type_info": "Float8"
},
{
"ordinal": 5,
"name": "close",
"type_info": "Float8"
},
{
"ordinal": 6,
"name": "volume",
"type_info": "Float8"
},
{
"ordinal": 7,
"name": "num_trades",
"type_info": "Int8"
},
{
"ordinal": 8,
"name": "volume_weighted",
"type_info": "Float8"
}
],
"parameters": {
"Left": [
"Text",
"Timestamptz"
]
},
"nullable": [
false,
false,
true,
true,
true,
true,
false,
false,
false
]
},
"hash": "073ee42ebcc5a5dffd34abaf3e1f4ce3a9318721bed46666cd4cd74542bc24ba"
}


@@ -1,116 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO assets (symbol, class, exchange, trading, timestamp_added, timestamp_first, timestamp_last) VALUES ($1, $2::CLASS, $3::EXCHANGE, $4, $5, $6, $7)\n RETURNING symbol, class as \"class: Class\", exchange as \"exchange: Exchange\", trading, timestamp_added, timestamp_first, timestamp_last",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "symbol",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "class: Class",
"type_info": {
"Custom": {
"name": "class",
"kind": {
"Enum": [
"us_equity",
"crypto"
]
}
}
}
},
{
"ordinal": 2,
"name": "exchange: Exchange",
"type_info": {
"Custom": {
"name": "exchange",
"kind": {
"Enum": [
"AMEX",
"ARCA",
"BATS",
"NASDAQ",
"NYSE",
"NYSEARCA",
"OTC",
"CRYPTO"
]
}
}
}
},
{
"ordinal": 3,
"name": "trading",
"type_info": "Bool"
},
{
"ordinal": 4,
"name": "timestamp_added",
"type_info": "Timestamptz"
},
{
"ordinal": 5,
"name": "timestamp_first",
"type_info": "Timestamptz"
},
{
"ordinal": 6,
"name": "timestamp_last",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Text",
{
"Custom": {
"name": "class",
"kind": {
"Enum": [
"us_equity",
"crypto"
]
}
}
},
{
"Custom": {
"name": "exchange",
"kind": {
"Enum": [
"AMEX",
"ARCA",
"BATS",
"NASDAQ",
"NYSE",
"NYSEARCA",
"OTC",
"CRYPTO"
]
}
}
},
"Bool",
"Timestamptz",
"Timestamptz",
"Timestamptz"
]
},
"nullable": [
false,
false,
false,
false,
false,
false,
false
]
},
"hash": "503ed46c4f7f8bb7d418a101ed80712731ae3449ac49d1f278ca4a4b8a2c9497"
}


@@ -1,71 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM bars WHERE asset_symbol = $1 AND timestamp < $2 AND open IS NOT NULL AND high IS NOT NULL AND low IS NOT NULL AND close IS NOT NULL ORDER BY timestamp DESC LIMIT 1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "timestamp",
"type_info": "Timestamptz"
},
{
"ordinal": 1,
"name": "asset_symbol",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "open",
"type_info": "Float8"
},
{
"ordinal": 3,
"name": "high",
"type_info": "Float8"
},
{
"ordinal": 4,
"name": "low",
"type_info": "Float8"
},
{
"ordinal": 5,
"name": "close",
"type_info": "Float8"
},
{
"ordinal": 6,
"name": "volume",
"type_info": "Float8"
},
{
"ordinal": 7,
"name": "num_trades",
"type_info": "Int8"
},
{
"ordinal": 8,
"name": "volume_weighted",
"type_info": "Float8"
}
],
"parameters": {
"Left": [
"Text",
"Timestamptz"
]
},
"nullable": [
false,
false,
true,
true,
true,
true,
false,
false,
false
]
},
"hash": "6d9509cd482fbc022bfd157af8e59a1a32f0fbd8802cfec980e05706fb697b58"
}


@@ -1,84 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM assets WHERE symbol = $1\n RETURNING symbol, class as \"class: Class\", exchange as \"exchange: Exchange\", trading, timestamp_added, timestamp_first, timestamp_last",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "symbol",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "class: Class",
"type_info": {
"Custom": {
"name": "class",
"kind": {
"Enum": [
"us_equity",
"crypto"
]
}
}
}
},
{
"ordinal": 2,
"name": "exchange: Exchange",
"type_info": {
"Custom": {
"name": "exchange",
"kind": {
"Enum": [
"AMEX",
"ARCA",
"BATS",
"NASDAQ",
"NYSE",
"NYSEARCA",
"OTC",
"CRYPTO"
]
}
}
}
},
{
"ordinal": 3,
"name": "trading",
"type_info": "Bool"
},
{
"ordinal": 4,
"name": "timestamp_added",
"type_info": "Timestamptz"
},
{
"ordinal": 5,
"name": "timestamp_first",
"type_info": "Timestamptz"
},
{
"ordinal": 6,
"name": "timestamp_last",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false,
false,
false,
false,
false
]
},
"hash": "742fef7dab68fe792675866c57394b9515cedf85d4b7432142a859638772aaf7"
}


@@ -1,84 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT symbol, class as \"class: Class\", exchange as \"exchange: Exchange\", trading, timestamp_added, timestamp_first, timestamp_last FROM assets WHERE symbol = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "symbol",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "class: Class",
"type_info": {
"Custom": {
"name": "class",
"kind": {
"Enum": [
"us_equity",
"crypto"
]
}
}
}
},
{
"ordinal": 2,
"name": "exchange: Exchange",
"type_info": {
"Custom": {
"name": "exchange",
"kind": {
"Enum": [
"AMEX",
"ARCA",
"BATS",
"NASDAQ",
"NYSE",
"NYSEARCA",
"OTC",
"CRYPTO"
]
}
}
}
},
{
"ordinal": 3,
"name": "trading",
"type_info": "Bool"
},
{
"ordinal": 4,
"name": "timestamp_added",
"type_info": "Timestamptz"
},
{
"ordinal": 5,
"name": "timestamp_first",
"type_info": "Timestamptz"
},
{
"ordinal": 6,
"name": "timestamp_last",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false,
false,
false,
false,
false
]
},
"hash": "7adf5172b6a3c8641f2a9fee848715642e95b6d0a2982792c6ee6853d33c6c5a"
}


@@ -1,85 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE assets SET trading = $1 WHERE symbol = $2\n RETURNING symbol, class as \"class: Class\", exchange as \"exchange: Exchange\", trading, timestamp_added, timestamp_first, timestamp_last",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "symbol",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "class: Class",
"type_info": {
"Custom": {
"name": "class",
"kind": {
"Enum": [
"us_equity",
"crypto"
]
}
}
}
},
{
"ordinal": 2,
"name": "exchange: Exchange",
"type_info": {
"Custom": {
"name": "exchange",
"kind": {
"Enum": [
"AMEX",
"ARCA",
"BATS",
"NASDAQ",
"NYSE",
"NYSEARCA",
"OTC",
"CRYPTO"
]
}
}
}
},
{
"ordinal": 3,
"name": "trading",
"type_info": "Bool"
},
{
"ordinal": 4,
"name": "timestamp_added",
"type_info": "Timestamptz"
},
{
"ordinal": 5,
"name": "timestamp_first",
"type_info": "Timestamptz"
},
{
"ordinal": 6,
"name": "timestamp_last",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Bool",
"Text"
]
},
"nullable": [
false,
false,
false,
false,
false,
false,
false
]
},
"hash": "8e8173b6e769fe9299a33a6e62a1554ca40f35d2c87f3acf67c0d284c693cc8b"
}


@@ -1,94 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT symbol, class as \"class: Class\", exchange as \"exchange: Exchange\", trading, timestamp_added, timestamp_first, timestamp_last FROM assets WHERE class = $1::CLASS",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "symbol",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "class: Class",
"type_info": {
"Custom": {
"name": "class",
"kind": {
"Enum": [
"us_equity",
"crypto"
]
}
}
}
},
{
"ordinal": 2,
"name": "exchange: Exchange",
"type_info": {
"Custom": {
"name": "exchange",
"kind": {
"Enum": [
"AMEX",
"ARCA",
"BATS",
"NASDAQ",
"NYSE",
"NYSEARCA",
"OTC",
"CRYPTO"
]
}
}
}
},
{
"ordinal": 3,
"name": "trading",
"type_info": "Bool"
},
{
"ordinal": 4,
"name": "timestamp_added",
"type_info": "Timestamptz"
},
{
"ordinal": 5,
"name": "timestamp_first",
"type_info": "Timestamptz"
},
{
"ordinal": 6,
"name": "timestamp_last",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
{
"Custom": {
"name": "class",
"kind": {
"Enum": [
"us_equity",
"crypto"
]
}
}
}
]
},
"nullable": [
false,
false,
false,
false,
false,
false,
false
]
},
"hash": "9d1121766d12528f51b3352d2fe857e73ae69df9152ece55a8569698a5b13f8b"
}


@@ -1,82 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT symbol, class as \"class: Class\", exchange as \"exchange: Exchange\", trading, timestamp_added, timestamp_first, timestamp_last FROM assets",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "symbol",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "class: Class",
"type_info": {
"Custom": {
"name": "class",
"kind": {
"Enum": [
"us_equity",
"crypto"
]
}
}
}
},
{
"ordinal": 2,
"name": "exchange: Exchange",
"type_info": {
"Custom": {
"name": "exchange",
"kind": {
"Enum": [
"AMEX",
"ARCA",
"BATS",
"NASDAQ",
"NYSE",
"NYSEARCA",
"OTC",
"CRYPTO"
]
}
}
}
},
{
"ordinal": 3,
"name": "trading",
"type_info": "Bool"
},
{
"ordinal": 4,
"name": "timestamp_added",
"type_info": "Timestamptz"
},
{
"ordinal": 5,
"name": "timestamp_first",
"type_info": "Timestamptz"
},
{
"ordinal": 6,
"name": "timestamp_last",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
false,
false,
false,
false,
false
]
},
"hash": "cb8a317dff39b7624abc7e78d6a8a16634fe25ac2d8ef86084fe5b554e7ce832"
}


@@ -1,22 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO bars (timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)\n ON CONFLICT (timestamp, asset_symbol) DO NOTHING",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Timestamptz",
"Text",
"Float8",
"Float8",
"Float8",
"Float8",
"Float8",
"Int8",
"Float8"
]
},
"nullable": []
},
"hash": "e594f833a1e3435039c6e28e2c6b163c3beb6709fbefd8039b9b82f343d5a6c2"
}


@@ -1,85 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE assets SET timestamp_last = $1 WHERE symbol = $2\n RETURNING symbol, class as \"class: Class\", exchange as \"exchange: Exchange\", trading, timestamp_added, timestamp_first, timestamp_last",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "symbol",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "class: Class",
"type_info": {
"Custom": {
"name": "class",
"kind": {
"Enum": [
"us_equity",
"crypto"
]
}
}
}
},
{
"ordinal": 2,
"name": "exchange: Exchange",
"type_info": {
"Custom": {
"name": "exchange",
"kind": {
"Enum": [
"AMEX",
"ARCA",
"BATS",
"NASDAQ",
"NYSE",
"NYSEARCA",
"OTC",
"CRYPTO"
]
}
}
}
},
{
"ordinal": 3,
"name": "trading",
"type_info": "Bool"
},
{
"ordinal": 4,
"name": "timestamp_added",
"type_info": "Timestamptz"
},
{
"ordinal": 5,
"name": "timestamp_first",
"type_info": "Timestamptz"
},
{
"ordinal": 6,
"name": "timestamp_last",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Timestamptz",
"Text"
]
},
"nullable": [
false,
false,
false,
false,
false,
false,
false
]
},
"hash": "e7d8b69f3f4eede80c1ce1451e301ac5ca7ccfc1414fc39b28f66beff87e328e"
}


@@ -1,78 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO bars (timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)\n ON CONFLICT (timestamp, asset_symbol) DO UPDATE SET open = $3, high = $4, low = $5, close = $6, volume = $7, num_trades = $8, volume_weighted = $9\n RETURNING timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "timestamp",
"type_info": "Timestamptz"
},
{
"ordinal": 1,
"name": "asset_symbol",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "open",
"type_info": "Float8"
},
{
"ordinal": 3,
"name": "high",
"type_info": "Float8"
},
{
"ordinal": 4,
"name": "low",
"type_info": "Float8"
},
{
"ordinal": 5,
"name": "close",
"type_info": "Float8"
},
{
"ordinal": 6,
"name": "volume",
"type_info": "Float8"
},
{
"ordinal": 7,
"name": "num_trades",
"type_info": "Int8"
},
{
"ordinal": 8,
"name": "volume_weighted",
"type_info": "Float8"
}
],
"parameters": {
"Left": [
"Timestamptz",
"Text",
"Float8",
"Float8",
"Float8",
"Float8",
"Float8",
"Int8",
"Float8"
]
},
"nullable": [
false,
false,
true,
true,
true,
true,
false,
false,
false
]
},
"hash": "ece42c3a72569b95f1b0d77faffe71bf99e5d92a7ee1e5c13090706afde9147c"
}

.vscode/settings.json (vendored, 12 lines changed)

@@ -1,14 +1,16 @@
{
"sqltools.connections": [
{
"previewLimit": 50,
"server": "localhost",
"port": 5432,
"driver": "PostgreSQL",
"name": "QRust",
"port": 8123,
"useHTTPS": false,
"database": "qrust",
"username": "qrust",
"password": "qrust"
"enableTls": false,
"previewLimit": 50,
"password": "qrust",
"driver": "ClickHouse",
"name": "qrust"
}
]
}

Cargo.lock (generated, 1519 lines changed): file diff suppressed because it is too large.


@@ -12,34 +12,37 @@ codegen-units = 1
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
axum = "0.6.20"
axum = "0.7.4"
dotenv = "0.15.0"
sqlx = { version = "0.7.1", features = [
"uuid",
"time",
"postgres",
"runtime-tokio",
] }
tokio = { version = "1.32.0", features = [
"macros",
"rt-multi-thread",
] }
serde = "1.0.188"
log = "0.4.20"
serde_json = "1.0.105"
log4rs = "1.2.0"
time = { version = "0.3.27", features = [
"serde",
tokio-tungstenite = { version = "0.21.0", features = [
"tokio-native-tls",
"native-tls",
] }
log = "0.4.20"
log4rs = "1.2.0"
serde = "1.0.188"
serde_json = "1.0.105"
serde_repr = "0.1.18"
futures-util = "0.3.28"
reqwest = { version = "0.11.20", features = [
"json",
"serde_json",
] }
tokio-tungstenite = { version = "0.20.0", features = [
"tokio-native-tls",
"native-tls",
] }
http = "0.2.9"
http = "1.0.0"
governor = "0.6.0"
indexmap = "2.0.0"
clickhouse = { version = "0.11.6", features = [
"watch",
"time",
"uuid",
] }
uuid = "1.6.1"
time = { version = "0.3.31", features = [
"serde",
"formatting",
"macros",
"serde-well-known",
] }


@@ -3,10 +3,8 @@ RUN apk add --no-cache pkgconf musl-dev openssl-dev
WORKDIR /usr/src/qrust
ENV SQLX_OFFLINE=true
RUN mkdir src && echo "fn main() {}" > src/main.rs
COPY Cargo.toml .sqlx ./
COPY Cargo.toml ./
RUN cargo build --release
RUN rm -rf src


@@ -1,8 +1,8 @@
services:
timescaledb:
clickhouse:
extends:
file: support/timescaledb/docker-compose.yml
service: timescaledb
file: support/clickhouse/docker-compose.yml
service: clickhouse
qrust:
build:
@@ -13,10 +13,10 @@ services:
ports:
- 7878:7878
depends_on:
- timescaledb
- clickhouse
env_file:
- .env.docker
volumes:
timescaledb-data:
timescaledb-logs:
clickhouse-lib:
clickhouse-log:


@@ -1,8 +1,6 @@
use crate::types::Source;
use governor::{DefaultDirectRateLimiter, Quota, RateLimiter};
use http::HeaderMap;
use reqwest::Client;
use sqlx::{postgres::PgPoolOptions, PgPool};
use reqwest::{header::HeaderMap, Client};
use std::{env, num::NonZeroU32, sync::Arc};
use time::{format_description::FormatItem, macros::format_description};
use tokio::time::Duration;
@@ -15,8 +13,6 @@ pub const ALPACA_CRYPTO_WEBSOCKET_URL: &str = "wss://stream.data.alpaca.markets/
pub const ALPACA_TIMESTAMP_FORMAT: &[FormatItem] =
format_description!("[year]-[month]-[day]T[hour]:[minute]:[second]Z");
const NUM_CLIENTS: u32 = 10;
pub struct Config {
pub alpaca_api_key: String,
pub alpaca_api_secret: String,
@@ -24,12 +20,11 @@ pub struct Config {
pub alpaca_rate_limit: DefaultDirectRateLimiter,
pub alpaca_historical_offset: Duration,
pub alpaca_source: Source,
pub postgres_pool: PgPool,
pub clickhouse_client: clickhouse::Client,
}
impl Config {
pub async fn from_env() -> Self {
let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set.");
pub fn from_env() -> Self {
let alpaca_api_key = env::var("ALPACA_API_KEY").expect("ALPACA_API_KEY must be set.");
let alpaca_api_secret =
env::var("ALPACA_API_SECRET").expect("ALPACA_API_SECRET must be set.");
@@ -38,6 +33,12 @@ impl Config {
.parse()
.expect("ALPACA_SOURCE must be a either 'iex' or 'sip'.");
let clickhouse_url = env::var("CLICKHOUSE_URL").expect("CLICKHOUSE_URL must be set.");
let clickhouse_user = env::var("CLICKHOUSE_USER").expect("CLICKHOUSE_USER must be set.");
let clickhouse_password =
env::var("CLICKHOUSE_PASSWORD").expect("CLICKHOUSE_PASSWORD must be set.");
let clickhouse_db = env::var("CLICKHOUSE_DB").expect("CLICKHOUSE_DB must be set.");
Self {
alpaca_api_key: alpaca_api_key.clone(),
alpaca_api_secret: alpaca_api_secret.clone(),
@@ -59,15 +60,15 @@ impl Config {
Source::Sip => 0,
}),
alpaca_source,
postgres_pool: PgPoolOptions::new()
.max_connections(NUM_CLIENTS)
.connect(&database_url)
.await
.unwrap(),
clickhouse_client: clickhouse::Client::default()
.with_url(clickhouse_url)
.with_user(clickhouse_user)
.with_password(clickhouse_password)
.with_database(clickhouse_db),
}
}
pub async fn arc_from_env() -> Arc<Self> {
Arc::new(Self::from_env().await)
pub fn arc_from_env() -> Arc<Self> {
Arc::new(Self::from_env())
}
}


@@ -5,11 +5,11 @@ use crate::{
},
data::authenticate_websocket,
database,
time::{duration_until, last_minute, next_30s, next_minute, ONE_MINUTE, THIRTY_SECONDS},
time::{duration_until, last_minute, next_minute, ONE_MINUTE},
types::{
api,
api::incoming,
asset::{self, Asset},
websocket, Bar, BroadcastMessage, Class,
websocket, Bar, BarValidity, BroadcastMessage, Class,
},
};
use core::panic;
@@ -17,16 +17,12 @@ use futures_util::{
stream::{SplitSink, SplitStream},
SinkExt, StreamExt,
};
use http::StatusCode;
use indexmap::IndexMap;
use log::{error, info, warn};
use serde_json::from_str;
use std::{
collections::{HashMap, HashSet},
sync::Arc,
time::Instant,
};
use time::OffsetDateTime;
use tokio::{
net::TcpStream,
spawn,
@@ -34,15 +30,14 @@ use tokio::{
broadcast::{Receiver, Sender},
RwLock,
},
task::spawn_blocking,
time::{interval_at, sleep},
time::sleep,
};
use tokio_tungstenite::{connect_async, tungstenite::Message, MaybeTlsStream, WebSocketStream};
pub async fn run(
app_config: Arc<Config>,
class: Class,
asset_broadcast_sender: Sender<BroadcastMessage>,
broadcast_sender: Sender<BroadcastMessage>,
) {
info!("Running live data threads for {:?}.", class);
@@ -59,38 +54,35 @@ pub async fn run(
authenticate_websocket(&app_config, &mut stream, &mut sink).await;
let sink = Arc::new(RwLock::new(sink));
let backfilled = Arc::new(RwLock::new(HashMap::new()));
spawn(websocket_broadcast_handler(
spawn(broadcast_handler(
class,
sink.clone(),
asset_broadcast_sender.subscribe(),
broadcast_sender.subscribe(),
));
database::assets::select_where_class(&app_config.postgres_pool, class)
database::assets::select_where_class(&app_config.clickhouse_client, class)
.await
.into_iter()
.for_each(|asset| {
asset_broadcast_sender
broadcast_sender
.send(BroadcastMessage::Asset(asset::BroadcastMessage::Added(
asset,
)))
.unwrap();
});
spawn(null_handler(app_config.clone(), backfilled.clone()));
websocket_message_handler(app_config, class, stream, sink, backfilled).await;
websocket_handler(app_config, class, stream, sink).await;
unreachable!()
}
async fn websocket_broadcast_handler(
async fn broadcast_handler(
class: Class,
sink: Arc<RwLock<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, Message>>>,
mut asset_broadcast_receiver: Receiver<BroadcastMessage>,
mut broadcast_receiver: Receiver<BroadcastMessage>,
) {
loop {
match asset_broadcast_receiver.recv().await.unwrap() {
match broadcast_receiver.recv().await.unwrap() {
BroadcastMessage::Asset(asset::BroadcastMessage::Added(asset))
if asset.class == class =>
{
@@ -128,13 +120,14 @@ async fn websocket_broadcast_handler(
}
}
async fn websocket_message_handler(
async fn websocket_handler(
app_config: Arc<Config>,
class: Class,
mut stream: SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>,
sink: Arc<RwLock<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, Message>>>,
backfilled: Arc<RwLock<HashMap<String, bool>>>,
) {
let backfilled = Arc::new(RwLock::new(HashMap::new()));
loop {
match stream.next().await {
Some(Ok(Message::Text(data))) => {
@@ -147,7 +140,7 @@ async fn websocket_message_handler(
}
for message in parsed_data.unwrap_or_default() {
websocket_handle_text_message(&app_config, class, message, &backfilled).await;
websocket_handle_message(&app_config, class, &backfilled, message).await;
}
}
Some(Ok(Message::Ping(_))) => sink
@@ -162,11 +155,11 @@ async fn websocket_message_handler(
}
}
async fn websocket_handle_text_message(
async fn websocket_handle_message(
app_config: &Arc<Config>,
class: Class,
message: websocket::data::incoming::Message,
backfilled: &Arc<RwLock<HashMap<String, bool>>>,
message: websocket::data::incoming::Message,
) {
match message {
websocket::data::incoming::Message::Subscription(subscription_message) => {
@@ -185,20 +178,35 @@ async fn websocket_handle_text_message(
let deleted_assets = old_assets.difference(&new_assets).collect::<HashSet<_>>();
for asset_symbol in &added_assets {
let asset =
database::assets::select_where_symbol(&app_config.postgres_pool, asset_symbol)
let asset = database::assets::select_where_symbol(
&app_config.clickhouse_client,
asset_symbol,
)
.await
.unwrap();
backfilled.write().await.insert(asset.symbol.clone(), false);
let bar_validity = BarValidity::none(asset.symbol.clone());
database::bars::upsert_validity(&app_config.clickhouse_client, &bar_validity).await;
spawn(backfill(
app_config.clone(),
asset.clone(),
backfilled.clone(),
asset.clone(),
));
}
for asset_symbol in &deleted_assets {
database::bars::delete_validity_where_symbol(
&app_config.clickhouse_client,
asset_symbol,
)
.await;
database::bars::delete_where_symbol(&app_config.clickhouse_client, asset_symbol)
.await;
backfilled.write().await.remove(*asset_symbol);
}
@@ -207,156 +215,45 @@ async fn websocket_handle_text_message(
class, added_assets, deleted_assets
);
}
websocket::data::incoming::Message::Bars(bar_message) => {
websocket::data::incoming::Message::Bars(bar_message)
| websocket::data::incoming::Message::UpdatedBars(bar_message) => {
let bar = Bar::from(bar_message);
info!(
"websocket::Incoming bar for {}: {}",
bar.asset_symbol, bar.timestamp
);
database::bars::upsert(
&app_config.postgres_pool,
&bar,
backfilled.read().await[&bar.asset_symbol],
)
.await;
}
websocket::data::incoming::Message::UpdatedBars(bar_message) => {
let bar = Bar::from(bar_message);
info!(
"websocket::Incoming bar for {}: {}",
bar.asset_symbol, bar.timestamp
);
info!("websocket::Incoming bar for {}: {}", bar.symbol, bar.time);
let transaction = app_config.postgres_pool.begin().await.unwrap();
let backfilled_asset_symbol = backfilled.read().await[&bar.asset_symbol];
database::bars::upsert(&app_config.postgres_pool, &bar, backfilled_asset_symbol).await;
if backfilled_asset_symbol {
database::assets::update_timestamp_last_where_symbol(
&app_config.postgres_pool,
&bar.asset_symbol,
&bar.timestamp,
)
.await;
database::bars::upsert(&app_config.clickhouse_client, &bar).await;
if backfilled.read().await[&bar.symbol] {
database::bars::upsert_validity(&app_config.clickhouse_client, &bar.into()).await;
}
transaction.commit().await.unwrap();
}
_ => {}
}
}
#[allow(clippy::significant_drop_in_scrutinee)]
async fn null_handler(app_config: Arc<Config>, backfilled: Arc<RwLock<HashMap<String, bool>>>) {
#[derive(PartialEq)]
enum NullHandlerState {
Bars,
UpdatedBars,
}
let next_30s = next_30s();
let mut state = if next_30s.unix_timestamp() % 30 == 0 {
NullHandlerState::Bars
} else {
NullHandlerState::UpdatedBars
};
let mut interval = interval_at(
(Instant::now() + duration_until(next_30s)).into(),
THIRTY_SECONDS,
);
loop {
interval.tick().await;
let timestamp = last_minute() - ONE_MINUTE;
let backfilled = backfilled.read().await;
for asset_symbol in backfilled.keys().cloned() {
let bar = Bar::empty(timestamp, asset_symbol);
let transaction = app_config.postgres_pool.begin().await.unwrap();
database::bars::insert_or_skip(
&app_config.postgres_pool,
&bar,
backfilled[&bar.asset_symbol],
)
.await;
if backfilled[&bar.asset_symbol] && state == NullHandlerState::Bars {
database::assets::update_timestamp_last_where_symbol(
&app_config.postgres_pool,
&bar.asset_symbol,
&bar.timestamp,
)
.await;
}
transaction.commit().await.unwrap();
}
state = match state {
NullHandlerState::Bars => NullHandlerState::UpdatedBars,
NullHandlerState::UpdatedBars => NullHandlerState::Bars,
};
websocket::data::incoming::Message::Success(_) => {}
}
}
pub async fn backfill(
app_config: Arc<Config>,
asset: Asset,
backfilled: Arc<RwLock<HashMap<String, bool>>>,
asset: Asset,
) {
info!("Backfilling historical data for {}...", asset.symbol);
let bar_validity =
database::bars::select_validity_where_symbol(&app_config.clickhouse_client, &asset.symbol)
.await
.unwrap();
let task_run_offsetdatetime = next_minute() + app_config.alpaca_historical_offset;
let fetch_from = asset.timestamp_last + ONE_MINUTE;
let fetch_until = task_run_offsetdatetime - app_config.alpaca_historical_offset - ONE_MINUTE;
let fetch_from = bar_validity.time_last + ONE_MINUTE;
let fetch_until = last_minute();
if fetch_from > fetch_until {
return;
}
let wait_duration = task_run_offsetdatetime - OffsetDateTime::now_utc();
if wait_duration.is_positive() {
sleep(wait_duration.unsigned_abs()).await;
}
info!("Queing historical data backfill for {}...", asset.symbol);
let task_run_offsetdatetime = next_minute() + app_config.alpaca_historical_offset;
sleep(duration_until(task_run_offsetdatetime)).await;
let bars = backfill_bars_from_api(&app_config, &asset, fetch_from, fetch_until).await;
let transaction = app_config.postgres_pool.begin().await.unwrap();
database::bars::upsert_batch(&app_config.postgres_pool, &bars, true).await;
database::assets::update_timestamp_last_where_symbol(
&app_config.postgres_pool,
&asset.symbol,
&fetch_until,
)
.await;
derive_recent_nulls(&app_config, &asset, &fetch_until, &backfilled).await;
transaction.commit().await.unwrap();
info!("Backfilled historical data for {}.", asset.symbol);
}
fn generate_per_minute_bars(
from: OffsetDateTime,
until: OffsetDateTime,
asset: &Asset,
) -> IndexMap<OffsetDateTime, Bar> {
let mut bars = IndexMap::new();
let mut current_time = from;
while current_time <= until {
bars.insert(current_time, Bar::empty(current_time, asset.symbol.clone()));
current_time += ONE_MINUTE;
}
bars
}
async fn backfill_bars_from_api(
app_config: &Arc<Config>,
asset: &Asset,
from: OffsetDateTime,
until: OffsetDateTime,
) -> Vec<Bar> {
let asset_clone = asset.clone();
let mut bars = spawn_blocking(move || generate_per_minute_bars(from, until, &asset_clone))
.await
.unwrap();
info!("Running historical data backfill for {}...", asset.symbol);
let mut bars = Vec::new();
let mut next_page_token = None;
loop {
let request = app_config
.alpaca_client
@@ -369,11 +266,17 @@ async fn backfill_bars_from_api(
("timeframe", &String::from("1Min")),
(
"start",
&from.format(ALPACA_TIMESTAMP_FORMAT).unwrap().to_string(),
&fetch_from
.format(ALPACA_TIMESTAMP_FORMAT)
.unwrap()
.to_string(),
),
(
"end",
&until.format(ALPACA_TIMESTAMP_FORMAT).unwrap().to_string(),
&fetch_until
.format(ALPACA_TIMESTAMP_FORMAT)
.unwrap()
.to_string(),
),
("limit", &String::from("10000")),
("page_token", &next_page_token.clone().unwrap_or_default()),
@@ -381,17 +284,14 @@ async fn backfill_bars_from_api(
app_config.alpaca_rate_limit.until_ready().await;
let response = request.send().await.unwrap();
let mut response = if response.status() == StatusCode::OK {
response
.json::<api::incoming::bar::Message>()
.await
.unwrap()
let mut response = if response.status() == reqwest::StatusCode::OK {
response.json::<incoming::bar::Message>().await.unwrap()
} else {
error!(
"Failed to backfill historical data for {} from {} to {}: {}",
asset.symbol,
from,
until,
fetch_from,
fetch_until,
response.text().await.unwrap()
);
break;
@@ -403,7 +303,7 @@ async fn backfill_bars_from_api(
.unwrap_or_default()
.unwrap_or_default()
{
bars.insert(bar.timestamp, Bar::from((bar, asset.symbol.clone())));
bars.push(Bar::from((bar, asset.symbol.clone())));
}
if response.next_page_token.is_none() {
@@ -412,29 +312,13 @@ async fn backfill_bars_from_api(
next_page_token = response.next_page_token;
}
bars.into_values().collect::<Vec<Bar>>()
}
database::bars::upsert_batch(&app_config.clickhouse_client, &bars).await;
if let Some(last_bar) = bars.last() {
database::bars::upsert_validity(&app_config.clickhouse_client, &last_bar.clone().into())
.await;
}
#[allow(clippy::significant_drop_tightening)]
async fn derive_recent_nulls(
app_config: &Arc<Config>,
asset: &Asset,
from: &OffsetDateTime,
backfilled: &Arc<RwLock<HashMap<String, bool>>>,
) {
let mut backfilled = backfilled.write().await;
let bars = database::bars::select_where_symbol_where_timestamp_larger_than(
&app_config.postgres_pool,
&asset.symbol,
from,
)
.await;
database::bars::upsert_batch(&app_config.postgres_pool, &bars, true).await;
database::assets::update_timestamp_last_where_symbol(
&app_config.postgres_pool,
&asset.symbol,
&bars.last().unwrap().timestamp,
)
.await;
backfilled.insert(asset.symbol.clone(), true);
backfilled.write().await.insert(asset.symbol.clone(), true);
info!("Backfilled historical data for {}.", asset.symbol);
}


@@ -20,11 +20,9 @@ async fn authenticate_websocket(
Some(Ok(Message::Text(data)))
if from_str::<Vec<websocket::data::incoming::Message>>(&data)
.unwrap()
.get(0)
.first()
== Some(&websocket::data::incoming::Message::Success(
websocket::data::incoming::success::Message {
msg: websocket::data::incoming::success::MessageType::Connected,
},
websocket::data::incoming::success::Message::Connected,
)) => {}
_ => panic!(),
}
@@ -45,11 +43,9 @@ async fn authenticate_websocket(
Some(Ok(Message::Text(data)))
if from_str::<Vec<websocket::data::incoming::Message>>(&data)
.unwrap()
.get(0)
.first()
== Some(&websocket::data::incoming::Message::Success(
websocket::data::incoming::success::Message {
msg: websocket::data::incoming::success::MessageType::Authenticated,
},
websocket::data::incoming::success::Message::Authenticated,
)) => {}
_ => panic!(),
};


@@ -1,92 +1,43 @@
use crate::types::{Asset, Class, Exchange};
use sqlx::{query_as, PgPool};
use std::convert::Into;
use time::OffsetDateTime;
use crate::types::{Asset, Class};
use clickhouse::Client;
pub async fn select(postgres_pool: &PgPool) -> Vec<Asset> {
query_as!(
Asset,
r#"SELECT symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last FROM assets"#
)
.fetch_all(postgres_pool)
pub async fn select(clickhouse_client: &Client) -> Vec<Asset> {
clickhouse_client
.query("SELECT ?fields FROM assets")
.fetch_all::<Asset>()
.await
.unwrap()
}
pub async fn select_where_class(postgres_pool: &PgPool, class: Class) -> Vec<Asset> {
query_as!(
Asset,
r#"SELECT symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last FROM assets WHERE class = $1::CLASS"#,
class as Class
)
.fetch_all(postgres_pool)
pub async fn select_where_class(clickhouse_client: &Client, class: Class) -> Vec<Asset> {
clickhouse_client
.query("SELECT ?fields FROM assets WHERE class = ?")
.bind(class)
.fetch_all::<Asset>()
.await
.unwrap()
}
pub async fn select_where_symbol(postgres_pool: &PgPool, symbol: &str) -> Option<Asset> {
query_as!(
Asset,
r#"SELECT symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last FROM assets WHERE symbol = $1"#,
symbol
)
.fetch_optional(postgres_pool)
pub async fn select_where_symbol(clickhouse_client: &Client, symbol: &str) -> Option<Asset> {
clickhouse_client
.query("SELECT ?fields FROM assets WHERE symbol = ?")
.bind(symbol)
.fetch_optional::<Asset>()
.await
.unwrap()
}
pub async fn insert(postgres_pool: &PgPool, asset: &Asset) -> Asset {
query_as!(
Asset,
r#"INSERT INTO assets (symbol, class, exchange, trading, timestamp_added, timestamp_first, timestamp_last) VALUES ($1, $2::CLASS, $3::EXCHANGE, $4, $5, $6, $7)
RETURNING symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last"#,
asset.symbol, asset.class as Class, asset.exchange as Exchange, asset.trading, asset.timestamp_added, asset.timestamp_first, asset.timestamp_last
)
.fetch_one(postgres_pool)
.await
.unwrap()
pub async fn insert(clickhouse_client: &Client, asset: &Asset) {
let mut insert = clickhouse_client.insert("assets").unwrap();
insert.write(asset).await.unwrap();
insert.end().await.unwrap();
}
pub async fn update_trading_where_symbol(
postgres_pool: &PgPool,
symbol: &str,
trading: &bool,
) -> Option<Asset> {
query_as!(
Asset,
r#"UPDATE assets SET trading = $1 WHERE symbol = $2
RETURNING symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last"#,
trading, symbol
)
.fetch_optional(postgres_pool)
pub async fn delete_where_symbol(clickhouse_client: &Client, symbol: &str) {
clickhouse_client
.query("DELETE FROM assets WHERE symbol = ?")
.bind(symbol)
.execute()
.await
.unwrap()
}
pub async fn update_timestamp_last_where_symbol(
postgres_pool: &PgPool,
symbol: &str,
timestamp_last: &OffsetDateTime,
) -> Option<Asset> {
query_as!(
Asset,
r#"UPDATE assets SET timestamp_last = $1 WHERE symbol = $2
RETURNING symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last"#,
timestamp_last, symbol
)
.fetch_optional(postgres_pool)
.await
.unwrap()
}
pub async fn delete_where_symbol(postgres_pool: &PgPool, symbol: &str) -> Option<Asset> {
query_as!(
Asset,
r#"DELETE FROM assets WHERE symbol = $1
RETURNING symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last"#,
symbol
)
.fetch_optional(postgres_pool)
.await
.unwrap()
.unwrap();
}


@@ -1,161 +1,52 @@
use crate::types::Bar;
use sqlx::{query_as, PgPool, Postgres};
use time::OffsetDateTime;
use crate::types::{Bar, BarValidity};
use clickhouse::Client;
pub async fn select_not_null_where_symbol_where_timestamp_smaller_than_order_by_timestamp_desc_limit_one(
postgres_pool: &PgPool,
symbol: &str,
timestamp: &OffsetDateTime,
) -> Bar {
query_as!(
Bar,
r#"SELECT * FROM bars WHERE asset_symbol = $1 AND timestamp < $2 AND open IS NOT NULL AND high IS NOT NULL AND low IS NOT NULL AND close IS NOT NULL ORDER BY timestamp DESC LIMIT 1"#,
symbol,
timestamp
)
.fetch_one(postgres_pool)
.await
.unwrap()
pub async fn upsert(clickhouse_client: &Client, bar: &Bar) {
let mut insert = clickhouse_client.insert("bars").unwrap();
insert.write(bar).await.unwrap();
insert.end().await.unwrap();
}
pub async fn select_where_symbol_where_timestamp_larger_than(
postgres_pool: &PgPool,
symbol: &str,
timestamp: &OffsetDateTime,
) -> Vec<Bar> {
query_as!(
Bar,
r#"SELECT * FROM bars WHERE asset_symbol = $1 AND timestamp > $2 ORDER BY timestamp ASC"#,
symbol,
timestamp
)
.fetch_all(postgres_pool)
.await
.unwrap()
}
pub async fn upsert(postgres_pool: &PgPool, bar: &Bar, backfill: bool) -> Bar {
let mut bar = bar.clone();
if backfill
&& (bar.open.is_none() || bar.high.is_none() || bar.low.is_none() || bar.close.is_none())
{
let filled_bar = select_not_null_where_symbol_where_timestamp_smaller_than_order_by_timestamp_desc_limit_one(
postgres_pool,
&bar.asset_symbol,
&bar.timestamp,
).await;
bar.merge_empty(&filled_bar);
pub async fn upsert_batch(clickhouse_client: &Client, bars: &[Bar]) {
let mut insert = clickhouse_client.insert("bars").unwrap();
for bar in bars {
insert.write(bar).await.unwrap();
}
query_as!(
Bar,
r#"INSERT INTO bars (timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (timestamp, asset_symbol) DO UPDATE SET open = $3, high = $4, low = $5, close = $6, volume = $7, num_trades = $8, volume_weighted = $9
RETURNING timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted"#,
bar.timestamp, bar.asset_symbol, bar.open, bar.high, bar.low, bar.close, bar.volume, bar.num_trades, bar.volume_weighted
)
.fetch_one(postgres_pool)
.await
.unwrap()
insert.end().await.unwrap();
}
pub async fn insert_or_skip(postgres_pool: &PgPool, bar: &Bar, backfill: bool) {
let mut bar = bar.clone();
if backfill
&& (bar.open.is_none() || bar.high.is_none() || bar.low.is_none() || bar.close.is_none())
{
let filled_bar = select_not_null_where_symbol_where_timestamp_smaller_than_order_by_timestamp_desc_limit_one(
postgres_pool,
&bar.asset_symbol,
&bar.timestamp,
).await;
bar.merge_empty(&filled_bar);
}
query_as!(
Bar,
r#"INSERT INTO bars (timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (timestamp, asset_symbol) DO NOTHING"#,
bar.timestamp, bar.asset_symbol, bar.open, bar.high, bar.low, bar.close, bar.volume, bar.num_trades, bar.volume_weighted
)
.execute(postgres_pool)
pub async fn delete_where_symbol(clickhouse_client: &Client, symbol: &str) {
clickhouse_client
.query("DELETE FROM bars WHERE symbol = ?")
.bind(symbol)
.execute()
.await
.unwrap();
}
pub async fn upsert_batch(postgres_pool: &PgPool, bars: &[Bar], backfill: bool) -> Vec<Bar> {
let mut bars = bars.to_vec();
if bars.is_empty() {
return bars;
}
if backfill
&& (bars[0].open.is_none()
|| bars[0].high.is_none()
|| bars[0].low.is_none()
|| bars[0].close.is_none())
{
let filled_bar = select_not_null_where_symbol_where_timestamp_smaller_than_order_by_timestamp_desc_limit_one(
postgres_pool,
&bars[0].asset_symbol,
&bars[0].timestamp,
).await;
bars[0].merge_empty(&filled_bar);
}
let mut timestamp = Vec::with_capacity(bars.len());
let mut asset_symbol = Vec::with_capacity(bars.len());
let mut open = Vec::with_capacity(bars.len());
let mut high = Vec::with_capacity(bars.len());
let mut low = Vec::with_capacity(bars.len());
let mut close = Vec::with_capacity(bars.len());
let mut volume = Vec::with_capacity(bars.len());
let mut num_trades = Vec::with_capacity(bars.len());
let mut volume_weighted = Vec::with_capacity(bars.len());
let mut last_filled_bar = bars[0].clone();
for mut bar in bars {
if backfill {
if bar.open.is_none() || bar.high.is_none() || bar.low.is_none() || bar.close.is_none()
{
bar.merge_empty(&last_filled_bar);
} else {
last_filled_bar = bar.clone();
}
}
timestamp.push(bar.timestamp);
asset_symbol.push(bar.asset_symbol.clone());
open.push(bar.open);
high.push(bar.high);
low.push(bar.low);
close.push(bar.close);
volume.push(bar.volume);
num_trades.push(bar.num_trades);
volume_weighted.push(bar.volume_weighted);
}
// No type-safety here because of NULLABLE bulk insert
query_as::<Postgres, Bar>(
r#"INSERT INTO bars (timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted)
SELECT * FROM UNNEST($1::timestamptz[], $2::text[], $3::float8[], $4::float8[], $5::float8[], $6::float8[], $7::float8[], $8::int8[], $9::float8[])
ON CONFLICT (timestamp, asset_symbol) DO UPDATE SET open = EXCLUDED.open, high = EXCLUDED.high, low = EXCLUDED.low, close = EXCLUDED.close, volume = EXCLUDED.volume, num_trades = EXCLUDED.num_trades, volume_weighted = EXCLUDED.volume_weighted
RETURNING timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted"#,
)
.bind(timestamp)
.bind(asset_symbol)
.bind(open)
.bind(high)
.bind(low)
.bind(close)
.bind(volume)
.bind(num_trades)
.bind(volume_weighted)
.fetch_all(postgres_pool)
pub async fn select_validity_where_symbol(
clickhouse_client: &Client,
symbol: &str,
) -> Option<BarValidity> {
clickhouse_client
.query("SELECT ?fields FROM bars_validity FINAL WHERE symbol = ?")
.bind(symbol)
.fetch_optional::<BarValidity>()
.await
.unwrap()
}
pub async fn upsert_validity(clickhouse_client: &Client, bar_validity: &BarValidity) {
let mut insert = clickhouse_client.insert("bars_validity").unwrap();
insert.write(bar_validity).await.unwrap();
insert.end().await.unwrap();
}
pub async fn delete_validity_where_symbol(clickhouse_client: &Client, symbol: &str) {
clickhouse_client
.query("DELETE FROM bars_validity WHERE symbol = ?")
.bind(symbol)
.execute()
.await
.unwrap();
}


@@ -11,35 +11,32 @@ mod types;
use config::Config;
use dotenv::dotenv;
use log4rs::config::Deserializers;
use sqlx::error::BoxDynError;
use std::error::Error;
use tokio::{spawn, sync::broadcast};
use types::{BroadcastMessage, Class};
#[tokio::main]
async fn main() -> Result<(), BoxDynError> {
async fn main() -> Result<(), Box<dyn Error>> {
dotenv().ok();
log4rs::init_file("log4rs.yaml", Deserializers::default())?;
let app_config = Config::arc_from_env().await;
let app_config = Config::arc_from_env();
let mut threads = Vec::new();
let (asset_broadcast_sender, _) = broadcast::channel::<BroadcastMessage>(100);
let (broadcast_sender, _) = broadcast::channel::<BroadcastMessage>(100);
threads.push(spawn(data::market::run(
app_config.clone(),
Class::UsEquity,
asset_broadcast_sender.clone(),
broadcast_sender.clone(),
)));
threads.push(spawn(data::market::run(
app_config.clone(),
Class::Crypto,
asset_broadcast_sender.clone(),
broadcast_sender.clone(),
)));
threads.push(spawn(routes::run(
app_config.clone(),
asset_broadcast_sender,
)));
threads.push(spawn(routes::run(app_config.clone(), broadcast_sender)));
for thread in threads {
thread.await?;


@@ -1,29 +1,28 @@
use crate::config::{
Config, ALPACA_ASSET_API_URL, ALPACA_CRYPTO_DATA_URL, ALPACA_STOCK_DATA_URL,
ALPACA_TIMESTAMP_FORMAT,
};
use crate::config::{Config, ALPACA_ASSET_API_URL};
use crate::database;
use crate::types::Class;
use crate::types::{api::incoming, asset, Asset, BroadcastMessage, Status};
use axum::{extract::Path, http::StatusCode, Extension, Json};
use crate::types::{
api::incoming::{self, asset::Status},
asset, Asset, BroadcastMessage,
};
use axum::{extract::Path, Extension, Json};
use http::StatusCode;
use log::info;
use serde::Deserialize;
use std::sync::Arc;
use time::OffsetDateTime;
use tokio::sync::broadcast::Sender;
pub async fn get_all(
Extension(app_config): Extension<Arc<Config>>,
) -> Result<(StatusCode, Json<Vec<Asset>>), StatusCode> {
let assets = database::assets::select(&app_config.postgres_pool).await;
Ok((StatusCode::OK, Json(assets)))
}
pub async fn get(
Extension(app_config): Extension<Arc<Config>>,
) -> Result<(StatusCode, Json<Vec<Asset>>), StatusCode> {
let assets = database::assets::select(&app_config.clickhouse_client).await;
Ok((StatusCode::OK, Json(assets)))
}
pub async fn get_where_symbol(
Extension(app_config): Extension<Arc<Config>>,
Path(symbol): Path<String>,
) -> Result<(StatusCode, Json<Asset>), StatusCode> {
let asset = database::assets::select_where_symbol(&app_config.postgres_pool, &symbol).await;
let asset = database::assets::select_where_symbol(&app_config.clickhouse_client, &symbol).await;
asset.map_or(Err(StatusCode::NOT_FOUND), |asset| {
Ok((StatusCode::OK, Json(asset)))
})
@@ -32,15 +31,14 @@ pub async fn get(
#[derive(Deserialize)]
pub struct AddAssetRequest {
symbol: String,
trading: Option<bool>,
}
pub async fn add(
Extension(app_config): Extension<Arc<Config>>,
Extension(asset_broadcast_sender): Extension<Sender<BroadcastMessage>>,
Extension(broadcast_sender): Extension<Sender<BroadcastMessage>>,
Json(request): Json<AddAssetRequest>,
) -> Result<(StatusCode, Json<Asset>), StatusCode> {
if database::assets::select_where_symbol(&app_config.postgres_pool, &request.symbol)
if database::assets::select_where_symbol(&app_config.clickhouse_client, &request.symbol)
.await
.is_some()
{
@@ -53,66 +51,25 @@ pub async fn add(
.get(&format!("{}/{}", ALPACA_ASSET_API_URL, request.symbol))
.send()
.await
.map_err(|e| match e.status() {
Some(StatusCode::NOT_FOUND) => StatusCode::NOT_FOUND,
_ => panic!(),
})?;
.map_err(|e| {
if e.status() == Some(reqwest::StatusCode::NOT_FOUND) {
StatusCode::NOT_FOUND
} else {
panic!()
}
})
.unwrap();
let asset = asset.json::<incoming::Asset>().await.unwrap();
let asset = asset.json::<incoming::asset::Asset>().await.unwrap();
if asset.status != Status::Active || !asset.tradable || !asset.fractionable {
return Err(StatusCode::FORBIDDEN);
}
let mut earliest_bar_request = app_config
.alpaca_client
.get(match asset.class {
Class::UsEquity => ALPACA_STOCK_DATA_URL,
Class::Crypto => ALPACA_CRYPTO_DATA_URL,
})
.query(&[
("symbols", &asset.symbol),
("timeframe", &String::from("1Min")),
(
"start",
&OffsetDateTime::UNIX_EPOCH
.format(ALPACA_TIMESTAMP_FORMAT)
.unwrap(),
),
("limit", &String::from("1")),
]);
let asset = Asset::from(asset);
database::assets::insert(&app_config.clickhouse_client, &asset).await;
if asset.class == Class::UsEquity {
earliest_bar_request =
earliest_bar_request.query(&[("feed", &app_config.alpaca_source.to_string())]);
}
let earliest_bar = earliest_bar_request
.send()
.await
.unwrap()
.json::<incoming::bar::Message>()
.await
.unwrap();
let earliest_bar = earliest_bar
.bars
.get(&asset.symbol)
.ok_or(StatusCode::NOT_FOUND)?
.as_ref()
.ok_or(StatusCode::NOT_FOUND)?
.first()
.ok_or(StatusCode::NOT_FOUND)?;
let asset = Asset::from((
asset,
request.trading.unwrap_or(false),
earliest_bar.timestamp,
));
database::assets::insert(&app_config.postgres_pool, &asset).await;
asset_broadcast_sender
broadcast_sender
.send(BroadcastMessage::Asset(asset::BroadcastMessage::Added(
asset.clone(),
)))
@@ -122,50 +79,24 @@ pub async fn add(
Ok((StatusCode::CREATED, Json(asset)))
}
#[allow(dead_code)]
#[derive(Deserialize)]
pub struct UpdateAssetRequest {
trading: bool,
}
pub async fn update(
Extension(app_config): Extension<Arc<Config>>,
Extension(asset_broadcast_sender): Extension<Sender<BroadcastMessage>>,
Path(symbol): Path<String>,
Json(request): Json<UpdateAssetRequest>,
) -> Result<(StatusCode, Json<Asset>), StatusCode> {
let asset = database::assets::update_trading_where_symbol(
&app_config.postgres_pool,
&symbol,
&request.trading,
)
.await;
asset.map_or(Err(StatusCode::NOT_FOUND), |asset| {
asset_broadcast_sender
.send(BroadcastMessage::Asset(asset::BroadcastMessage::Updated(
asset.clone(),
)))
.unwrap();
info!("Updated asset {}.", symbol);
Ok((StatusCode::OK, Json(asset)))
})
}
pub async fn delete(
Extension(app_config): Extension<Arc<Config>>,
Extension(asset_broadcast_sender): Extension<Sender<BroadcastMessage>>,
Extension(broadcast_sender): Extension<Sender<BroadcastMessage>>,
Path(symbol): Path<String>,
) -> Result<StatusCode, StatusCode> {
let asset = database::assets::delete_where_symbol(&app_config.postgres_pool, &symbol).await;
let asset = database::assets::select_where_symbol(&app_config.clickhouse_client, &symbol)
.await
.ok_or(StatusCode::NOT_FOUND)
.unwrap();
asset.map_or(Err(StatusCode::NOT_FOUND), |asset| {
asset_broadcast_sender
broadcast_sender
.send(BroadcastMessage::Asset(asset::BroadcastMessage::Deleted(
asset,
)))
.unwrap();
database::assets::delete_where_symbol(&app_config.clickhouse_client, &symbol).await;
info!("Deleted asset {}.", symbol);
Ok(StatusCode::NO_CONTENT)
})
}


@@ -1,30 +1,26 @@
use crate::{config::Config, types::BroadcastMessage};
use axum::{
routing::{delete, get, post},
Extension, Router, Server,
serve, Extension, Router,
};
use log::info;
use std::{net::SocketAddr, sync::Arc};
use tokio::sync::broadcast::Sender;
use tokio::{net::TcpListener, sync::broadcast::Sender};
pub mod assets;
pub async fn run(app_config: Arc<Config>, asset_broadcast_sender: Sender<BroadcastMessage>) {
pub async fn run(app_config: Arc<Config>, broadcast_sender: Sender<BroadcastMessage>) {
let app = Router::new()
.route("/assets", get(assets::get_all))
.route("/assets/:symbol", get(assets::get))
.route("/assets", get(assets::get))
.route("/assets/:symbol", get(assets::get_where_symbol))
.route("/assets", post(assets::add))
.route("/assets/:symbol", post(assets::update))
.route("/assets/:symbol", delete(assets::delete))
.layer(Extension(app_config))
.layer(Extension(asset_broadcast_sender));
.layer(Extension(broadcast_sender));
let addr = SocketAddr::from(([0, 0, 0, 0], 7878));
let listener = TcpListener::bind(addr).await.unwrap();
info!("Listening on {}.", addr);
Server::bind(&addr)
.serve(app.into_make_service())
.await
.unwrap();
serve(listener, app).await.unwrap();
unreachable!()
}


@@ -1,7 +1,6 @@
use std::time::Duration;
use time::OffsetDateTime;
pub const THIRTY_SECONDS: Duration = Duration::from_secs(30);
pub const ONE_MINUTE: Duration = Duration::from_secs(60);
pub fn last_minute() -> OffsetDateTime {
@@ -13,18 +12,8 @@ pub fn next_minute() -> OffsetDateTime {
last_minute() + ONE_MINUTE
}
pub fn last_30s() -> OffsetDateTime {
let now_timestamp = OffsetDateTime::now_utc().unix_timestamp();
OffsetDateTime::from_unix_timestamp(now_timestamp - now_timestamp % 30).unwrap()
}
pub fn next_30s() -> OffsetDateTime {
last_30s() + THIRTY_SECONDS
}
pub fn duration_until(time: OffsetDateTime) -> Duration {
let now = OffsetDateTime::now_utc();
let duration = time - now;
let duration = time - OffsetDateTime::now_utc();
if duration.is_positive() {
duration.unsigned_abs()


@@ -1,9 +1,51 @@
#![allow(clippy::struct_excessive_bools)]
use crate::types::{Class, Exchange, Status};
use serde::Deserialize;
use crate::types::api::impl_from_enum;
use serde::{Deserialize, Serialize};
#[derive(Deserialize)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum Class {
UsEquity,
Crypto,
}
impl_from_enum!(crate::types::Class, Class, UsEquity, Crypto);
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
pub enum Exchange {
Amex,
Arca,
Bats,
Nyse,
Nasdaq,
Nysearca,
Otc,
Crypto,
}
impl_from_enum!(
crate::types::Exchange,
Exchange,
Amex,
Arca,
Bats,
Nyse,
Nasdaq,
Nysearca,
Otc,
Crypto
);
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Status {
Active,
Inactive,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Asset {
pub id: String,
pub class: Class,
@@ -19,3 +61,14 @@ pub struct Asset {
pub maintenance_margin_requirement: Option<f32>,
pub attributes: Option<Vec<String>>,
}
impl From<Asset> for crate::types::Asset {
fn from(item: Asset) -> Self {
Self {
symbol: item.symbol,
class: item.class.into(),
exchange: item.exchange.into(),
time_added: time::OffsetDateTime::now_utc(),
}
}
}


@@ -1,12 +1,12 @@
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use time::OffsetDateTime;
#[derive(Deserialize)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Bar {
#[serde(rename = "t")]
#[serde(with = "time::serde::rfc3339")]
pub timestamp: OffsetDateTime,
pub time: OffsetDateTime,
#[serde(rename = "o")]
pub open: f64,
#[serde(rename = "h")]
@@ -16,14 +16,30 @@ pub struct Bar {
#[serde(rename = "c")]
pub close: f64,
#[serde(rename = "v")]
pub volume: f64,
pub volume: i64,
#[serde(rename = "n")]
pub num_trades: i64,
pub trades: i64,
#[serde(rename = "vw")]
pub volume_weighted: f64,
pub vwap: f64,
}
#[derive(Deserialize)]
impl From<(Bar, String)> for crate::types::Bar {
fn from((bar, symbol): (Bar, String)) -> Self {
Self {
time: bar.time,
symbol,
open: bar.open,
high: bar.high,
low: bar.low,
close: bar.close,
volume: bar.volume,
trades: bar.trades,
vwap: bar.vwap,
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Message {
pub bars: HashMap<String, Option<Vec<Bar>>>,
pub next_page_token: Option<String>,


@@ -1,29 +0,0 @@
use serde::{Deserialize, Deserializer};
use time::{macros::format_description, Date, Time};
#[derive(Deserialize)]
pub struct CalendarDate {
#[serde(deserialize_with = "deserialize_date")]
pub date: Date,
#[serde(deserialize_with = "deserialize_time")]
pub open: Time,
#[serde(deserialize_with = "deserialize_time")]
pub close: Time,
}
fn deserialize_date<'de, D>(deserializer: D) -> Result<Date, D::Error>
where
D: Deserializer<'de>,
{
let date_str = String::deserialize(deserializer)?;
Date::parse(&date_str, format_description!("[year]-[month]-[day]"))
.map_err(serde::de::Error::custom)
}
fn deserialize_time<'de, D>(deserializer: D) -> Result<Time, D::Error>
where
D: Deserializer<'de>,
{
let time_str = String::deserialize(deserializer)?;
Time::parse(&time_str, format_description!("[hour]:[minute]")).map_err(serde::de::Error::custom)
}


@@ -1,7 +1,2 @@
pub mod asset;
pub mod bar;
pub mod calendar_date;
pub use asset::Asset;
pub use bar::Bar;
pub use calendar_date::CalendarDate;


@@ -1 +1,23 @@
pub mod incoming;
macro_rules! impl_from_enum {
($source:ty, $target:ty, $( $variant:ident ),* ) => {
impl From<$source> for $target {
fn from(item: $source) -> Self {
match item {
$( <$source>::$variant => <$target>::$variant, )*
}
}
}
impl From<$target> for $source {
fn from(item: $target) -> Self {
match item {
$( <$target>::$variant => <$source>::$variant, )*
}
}
}
};
}
use impl_from_enum;


@@ -1,86 +1,39 @@
use super::api;
use clickhouse::Row;
use serde::{Deserialize, Serialize};
use sqlx::{FromRow, Type};
use serde_repr::{Deserialize_repr, Serialize_repr};
use time::OffsetDateTime;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, Type)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize_repr, Deserialize_repr)]
#[repr(u8)]
pub enum Class {
#[sqlx(rename = "us_equity")]
#[serde(rename = "us_equity")]
UsEquity,
#[sqlx(rename = "crypto")]
#[serde(rename = "crypto")]
Crypto,
UsEquity = 1,
Crypto = 2,
}
#[derive(Clone, Copy, Debug, Serialize, Deserialize, Type)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize_repr, Deserialize_repr)]
#[repr(u8)]
pub enum Exchange {
#[sqlx(rename = "AMEX")]
#[serde(rename = "AMEX")]
Amex,
#[sqlx(rename = "ARCA")]
#[serde(rename = "ARCA")]
Arca,
#[sqlx(rename = "BATS")]
#[serde(rename = "BATS")]
Bats,
#[sqlx(rename = "NYSE")]
#[serde(rename = "NYSE")]
Nyse,
#[sqlx(rename = "NASDAQ")]
#[serde(rename = "NASDAQ")]
Nasdaq,
#[sqlx(rename = "NYSEARCA")]
#[serde(rename = "NYSEARCA")]
Nysearca,
#[sqlx(rename = "OTC")]
#[serde(rename = "OTC")]
Otc,
#[sqlx(rename = "CRYPTO")]
#[serde(rename = "CRYPTO")]
Crypto,
Amex = 1,
Arca = 2,
Bats = 3,
Nyse = 4,
Nasdaq = 5,
Nysearca = 6,
Otc = 7,
Crypto = 8,
}
#[derive(PartialEq, Eq, Deserialize, Type)]
pub enum Status {
#[sqlx(rename = "active")]
#[serde(rename = "active")]
Active,
#[sqlx(rename = "inactive")]
#[serde(rename = "inactive")]
Inactive,
}
#[derive(Clone, Debug, FromRow, Serialize)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Row)]
pub struct Asset {
pub symbol: String,
pub class: Class,
pub exchange: Exchange,
pub trading: bool,
pub timestamp_added: OffsetDateTime,
pub timestamp_first: OffsetDateTime,
pub timestamp_last: OffsetDateTime,
#[serde(with = "clickhouse::serde::time::datetime")]
pub time_added: OffsetDateTime,
}
impl From<(api::incoming::Asset, bool, OffsetDateTime)> for Asset {
fn from(
(asset, trading, timestamp_first): (api::incoming::Asset, bool, OffsetDateTime),
) -> Self {
Self {
symbol: asset.symbol,
class: asset.class,
exchange: asset.exchange,
trading,
timestamp_added: OffsetDateTime::now_utc(),
timestamp_first,
timestamp_last: timestamp_first,
}
}
}
#[derive(Clone, Debug)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BroadcastMessage {
Added(Asset),
Updated(Asset),
Deleted(Asset),
}
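For reference, the practical effect of the serde_repr switch above (Class and Exchange now carry explicit numeric discriminants instead of per-variant #[sqlx(rename)]/#[serde(rename)] attributes) can be seen in a standalone round-trip through serde_json, which the project already depends on; this snippet is illustrative only and not part of the commit:

use serde_repr::{Deserialize_repr, Serialize_repr};

#[derive(Debug, PartialEq, Serialize_repr, Deserialize_repr)]
#[repr(u8)]
enum Class {
    UsEquity = 1,
    Crypto = 2,
}

fn main() {
    // Variants serialize as their discriminant rather than a string name,
    // so the database column can hold a plain integer/enum value.
    assert_eq!(serde_json::to_string(&Class::Crypto).unwrap(), "2");
    assert_eq!(serde_json::from_str::<Class>("1").unwrap(), Class::UsEquity);
}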


@@ -1,72 +1,44 @@
use super::{api, websocket};
use serde::Serialize;
use sqlx::FromRow;
#![allow(clippy::module_name_repetitions)]
use clickhouse::Row;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
#[derive(Clone, Debug, FromRow, Serialize)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Row)]
pub struct Bar {
pub timestamp: OffsetDateTime,
pub asset_symbol: String,
pub open: Option<f64>,
pub high: Option<f64>,
pub low: Option<f64>,
pub close: Option<f64>,
pub volume: f64,
pub num_trades: i64,
pub volume_weighted: f64,
#[serde(with = "clickhouse::serde::time::datetime")]
pub time: OffsetDateTime,
pub symbol: String,
pub open: f64,
pub high: f64,
pub low: f64,
pub close: f64,
pub volume: i64,
pub trades: i64,
pub vwap: f64,
}
impl Bar {
pub const fn empty(timestamp: OffsetDateTime, asset_symbol: String) -> Self {
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Row)]
pub struct BarValidity {
pub symbol: String,
#[serde(with = "clickhouse::serde::time::datetime")]
pub time_last: OffsetDateTime,
}
impl BarValidity {
pub const fn none(symbol: String) -> Self {
Self {
timestamp,
asset_symbol,
open: None,
high: None,
low: None,
close: None,
volume: 0.0,
num_trades: 0,
volume_weighted: 0.0,
symbol,
time_last: OffsetDateTime::UNIX_EPOCH,
}
}
pub fn merge_empty(&mut self, other: &Self) {
self.open = other.open;
self.high = other.high;
self.low = other.low;
self.close = other.close;
}
}
impl From<websocket::data::incoming::bar::Message> for Bar {
fn from(bar_message: websocket::data::incoming::bar::Message) -> Self {
impl From<Bar> for BarValidity {
fn from(bar: Bar) -> Self {
Self {
timestamp: bar_message.timestamp,
asset_symbol: bar_message.symbol,
open: Some(bar_message.open),
high: Some(bar_message.high),
low: Some(bar_message.low),
close: Some(bar_message.close),
volume: bar_message.volume,
num_trades: bar_message.num_trades,
volume_weighted: bar_message.volume_weighted,
}
}
}
impl From<(api::incoming::Bar, String)> for Bar {
fn from((bar, asset_symbol): (api::incoming::Bar, String)) -> Self {
Self {
timestamp: bar.timestamp,
asset_symbol,
open: Some(bar.open),
high: Some(bar.high),
low: Some(bar.low),
close: Some(bar.close),
volume: bar.volume,
num_trades: bar.num_trades,
volume_weighted: bar.volume_weighted,
symbol: bar.symbol,
time_last: bar.time,
}
}
}
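One way the new BarValidity row could drive the simplified backfill is sketched below; the helper is hypothetical and not part of the commit, and it assumes the clickhouse crate's Client (with its time feature enabled) pointed at the qrust database configured later in this commit.

use clickhouse::{error::Result, Client, Row};
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;

// Restated from the commit so the example stands alone.
#[derive(Clone, Debug, Serialize, Deserialize, Row)]
struct BarValidity {
    symbol: String,
    #[serde(with = "clickhouse::serde::time::datetime")]
    time_last: OffsetDateTime,
}

impl BarValidity {
    const fn none(symbol: String) -> Self {
        Self { symbol, time_last: OffsetDateTime::UNIX_EPOCH }
    }
}

/// Hypothetical helper: where should the backfill for `symbol` resume from?
async fn backfill_start(client: &Client, symbol: &str) -> Result<OffsetDateTime> {
    let validity = client
        // FINAL collapses ReplacingMergeTree rows that have not been merged yet.
        .query("SELECT ?fields FROM bars_validity FINAL WHERE symbol = ?")
        .bind(symbol)
        .fetch_optional::<BarValidity>()
        .await?
        // A symbol that has never been filled starts from the epoch.
        .unwrap_or_else(|| BarValidity::none(symbol.to_owned()));
    Ok(validity.time_last)
}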

View File

@@ -4,11 +4,11 @@ pub mod bar;
pub mod source;
pub mod websocket;
pub use asset::{Asset, Class, Exchange, Status};
pub use bar::Bar;
pub use asset::{Asset, Class, Exchange};
pub use bar::{Bar, BarValidity};
pub use source::Source;
#[derive(Clone, Debug)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BroadcastMessage {
Asset(asset::BroadcastMessage),
}

View File

@@ -3,7 +3,7 @@ use std::{
str::FromStr,
};
#[derive(Clone, Copy, Debug)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Source {
Iex,
Sip,

View File

@@ -1,11 +1,11 @@
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
#[derive(PartialEq, Deserialize)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Message {
#[serde(rename = "t")]
#[serde(with = "time::serde::rfc3339")]
pub timestamp: OffsetDateTime,
pub time: OffsetDateTime,
#[serde(rename = "S")]
pub symbol: String,
#[serde(rename = "o")]
@@ -17,9 +17,25 @@ pub struct Message {
#[serde(rename = "c")]
pub close: f64,
#[serde(rename = "v")]
pub volume: f64,
pub volume: i64,
#[serde(rename = "n")]
pub num_trades: i64,
pub trades: i64,
#[serde(rename = "vw")]
pub volume_weighted: f64,
pub vwap: f64,
}
impl From<Message> for crate::types::Bar {
fn from(bar: Message) -> Self {
Self {
time: bar.time,
symbol: bar.symbol,
open: bar.open,
high: bar.high,
low: bar.low,
close: bar.close,
volume: bar.volume,
trades: bar.trades,
vwap: bar.vwap,
}
}
}

View File

@@ -2,9 +2,9 @@ pub mod bar;
pub mod subscription;
pub mod success;
use serde::Deserialize;
use serde::{Deserialize, Serialize};
#[derive(PartialEq, Deserialize)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(tag = "T")]
pub enum Message {
#[serde(rename = "success")]

View File

@@ -1,17 +1,15 @@
use serde::Deserialize;
use serde::{Deserialize, Serialize};
#[derive(PartialEq, Eq, Deserialize)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Message {
pub trades: Vec<String>,
pub quotes: Vec<String>,
pub bars: Vec<String>,
#[serde(rename = "updatedBars")]
pub updated_bars: Vec<String>,
#[serde(rename = "dailyBars")]
pub daily_bars: Vec<String>,
pub orderbooks: Option<Vec<String>>,
pub statuses: Option<Vec<String>>,
pub lulds: Option<Vec<String>>,
#[serde(rename = "cancelErrors")]
pub cancel_errors: Option<Vec<String>>,
}

View File

@@ -1,14 +1,9 @@
use serde::Deserialize;
use serde::{Deserialize, Serialize};
#[derive(PartialEq, Eq, Deserialize)]
pub enum MessageType {
#[serde(rename = "connected")]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "msg")]
#[serde(rename_all = "camelCase")]
pub enum Message {
Connected,
#[serde(rename = "authenticated")]
Authenticated,
}
#[derive(PartialEq, Eq, Deserialize)]
pub struct Message {
pub msg: MessageType,
}

View File

@@ -5,11 +5,9 @@ use serde::Serialize;
#[derive(Serialize)]
#[serde(tag = "action")]
#[serde(rename_all = "camelCase")]
pub enum Message {
#[serde(rename = "auth")]
Auth(auth::Message),
#[serde(rename = "subscribe")]
Subscribe(subscribe::Message),
#[serde(rename = "unsubscribe")]
Unsubscribe(subscribe::Message),
}

View File

@@ -1,9 +1,9 @@
use serde::Serialize;
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Message {
bars: Vec<String>,
#[serde(rename = "updatedBars")]
updated_bars: Vec<String>,
}

View File

@@ -0,0 +1,29 @@
services:
clickhouse:
image: clickhouse/clickhouse-server
hostname: clickhouse
restart: unless-stopped
volumes:
- clickhouse-lib:/var/lib/clickhouse
- clickhouse-log:/var/log/clickhouse-server
- ./config.d:/etc/clickhouse-server/config.d
- ./users.d:/etc/clickhouse-server/users.d
- ./docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
environment:
- CLICKHOUSE_USER=${CLICKHOUSE_USER}
- CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD}
- CLICKHOUSE_DB=${CLICKHOUSE_DB}
- CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1
network_mode: host
ulimits:
nofile:
soft: 262144
hard: 262144
cap_add:
- SYS_NICE
- NET_ADMIN
- IPC_LOCK
volumes:
clickhouse-data:
clickhouse-logs:

View File

@@ -0,0 +1,38 @@
CREATE TABLE IF NOT EXISTS qrust.assets (
symbol String,
class Enum('us_equity' = 1, 'crypto' = 2),
exchange Enum(
'AMEX' = 1,
'ARCA' = 2,
'BATS' = 3,
'NYSE' = 4,
'NASDAQ' = 5,
'NYSEARCA' = 6,
'OTC' = 7,
'CRYPTO' = 8
),
time_added DateTime DEFAULT now()
)
ENGINE = ReplacingMergeTree()
PRIMARY KEY symbol;
CREATE TABLE IF NOT EXISTS qrust.bars (
symbol String,
time DateTime,
open Float64,
high Float64,
low Float64,
close Float64,
volume Int64,
trades Int64,
vwap Float64
)
ENGINE = ReplacingMergeTree()
PRIMARY KEY (symbol, time);
CREATE TABLE IF NOT EXISTS qrust.bars_validity (
symbol String,
time_last DateTime
)
ENGINE = ReplacingMergeTree()
PRIMARY KEY symbol;
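For reference, a minimal end-to-end sketch (not part of this commit) of writing to and reading from qrust.bars with the clickhouse crate; the URL, credentials, and tokio runtime are assumptions taken from the compose and users.d files in this commit, and the Bar row is restated here so the example stands alone.

use clickhouse::{error::Result, Client, Row};
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;

// Mirrors the Bar row introduced earlier in this commit.
#[derive(Clone, Debug, Serialize, Deserialize, Row)]
struct Bar {
    #[serde(with = "clickhouse::serde::time::datetime")]
    time: OffsetDateTime,
    symbol: String,
    open: f64,
    high: f64,
    low: f64,
    close: f64,
    volume: i64,
    trades: i64,
    vwap: f64,
}

#[tokio::main]
async fn main() -> Result<()> {
    // Credentials and database as configured in users.d and the compose file.
    let client = Client::default()
        .with_url("http://localhost:8123")
        .with_user("qrust")
        .with_password("qrust")
        .with_database("qrust");

    // Insert one bar; ReplacingMergeTree deduplicates on (symbol, time) at
    // merge time, so re-inserting the same key behaves like an upsert.
    let mut insert = client.insert("bars")?;
    insert
        .write(&Bar {
            time: OffsetDateTime::now_utc(),
            symbol: "AAPL".into(),
            open: 1.0,
            high: 1.0,
            low: 1.0,
            close: 1.0,
            volume: 0,
            trades: 0,
            vwap: 1.0,
        })
        .await?;
    insert.end().await?;

    // FINAL forces deduplication of not-yet-merged parts at query time.
    let bars = client
        .query("SELECT ?fields FROM bars FINAL WHERE symbol = ? ORDER BY time")
        .bind("AAPL")
        .fetch_all::<Bar>()
        .await?;
    println!("{bars:?}");
    Ok(())
}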

View File

@@ -0,0 +1,18 @@
<clickhouse>
<!-- Docs: <https://clickhouse.com/docs/en/operations/settings/settings_users/> -->
<users>
<!-- Remove default user -->
<default remove="remove">
</default>
<qrust>
<profile>default</profile>
<networks>
<ip>::/0</ip>
</networks>
<password>qrust</password>
<quota>default</quota>
<access_management>1</access_management>
</qrust>
</users>
</clickhouse>

View File

@@ -1,48 +0,0 @@
#!/bin/bash
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
CREATE TYPE CLASS AS ENUM ('us_equity', 'crypto');
CREATE TYPE EXCHANGE AS ENUM (
'AMEX',
'ARCA',
'BATS',
'NASDAQ',
'NYSE',
'NYSEARCA',
'OTC',
'CRYPTO'
);
CREATE TABLE assets (
symbol TEXT PRIMARY KEY,
class CLASS NOT NULL,
exchange EXCHANGE NOT NULL,
trading BOOLEAN NOT NULL DEFAULT FALSE,
timestamp_added TIMESTAMPTZ NOT NULL DEFAULT NOW(),
timestamp_first TIMESTAMPTZ NOT NULL,
timestamp_last TIMESTAMPTZ NOT NULL
);
CREATE TABLE bars (
timestamp TIMESTAMPTZ,
asset_symbol TEXT REFERENCES assets(symbol) ON DELETE CASCADE ON UPDATE CASCADE,
open DOUBLE PRECISION,
high DOUBLE PRECISION,
low DOUBLE PRECISION,
close DOUBLE PRECISION,
volume DOUBLE PRECISION NOT NULL,
num_trades BIGINT NOT NULL,
volume_weighted DOUBLE PRECISION NOT NULL,
PRIMARY KEY (asset_symbol, timestamp)
);
SELECT create_hypertable('bars', 'timestamp', 'asset_symbol', 15);
ALTER TABLE bars SET (
timescaledb.compress,
timescaledb.compress_segmentby = 'asset_symbol'
);
SELECT add_compression_policy('bars', INTERVAL '30 days');
EOSQL

View File

@@ -1,20 +0,0 @@
services:
timescaledb:
image: timescale/timescaledb-ha:pg15-all
hostname: timescaledb
restart: unless-stopped
ports:
- 5432:5432
volumes:
- timescaledb-data:/home/postgres/pgdata/data
- timescaledb-logs:/home/postgres/pg_log
- ./999_init.sh:/docker-entrypoint-initdb.d/999_init.sh
environment:
- TIMESCALEDB_TELEMETRY=off
- POSTGRES_USER=${POSTGRES_USER}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_DB=${POSTGRES_DB}
volumes:
timescaledb-data:
timescaledb-logs: