Fix the Sin of Man
- Migrate to ClickHouse
- Simplify serde renaming
- Simplify backfill logic
- Compartmentalize database columns

Signed-off-by: Nikolaos Karaolidis <nick@karaolidis.com>
@@ -1,8 +1,6 @@
 use crate::types::Source;
 use governor::{DefaultDirectRateLimiter, Quota, RateLimiter};
-use http::HeaderMap;
-use reqwest::Client;
-use sqlx::{postgres::PgPoolOptions, PgPool};
+use reqwest::{header::HeaderMap, Client};
 use std::{env, num::NonZeroU32, sync::Arc};
 use time::{format_description::FormatItem, macros::format_description};
 use tokio::time::Duration;
@@ -15,8 +13,6 @@ pub const ALPACA_CRYPTO_WEBSOCKET_URL: &str = "wss://stream.data.alpaca.markets/
 pub const ALPACA_TIMESTAMP_FORMAT: &[FormatItem] =
     format_description!("[year]-[month]-[day]T[hour]:[minute]:[second]Z");
 
-const NUM_CLIENTS: u32 = 10;
-
 pub struct Config {
     pub alpaca_api_key: String,
     pub alpaca_api_secret: String,
@@ -24,12 +20,11 @@ pub struct Config {
     pub alpaca_rate_limit: DefaultDirectRateLimiter,
     pub alpaca_historical_offset: Duration,
     pub alpaca_source: Source,
-    pub postgres_pool: PgPool,
+    pub clickhouse_client: clickhouse::Client,
 }
 
 impl Config {
-    pub async fn from_env() -> Self {
-        let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set.");
+    pub fn from_env() -> Self {
         let alpaca_api_key = env::var("ALPACA_API_KEY").expect("ALPACA_API_KEY must be set.");
         let alpaca_api_secret =
            env::var("ALPACA_API_SECRET").expect("ALPACA_API_SECRET must be set.");
@@ -38,6 +33,12 @@ impl Config {
             .parse()
             .expect("ALPACA_SOURCE must be a either 'iex' or 'sip'.");
 
+        let clickhouse_url = env::var("CLICKHOUSE_URL").expect("CLICKHOUSE_URL must be set.");
+        let clickhouse_user = env::var("CLICKHOUSE_USER").expect("CLICKHOUSE_USER must be set.");
+        let clickhouse_password =
+            env::var("CLICKHOUSE_PASSWORD").expect("CLICKHOUSE_PASSWORD must be set.");
+        let clickhouse_db = env::var("CLICKHOUSE_DB").expect("CLICKHOUSE_DB must be set.");
+
         Self {
             alpaca_api_key: alpaca_api_key.clone(),
             alpaca_api_secret: alpaca_api_secret.clone(),
@@ -59,15 +60,15 @@ impl Config {
                 Source::Sip => 0,
             }),
             alpaca_source,
-            postgres_pool: PgPoolOptions::new()
-                .max_connections(NUM_CLIENTS)
-                .connect(&database_url)
-                .await
-                .unwrap(),
+            clickhouse_client: clickhouse::Client::default()
+                .with_url(clickhouse_url)
+                .with_user(clickhouse_user)
+                .with_password(clickhouse_password)
+                .with_database(clickhouse_db),
         }
     }
 
-    pub async fn arc_from_env() -> Arc<Self> {
-        Arc::new(Self::from_env().await)
+    pub fn arc_from_env() -> Arc<Self> {
+        Arc::new(Self::from_env())
     }
 }
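Why `from_env` can drop `async`: a sqlx pool has to connect (and therefore await) up front, whereas the `clickhouse` crate's client is a plain HTTP handle that performs no I/O until the first query runs. A minimal sketch of that behaviour, assuming a local ClickHouse instance:

    use clickhouse::Client;

    #[tokio::main]
    async fn main() -> Result<(), clickhouse::error::Error> {
        // Constructing the client is pure configuration; no connection is made here.
        let client = Client::default()
            .with_url("http://localhost:8123") // assumed local server, not from the commit
            .with_database("default");

        // The first network round-trip happens at query time.
        client.query("SELECT 1").execute().await?;
        Ok(())
    }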
@@ -5,11 +5,11 @@ use crate::{
     },
     data::authenticate_websocket,
     database,
-    time::{duration_until, last_minute, next_30s, next_minute, ONE_MINUTE, THIRTY_SECONDS},
+    time::{duration_until, last_minute, next_minute, ONE_MINUTE},
     types::{
-        api,
+        api::incoming,
         asset::{self, Asset},
-        websocket, Bar, BroadcastMessage, Class,
+        websocket, Bar, BarValidity, BroadcastMessage, Class,
     },
 };
 use core::panic;
@@ -17,16 +17,12 @@ use futures_util::{
     stream::{SplitSink, SplitStream},
     SinkExt, StreamExt,
 };
-use http::StatusCode;
-use indexmap::IndexMap;
 use log::{error, info, warn};
 use serde_json::from_str;
 use std::{
     collections::{HashMap, HashSet},
     sync::Arc,
-    time::Instant,
 };
-use time::OffsetDateTime;
 use tokio::{
     net::TcpStream,
     spawn,
@@ -34,15 +30,14 @@ use tokio::{
         broadcast::{Receiver, Sender},
         RwLock,
     },
-    task::spawn_blocking,
-    time::{interval_at, sleep},
+    time::sleep,
 };
 use tokio_tungstenite::{connect_async, tungstenite::Message, MaybeTlsStream, WebSocketStream};
 
 pub async fn run(
     app_config: Arc<Config>,
     class: Class,
-    asset_broadcast_sender: Sender<BroadcastMessage>,
+    broadcast_sender: Sender<BroadcastMessage>,
 ) {
     info!("Running live data threads for {:?}.", class);
 
@@ -59,38 +54,35 @@ pub async fn run(
     authenticate_websocket(&app_config, &mut stream, &mut sink).await;
     let sink = Arc::new(RwLock::new(sink));
 
-    let backfilled = Arc::new(RwLock::new(HashMap::new()));
-
-    spawn(websocket_broadcast_handler(
+    spawn(broadcast_handler(
         class,
         sink.clone(),
-        asset_broadcast_sender.subscribe(),
+        broadcast_sender.subscribe(),
     ));
 
-    database::assets::select_where_class(&app_config.postgres_pool, class)
+    database::assets::select_where_class(&app_config.clickhouse_client, class)
         .await
         .into_iter()
         .for_each(|asset| {
-            asset_broadcast_sender
+            broadcast_sender
                 .send(BroadcastMessage::Asset(asset::BroadcastMessage::Added(
                     asset,
                 )))
                 .unwrap();
         });
 
-    spawn(null_handler(app_config.clone(), backfilled.clone()));
-    websocket_message_handler(app_config, class, stream, sink, backfilled).await;
+    websocket_handler(app_config, class, stream, sink).await;
 
     unreachable!()
 }
 
-async fn websocket_broadcast_handler(
+async fn broadcast_handler(
     class: Class,
     sink: Arc<RwLock<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, Message>>>,
-    mut asset_broadcast_receiver: Receiver<BroadcastMessage>,
+    mut broadcast_receiver: Receiver<BroadcastMessage>,
 ) {
     loop {
-        match asset_broadcast_receiver.recv().await.unwrap() {
+        match broadcast_receiver.recv().await.unwrap() {
             BroadcastMessage::Asset(asset::BroadcastMessage::Added(asset))
                 if asset.class == class =>
             {
@@ -128,13 +120,14 @@ async fn websocket_broadcast_handler(
         }
     }
 }
 
-async fn websocket_message_handler(
+async fn websocket_handler(
     app_config: Arc<Config>,
     class: Class,
     mut stream: SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>,
     sink: Arc<RwLock<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, Message>>>,
-    backfilled: Arc<RwLock<HashMap<String, bool>>>,
 ) {
+    let backfilled = Arc::new(RwLock::new(HashMap::new()));
+
     loop {
         match stream.next().await {
             Some(Ok(Message::Text(data))) => {
@@ -147,7 +140,7 @@ async fn websocket_message_handler(
                 }
 
                 for message in parsed_data.unwrap_or_default() {
-                    websocket_handle_text_message(&app_config, class, message, &backfilled).await;
+                    websocket_handle_message(&app_config, class, &backfilled, message).await;
                 }
             }
             Some(Ok(Message::Ping(_))) => sink
@@ -162,11 +155,11 @@ async fn websocket_message_handler(
         }
     }
 }
 
-async fn websocket_handle_text_message(
+async fn websocket_handle_message(
     app_config: &Arc<Config>,
     class: Class,
-    message: websocket::data::incoming::Message,
     backfilled: &Arc<RwLock<HashMap<String, bool>>>,
+    message: websocket::data::incoming::Message,
 ) {
     match message {
         websocket::data::incoming::Message::Subscription(subscription_message) => {
@@ -185,20 +178,35 @@ async fn websocket_handle_text_message(
             let deleted_assets = old_assets.difference(&new_assets).collect::<HashSet<_>>();
 
             for asset_symbol in &added_assets {
-                let asset =
-                    database::assets::select_where_symbol(&app_config.postgres_pool, asset_symbol)
-                        .await
-                        .unwrap();
+                let asset = database::assets::select_where_symbol(
+                    &app_config.clickhouse_client,
+                    asset_symbol,
+                )
+                .await
+                .unwrap();
 
                 backfilled.write().await.insert(asset.symbol.clone(), false);
+
+                let bar_validity = BarValidity::none(asset.symbol.clone());
+                database::bars::upsert_validity(&app_config.clickhouse_client, &bar_validity).await;
+
                 spawn(backfill(
                     app_config.clone(),
-                    asset.clone(),
                     backfilled.clone(),
+                    asset.clone(),
                 ));
             }
 
             for asset_symbol in &deleted_assets {
+                database::bars::delete_validity_where_symbol(
+                    &app_config.clickhouse_client,
+                    asset_symbol,
+                )
+                .await;
+
+                database::bars::delete_where_symbol(&app_config.clickhouse_client, asset_symbol)
+                    .await;
+
                 backfilled.write().await.remove(*asset_symbol);
             }
 
@@ -207,156 +215,45 @@ async fn websocket_handle_text_message(
                 class, added_assets, deleted_assets
             );
         }
-        websocket::data::incoming::Message::Bars(bar_message) => {
-            let bar = Bar::from(bar_message);
-            info!(
-                "websocket::Incoming bar for {}: {}",
-                bar.asset_symbol, bar.timestamp
-            );
-            database::bars::upsert(
-                &app_config.postgres_pool,
-                &bar,
-                backfilled.read().await[&bar.asset_symbol],
-            )
-            .await;
-        }
-        websocket::data::incoming::Message::UpdatedBars(bar_message) => {
+        websocket::data::incoming::Message::Bars(bar_message)
+        | websocket::data::incoming::Message::UpdatedBars(bar_message) => {
             let bar = Bar::from(bar_message);
-            info!(
-                "websocket::Incoming bar for {}: {}",
-                bar.asset_symbol, bar.timestamp
-            );
+            info!("websocket::Incoming bar for {}: {}", bar.symbol, bar.time);
 
-            let transaction = app_config.postgres_pool.begin().await.unwrap();
-            let backfilled_asset_symbol = backfilled.read().await[&bar.asset_symbol];
-            database::bars::upsert(&app_config.postgres_pool, &bar, backfilled_asset_symbol).await;
-            if backfilled_asset_symbol {
-                database::assets::update_timestamp_last_where_symbol(
-                    &app_config.postgres_pool,
-                    &bar.asset_symbol,
-                    &bar.timestamp,
-                )
-                .await;
+            database::bars::upsert(&app_config.clickhouse_client, &bar).await;
+            if backfilled.read().await[&bar.symbol] {
+                database::bars::upsert_validity(&app_config.clickhouse_client, &bar.into()).await;
             }
-            transaction.commit().await.unwrap();
         }
-        _ => {}
+        websocket::data::incoming::Message::Success(_) => {}
     }
 }
 
-#[allow(clippy::significant_drop_in_scrutinee)]
-async fn null_handler(app_config: Arc<Config>, backfilled: Arc<RwLock<HashMap<String, bool>>>) {
-    #[derive(PartialEq)]
-    enum NullHandlerState {
-        Bars,
-        UpdatedBars,
-    }
-
-    let next_30s = next_30s();
-    let mut state = if next_30s.unix_timestamp() % 30 == 0 {
-        NullHandlerState::Bars
-    } else {
-        NullHandlerState::UpdatedBars
-    };
-    let mut interval = interval_at(
-        (Instant::now() + duration_until(next_30s)).into(),
-        THIRTY_SECONDS,
-    );
-
-    loop {
-        interval.tick().await;
-        let timestamp = last_minute() - ONE_MINUTE;
-
-        let backfilled = backfilled.read().await;
-        for asset_symbol in backfilled.keys().cloned() {
-            let bar = Bar::empty(timestamp, asset_symbol);
-
-            let transaction = app_config.postgres_pool.begin().await.unwrap();
-            database::bars::insert_or_skip(
-                &app_config.postgres_pool,
-                &bar,
-                backfilled[&bar.asset_symbol],
-            )
-            .await;
-            if backfilled[&bar.asset_symbol] && state == NullHandlerState::Bars {
-                database::assets::update_timestamp_last_where_symbol(
-                    &app_config.postgres_pool,
-                    &bar.asset_symbol,
-                    &bar.timestamp,
-                )
-                .await;
-            }
-            transaction.commit().await.unwrap();
-        }
-
-        state = match state {
-            NullHandlerState::Bars => NullHandlerState::UpdatedBars,
-            NullHandlerState::UpdatedBars => NullHandlerState::Bars,
-        };
-    }
-}
-
 pub async fn backfill(
     app_config: Arc<Config>,
-    asset: Asset,
     backfilled: Arc<RwLock<HashMap<String, bool>>>,
+    asset: Asset,
 ) {
-    info!("Backfilling historical data for {}...", asset.symbol);
+    let bar_validity =
+        database::bars::select_validity_where_symbol(&app_config.clickhouse_client, &asset.symbol)
+            .await
+            .unwrap();
 
-    let task_run_offsetdatetime = next_minute() + app_config.alpaca_historical_offset;
-    let fetch_from = asset.timestamp_last + ONE_MINUTE;
-    let fetch_until = task_run_offsetdatetime - app_config.alpaca_historical_offset - ONE_MINUTE;
+    let fetch_from = bar_validity.time_last + ONE_MINUTE;
+    let fetch_until = last_minute();
     if fetch_from > fetch_until {
         return;
     }
 
-    let wait_duration = task_run_offsetdatetime - OffsetDateTime::now_utc();
-    if wait_duration.is_positive() {
-        sleep(wait_duration.unsigned_abs()).await;
-    }
+    info!("Queing historical data backfill for {}...", asset.symbol);
+    let task_run_offsetdatetime = next_minute() + app_config.alpaca_historical_offset;
+    sleep(duration_until(task_run_offsetdatetime)).await;
 
-    let bars = backfill_bars_from_api(&app_config, &asset, fetch_from, fetch_until).await;
-
-    let transaction = app_config.postgres_pool.begin().await.unwrap();
-    database::bars::upsert_batch(&app_config.postgres_pool, &bars, true).await;
-    database::assets::update_timestamp_last_where_symbol(
-        &app_config.postgres_pool,
-        &asset.symbol,
-        &fetch_until,
-    )
-    .await;
-    derive_recent_nulls(&app_config, &asset, &fetch_until, &backfilled).await;
-    transaction.commit().await.unwrap();
-
-    info!("Backfilled historical data for {}.", asset.symbol);
-}
-
-fn generate_per_minute_bars(
-    from: OffsetDateTime,
-    until: OffsetDateTime,
-    asset: &Asset,
-) -> IndexMap<OffsetDateTime, Bar> {
-    let mut bars = IndexMap::new();
-    let mut current_time = from;
-    while current_time <= until {
-        bars.insert(current_time, Bar::empty(current_time, asset.symbol.clone()));
-        current_time += ONE_MINUTE;
-    }
-    bars
-}
-
-async fn backfill_bars_from_api(
-    app_config: &Arc<Config>,
-    asset: &Asset,
-    from: OffsetDateTime,
-    until: OffsetDateTime,
-) -> Vec<Bar> {
-    let asset_clone = asset.clone();
-    let mut bars = spawn_blocking(move || generate_per_minute_bars(from, until, &asset_clone))
-        .await
-        .unwrap();
+    info!("Running historical data backfill for {}...", asset.symbol);
 
+    let mut bars = Vec::new();
     let mut next_page_token = None;
 
     loop {
         let request = app_config
             .alpaca_client
@@ -369,11 +266,17 @@ pub async fn backfill(
             ("timeframe", &String::from("1Min")),
             (
                 "start",
-                &from.format(ALPACA_TIMESTAMP_FORMAT).unwrap().to_string(),
+                &fetch_from
+                    .format(ALPACA_TIMESTAMP_FORMAT)
+                    .unwrap()
+                    .to_string(),
             ),
             (
                 "end",
-                &until.format(ALPACA_TIMESTAMP_FORMAT).unwrap().to_string(),
+                &fetch_until
+                    .format(ALPACA_TIMESTAMP_FORMAT)
+                    .unwrap()
+                    .to_string(),
             ),
             ("limit", &String::from("10000")),
             ("page_token", &next_page_token.clone().unwrap_or_default()),
@@ -381,17 +284,14 @@ pub async fn backfill(
 
         app_config.alpaca_rate_limit.until_ready().await;
         let response = request.send().await.unwrap();
-        let mut response = if response.status() == StatusCode::OK {
-            response
-                .json::<api::incoming::bar::Message>()
-                .await
-                .unwrap()
+        let mut response = if response.status() == reqwest::StatusCode::OK {
            response.json::<incoming::bar::Message>().await.unwrap()
         } else {
             error!(
                 "Failed to backfill historical data for {} from {} to {}: {}",
                 asset.symbol,
-                from,
-                until,
+                fetch_from,
+                fetch_until,
                 response.text().await.unwrap()
             );
             break;
@@ -403,7 +303,7 @@ pub async fn backfill(
             .unwrap_or_default()
             .unwrap_or_default()
         {
-            bars.insert(bar.timestamp, Bar::from((bar, asset.symbol.clone())));
+            bars.push(Bar::from((bar, asset.symbol.clone())));
         }
 
         if response.next_page_token.is_none() {
@@ -412,29 +312,13 @@ pub async fn backfill(
         next_page_token = response.next_page_token;
     }
 
-    bars.into_values().collect::<Vec<Bar>>()
-}
+    database::bars::upsert_batch(&app_config.clickhouse_client, &bars).await;
+    if let Some(last_bar) = bars.last() {
+        database::bars::upsert_validity(&app_config.clickhouse_client, &last_bar.clone().into())
+            .await;
+    }
 
-#[allow(clippy::significant_drop_tightening)]
-async fn derive_recent_nulls(
-    app_config: &Arc<Config>,
-    asset: &Asset,
-    from: &OffsetDateTime,
-    backfilled: &Arc<RwLock<HashMap<String, bool>>>,
-) {
-    let mut backfilled = backfilled.write().await;
-    let bars = database::bars::select_where_symbol_where_timestamp_larger_than(
-        &app_config.postgres_pool,
-        &asset.symbol,
-        from,
-    )
-    .await;
-    database::bars::upsert_batch(&app_config.postgres_pool, &bars, true).await;
-    database::assets::update_timestamp_last_where_symbol(
-        &app_config.postgres_pool,
-        &asset.symbol,
-        &bars.last().unwrap().timestamp,
-    )
-    .await;
-    backfilled.insert(asset.symbol.clone(), true);
+    backfilled.write().await.insert(asset.symbol.clone(), true);
 
     info!("Backfilled historical data for {}.", asset.symbol);
 }
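The simplified fetch window deserves spelling out: `BarValidity::time_last` is the watermark of the last bar known to be persisted, so the gap to fill runs from one minute after it up to the last completed minute. A self-contained sketch of that arithmetic (timestamps are illustrative, not from the commit):

    use time::{Duration, OffsetDateTime};

    fn last_minute(now: OffsetDateTime) -> OffsetDateTime {
        // Truncate to the most recent whole minute (mirrors src/time.rs).
        let ts = now.unix_timestamp();
        OffsetDateTime::from_unix_timestamp(ts - ts % 60).unwrap()
    }

    fn main() {
        let one_minute = Duration::minutes(1);

        // Illustrative values: a watermark on a minute boundary, "now" 3m55s later.
        let time_last = OffsetDateTime::from_unix_timestamp(1_699_999_020).unwrap();
        let now = OffsetDateTime::from_unix_timestamp(1_699_999_255).unwrap();

        let fetch_from = time_last + one_minute; // first missing minute
        let fetch_until = last_minute(now);      // last completed minute

        // A symbol that is already up to date yields an empty window and returns early.
        assert!(fetch_from <= fetch_until);
        println!("backfill {fetch_from} ..= {fetch_until}");
    }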
@@ -20,11 +20,9 @@ async fn authenticate_websocket(
         Some(Ok(Message::Text(data)))
             if from_str::<Vec<websocket::data::incoming::Message>>(&data)
                 .unwrap()
-                .get(0)
+                .first()
                 == Some(&websocket::data::incoming::Message::Success(
-                    websocket::data::incoming::success::Message {
-                        msg: websocket::data::incoming::success::MessageType::Connected,
-                    },
+                    websocket::data::incoming::success::Message::Connected,
                 )) => {}
         _ => panic!(),
     }
@@ -45,11 +43,9 @@ async fn authenticate_websocket(
         Some(Ok(Message::Text(data)))
             if from_str::<Vec<websocket::data::incoming::Message>>(&data)
                 .unwrap()
-                .get(0)
+                .first()
                 == Some(&websocket::data::incoming::Message::Success(
-                    websocket::data::incoming::success::Message {
-                        msg: websocket::data::incoming::success::MessageType::Authenticated,
-                    },
+                    websocket::data::incoming::success::Message::Authenticated,
                 )) => {}
         _ => panic!(),
     };
@@ -1,92 +1,43 @@
-use crate::types::{Asset, Class, Exchange};
-use sqlx::{query_as, PgPool};
-use std::convert::Into;
-use time::OffsetDateTime;
+use crate::types::{Asset, Class};
+use clickhouse::Client;
 
-pub async fn select(postgres_pool: &PgPool) -> Vec<Asset> {
-    query_as!(
-        Asset,
-        r#"SELECT symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last FROM assets"#
-    )
-    .fetch_all(postgres_pool)
-    .await
-    .unwrap()
+pub async fn select(clickhouse_client: &Client) -> Vec<Asset> {
+    clickhouse_client
+        .query("SELECT ?fields FROM assets")
+        .fetch_all::<Asset>()
+        .await
+        .unwrap()
 }
 
-pub async fn select_where_class(postgres_pool: &PgPool, class: Class) -> Vec<Asset> {
-    query_as!(
-        Asset,
-        r#"SELECT symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last FROM assets WHERE class = $1::CLASS"#,
-        class as Class
-    )
-    .fetch_all(postgres_pool)
-    .await
-    .unwrap()
+pub async fn select_where_class(clickhouse_client: &Client, class: Class) -> Vec<Asset> {
+    clickhouse_client
+        .query("SELECT ?fields FROM assets WHERE class = ?")
+        .bind(class)
+        .fetch_all::<Asset>()
+        .await
+        .unwrap()
 }
 
-pub async fn select_where_symbol(postgres_pool: &PgPool, symbol: &str) -> Option<Asset> {
-    query_as!(
-        Asset,
-        r#"SELECT symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last FROM assets WHERE symbol = $1"#,
-        symbol
-    )
-    .fetch_optional(postgres_pool)
-    .await
-    .unwrap()
+pub async fn select_where_symbol(clickhouse_client: &Client, symbol: &str) -> Option<Asset> {
+    clickhouse_client
+        .query("SELECT ?fields FROM assets WHERE symbol = ?")
+        .bind(symbol)
+        .fetch_optional::<Asset>()
+        .await
+        .unwrap()
 }
 
-pub async fn insert(postgres_pool: &PgPool, asset: &Asset) -> Asset {
-    query_as!(
-        Asset,
-        r#"INSERT INTO assets (symbol, class, exchange, trading, timestamp_added, timestamp_first, timestamp_last) VALUES ($1, $2::CLASS, $3::EXCHANGE, $4, $5, $6, $7)
-        RETURNING symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last"#,
-        asset.symbol, asset.class as Class, asset.exchange as Exchange, asset.trading, asset.timestamp_added, asset.timestamp_first, asset.timestamp_last
-    )
-    .fetch_one(postgres_pool)
-    .await
-    .unwrap()
+pub async fn insert(clickhouse_client: &Client, asset: &Asset) {
+    let mut insert = clickhouse_client.insert("assets").unwrap();
+    insert.write(asset).await.unwrap();
+    insert.end().await.unwrap();
 }
 
-pub async fn update_trading_where_symbol(
-    postgres_pool: &PgPool,
-    symbol: &str,
-    trading: &bool,
-) -> Option<Asset> {
-    query_as!(
-        Asset,
-        r#"UPDATE assets SET trading = $1 WHERE symbol = $2
-        RETURNING symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last"#,
-        trading, symbol
-    )
-    .fetch_optional(postgres_pool)
-    .await
-    .unwrap()
-}
-
-pub async fn update_timestamp_last_where_symbol(
-    postgres_pool: &PgPool,
-    symbol: &str,
-    timestamp_last: &OffsetDateTime,
-) -> Option<Asset> {
-    query_as!(
-        Asset,
-        r#"UPDATE assets SET timestamp_last = $1 WHERE symbol = $2
-        RETURNING symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last"#,
-        timestamp_last, symbol
-    )
-    .fetch_optional(postgres_pool)
-    .await
-    .unwrap()
-}
-
-pub async fn delete_where_symbol(postgres_pool: &PgPool, symbol: &str) -> Option<Asset> {
-    query_as!(
-        Asset,
-        r#"DELETE FROM assets WHERE symbol = $1
-        RETURNING symbol, class as "class: Class", exchange as "exchange: Exchange", trading, timestamp_added, timestamp_first, timestamp_last"#,
-        symbol
-    )
-    .fetch_optional(postgres_pool)
-    .await
-    .unwrap()
+pub async fn delete_where_symbol(clickhouse_client: &Client, symbol: &str) {
+    clickhouse_client
+        .query("DELETE FROM assets WHERE symbol = ?")
+        .bind(symbol)
+        .execute()
+        .await
+        .unwrap();
}
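The `?fields` placeholder in these queries is a feature of the `clickhouse` crate: it expands to the column list of the `Row` type the query is fetched into, which keeps the SQL and the struct definition in sync. A minimal sketch against a hypothetical table:

    use clickhouse::{Client, Row};
    use serde::Deserialize;

    // The column names come from the field names of the Row struct.
    #[derive(Row, Deserialize)]
    struct Pair {
        key: String,
        value: u32,
    }

    async fn demo(client: &Client) -> Result<Vec<Pair>, clickhouse::error::Error> {
        // "SELECT ?fields FROM pairs" is sent as "SELECT key, value FROM pairs".
        client
            .query("SELECT ?fields FROM pairs") // `pairs` is a hypothetical table
            .fetch_all::<Pair>()
            .await
    }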
@@ -1,161 +1,52 @@
-use crate::types::Bar;
-use sqlx::{query_as, PgPool, Postgres};
-use time::OffsetDateTime;
+use crate::types::{Bar, BarValidity};
+use clickhouse::Client;
 
-pub async fn select_not_null_where_symbol_where_timestamp_smaller_than_order_by_timestamp_desc_limit_one(
-    postgres_pool: &PgPool,
+pub async fn upsert(clickhouse_client: &Client, bar: &Bar) {
+    let mut insert = clickhouse_client.insert("bars").unwrap();
+    insert.write(bar).await.unwrap();
+    insert.end().await.unwrap();
+}
+
+pub async fn upsert_batch(clickhouse_client: &Client, bars: &[Bar]) {
+    let mut insert = clickhouse_client.insert("bars").unwrap();
+    for bar in bars {
+        insert.write(bar).await.unwrap();
+    }
+    insert.end().await.unwrap();
+}
+
+pub async fn delete_where_symbol(clickhouse_client: &Client, symbol: &str) {
+    clickhouse_client
+        .query("DELETE FROM bars WHERE symbol = ?")
+        .bind(symbol)
+        .execute()
+        .await
+        .unwrap();
+}
+
+pub async fn select_validity_where_symbol(
+    clickhouse_client: &Client,
     symbol: &str,
-    timestamp: &OffsetDateTime,
-) -> Bar {
-    query_as!(
-        Bar,
-        r#"SELECT * FROM bars WHERE asset_symbol = $1 AND timestamp < $2 AND open IS NOT NULL AND high IS NOT NULL AND low IS NOT NULL AND close IS NOT NULL ORDER BY timestamp DESC LIMIT 1"#,
-        symbol,
-        timestamp
-    )
-    .fetch_one(postgres_pool)
-    .await
-    .unwrap()
+) -> Option<BarValidity> {
+    clickhouse_client
+        .query("SELECT ?fields FROM bars_validity FINAL WHERE symbol = ?")
+        .bind(symbol)
+        .fetch_optional::<BarValidity>()
+        .await
+        .unwrap()
 }
 
-pub async fn select_where_symbol_where_timestamp_larger_than(
-    postgres_pool: &PgPool,
-    symbol: &str,
-    timestamp: &OffsetDateTime,
-) -> Vec<Bar> {
-    query_as!(
-        Bar,
-        r#"SELECT * FROM bars WHERE asset_symbol = $1 AND timestamp > $2 ORDER BY timestamp ASC"#,
-        symbol,
-        timestamp
-    )
-    .fetch_all(postgres_pool)
-    .await
-    .unwrap()
+pub async fn upsert_validity(clickhouse_client: &Client, bar_validity: &BarValidity) {
+    let mut insert = clickhouse_client.insert("bars_validity").unwrap();
+    insert.write(bar_validity).await.unwrap();
+    insert.end().await.unwrap();
 }
 
-pub async fn upsert(postgres_pool: &PgPool, bar: &Bar, backfill: bool) -> Bar {
-    let mut bar = bar.clone();
-
-    if backfill
-        && (bar.open.is_none() || bar.high.is_none() || bar.low.is_none() || bar.close.is_none())
-    {
-        let filled_bar = select_not_null_where_symbol_where_timestamp_smaller_than_order_by_timestamp_desc_limit_one(
-            postgres_pool,
-            &bar.asset_symbol,
-            &bar.timestamp,
-        ).await;
-        bar.merge_empty(&filled_bar);
-    }
-
-    query_as!(
-        Bar,
-        r#"INSERT INTO bars (timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
-        ON CONFLICT (timestamp, asset_symbol) DO UPDATE SET open = $3, high = $4, low = $5, close = $6, volume = $7, num_trades = $8, volume_weighted = $9
-        RETURNING timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted"#,
-        bar.timestamp, bar.asset_symbol, bar.open, bar.high, bar.low, bar.close, bar.volume, bar.num_trades, bar.volume_weighted
-    )
-    .fetch_one(postgres_pool)
-    .await
-    .unwrap()
-}
-
-pub async fn insert_or_skip(postgres_pool: &PgPool, bar: &Bar, backfill: bool) {
-    let mut bar = bar.clone();
-
-    if backfill
-        && (bar.open.is_none() || bar.high.is_none() || bar.low.is_none() || bar.close.is_none())
-    {
-        let filled_bar = select_not_null_where_symbol_where_timestamp_smaller_than_order_by_timestamp_desc_limit_one(
-            postgres_pool,
-            &bar.asset_symbol,
-            &bar.timestamp,
-        ).await;
-        bar.merge_empty(&filled_bar);
-    }
-
-    query_as!(
-        Bar,
-        r#"INSERT INTO bars (timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
-        ON CONFLICT (timestamp, asset_symbol) DO NOTHING"#,
-        bar.timestamp, bar.asset_symbol, bar.open, bar.high, bar.low, bar.close, bar.volume, bar.num_trades, bar.volume_weighted
-    )
-    .execute(postgres_pool)
-    .await
-    .unwrap();
-}
-
-pub async fn upsert_batch(postgres_pool: &PgPool, bars: &[Bar], backfill: bool) -> Vec<Bar> {
-    let mut bars = bars.to_vec();
-
-    if bars.is_empty() {
-        return bars;
-    }
-
-    if backfill
-        && (bars[0].open.is_none()
-            || bars[0].high.is_none()
-            || bars[0].low.is_none()
-            || bars[0].close.is_none())
-    {
-        let filled_bar = select_not_null_where_symbol_where_timestamp_smaller_than_order_by_timestamp_desc_limit_one(
-            postgres_pool,
-            &bars[0].asset_symbol,
-            &bars[0].timestamp,
-        ).await;
-        bars[0].merge_empty(&filled_bar);
-    }
-
-    let mut timestamp = Vec::with_capacity(bars.len());
-    let mut asset_symbol = Vec::with_capacity(bars.len());
-    let mut open = Vec::with_capacity(bars.len());
-    let mut high = Vec::with_capacity(bars.len());
-    let mut low = Vec::with_capacity(bars.len());
-    let mut close = Vec::with_capacity(bars.len());
-    let mut volume = Vec::with_capacity(bars.len());
-    let mut num_trades = Vec::with_capacity(bars.len());
-    let mut volume_weighted = Vec::with_capacity(bars.len());
-
-    let mut last_filled_bar = bars[0].clone();
-
-    for mut bar in bars {
-        if backfill {
-            if bar.open.is_none() || bar.high.is_none() || bar.low.is_none() || bar.close.is_none()
-            {
-                bar.merge_empty(&last_filled_bar);
-            } else {
-                last_filled_bar = bar.clone();
-            }
-        }
-
-        timestamp.push(bar.timestamp);
-        asset_symbol.push(bar.asset_symbol.clone());
-        open.push(bar.open);
-        high.push(bar.high);
-        low.push(bar.low);
-        close.push(bar.close);
-        volume.push(bar.volume);
-        num_trades.push(bar.num_trades);
-        volume_weighted.push(bar.volume_weighted);
-    }
-
-    // No type-safety here because of NULLABLE bulk insert
-    query_as::<Postgres, Bar>(
-        r#"INSERT INTO bars (timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted)
-        SELECT * FROM UNNEST($1::timestamptz[], $2::text[], $3::float8[], $4::float8[], $5::float8[], $6::float8[], $7::float8[], $8::int8[], $9::float8[])
-        ON CONFLICT (timestamp, asset_symbol) DO UPDATE SET open = EXCLUDED.open, high = EXCLUDED.high, low = EXCLUDED.low, close = EXCLUDED.close, volume = EXCLUDED.volume, num_trades = EXCLUDED.num_trades, volume_weighted = EXCLUDED.volume_weighted
-        RETURNING timestamp, asset_symbol, open, high, low, close, volume, num_trades, volume_weighted"#,
-    )
-    .bind(timestamp)
-    .bind(asset_symbol)
-    .bind(open)
-    .bind(high)
-    .bind(low)
-    .bind(close)
-    .bind(volume)
-    .bind(num_trades)
-    .bind(volume_weighted)
-    .fetch_all(postgres_pool)
-    .await
-    .unwrap()
+pub async fn delete_validity_where_symbol(clickhouse_client: &Client, symbol: &str) {
+    clickhouse_client
+        .query("DELETE FROM bars_validity WHERE symbol = ?")
+        .bind(symbol)
+        .execute()
+        .await
+        .unwrap();
 }
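Note that nothing here issues an UPDATE: `upsert` and `upsert_validity` are plain inserts, and `select_validity_where_symbol` reads with `FINAL`. That pattern only behaves like an upsert if the table deduplicates by sort key on merge, i.e. something like ClickHouse's ReplacingMergeTree. The schema itself is not part of this commit; a hedged sketch of DDL that would be consistent with these queries:

    use clickhouse::Client;

    // Assumed DDL, inferred from the queries above -- not taken from the commit.
    async fn create_validity_table(client: &Client) -> Result<(), clickhouse::error::Error> {
        client
            .query(
                "CREATE TABLE IF NOT EXISTS bars_validity (
                     symbol    String,
                     time_last DateTime
                 )
                 ENGINE = ReplacingMergeTree(time_last)
                 ORDER BY symbol",
            )
            .execute()
            .await?;

        // With this engine, re-inserting a symbol supersedes the old row once parts
        // merge; SELECT ... FINAL collapses the duplicates at read time.
        Ok(())
    }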
src/main.rs
@@ -11,35 +11,32 @@ mod types;
 use config::Config;
 use dotenv::dotenv;
 use log4rs::config::Deserializers;
-use sqlx::error::BoxDynError;
+use std::error::Error;
 use tokio::{spawn, sync::broadcast};
 use types::{BroadcastMessage, Class};
 
 #[tokio::main]
-async fn main() -> Result<(), BoxDynError> {
+async fn main() -> Result<(), Box<dyn Error>> {
     dotenv().ok();
     log4rs::init_file("log4rs.yaml", Deserializers::default())?;
-    let app_config = Config::arc_from_env().await;
+    let app_config = Config::arc_from_env();
     let mut threads = Vec::new();
 
-    let (asset_broadcast_sender, _) = broadcast::channel::<BroadcastMessage>(100);
+    let (broadcast_sender, _) = broadcast::channel::<BroadcastMessage>(100);
 
     threads.push(spawn(data::market::run(
         app_config.clone(),
         Class::UsEquity,
-        asset_broadcast_sender.clone(),
+        broadcast_sender.clone(),
     )));
 
     threads.push(spawn(data::market::run(
         app_config.clone(),
         Class::Crypto,
-        asset_broadcast_sender.clone(),
+        broadcast_sender.clone(),
     )));
 
-    threads.push(spawn(routes::run(
-        app_config.clone(),
-        asset_broadcast_sender,
-    )));
+    threads.push(spawn(routes::run(app_config.clone(), broadcast_sender)));
 
     for thread in threads {
         thread.await?;
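The single broadcast channel is what lets one `Sender` feed both market-data classes and the HTTP routes: every `subscribe()` call creates an independent receiver that sees all messages sent after it subscribes. A self-contained sketch of that fan-out, independent of the application types:

    use tokio::sync::broadcast;

    #[tokio::main]
    async fn main() {
        // Capacity 100 mirrors the channel created in main().
        let (sender, _) = broadcast::channel::<String>(100);

        let mut us_equity = sender.subscribe();
        let mut crypto = sender.subscribe();

        sender.send("AAPL added".to_string()).unwrap();

        // Each subscriber receives its own copy of every message.
        assert_eq!(us_equity.recv().await.unwrap(), "AAPL added");
        assert_eq!(crypto.recv().await.unwrap(), "AAPL added");
    }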
@@ -1,29 +1,28 @@
-use crate::config::{
-    Config, ALPACA_ASSET_API_URL, ALPACA_CRYPTO_DATA_URL, ALPACA_STOCK_DATA_URL,
-    ALPACA_TIMESTAMP_FORMAT,
-};
+use crate::config::{Config, ALPACA_ASSET_API_URL};
 use crate::database;
-use crate::types::Class;
-use crate::types::{api::incoming, asset, Asset, BroadcastMessage, Status};
-use axum::{extract::Path, http::StatusCode, Extension, Json};
+use crate::types::{
+    api::incoming::{self, asset::Status},
+    asset, Asset, BroadcastMessage,
+};
+use axum::{extract::Path, Extension, Json};
+use http::StatusCode;
 use log::info;
 use serde::Deserialize;
 use std::sync::Arc;
-use time::OffsetDateTime;
 use tokio::sync::broadcast::Sender;
 
-pub async fn get_all(
+pub async fn get(
     Extension(app_config): Extension<Arc<Config>>,
 ) -> Result<(StatusCode, Json<Vec<Asset>>), StatusCode> {
-    let assets = database::assets::select(&app_config.postgres_pool).await;
+    let assets = database::assets::select(&app_config.clickhouse_client).await;
     Ok((StatusCode::OK, Json(assets)))
 }
 
-pub async fn get(
+pub async fn get_where_symbol(
     Extension(app_config): Extension<Arc<Config>>,
     Path(symbol): Path<String>,
 ) -> Result<(StatusCode, Json<Asset>), StatusCode> {
-    let asset = database::assets::select_where_symbol(&app_config.postgres_pool, &symbol).await;
+    let asset = database::assets::select_where_symbol(&app_config.clickhouse_client, &symbol).await;
     asset.map_or(Err(StatusCode::NOT_FOUND), |asset| {
         Ok((StatusCode::OK, Json(asset)))
     })
@@ -32,15 +31,14 @@ pub async fn get(
 #[derive(Deserialize)]
 pub struct AddAssetRequest {
     symbol: String,
-    trading: Option<bool>,
 }
 
 pub async fn add(
     Extension(app_config): Extension<Arc<Config>>,
-    Extension(asset_broadcast_sender): Extension<Sender<BroadcastMessage>>,
+    Extension(broadcast_sender): Extension<Sender<BroadcastMessage>>,
     Json(request): Json<AddAssetRequest>,
 ) -> Result<(StatusCode, Json<Asset>), StatusCode> {
-    if database::assets::select_where_symbol(&app_config.postgres_pool, &request.symbol)
+    if database::assets::select_where_symbol(&app_config.clickhouse_client, &request.symbol)
         .await
         .is_some()
     {
@@ -53,66 +51,25 @@ pub async fn add(
         .get(&format!("{}/{}", ALPACA_ASSET_API_URL, request.symbol))
         .send()
         .await
-        .map_err(|e| match e.status() {
-            Some(StatusCode::NOT_FOUND) => StatusCode::NOT_FOUND,
-            _ => panic!(),
-        })?;
+        .map_err(|e| {
+            if e.status() == Some(reqwest::StatusCode::NOT_FOUND) {
+                StatusCode::NOT_FOUND
+            } else {
+                panic!()
+            }
+        })
+        .unwrap();
 
-    let asset = asset.json::<incoming::Asset>().await.unwrap();
+    let asset = asset.json::<incoming::asset::Asset>().await.unwrap();
 
     if asset.status != Status::Active || !asset.tradable || !asset.fractionable {
         return Err(StatusCode::FORBIDDEN);
     }
 
-    let mut earliest_bar_request = app_config
-        .alpaca_client
-        .get(match asset.class {
-            Class::UsEquity => ALPACA_STOCK_DATA_URL,
-            Class::Crypto => ALPACA_CRYPTO_DATA_URL,
-        })
-        .query(&[
-            ("symbols", &asset.symbol),
-            ("timeframe", &String::from("1Min")),
-            (
-                "start",
-                &OffsetDateTime::UNIX_EPOCH
-                    .format(ALPACA_TIMESTAMP_FORMAT)
-                    .unwrap(),
-            ),
-            ("limit", &String::from("1")),
-        ]);
+    let asset = Asset::from(asset);
+    database::assets::insert(&app_config.clickhouse_client, &asset).await;
 
-    if asset.class == Class::UsEquity {
-        earliest_bar_request =
-            earliest_bar_request.query(&[("feed", &app_config.alpaca_source.to_string())]);
-    }
-
-    let earliest_bar = earliest_bar_request
-        .send()
-        .await
-        .unwrap()
-        .json::<incoming::bar::Message>()
-        .await
-        .unwrap();
-
-    let earliest_bar = earliest_bar
-        .bars
-        .get(&asset.symbol)
-        .ok_or(StatusCode::NOT_FOUND)?
-        .as_ref()
-        .ok_or(StatusCode::NOT_FOUND)?
-        .first()
-        .ok_or(StatusCode::NOT_FOUND)?;
-
-    let asset = Asset::from((
-        asset,
-        request.trading.unwrap_or(false),
-        earliest_bar.timestamp,
-    ));
-
-    database::assets::insert(&app_config.postgres_pool, &asset).await;
-
-    asset_broadcast_sender
+    broadcast_sender
         .send(BroadcastMessage::Asset(asset::BroadcastMessage::Added(
             asset.clone(),
         )))
@@ -122,50 +79,24 @@ pub async fn add(
     Ok((StatusCode::CREATED, Json(asset)))
 }
 
-#[allow(dead_code)]
-#[derive(Deserialize)]
-pub struct UpdateAssetRequest {
-    trading: bool,
-}
-
-pub async fn update(
-    Extension(app_config): Extension<Arc<Config>>,
-    Extension(asset_broadcast_sender): Extension<Sender<BroadcastMessage>>,
-    Path(symbol): Path<String>,
-    Json(request): Json<UpdateAssetRequest>,
-) -> Result<(StatusCode, Json<Asset>), StatusCode> {
-    let asset = database::assets::update_trading_where_symbol(
-        &app_config.postgres_pool,
-        &symbol,
-        &request.trading,
-    )
-    .await;
-
-    asset.map_or(Err(StatusCode::NOT_FOUND), |asset| {
-        asset_broadcast_sender
-            .send(BroadcastMessage::Asset(asset::BroadcastMessage::Updated(
-                asset.clone(),
-            )))
-            .unwrap();
-        info!("Updated asset {}.", symbol);
-        Ok((StatusCode::OK, Json(asset)))
-    })
-}
-
 pub async fn delete(
     Extension(app_config): Extension<Arc<Config>>,
-    Extension(asset_broadcast_sender): Extension<Sender<BroadcastMessage>>,
+    Extension(broadcast_sender): Extension<Sender<BroadcastMessage>>,
     Path(symbol): Path<String>,
 ) -> Result<StatusCode, StatusCode> {
-    let asset = database::assets::delete_where_symbol(&app_config.postgres_pool, &symbol).await;
+    let asset = database::assets::select_where_symbol(&app_config.clickhouse_client, &symbol)
+        .await
+        .ok_or(StatusCode::NOT_FOUND)
+        .unwrap();
 
-    asset.map_or(Err(StatusCode::NOT_FOUND), |asset| {
-        asset_broadcast_sender
-            .send(BroadcastMessage::Asset(asset::BroadcastMessage::Deleted(
-                asset,
-            )))
-            .unwrap();
-        info!("Deleted asset {}.", symbol);
-        Ok(StatusCode::NO_CONTENT)
-    })
+    broadcast_sender
+        .send(BroadcastMessage::Asset(asset::BroadcastMessage::Deleted(
+            asset,
+        )))
+        .unwrap();
 
+    database::assets::delete_where_symbol(&app_config.clickhouse_client, &symbol).await;
 
+    info!("Deleted asset {}.", symbol);
+    Ok(StatusCode::NO_CONTENT)
 }
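The reshuffled `delete` handler follows from the storage change: the Postgres version fetched and removed in one `DELETE ... RETURNING` statement, but ClickHouse's DELETE has no RETURNING clause, so the new code must select first, broadcast while it still holds the row, and only then delete. A toy, self-contained stand-in for that select-then-delete shape:

    use std::collections::HashMap;

    // Fetch the row first, act on it, then delete -- the order forced by a
    // store without DELETE ... RETURNING.
    fn delete_asset(store: &mut HashMap<String, String>, symbol: &str) -> Option<String> {
        let asset = store.get(symbol).cloned()?; // select_where_symbol
        // ... broadcast Deleted(asset) here, while the data is still in hand ...
        store.remove(symbol); // delete_where_symbol
        Some(asset)
    }

    fn main() {
        let mut store = HashMap::from([("AAPL".to_string(), "Apple".to_string())]);
        assert_eq!(delete_asset(&mut store, "AAPL").as_deref(), Some("Apple"));
        assert!(store.is_empty());
    }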
@@ -1,30 +1,26 @@
 use crate::{config::Config, types::BroadcastMessage};
 use axum::{
     routing::{delete, get, post},
-    Extension, Router, Server,
+    serve, Extension, Router,
 };
 use log::info;
 use std::{net::SocketAddr, sync::Arc};
-use tokio::sync::broadcast::Sender;
+use tokio::{net::TcpListener, sync::broadcast::Sender};
 
 pub mod assets;
 
-pub async fn run(app_config: Arc<Config>, asset_broadcast_sender: Sender<BroadcastMessage>) {
+pub async fn run(app_config: Arc<Config>, broadcast_sender: Sender<BroadcastMessage>) {
     let app = Router::new()
-        .route("/assets", get(assets::get_all))
-        .route("/assets/:symbol", get(assets::get))
+        .route("/assets", get(assets::get))
+        .route("/assets/:symbol", get(assets::get_where_symbol))
         .route("/assets", post(assets::add))
         .route("/assets/:symbol", post(assets::update))
         .route("/assets/:symbol", delete(assets::delete))
         .layer(Extension(app_config))
-        .layer(Extension(asset_broadcast_sender));
+        .layer(Extension(broadcast_sender));
 
     let addr = SocketAddr::from(([0, 0, 0, 0], 7878));
+    let listener = TcpListener::bind(addr).await.unwrap();
     info!("Listening on {}.", addr);
-    Server::bind(&addr)
-        .serve(app.into_make_service())
-        .await
-        .unwrap();
+
+    serve(listener, app).await.unwrap();
+    unreachable!()
 }
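The `Server::bind` → `serve` change matches axum's 0.7 API, where the bundled `axum::Server` re-export was removed in favour of `axum::serve` over a tokio `TcpListener` you bind yourself. A minimal standalone equivalent:

    use axum::{routing::get, Router};
    use tokio::net::TcpListener;

    #[tokio::main]
    async fn main() {
        let app = Router::new().route("/health", get(|| async { "ok" }));

        // axum 0.7 style: bind the listener yourself, then hand it to serve().
        let listener = TcpListener::bind("0.0.0.0:7878").await.unwrap();
        axum::serve(listener, app).await.unwrap();
    }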
src/time.rs
@@ -1,7 +1,6 @@
 use std::time::Duration;
 use time::OffsetDateTime;
 
-pub const THIRTY_SECONDS: Duration = Duration::from_secs(30);
 pub const ONE_MINUTE: Duration = Duration::from_secs(60);
 
 pub fn last_minute() -> OffsetDateTime {
@@ -13,18 +12,8 @@ pub fn next_minute() -> OffsetDateTime {
     last_minute() + ONE_MINUTE
 }
 
-pub fn last_30s() -> OffsetDateTime {
-    let now_timestamp = OffsetDateTime::now_utc().unix_timestamp();
-    OffsetDateTime::from_unix_timestamp(now_timestamp - now_timestamp % 30).unwrap()
-}
-
-pub fn next_30s() -> OffsetDateTime {
-    last_30s() + THIRTY_SECONDS
-}
-
 pub fn duration_until(time: OffsetDateTime) -> Duration {
-    let now = OffsetDateTime::now_utc();
-    let duration = time - now;
+    let duration = time - OffsetDateTime::now_utc();
 
     if duration.is_positive() {
         duration.unsigned_abs()
@@ -1,9 +1,51 @@
 #![allow(clippy::struct_excessive_bools)]
 
-use crate::types::{Class, Exchange, Status};
-use serde::Deserialize;
+use crate::types::api::impl_from_enum;
+use serde::{Deserialize, Serialize};
 
-#[derive(Deserialize)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum Class {
+    UsEquity,
+    Crypto,
+}
+
+impl_from_enum!(crate::types::Class, Class, UsEquity, Crypto);
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "UPPERCASE")]
+pub enum Exchange {
+    Amex,
+    Arca,
+    Bats,
+    Nyse,
+    Nasdaq,
+    Nysearca,
+    Otc,
+    Crypto,
+}
+
+impl_from_enum!(
+    crate::types::Exchange,
+    Exchange,
+    Amex,
+    Arca,
+    Bats,
+    Nyse,
+    Nasdaq,
+    Nysearca,
+    Otc,
+    Crypto
+);
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub enum Status {
+    Active,
+    Inactive,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct Asset {
     pub id: String,
     pub class: Class,
@@ -19,3 +61,14 @@ pub struct Asset {
     pub maintenance_margin_requirement: Option<f32>,
     pub attributes: Option<Vec<String>>,
 }
+
+impl From<Asset> for crate::types::Asset {
+    fn from(item: Asset) -> Self {
+        Self {
+            symbol: item.symbol,
+            class: item.class.into(),
+            exchange: item.exchange.into(),
+            time_added: time::OffsetDateTime::now_utc(),
+        }
+    }
+}
@@ -1,12 +1,12 @@
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
 use time::OffsetDateTime;
 
-#[derive(Deserialize)]
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct Bar {
     #[serde(rename = "t")]
     #[serde(with = "time::serde::rfc3339")]
-    pub timestamp: OffsetDateTime,
+    pub time: OffsetDateTime,
     #[serde(rename = "o")]
     pub open: f64,
     #[serde(rename = "h")]
@@ -16,14 +16,30 @@ pub struct Bar {
     #[serde(rename = "c")]
     pub close: f64,
     #[serde(rename = "v")]
-    pub volume: f64,
+    pub volume: i64,
     #[serde(rename = "n")]
-    pub num_trades: i64,
+    pub trades: i64,
     #[serde(rename = "vw")]
-    pub volume_weighted: f64,
+    pub vwap: f64,
 }
 
-#[derive(Deserialize)]
+impl From<(Bar, String)> for crate::types::Bar {
+    fn from((bar, symbol): (Bar, String)) -> Self {
+        Self {
+            time: bar.time,
+            symbol,
+            open: bar.open,
+            high: bar.high,
+            low: bar.low,
+            close: bar.close,
+            volume: bar.volume,
+            trades: bar.trades,
+            vwap: bar.vwap,
+        }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct Message {
     pub bars: HashMap<String, Option<Vec<Bar>>>,
     pub next_page_token: Option<String>,
@@ -1,29 +0,0 @@
-use serde::{Deserialize, Deserializer};
-use time::{macros::format_description, Date, Time};
-
-#[derive(Deserialize)]
-pub struct CalendarDate {
-    #[serde(deserialize_with = "deserialize_date")]
-    pub date: Date,
-    #[serde(deserialize_with = "deserialize_time")]
-    pub open: Time,
-    #[serde(deserialize_with = "deserialize_time")]
-    pub close: Time,
-}
-
-fn deserialize_date<'de, D>(deserializer: D) -> Result<Date, D::Error>
-where
-    D: Deserializer<'de>,
-{
-    let date_str = String::deserialize(deserializer)?;
-    Date::parse(&date_str, format_description!("[year]-[month]-[day]"))
-        .map_err(serde::de::Error::custom)
-}
-
-fn deserialize_time<'de, D>(deserializer: D) -> Result<Time, D::Error>
-where
-    D: Deserializer<'de>,
-{
-    let time_str = String::deserialize(deserializer)?;
-    Time::parse(&time_str, format_description!("[hour]:[minute]")).map_err(serde::de::Error::custom)
-}
@@ -1,7 +1,2 @@
 pub mod asset;
 pub mod bar;
-pub mod calendar_date;
-
-pub use asset::Asset;
-pub use bar::Bar;
-pub use calendar_date::CalendarDate;
@@ -1 +1,23 @@
 pub mod incoming;
+
+macro_rules! impl_from_enum {
+    ($source:ty, $target:ty, $( $variant:ident ),* ) => {
+        impl From<$source> for $target {
+            fn from(item: $source) -> Self {
+                match item {
+                    $( <$source>::$variant => <$target>::$variant, )*
+                }
+            }
+        }
+
+        impl From<$target> for $source {
+            fn from(item: $target) -> Self {
+                match item {
+                    $( <$target>::$variant => <$source>::$variant, )*
+                }
+            }
+        }
+    };
+}
+
+use impl_from_enum;
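For one pair of enums, the macro expands to the two mirror-image `From` impls it replaces writing by hand. Expanded for `Class` (the first invocation in the asset module), it is equivalent to:

    // impl_from_enum!(crate::types::Class, Class, UsEquity, Crypto); expands to:
    impl From<crate::types::Class> for Class {
        fn from(item: crate::types::Class) -> Self {
            match item {
                crate::types::Class::UsEquity => Class::UsEquity,
                crate::types::Class::Crypto => Class::Crypto,
            }
        }
    }

    impl From<Class> for crate::types::Class {
        fn from(item: Class) -> Self {
            match item {
                Class::UsEquity => crate::types::Class::UsEquity,
                Class::Crypto => crate::types::Class::Crypto,
            }
        }
    }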
@@ -1,86 +1,39 @@
-use super::api;
+use clickhouse::Row;
 use serde::{Deserialize, Serialize};
-use sqlx::{FromRow, Type};
+use serde_repr::{Deserialize_repr, Serialize_repr};
 use time::OffsetDateTime;
 
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, Type)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize_repr, Deserialize_repr)]
+#[repr(u8)]
 pub enum Class {
-    #[sqlx(rename = "us_equity")]
-    #[serde(rename = "us_equity")]
-    UsEquity,
-    #[sqlx(rename = "crypto")]
-    #[serde(rename = "crypto")]
-    Crypto,
+    UsEquity = 1,
+    Crypto = 2,
 }
 
-#[derive(Clone, Copy, Debug, Serialize, Deserialize, Type)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize_repr, Deserialize_repr)]
+#[repr(u8)]
 pub enum Exchange {
-    #[sqlx(rename = "AMEX")]
-    #[serde(rename = "AMEX")]
-    Amex,
-    #[sqlx(rename = "ARCA")]
-    #[serde(rename = "ARCA")]
-    Arca,
-    #[sqlx(rename = "BATS")]
-    #[serde(rename = "BATS")]
-    Bats,
-    #[sqlx(rename = "NYSE")]
-    #[serde(rename = "NYSE")]
-    Nyse,
-    #[sqlx(rename = "NASDAQ")]
-    #[serde(rename = "NASDAQ")]
-    Nasdaq,
-    #[sqlx(rename = "NYSEARCA")]
-    #[serde(rename = "NYSEARCA")]
-    Nysearca,
-    #[sqlx(rename = "OTC")]
-    #[serde(rename = "OTC")]
-    Otc,
-    #[sqlx(rename = "CRYPTO")]
-    #[serde(rename = "CRYPTO")]
-    Crypto,
+    Amex = 1,
+    Arca = 2,
+    Bats = 3,
+    Nyse = 4,
+    Nasdaq = 5,
+    Nysearca = 6,
+    Otc = 7,
+    Crypto = 8,
 }
 
-#[derive(PartialEq, Eq, Deserialize, Type)]
-pub enum Status {
-    #[sqlx(rename = "active")]
-    #[serde(rename = "active")]
-    Active,
-    #[sqlx(rename = "inactive")]
-    #[serde(rename = "inactive")]
-    Inactive,
-}
-
-#[derive(Clone, Debug, FromRow, Serialize)]
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Row)]
 pub struct Asset {
     pub symbol: String,
     pub class: Class,
     pub exchange: Exchange,
-    pub trading: bool,
-    pub timestamp_added: OffsetDateTime,
-    pub timestamp_first: OffsetDateTime,
-    pub timestamp_last: OffsetDateTime,
+    #[serde(with = "clickhouse::serde::time::datetime")]
+    pub time_added: OffsetDateTime,
 }
 
-impl From<(api::incoming::Asset, bool, OffsetDateTime)> for Asset {
-    fn from(
-        (asset, trading, timestamp_first): (api::incoming::Asset, bool, OffsetDateTime),
-    ) -> Self {
-        Self {
-            symbol: asset.symbol,
-            class: asset.class,
-            exchange: asset.exchange,
-            trading,
-            timestamp_added: OffsetDateTime::now_utc(),
-            timestamp_first,
-            timestamp_last: timestamp_first,
-        }
-    }
-}
-
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum BroadcastMessage {
     Added(Asset),
     Updated(Asset),
     Deleted(Asset),
 }
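The switch from string-renamed variants to `serde_repr` with explicit discriminants means `Class` and `Exchange` now serialize as the integers themselves, presumably paired with `Enum8`/`UInt8` columns on the ClickHouse side. A small check of that behaviour (serde_json is used only to make the output visible):

    use serde_repr::{Deserialize_repr, Serialize_repr};

    #[derive(Debug, PartialEq, Serialize_repr, Deserialize_repr)]
    #[repr(u8)]
    enum Class {
        UsEquity = 1,
        Crypto = 2,
    }

    fn main() {
        // Serializes as the discriminant, not as a string name.
        assert_eq!(serde_json::to_string(&Class::Crypto).unwrap(), "2");
        let back: Class = serde_json::from_str("1").unwrap();
        assert_eq!(back, Class::UsEquity);
    }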
@@ -1,72 +1,44 @@
-use super::{api, websocket};
-use serde::Serialize;
-use sqlx::FromRow;
+#![allow(clippy::module_name_repetitions)]
+
+use clickhouse::Row;
+use serde::{Deserialize, Serialize};
 use time::OffsetDateTime;
 
-#[derive(Clone, Debug, FromRow, Serialize)]
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Row)]
 pub struct Bar {
-    pub timestamp: OffsetDateTime,
-    pub asset_symbol: String,
-    pub open: Option<f64>,
-    pub high: Option<f64>,
-    pub low: Option<f64>,
-    pub close: Option<f64>,
-    pub volume: f64,
-    pub num_trades: i64,
-    pub volume_weighted: f64,
+    #[serde(with = "clickhouse::serde::time::datetime")]
+    pub time: OffsetDateTime,
+    pub symbol: String,
+    pub open: f64,
+    pub high: f64,
+    pub low: f64,
+    pub close: f64,
+    pub volume: i64,
+    pub trades: i64,
+    pub vwap: f64,
 }
 
-impl Bar {
-    pub const fn empty(timestamp: OffsetDateTime, asset_symbol: String) -> Self {
-        Self {
-            timestamp,
-            asset_symbol,
-            open: None,
-            high: None,
-            low: None,
-            close: None,
-            volume: 0.0,
-            num_trades: 0,
-            volume_weighted: 0.0,
-        }
-    }
-
-    pub fn merge_empty(&mut self, other: &Self) {
-        self.open = other.open;
-        self.high = other.high;
-        self.low = other.low;
-        self.close = other.close;
-    }
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Row)]
+pub struct BarValidity {
+    pub symbol: String,
+    #[serde(with = "clickhouse::serde::time::datetime")]
+    pub time_last: OffsetDateTime,
 }
 
-impl From<websocket::data::incoming::bar::Message> for Bar {
-    fn from(bar_message: websocket::data::incoming::bar::Message) -> Self {
+impl BarValidity {
+    pub const fn none(symbol: String) -> Self {
         Self {
-            timestamp: bar_message.timestamp,
-            asset_symbol: bar_message.symbol,
-            open: Some(bar_message.open),
-            high: Some(bar_message.high),
-            low: Some(bar_message.low),
-            close: Some(bar_message.close),
-            volume: bar_message.volume,
-            num_trades: bar_message.num_trades,
-            volume_weighted: bar_message.volume_weighted,
+            symbol,
+            time_last: OffsetDateTime::UNIX_EPOCH,
         }
     }
 }
 
-impl From<(api::incoming::Bar, String)> for Bar {
-    fn from((bar, asset_symbol): (api::incoming::Bar, String)) -> Self {
+impl From<Bar> for BarValidity {
+    fn from(bar: Bar) -> Self {
         Self {
-            timestamp: bar.timestamp,
-            asset_symbol,
-            open: Some(bar.open),
-            high: Some(bar.high),
-            low: Some(bar.low),
-            close: Some(bar.close),
-            volume: bar.volume,
-            num_trades: bar.num_trades,
-            volume_weighted: bar.volume_weighted,
+            symbol: bar.symbol,
+            time_last: bar.time,
         }
     }
 }
@@ -4,11 +4,11 @@ pub mod bar;
 pub mod source;
 pub mod websocket;
 
-pub use asset::{Asset, Class, Exchange, Status};
-pub use bar::Bar;
+pub use asset::{Asset, Class, Exchange};
+pub use bar::{Bar, BarValidity};
 pub use source::Source;
 
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum BroadcastMessage {
     Asset(asset::BroadcastMessage),
 }
@@ -3,7 +3,7 @@ use std::{
     str::FromStr,
 };
 
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum Source {
     Iex,
     Sip,
@@ -1,11 +1,11 @@
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
 use time::OffsetDateTime;
 
-#[derive(PartialEq, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct Message {
     #[serde(rename = "t")]
     #[serde(with = "time::serde::rfc3339")]
-    pub timestamp: OffsetDateTime,
+    pub time: OffsetDateTime,
     #[serde(rename = "S")]
     pub symbol: String,
     #[serde(rename = "o")]
@@ -17,9 +17,25 @@ pub struct Message {
     #[serde(rename = "c")]
     pub close: f64,
     #[serde(rename = "v")]
-    pub volume: f64,
+    pub volume: i64,
     #[serde(rename = "n")]
-    pub num_trades: i64,
+    pub trades: i64,
     #[serde(rename = "vw")]
-    pub volume_weighted: f64,
+    pub vwap: f64,
 }
+
+impl From<Message> for crate::types::Bar {
+    fn from(bar: Message) -> Self {
+        Self {
+            time: bar.time,
+            symbol: bar.symbol,
+            open: bar.open,
+            high: bar.high,
+            low: bar.low,
+            close: bar.close,
+            volume: bar.volume,
+            trades: bar.trades,
+            vwap: bar.vwap,
+        }
+    }
+}
@@ -2,9 +2,9 @@ pub mod bar;
 pub mod subscription;
 pub mod success;
 
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
 
-#[derive(PartialEq, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 #[serde(tag = "T")]
 pub enum Message {
     #[serde(rename = "success")]
@@ -1,17 +1,15 @@
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
 
-#[derive(PartialEq, Eq, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
 pub struct Message {
     pub trades: Vec<String>,
     pub quotes: Vec<String>,
     pub bars: Vec<String>,
-    #[serde(rename = "updatedBars")]
     pub updated_bars: Vec<String>,
-    #[serde(rename = "dailyBars")]
     pub daily_bars: Vec<String>,
     pub orderbooks: Option<Vec<String>>,
     pub statuses: Option<Vec<String>>,
     pub lulds: Option<Vec<String>>,
-    #[serde(rename = "cancelErrors")]
     pub cancel_errors: Option<Vec<String>>,
 }
@@ -1,14 +1,9 @@
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
 
-#[derive(PartialEq, Eq, Deserialize)]
-pub enum MessageType {
-    #[serde(rename = "connected")]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(tag = "msg")]
+#[serde(rename_all = "camelCase")]
+pub enum Message {
     Connected,
-    #[serde(rename = "authenticated")]
     Authenticated,
 }
-
-#[derive(PartialEq, Eq, Deserialize)]
-pub struct Message {
-    pub msg: MessageType,
-}
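Collapsing the old `Message { msg: MessageType }` wrapper into a single tagged unit enum works because serde's internal tagging can match on the `msg` field directly; the wire format the old code parsed is unchanged. A quick check of the new shape:

    use serde::{Deserialize, Serialize};

    #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
    #[serde(tag = "msg")]
    #[serde(rename_all = "camelCase")]
    enum Message {
        Connected,
        Authenticated,
    }

    fn main() {
        // The payload inside a {"T":"success", ...} frame parses straight to a variant.
        let msg: Message = serde_json::from_str(r#"{"msg":"connected"}"#).unwrap();
        assert_eq!(msg, Message::Connected);
    }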
@@ -5,11 +5,9 @@ use serde::Serialize;
 
 #[derive(Serialize)]
 #[serde(tag = "action")]
+#[serde(rename_all = "camelCase")]
 pub enum Message {
-    #[serde(rename = "auth")]
     Auth(auth::Message),
-    #[serde(rename = "subscribe")]
     Subscribe(subscribe::Message),
-    #[serde(rename = "unsubscribe")]
     Unsubscribe(subscribe::Message),
 }
@@ -1,9 +1,9 @@
 use serde::Serialize;
 
 #[derive(Serialize)]
+#[serde(rename_all = "camelCase")]
 pub struct Message {
     bars: Vec<String>,
-    #[serde(rename = "updatedBars")]
     updated_bars: Vec<String>,
 }