Remove asset_status thread

Signed-off-by: Nikolaos Karaolidis <nick@karaolidis.com>
2024-02-07 20:40:11 +00:00
parent 85eef2bf0b
commit 52e88f4bc9
23 changed files with 796 additions and 774 deletions
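The dedicated asset_status thread (and the Guard it shared) is removed; routes now hand the data thread a single threads::data::Message of (symbol, Class) pairs, and the data thread fans that out to the per-feed websocket and backfill channels, acknowledging completion over a oneshot channel. Below is a minimal, self-contained sketch of that request/acknowledge handshake — a hypothetical Request type stands in for the crate's Message, and it assumes tokio with the rt, macros, and sync features:

use tokio::sync::{mpsc, oneshot};

// Hypothetical stand-in for threads::data::Message: a payload plus a
// oneshot sender that the worker uses to acknowledge completion.
struct Request {
    symbols: Vec<String>,
    response: oneshot::Sender<()>,
}

#[tokio::main]
async fn main() {
    let (sender, mut receiver) = mpsc::channel::<Request>(100);

    // Worker task: do the subscribe/backfill work, then acknowledge.
    tokio::spawn(async move {
        while let Some(request) = receiver.recv().await {
            println!("processing {:?}", request.symbols);
            request.response.send(()).unwrap();
        }
    });

    // Caller side, mirroring Message::new + send + await in the diff.
    let (ack_sender, ack_receiver) = oneshot::channel();
    sender
        .send(Request { symbols: vec!["AAPL".into()], response: ack_sender })
        .await
        .unwrap();
    ack_receiver.await.unwrap(); // resolves once the worker is done
}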

View File

@@ -23,27 +23,27 @@ async fn main() {
cleanup(&app_config.clickhouse_client).await;
let (asset_status_sender, asset_status_receiver) =
mpsc::channel::<threads::data::asset_status::Message>(100);
let (data_sender, data_receiver) = mpsc::channel::<threads::data::Message>(100);
let (clock_sender, clock_receiver) = mpsc::channel::<threads::clock::Message>(1);
spawn(threads::data::run(
app_config.clone(),
asset_status_receiver,
data_receiver,
clock_receiver,
));
spawn(threads::clock::run(app_config.clone(), clock_sender));
let assets = database::assets::select(&app_config.clickhouse_client).await;
let (asset_status_message, asset_status_receiver) =
threads::data::asset_status::Message::new(threads::data::asset_status::Action::Add, assets);
asset_status_sender
.send(asset_status_message)
let assets = database::assets::select(&app_config.clickhouse_client)
.await
.unwrap();
asset_status_receiver.await.unwrap();
.into_iter()
.map(|asset| (asset.symbol, asset.class))
.collect::<Vec<_>>();
routes::run(app_config, asset_status_sender).await;
let (data_message, data_receiver) =
threads::data::Message::new(threads::data::Action::Add, assets);
data_sender.send(data_message).await.unwrap();
data_receiver.await.unwrap();
routes::run(app_config, data_sender).await;
}

View File

@@ -1,14 +1,9 @@
use crate::{
config::{Config, ALPACA_ASSET_API_URL},
config::Config,
database, threads,
types::{
alpaca::api::incoming::{self, asset::Status},
Asset,
},
types::{alpaca::api::incoming, Asset},
};
use axum::{extract::Path, Extension, Json};
use backoff::{future::retry, ExponentialBackoff};
use core::panic;
use http::StatusCode;
use serde::Deserialize;
use std::sync::Arc;
@@ -38,9 +33,9 @@ pub struct AddAssetRequest {
pub async fn add(
Extension(app_config): Extension<Arc<Config>>,
Extension(asset_status_sender): Extension<mpsc::Sender<threads::data::asset_status::Message>>,
Extension(data_sender): Extension<mpsc::Sender<threads::data::Message>>,
Json(request): Json<AddAssetRequest>,
) -> Result<(StatusCode, Json<Asset>), StatusCode> {
) -> Result<StatusCode, StatusCode> {
if database::assets::select_where_symbol(&app_config.clickhouse_client, &request.symbol)
.await
.is_some()
@@ -48,66 +43,38 @@ pub async fn add(
return Err(StatusCode::CONFLICT);
}
let asset = retry(ExponentialBackoff::default(), || async {
app_config.alpaca_rate_limit.until_ready().await;
app_config
.alpaca_client
.get(&format!("{}/{}", ALPACA_ASSET_API_URL, request.symbol))
.send()
.await?
.error_for_status()
.map_err(|e| match e.status() {
Some(reqwest::StatusCode::NOT_FOUND) => backoff::Error::Permanent(e),
_ => e.into(),
})?
.json::<incoming::asset::Asset>()
.await
.map_err(backoff::Error::Permanent)
})
.await
.map_err(|e| match e.status() {
Some(reqwest::StatusCode::NOT_FOUND) => StatusCode::NOT_FOUND,
_ => panic!("Unexpected error: {}.", e),
})?;
if asset.status != Status::Active || !asset.tradable || !asset.fractionable {
let asset = incoming::asset::get_by_symbol(&app_config, &request.symbol).await?;
if !asset.tradable || !asset.fractionable {
return Err(StatusCode::FORBIDDEN);
}
let asset = Asset::from(asset);
let (asset_status_message, asset_status_response) = threads::data::asset_status::Message::new(
threads::data::asset_status::Action::Add,
vec![asset.clone()],
let (data_message, data_response) = threads::data::Message::new(
threads::data::Action::Add,
vec![(asset.symbol, asset.class)],
);
asset_status_sender
.send(asset_status_message)
.await
.unwrap();
asset_status_response.await.unwrap();
data_sender.send(data_message).await.unwrap();
data_response.await.unwrap();
Ok((StatusCode::CREATED, Json(asset)))
Ok(StatusCode::CREATED)
}
pub async fn delete(
Extension(app_config): Extension<Arc<Config>>,
Extension(asset_status_sender): Extension<mpsc::Sender<threads::data::asset_status::Message>>,
Extension(data_sender): Extension<mpsc::Sender<threads::data::Message>>,
Path(symbol): Path<String>,
) -> Result<StatusCode, StatusCode> {
let asset = database::assets::select_where_symbol(&app_config.clickhouse_client, &symbol)
.await
.ok_or(StatusCode::NOT_FOUND)?;
let (asset_status_message, asset_status_response) = threads::data::asset_status::Message::new(
threads::data::asset_status::Action::Remove,
vec![asset],
let (asset_status_message, asset_status_response) = threads::data::Message::new(
threads::data::Action::Remove,
vec![(asset.symbol, asset.class)],
);
asset_status_sender
.send(asset_status_message)
.await
.unwrap();
data_sender.send(asset_status_message).await.unwrap();
asset_status_response.await.unwrap();
Ok(StatusCode::NO_CONTENT)

View File

@@ -10,10 +10,7 @@ use log::info;
use std::{net::SocketAddr, sync::Arc};
use tokio::{net::TcpListener, sync::mpsc};
pub async fn run(
app_config: Arc<Config>,
asset_status_sender: mpsc::Sender<threads::data::asset_status::Message>,
) {
pub async fn run(app_config: Arc<Config>, data_sender: mpsc::Sender<threads::data::Message>) {
let app = Router::new()
.route("/health", get(health::get))
.route("/assets", get(assets::get))
@@ -21,7 +18,7 @@ pub async fn run(
.route("/assets", post(assets::add))
.route("/assets/:symbol", delete(assets::delete))
.layer(Extension(app_config))
.layer(Extension(asset_status_sender));
.layer(Extension(data_sender));
let addr = SocketAddr::from(([0, 0, 0, 0], 7878));
let listener = TcpListener::bind(addr).await.unwrap();

View File

@@ -1,9 +1,4 @@
use crate::{
config::{Config, ALPACA_CLOCK_API_URL},
types::alpaca,
utils::duration_until,
};
use backoff::{future::retry, ExponentialBackoff};
use crate::{config::Config, types::alpaca, utils::duration_until};
use log::info;
use std::sync::Arc;
use time::OffsetDateTime;
@@ -37,20 +32,7 @@ impl From<alpaca::api::incoming::clock::Clock> for Message {
pub async fn run(app_config: Arc<Config>, sender: mpsc::Sender<Message>) {
loop {
let clock = retry(ExponentialBackoff::default(), || async {
app_config.alpaca_rate_limit.until_ready().await;
app_config
.alpaca_client
.get(ALPACA_CLOCK_API_URL)
.send()
.await?
.error_for_status()?
.json::<alpaca::api::incoming::clock::Clock>()
.await
.map_err(backoff::Error::Permanent)
})
.await
.unwrap();
let clock = alpaca::api::incoming::clock::get(&app_config).await;
let sleep_until = duration_until(if clock.is_open {
info!("Market is open, will close at {}.", clock.next_close);

View File

@@ -1,218 +0,0 @@
use super::{Guard, ThreadType};
use crate::{
config::Config,
database,
types::{alpaca::websocket, Asset},
};
use async_trait::async_trait;
use futures_util::{stream::SplitSink, SinkExt};
use log::info;
use serde_json::to_string;
use std::sync::Arc;
use tokio::{
join,
net::TcpStream,
spawn,
sync::{mpsc, oneshot, Mutex, RwLock},
};
use tokio_tungstenite::{tungstenite, MaybeTlsStream, WebSocketStream};
#[derive(Clone)]
pub enum Action {
Add,
Remove,
}
pub struct Message {
pub action: Action,
pub assets: Vec<Asset>,
pub response: oneshot::Sender<()>,
}
impl Message {
pub fn new(action: Action, assets: Vec<Asset>) -> (Self, oneshot::Receiver<()>) {
let (sender, receiver) = oneshot::channel::<()>();
(
Self {
action,
assets,
response: sender,
},
receiver,
)
}
}
#[async_trait]
pub trait Handler: Send + Sync {
async fn add_assets(&self, assets: Vec<Asset>, symbols: Vec<String>);
async fn remove_assets(&self, assets: Vec<Asset>, symbols: Vec<String>);
}
pub async fn run(
handler: Arc<Box<dyn Handler>>,
guard: Arc<RwLock<Guard>>,
mut receiver: mpsc::Receiver<Message>,
) {
loop {
let message = receiver.recv().await.unwrap();
spawn(handle_asset_status_message(
handler.clone(),
guard.clone(),
message,
));
}
}
#[allow(clippy::significant_drop_tightening)]
async fn handle_asset_status_message(
handler: Arc<Box<dyn Handler>>,
guard: Arc<RwLock<Guard>>,
message: Message,
) {
let symbols = message
.assets
.clone()
.into_iter()
.map(|asset| asset.symbol)
.collect::<Vec<_>>();
match message.action {
Action::Add => {
let mut guard = guard.write().await;
guard.assets.extend(
message
.assets
.iter()
.map(|asset| (asset.clone(), asset.symbol.clone())),
);
guard.pending_subscriptions.extend(message.assets.clone());
handler.add_assets(message.assets, symbols).await;
}
Action::Remove => {
let mut guard = guard.write().await;
guard
.assets
.retain(|asset, _| !message.assets.contains(asset));
guard.pending_unsubscriptions.extend(message.assets.clone());
handler.remove_assets(message.assets, symbols).await;
}
}
message.response.send(()).unwrap();
}
pub fn create_asset_status_handler(
thread_type: ThreadType,
app_config: Arc<Config>,
websocket_sender: Arc<
Mutex<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, tungstenite::Message>>,
>,
) -> Box<dyn Handler> {
match thread_type {
ThreadType::Bars(_) => Box::new(BarsHandler {
app_config,
websocket_sender,
}),
ThreadType::News => Box::new(NewsHandler { websocket_sender }),
}
}
struct BarsHandler {
app_config: Arc<Config>,
websocket_sender:
Arc<Mutex<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, tungstenite::Message>>>,
}
#[async_trait]
impl Handler for BarsHandler {
async fn add_assets(&self, assets: Vec<Asset>, symbols: Vec<String>) {
let database_future =
database::assets::upsert_batch(&self.app_config.clickhouse_client, assets);
let symbols_clone = symbols.clone();
let websocket_future = async move {
self.websocket_sender
.lock()
.await
.send(tungstenite::Message::Text(
to_string(&websocket::outgoing::Message::Subscribe(
websocket::outgoing::subscribe::Message::new_market(symbols_clone),
))
.unwrap(),
))
.await
.unwrap();
};
join!(database_future, websocket_future);
info!("Added {:?}.", symbols);
}
async fn remove_assets(&self, _: Vec<Asset>, symbols: Vec<String>) {
let symbols_clone = symbols.clone();
let database_future = database::assets::delete_where_symbols(
&self.app_config.clickhouse_client,
&symbols_clone,
);
let symbols_clone = symbols.clone();
let websocket_future = async move {
self.websocket_sender
.lock()
.await
.send(tungstenite::Message::Text(
to_string(&websocket::outgoing::Message::Unsubscribe(
websocket::outgoing::subscribe::Message::new_market(symbols_clone),
))
.unwrap(),
))
.await
.unwrap();
};
join!(database_future, websocket_future);
info!("Removed {:?}.", symbols);
}
}
struct NewsHandler {
websocket_sender:
Arc<Mutex<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, tungstenite::Message>>>,
}
#[async_trait]
impl Handler for NewsHandler {
async fn add_assets(&self, _: Vec<Asset>, symbols: Vec<String>) {
self.websocket_sender
.lock()
.await
.send(tungstenite::Message::Text(
to_string(&websocket::outgoing::Message::Subscribe(
websocket::outgoing::subscribe::Message::new_news(symbols),
))
.unwrap(),
))
.await
.unwrap();
}
async fn remove_assets(&self, _: Vec<Asset>, symbols: Vec<String>) {
self.websocket_sender
.lock()
.await
.send(tungstenite::Message::Text(
to_string(&websocket::outgoing::Message::Unsubscribe(
websocket::outgoing::subscribe::Message::new_news(symbols),
))
.unwrap(),
))
.await
.unwrap();
}
}

View File

@@ -1,26 +1,29 @@
use super::{Guard, ThreadType};
use super::ThreadType;
use crate::{
config::{Config, ALPACA_CRYPTO_DATA_URL, ALPACA_NEWS_DATA_URL, ALPACA_STOCK_DATA_URL},
config::{Config, ALPACA_CRYPTO_DATA_URL, ALPACA_STOCK_DATA_URL},
database,
types::{
alpaca::{
self,
api::{self, outgoing::Sort},
Source,
},
news::Prediction,
Asset, Bar, Class, News, Subset,
Bar, Class, News,
},
utils::{
duration_until, last_minute, remove_slash_from_pair, FIFTEEN_MINUTES, ONE_MINUTE,
ONE_SECOND,
},
utils::{duration_until, last_minute, remove_slash_from_pair, FIFTEEN_MINUTES, ONE_MINUTE},
};
use async_trait::async_trait;
use backoff::{future::retry, ExponentialBackoff};
use futures_util::future::join_all;
use log::{error, info, warn};
use log::{info, warn};
use std::{collections::HashMap, sync::Arc};
use time::OffsetDateTime;
use tokio::{
join, spawn,
sync::{mpsc, oneshot, Mutex, RwLock},
sync::{mpsc, oneshot, Mutex},
task::{block_in_place, JoinHandle},
time::sleep,
};
@@ -30,19 +33,28 @@ pub enum Action {
Purge,
}
impl From<super::Action> for Action {
fn from(action: super::Action) -> Self {
match action {
super::Action::Add => Self::Backfill,
super::Action::Remove => Self::Purge,
}
}
}
pub struct Message {
pub action: Action,
pub assets: Subset<Asset>,
pub symbols: Vec<String>,
pub response: oneshot::Sender<()>,
}
impl Message {
pub fn new(action: Action, assets: Subset<Asset>) -> (Self, oneshot::Receiver<()>) {
pub fn new(action: Action, symbols: Vec<String>) -> (Self, oneshot::Receiver<()>) {
let (sender, receiver) = oneshot::channel::<()>();
(
Self {
action,
assets,
symbols,
response: sender,
},
receiver,
@@ -60,58 +72,31 @@ pub trait Handler: Send + Sync {
fn log_string(&self) -> &'static str;
}
pub async fn run(
handler: Arc<Box<dyn Handler>>,
guard: Arc<RwLock<Guard>>,
mut receiver: mpsc::Receiver<Message>,
) {
pub async fn run(handler: Arc<Box<dyn Handler>>, mut receiver: mpsc::Receiver<Message>) {
let backfill_jobs = Arc::new(Mutex::new(HashMap::new()));
loop {
let message = receiver.recv().await.unwrap();
spawn(handle_backfill_message(
handler.clone(),
guard.clone(),
backfill_jobs.clone(),
message,
));
}
}
#[allow(clippy::significant_drop_tightening)]
#[allow(clippy::too_many_lines)]
async fn handle_backfill_message(
handler: Arc<Box<dyn Handler>>,
guard: Arc<RwLock<Guard>>,
backfill_jobs: Arc<Mutex<HashMap<String, JoinHandle<()>>>>,
message: Message,
) {
let guard = guard.read().await;
let mut backfill_jobs = backfill_jobs.lock().await;
let symbols = match message.assets {
Subset::All => guard
.assets
.clone()
.into_iter()
.map(|(_, symbol)| symbol)
.collect(),
Subset::Some(assets) => assets
.into_iter()
.map(|asset| asset.symbol)
.filter(|symbol| match message.action {
Action::Backfill => guard.assets.contains_right(symbol),
Action::Purge => !guard.assets.contains_right(symbol),
})
.collect::<Vec<_>>(),
};
match message.action {
Action::Backfill => {
let log_string = handler.log_string();
for symbol in symbols {
for symbol in message.symbols {
if let Some(job) = backfill_jobs.get(&symbol) {
if !job.is_finished() {
warn!(
@@ -131,7 +116,7 @@ async fn handle_backfill_message(
.await
.as_ref()
.map_or(OffsetDateTime::UNIX_EPOCH, |backfill| {
backfill.time + ONE_MINUTE
backfill.time + ONE_SECOND
});
let fetch_to = last_minute();
@@ -148,7 +133,7 @@ async fn handle_backfill_message(
}
}
Action::Purge => {
for symbol in &symbols {
for symbol in &message.symbols {
if let Some(job) = backfill_jobs.remove(symbol) {
if !job.is_finished() {
job.abort();
@@ -158,8 +143,8 @@ async fn handle_backfill_message(
}
join!(
handler.delete_backfills(&symbols),
handler.delete_data(&symbols)
handler.delete_backfills(&message.symbols),
handler.delete_data(&message.symbols)
);
}
}
@@ -167,25 +152,6 @@ async fn handle_backfill_message(
message.response.send(()).unwrap();
}
pub fn create_backfill_handler(
thread_type: ThreadType,
app_config: Arc<Config>,
) -> Box<dyn Handler> {
match thread_type {
ThreadType::Bars(Class::UsEquity) => Box::new(BarHandler {
app_config,
data_url: ALPACA_STOCK_DATA_URL,
api_query_constructor: us_equity_query_constructor,
}),
ThreadType::Bars(Class::Crypto) => Box::new(BarHandler {
app_config,
data_url: ALPACA_CRYPTO_DATA_URL,
api_query_constructor: crypto_query_constructor,
}),
ThreadType::News => Box::new(NewsHandler { app_config }),
}
}
struct BarHandler {
app_config: Arc<Config>,
data_url: &'static str,
@@ -277,35 +243,19 @@ impl Handler for BarHandler {
let mut next_page_token = None;
loop {
let message = retry(ExponentialBackoff::default(), || async {
self.app_config.alpaca_rate_limit.until_ready().await;
self.app_config
.alpaca_client
.get(self.data_url)
.query(&(self.api_query_constructor)(
&self.app_config,
symbol.clone(),
fetch_from,
fetch_to,
next_page_token.clone(),
))
.send()
.await?
.error_for_status()?
.json::<api::incoming::bar::Message>()
.await
.map_err(backoff::Error::Permanent)
})
let message = alpaca::api::incoming::bar::get_historical(
&self.app_config,
self.data_url,
&(self.api_query_constructor)(
&self.app_config,
symbol.clone(),
fetch_from,
fetch_to,
next_page_token.clone(),
),
)
.await;
let message = match message {
Ok(message) => message,
Err(e) => {
error!("Failed to backfill bars for {}: {}.", symbol, e);
return;
}
};
message.bars.into_iter().for_each(|(symbol, bar_vec)| {
for bar in bar_vec {
bars.push(Bar::from((bar, symbol.clone())));
@@ -381,38 +331,21 @@ impl Handler for NewsHandler {
let mut next_page_token = None;
loop {
let message = retry(ExponentialBackoff::default(), || async {
self.app_config.alpaca_rate_limit.until_ready().await;
self.app_config
.alpaca_client
.get(ALPACA_NEWS_DATA_URL)
.query(&api::outgoing::news::News {
symbols: vec![remove_slash_from_pair(&symbol)],
start: Some(fetch_from),
end: Some(fetch_to),
limit: Some(50),
include_content: Some(true),
exclude_contentless: Some(false),
page_token: next_page_token.clone(),
sort: Some(Sort::Asc),
})
.send()
.await?
.error_for_status()?
.json::<api::incoming::news::Message>()
.await
.map_err(backoff::Error::Permanent)
})
let message = alpaca::api::incoming::news::get_historical(
&self.app_config,
&api::outgoing::news::News {
symbols: vec![remove_slash_from_pair(&symbol)],
start: Some(fetch_from),
end: Some(fetch_to),
limit: Some(50),
include_content: Some(true),
exclude_contentless: Some(false),
page_token: next_page_token.clone(),
sort: Some(Sort::Asc),
},
)
.await;
let message = match message {
Ok(message) => message,
Err(e) => {
error!("Failed to backfill news for {}: {}.", symbol, e);
return;
}
};
message.news.into_iter().for_each(|news_item| {
news.push(News::from(news_item));
});
@@ -480,3 +413,19 @@ impl Handler for NewsHandler {
"news"
}
}
pub fn create_handler(thread_type: ThreadType, app_config: Arc<Config>) -> Box<dyn Handler> {
match thread_type {
ThreadType::Bars(Class::UsEquity) => Box::new(BarHandler {
app_config,
data_url: ALPACA_STOCK_DATA_URL,
api_query_constructor: us_equity_query_constructor,
}),
ThreadType::Bars(Class::Crypto) => Box::new(BarHandler {
app_config,
data_url: ALPACA_CRYPTO_DATA_URL,
api_query_constructor: crypto_query_constructor,
}),
ThreadType::News => Box::new(NewsHandler { app_config }),
}
}

View File

@@ -1,24 +1,50 @@
pub mod asset_status;
pub mod backfill;
pub mod websocket;
use self::asset_status::create_asset_status_handler;
use super::{clock, guard::Guard};
use super::clock;
use crate::{
config::{
Config, ALPACA_CRYPTO_WEBSOCKET_URL, ALPACA_NEWS_WEBSOCKET_URL, ALPACA_STOCK_WEBSOCKET_URL,
},
types::{Class, Subset},
utils::authenticate,
database,
types::{alpaca, Asset, Class},
utils::{authenticate, cleanup},
};
use futures_util::StreamExt;
use futures_util::{future::join_all, StreamExt};
use itertools::{Either, Itertools};
use std::sync::Arc;
use tokio::{
join, select, spawn,
sync::{mpsc, Mutex, RwLock},
sync::{mpsc, oneshot},
};
use tokio_tungstenite::connect_async;
#[derive(Clone)]
pub enum Action {
Add,
Remove,
}
pub struct Message {
pub action: Action,
pub assets: Vec<(String, Class)>,
pub response: oneshot::Sender<()>,
}
impl Message {
pub fn new(action: Action, assets: Vec<(String, Class)>) -> (Self, oneshot::Receiver<()>) {
let (sender, receiver) = oneshot::channel();
(
Self {
action,
assets,
response: sender,
},
receiver,
)
}
}
#[derive(Clone, Copy, Debug)]
pub enum ThreadType {
Bars(Class),
@@ -27,36 +53,39 @@ pub enum ThreadType {
pub async fn run(
app_config: Arc<Config>,
mut asset_receiver: mpsc::Receiver<asset_status::Message>,
mut receiver: mpsc::Receiver<Message>,
mut clock_receiver: mpsc::Receiver<clock::Message>,
) {
let (bars_us_equity_asset_status_sender, bars_us_equity_backfill_sender) =
let (bars_us_equity_websocket_sender, bars_us_equity_backfill_sender) =
init_thread(app_config.clone(), ThreadType::Bars(Class::UsEquity)).await;
let (bars_crypto_asset_status_sender, bars_crypto_backfill_sender) =
let (bars_crypto_websocket_sender, bars_crypto_backfill_sender) =
init_thread(app_config.clone(), ThreadType::Bars(Class::Crypto)).await;
let (news_asset_status_sender, news_backfill_sender) =
let (news_websocket_sender, news_backfill_sender) =
init_thread(app_config.clone(), ThreadType::News).await;
loop {
select! {
Some(asset_message) = asset_receiver.recv() => {
spawn(handle_asset_message(
bars_us_equity_asset_status_sender.clone(),
bars_crypto_asset_status_sender.clone(),
news_asset_status_sender.clone(),
asset_message,
Some(message) = receiver.recv() => {
spawn(handle_message(
app_config.clone(),
bars_us_equity_websocket_sender.clone(),
bars_us_equity_backfill_sender.clone(),
bars_crypto_websocket_sender.clone(),
bars_crypto_backfill_sender.clone(),
news_websocket_sender.clone(),
news_backfill_sender.clone(),
message,
));
}
Some(_) = clock_receiver.recv() => {
spawn(handle_clock_message(
app_config.clone(),
bars_us_equity_backfill_sender.clone(),
bars_crypto_backfill_sender.clone(),
news_backfill_sender.clone(),
));
}
else => {
panic!("Communication channel unexpectedly closed.")
}
else => panic!("Communication channel unexpectedly closed.")
}
}
}
@@ -65,11 +94,9 @@ async fn init_thread(
app_config: Arc<Config>,
thread_type: ThreadType,
) -> (
mpsc::Sender<asset_status::Message>,
mpsc::Sender<websocket::Message>,
mpsc::Sender<backfill::Message>,
) {
let guard = Arc::new(RwLock::new(Guard::new()));
let websocket_url = match thread_type {
ThreadType::Bars(Class::UsEquity) => format!(
"{}/{}",
@@ -80,130 +107,190 @@ async fn init_thread(
};
let (websocket, _) = connect_async(websocket_url).await.unwrap();
let (mut websocket_sender, mut websocket_receiver) = websocket.split();
authenticate(&app_config, &mut websocket_sender, &mut websocket_receiver).await;
let websocket_sender = Arc::new(Mutex::new(websocket_sender));
let (asset_status_sender, asset_status_receiver) = mpsc::channel(100);
spawn(asset_status::run(
Arc::new(create_asset_status_handler(
thread_type,
app_config.clone(),
websocket_sender.clone(),
)),
guard.clone(),
asset_status_receiver,
));
let (mut websocket_sink, mut websocket_stream) = websocket.split();
authenticate(&app_config, &mut websocket_sink, &mut websocket_stream).await;
let (backfill_sender, backfill_receiver) = mpsc::channel(100);
spawn(backfill::run(
Arc::new(backfill::create_backfill_handler(
thread_type,
app_config.clone(),
)),
guard.clone(),
Arc::new(backfill::create_handler(thread_type, app_config.clone())),
backfill_receiver,
));
let (websocket_sender, websocket_receiver) = mpsc::channel(100);
spawn(websocket::run(
app_config.clone(),
guard.clone(),
websocket_sender,
Arc::new(websocket::create_handler(thread_type, app_config.clone())),
websocket_receiver,
backfill_sender.clone(),
websocket_stream,
websocket_sink,
));
(asset_status_sender, backfill_sender)
(websocket_sender, backfill_sender)
}
async fn handle_asset_message(
bars_us_equity_asset_status_sender: mpsc::Sender<asset_status::Message>,
bars_crypto_asset_status_sender: mpsc::Sender<asset_status::Message>,
news_asset_status_sender: mpsc::Sender<asset_status::Message>,
asset_status_message: asset_status::Message,
macro_rules! create_send_await {
($sender:expr, $action:expr, $($contents:expr),*) => {
let (message, receiver) = $action($($contents),*);
$sender.send(message).await.unwrap();
receiver.await.unwrap();
};
}
#[allow(clippy::too_many_arguments)]
async fn handle_message(
app_config: Arc<Config>,
bars_us_equity_websocket_sender: mpsc::Sender<websocket::Message>,
bars_us_equity_backfill_sender: mpsc::Sender<backfill::Message>,
bars_crypto_websocket_sender: mpsc::Sender<websocket::Message>,
bars_crypto_backfill_sender: mpsc::Sender<backfill::Message>,
news_websocket_sender: mpsc::Sender<websocket::Message>,
news_backfill_sender: mpsc::Sender<backfill::Message>,
message: Message,
) {
let (us_equity_assets, crypto_assets): (Vec<_>, Vec<_>) = asset_status_message
let (us_equity_symbols, crypto_symbols): (Vec<_>, Vec<_>) = message
.assets
.clone()
.into_iter()
.partition(|asset| asset.class == Class::UsEquity);
.partition_map(|asset| match asset.1 {
Class::UsEquity => Either::Left(asset.0),
Class::Crypto => Either::Right(asset.0),
});
let symbols = message
.assets
.into_iter()
.map(|(symbol, _)| symbol)
.collect::<Vec<_>>();
let bars_us_equity_future = async {
if !us_equity_assets.is_empty() {
let (bars_us_equity_asset_status_message, bars_us_equity_asset_status_receiver) =
asset_status::Message::new(asset_status_message.action.clone(), us_equity_assets);
bars_us_equity_asset_status_sender
.send(bars_us_equity_asset_status_message)
.await
.unwrap();
bars_us_equity_asset_status_receiver.await.unwrap();
if us_equity_symbols.is_empty() {
return;
}
create_send_await!(
bars_us_equity_websocket_sender,
websocket::Message::new,
message.action.clone().into(),
us_equity_symbols.clone()
);
create_send_await!(
bars_us_equity_backfill_sender,
backfill::Message::new,
message.action.clone().into(),
us_equity_symbols
);
};
let bars_crypto_future = async {
if !crypto_assets.is_empty() {
let (crypto_asset_status_message, crypto_asset_status_receiver) =
asset_status::Message::new(asset_status_message.action.clone(), crypto_assets);
bars_crypto_asset_status_sender
.send(crypto_asset_status_message)
.await
.unwrap();
crypto_asset_status_receiver.await.unwrap();
if crypto_symbols.is_empty() {
return;
}
create_send_await!(
bars_crypto_websocket_sender,
websocket::Message::new,
message.action.clone().into(),
crypto_symbols.clone()
);
create_send_await!(
bars_crypto_backfill_sender,
backfill::Message::new,
message.action.clone().into(),
crypto_symbols
);
};
let news_future = async {
if !asset_status_message.assets.is_empty() {
let (news_asset_status_message, news_asset_status_receiver) =
asset_status::Message::new(
asset_status_message.action.clone(),
asset_status_message.assets,
);
news_asset_status_sender
.send(news_asset_status_message)
.await
.unwrap();
news_asset_status_receiver.await.unwrap();
}
create_send_await!(
news_websocket_sender,
websocket::Message::new,
message.action.clone().into(),
symbols.clone()
);
create_send_await!(
news_backfill_sender,
backfill::Message::new,
message.action.clone().into(),
symbols.clone()
);
};
join!(bars_us_equity_future, bars_crypto_future, news_future);
asset_status_message.response.send(()).unwrap();
match message.action {
Action::Add => {
let assets =
join_all(symbols.into_iter().map(|symbol| {
let app_config = app_config.clone();
async move {
alpaca::api::incoming::asset::get_by_symbol(&app_config, &symbol).await
}
}))
.await
.into_iter()
.map(|result| Asset::from(result.unwrap()))
.collect::<Vec<_>>();
database::assets::upsert_batch(&app_config.clickhouse_client, assets).await;
}
Action::Remove => {
database::assets::delete_where_symbols(&app_config.clickhouse_client, &symbols).await;
}
}
message.response.send(()).unwrap();
}
async fn handle_clock_message(
app_config: Arc<Config>,
bars_us_equity_backfill_sender: mpsc::Sender<backfill::Message>,
bars_crypto_backfill_sender: mpsc::Sender<backfill::Message>,
news_backfill_sender: mpsc::Sender<backfill::Message>,
) {
cleanup(&app_config.clickhouse_client).await;
let assets = database::assets::select(&app_config.clickhouse_client).await;
let (us_equity_symbols, crypto_symbols): (Vec<_>, Vec<_>) = assets
.clone()
.into_iter()
.partition_map(|asset| match asset.class {
Class::UsEquity => Either::Left(asset.symbol),
Class::Crypto => Either::Right(asset.symbol),
});
let symbols = assets
.into_iter()
.map(|asset| asset.symbol)
.collect::<Vec<_>>();
let bars_us_equity_future = async {
let (bars_us_equity_backfill_message, bars_us_equity_backfill_receiver) =
backfill::Message::new(backfill::Action::Backfill, Subset::All);
bars_us_equity_backfill_sender
.send(bars_us_equity_backfill_message)
.await
.unwrap();
bars_us_equity_backfill_receiver.await.unwrap();
create_send_await!(
bars_us_equity_backfill_sender,
backfill::Message::new,
backfill::Action::Backfill,
us_equity_symbols.clone()
);
};
let bars_crypto_future = async {
let (bars_crypto_backfill_message, bars_crypto_backfill_receiver) =
backfill::Message::new(backfill::Action::Backfill, Subset::All);
bars_crypto_backfill_sender
.send(bars_crypto_backfill_message)
.await
.unwrap();
bars_crypto_backfill_receiver.await.unwrap();
create_send_await!(
bars_crypto_backfill_sender,
backfill::Message::new,
backfill::Action::Backfill,
crypto_symbols.clone()
);
};
let news_future = async {
let (news_backfill_message, news_backfill_receiver) =
backfill::Message::new(backfill::Action::Backfill, Subset::All);
news_backfill_sender
.send(news_backfill_message)
.await
.unwrap();
news_backfill_receiver.await.unwrap();
create_send_await!(
news_backfill_sender,
backfill::Message::new,
backfill::Action::Backfill,
symbols
);
};
join!(bars_us_equity_future, bars_crypto_future, news_future);
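For reference, the create_send_await! macro introduced above is shorthand for the construct/send/acknowledge sequence it replaces; the final invocation in handle_clock_message, for instance, expands to roughly:

let (message, receiver) = backfill::Message::new(backfill::Action::Backfill, symbols);
news_backfill_sender.send(message).await.unwrap();
receiver.await.unwrap();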

View File

@@ -1,51 +1,192 @@
use super::{backfill, Guard};
use super::ThreadType;
use crate::{
config::Config,
database,
types::{alpaca::websocket, news::Prediction, Bar, News, Subset},
types::{alpaca::websocket, news::Prediction, Bar, News},
utils::add_slash_to_pair,
};
use async_trait::async_trait;
use futures_util::{
future::join_all,
stream::{SplitSink, SplitStream},
SinkExt, StreamExt,
};
use log::{debug, error, info, warn};
use serde_json::from_str;
use std::{collections::HashSet, sync::Arc};
use log::{debug, error, info};
use serde_json::{from_str, to_string};
use std::{collections::HashMap, sync::Arc};
use tokio::{
join,
net::TcpStream,
spawn,
sync::{mpsc, Mutex, RwLock},
select, spawn,
sync::{mpsc, oneshot, Mutex, RwLock},
task::block_in_place,
};
use tokio_tungstenite::{tungstenite, MaybeTlsStream, WebSocketStream};
pub async fn run(
app_config: Arc<Config>,
guard: Arc<RwLock<Guard>>,
sender: Arc<Mutex<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, tungstenite::Message>>>,
mut receiver: SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>,
backfill_sender: mpsc::Sender<backfill::Message>,
) {
loop {
let message = receiver.next().await.unwrap().unwrap();
pub enum Action {
Subscribe,
Unsubscribe,
}
spawn(handle_websocket_message(
app_config.clone(),
guard.clone(),
sender.clone(),
backfill_sender.clone(),
message,
));
impl From<super::Action> for Action {
fn from(action: super::Action) -> Self {
match action {
super::Action::Add => Self::Subscribe,
super::Action::Remove => Self::Unsubscribe,
}
}
}
pub struct Message {
pub action: Action,
pub symbols: Vec<String>,
pub response: oneshot::Sender<()>,
}
impl Message {
pub fn new(action: Action, symbols: Vec<String>) -> (Self, oneshot::Receiver<()>) {
let (sender, receiver) = oneshot::channel();
(
Self {
action,
symbols,
response: sender,
},
receiver,
)
}
}
pub struct Pending {
pub subscriptions: HashMap<String, oneshot::Sender<()>>,
pub unsubscriptions: HashMap<String, oneshot::Sender<()>>,
}
#[async_trait]
pub trait Handler: Send + Sync {
fn create_subscription_message(
&self,
symbols: Vec<String>,
) -> websocket::outgoing::subscribe::Message;
async fn handle_parsed_websocket_message(
&self,
pending: Arc<RwLock<Pending>>,
message: websocket::incoming::Message,
);
}
pub async fn run(
handler: Arc<Box<dyn Handler>>,
mut receiver: mpsc::Receiver<Message>,
mut websocket_stream: SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>,
websocket_sink: SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, tungstenite::Message>,
) {
let pending = Arc::new(RwLock::new(Pending {
subscriptions: HashMap::new(),
unsubscriptions: HashMap::new(),
}));
let websocket_sink = Arc::new(Mutex::new(websocket_sink));
loop {
select! {
Some(message) = receiver.recv() => {
spawn(handle_message(
handler.clone(),
pending.clone(),
websocket_sink.clone(),
message,
));
}
Some(Ok(message)) = websocket_stream.next() => {
spawn(handle_websocket_message(
handler.clone(),
pending.clone(),
websocket_sink.clone(),
message,
));
}
else => panic!("Communication channel unexpectedly closed.")
}
}
}
async fn handle_message(
handler: Arc<Box<dyn Handler>>,
pending: Arc<RwLock<Pending>>,
websocket_sender: Arc<
Mutex<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, tungstenite::Message>>,
>,
message: Message,
) {
match message.action {
Action::Subscribe => {
let (pending_subscriptions, receivers): (Vec<_>, Vec<_>) = message
.symbols
.iter()
.map(|symbol| {
let (sender, receiver) = oneshot::channel();
((symbol.clone(), sender), receiver)
})
.unzip();
pending
.write()
.await
.subscriptions
.extend(pending_subscriptions);
websocket_sender
.lock()
.await
.send(tungstenite::Message::Text(
to_string(&websocket::outgoing::Message::Subscribe(
handler.create_subscription_message(message.symbols),
))
.unwrap(),
))
.await
.unwrap();
join_all(receivers).await;
}
Action::Unsubscribe => {
let (pending_unsubscriptions, receivers): (Vec<_>, Vec<_>) = message
.symbols
.iter()
.map(|symbol| {
let (sender, receiver) = oneshot::channel();
((symbol.clone(), sender), receiver)
})
.unzip();
pending
.write()
.await
.unsubscriptions
.extend(pending_unsubscriptions);
websocket_sender
.lock()
.await
.send(tungstenite::Message::Text(
to_string(&websocket::outgoing::Message::Unsubscribe(
handler.create_subscription_message(message.symbols.clone()),
))
.unwrap(),
))
.await
.unwrap();
join_all(receivers).await;
}
}
message.response.send(()).unwrap();
}
async fn handle_websocket_message(
app_config: Arc<Config>,
guard: Arc<RwLock<Guard>>,
handler: Arc<Box<dyn Handler>>,
pending: Arc<RwLock<Pending>>,
sender: Arc<Mutex<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, tungstenite::Message>>>,
backfill_sender: mpsc::Sender<backfill::Message>,
message: tungstenite::Message,
) {
match message {
@@ -54,12 +195,14 @@ async fn handle_websocket_message(
if let Ok(message) = message {
for message in message {
spawn(handle_parsed_websocket_message(
app_config.clone(),
guard.clone(),
backfill_sender.clone(),
message,
));
let handler = handler.clone();
let pending = pending.clone();
spawn(async move {
handler
.handle_parsed_websocket_message(pending, message)
.await;
});
}
} else {
error!("Failed to deserialize websocket message: {:?}", message);
@@ -77,143 +220,190 @@ async fn handle_websocket_message(
}
}
#[allow(clippy::significant_drop_tightening)]
#[allow(clippy::too_many_lines)]
async fn handle_parsed_websocket_message(
struct BarsHandler {
app_config: Arc<Config>,
guard: Arc<RwLock<Guard>>,
backfill_sender: mpsc::Sender<backfill::Message>,
message: websocket::incoming::Message,
) {
match message {
websocket::incoming::Message::Subscription(message) => {
let (symbols, log_string) = match message {
websocket::incoming::subscription::Message::Market { bars, .. } => (bars, "bars"),
websocket::incoming::subscription::Message::News { news } => (
news.into_iter()
.map(|symbol| add_slash_to_pair(&symbol))
.collect(),
"news",
),
};
}
let mut guard = guard.write().await;
#[async_trait]
impl Handler for BarsHandler {
fn create_subscription_message(
&self,
symbols: Vec<String>,
) -> websocket::outgoing::subscribe::Message {
websocket::outgoing::subscribe::Message::new_market(symbols)
}
let newly_subscribed = guard
.pending_subscriptions
.extract_if(|asset| symbols.contains(&asset.symbol))
.collect::<HashSet<_>>();
async fn handle_parsed_websocket_message(
&self,
pending: Arc<RwLock<Pending>>,
message: websocket::incoming::Message,
) {
match message {
websocket::incoming::Message::Subscription(message) => {
let websocket::incoming::subscription::Message::Market { bars: symbols, .. } =
message
else {
unreachable!()
};
let newly_unsubscribed = guard
.pending_unsubscriptions
.extract_if(|asset| !symbols.contains(&asset.symbol))
.collect::<HashSet<_>>();
let mut pending = pending.write().await;
drop(guard);
let newly_subscribed = pending
.subscriptions
.extract_if(|symbol, _| symbols.contains(symbol))
.collect::<HashMap<_, _>>();
let newly_unsubscribed = pending
.unsubscriptions
.extract_if(|symbol, _| !symbols.contains(symbol))
.collect::<HashMap<_, _>>();
drop(pending);
let newly_subscribed_future = async {
if !newly_subscribed.is_empty() {
info!(
"Subscribed to {} for {:?}.",
log_string,
newly_subscribed
.iter()
.map(|asset| asset.symbol.clone())
.collect::<Vec<_>>()
"Subscribed to bars for {:?}.",
newly_subscribed.keys().collect::<Vec<_>>()
);
let (backfill_message, backfill_receiver) = backfill::Message::new(
backfill::Action::Backfill,
Subset::Some(newly_subscribed.into_iter().collect::<Vec<_>>()),
);
backfill_sender.send(backfill_message).await.unwrap();
backfill_receiver.await.unwrap();
for sender in newly_subscribed.into_values() {
sender.send(()).unwrap();
}
}
};
let newly_unsubscribed_future = async {
if !newly_unsubscribed.is_empty() {
info!(
"Unsubscribed from {} for {:?}.",
log_string,
newly_unsubscribed
.iter()
.map(|asset| asset.symbol.clone())
.collect::<Vec<_>>()
"Unsubscribed from bars for {:?}.",
newly_unsubscribed.keys().collect::<Vec<_>>()
);
let (purge_message, purge_receiver) = backfill::Message::new(
backfill::Action::Purge,
Subset::Some(newly_unsubscribed.into_iter().collect::<Vec<_>>()),
);
backfill_sender.send(purge_message).await.unwrap();
purge_receiver.await.unwrap();
for sender in newly_unsubscribed.into_values() {
sender.send(()).unwrap();
}
}
};
join!(newly_subscribed_future, newly_unsubscribed_future);
}
websocket::incoming::Message::Bar(message)
| websocket::incoming::Message::UpdatedBar(message) => {
let bar = Bar::from(message);
let guard = guard.read().await;
if !guard.assets.contains_right(&bar.symbol) {
warn!(
"Race condition: received bar for unsubscribed symbol: {:?}.",
bar.symbol
);
return;
}
debug!("Received bar for {}: {}.", bar.symbol, bar.time);
database::bars::upsert(&app_config.clickhouse_client, &bar).await;
}
websocket::incoming::Message::News(message) => {
let news = News::from(message);
let guard = guard.read().await;
if !news
.symbols
.iter()
.any(|symbol| guard.assets.contains_right(symbol))
{
warn!(
"Race condition: received news for unsubscribed symbols: {:?}.",
news.symbols
);
return;
websocket::incoming::Message::Bar(message)
| websocket::incoming::Message::UpdatedBar(message) => {
let bar = Bar::from(message);
debug!("Received bar for {}: {}.", bar.symbol, bar.time);
database::bars::upsert(&self.app_config.clickhouse_client, &bar).await;
}
debug!(
"Received news for {:?}: {}.",
news.symbols, news.time_created
);
let input = format!("{}\n\n{}", news.headline, news.content);
let sequence_classifier = app_config.sequence_classifier.lock().await;
let prediction = block_in_place(|| {
sequence_classifier
.predict(vec![input.as_str()])
.into_iter()
.map(|label| Prediction::try_from(label).unwrap())
.collect::<Vec<_>>()[0]
});
drop(sequence_classifier);
let news = News {
sentiment: prediction.sentiment,
confidence: prediction.confidence,
..news
};
database::news::upsert(&app_config.clickhouse_client, &news).await;
}
websocket::incoming::Message::Success(_) => {}
websocket::incoming::Message::Error(message) => {
error!("Received error message: {}.", message.message);
websocket::incoming::Message::Success(_) => {}
websocket::incoming::Message::Error(message) => {
error!("Received error message: {}.", message.message);
}
websocket::incoming::Message::News(_) => unreachable!(),
}
}
}
struct NewsHandler {
app_config: Arc<Config>,
}
#[async_trait]
impl Handler for NewsHandler {
fn create_subscription_message(
&self,
symbols: Vec<String>,
) -> websocket::outgoing::subscribe::Message {
websocket::outgoing::subscribe::Message::new_news(symbols)
}
async fn handle_parsed_websocket_message(
&self,
pending: Arc<RwLock<Pending>>,
message: websocket::incoming::Message,
) {
match message {
websocket::incoming::Message::Subscription(message) => {
let websocket::incoming::subscription::Message::News { news: symbols } = message
else {
unreachable!()
};
let symbols = symbols
.into_iter()
.map(|symbol| add_slash_to_pair(&symbol))
.collect::<Vec<_>>();
let mut pending = pending.write().await;
let newly_subscribed = pending
.subscriptions
.extract_if(|symbol, _| symbols.contains(symbol))
.collect::<HashMap<_, _>>();
let newly_unsubscribed = pending
.unsubscriptions
.extract_if(|symbol, _| !symbols.contains(symbol))
.collect::<HashMap<_, _>>();
drop(pending);
if !newly_subscribed.is_empty() {
info!(
"Subscribed to news for {:?}.",
newly_subscribed.keys().collect::<Vec<_>>()
);
for sender in newly_subscribed.into_values() {
sender.send(()).unwrap();
}
}
if !newly_unsubscribed.is_empty() {
info!(
"Unsubscribed from news for {:?}.",
newly_unsubscribed.keys().collect::<Vec<_>>()
);
for sender in newly_unsubscribed.into_values() {
sender.send(()).unwrap();
}
}
}
websocket::incoming::Message::News(message) => {
let news = News::from(message);
debug!(
"Received news for {:?}: {}.",
news.symbols, news.time_created
);
let input = format!("{}\n\n{}", news.headline, news.content);
let sequence_classifier = self.app_config.sequence_classifier.lock().await;
let prediction = block_in_place(|| {
sequence_classifier
.predict(vec![input.as_str()])
.into_iter()
.map(|label| Prediction::try_from(label).unwrap())
.collect::<Vec<_>>()[0]
});
drop(sequence_classifier);
let news = News {
sentiment: prediction.sentiment,
confidence: prediction.confidence,
..news
};
database::news::upsert(&self.app_config.clickhouse_client, &news).await;
}
websocket::incoming::Message::Success(_) => {}
websocket::incoming::Message::Error(message) => {
error!("Received error message: {}.", message.message);
}
websocket::incoming::Message::Bar(_) | websocket::incoming::Message::UpdatedBar(_) => {
unreachable!()
}
}
}
}
pub fn create_handler(thread_type: ThreadType, app_config: Arc<Config>) -> Box<dyn Handler> {
match thread_type {
ThreadType::Bars(_) => Box::new(BarsHandler { app_config }),
ThreadType::News => Box::new(NewsHandler { app_config }),
}
}

View File

@@ -1,19 +0,0 @@
use crate::types::Asset;
use bimap::BiMap;
use std::collections::HashSet;
pub struct Guard {
pub assets: BiMap<Asset, String>,
pub pending_subscriptions: HashSet<Asset>,
pub pending_unsubscriptions: HashSet<Asset>,
}
impl Guard {
pub fn new() -> Self {
Self {
assets: BiMap::new(),
pending_subscriptions: HashSet::new(),
pending_unsubscriptions: HashSet::new(),
}
}
}

View File

@@ -1,3 +1,2 @@
pub mod clock;
pub mod data;
pub mod guard;

View File

@@ -1,3 +0,0 @@
pub mod subset;
pub use subset::Subset;

View File

@@ -1,5 +0,0 @@
#[derive(Clone, Debug)]
pub enum Subset<T> {
Some(Vec<T>),
All,
}

View File

@@ -1,5 +1,11 @@
use crate::types::{self, alpaca::api::impl_from_enum};
use crate::{
config::{Config, ALPACA_ASSET_API_URL},
types::{self, alpaca::api::impl_from_enum},
};
use backoff::{future::retry, ExponentialBackoff};
use http::StatusCode;
use serde::Deserialize;
use std::sync::Arc;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "snake_case")]
@@ -80,3 +86,27 @@ impl From<Asset> for types::Asset {
}
}
}
pub async fn get_by_symbol(app_config: &Arc<Config>, symbol: &str) -> Result<Asset, StatusCode> {
retry(ExponentialBackoff::default(), || async {
app_config.alpaca_rate_limit.until_ready().await;
app_config
.alpaca_client
.get(&format!("{ALPACA_ASSET_API_URL}/{symbol}"))
.send()
.await?
.error_for_status()
.map_err(|e| match e.status() {
Some(reqwest::StatusCode::NOT_FOUND) => backoff::Error::Permanent(e),
_ => e.into(),
})?
.json::<Asset>()
.await
.map_err(backoff::Error::Permanent)
})
.await
.map_err(|e| match e.status() {
Some(reqwest::StatusCode::NOT_FOUND) => StatusCode::NOT_FOUND,
_ => panic!("Unexpected error: {e}."),
})
}
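Callers can then treat a missing symbol as an ordinary Err, while every other failure is retried (and, if persistent, panics) inside the helper; a minimal usage sketch with an illustrative symbol, mirroring the assets route above:

// Err(StatusCode::NOT_FOUND) is the only error a caller sees; transient
// HTTP failures are retried with exponential backoff inside get_by_symbol.
let asset = get_by_symbol(&app_config, "AAPL").await?;
if !asset.tradable || !asset.fractionable {
    return Err(StatusCode::FORBIDDEN);
}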

View File

@@ -1,6 +1,10 @@
use crate::types;
use crate::{
config::Config,
types::{self, alpaca::api::outgoing},
};
use backoff::{future::retry, ExponentialBackoff};
use serde::Deserialize;
use std::collections::HashMap;
use std::{collections::HashMap, sync::Arc};
use time::OffsetDateTime;
#[derive(Clone, Debug, PartialEq, Deserialize)]
@@ -45,3 +49,25 @@ pub struct Message {
pub bars: HashMap<String, Vec<Bar>>,
pub next_page_token: Option<String>,
}
pub async fn get_historical(
app_config: &Arc<Config>,
data_url: &str,
query: &outgoing::bar::Bar,
) -> Message {
retry(ExponentialBackoff::default(), || async {
app_config.alpaca_rate_limit.until_ready().await;
app_config
.alpaca_client
.get(data_url)
.query(query)
.send()
.await?
.error_for_status()?
.json::<Message>()
.await
.map_err(backoff::Error::Permanent)
})
.await
.unwrap()
}

View File

@@ -1,4 +1,7 @@
use crate::config::{Config, ALPACA_CLOCK_API_URL};
use backoff::{future::retry, ExponentialBackoff};
use serde::Deserialize;
use std::sync::Arc;
use time::OffsetDateTime;
#[derive(Clone, Debug, PartialEq, Eq, Deserialize)]
@@ -11,3 +14,19 @@ pub struct Clock {
#[serde(with = "time::serde::rfc3339")]
pub next_close: OffsetDateTime,
}
pub async fn get(app_config: &Arc<Config>) -> Clock {
retry(ExponentialBackoff::default(), || async {
app_config.alpaca_rate_limit.until_ready().await;
app_config
.alpaca_client
.get(ALPACA_CLOCK_API_URL)
.send()
.await?
.json::<Clock>()
.await
.map_err(backoff::Error::Permanent)
})
.await
.unwrap()
}

View File

@@ -1,8 +1,11 @@
use crate::{
types,
config::{Config, ALPACA_NEWS_DATA_URL},
types::{self, alpaca::api::outgoing},
utils::{add_slash_to_pair, normalize_news_content},
};
use backoff::{future::retry, ExponentialBackoff};
use serde::Deserialize;
use std::sync::Arc;
use time::OffsetDateTime;
#[derive(Clone, Debug, PartialEq, Eq, Deserialize)]
@@ -66,3 +69,21 @@ pub struct Message {
pub news: Vec<News>,
pub next_page_token: Option<String>,
}
pub async fn get_historical(app_config: &Arc<Config>, query: &outgoing::news::News) -> Message {
retry(ExponentialBackoff::default(), || async {
app_config.alpaca_rate_limit.until_ready().await;
app_config
.alpaca_client
.get(ALPACA_NEWS_DATA_URL)
.query(query)
.send()
.await?
.error_for_status()?
.json::<Message>()
.await
.map_err(backoff::Error::Permanent)
})
.await
.unwrap()
}

View File

@@ -1,11 +1,9 @@
pub mod algebraic;
pub mod alpaca;
pub mod asset;
pub mod backfill;
pub mod bar;
pub mod news;
pub use algebraic::Subset;
pub use asset::{Asset, Class, Exchange};
pub use backfill::Backfill;
pub use bar::Bar;

View File

@@ -3,9 +3,9 @@ use clickhouse::Client;
use tokio::join;
pub async fn cleanup(clickhouse_client: &Client) {
let bars_future = database::bars::cleanup(clickhouse_client);
let news_future = database::news::cleanup(clickhouse_client);
let backfills_future = database::backfills::cleanup(clickhouse_client);
join!(bars_future, news_future, backfills_future);
join!(
database::bars::cleanup(clickhouse_client),
database::news::cleanup(clickhouse_client),
database::backfills::cleanup(clickhouse_client)
);
}

View File

@@ -5,5 +5,5 @@ pub mod websocket;
pub use cleanup::cleanup;
pub use news::{add_slash_to_pair, normalize_news_content, remove_slash_from_pair};
pub use time::{duration_until, last_minute, FIFTEEN_MINUTES, ONE_MINUTE};
pub use time::{duration_until, last_minute, FIFTEEN_MINUTES, ONE_MINUTE, ONE_SECOND};
pub use websocket::authenticate;

View File

@@ -1,6 +1,7 @@
use std::time::Duration;
use time::OffsetDateTime;
pub const ONE_SECOND: Duration = Duration::from_secs(1);
pub const ONE_MINUTE: Duration = Duration::from_secs(60);
pub const FIFTEEN_MINUTES: Duration = Duration::from_secs(60 * 15);

View File

@@ -11,10 +11,10 @@ use tokio_tungstenite::{tungstenite::Message, MaybeTlsStream, WebSocketStream};
pub async fn authenticate(
app_config: &Arc<Config>,
sender: &mut SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, Message>,
receiver: &mut SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>,
sink: &mut SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, Message>,
stream: &mut SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>,
) {
match receiver.next().await.unwrap().unwrap() {
match stream.next().await.unwrap().unwrap() {
Message::Text(data)
if from_str::<Vec<websocket::incoming::Message>>(&data)
.unwrap()
@@ -25,20 +25,19 @@ pub async fn authenticate(
_ => panic!("Failed to connect to Alpaca websocket."),
}
sender
.send(Message::Text(
to_string(&websocket::outgoing::Message::Auth(
websocket::outgoing::auth::Message {
key: app_config.alpaca_api_key.clone(),
secret: app_config.alpaca_api_secret.clone(),
},
))
.unwrap(),
sink.send(Message::Text(
to_string(&websocket::outgoing::Message::Auth(
websocket::outgoing::auth::Message {
key: app_config.alpaca_api_key.clone(),
secret: app_config.alpaca_api_secret.clone(),
},
))
.await
.unwrap();
.unwrap(),
))
.await
.unwrap();
match receiver.next().await.unwrap().unwrap() {
match stream.next().await.unwrap().unwrap() {
Message::Text(data)
if from_str::<Vec<websocket::incoming::Message>>(&data)
.unwrap()