Add news data support

- Refactor everything in the process, oops

Signed-off-by: Nikolaos Karaolidis <nick@karaolidis.com>
This commit is contained in:
2024-01-25 10:46:42 +00:00
parent 178a062c25
commit 002f70e299
53 changed files with 1683 additions and 677 deletions

66
src/threads/clock.rs Normal file
View File

@@ -0,0 +1,66 @@
use crate::{
config::{Config, ALPACA_CLOCK_API_URL},
types::alpaca,
utils::duration_until,
};
use backoff::{future::retry, ExponentialBackoff};
use log::info;
use std::sync::Arc;
use time::OffsetDateTime;
use tokio::{sync::mpsc, time::sleep};
/// Market session state as reported by the Alpaca clock endpoint.
pub enum Status {
    /// The market is currently open for trading.
    Open,
    /// The market is currently closed.
    Closed,
}
/// Clock update broadcast to downstream threads.
pub struct Message {
    /// Market status at the time the clock was fetched.
    pub status: Status,
    /// When the market next changes state (next close if open, next open if closed).
    pub next_switch: OffsetDateTime,
}
impl From<alpaca::api::incoming::clock::Clock> for Message {
    /// Maps an incoming Alpaca clock payload to a status message, pairing the
    /// status with the timestamp of the next state transition.
    fn from(clock: alpaca::api::incoming::clock::Clock) -> Self {
        let (status, next_switch) = if clock.is_open {
            (Status::Open, clock.next_close)
        } else {
            (Status::Closed, clock.next_open)
        };
        Self {
            status,
            next_switch,
        }
    }
}
/// Polls the Alpaca clock endpoint forever, sleeping until each market
/// open/close transition and notifying `clock_sender` when it occurs.
///
/// Panics if the clock cannot be fetched (after retries exhaust) or if the
/// receiving side of `clock_sender` has been dropped.
pub async fn run(app_config: Arc<Config>, clock_sender: mpsc::Sender<Message>) {
    loop {
        // Fetch the current market clock, retrying transient HTTP failures
        // with exponential backoff. JSON decode errors are mapped to
        // Permanent since retrying cannot fix a malformed payload.
        let clock = retry(ExponentialBackoff::default(), || async {
            app_config.alpaca_rate_limit.until_ready().await;
            app_config
                .alpaca_client
                .get(ALPACA_CLOCK_API_URL)
                .send()
                .await?
                .error_for_status()?
                .json::<alpaca::api::incoming::clock::Clock>()
                .await
                .map_err(backoff::Error::Permanent)
        })
        .await
        .unwrap();
        // Sleep until the next open/close boundary, then notify listeners
        // that the transition has happened.
        let sleep_until = duration_until(if clock.is_open {
            info!("Market is open, will close at {}.", clock.next_close);
            clock.next_close
        } else {
            info!("Market is closed, will reopen at {}.", clock.next_open);
            clock.next_open
        });
        sleep(sleep_until).await;
        clock_sender.send(clock.into()).await.unwrap();
    }
}

View File

@@ -0,0 +1,174 @@
use super::{Guard, ThreadType};
use crate::{
config::Config,
database,
types::{alpaca::websocket, Asset},
};
use futures_util::{stream::SplitSink, SinkExt};
use log::info;
use serde_json::to_string;
use std::sync::Arc;
use tokio::{
join,
net::TcpStream,
spawn,
sync::{mpsc, oneshot, Mutex, RwLock},
};
use tokio_tungstenite::{tungstenite, MaybeTlsStream, WebSocketStream};
/// Whether assets are being added to or removed from the live data stream.
#[derive(Clone)]
pub enum Action {
    Add,
    Remove,
}
/// Request to add or remove a batch of assets, carrying a oneshot channel
/// used to signal the caller once the request has been fully processed.
pub struct Message {
    pub action: Action,
    pub assets: Vec<Asset>,
    /// Fired (with `()`) after the subscription change has been handled.
    pub response: oneshot::Sender<()>,
}
impl Message {
    /// Builds a message together with the receiver on which the caller can
    /// await completion of the request.
    pub fn new(action: Action, assets: Vec<Asset>) -> (Self, oneshot::Receiver<()>) {
        let (response, completion) = oneshot::channel::<()>();
        let message = Self {
            action,
            assets,
            response,
        };
        (message, completion)
    }
}
/// Receives asset add/remove requests forever, handling each one on its own
/// task so a slow request does not block the queue.
///
/// Panics when the request channel closes.
pub async fn run(
    app_config: Arc<Config>,
    thread_type: ThreadType,
    guard: Arc<RwLock<Guard>>,
    mut asset_status_receiver: mpsc::Receiver<Message>,
    websocket_sender: Arc<
        Mutex<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, tungstenite::Message>>,
    >,
) {
    loop {
        let message = asset_status_receiver.recv().await.unwrap();
        spawn(handle_asset_status_message(
            app_config.clone(),
            thread_type,
            guard.clone(),
            websocket_sender.clone(),
            message,
        ));
    }
}
#[allow(clippy::significant_drop_tightening)]
async fn handle_asset_status_message(
app_config: Arc<Config>,
thread_type: ThreadType,
guard: Arc<RwLock<Guard>>,
websocket_sender: Arc<
Mutex<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, tungstenite::Message>>,
>,
message: Message,
) {
let symbols = message
.assets
.clone()
.into_iter()
.map(|asset| match thread_type {
ThreadType::Bars(_) => asset.symbol,
ThreadType::News => asset.abbreviation,
})
.collect::<Vec<_>>();
match message.action {
Action::Add => {
let mut guard = guard.write().await;
guard.symbols.extend(symbols.clone());
guard
.pending_subscriptions
.extend(symbols.clone().into_iter().zip(message.assets.clone()));
info!("{:?} - Added {:?}.", thread_type, symbols);
let database_future = async {
if matches!(thread_type, ThreadType::Bars(_)) {
database::assets::upsert_batch(&app_config.clickhouse_client, message.assets)
.await;
}
};
let websocket_future = async move {
websocket_sender
.lock()
.await
.send(tungstenite::Message::Text(
to_string(&websocket::outgoing::Message::Subscribe(
websocket_market_message_factory(thread_type, symbols),
))
.unwrap(),
))
.await
.unwrap();
};
join!(database_future, websocket_future);
}
Action::Remove => {
let mut guard = guard.write().await;
guard.symbols.retain(|symbol| !symbols.contains(symbol));
guard
.pending_unsubscriptions
.extend(symbols.clone().into_iter().zip(message.assets.clone()));
info!("{:?} - Removed {:?}.", thread_type, symbols);
let sybols_clone = symbols.clone();
let database_future = database::assets::delete_where_symbols(
&app_config.clickhouse_client,
&sybols_clone,
);
let websocket_future = async move {
websocket_sender
.lock()
.await
.send(tungstenite::Message::Text(
to_string(&websocket::outgoing::Message::Unsubscribe(
websocket_market_message_factory(thread_type, symbols),
))
.unwrap(),
))
.await
.unwrap();
};
join!(database_future, websocket_future);
}
}
message.response.send(()).unwrap();
}
/// Builds the subscribe/unsubscribe payload appropriate for this thread:
/// market-data messages for bars threads, news messages for the news thread.
fn websocket_market_message_factory(
    thread_type: ThreadType,
    symbols: Vec<String>,
) -> websocket::outgoing::subscribe::Message {
    use websocket::outgoing::subscribe as subscribe_msg;
    match thread_type {
        ThreadType::Bars(_) => {
            subscribe_msg::Message::Market(subscribe_msg::MarketMessage::new(symbols))
        }
        ThreadType::News => subscribe_msg::Message::News(subscribe_msg::NewsMessage::new(symbols)),
    }
}

View File

@@ -0,0 +1,374 @@
use super::{Guard, ThreadType};
use crate::{
config::{Config, ALPACA_CRYPTO_DATA_URL, ALPACA_NEWS_DATA_URL, ALPACA_STOCK_DATA_URL},
database,
types::{
alpaca::{api, Source},
Asset, Bar, Class, News, Subset,
},
utils::{duration_until, last_minute, FIFTEEN_MINUTES, ONE_MINUTE},
};
use backoff::{future::retry, ExponentialBackoff};
use log::{error, info};
use std::{collections::HashMap, sync::Arc};
use time::OffsetDateTime;
use tokio::{
join, spawn,
sync::{mpsc, oneshot, Mutex, RwLock},
task::JoinHandle,
time::sleep,
};
/// Whether historical data should be fetched or deleted for a set of assets.
pub enum Action {
    Backfill,
    Purge,
}
/// Backfill/purge request with a oneshot channel for completion signalling.
pub struct Message {
    pub action: Action,
    /// Either all currently tracked assets or an explicit subset.
    pub assets: Subset<Asset>,
    /// Fired (with `()`) once the request has been fully handled.
    pub response: oneshot::Sender<()>,
}
impl Message {
    /// Creates the message plus the receiver the caller awaits for completion.
    pub fn new(action: Action, assets: Subset<Asset>) -> (Self, oneshot::Receiver<()>) {
        let (response, completion) = oneshot::channel::<()>();
        let message = Self {
            action,
            assets,
            response,
        };
        (message, completion)
    }
}
pub async fn run(
app_config: Arc<Config>,
thread_type: ThreadType,
guard: Arc<RwLock<Guard>>,
mut backfill_receiver: mpsc::Receiver<Message>,
) {
let backfill_jobs = Arc::new(Mutex::new(HashMap::new()));
let data_url = match thread_type {
ThreadType::Bars(Class::UsEquity) => ALPACA_STOCK_DATA_URL.to_string(),
ThreadType::Bars(Class::Crypto) => ALPACA_CRYPTO_DATA_URL.to_string(),
ThreadType::News => ALPACA_NEWS_DATA_URL.to_string(),
};
loop {
let app_config = app_config.clone();
let guard = guard.clone();
let backfill_jobs = backfill_jobs.clone();
let data_url = data_url.clone();
let message = backfill_receiver.recv().await.unwrap();
spawn(handle_backfill_message(
app_config,
thread_type,
guard,
data_url,
backfill_jobs,
message,
));
}
}
/// Handles one backfill or purge request for this thread's data domain.
///
/// Backfill spawns (or restarts) one fetch job per symbol; purge cancels any
/// running jobs and deletes the stored data and backfill markers, then the
/// request is acknowledged via `message.response`.
#[allow(clippy::significant_drop_tightening)]
#[allow(clippy::too_many_lines)]
async fn handle_backfill_message(
    app_config: Arc<Config>,
    thread_type: ThreadType,
    guard: Arc<RwLock<Guard>>,
    data_url: String,
    backfill_jobs: Arc<Mutex<HashMap<String, JoinHandle<()>>>>,
    message: Message,
) {
    let guard = guard.read().await;
    let mut backfill_jobs = backfill_jobs.lock().await;
    // Resolve the request to a concrete symbol list: backfills only touch
    // symbols we are subscribed to, purges only symbols we no longer track.
    let symbols = match message.assets {
        Subset::All => guard.symbols.clone().into_iter().collect::<Vec<_>>(),
        Subset::Some(assets) => assets
            .into_iter()
            .map(|asset| match thread_type {
                ThreadType::Bars(_) => asset.symbol,
                ThreadType::News => asset.abbreviation,
            })
            .filter(|symbol| match message.action {
                Action::Backfill => guard.symbols.contains(symbol),
                Action::Purge => !guard.symbols.contains(symbol),
            })
            .collect::<Vec<_>>(),
    };
    match message.action {
        Action::Backfill => {
            for symbol in symbols {
                // Replace any previous job for this symbol. `abort` is a
                // no-op on a finished task, and awaiting the handle then
                // yields Ok(()) if it had completed or Err (cancelled)
                // otherwise, so the result is deliberately discarded.
                // (The previous `unwrap_err` panicked whenever the job
                // finished between the `is_finished` check and the abort.)
                if let Some(job) = backfill_jobs.remove(&symbol) {
                    job.abort();
                    let _ = job.await;
                }
                let app_config = app_config.clone();
                let data_url = data_url.clone();
                backfill_jobs.insert(
                    symbol.clone(),
                    spawn(async move {
                        let (fetch_from, fetch_to) =
                            queue_backfill(&app_config, thread_type, &symbol).await;
                        match thread_type {
                            ThreadType::Bars(_) => {
                                execute_backfill_bars(
                                    app_config,
                                    thread_type,
                                    data_url,
                                    symbol,
                                    fetch_from,
                                    fetch_to,
                                )
                                .await;
                            }
                            ThreadType::News => {
                                execute_backfill_news(
                                    app_config,
                                    thread_type,
                                    data_url,
                                    symbol,
                                    fetch_from,
                                    fetch_to,
                                )
                                .await;
                            }
                        }
                    }),
                );
            }
        }
        Action::Purge => {
            // Cancel any in-flight jobs for the purged symbols first; see the
            // note above on why the join result is discarded.
            for symbol in &symbols {
                if let Some(job) = backfill_jobs.remove(symbol) {
                    job.abort();
                    let _ = job.await;
                }
            }
            // Delete the backfill markers and the stored data concurrently.
            let backfills_future = database::backfills::delete_where_symbols(
                &app_config.clickhouse_client,
                &thread_type,
                &symbols,
            );
            let data_future = async {
                match thread_type {
                    ThreadType::Bars(_) => {
                        database::bars::delete_where_symbols(
                            &app_config.clickhouse_client,
                            &symbols,
                        )
                        .await;
                    }
                    ThreadType::News => {
                        database::news::delete_where_symbols(
                            &app_config.clickhouse_client,
                            &symbols,
                        )
                        .await;
                    }
                }
            };
            join!(backfills_future, data_future);
        }
    }
    message.response.send(()).unwrap();
}
/// Determines the time range to backfill for `symbol` and, when running on
/// the IEX feed, delays execution until the requested window's data has
/// cleared the 15-minute embargo.
///
/// Returns the `(fetch_from, fetch_to)` range to request.
async fn queue_backfill(
    app_config: &Arc<Config>,
    thread_type: ThreadType,
    // NOTE(review): `&str` would be more idiomatic here, but the database
    // helper's signature is not visible — left as `&String` to be safe.
    symbol: &String,
) -> (OffsetDateTime, OffsetDateTime) {
    let latest_backfill = database::backfills::select_latest_where_symbol(
        &app_config.clickhouse_client,
        &thread_type,
        &symbol,
    )
    .await;
    // Resume one minute past the last recorded backfill, or from the epoch
    // when no backfill has ever run for this symbol.
    let fetch_from = latest_backfill
        .as_ref()
        .map_or(OffsetDateTime::UNIX_EPOCH, |backfill| {
            backfill.time + ONE_MINUTE
        });
    let fetch_to = last_minute();
    if app_config.alpaca_source == Source::Iex {
        // IEX data is delayed; wait until the window becomes available.
        // ("Queueing" fixes the "Queing" typo in the original log message.)
        let run_delay = duration_until(fetch_to + FIFTEEN_MINUTES + ONE_MINUTE);
        info!(
            "{:?} - Queueing backfill for {} in {:?}.",
            thread_type, symbol, run_delay
        );
        sleep(run_delay).await;
    }
    (fetch_from, fetch_to)
}
async fn execute_backfill_bars(
app_config: Arc<Config>,
thread_type: ThreadType,
data_url: String,
symbol: String,
fetch_from: OffsetDateTime,
fetch_to: OffsetDateTime,
) {
if fetch_from > fetch_to {
return;
}
info!("{:?} - Backfilling data for {}.", thread_type, symbol);
let mut bars = Vec::new();
let mut next_page_token = None;
loop {
let message = retry(ExponentialBackoff::default(), || async {
app_config.alpaca_rate_limit.until_ready().await;
app_config
.alpaca_client
.get(&data_url)
.query(&api::outgoing::bar::Bar::new(
vec![symbol.clone()],
ONE_MINUTE,
fetch_from,
fetch_to,
10000,
next_page_token.clone(),
))
.send()
.await?
.error_for_status()?
.json::<api::incoming::bar::Message>()
.await
.map_err(backoff::Error::Permanent)
})
.await;
let message = match message {
Ok(message) => message,
Err(e) => {
error!(
"{:?} - Failed to backfill data for {}: {}.",
thread_type, symbol, e
);
return;
}
};
message.bars.into_iter().for_each(|(symbol, bar_vec)| {
for bar in bar_vec {
bars.push(Bar::from((bar, symbol.clone())));
}
});
if message.next_page_token.is_none() {
break;
}
next_page_token = message.next_page_token;
}
if bars.is_empty() {
return;
}
let backfill = bars.last().unwrap().clone().into();
database::bars::upsert_batch(&app_config.clickhouse_client, bars).await;
database::backfills::upsert(&app_config.clickhouse_client, &thread_type, &backfill).await;
info!("{:?} - Backfilled data for {}.", thread_type, symbol);
}
async fn execute_backfill_news(
app_config: Arc<Config>,
thread_type: ThreadType,
data_url: String,
symbol: String,
fetch_from: OffsetDateTime,
fetch_to: OffsetDateTime,
) {
if fetch_from > fetch_to {
return;
}
info!("{:?} - Backfilling data for {}.", thread_type, symbol);
let mut news = Vec::new();
let mut next_page_token = None;
loop {
let message = retry(ExponentialBackoff::default(), || async {
app_config.alpaca_rate_limit.until_ready().await;
app_config
.alpaca_client
.get(&data_url)
.query(&api::outgoing::news::News::new(
vec![symbol.clone()],
fetch_from,
fetch_to,
50,
true,
false,
next_page_token.clone(),
))
.send()
.await?
.error_for_status()?
.json::<api::incoming::news::Message>()
.await
.map_err(backoff::Error::Permanent)
})
.await;
let message = match message {
Ok(message) => message,
Err(e) => {
error!(
"{:?} - Failed to backfill data for {}: {}.",
thread_type, symbol, e
);
return;
}
};
message.news.into_iter().for_each(|news_item| {
news.push(News::from(news_item));
});
if message.next_page_token.is_none() {
break;
}
next_page_token = message.next_page_token;
}
if news.is_empty() {
return;
}
let backfill = (news.last().unwrap().clone(), symbol.clone()).into();
database::news::upsert_batch(&app_config.clickhouse_client, news).await;
database::backfills::upsert(&app_config.clickhouse_client, &thread_type, &backfill).await;
info!("{:?} - Backfilled data for {}.", thread_type, symbol);
}

233
src/threads/data/mod.rs Normal file
View File

@@ -0,0 +1,233 @@
pub mod asset_status;
pub mod backfill;
pub mod websocket;
use super::clock;
use crate::{
config::{
Config, ALPACA_CRYPTO_WEBSOCKET_URL, ALPACA_NEWS_WEBSOCKET_URL, ALPACA_STOCK_WEBSOCKET_URL,
},
types::{Asset, Class, Subset},
utils::authenticate,
};
use futures_util::StreamExt;
use std::{
collections::{HashMap, HashSet},
sync::Arc,
};
use tokio::{
join, select, spawn,
sync::{mpsc, Mutex, RwLock},
};
use tokio_tungstenite::connect_async;
/// Per-thread subscription state shared (behind an `RwLock`) between the
/// asset-status, backfill, and websocket tasks of one data domain.
pub struct Guard {
    /// Symbols this thread currently tracks.
    pub symbols: HashSet<String>,
    /// Assets awaiting websocket subscription confirmation, keyed by symbol.
    pub pending_subscriptions: HashMap<String, Asset>,
    /// Assets awaiting websocket unsubscription confirmation, keyed by symbol.
    pub pending_unsubscriptions: HashMap<String, Asset>,
}
impl Guard {
    /// Creates an empty guard with no tracked symbols or pending changes.
    pub fn new() -> Self {
        Self {
            symbols: HashSet::default(),
            pending_subscriptions: HashMap::default(),
            pending_unsubscriptions: HashMap::default(),
        }
    }
}
/// Identifies which data domain a group of worker tasks is responsible for.
#[derive(Clone, Copy, Debug)]
pub enum ThreadType {
    /// Minute-bar data for the given asset class.
    Bars(Class),
    /// News data (spans all asset classes).
    News,
}
/// Entry point for the data module: boots one thread group per data domain
/// (US-equity bars, crypto bars, news), then multiplexes incoming asset and
/// clock messages to them forever.
///
/// Panics if both communication channels close.
pub async fn run(
    app_config: Arc<Config>,
    mut asset_receiver: mpsc::Receiver<asset_status::Message>,
    mut clock_receiver: mpsc::Receiver<clock::Message>,
) {
    let (bars_us_equity_asset_status_sender, bars_us_equity_backfill_sender) =
        init_thread(app_config.clone(), ThreadType::Bars(Class::UsEquity)).await;
    let (bars_crypto_asset_status_sender, bars_crypto_backfill_sender) =
        init_thread(app_config.clone(), ThreadType::Bars(Class::Crypto)).await;
    let (news_asset_status_sender, news_backfill_sender) =
        init_thread(app_config.clone(), ThreadType::News).await;
    loop {
        select! {
            // Asset additions/removals are fanned out to all three domains.
            Some(asset_message) = asset_receiver.recv() => {
                let bars_us_equity_asset_status_sender = bars_us_equity_asset_status_sender.clone();
                let bars_crypto_asset_status_sender = bars_crypto_asset_status_sender.clone();
                let news_asset_status_sender = news_asset_status_sender.clone();
                spawn(handle_asset_message(
                    bars_us_equity_asset_status_sender,
                    bars_crypto_asset_status_sender,
                    news_asset_status_sender,
                    asset_message,
                ));
            }
            // Any market open/close transition triggers a full backfill pass.
            Some(_) = clock_receiver.recv() => {
                let bars_us_equity_backfill_sender = bars_us_equity_backfill_sender.clone();
                let bars_crypto_backfill_sender = bars_crypto_backfill_sender.clone();
                let news_backfill_sender = news_backfill_sender.clone();
                spawn(handle_clock_message(
                    bars_us_equity_backfill_sender,
                    bars_crypto_backfill_sender,
                    news_backfill_sender,
                ));
            }
            else => {
                panic!("Communication channel unexpectedly closed.")
            }
        }
    }
}
/// Boots one data domain: connects and authenticates its websocket, then
/// spawns the asset-status, backfill, and websocket-consumer tasks that
/// share a common `Guard`.
///
/// Returns the senders used to submit asset-status and backfill requests to
/// this domain. Panics if the websocket connection fails.
async fn init_thread(
    app_config: Arc<Config>,
    thread_type: ThreadType,
) -> (
    mpsc::Sender<asset_status::Message>,
    mpsc::Sender<backfill::Message>,
) {
    let guard = Arc::new(RwLock::new(Guard::new()));
    // Each domain streams from a different websocket endpoint; the stock
    // endpoint additionally encodes the configured data source.
    let websocket_url = match thread_type {
        ThreadType::Bars(Class::UsEquity) => format!(
            "{}/{}",
            ALPACA_STOCK_WEBSOCKET_URL, &app_config.alpaca_source
        ),
        ThreadType::Bars(Class::Crypto) => ALPACA_CRYPTO_WEBSOCKET_URL.into(),
        ThreadType::News => ALPACA_NEWS_WEBSOCKET_URL.into(),
    };
    let (websocket, _) = connect_async(websocket_url).await.unwrap();
    let (mut websocket_sender, mut websocket_receiver) = websocket.split();
    // Authenticate before any spawned task can use the connection.
    authenticate(&app_config, &mut websocket_sender, &mut websocket_receiver).await;
    let websocket_sender = Arc::new(Mutex::new(websocket_sender));
    let (asset_status_sender, asset_status_receiver) = mpsc::channel(100);
    spawn(asset_status::run(
        app_config.clone(),
        thread_type,
        guard.clone(),
        asset_status_receiver,
        websocket_sender.clone(),
    ));
    let (backfill_sender, backfill_receiver) = mpsc::channel(100);
    spawn(backfill::run(
        app_config.clone(),
        thread_type,
        guard.clone(),
        backfill_receiver,
    ));
    spawn(websocket::run(
        app_config.clone(),
        thread_type,
        guard.clone(),
        websocket_sender,
        websocket_receiver,
        backfill_sender.clone(),
    ));
    (asset_status_sender, backfill_sender)
}
/// Fans one asset add/remove request out to the relevant domain threads:
/// bars threads receive only assets of their own class, while the news
/// thread receives every asset. Waits for all three to acknowledge before
/// acknowledging the original request.
async fn handle_asset_message(
    bars_us_equity_asset_status_sender: mpsc::Sender<asset_status::Message>,
    bars_crypto_asset_status_sender: mpsc::Sender<asset_status::Message>,
    news_asset_status_sender: mpsc::Sender<asset_status::Message>,
    asset_status_message: asset_status::Message,
) {
    // Split the batch by asset class for the two bars threads.
    let (us_equity_assets, crypto_assets): (Vec<_>, Vec<_>) = asset_status_message
        .assets
        .clone()
        .into_iter()
        .partition(|asset| asset.class == Class::UsEquity);
    let bars_us_equity_future = async {
        if !us_equity_assets.is_empty() {
            let (bars_us_equity_asset_status_message, bars_us_equity_asset_status_receiver) =
                asset_status::Message::new(asset_status_message.action.clone(), us_equity_assets);
            bars_us_equity_asset_status_sender
                .send(bars_us_equity_asset_status_message)
                .await
                .unwrap();
            bars_us_equity_asset_status_receiver.await.unwrap();
        }
    };
    let bars_crypto_future = async {
        if !crypto_assets.is_empty() {
            let (crypto_asset_status_message, crypto_asset_status_receiver) =
                asset_status::Message::new(asset_status_message.action.clone(), crypto_assets);
            bars_crypto_asset_status_sender
                .send(crypto_asset_status_message)
                .await
                .unwrap();
            crypto_asset_status_receiver.await.unwrap();
        }
    };
    // The news thread tracks all assets regardless of class.
    let news_future = async {
        if !asset_status_message.assets.is_empty() {
            let (news_asset_status_message, news_asset_status_receiver) =
                asset_status::Message::new(
                    asset_status_message.action.clone(),
                    asset_status_message.assets,
                );
            news_asset_status_sender
                .send(news_asset_status_message)
                .await
                .unwrap();
            news_asset_status_receiver.await.unwrap();
        }
    };
    join!(bars_us_equity_future, bars_crypto_future, news_future);
    asset_status_message.response.send(()).unwrap();
}
/// Requests a full backfill from every data domain when the market clock
/// ticks over, waiting until all three have completed.
async fn handle_clock_message(
    bars_us_equity_backfill_sender: mpsc::Sender<backfill::Message>,
    bars_crypto_backfill_sender: mpsc::Sender<backfill::Message>,
    news_backfill_sender: mpsc::Sender<backfill::Message>,
) {
    // The three requests were previously three copy-pasted async blocks;
    // they are identical, so share one helper and run them concurrently.
    join!(
        request_full_backfill(&bars_us_equity_backfill_sender),
        request_full_backfill(&bars_crypto_backfill_sender),
        request_full_backfill(&news_backfill_sender),
    );
}

/// Sends a `Backfill`/`Subset::All` request on `sender` and waits for the
/// handler to acknowledge completion.
async fn request_full_backfill(sender: &mpsc::Sender<backfill::Message>) {
    let (message, receiver) = backfill::Message::new(backfill::Action::Backfill, Subset::All);
    sender.send(message).await.unwrap();
    receiver.await.unwrap();
}

View File

@@ -0,0 +1,217 @@
use super::{backfill, Guard, ThreadType};
use crate::{
config::Config,
database,
types::{alpaca::websocket, Bar, News, Subset},
};
use futures_util::{
stream::{SplitSink, SplitStream},
SinkExt, StreamExt,
};
use log::{error, info, warn};
use serde_json::from_str;
use std::{
collections::{HashMap, HashSet},
sync::Arc,
};
use tokio::{
join,
net::TcpStream,
spawn,
sync::{mpsc, Mutex, RwLock},
};
use tokio_tungstenite::{tungstenite, MaybeTlsStream, WebSocketStream};
/// Consumes raw frames from the websocket forever, spawning a handler task
/// per frame so slow handlers do not stall the stream.
///
/// Panics when the stream ends or yields a transport error.
pub async fn run(
    app_config: Arc<Config>,
    thread_type: ThreadType,
    guard: Arc<RwLock<Guard>>,
    websocket_sender: Arc<
        Mutex<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, tungstenite::Message>>,
    >,
    mut websocket_receiver: SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>,
    backfill_sender: mpsc::Sender<backfill::Message>,
) {
    loop {
        let frame = websocket_receiver.next().await.unwrap().unwrap();
        spawn(handle_websocket_message(
            app_config.clone(),
            thread_type,
            guard.clone(),
            websocket_sender.clone(),
            backfill_sender.clone(),
            frame,
        ));
    }
}
/// Routes one raw websocket frame: text frames are parsed and dispatched
/// per contained message, pings are answered with pongs, and anything else
/// is logged as unexpected.
async fn handle_websocket_message(
    app_config: Arc<Config>,
    thread_type: ThreadType,
    guard: Arc<RwLock<Guard>>,
    websocket_sender: Arc<
        Mutex<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, tungstenite::Message>>,
    >,
    backfill_sender: mpsc::Sender<backfill::Message>,
    message: tungstenite::Message,
) {
    match message {
        tungstenite::Message::Text(text) => {
            let parsed = from_str::<Vec<websocket::incoming::Message>>(&text);
            if let Ok(incoming_messages) = parsed {
                // A single frame may carry several messages; handle each
                // concurrently on its own task.
                for incoming_message in incoming_messages {
                    spawn(handle_parsed_websocket_message(
                        app_config.clone(),
                        thread_type,
                        guard.clone(),
                        backfill_sender.clone(),
                        incoming_message,
                    ));
                }
            } else {
                error!(
                    "{:?} - Failed to deserialize websocket message: {:?}",
                    thread_type, parsed
                );
            }
        }
        tungstenite::Message::Ping(_) => {
            websocket_sender
                .lock()
                .await
                .send(tungstenite::Message::Pong(vec![]))
                .await
                .unwrap();
        }
        _ => error!(
            "{:?} - Unexpected websocket message: {:?}",
            thread_type, message
        ),
    }
}
/// Dispatches one parsed websocket message: subscription acknowledgements
/// trigger backfills/purges for newly confirmed symbols, bar and news
/// payloads are persisted, and errors are logged.
#[allow(clippy::significant_drop_tightening)]
async fn handle_parsed_websocket_message(
    app_config: Arc<Config>,
    thread_type: ThreadType,
    guard: Arc<RwLock<Guard>>,
    backfill_sender: mpsc::Sender<backfill::Message>,
    message: websocket::incoming::Message,
) {
    match message {
        websocket::incoming::Message::Subscription(message) => {
            let symbols = match message {
                websocket::incoming::subscription::Message::Market(message) => message.bars,
                websocket::incoming::subscription::Message::News(message) => message.news,
            };
            // Move confirmed symbols out of the pending maps; anything the
            // server now reports as subscribed is no longer pending, and
            // anything it no longer reports is confirmed unsubscribed.
            let mut guard = guard.write().await;
            let newly_subscribed = guard
                .pending_subscriptions
                .extract_if(|symbol, _| symbols.contains(symbol))
                .collect::<HashMap<_, _>>();
            let newly_unsubscribed = guard
                .pending_unsubscriptions
                .extract_if(|symbol, _| !symbols.contains(symbol))
                .collect::<HashMap<_, _>>();
            drop(guard);
            let newly_subscribed_future = async {
                if !newly_subscribed.is_empty() {
                    info!(
                        "{:?} - Subscribed to {:?}.",
                        thread_type,
                        newly_subscribed.keys().collect::<Vec<_>>()
                    );
                    let (backfill_message, backfill_receiver) = backfill::Message::new(
                        backfill::Action::Backfill,
                        Subset::Some(newly_subscribed.into_values().collect::<Vec<_>>()),
                    );
                    backfill_sender.send(backfill_message).await.unwrap();
                    backfill_receiver.await.unwrap();
                }
            };
            let newly_unsubscribed_future = async {
                if !newly_unsubscribed.is_empty() {
                    info!(
                        "{:?} - Unsubscribed from {:?}.",
                        thread_type,
                        newly_unsubscribed.keys().collect::<Vec<_>>()
                    );
                    let (purge_message, purge_receiver) = backfill::Message::new(
                        backfill::Action::Purge,
                        Subset::Some(newly_unsubscribed.into_values().collect::<Vec<_>>()),
                    );
                    backfill_sender.send(purge_message).await.unwrap();
                    purge_receiver.await.unwrap();
                }
            };
            join!(newly_subscribed_future, newly_unsubscribed_future);
        }
        websocket::incoming::Message::Bar(message)
        | websocket::incoming::Message::UpdatedBar(message) => {
            let bar = Bar::from(message);
            let guard = guard.read().await;
            // `!contains` replaces the original `get(..).is_none()` idiom.
            if !guard.symbols.contains(&bar.symbol) {
                warn!(
                    "{:?} - Race condition: received bar for unsubscribed symbol: {:?}.",
                    thread_type, bar.symbol
                );
                return;
            }
            info!(
                "{:?} - Received bar for {}: {}.",
                thread_type, bar.symbol, bar.time
            );
            database::bars::upsert(&app_config.clickhouse_client, &bar).await;
        }
        websocket::incoming::Message::News(message) => {
            let news = News::from(message);
            let symbols = news.symbols.clone().into_iter().collect::<HashSet<_>>();
            let guard = guard.read().await;
            // Keep the item only if at least one of its symbols is tracked;
            // `is_disjoint` replaces the original `!iter().any(contains)`.
            if guard.symbols.is_disjoint(&symbols) {
                warn!(
                    "{:?} - Race condition: received news for unsubscribed symbols: {:?}.",
                    thread_type, news.symbols
                );
                return;
            }
            info!(
                "{:?} - Received news for {:?}: {}.",
                thread_type, news.symbols, news.time_created
            );
            database::news::upsert(&app_config.clickhouse_client, &news).await;
        }
        websocket::incoming::Message::Success(_) => {}
        websocket::incoming::Message::Error(message) => {
            error!(
                "{:?} - Received error message: {}.",
                thread_type, message.message
            );
        }
    }
}

2
src/threads/mod.rs Normal file
View File

@@ -0,0 +1,2 @@
pub mod clock;
pub mod data;