Implemented basic metastate sharing and message resend upon failure

Philip (a-0) 2024-05-03 19:34:26 +02:00
parent 95050d5c61
commit a30e30febe
5 changed files with 87 additions and 36 deletions

View file

@@ -37,6 +37,7 @@ pub fn handle(state: &CommState, peer: &PeerId, message: Message) {
                 .set_own_family(family.to_owned())
                 .expect("State failed");
             state.set_metastate_share(metastate_share.to_owned());
+            debug!("Joined family, requesting metastate");
             state.send_to_peers(
                 MessageContent::Get {
                     shares: vec![metastate_share.to_owned()],

View file

@@ -9,6 +9,7 @@ use std::collections::HashMap;
 use std::io::{Read, Write};
 use std::ops::Deref;
 use std::sync::Arc;
+use std::time::Duration;

 use anyhow::{anyhow, bail};
 use i2p::net::{I2pAddr, I2pListener, I2pListenerBuilder, I2pSocketAddr, I2pStream};
@@ -93,11 +94,34 @@ impl CommHandle {
         }
     }

-    pub async fn send(&self, dest: &I2pSocketAddr, msg: Message) -> anyhow::Result<()> {
-        debug!("Sending message...\nFrom '{:?}'\nTo '{dest:?}'\n Message: '{msg:?}", self.own_peer_id().unwrap().addr());
+    pub async fn send(&self, dest: &I2pSocketAddr, msg: Message, custom_retry_settings: Option<(u64, u64)>) -> anyhow::Result<()> {
         match serde_json::to_string(&msg) {
             Ok(msg_string) => {
-                self.send_to_addr(dest, msg_string.as_bytes()).await?;
+                // Default retry settings: (number of tries, base delay in ms)
+                let (tries, base_delay) = match custom_retry_settings {
+                    Some((ct, d)) => (ct, d),
+                    None => (3, 200),
+                };
+                // Send message until success or too many retries
+                let mut ctr = 0;
+                while ctr < tries {
+                    debug!("Sending message...\nFrom '{:?}'\nTo '{dest:?}'\nAttempt: {ctr}\nMessage: '{msg:?}'", self.own_peer_id().unwrap().addr());
+                    match self.send_to_addr(dest, msg_string.as_bytes()).await {
+                        Ok(_) => break,
+                        Err(e) => {
+                            debug!("{e:?}");
+                            // Return last error if still unsuccessful
+                            if ctr >= tries - 1 {
+                                bail!(e)
+                            }
+                            // Otherwise back off linearly and retry
+                            ctr += 1;
+                            tokio::time::sleep(Duration::from_millis(base_delay * ctr)).await;
+                        }
+                    }
+                }
                 Ok(())
             }
             Err(e) => bail!(e),
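For reference, here is a minimal, self-contained sketch of the retry-with-linear-backoff pattern the new `send` uses, applied to a generic fallible async operation. The names `try_send` and `send_with_retries` are illustrative stand-ins, not part of the ubisync API; the defaults (3 tries, 200 ms base delay) match the commit.

use std::time::Duration;
use anyhow::bail;

// Stand-in for send_to_addr: fails on the first two attempts.
async fn try_send(attempt: u64) -> anyhow::Result<()> {
    if attempt < 2 {
        bail!("simulated I2P stream error")
    }
    Ok(())
}

async fn send_with_retries(custom_retry_settings: Option<(u64, u64)>) -> anyhow::Result<()> {
    // Defaults as in the commit: 3 attempts, 200 ms base delay.
    let (tries, base_delay) = custom_retry_settings.unwrap_or((3, 200));
    let mut ctr = 0;
    while ctr < tries {
        match try_send(ctr).await {
            Ok(_) => return Ok(()),
            // Last attempt failed: give up and return the error.
            Err(e) if ctr + 1 >= tries => return Err(e),
            Err(_) => {
                ctr += 1;
                // Linear backoff: 200 ms, then 400 ms, then 600 ms, ...
                tokio::time::sleep(Duration::from_millis(base_delay * ctr)).await;
            }
        }
    }
    Ok(())
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // None -> defaults; Some((5, 500)) would mean 5 attempts with a 500 ms base delay.
    send_with_retries(None).await
}

Passing the settings as an `Option<(u64, u64)>` keeps existing call sites working by simply adding `None`, which is what the other files touched by this commit do.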

View file

@@ -79,7 +79,11 @@ impl Ubisync {
     pub async fn request_family_join(&self, peer: PeerId) -> anyhow::Result<()> {
         self.state_handle.add_family_join_request(peer.clone());
         self.comm_handle
-            .send(peer.addr_ref(), Message::new(MessageContent::JoinFamily))
+            .send(
+                peer.addr_ref(),
+                Message::new(MessageContent::JoinFamily),
+                None,
+            )
             .await
     }
@@ -99,6 +103,7 @@ impl Ubisync {
                     family,
                     metastate_share,
                 }),
+                None,
             )
             .await
             .expect("Could not send family join confirmation to peer");
@@ -180,14 +185,14 @@ mod tests {
         std::process::exit(0);
     }

     #[tokio::test(flavor = "multi_thread")]
     async fn sync_metastate() {
         /*
             create & join family
         */
         tracing_subscriber::fmt()
-            .pretty()
+            .compact()
+            .with_file(true)
             .with_max_level(Level::DEBUG)
             .init();
@@ -214,8 +219,7 @@ mod tests {
         tokio::time::sleep(Duration::from_millis(5000)).await;

         /*
             create & sync metastate
         */
     }
 }

View file

@@ -58,17 +58,18 @@ impl StateDB {
     }

     pub fn set_metastate_share(&self, share: ShareId) {
-        let _ = self.db.set_key("metastate_share", &share).execute();
+        self.db
+            .set_key("metastate_share", &share)
+            .execute()
+            .expect("Could not set metastate share");
     }

     pub fn get_metastate_share(&self) -> Option<ShareId> {
         self.db
             .get_key("metastate_share")
             .query()
-            .ok()
-            .flatten()
-            .map(|val| val.deserialize().ok())
-            .flatten()
+            .ok()?
+            .and_then(|val| val.deserialize().ok())
     }

     pub const fn apps(&self) -> Apps {
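The `get_metastate_share` rewrite is mostly `Option` plumbing. A small standalone sketch of the same transformation, using plain standard-library types instead of the StateDB query API (the `query` and `deserialize` stand-ins below are assumptions, not the real ubisync calls):

// `query` stands in for a DB call returning Result<Option<_>, _>,
// `deserialize` for the fallible conversion into the target type.
fn query() -> Result<Option<&'static str>, &'static str> {
    Ok(Some("share-id"))
}

fn deserialize(raw: &str) -> Result<String, &'static str> {
    Ok(raw.to_string())
}

// Old style: .ok() + .flatten() + .map(...) + .flatten()
fn get_old() -> Option<String> {
    query()
        .ok()
        .flatten()
        .map(|val| deserialize(val).ok())
        .flatten()
}

// New style: `?` short-circuits to None if the query itself failed;
// and_then replaces the map + flatten pair.
fn get_new() -> Option<String> {
    query().ok()?.and_then(|val| deserialize(val).ok())
}

fn main() {
    assert_eq!(get_old(), get_new());
}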

View file

@@ -15,7 +15,7 @@ use ubisync_lib::{
     },
 };

-use anyhow::Error;
+use anyhow::{anyhow, bail, Error};
 use tracing::{debug, warn};

 mod api_state;
@@ -177,10 +177,7 @@ impl State {
             },
             members: metastate_members,
         });
-        let _ = self
-            .db
-            .pots()
-            .add(metastate_potid.clone(), "".to_string());
+        let _ = self.db.pots().add(metastate_potid.clone(), "".to_string());
         debug!("{:?}", self.db.pots().get_all().unwrap());

         let _ = self.db.elements().add(
@@ -224,20 +221,42 @@ impl State {
                     members: HashSet::from([my_id.clone(), peer.clone()]),
                 };
                 self.db.families().add(family.clone())?;
+
+                let metastate_pot = PotId::new();
+                self.db
+                    .pots()
+                    .add(metastate_pot.clone(), "ubisync".to_string())
+                    .expect("Could not add metastate pot");
+
+                let metastate_share = ShareId::new();
+                let mut metastate_members = HashMap::new();
+                metastate_members.insert(family.id.clone(), SharePermissions::Owner);
+                self.db
+                    .shares()
+                    .add(Share {
+                        id: metastate_share.clone(),
+                        content: ShareContent {
+                            pots: vec![metastate_pot],
+                        },
+                        members: metastate_members,
+                    })
+                    .expect("Could not add metastate share");
+                self.db.set_metastate_share(metastate_share);
+
                 family
             }
         };

-        self.send_to_peers(
-            MessageContent::AddedToFamily {
-                family: my_family,
-                metastate_share: self
-                    .db
-                    .get_metastate_share()
-                    .expect("Node is in a family, but does not know metastate ShareId"),
-            },
-            vec![peer],
-        );
+        let mc = MessageContent::AddedToFamily {
+            family: my_family,
+            metastate_share: match self.db.get_metastate_share() {
+                Some(s) => s,
+                None => {
+                    bail!("Metastate not found")
+                }
+            },
+        };
+        self.send_to_peers(mc, vec![peer]);

         Ok(())
     }
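One behavioural note on the hunk above: the `.expect(...)` that used to panic when no metastate ShareId was stored is replaced by a `match` that returns an error via `bail!`, so the caller sees an `Err` instead of a crash. A minimal sketch of that pattern, with a hypothetical `get_metastate_share` stand-in:

use anyhow::bail;

// Stand-in for StateDB::get_metastate_share.
fn get_metastate_share() -> Option<String> {
    None
}

fn build_added_to_family() -> anyhow::Result<String> {
    let metastate_share = match get_metastate_share() {
        Some(s) => s,
        None => bail!("Metastate not found"),
    };
    Ok(metastate_share)
}

fn main() {
    // Prints an Err instead of panicking.
    println!("{:?}", build_added_to_family());
}

An equivalent one-liner would be `get_metastate_share().ok_or_else(|| anyhow!("Metastate not found"))?`, which may be why `anyhow!` is now imported alongside `bail` in this file.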
@@ -277,14 +296,10 @@ impl State {
     }

     pub fn emit_node_event(&self, ev: UbisyncNodeEvent) -> anyhow::Result<()> {
-        debug!("1");
         match self.node_event_callback.read() {
             Ok(readguard) => {
-                debug!("2");
-                debug!("{:?}", readguard.is_some());
                 // If a callback is set, call it.
                 if let Some(cb) = readguard.as_ref() {
-                    debug!("3");
                     (*cb)(ev);
                 }
                 // Whether a callback is set or not, return Ok(_)
@@ -328,15 +343,21 @@ impl State {
     }

     pub fn send_to_peers(&self, ct: MessageContent, peers: Vec<PeerId>) {
+        debug!("1");
         match self.comm_handle.read() {
             Ok(opt) => {
+                debug!("2");
                 if opt.is_some() {
+                    debug!("3");
                     let arc = opt.as_ref().unwrap().clone();
                     tokio::task::spawn_blocking(|| {
+                        debug!("4");
                         tokio::spawn(async move {
+                            debug!("5");
                             for peer in peers {
+                                debug!("6");
                                 let _ = arc
-                                    .send(peer.addr_ref(), Message::new(ct.clone()))
+                                    .send(peer.addr_ref(), Message::new(ct.clone()), None)
                                     .await
                                     .map_err(|e| {
                                         debug!(
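`send_to_peers` is synchronous but needs to drive the async `send`, which is why the hunk above nests `tokio::task::spawn_blocking` and `tokio::spawn`. A simpler sketch of the same idea, spawning directly onto the current runtime; `fake_send` and the peer strings are placeholders, not ubisync APIs:

use tokio::runtime::Handle;

// Placeholder for CommHandle::send.
async fn fake_send(peer: String, msg: String) {
    println!("sending '{msg}' to {peer}");
}

// Synchronous function that fires off async sends. It must be called from
// within a Tokio runtime context, otherwise Handle::current() panics.
fn send_to_peers_sketch(msg: String, peers: Vec<String>) {
    let _ = Handle::current().spawn(async move {
        for peer in peers {
            fake_send(peer, msg.clone()).await;
        }
    });
}

#[tokio::main]
async fn main() {
    send_to_peers_sketch("hello".into(), vec!["peer-a".into(), "peer-b".into()]);
    // Give the detached task a moment to finish before the runtime shuts down.
    tokio::time::sleep(std::time::Duration::from_millis(50)).await;
}

The commit's version wraps the spawn in `spawn_blocking` first; that also appears to work because Tokio's blocking-pool threads keep the runtime context, but it adds an extra thread hop that is not required just to spawn an async task.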