This repository has been archived by the owner on Nov 6, 2020. It is now read-only.

SecretStore: merge two types of errors into single one + Error::is_non_fatal (#8357)

* SecretStore: error unify initial commit

SecretStore: pass real error in error messages

SecretStore: is_internal_error -> Error::is_non_fatal

warnings

SecretStore: ConsensusTemporaryUnreachable

fix after merge

removed comments

removed comments

SecretStore: updated HTTP error responses

SecretStore: more ConsensusTemporaryUnreachable tests

fix after rebase

* fixed grumbles

* use HashSet in tests
svyatonik authored and 5chdn committed May 1, 2018
1 parent 849f5d9 commit d1f5284
Showing 34 changed files with 577 additions and 440 deletions.
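The changes below replace the previously separate error types with a single Error enum and add Error::is_non_fatal. As orientation before the per-file diffs, here is a minimal sketch of what the unified type might look like; the variant names are taken from the diff, but the exact variant set and the non-fatal classification are assumptions, not the crate's actual definition.

// Hedged sketch of the unified SecretStore error type; not the exact code from this commit.
#[derive(Debug, Clone, PartialEq)]
pub enum Error {
	/// Consensus cannot be reached with the current node configuration (fatal).
	ConsensusUnreachable,
	/// Consensus cannot be reached right now, but may be once more nodes connect.
	ConsensusTemporaryUnreachable,
	/// The requested server key is not found in key storage.
	ServerKeyIsNotFound,
	/// A required node is currently disconnected.
	NodeDisconnected,
	/// The session cannot accept this request in its current state.
	InvalidStateForRequest,
	/// Internal error, carrying the underlying error message.
	Internal(String),
}

impl Error {
	/// Errors that may go away on retry (assumed classification);
	/// callers should not treat such a failure as final.
	pub fn is_non_fatal(&self) -> bool {
		match *self {
			Error::ConsensusTemporaryUnreachable | Error::NodeDisconnected => true,
			_ => false,
		}
	}
}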
4 changes: 2 additions & 2 deletions secret_store/src/acl_storage.rs
@@ -22,7 +22,7 @@ use ethcore::client::{BlockId, ChainNotify, CallContract, RegistryInfo};
use ethereum_types::{H256, Address};
use bytes::Bytes;
use trusted_client::TrustedClient;
use types::all::{Error, ServerKeyId};
use types::{Error, ServerKeyId};

use_contract!(acl_storage, "AclStorage", "res/acl_storage.json");

@@ -113,7 +113,7 @@ impl CachedContract {
self.contract.functions()
.check_permissions()
.call(requester, document.clone(), &do_call)
.map_err(|e| Error::Internal(e.to_string()))
.map_err(|e| Error::Internal(format!("ACL checker call error: {}", e.to_string())))
},
None => Err(Error::Internal("ACL checker contract is not configured".to_owned())),
}
4 changes: 2 additions & 2 deletions secret_store/src/key_server.rs
@@ -27,7 +27,7 @@ use super::key_storage::KeyStorage;
use super::key_server_set::KeyServerSet;
use key_server_cluster::{math, ClusterCore};
use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer, NodeKeyPair};
use types::all::{Error, Public, RequestSignature, Requester, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow,
use types::{Error, Public, RequestSignature, Requester, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow,
ClusterConfiguration, MessageHash, EncryptedMessageSignature, NodeId};
use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration};

@@ -238,7 +238,7 @@ pub mod tests {
use key_server_set::tests::MapKeyServerSet;
use key_server_cluster::math;
use ethereum_types::{H256, H520};
use types::all::{Error, Public, ClusterConfiguration, NodeAddress, RequestSignature, ServerKeyId,
use types::{Error, Public, ClusterConfiguration, NodeAddress, RequestSignature, ServerKeyId,
EncryptedDocumentKey, EncryptedDocumentKeyShadow, MessageHash, EncryptedMessageSignature,
Requester, NodeId};
use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer};
secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs
@@ -143,6 +143,10 @@ pub struct FastestResultComputer {
self_node_id: NodeId,
/// Threshold (if known).
threshold: Option<usize>,
/// Count of all configured key server nodes.
configured_nodes_count: usize,
/// Count of all connected key server nodes.
connected_nodes_count: usize,
}

/// Selects version with most support, waiting for responses from all nodes.
@@ -185,7 +189,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
/// Return result computer reference.
pub fn version_holders(&self, version: &H256) -> Result<BTreeSet<NodeId>, Error> {
Ok(self.data.lock().versions.as_ref().ok_or(Error::InvalidStateForRequest)?
.get(version).ok_or(Error::KeyStorage("key version not found".into()))?
.get(version).ok_or(Error::ServerKeyIsNotFound)?
.clone())
}

@@ -236,7 +240,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
// try to complete session
Self::try_complete(&self.core, &mut *data);
if no_confirmations_required && data.state != SessionState::Finished {
return Err(Error::MissingKeyShare);
return Err(Error::ServerKeyIsNotFound);
} else if data.state == SessionState::Finished {
return Ok(());
}
@@ -266,7 +270,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
&KeyVersionNegotiationMessage::KeyVersions(ref message) =>
self.on_key_versions(sender, message),
&KeyVersionNegotiationMessage::KeyVersionsError(ref message) => {
self.on_session_error(sender, Error::Io(message.error.clone()));
self.on_session_error(sender, message.error.clone());
Ok(())
},
}
@@ -388,7 +392,7 @@ impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
if data.state != SessionState::Finished {
warn!("{}: key version negotiation session failed with timeout", self.core.meta.self_node_id);

data.result = Some(Err(Error::ConsensusUnreachable));
data.result = Some(Err(Error::ConsensusTemporaryUnreachable));
self.core.completed.notify_all();
}
}
@@ -431,19 +435,21 @@ impl SessionTransport for IsolatedSessionTransport {
}

impl FastestResultComputer {
pub fn new(self_node_id: NodeId, key_share: Option<&DocumentKeyShare>) -> Self {
pub fn new(self_node_id: NodeId, key_share: Option<&DocumentKeyShare>, configured_nodes_count: usize, connected_nodes_count: usize) -> Self {
let threshold = key_share.map(|ks| ks.threshold);
FastestResultComputer {
self_node_id: self_node_id,
threshold: threshold,
self_node_id,
threshold,
configured_nodes_count,
connected_nodes_count,
}
}}

impl SessionResultComputer for FastestResultComputer {
fn compute_result(&self, threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>> {
match self.threshold.or(threshold) {
// if there's no versions at all && we're not waiting for confirmations anymore
_ if confirmations.is_empty() && versions.is_empty() => Some(Err(Error::MissingKeyShare)),
_ if confirmations.is_empty() && versions.is_empty() => Some(Err(Error::ServerKeyIsNotFound)),
// if we have key share on this node
Some(threshold) => {
// select version this node have, with enough participants
@@ -459,7 +465,17 @@ impl SessionResultComputer for FastestResultComputer {
.find(|&(_, ref n)| n.len() >= threshold + 1)
.map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0)
.expect("version is only inserted when there's at least one owner; qed"))))
.unwrap_or(Err(Error::ConsensusUnreachable))),
// if there's no version consensus among all connected nodes
// AND we're connected to ALL configured nodes
// OR there are less than required nodes for key restore
// => this means that we can't restore key with CURRENT configuration => respond with fatal error
// otherwise we could try later, after all nodes are connected
.unwrap_or_else(|| Err(if self.configured_nodes_count == self.connected_nodes_count
|| self.configured_nodes_count < threshold + 1 {
Error::ConsensusUnreachable
} else {
Error::ConsensusTemporaryUnreachable
}))),
}
},
// if we do not have share, then wait for all confirmations
@@ -469,7 +485,11 @@
.max_by_key(|&(_, ref n)| n.len())
.map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0)
.expect("version is only inserted when there's at least one owner; qed"))))
.unwrap_or(Err(Error::ConsensusUnreachable))),
.unwrap_or_else(|| Err(if self.configured_nodes_count == self.connected_nodes_count {
Error::ConsensusUnreachable
} else {
Error::ConsensusTemporaryUnreachable
}))),
}
}
}
@@ -480,7 +500,7 @@ impl SessionResultComputer for LargestSupportResultComputer {
return None;
}
if versions.is_empty() {
return Some(Err(Error::MissingKeyShare));
return Some(Err(Error::ServerKeyIsNotFound));
}

versions.iter()
@@ -552,12 +572,15 @@ mod tests {
id: Default::default(),
self_node_id: node_id.clone(),
master_node_id: master_node_id.clone(),
configured_nodes_count: nodes.len(),
connected_nodes_count: nodes.len(),
},
sub_session: sub_sesion.clone(),
key_share: key_storage.get(&Default::default()).unwrap(),
result_computer: Arc::new(FastestResultComputer::new(
node_id.clone(),
key_storage.get(&Default::default()).unwrap().as_ref(),
nodes.len(), nodes.len()
)),
transport: DummyTransport {
cluster: cluster,
@@ -723,13 +746,15 @@ mod tests {
let computer = FastestResultComputer {
self_node_id: Default::default(),
threshold: None,
configured_nodes_count: 1,
connected_nodes_count: 1,
};
assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::MissingKeyShare)));
assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::ServerKeyIsNotFound)));
}

#[test]
fn largest_computer_returns_missing_share_if_no_versions_returned() {
let computer = LargestSupportResultComputer;
assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::MissingKeyShare)));
assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::ServerKeyIsNotFound)));
}
}
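The comment block in compute_result above explains when a failed version negotiation is treated as permanent versus retryable. A condensed sketch of that decision, assuming the field names from the diff and the Error variants sketched earlier (illustration only, not the exact method):

// Hedged sketch of the fatal-vs-temporary decision in FastestResultComputer::compute_result.
fn consensus_failure(configured_nodes_count: usize, connected_nodes_count: usize, threshold: usize) -> Error {
	// If every configured node is already connected, or the configuration is too small to
	// ever gather threshold + 1 key share owners, waiting longer cannot help: fatal.
	if configured_nodes_count == connected_nodes_count || configured_nodes_count < threshold + 1 {
		Error::ConsensusUnreachable
	} else {
		// Otherwise some configured nodes are still offline, so a later retry may succeed.
		Error::ConsensusTemporaryUnreachable
	}
}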
6 changes: 6 additions & 0 deletions secret_store/src/key_server_cluster/admin_sessions/mod.rs
@@ -32,6 +32,10 @@ pub struct ShareChangeSessionMeta {
pub master_node_id: NodeId,
/// Id of node, on which this session is running.
pub self_node_id: NodeId,
/// Count of all configured key server nodes.
pub configured_nodes_count: usize,
/// Count of all connected key server nodes.
pub connected_nodes_count: usize,
}

impl ShareChangeSessionMeta {
@@ -42,6 +46,8 @@ impl ShareChangeSessionMeta {
master_node_id: self.master_node_id,
self_node_id: self.self_node_id,
threshold: all_nodes_set_len.checked_sub(1).ok_or(Error::ConsensusUnreachable)?,
configured_nodes_count: self.configured_nodes_count,
connected_nodes_count: self.connected_nodes_count,
})
}
}
secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs
@@ -282,7 +282,7 @@ impl SessionImpl {
&ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref message) =>
self.on_share_add_message(sender, message),
&ServersSetChangeMessage::ServersSetChangeError(ref message) => {
self.on_session_error(sender, Error::Io(message.error.clone()));
self.on_session_error(sender, message.error.clone());
Ok(())
},
&ServersSetChangeMessage::ServersSetChangeCompleted(ref message) =>
@@ -416,12 +416,14 @@ impl SessionImpl {
match &message.message {
&KeyVersionNegotiationMessage::RequestKeyVersions(ref message) if sender == &self.core.meta.master_node_id => {
let key_id = message.session.clone().into();
let key_share = self.core.key_storage.get(&key_id).map_err(|e| Error::KeyStorage(e.into()))?;
let key_share = self.core.key_storage.get(&key_id)?;
let negotiation_session = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams {
meta: ShareChangeSessionMeta {
id: key_id.clone(),
self_node_id: self.core.meta.self_node_id.clone(),
master_node_id: sender.clone(),
configured_nodes_count: self.core.meta.configured_nodes_count,
connected_nodes_count: self.core.meta.connected_nodes_count,
},
sub_session: message.sub_session.clone().into(),
key_share: key_share,
@@ -492,7 +494,7 @@ impl SessionImpl {

// on nodes, holding selected key share version, we could check if master node plan is correct
let master_node_id = message.master_node_id.clone().into();
if let Some(key_share) = self.core.key_storage.get(&key_id).map_err(|e| Error::KeyStorage(e.into()))? {
if let Some(key_share) = self.core.key_storage.get(&key_id)? {
let version = message.version.clone().into();
if let Ok(key_version) = key_share.version(&version) {
let key_share_owners = key_version.id_numbers.keys().cloned().collect();
@@ -660,7 +662,7 @@ impl SessionImpl {
if !data.new_nodes_set.as_ref()
.expect("new_nodes_set is filled during initialization; session is completed after initialization; qed")
.contains(&self.core.meta.self_node_id) {
self.core.key_storage.clear().map_err(|e| Error::KeyStorage(e.into()))?;
self.core.key_storage.clear()?;
}

data.state = SessionState::Finished;
@@ -709,6 +711,8 @@ impl SessionImpl {
id: key_id,
self_node_id: core.meta.self_node_id.clone(),
master_node_id: master_node_id,
configured_nodes_count: core.meta.configured_nodes_count,
connected_nodes_count: core.meta.connected_nodes_count,
},
cluster: core.cluster.clone(),
key_storage: core.key_storage.clone(),
@@ -731,12 +735,14 @@ impl SessionImpl {
Some(Ok(key_id)) => key_id,
};

let key_share = core.key_storage.get(&key_id).map_err(|e| Error::KeyStorage(e.into()))?;
let key_share = core.key_storage.get(&key_id)?;
let negotiation_session = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams {
meta: ShareChangeSessionMeta {
id: key_id,
self_node_id: core.meta.self_node_id.clone(),
master_node_id: core.meta.self_node_id.clone(),
configured_nodes_count: core.meta.configured_nodes_count,
connected_nodes_count: core.meta.connected_nodes_count,
},
sub_session: math::generate_random_scalar()?,
key_share: key_share,
@@ -888,7 +894,7 @@ impl SessionImpl {
if !data.new_nodes_set.as_ref()
.expect("new_nodes_set is filled during initialization; session is completed after initialization; qed")
.contains(&core.meta.self_node_id) {
core.key_storage.clear().map_err(|e| Error::KeyStorage(e.into()))?;
core.key_storage.clear()?;
}

data.state = SessionState::Finished;
@@ -1011,9 +1017,9 @@ impl KeyVersionNegotiationTransport for ServersSetChangeKeyVersionNegotiationTra
}

fn check_nodes_set(all_nodes_set: &BTreeSet<NodeId>, new_nodes_set: &BTreeSet<NodeId>) -> Result<(), Error> {
// all new nodes must be a part of all nodes set
// all_nodes_set is the set of nodes we're currently connected to (and configured for)
match new_nodes_set.iter().any(|n| !all_nodes_set.contains(n)) {
true => Err(Error::InvalidNodesConfiguration),
true => Err(Error::NodeDisconnected),
false => Ok(())
}
}
@@ -1104,6 +1110,8 @@ pub mod tests {
self_node_id: master_node_id.clone(),
master_node_id: master_node_id.clone(),
id: SessionId::default(),
configured_nodes_count: all_nodes_set.len(),
connected_nodes_count: all_nodes_set.len(),
};

let old_nodes = gml.nodes.iter().map(|n| create_node(meta.clone(), admin_public.clone(), all_nodes_set.clone(), n.1));
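Several call sites in the hunks above (and in the next file) drop the explicit .map_err(|e| Error::KeyStorage(e.into())) wrapping in favour of a bare ?. That works either because the key storage now returns the unified Error directly or because Error gains a From conversion for the storage's error type; a hedged sketch of the latter approach, using a placeholder KeyStorageError name and the Error enum sketched earlier:

// Hedged sketch: with a From conversion, key-storage failures propagate via `?` alone.
// `KeyStorageError` is a placeholder name, not necessarily the crate's real type.
#[derive(Debug)]
pub struct KeyStorageError(pub String);

impl From<KeyStorageError> for Error {
	fn from(err: KeyStorageError) -> Self {
		Error::Internal(format!("key storage error: {}", err.0))
	}
}

// With the conversion in place, code like `core.key_storage.clear()?` needs no map_err:
fn propagate(result: Result<(), KeyStorageError>) -> Result<(), Error> {
	result?; // `?` applies From<KeyStorageError> for Error automatically
	Ok(())
}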
secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs
@@ -155,9 +155,7 @@ pub struct IsolatedSessionTransport {
impl<T> SessionImpl<T> where T: SessionTransport {
/// Create new share addition session.
pub fn new(params: SessionParams<T>) -> Result<Self, Error> {
let key_share = params.key_storage
.get(&params.meta.id)
.map_err(|e| Error::KeyStorage(e.into()))?;
let key_share = params.key_storage.get(&params.meta.id)?;

Ok(SessionImpl {
core: SessionCore {
@@ -257,8 +255,8 @@ impl<T> SessionImpl<T> where T: SessionTransport {
let admin_public = self.core.admin_public.as_ref().cloned().ok_or(Error::ConsensusUnreachable)?;

// key share version is required on ShareAdd master node
let key_share = self.core.key_share.as_ref().ok_or_else(|| Error::KeyStorage("key share is not found on master node".into()))?;
let key_version = key_share.version(&version).map_err(|e| Error::KeyStorage(e.into()))?;
let key_share = self.core.key_share.as_ref().ok_or_else(|| Error::ServerKeyIsNotFound)?;
let key_version = key_share.version(&version)?;

// old nodes set is all non-isolated owners of version holders
let non_isolated_nodes = self.core.transport.nodes();
@@ -326,7 +324,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
&ShareAddMessage::NewKeysDissemination(ref message) =>
self.on_new_keys_dissemination(sender, message),
&ShareAddMessage::ShareAddError(ref message) => {
self.on_session_error(sender, Error::Io(message.error.clone().into()));
self.on_session_error(sender, message.error.clone());
Ok(())
},
}
@@ -714,10 +712,10 @@ impl<T> SessionImpl<T> where T: SessionTransport {
// save encrypted data to the key storage
data.state = SessionState::Finished;
if core.key_share.is_some() {
core.key_storage.update(core.meta.id.clone(), refreshed_key_share.clone())
core.key_storage.update(core.meta.id.clone(), refreshed_key_share.clone())?;
} else {
core.key_storage.insert(core.meta.id.clone(), refreshed_key_share.clone())
}.map_err(|e| Error::KeyStorage(e.into()))?;
core.key_storage.insert(core.meta.id.clone(), refreshed_key_share.clone())?;
}

// signal session completion
data.state = SessionState::Finished;
@@ -851,7 +849,7 @@ impl SessionTransport for IsolatedSessionTransport {
#[cfg(test)]
pub mod tests {
use std::sync::Arc;
use std::collections::{VecDeque, BTreeMap, BTreeSet};
use std::collections::{VecDeque, BTreeMap, BTreeSet, HashSet};
use ethkey::{Random, Generator, Public, KeyPair, Signature, sign};
use ethereum_types::H256;
use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
@@ -952,6 +950,8 @@ pub mod tests {
id: SessionId::default(),
self_node_id: NodeId::default(),
master_node_id: master_node_id,
configured_nodes_count: new_nodes_set.iter().chain(old_nodes_set.iter()).collect::<HashSet<_>>().len(),
connected_nodes_count: new_nodes_set.iter().chain(old_nodes_set.iter()).collect::<HashSet<_>>().len(),
};
let new_nodes = new_nodes_set.iter()
.filter(|n| !old_nodes_set.contains(&n))
@@ -992,6 +992,8 @@ pub mod tests {
id: SessionId::default(),
self_node_id: NodeId::default(),
master_node_id: master_node_id,
configured_nodes_count: new_nodes_set.iter().chain(ml.nodes.keys()).collect::<BTreeSet<_>>().len(),
connected_nodes_count: new_nodes_set.iter().chain(ml.nodes.keys()).collect::<BTreeSet<_>>().len(),
};
let old_nodes_set = ml.nodes.keys().cloned().collect();
let nodes = ml.nodes.iter()
@@ -1102,7 +1104,7 @@ pub mod tests {
assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set),
Some(ml.old_set_signature.clone()),
Some(ml.new_set_signature.clone())
).unwrap_err(), Error::KeyStorage("key share is not found on master node".into()));
).unwrap_err(), Error::ServerKeyIsNotFound);
}

#[test]