deps: bump Rust to 1.75 and fix clippy lints
Pull-Request: #5043.
thomaseizinger committed Dec 30, 2023
1 parent 4fc911e commit 9862187
Showing 10 changed files with 48 additions and 46 deletions.
5 changes: 2 additions & 3 deletions .github/workflows/ci.yml
@@ -234,9 +234,8 @@ jobs:
       fail-fast: false
       matrix:
         rust-version: [
-          # 1.72.0, # current stable
-          # beta,
-          nightly-2023-09-10
+          1.75.0, # current stable
+          beta,
         ]
     steps:
       - uses: actions/checkout@v4
2 changes: 1 addition & 1 deletion muxers/mplex/benches/split_send_size.rs
@@ -101,7 +101,7 @@ fn prepare(c: &mut Criterion) {
 fn run(
     receiver_trans: &mut BenchTransport,
     sender_trans: &mut BenchTransport,
-    payload: &Vec<u8>,
+    payload: &[u8],
     listen_addr: &Multiaddr,
 ) {
     receiver_trans
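The signature change is clippy's `ptr_arg` lint: a `&Vec<u8>` parameter forces callers to hold an owned `Vec`, whereas `&[u8]` accepts any contiguous byte buffer via deref coercion. A minimal sketch of the difference (the `checksum` helper is hypothetical, not part of the benchmark):

```rust
// Hypothetical helper: `&[u8]` accepts a `Vec<u8>`, an array, or a sub-slice,
// while the old `&Vec<u8>` signature would only have accepted a `Vec`.
fn checksum(payload: &[u8]) -> u32 {
    payload.iter().map(|&b| u32::from(b)).sum()
}

fn main() {
    let owned: Vec<u8> = vec![1, 2, 3];
    assert_eq!(checksum(&owned), 6); // `&Vec<u8>` coerces to `&[u8]`
    assert_eq!(checksum(&owned[1..]), 5); // sub-slices work with no copy
}
```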
6 changes: 3 additions & 3 deletions muxers/mplex/src/io.rs
@@ -224,7 +224,7 @@ where
             // yield to give the current task a chance to read
             // from the respective substreams.
             if num_buffered == self.config.max_buffer_len {
-                cx.waker().clone().wake();
+                cx.waker().wake_by_ref();
                 return Poll::Pending;
             }

@@ -456,7 +456,7 @@
                 // next frame for `id`, yield to give the current task
                 // a chance to read from the other substream(s).
                 if num_buffered == self.config.max_buffer_len {
-                    cx.waker().clone().wake();
+                    cx.waker().wake_by_ref();
                     return Poll::Pending;
                 }

@@ -663,7 +663,7 @@
                     connection=%self.id,
                     "No task to read from blocked stream. Waking current task."
                 );
-                cx.waker().clone().wake();
+                cx.waker().wake_by_ref();
             } else if let Some(id) = stream_id {
                 // We woke some other task, but are still interested in
                 // reading `Data` frames from the current stream when unblocked.
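All three hunks apply the same fix, matching clippy's `waker_clone_wake` lint: `Waker::wake` consumes the waker, so waking the current task from inside `poll` previously required a clone; `Waker::wake_by_ref` has the same effect without it. A minimal sketch of the pattern (the `YieldOnce` future is hypothetical, standing in for the mplex code that defers when its read buffer is full):

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

/// Hypothetical future that yields back to the executor once before
/// completing, mirroring how the code above defers to other substreams.
struct YieldOnce(bool);

impl Future for YieldOnce {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        if self.0 {
            Poll::Ready(())
        } else {
            self.0 = true;
            // Before: cx.waker().clone().wake();
            // After: identical effect, no clone of the waker.
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}
```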
36 changes: 21 additions & 15 deletions protocols/kad/src/behaviour.rs
@@ -519,7 +519,7 @@ where
         };
         let key = kbucket::Key::from(*peer);
         match self.kbuckets.entry(&key) {
-            kbucket::Entry::Present(mut entry, _) => {
+            Some(kbucket::Entry::Present(mut entry, _)) => {
                 if entry.value().insert(address) {
                     self.queued_events
                         .push_back(ToSwarm::GenerateEvent(Event::RoutingUpdated {
@@ -536,11 +536,11 @@
                 }
                 RoutingUpdate::Success
             }
-            kbucket::Entry::Pending(mut entry, _) => {
+            Some(kbucket::Entry::Pending(mut entry, _)) => {
                 entry.value().insert(address);
                 RoutingUpdate::Pending
             }
-            kbucket::Entry::Absent(entry) => {
+            Some(kbucket::Entry::Absent(entry)) => {
                 let addresses = Addresses::new(address);
                 let status = if self.connected_peers.contains(peer) {
                     NodeStatus::Connected
@@ -578,7 +578,7 @@
                     }
                 }
             }
-            kbucket::Entry::SelfEntry => RoutingUpdate::Failed,
+            None => RoutingUpdate::Failed,
         }
}

@@ -599,7 +599,7 @@
     ) -> Option<kbucket::EntryView<kbucket::Key<PeerId>, Addresses>> {
         let address = &address.to_owned().with_p2p(*peer).ok()?;
         let key = kbucket::Key::from(*peer);
-        match self.kbuckets.entry(&key) {
+        match self.kbuckets.entry(&key)? {
             kbucket::Entry::Present(mut entry, _) => {
                 if entry.value().remove(address).is_err() {
                     Some(entry.remove()) // it is the last address, thus remove the peer.
@@ -614,7 +614,7 @@
                     None
                 }
             }
-            kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => None,
+            kbucket::Entry::Absent(..) => None,
         }
     }

@@ -627,10 +627,10 @@
         peer: &PeerId,
     ) -> Option<kbucket::EntryView<kbucket::Key<PeerId>, Addresses>> {
         let key = kbucket::Key::from(*peer);
-        match self.kbuckets.entry(&key) {
+        match self.kbuckets.entry(&key)? {
             kbucket::Entry::Present(entry, _) => Some(entry.remove()),
             kbucket::Entry::Pending(entry, _) => Some(entry.remove()),
-            kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => None,
+            kbucket::Entry::Absent(..) => None,
         }
     }

@@ -1164,7 +1164,8 @@ where
                 let key = kbucket::Key::from(node_id);
                 kbuckets
                     .entry(&key)
-                    .view()
+                    .as_mut()
+                    .and_then(|e| e.view())
                     .map(|e| e.node.value.clone().into_vec())
             }
         } else {
@@ -1220,7 +1221,7 @@
     ) {
         let key = kbucket::Key::from(peer);
         match self.kbuckets.entry(&key) {
-            kbucket::Entry::Present(mut entry, old_status) => {
+            Some(kbucket::Entry::Present(mut entry, old_status)) => {
                 if old_status != new_status {
                     entry.update(new_status)
                 }
@@ -1243,7 +1244,7 @@
                 }
             }
 
-            kbucket::Entry::Pending(mut entry, old_status) => {
+            Some(kbucket::Entry::Pending(mut entry, old_status)) => {
                 if let Some(address) = address {
                     entry.value().insert(address);
                 }
@@ -1252,7 +1253,7 @@
                 }
             }
 
-            kbucket::Entry::Absent(entry) => {
+            Some(kbucket::Entry::Absent(entry)) => {
                 // Only connected nodes with a known address are newly inserted.
                 if new_status != NodeStatus::Connected {
                     return;
@@ -1863,7 +1864,7 @@
     fn address_failed(&mut self, peer_id: PeerId, address: &Multiaddr) {
         let key = kbucket::Key::from(peer_id);
 
-        if let Some(addrs) = self.kbuckets.entry(&key).value() {
+        if let Some(addrs) = self.kbuckets.entry(&key).as_mut().and_then(|e| e.value()) {
             // TODO: Ideally, the address should only be removed if the error can
             // be classified as "permanent" but since `err` is currently a borrowed
             // trait object without a `'static` bound, even downcasting for inspection
@@ -1931,7 +1932,12 @@
         let (old, new) = (old.get_remote_address(), new.get_remote_address());
 
         // Update routing table.
-        if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::from(peer)).value() {
+        if let Some(addrs) = self
+            .kbuckets
+            .entry(&kbucket::Key::from(peer))
+            .as_mut()
+            .and_then(|e| e.value())
+        {
             if addrs.replace(old, new) {
                 tracing::debug!(
                     %peer,
@@ -2132,7 +2138,7 @@
                 // the addresses of that peer in the k-buckets.
                 let key = kbucket::Key::from(peer_id);
                 let mut peer_addrs =
-                    if let kbucket::Entry::Present(mut entry, _) = self.kbuckets.entry(&key) {
+                    if let Some(kbucket::Entry::Present(mut entry, _)) = self.kbuckets.entry(&key) {
                         let addrs = entry.value().iter().cloned().collect::<Vec<_>>();
                         debug_assert!(!addrs.is_empty(), "Empty peer addresses in routing table.");
                         addrs
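Every call site now matches on `Option<kbucket::Entry>` rather than a dedicated `SelfEntry` variant, so the local-node case short-circuits with `?`. A simplified, self-contained analogue of the pattern (not the actual kad types):

```rust
use std::collections::HashMap;

// Simplified analogue of the refactor: a lookup that used to signal "this is
// the local node" with an extra enum variant now returns `Option<Entry>`.
enum Entry<'a> {
    Present(&'a mut Vec<String>),
    Absent,
}

struct Table {
    local_key: u64,
    values: HashMap<u64, Vec<String>>,
}

impl Table {
    /// `None` replaces the old `Entry::SelfEntry` case.
    fn entry(&mut self, key: u64) -> Option<Entry<'_>> {
        if key == self.local_key {
            return None;
        }
        Some(match self.values.get_mut(&key) {
            Some(addrs) => Entry::Present(addrs),
            None => Entry::Absent,
        })
    }

    fn remove_address(&mut self, key: u64, addr: &str) -> Option<()> {
        // `?` short-circuits for the local node, as in the hunks above.
        match self.entry(key)? {
            Entry::Present(addrs) => {
                addrs.retain(|a| a != addr);
                Some(())
            }
            Entry::Absent => None,
        }
    }
}
```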
37 changes: 17 additions & 20 deletions protocols/kad/src/kbucket.rs
@@ -172,17 +172,16 @@ where
 
     /// Returns an `Entry` for the given key, representing the state of the entry
     /// in the routing table.
-    pub(crate) fn entry<'a>(&'a mut self, key: &'a TKey) -> Entry<'a, TKey, TVal> {
-        let index = BucketIndex::new(&self.local_key.as_ref().distance(key));
-        if let Some(i) = index {
-            let bucket = &mut self.buckets[i.get()];
-            if let Some(applied) = bucket.apply_pending() {
-                self.applied_pending.push_back(applied)
-            }
-            Entry::new(bucket, key)
-        } else {
-            Entry::SelfEntry
-        }
+    ///
+    /// Returns `None` in case the key points to the local node.
+    pub(crate) fn entry<'a>(&'a mut self, key: &'a TKey) -> Option<Entry<'a, TKey, TVal>> {
+        let index = BucketIndex::new(&self.local_key.as_ref().distance(key))?;
+
+        let bucket = &mut self.buckets[index.get()];
+        if let Some(applied) = bucket.apply_pending() {
+            self.applied_pending.push_back(applied)
+        }
+        Some(Entry::new(bucket, key))
     }
 
     /// Returns an iterator over all buckets.
@@ -627,7 +626,7 @@ mod tests {
         let other_id = Key::from(PeerId::random());
 
         let mut table = KBucketsTable::<_, ()>::new(local_key, Duration::from_secs(5));
-        if let Entry::Absent(entry) = table.entry(&other_id) {
+        if let Some(Entry::Absent(entry)) = table.entry(&other_id) {
            match entry.insert((), NodeStatus::Connected) {
                InsertResult::Inserted => (),
                _ => panic!(),
@@ -645,10 +644,8 @@
     fn entry_self() {
         let local_key = Key::from(PeerId::random());
         let mut table = KBucketsTable::<_, ()>::new(local_key.clone(), Duration::from_secs(5));
-        match table.entry(&local_key) {
-            Entry::SelfEntry => (),
-            _ => panic!(),
-        }
+
+        assert!(table.entry(&local_key).is_none())
     }
 
     #[test]
@@ -661,7 +658,7 @@
                 break;
             }
             let key = Key::from(PeerId::random());
-            if let Entry::Absent(e) = table.entry(&key) {
+            if let Some(Entry::Absent(e)) = table.entry(&key) {
                 match e.insert((), NodeStatus::Connected) {
                     InsertResult::Inserted => count += 1,
                     _ => continue,
@@ -694,10 +691,10 @@
         let full_bucket_index;
         loop {
             let key = Key::from(PeerId::random());
-            if let Entry::Absent(e) = table.entry(&key) {
+            if let Some(Entry::Absent(e)) = table.entry(&key) {
                 match e.insert((), NodeStatus::Disconnected) {
                     InsertResult::Full => {
-                        if let Entry::Absent(e) = table.entry(&key) {
+                        if let Some(Entry::Absent(e)) = table.entry(&key) {
                             match e.insert((), NodeStatus::Connected) {
                                 InsertResult::Pending { disconnected } => {
                                     expected_applied = AppliedPending {
@@ -732,12 +729,12 @@
         full_bucket.pending_mut().unwrap().set_ready_at(elapsed);
 
         match table.entry(&expected_applied.inserted.key) {
-            Entry::Present(_, NodeStatus::Connected) => {}
+            Some(Entry::Present(_, NodeStatus::Connected)) => {}
             x => panic!("Unexpected entry: {x:?}"),
         }
 
         match table.entry(&expected_applied.evicted.as_ref().unwrap().key) {
-            Entry::Absent(_) => {}
+            Some(Entry::Absent(_)) => {}
             x => panic!("Unexpected entry: {x:?}"),
         }

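The rewritten `entry` leans on the `?` operator for `Option`, which returns `None` early and removes a level of nesting compared to the old `if let ... else { Entry::SelfEntry }`. The same shape in isolation (hypothetical helpers, not the kad distance logic):

```rust
// A zero distance stands in for "key is the local node"; `checked_ilog2`
// returns `None` for zero, like `BucketIndex::new` above.
fn bucket_index(distance: u32) -> Option<usize> {
    distance.checked_ilog2().map(|i| i as usize)
}

fn entry(buckets: &mut [Vec<u32>], distance: u32) -> Option<&mut Vec<u32>> {
    // Before: `if let Some(i) = index { ... } else { Entry::SelfEntry }`.
    // After: `?` propagates the local-node case as `None` directly.
    let i = bucket_index(distance)?;
    buckets.get_mut(i)
}
```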
3 changes: 0 additions & 3 deletions protocols/kad/src/kbucket/entry.rs
@@ -81,8 +81,6 @@ pub(crate) enum Entry<'a, TPeerId, TVal> {
     Pending(PendingEntry<'a, TPeerId, TVal>, NodeStatus),
     /// The entry is absent and may be inserted.
     Absent(AbsentEntry<'a, TPeerId, TVal>),
-    /// The entry represents the local node.
-    SelfEntry,
 }
 
 /// The internal representation of the different states of an `Entry`,
@@ -144,7 +142,6 @@
             Entry::Present(entry, _) => Some(entry.value()),
             Entry::Pending(entry, _) => Some(entry.value()),
             Entry::Absent(_) => None,
-            Entry::SelfEntry => None,
         }
     }
 }
2 changes: 1 addition & 1 deletion protocols/relay/src/behaviour/rate_limiter.rs
@@ -124,7 +124,7 @@ impl<Id: Eq + PartialEq + Hash + Clone> GenericRateLimiter<Id> {
         // Note when used with a high number of buckets: This loop refills all the to-be-refilled
         // buckets at once, thus potentially delaying the parent call to `try_next`.
         loop {
-            match self.refill_schedule.get(0) {
+            match self.refill_schedule.front() {
                 // Only continue if (a) there is a bucket and (b) the bucket has not already been
                 // refilled recently.
                 Some((last_refill, _)) if now.duration_since(*last_refill) >= self.interval => {}
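This matches clippy's `get_first` lint: on a `VecDeque`, `front()` is equivalent to `get(0)` but names the intent directly. A minimal illustration:

```rust
use std::collections::VecDeque;

fn main() {
    let mut schedule: VecDeque<(u64, &str)> = VecDeque::new();
    schedule.push_back((1, "bucket-a"));
    schedule.push_back((2, "bucket-b"));

    // Equivalent lookups; `front()` is what the lint suggests over `get(0)`.
    assert_eq!(schedule.get(0), schedule.front());
    assert_eq!(schedule.front(), Some(&(1, "bucket-a")));
}
```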
1 change: 1 addition & 0 deletions transports/dns/src/lib.rs
@@ -416,6 +416,7 @@ pub enum Error<TErr> {
     /// The underlying transport encountered an error.
     Transport(TErr),
     /// DNS resolution failed.
+    #[allow(clippy::enum_variant_names)]
     ResolveError(ResolveError),
     /// DNS resolution was successful, but the underlying transport refused the resolved address.
     MultiaddrNotSupported(Multiaddr),
1 change: 1 addition & 0 deletions transports/noise/src/lib.rs
@@ -240,6 +240,7 @@ pub enum Error {
     #[error("failed to decode protobuf ")]
     InvalidPayload(#[from] DecodeError),
     #[error(transparent)]
+    #[allow(clippy::enum_variant_names)]
     SigningError(#[from] libp2p_identity::SigningError),
     #[error("Expected WebTransport certhashes ({}) are not a subset of received ones ({})", certhashes_to_string(.0), certhashes_to_string(.1))]
     UnknownWebTransportCerthashes(HashSet<Multihash<64>>, HashSet<Multihash<64>>),
1 change: 1 addition & 0 deletions transports/webtransport-websys/src/error.rs
@@ -11,6 +11,7 @@ pub enum Error {
     Noise(#[from] libp2p_noise::Error),
 
     #[error("JavaScript error: {0}")]
+    #[allow(clippy::enum_variant_names)]
     JsError(String),
 
     #[error("JavaScript typecasting failed")]
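This and the two preceding files add the same `#[allow(clippy::enum_variant_names)]`: the lint fires when a variant name repeats the enum's name, as `ResolveError`, `SigningError`, and `JsError` do inside `Error`. The names are kept because they mirror the underlying error types, so the lint is allowed per-variant instead. In isolation (a sketch, with `String` standing in for the wrapped upstream type):

```rust
// What the lint objects to: `ResolveError` ends with the enum name `Error`.
// The repetition is deliberate here, so it is allowed rather than renamed.
pub enum Error {
    Transport(std::io::Error),
    #[allow(clippy::enum_variant_names)]
    ResolveError(String), // stands in for the upstream resolver's error type
}
```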
