From 5c1317895be92896879d8edabcd924abc8c2afc0 Mon Sep 17 00:00:00 2001
From: Jay
Date: Sat, 29 Feb 2020 00:03:30 +0800
Subject: [PATCH] raftstore/hibernate: wake up on proposal (#6736)

Signed-off-by: Jay Lee
---
 src/raftstore/store/peer.rs                   |  2 +
 .../integrations/raftstore/test_hibernate.rs  | 97 ++++++++++++++++++-
 2 files changed, 97 insertions(+), 2 deletions(-)

diff --git a/src/raftstore/store/peer.rs b/src/raftstore/store/peer.rs
index fde3c617ea5c..02e5c68a76a2 100644
--- a/src/raftstore/store/peer.rs
+++ b/src/raftstore/store/peer.rs
@@ -1669,6 +1669,7 @@ impl Peer {
             // possible.
             self.raft_group.skip_bcast_commit(false);
         }
+        self.should_wake_up = true;
         let meta = ProposalMeta {
             index: idx,
             term: self.term(),
@@ -2016,6 +2017,7 @@ impl Peer {
         let read_proposal = ReadIndexRequest::with_command(id, req, cb, renew_lease_time);
         self.pending_reads.reads.push_back(read_proposal);
+        self.should_wake_up = true;
 
         // TimeoutNow has been sent out, so we need to propose explicitly to
         // update leader lease.
diff --git a/tests/integrations/raftstore/test_hibernate.rs b/tests/integrations/raftstore/test_hibernate.rs
index 042086369686..9a5446fc29ba 100644
--- a/tests/integrations/raftstore/test_hibernate.rs
+++ b/tests/integrations/raftstore/test_hibernate.rs
@@ -4,11 +4,104 @@
 use std::sync::*;
 use std::thread;
 use std::time::*;
 
-use raft::eraftpb::MessageType;
-
+use futures::Future;
+use tikv::pd_client::PdClient;
+use raft::eraftpb::{ConfChangeType, MessageType};
 use test_raftstore::*;
 use tikv_util::HandyRwLock;
 
+#[test]
+fn test_proposal_prevent_sleep() {
+    let mut cluster = new_node_cluster(0, 3);
+    configure_for_hibernate(&mut cluster);
+    cluster.run();
+    cluster.must_transfer_leader(1, new_peer(1, 1));
+    cluster.must_put(b"k1", b"v1");
+    must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
+
+    // Wait till leader peer goes to sleep.
+    thread::sleep(
+        cluster.cfg.raft_store.raft_base_tick_interval.0
+            * 2
+            * cluster.cfg.raft_store.raft_election_timeout_ticks as u32,
+    );
+    cluster.add_send_filter(CloneFilterFactory(
+        RegionPacketFilter::new(1, 1).direction(Direction::Send),
+    ));
+    let region = cluster
+        .pd_client
+        .get_region_by_id(1)
+        .wait()
+        .unwrap()
+        .unwrap();
+
+    let put = new_put_cmd(b"k2", b"v2");
+    let mut req = new_request(1, region.get_region_epoch().clone(), vec![put], true);
+    req.mut_header().set_peer(new_peer(1, 1));
+    // Ignore the result: we only need the command delivered to peer (1, 1).
+    // Peer 1's outgoing messages are blocked by the filter above, so the
+    // proposal can't be committed and the call is expected to time out.
+    let _ = cluster.call_command(req, Duration::from_millis(10));
+    cluster.clear_send_filters();
+    must_get_equal(&cluster.get_engine(3), b"k2", b"v2");
+    assert_eq!(cluster.leader_of_region(1), Some(new_peer(1, 1)));
+
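+    // A read index request should likewise set `should_wake_up` (see the
+    // second peer.rs hunk above) and rouse the hibernating leader;
+    // otherwise the response below would time out.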
+    // Wait till leader peer goes to sleep again.
+    thread::sleep(
+        cluster.cfg.raft_store.raft_base_tick_interval.0
+            * 2
+            * cluster.cfg.raft_store.raft_election_timeout_ticks as u32,
+    );
+    cluster.add_send_filter(CloneFilterFactory(
+        RegionPacketFilter::new(1, 1).direction(Direction::Send),
+    ));
+    let mut request = new_request(
+        region.get_id(),
+        region.get_region_epoch().clone(),
+        vec![new_read_index_cmd()],
+        true,
+    );
+    request.mut_header().set_peer(new_peer(1, 1));
+    let (cb, rx) = make_cb(&request);
+    // Send the read index request to peer (1, 1) on node 1.
+    cluster
+        .sim
+        .rl()
+        .async_command_on_node(1, request, cb)
+        .unwrap();
+    thread::sleep(Duration::from_millis(10));
+    cluster.clear_send_filters();
+    let resp = rx.recv_timeout(Duration::from_secs(5)).unwrap();
+    assert!(
+        !resp.get_header().has_error(),
+        "{:?}",
+        resp.get_header().get_error()
+    );
+
+    // Wait till leader peer goes to sleep again.
+    thread::sleep(
+        cluster.cfg.raft_store.raft_base_tick_interval.0
+            * 2
+            * cluster.cfg.raft_store.raft_election_timeout_ticks as u32,
+    );
+    cluster.add_send_filter(CloneFilterFactory(
+        RegionPacketFilter::new(1, 1).direction(Direction::Send),
+    ));
+    let conf_change = new_change_peer_request(ConfChangeType::RemoveNode, new_peer(3, 3));
+    let mut admin_req = new_admin_request(1, region.get_region_epoch(), conf_change);
+    admin_req.mut_header().set_peer(new_peer(1, 1));
+    let (cb, _rx) = make_cb(&admin_req);
+    cluster
+        .sim
+        .rl()
+        .async_command_on_node(1, admin_req, cb)
+        .unwrap();
+    thread::sleep(Duration::from_millis(10));
+    cluster.clear_send_filters();
+    cluster.pd_client.must_none_peer(1, new_peer(3, 3));
+}
+
 /// Tests whether single voter still replicates log to learner after restart.
 ///
 /// A voter will become leader in a single tick. The case check if the role