diff --git a/src/raftstore/store/peer.rs b/src/raftstore/store/peer.rs
index a147b1b4888b..f5b8f23a7824 100644
--- a/src/raftstore/store/peer.rs
+++ b/src/raftstore/store/peer.rs
@@ -1612,6 +1612,7 @@ impl Peer {
             // possible.
             self.raft_group.skip_bcast_commit(false);
         }
+        self.should_wake_up = true;
         let meta = ProposalMeta {
             index: idx,
             term: self.term(),
@@ -1980,6 +1981,7 @@ impl Peer {
 
         let read = ReadIndexRequest::with_command(id, req, cb, renew_lease_time);
         self.pending_reads.push_back(read, self.is_leader());
+        self.should_wake_up = true;
 
         debug!(
             "request to get a read index";
diff --git a/tests/integrations/raftstore/test_hibernate.rs b/tests/integrations/raftstore/test_hibernate.rs
index 042086369686..e0557deb9617 100644
--- a/tests/integrations/raftstore/test_hibernate.rs
+++ b/tests/integrations/raftstore/test_hibernate.rs
@@ -4,11 +4,104 @@
 use std::sync::*;
 use std::thread;
 use std::time::*;
-use raft::eraftpb::MessageType;
-
+use futures::Future;
+use tikv::pd::PdClient;
+use raft::eraftpb::{ConfChangeType, MessageType};
 use test_raftstore::*;
 use tikv_util::HandyRwLock;
 
+#[test]
+fn test_proposal_prevent_sleep() {
+    let mut cluster = new_node_cluster(0, 3);
+    configure_for_hibernate(&mut cluster);
+    cluster.run();
+    cluster.must_transfer_leader(1, new_peer(1, 1));
+    cluster.must_put(b"k1", b"v1");
+    must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
+
+    // Wait till leader peer goes to sleep.
+    thread::sleep(
+        cluster.cfg.raft_store.raft_base_tick_interval.0
+            * 2
+            * cluster.cfg.raft_store.raft_election_timeout_ticks as u32,
+    );
+
+    cluster.add_send_filter(CloneFilterFactory(
+        RegionPacketFilter::new(1, 1).direction(Direction::Send),
+    ));
+    let region = cluster
+        .pd_client
+        .get_region_by_id(1)
+        .wait()
+        .unwrap()
+        .unwrap();
+
+    let put = new_put_cmd(b"k2", b"v2");
+    let mut req = new_request(1, region.get_region_epoch().clone(), vec![put], true);
+    req.mut_header().set_peer(new_peer(1, 1));
+    // Ignore the response: we only want to deliver the command to peer (1, 1).
+    // While its outgoing messages are filtered, the command can't be committed,
+    // so the call returns a timeout error, which is expected.
+    let _ = cluster.call_command(req, Duration::from_millis(10));
+    cluster.clear_send_filters();
+    must_get_equal(&cluster.get_engine(3), b"k2", b"v2");
+    assert_eq!(cluster.leader_of_region(1), Some(new_peer(1, 1)));
+
+    // Wait till leader peer goes to sleep.
+    thread::sleep(
+        cluster.cfg.raft_store.raft_base_tick_interval.0
+            * 2
+            * cluster.cfg.raft_store.raft_election_timeout_ticks as u32,
+    );
+    cluster.add_send_filter(CloneFilterFactory(
+        RegionPacketFilter::new(1, 1).direction(Direction::Send),
+    ));
+    let mut request = new_request(
+        region.get_id(),
+        region.get_region_epoch().clone(),
+        vec![new_read_index_cmd()],
+        true,
+    );
+    request.mut_header().set_peer(new_peer(1, 1));
+    let (cb, rx) = make_cb(&request);
+    // Send the read index request to peer (1, 1) on node 1.
+    cluster
+        .sim
+        .rl()
+        .async_command_on_node(1, request, cb)
+        .unwrap();
+    thread::sleep(Duration::from_millis(10));
+    cluster.clear_send_filters();
+    let resp = rx.recv_timeout(Duration::from_secs(5)).unwrap();
+    assert!(
+        !resp.get_header().has_error(),
+        "{:?}",
+        resp.get_header().get_error()
+    );
+
+    // Wait till leader peer goes to sleep.
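+    // The sleep is sized as raft_base_tick_interval * 2 * election_timeout_ticks,
+    // i.e. roughly two election timeouts of wall time, assumed long enough for
+    // the idle leader to enter hibernation. This third round checks that an
+    // admin command (a conf change) also wakes the peer, just like the put and
+    // read index above.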
+    thread::sleep(
+        cluster.cfg.raft_store.raft_base_tick_interval.0
+            * 2
+            * cluster.cfg.raft_store.raft_election_timeout_ticks as u32,
+    );
+    cluster.add_send_filter(CloneFilterFactory(
+        RegionPacketFilter::new(1, 1).direction(Direction::Send),
+    ));
+    let conf_change = new_change_peer_request(ConfChangeType::RemoveNode, new_peer(3, 3));
+    let mut admin_req = new_admin_request(1, region.get_region_epoch(), conf_change);
+    admin_req.mut_header().set_peer(new_peer(1, 1));
+    let (cb, _rx) = make_cb(&admin_req);
+    cluster
+        .sim
+        .rl()
+        .async_command_on_node(1, admin_req, cb)
+        .unwrap();
+    thread::sleep(Duration::from_millis(10));
+    cluster.clear_send_filters();
+    cluster.pd_client.must_none_peer(1, new_peer(3, 3));
+}
+
 /// Tests whether single voter still replicates log to learner after restart.
 ///
 /// A voter will become leader in a single tick. The case checks if the role