raftstore/hibernate: wake up on proposal (tikv#6736)
Signed-off-by: Jay Lee <BusyJayLee@gmail.com>
BusyJay committed Feb 28, 2020
1 parent 792a1cb commit 4159c95
Showing 2 changed files with 97 additions and 2 deletions.
2 changes: 2 additions & 0 deletions src/raftstore/store/peer.rs
@@ -1612,6 +1612,7 @@ impl Peer {
                    // possible.
                    self.raft_group.skip_bcast_commit(false);
                }
                self.should_wake_up = true;
                let meta = ProposalMeta {
                    index: idx,
                    term: self.term(),
@@ -1980,6 +1981,7 @@ impl Peer {

        let read = ReadIndexRequest::with_command(id, req, cb, renew_lease_time);
        self.pending_reads.push_back(read, self.is_leader());
        self.should_wake_up = true;

        debug!(
            "request to get a read index";
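The two added self.should_wake_up = true; statements only set a flag; the flag
is consumed elsewhere in the raftstore tick path (not part of this diff), where
a pending proposal keeps the peer from staying in hibernation. A minimal,
self-contained sketch of that interaction, using hypothetical stand-in names
(PeerStub, GroupState, on_tick) rather than TiKV's real types:

// Sketch only: hypothetical stand-ins for raftstore's real peer state.
#[derive(Debug, PartialEq)]
enum GroupState {
    Ordered, // actively ticking
    Idle,    // hibernating; raft base ticks are skipped
}

struct PeerStub {
    group_state: GroupState,
    should_wake_up: bool,
}

impl PeerStub {
    // Mirrors the two lines added in peer.rs: every proposal or read index
    // request marks the peer as needing to wake up.
    fn on_propose(&mut self) {
        self.should_wake_up = true;
    }

    // Called on every raft base tick; returns whether the tick should be
    // processed instead of being skipped because the peer is hibernating.
    fn on_tick(&mut self) -> bool {
        if self.should_wake_up {
            self.should_wake_up = false;
            self.group_state = GroupState::Ordered;
        }
        self.group_state != GroupState::Idle
    }
}

fn main() {
    let mut peer = PeerStub {
        group_state: GroupState::Idle,
        should_wake_up: false,
    };
    assert!(!peer.on_tick()); // hibernating: the tick is skipped
    peer.on_propose(); // a client proposal arrives
    assert!(peer.on_tick()); // the pending proposal forces the peer awake
}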
97 changes: 95 additions & 2 deletions tests/integrations/raftstore/test_hibernate.rs
@@ -4,11 +4,104 @@ use std::sync::*;
use std::thread;
use std::time::*;

use raft::eraftpb::MessageType;

use futures::Future;
use pd_client::PdClient;
use raft::eraftpb::{ConfChangeType, MessageType};
use test_raftstore::*;
use tikv_util::HandyRwLock;

#[test]
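// Tests that proposals wake up a hibernating leader: a normal write, a read
// index request, and a conf change are each sent to a sleeping leader and
// must still make progress.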
fn test_proposal_prevent_sleep() {
    let mut cluster = new_node_cluster(0, 3);
    configure_for_hibernate(&mut cluster);
    cluster.run();
    cluster.must_transfer_leader(1, new_peer(1, 1));
    cluster.must_put(b"k1", b"v1");
    must_get_equal(&cluster.get_engine(3), b"k1", b"v1");

    // Wait till leader peer goes to sleep.
    thread::sleep(
        cluster.cfg.raft_store.raft_base_tick_interval.0
            * 2
            * cluster.cfg.raft_store.raft_election_timeout_ticks as u32,
    );

    cluster.add_send_filter(CloneFilterFactory(
        RegionPacketFilter::new(1, 1).direction(Direction::Send),
    ));
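    // The leader's outgoing messages are now blocked. Propose a normal write:
    // the value can only reach peer 3 after the filter is cleared if the
    // proposal itself wakes the sleeping leader up so that it keeps driving
    // the raft group.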
    let region = cluster
        .pd_client
        .get_region_by_id(1)
        .wait()
        .unwrap()
        .unwrap();

    let put = new_put_cmd(b"k2", b"v2");
    let mut req = new_request(1, region.get_region_epoch().clone(), vec![put], true);
    req.mut_header().set_peer(new_peer(1, 1));
    // Ignore the error: the command is only meant to reach peer (1, 1). Its
    // outgoing messages are filtered, so the request cannot commit and the
    // call times out as expected.
    let _ = cluster.call_command(req, Duration::from_millis(10));
    cluster.clear_send_filters();
    must_get_equal(&cluster.get_engine(3), b"k2", b"v2");
    assert_eq!(cluster.leader_of_region(1), Some(new_peer(1, 1)));

    // Wait till leader peer goes to sleep.
    thread::sleep(
        cluster.cfg.raft_store.raft_base_tick_interval.0
            * 2
            * cluster.cfg.raft_store.raft_election_timeout_ticks as u32,
    );
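    // Repeat with a read index request, which goes through the read index
    // path (pending_reads) rather than the normal write path and must also
    // wake the leader up.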
    cluster.add_send_filter(CloneFilterFactory(
        RegionPacketFilter::new(1, 1).direction(Direction::Send),
    ));
    let mut request = new_request(
        region.get_id(),
        region.get_region_epoch().clone(),
        vec![new_read_index_cmd()],
        true,
    );
    request.mut_header().set_peer(new_peer(1, 1));
    let (cb, rx) = make_cb(&request);
    // Send the read index request directly to peer (1, 1) on node 1.
    cluster
        .sim
        .rl()
        .async_command_on_node(1, request, cb)
        .unwrap();
    thread::sleep(Duration::from_millis(10));
    cluster.clear_send_filters();
    let resp = rx.recv_timeout(Duration::from_secs(5)).unwrap();
    assert!(
        !resp.get_header().has_error(),
        "{:?}",
        resp.get_header().get_error()
    );

    // Wait till leader peer goes to sleep.
    thread::sleep(
        cluster.cfg.raft_store.raft_base_tick_interval.0
            * 2
            * cluster.cfg.raft_store.raft_election_timeout_ticks as u32,
    );
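    // Finally, repeat with an admin request (a conf change removing peer 3),
    // which also has to wake the leader up for the removal to be applied.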
    cluster.add_send_filter(CloneFilterFactory(
        RegionPacketFilter::new(1, 1).direction(Direction::Send),
    ));
    let conf_change = new_change_peer_request(ConfChangeType::RemoveNode, new_peer(3, 3));
    let mut admin_req = new_admin_request(1, &region.get_region_epoch(), conf_change);
    admin_req.mut_header().set_peer(new_peer(1, 1));
    let (cb, _rx) = make_cb(&admin_req);
    cluster
        .sim
        .rl()
        .async_command_on_node(1, admin_req, cb)
        .unwrap();
    thread::sleep(Duration::from_millis(10));
    cluster.clear_send_filters();
    cluster.pd_client.must_none_peer(1, new_peer(3, 3));
}

/// Tests whether single voter still replicates log to learner after restart.
///
/// A voter will become leader in a single tick. The case checks if the role