feat: expose the region migration replay_timeout argument #3129

Merged
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions src/meta-srv/Cargo.toml
@@ -33,6 +33,7 @@ etcd-client.workspace = true
futures.workspace = true
h2 = "0.3"
http-body = "0.4"
humantime = "2.1"
humantime-serde.workspace = true
itertools.workspace = true
lazy_static.workspace = true
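
The humantime dependency added here is what the admin API further down uses to parse the new replay_timeout query parameter. A standalone sketch of the parsing it provides (illustrative values):

use std::time::Duration;

use humantime::parse_duration;

fn main() {
    // humantime accepts human-readable duration strings.
    assert_eq!(parse_duration("1s").unwrap(), Duration::from_secs(1));
    assert_eq!(parse_duration("500ms").unwrap(), Duration::from_millis(500));
    // Malformed input yields a humantime::DurationError, which the new
    // Error::ParseDuration variant below wraps.
    assert!(parse_duration("not-a-duration").is_err());
}
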
9 changes: 8 additions & 1 deletion src/meta-srv/src/error.rs
@@ -210,6 +210,12 @@ pub enum Error {
location: Location,
source: servers::error::Error,
},
#[snafu(display("Failed to parse duration {}", duration))]
ParseDuration {
duration: String,
#[snafu(source)]
error: humantime::DurationError,
},
#[snafu(display("Failed to parse address {}", addr))]
ParseAddr {
addr: String,
@@ -652,7 +658,6 @@ impl ErrorExt for Error {
| Error::LockNotConfig { .. }
| Error::ExceededRetryLimit { .. }
| Error::SendShutdownSignal { .. }
| Error::ParseAddr { .. }
| Error::SchemaAlreadyExists { .. }
| Error::PusherNotFound { .. }
| Error::PushMessage { .. }
@@ -678,6 +683,8 @@ impl ErrorExt for Error {
| Error::InvalidStatKey { .. }
| Error::InvalidInactiveRegionKey { .. }
| Error::ParseNum { .. }
| Error::ParseAddr { .. }
| Error::ParseDuration { .. }
| Error::UnsupportedSelectorType { .. }
| Error::InvalidArguments { .. }
| Error::InitExportMetricsTask { .. }
9 changes: 8 additions & 1 deletion src/meta-srv/src/procedure/region_migration.rs
@@ -68,6 +68,13 @@ pub struct PersistentContext {
to_peer: Peer,
/// The [RegionId] of migration region.
region_id: RegionId,
/// The timeout of waiting for a candidate to replay the WAL.
#[serde(with = "humantime_serde", default = "default_replay_timeout")]
replay_timeout: Duration,
}

fn default_replay_timeout() -> Duration {
Duration::from_secs(1)
}

impl PersistentContext {
@@ -475,7 +482,7 @@ mod tests {

let serialized = procedure.dump().unwrap();

let expected = r#"{"persistent_ctx":{"cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105},"state":{"region_migration_state":"RegionMigrationStart"}}"#;
let expected = r#"{"persistent_ctx":{"cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105,"replay_timeout":"1s"},"state":{"region_migration_state":"RegionMigrationStart"}}"#;
assert_eq!(expected, serialized);
}
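
The #[serde(with = "humantime_serde")] attribute stores the timeout as a human-readable string, and the serde default means a dump persisted without the field still deserializes with the 1 s fallback; that is why the expected procedure dump above now contains "replay_timeout":"1s". A standalone sketch, assuming serde, serde_json, and humantime-serde are available:

use std::time::Duration;

use serde::{Deserialize, Serialize};

// A stripped-down stand-in for PersistentContext (illustrative only).
#[derive(Serialize, Deserialize)]
struct Ctx {
    #[serde(with = "humantime_serde", default = "default_replay_timeout")]
    replay_timeout: Duration,
}

fn default_replay_timeout() -> Duration {
    Duration::from_secs(1)
}

fn main() {
    // Serialized as a human-readable string, matching the "1s" in the dump above.
    let json = serde_json::to_string(&Ctx { replay_timeout: Duration::from_secs(1) }).unwrap();
    assert_eq!(json, r#"{"replay_timeout":"1s"}"#);

    // A dump written before this change (no replay_timeout field) still deserializes.
    let ctx: Ctx = serde_json::from_str("{}").unwrap();
    assert_eq!(ctx.replay_timeout, Duration::from_secs(1));
}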

src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
@@ -55,6 +55,7 @@ impl Default for DowngradeLeaderRegion {
#[typetag::serde]
impl State for DowngradeLeaderRegion {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
let replay_timeout = ctx.persistent_ctx.replay_timeout;
// Ensures the `leader_region_lease_deadline` must exist after recovering.
ctx.volatile_ctx
.set_leader_region_lease_deadline(Duration::from_secs(REGION_LEASE_SECS));
@@ -69,7 +70,10 @@ impl State for DowngradeLeaderRegion {
}

Ok((
Box::<UpgradeCandidateRegion>::default(),
Box::new(UpgradeCandidateRegion {
replay_timeout,
..Default::default()
}),
Status::executing(false),
))
}
@@ -226,6 +230,7 @@ mod tests {
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
cluster_id: 0,
replay_timeout: Duration::from_millis(1000),
}
}

20 changes: 19 additions & 1 deletion src/meta-srv/src/procedure/region_migration/manager.rs
@@ -16,6 +16,7 @@ use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt::Display;
use std::sync::{Arc, RwLock};
use std::time::Duration;

use common_meta::key::table_route::TableRouteValue;
use common_meta::peer::Peer;
@@ -61,15 +62,23 @@ pub struct RegionMigrationProcedureTask {
pub(crate) region_id: RegionId,
pub(crate) from_peer: Peer,
pub(crate) to_peer: Peer,
pub(crate) replay_timeout: Duration,
}

impl RegionMigrationProcedureTask {
pub fn new(cluster_id: ClusterId, region_id: RegionId, from_peer: Peer, to_peer: Peer) -> Self {
pub fn new(
cluster_id: ClusterId,
region_id: RegionId,
from_peer: Peer,
to_peer: Peer,
replay_timeout: Duration,
) -> Self {
Self {
cluster_id,
region_id,
from_peer,
to_peer,
replay_timeout,
}
}
}
@@ -91,13 +100,15 @@ impl From<RegionMigrationProcedureTask> for PersistentContext {
region_id,
from_peer,
to_peer,
replay_timeout,
}: RegionMigrationProcedureTask,
) -> Self {
PersistentContext {
cluster_id,
from_peer,
to_peer,
region_id,
replay_timeout,
}
}
}
@@ -319,6 +330,7 @@ mod test {
region_id,
from_peer: Peer::empty(2),
to_peer: Peer::empty(1),
replay_timeout: Duration::from_millis(1000),
};
// Inserts one
manager
@@ -342,6 +354,7 @@
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(1),
replay_timeout: Duration::from_millis(1000),
};

let err = manager.submit_procedure(task).await.unwrap_err();
@@ -359,6 +372,7 @@
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
replay_timeout: Duration::from_millis(1000),
};

let err = manager.submit_procedure(task).await.unwrap_err();
@@ -376,6 +390,7 @@
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
replay_timeout: Duration::from_millis(1000),
};

let table_info = new_test_table_info(1024, vec![1]).into();
@@ -403,6 +418,7 @@
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
replay_timeout: Duration::from_millis(1000),
};

let table_info = new_test_table_info(1024, vec![1]).into();
@@ -434,6 +450,7 @@
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
replay_timeout: Duration::from_millis(1000),
};

let table_info = new_test_table_info(1024, vec![1]).into();
@@ -460,6 +477,7 @@
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
replay_timeout: Duration::from_millis(1000),
};

let err = manager
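
Call sites now pass the timeout explicitly, and the From impl above carries it into the procedure's persistent state unchanged. A minimal sketch of the new call shape, in the spirit of the crate's own tests (it assumes RegionMigrationProcedureTask, PersistentContext, Peer, RegionId, and Duration are in scope, as they are in manager.rs):

#[test]
fn replay_timeout_reaches_persistent_context_sketch() {
    let task = RegionMigrationProcedureTask::new(
        0,                       // cluster_id
        RegionId::new(1024, 1),  // region_id
        Peer::empty(1),          // from_peer
        Peer::empty(2),          // to_peer
        Duration::from_secs(10), // replay_timeout chosen by the caller
    );
    // The From impl above copies the timeout into the persisted procedure state.
    let _ctx = PersistentContext::from(task);
}
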
2 changes: 2 additions & 0 deletions src/meta-srv/src/procedure/region_migration/test_util.rs
@@ -16,6 +16,7 @@ use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;

use api::v1::meta::mailbox_message::Payload;
use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader};
@@ -281,6 +282,7 @@ pub fn new_persistent_context(from: u64, to: u64, region_id: RegionId) -> Persis
to_peer: Peer::empty(to),
region_id,
cluster_id: 0,
replay_timeout: Duration::from_millis(1000),
}
}

src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs
@@ -33,14 +33,14 @@ use crate::service::mailbox::Channel;
#[derive(Debug, Serialize, Deserialize)]
pub struct UpgradeCandidateRegion {
// The optimistic retry times.
optimistic_retry: usize,
pub(crate) optimistic_retry: usize,
// The retry initial interval.
retry_initial_interval: Duration,
pub(crate) retry_initial_interval: Duration,
// The replay timeout of a instruction.
replay_timeout: Duration,
pub(crate) replay_timeout: Duration,
// If it's true it requires the candidate region MUST replay the WAL to the latest entry id.
// Otherwise, it will rollback to the old leader region.
require_ready: bool,
pub(crate) require_ready: bool,
}

impl Default for UpgradeCandidateRegion {
@@ -236,6 +237,7 @@ mod tests {
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
cluster_id: 0,
replay_timeout: Duration::from_millis(1000),
}
}

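
Widening these fields to pub(crate) is what lets the downgrade state shown earlier override only the timeout while keeping the remaining defaults, i.e. (illustrative, with replay_timeout taken from the persistent context):

let next_state = UpgradeCandidateRegion {
    replay_timeout,
    ..Default::default()
};
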
17 changes: 17 additions & 0 deletions src/meta-srv/src/service/admin/region_migration.rs
@@ -15,9 +15,11 @@
use std::collections::HashMap;
use std::num::ParseIntError;
use std::str::FromStr;
use std::time::Duration;

use common_meta::peer::Peer;
use common_meta::{distributed_time_constants, ClusterId};
use humantime::parse_duration;
use serde::Serialize;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::RegionId;
@@ -43,6 +45,7 @@ struct SubmitRegionMigrationTaskRequest {
region_id: RegionId,
from_peer_id: u64,
to_peer_id: u64,
replay_timeout: Duration,
}

#[derive(Debug, Serialize)]
@@ -71,6 +74,8 @@
Ok(parse_result)
}

const DEFAULT_REPLAY_TIMEOUT: Duration = Duration::from_millis(1000);

impl TryFrom<&HashMap<String, String>> for SubmitRegionMigrationTaskRequest {
type Error = Error;

@@ -89,11 +94,18 @@ impl TryFrom<&HashMap<String, String>> for SubmitRegionMigrationTaskRequest {
error::MissingRequiredParameterSnafu { param: key }.fail()
})?;

let replay_timeout = if let Some(duration) = params.get("replay_timeout") {
parse_duration(duration).context(error::ParseDurationSnafu { duration })?
} else {
DEFAULT_REPLAY_TIMEOUT
};

Ok(SubmitRegionMigrationTaskRequest {
cluster_id,
region_id: RegionId::from_u64(region_id),
from_peer_id,
to_peer_id,
replay_timeout,
})
}
}
@@ -131,6 +143,7 @@ impl SubmitRegionMigrationTaskHandler {
region_id,
from_peer_id,
to_peer_id,
replay_timeout,
} = task;

let from_peer = self.lookup_peer(cluster_id, from_peer_id).await?.context(
@@ -150,6 +163,7 @@
region_id,
from_peer,
to_peer,
replay_timeout,
})
.await?;

@@ -187,6 +201,7 @@ mod tests {
use std::collections::HashMap;

use crate::error;
use crate::service::admin::region_migration::DEFAULT_REPLAY_TIMEOUT;

#[test]
fn test_parse_migration_task_req() {
@@ -212,6 +227,7 @@
region_id: RegionId::new(1024, 1),
from_peer_id: 1,
to_peer_id: 2,
replay_timeout: DEFAULT_REPLAY_TIMEOUT
},
task_req
);
@@ -233,6 +249,7 @@
region_id: RegionId::new(1024, 1),
from_peer_id: 1,
to_peer_id: 2,
replay_timeout: DEFAULT_REPLAY_TIMEOUT
},
task_req
);
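
In the admin handler, replay_timeout is an optional query parameter parsed with humantime; when it is omitted the request falls back to DEFAULT_REPLAY_TIMEOUT (one second). A sketch of supplying it, in the spirit of this module's tests; the parameter keys other than "replay_timeout" are assumed to mirror the request's field names:

#[test]
fn submit_request_with_replay_timeout_sketch() {
    use std::collections::HashMap;

    // "replay_timeout" is taken from the TryFrom impl above; the other keys and
    // all values are illustrative.
    let params: HashMap<String, String> = [
        ("cluster_id", "0"),
        ("region_id", "4398046511105"),
        ("from_peer_id", "1"),
        ("to_peer_id", "2"),
        // Any humantime-style duration works, e.g. "10s" or "500ms".
        // Omit the key entirely to fall back to DEFAULT_REPLAY_TIMEOUT (1s).
        ("replay_timeout", "10s"),
    ]
    .into_iter()
    .map(|(k, v)| (k.to_string(), v.to_string()))
    .collect();

    // The handler builds the typed request through the TryFrom impl above.
    let request = SubmitRegionMigrationTaskRequest::try_from(&params).unwrap();
    assert_eq!(request.replay_timeout, Duration::from_secs(10));
}
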
8 changes: 8 additions & 0 deletions tests-integration/tests/region_migration.rs
@@ -161,6 +161,7 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
Duration::from_millis(1000),
))
.await
.unwrap();
@@ -207,6 +208,7 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
Duration::from_millis(1000),
))
.await
.unwrap();
@@ -299,6 +301,7 @@ pub async fn test_region_migration_multiple_regions(
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
Duration::from_millis(1000),
))
.await
.unwrap();
@@ -345,6 +348,7 @@ pub async fn test_region_migration_multiple_regions(
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
Duration::from_millis(1000),
))
.await
.unwrap();
@@ -426,6 +430,7 @@ pub async fn test_region_migration_all_regions(store_type: StorageType, endpoint
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
Duration::from_millis(1000),
))
.await
.unwrap();
@@ -473,6 +478,7 @@ pub async fn test_region_migration_all_regions(store_type: StorageType, endpoint
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
Duration::from_millis(1000),
))
.await
.unwrap();
@@ -543,6 +549,7 @@ pub async fn test_region_migration_incorrect_from_peer(
region_id,
peer_factory(5),
peer_factory(1),
Duration::from_millis(1000),
))
.await
.unwrap_err();
@@ -617,6 +624,7 @@ pub async fn test_region_migration_incorrect_region_id(
region_id,
peer_factory(2),
peer_factory(1),
Duration::from_millis(1000),
))
.await
.unwrap_err();