Skip to content

Commit

Permalink
chore: pull in new rustfmt
Browse files Browse the repository at this point in the history
  • Loading branch information
mogery committed Jan 26, 2024
1 parent d2ad10e commit 5feab4f
Show file tree
Hide file tree
Showing 116 changed files with 13,519 additions and 12,445 deletions.
4 changes: 1 addition & 3 deletions .rustfmt.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
unstable_features = true
max_width = 120
struct_lit_width = 120
tab_spaces = 2
struct_lit_width = 60
imports_granularity = "Module"
group_imports = "StdExternalCrate"
132 changes: 68 additions & 64 deletions autogen/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,92 +12,96 @@ static JSON_SCHEMA_FILE: &'static str = "../examples/.tailcallrc.schema.json";

#[tokio::main]
async fn main() {
logger_init();
let args: Vec<String> = env::args().collect();
let arg = args.get(1);
logger_init();
let args: Vec<String> = env::args().collect();
let arg = args.get(1);

if arg.is_none() {
log::error!("An argument required, you can pass either `fix` or `check` argument");
return;
}
match arg.unwrap().as_str() {
"fix" => {
let result = mode_fix().await;
if let Err(e) = result {
log::error!("{}", e);
exit(1);
}
if arg.is_none() {
log::error!("An argument required, you can pass either `fix` or `check` argument");
return;
}
"check" => {
let result = mode_check().await;
if let Err(e) = result {
log::error!("{}", e);
exit(1);
}
match arg.unwrap().as_str() {
"fix" => {
let result = mode_fix().await;
if let Err(e) = result {
log::error!("{}", e);
exit(1);
}
}
"check" => {
let result = mode_check().await;
if let Err(e) = result {
log::error!("{}", e);
exit(1);
}
}
&_ => {
log::error!("Unknown argument, you can pass either `fix` or `check` argument");
return;
}
}
&_ => {
log::error!("Unknown argument, you can pass either `fix` or `check` argument");
return;
}
}
}

/// Compare the schema file on disk against a freshly generated schema.
///
/// Returns `Ok(())` only when the two JSON documents are identical;
/// otherwise returns a "Schema mismatch" error. Also errors when the
/// schema path is not valid UTF-8 or the file cannot be read/parsed.
async fn mode_check() -> Result<()> {
    let json_schema = get_file_path();
    let file_io = init_file();

    // Resolve the path to a &str up front so the read call stays flat.
    let path = json_schema
        .to_str()
        .ok_or(anyhow!("Unable to determine path"))?;
    let raw = file_io.read(path).await?;

    let on_disk = serde_json::from_str::<Value>(&raw)?;
    let generated = get_updated_json().await?;

    if on_disk == generated {
        Ok(())
    } else {
        Err(anyhow!("Schema mismatch"))
    }
}

/// Regenerate the JSON schema file in place.
async fn mode_fix() -> Result<()> {
    // GraphQL regeneration is currently disabled; only JSON is rewritten.
    // update_gql().await?;
    update_json().await
}

/// Serialize the freshly generated schema and write it over the
/// `.tailcallrc.schema.json` file.
///
/// Errors when the path is not valid UTF-8, schema generation fails,
/// or the write fails.
async fn update_json() -> Result<()> {
    let path = get_file_path();
    // Resolve the UTF-8 path exactly once. The original called
    // `path.to_str().unwrap()` in the log line and then
    // `ok_or(..)?` again for the write — so a non-UTF-8 path would
    // panic in the log statement before the fallible branch was
    // ever reached. Now it returns an Err instead.
    let path_str = path.to_str().ok_or(anyhow!("Unable to determine path"))?;

    let schema = serde_json::to_string_pretty(&get_updated_json().await?)?;
    let file_io = init_file();

    log::info!("Updating JSON Schema: {}", path_str);
    file_io.write(path_str, schema.as_bytes()).await?;
    Ok(())
}

/// Path of the schema file, anchored at this crate's manifest directory
/// (resolved at compile time via `CARGO_MANIFEST_DIR`).
fn get_file_path() -> PathBuf {
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    manifest_dir.join(JSON_SCHEMA_FILE)
}

/// Generate the JSON Schema for `Config` as a `serde_json::Value`.
async fn get_updated_json() -> Result<Value> {
    Ok(json!(schemars::schema_for!(Config)))
}

fn logger_init() {
// set the log level
const LONG_ENV_FILTER_VAR_NAME: &str = "TAILCALL_SCHEMA_LOG_LEVEL";
const SHORT_ENV_FILTER_VAR_NAME: &str = "TC_SCHEMA_LOG_LEVEL";
// set the log level
const LONG_ENV_FILTER_VAR_NAME: &str = "TAILCALL_SCHEMA_LOG_LEVEL";
const SHORT_ENV_FILTER_VAR_NAME: &str = "TC_SCHEMA_LOG_LEVEL";

// Select which env variable to use for the log level filter. This is because filter_or doesn't allow picking between multiple env_var for the filter value
let filter_env_name = env::var(LONG_ENV_FILTER_VAR_NAME)
.map(|_| LONG_ENV_FILTER_VAR_NAME)
.unwrap_or_else(|_| SHORT_ENV_FILTER_VAR_NAME);
// Select which env variable to use for the log level filter. This is because filter_or doesn't allow picking between multiple env_var for the filter value
let filter_env_name = env::var(LONG_ENV_FILTER_VAR_NAME)
.map(|_| LONG_ENV_FILTER_VAR_NAME)
.unwrap_or_else(|_| SHORT_ENV_FILTER_VAR_NAME);

// use the log level from the env if there is one, otherwise use the default.
let env = env_logger::Env::new().filter_or(filter_env_name, "info");
// use the log level from the env if there is one, otherwise use the default.
let env = env_logger::Env::new().filter_or(filter_env_name, "info");

env_logger::Builder::from_env(env).init();
env_logger::Builder::from_env(env).init();
}
68 changes: 38 additions & 30 deletions benches/data_loader_bench.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,43 +12,51 @@ use tailcall::HttpIO;

/// Stub HTTP client for the data-loader benchmark: carries a shared
/// call counter instead of performing real network I/O.
#[derive(Clone)]
struct MockHttpClient {
    // Shared counter meant to track how many times `execute` runs; the
    // Arc lets every clone of the client bump the same tally.
    // NOTE(review): the `execute` impl visible in this file never
    // increments it — confirm where the increment happens.
    request_count: Arc<AtomicUsize>,
}

#[async_trait::async_trait]
impl HttpIO for MockHttpClient {
    /// Benchmark stub: always succeeds immediately with an empty
    /// response, performing no network I/O.
    // NOTE(review): does not touch `request_count`, even though the
    // benchmark asserts on that counter — verify this is intended.
    async fn execute(&self, _req: Request) -> anyhow::Result<Response<Bytes>> {
        Ok(Response::empty())
    }
}

/// Criterion benchmark: fires 200 overlapping loads (100 per key) at an
/// `HttpDataLoader` and asserts they are de-duplicated down to two
/// upstream requests.
fn benchmark_data_loader(c: &mut Criterion) {
    c.bench_function("test_data_loader", |b| {
        b.iter(|| {
            // NOTE(review): a fresh Runtime is built per iteration and the
            // spawned task handle is never awaited, so the assertion inside
            // may not complete within the measured iteration — confirm this
            // benchmark measures what it intends to.
            tokio::runtime::Runtime::new().unwrap().spawn(async {
                let client =
                    Arc::new(MockHttpClient { request_count: Arc::new(AtomicUsize::new(0)) });
                let loader = HttpDataLoader::new(client.clone(), None, false)
                    .to_data_loader(Batch::default().delay(1));

                // Two distinct URLs → two distinct cache keys.
                let req_a = reqwest::Request::new(
                    reqwest::Method::GET,
                    "http://example.com/1".parse().unwrap(),
                );
                let req_b = reqwest::Request::new(
                    reqwest::Method::GET,
                    "http://example.com/2".parse().unwrap(),
                );

                let headers_to_consider =
                    BTreeSet::from(["Header1".to_string(), "Header2".to_string()]);
                let key1 = DataLoaderRequest::new(req_a, headers_to_consider.clone());
                let key2 = DataLoaderRequest::new(req_b, headers_to_consider);

                // 100 concurrent loads per key; the batcher should collapse
                // each group into a single upstream request.
                let batch_a = (0..100).map(|_| loader.load_one(key1.clone()));
                let batch_b = (0..100).map(|_| loader.load_one(key2.clone()));
                let _ = join_all(batch_a.chain(batch_b)).await;

                assert_eq!(
                    client.request_count.load(Ordering::SeqCst),
                    2,
                    "Only one request should be made for the same key"
                );
            })
        })
    });
}

criterion_group! {
Expand Down
Loading

1 comment on commit 5feab4f

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Running 30s test @ http://localhost:8000/graphql

4 threads and 100 connections

Thread Stats Avg Stdev Max +/- Stdev
Latency 6.43ms 2.72ms 29.70ms 69.12%
Req/Sec 3.92k 195.45 4.23k 91.75%

467930 requests in 30.00s, 2.35GB read

Requests/sec: 15595.89

Transfer/sec: 80.05MB

Please sign in to comment.