From a7325f29c5e40db35642aa7629539d90335f29b7 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 16:04:14 +0200
Subject: [PATCH 01/41] refactor: Remove Heroku deployment files
---
Aptfile | 2 --
Procfile | 1 -
2 files changed, 3 deletions(-)
delete mode 100644 Aptfile
delete mode 100644 Procfile
diff --git a/Aptfile b/Aptfile
deleted file mode 100644
index 3590f35..0000000
--- a/Aptfile
+++ /dev/null
@@ -1,2 +0,0 @@
-libspatialindex-dev
-python3-rtree
diff --git a/Procfile b/Procfile
deleted file mode 100644
index 91e5c97..0000000
--- a/Procfile
+++ /dev/null
@@ -1 +0,0 @@
-web: pip install -e . && pip install -r requirements-app.txt && uvicorn --reload --workers 1 --host 0.0.0.0 --port=${PORT:-5000} app.main:app
From 339b5c34e18ac6633c54512505621aca640569fc Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 16:09:24 +0200
Subject: [PATCH 02/41] refactor: Remove DVC CML pipelines
---
.dvc/.gitignore | 3 --
.dvc/config | 6 ----
.dvcignore | 3 --
dags/dvc/predict/dvc.lock | 11 -------
dags/dvc/predict/dvc.yaml | 10 -------
dags/dvc/train/dvc.lock | 61 ---------------------------------------
dags/dvc/train/dvc.yaml | 36 -----------------------
7 files changed, 130 deletions(-)
delete mode 100644 .dvc/.gitignore
delete mode 100644 .dvc/config
delete mode 100644 .dvcignore
delete mode 100644 dags/dvc/predict/dvc.lock
delete mode 100644 dags/dvc/predict/dvc.yaml
delete mode 100644 dags/dvc/train/dvc.lock
delete mode 100644 dags/dvc/train/dvc.yaml
diff --git a/.dvc/.gitignore b/.dvc/.gitignore
deleted file mode 100644
index 528f30c..0000000
--- a/.dvc/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/config.local
-/tmp
-/cache
diff --git a/.dvc/config b/.dvc/config
deleted file mode 100644
index 432e549..0000000
--- a/.dvc/config
+++ /dev/null
@@ -1,6 +0,0 @@
-['remote "artifacts-registry"']
- url = gdrive://1fyD6xyuWWhyjNPoCiCazKshp0yJ8xKs9
- gdrive_use_service_account = true
- gdrive_service_account_json_file_path = tmp/credentials.json
-[core]
- remote = artifacts-registry
\ No newline at end of file
diff --git a/.dvcignore b/.dvcignore
deleted file mode 100644
index 5197305..0000000
--- a/.dvcignore
+++ /dev/null
@@ -1,3 +0,0 @@
-# Add patterns of files dvc should ignore, which could improve
-# the performance. Learn more at
-# https://dvc.org/doc/user-guide/dvcignore
diff --git a/dags/dvc/predict/dvc.lock b/dags/dvc/predict/dvc.lock
deleted file mode 100644
index a91248e..0000000
--- a/dags/dvc/predict/dvc.lock
+++ /dev/null
@@ -1,11 +0,0 @@
-schema: '2.0'
-stages:
- download:
- cmd: pyrorisks download inputs --day 2020-05-05
- predict:
- cmd: pyrorisks predict --day 2020-05-05
- outs:
- - path: .cache/predictions_registry
- md5: d9926db4f35854aa453bcf54e9aa394e.dir
- size: 12414168
- nfiles: 3
diff --git a/dags/dvc/predict/dvc.yaml b/dags/dvc/predict/dvc.yaml
deleted file mode 100644
index 5b2b800..0000000
--- a/dags/dvc/predict/dvc.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-stages:
- download:
- wdir: ../../../
- cmd: pyrorisks download inputs --day 2020-05-05
-
- predict:
- wdir: ../../../
- cmd: pyrorisks predict --day 2020-05-05
- outs:
- - .cache/predictions_registry
diff --git a/dags/dvc/train/dvc.lock b/dags/dvc/train/dvc.lock
deleted file mode 100644
index 7209457..0000000
--- a/dags/dvc/train/dvc.lock
+++ /dev/null
@@ -1,61 +0,0 @@
-schema: '2.0'
-stages:
- download:
- cmd: pyrorisks download
- deps:
- - path: pyro_risks/pipeline/load.py
- md5: d0d749f7b2c050296d98f23f93783dac
- size: 2060
- outs:
- - path: .cache/data_registry/merged_era_viirs.csv
- md5: ac964a338a6cdddce2fdd4f1847f173a
- size: 83716501
- train_xgboost:
- cmd: pyrorisks train --model XGBOOST --destination .cache/model_registry
- deps:
- - path: .cache/data_registry/merged_era_viirs.csv
- md5: ac964a338a6cdddce2fdd4f1847f173a
- size: 83716501
- - path: pyro_risks/pipeline/train.py
- md5: 2072ca8311d7bb9105e9210e7cd61b5a
- size: 5224
- outs:
- - path: .cache/model_registry/XGBOOST.joblib
- md5: a5f6213848af99db0d8030c4e7ac4b59
- size: 1817148
- train_rf:
- cmd: pyrorisks train --model RF --destination .cache/model_registry
- deps:
- - path: .cache/data_registry/merged_era_viirs.csv
- md5: ac964a338a6cdddce2fdd4f1847f173a
- size: 83716501
- - path: pyro_risks/pipeline/train.py
- md5: 2072ca8311d7bb9105e9210e7cd61b5a
- size: 5224
- outs:
- - path: .cache/model_registry/RF.joblib
- md5: 64a825e6a9ea80345beec0ced6ef4dd4
- size: 11947436
- evaluate:
- cmd: "pyrorisks evaluate --pipeline .cache/model_registry/RF.joblib --threshold\
- \ .cache/model_registry/RF_threshold.json --prefix RF --destination .cache/metadata_registry\
- \ \npyrorisks evaluate --pipeline .cache/model_registry/XGBOOST.joblib --threshold\
- \ .cache/model_registry/XGBOOST_threshold.json --prefix XGBOOST --destination\
- \ .cache/metadata_registry\n"
- deps:
- - path: .cache/model_registry/RF.joblib
- md5: 64a825e6a9ea80345beec0ced6ef4dd4
- size: 11947436
- - path: .cache/model_registry/XGBOOST.joblib
- md5: a5f6213848af99db0d8030c4e7ac4b59
- size: 1817148
- - path: pyro_risks/pipeline/train.py
- md5: 2072ca8311d7bb9105e9210e7cd61b5a
- size: 5224
- outs:
- - path: .cache/metadata_registry/RF_classification_report.json
- md5: 47d86b5779e035dc9ff38d38330cd9fd
- size: 237
- - path: .cache/metadata_registry/XGBOOST_classification_report.json
- md5: 137fe7ce77c7dc4fdc948e11885f5557
- size: 237
diff --git a/dags/dvc/train/dvc.yaml b/dags/dvc/train/dvc.yaml
deleted file mode 100644
index cbd5d17..0000000
--- a/dags/dvc/train/dvc.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-stages:
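-  # Every stage runs from the repository root (wdir points three levels up)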
- download:
- wdir: ../../../
- cmd: pyrorisks download dataset
- deps:
- - pyro_risks/pipeline/load.py
- outs:
- - .cache/data_registry/merged_era_viirs.csv
- train_xgboost:
- wdir: ../../../
- cmd: pyrorisks train --model XGBOOST --destination .cache/model_registry
- deps:
- - pyro_risks/pipeline/train.py
- - .cache/data_registry/merged_era_viirs.csv
- outs:
- - .cache/model_registry/XGBOOST.joblib
- train_rf:
- wdir: ../../../
- cmd: pyrorisks train --model RF --destination .cache/model_registry
- deps:
- - pyro_risks/pipeline/train.py
- - .cache/data_registry/merged_era_viirs.csv
- outs:
- - .cache/model_registry/RF.joblib
- evaluate:
- wdir: ../../../
- cmd: |
- pyrorisks evaluate --pipeline .cache/model_registry/RF.joblib --threshold .cache/model_registry/RF_threshold.json --prefix RF --destination .cache/metadata_registry
- pyrorisks evaluate --pipeline .cache/model_registry/XGBOOST.joblib --threshold .cache/model_registry/XGBOOST_threshold.json --prefix XGBOOST --destination .cache/metadata_registry
- deps:
- - pyro_risks/pipeline/train.py
- - .cache/model_registry/XGBOOST.joblib
- - .cache/model_registry/RF.joblib
- metrics:
- - .cache/metadata_registry/XGBOOST_classification_report.json
- - .cache/metadata_registry/RF_classification_report.json
From 4db972a5512ab009dcf99e29cc0eabddceb3d5b6 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 16:19:07 +0200
Subject: [PATCH 03/41] chore: Remove notebooks from package
---
{pyro_risks/notebooks => notebooks}/EFFIS FWI formatting.ipynb | 0
{pyro_risks/notebooks => notebooks}/requetes_apis.ipynb | 0
{pyro_risks/notebooks => notebooks}/s3_tutorial.ipynb | 0
{pyro_risks/notebooks => notebooks}/tif_explorer.ipynb | 0
4 files changed, 0 insertions(+), 0 deletions(-)
rename {pyro_risks/notebooks => notebooks}/EFFIS FWI formatting.ipynb (100%)
rename {pyro_risks/notebooks => notebooks}/requetes_apis.ipynb (100%)
rename {pyro_risks/notebooks => notebooks}/s3_tutorial.ipynb (100%)
rename {pyro_risks/notebooks => notebooks}/tif_explorer.ipynb (100%)
diff --git a/pyro_risks/notebooks/EFFIS FWI formatting.ipynb b/notebooks/EFFIS FWI formatting.ipynb
similarity index 100%
rename from pyro_risks/notebooks/EFFIS FWI formatting.ipynb
rename to notebooks/EFFIS FWI formatting.ipynb
diff --git a/pyro_risks/notebooks/requetes_apis.ipynb b/notebooks/requetes_apis.ipynb
similarity index 100%
rename from pyro_risks/notebooks/requetes_apis.ipynb
rename to notebooks/requetes_apis.ipynb
diff --git a/pyro_risks/notebooks/s3_tutorial.ipynb b/notebooks/s3_tutorial.ipynb
similarity index 100%
rename from pyro_risks/notebooks/s3_tutorial.ipynb
rename to notebooks/s3_tutorial.ipynb
diff --git a/pyro_risks/notebooks/tif_explorer.ipynb b/notebooks/tif_explorer.ipynb
similarity index 100%
rename from pyro_risks/notebooks/tif_explorer.ipynb
rename to notebooks/tif_explorer.ipynb
From c39bdec842fa4101a5607954dd0a3bae19818bc3 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 16:25:15 +0200
Subject: [PATCH 04/41] refactor: Deprecate pyro_risks modules
---
pyro_risks/config/__init__.py | 2 -
pyro_risks/config/datasets.py | 147 --------
pyro_risks/config/models.py | 145 --------
pyro_risks/datasets/ERA5.py | 218 -----------
pyro_risks/datasets/__init__.py | 7 -
pyro_risks/datasets/datasets_mergers.py | 249 -------------
pyro_risks/datasets/era_fwi_viirs.py | 165 ---------
pyro_risks/datasets/fwi.py | 258 -------------
pyro_risks/datasets/masks.py | 29 --
pyro_risks/datasets/nasa_wildfires.py | 243 -------------
pyro_risks/datasets/queries_api.py | 464 ------------------------
pyro_risks/datasets/utils.py | 370 -------------------
pyro_risks/datasets/weather.py | 80 ----
pyro_risks/datasets/wildfires.py | 43 ---
pyro_risks/models/__init__.py | 3 -
pyro_risks/models/pipelines.py | 49 ---
pyro_risks/models/transformers.py | 349 ------------------
pyro_risks/models/utils.py | 67 ----
pyro_risks/pipeline/__init__.py | 4 -
pyro_risks/pipeline/evaluate.py | 165 ---------
pyro_risks/pipeline/load.py | 64 ----
pyro_risks/pipeline/predict.py | 269 --------------
pyro_risks/pipeline/train.py | 159 --------
setup.py | 70 ----
24 files changed, 3619 deletions(-)
delete mode 100644 pyro_risks/config/__init__.py
delete mode 100644 pyro_risks/config/datasets.py
delete mode 100644 pyro_risks/config/models.py
delete mode 100644 pyro_risks/datasets/ERA5.py
delete mode 100644 pyro_risks/datasets/__init__.py
delete mode 100644 pyro_risks/datasets/datasets_mergers.py
delete mode 100644 pyro_risks/datasets/era_fwi_viirs.py
delete mode 100644 pyro_risks/datasets/fwi.py
delete mode 100644 pyro_risks/datasets/masks.py
delete mode 100644 pyro_risks/datasets/nasa_wildfires.py
delete mode 100644 pyro_risks/datasets/queries_api.py
delete mode 100644 pyro_risks/datasets/utils.py
delete mode 100644 pyro_risks/datasets/weather.py
delete mode 100644 pyro_risks/datasets/wildfires.py
delete mode 100644 pyro_risks/models/__init__.py
delete mode 100644 pyro_risks/models/pipelines.py
delete mode 100644 pyro_risks/models/transformers.py
delete mode 100644 pyro_risks/models/utils.py
delete mode 100644 pyro_risks/pipeline/__init__.py
delete mode 100644 pyro_risks/pipeline/evaluate.py
delete mode 100644 pyro_risks/pipeline/load.py
delete mode 100644 pyro_risks/pipeline/predict.py
delete mode 100644 pyro_risks/pipeline/train.py
delete mode 100644 setup.py
diff --git a/pyro_risks/config/__init__.py b/pyro_risks/config/__init__.py
deleted file mode 100644
index 4e943a6..0000000
--- a/pyro_risks/config/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .datasets import *
-from .models import *
diff --git a/pyro_risks/config/datasets.py b/pyro_risks/config/datasets.py
deleted file mode 100644
index 158c181..0000000
--- a/pyro_risks/config/datasets.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import os
-from dotenv import load_dotenv
-
-# If there is an .env, load it
-load_dotenv()
-
-FR_GEOJSON: str = "https://france-geojson.gregoiredavid.fr/repo/departements.geojson"
-DATA_FALLBACK: str = (
- "https://github.com/pyronear/pyro-risks/releases/download/v0.1.0-data"
-)
-FR_GEOJSON_FALLBACK: str = f"{DATA_FALLBACK}/departements.geojson"
-FR_FIRES_FALLBACK: str = f"{DATA_FALLBACK}/export_BDIFF_incendies_20201027.csv"
-FR_WEATHER_FALLBACK: str = f"{DATA_FALLBACK}/noaa_weather_20201025.csv"
-FR_NASA_FIRMS_FALLBACK: str = f"{DATA_FALLBACK}/NASA_FIRMS.json"
-FR_NASA_VIIRS_FALLBACK: str = f"{DATA_FALLBACK}/NASA_FIRMS_VIIRS_2018_2020.csv"
-FR_FWI_2019_FALLBACK: str = f"{DATA_FALLBACK}/JRC_FWI_2019.zip"
-FR_FWI_2020_FALLBACK: str = f"{DATA_FALLBACK}/JRC_FWI_2020.zip"
-FR_ERA5LAND_FALLBACK: str = f"{DATA_FALLBACK}/ERA5_2019.nc"
-FR_ERA5T_FALLBACK: str = f"{DATA_FALLBACK}/era5t_2019.nc"
-DATASET: str = "merged_era_viirs.csv"
-ERA5T_VIIRS_PIPELINE: str = f"{DATA_FALLBACK}/merged_era_viirs.csv"
-TEST_FR_ERA5LAND_FALLBACK: str = f"{DATA_FALLBACK}/test_data_ERA5_2018.nc"
-TEST_FR_FIRMS_CSV_FALLBACK: str = f"{DATA_FALLBACK}/test_data_FIRMS.csv"
-TEST_FR_FIRMS_XLSX_FALLBACK: str = f"{DATA_FALLBACK}/test_data_FIRMS.xlsx"
-TEST_FR_VIIRS_XLSX_FALLBACK: str = f"{DATA_FALLBACK}/test_data_VIIRS.xlsx"
-TEST_FR_VIIRS_JSON_FALLBACK: str = f"{DATA_FALLBACK}/test_data_VIIRS.json"
-TEST_FR_ERA5_2019_FALLBACK: str = f"{DATA_FALLBACK}/test_data_ERA5_2019.nc"
-TEST_FR_ERA5T_FALLBACK: str = f"{DATA_FALLBACK}/test_era5t_to_merge.nc"
-TEST_FWI_FALLBACK: str = f"{DATA_FALLBACK}/test_data_FWI.csv"
-TEST_FWI_TO_PREDICT: str = f"{DATA_FALLBACK}/fwi_test_to_predict.csv"
-TEST_ERA_TO_PREDICT: str = f"{DATA_FALLBACK}/era_test_to_predict.csv"
-
-REPO_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
-ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
-
-
-CDS_URL = "https://cds.climate.copernicus.eu/api/v2"
-CDS_UID = os.getenv("CDS_UID")
-CDS_API_KEY = os.getenv("CDS_API_KEY")
-
-RFMODEL_PATH_FALLBACK: str = f"{DATA_FALLBACK}/pyrorisk_rfc_111220.pkl"
-RFMODEL_ERA5T_PATH_FALLBACK: str = f"{DATA_FALLBACK}/pyrorisk_rfc_era5t_151220.pkl"
-XGBMODEL_PATH_FALLBACK: str = f"{DATA_FALLBACK}/pyrorisk_xgb_091220.pkl"
-XGBMODEL_ERA5T_PATH_FALLBACK: str = f"{DATA_FALLBACK}/pyrorisk_xgb_era5t_151220.pkl"
-
-FWI_VARS = ["fwi", "ffmc", "dmc", "dc", "isi", "bui", "dsr"]
-
-WEATHER_VARS = [
- "u10",
- "v10",
- "d2m",
- "t2m",
- "fal",
- "lai_hv",
- "lai_lv",
- "skt",
- "asn",
- "snowc",
- "rsn",
- "sde",
- "sd",
- "sf",
- "smlt",
- "stl1",
- "stl2",
- "stl3",
- "stl4",
- "slhf",
- "ssr",
- "str",
- "sp",
- "sshf",
- "ssrd",
- "strd",
- "tsn",
- "tp",
-]
-
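-# ERA5T surface variables, listed by their ECMWF short names
-# (e.g. d2m = 2 m dewpoint temperature, swvl1 = volumetric soil water layer 1)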
-WEATHER_ERA5T_VARS = [
- "asn",
- "d2m",
- "e",
- "es",
- "fal",
- "lai_hv",
- "lai_lv",
- "lblt",
- "licd",
- "lict",
- "lmld",
- "lmlt",
- "lshf",
- "ltlt",
- "pev",
- "ro",
- "rsn",
- "sd",
- "sf",
- "skt",
- "slhf",
- "smlt",
- "sp",
- "src",
- "sro",
- "sshf",
- "ssr",
- "ssrd",
- "ssro",
- "stl1",
- "stl2",
- "stl3",
- "stl4",
- "str",
- "strd",
- "swvl1",
- "swvl2",
- "swvl3",
- "swvl4",
- "t2m",
- "tp",
- "tsn",
- "u10",
- "v10",
-]
-
-CACHE_FOLDER: str = "./.cache/"
-
-DATA_REGISTRY = os.path.join(CACHE_FOLDER, "data_registry/")
-MODEL_REGISTRY = os.path.join(CACHE_FOLDER, "model_registry/")
-METADATA_REGISTRY = os.path.join(CACHE_FOLDER, "metadata_registry/")
-PREDICTIONS_REGISTRY = os.path.join(CACHE_FOLDER, "predictions_registry/")
-
-DATASET_PATH = os.path.join(DATA_REGISTRY, DATASET)
-PIPELINE_INPUT_PATH = os.path.join(PREDICTIONS_REGISTRY, "pipeline_inputs.csv")
-RFMODEL_ERA5T_PATH = os.path.join(MODEL_REGISTRY, "RF.joblib")
-XGBMODEL_ERA5T_PATH = os.path.join(MODEL_REGISTRY, "XGBOOST.joblib")
-
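-# Create the cache and registry folders at import time so later reads/writes find them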
-os.makedirs(CACHE_FOLDER, exist_ok=True)
-os.makedirs(DATA_REGISTRY, exist_ok=True)
-os.makedirs(MODEL_REGISTRY, exist_ok=True)
-os.makedirs(METADATA_REGISTRY, exist_ok=True)
-os.makedirs(PREDICTIONS_REGISTRY, exist_ok=True)
diff --git a/pyro_risks/config/models.py b/pyro_risks/config/models.py
deleted file mode 100644
index d78fe1a..0000000
--- a/pyro_risks/config/models.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-ZONE_VAR = "departement"
-
-DATE_VAR = "day"
-
-TARGET = "fires"
-
-PIPELINE_ERA5T_VARS = [
- "strd_min",
- "isi_min",
- "strd_max",
- "d2m_mean",
- "lai_hv_mean",
- "str_mean",
- "ffmc_mean",
- "strd_mean",
- "swvl1_mean",
- "asn_min",
- "fwi_mean",
- "asn_std",
- "ssr_mean",
- "str_max",
- "d2m_min",
- "rsn_std",
- "ssrd_min",
- "isi_mean",
- "ssrd_mean",
- "isi_max",
- "ffmc_max",
- "ffmc_min",
- "ssr_min",
- "str_min",
- "ffmc_std",
-]
-
-MODEL_ERA5T_VARS = [
- "str_max",
- "str_mean",
- "ffmc_min",
- "str_min",
- "ffmc_mean",
- "str_mean_lag1",
- "str_max_lag1",
- "str_min_lag1",
- "isi_min",
- "ffmc_min_lag1",
- "isi_mean",
- "ffmc_mean_lag1",
- "ffmc_std",
- "ffmc_max",
- "isi_min_lag1",
- "isi_mean_lag1",
- "ffmc_max_lag1",
- "asn_std",
- "strd_max",
- "ssrd_min",
- "strd_mean",
- "isi_max",
- "strd_min",
- "d2m_min",
- "asn_min",
- "ssr_min",
- "ffmc_min_lag3",
- "ffmc_std_lag1",
- "lai_hv_mean_lag7",
- "str_max_lag3",
- "str_mean_lag3",
- "rsn_std_lag1",
- "fwi_mean",
- "ssr_mean",
- "ssrd_mean",
- "swvl1_mean",
- "rsn_std_lag3",
- "isi_max_lag1",
- "d2m_mean",
- "rsn_std",
-]
-
-SELECTED_DEP = [
- "Aisne",
- "Alpes-Maritimes",
- "Ardèche",
- "Ariège",
- "Aude",
- "Aveyron",
- "Cantal",
- "Eure",
- "Eure-et-Loir",
- "Gironde",
- "Haute-Corse",
- "Hautes-Pyrénées",
- "Hérault",
- "Indre",
- "Landes",
- "Loiret",
- "Lozère",
- "Marne",
- "Oise",
- "Pyrénées-Atlantiques",
- "Pyrénées-Orientales",
- "Sarthe",
- "Somme",
- "Yonne",
-]
-
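-# Base names of the variables that appear with a "_lagN" suffix in MODEL_ERA5T_VARS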
-LAG_ERA5T_VARS = ["_".join(x.split("_")[:-1]) for x in MODEL_ERA5T_VARS if "_lag" in x]
-
-USECOLS = [DATE_VAR, ZONE_VAR, TARGET] + PIPELINE_ERA5T_VARS
-
-PIPELINE_COLS = [DATE_VAR, ZONE_VAR] + PIPELINE_ERA5T_VARS
-
-TEST_SIZE = 0.2
-
-RANDOM_STATE = 42
-
-RF_PARAMS = {
- "n_estimators": 500,
- "min_samples_leaf": 10,
- "max_features": "sqrt",
- "class_weight": "balanced",
- "criterion": "gini",
- "random_state": 10,
- "n_jobs": -1,
- "verbose": 3,
-}
-
-XGB_PARAMS = {
- "n_estimators": 1000,
- "max_depth": 10,
- "learning_rate": 0.01,
- "min_child_weight": 10,
- "subsample": 0.8,
- "colsample_bytree": 0.8,
- "objective": "binary:logistic",
- "random_state": 10,
- "n_jobs": -1,
- "verbosity": 2,
-}
-
-
-XGB_FIT_PARAMS = {"early_stopping_rounds": 50, "eval_metric": ["logloss", "aucpr"]}
diff --git a/pyro_risks/datasets/ERA5.py b/pyro_risks/datasets/ERA5.py
deleted file mode 100644
index 1b3a7b2..0000000
--- a/pyro_risks/datasets/ERA5.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import logging
-from typing import Optional
-
-import os
-import geopandas as gpd
-import pandas as pd
-import numpy as np
-import requests
-import xarray as xr
-import tempfile
-
-from pyro_risks import config as cfg
-from .masks import get_french_geom
-from pyro_risks.datasets.queries_api import call_era5land, call_era5t
-
-
-__all__ = ["ERA5Land", "ERA5T"]
-
-
-def get_data_era5land_for_predict(date: str) -> pd.DataFrame:
- """
-    Get ERA5Land dataframe for a given date using a call to cdsapi
-    and the appropriate class.
-
- Args:
- date: str
-            Date with the following format: "YEAR-MONTH-DAY" e.g. "2020-05-12"
-
- Returns: pd.DataFrame
- Dataframe containing ERA5 Land data for the requested day.
- """
- with tempfile.TemporaryDirectory() as tmp:
- year, month, day = date.split("-")
- call_era5land(tmp, year, month, day)
- # TODO: make sure that the directory works when on server
- data = ERA5Land(
- source_path=os.path.join(tmp, f"era5land_{year}_{month}_{day}.nc")
- )
-
- # Lag J-1
- lag = np.datetime64(date) - np.timedelta64(1, "D")
- year, month, day = str(lag).split("-")
- call_era5land(tmp, year, month, day)
- dataJ1 = ERA5Land(
- source_path=os.path.join(tmp, f"era5land_{year}_{month}_{day}.nc")
- )
-
- # Lag J-3
- lag = np.datetime64(date) - np.timedelta64(3, "D")
- year, month, day = str(lag).split("-")
- call_era5land(tmp, year, month, day)
- dataJ3 = ERA5Land(
- source_path=os.path.join(tmp, f"era5land_{year}_{month}_{day}.nc")
- )
-
- # Lag J-7
- lag = np.datetime64(date) - np.timedelta64(7, "D")
- year, month, day = str(lag).split("-")
- call_era5land(tmp, year, month, day)
- dataJ7 = ERA5Land(
- source_path=os.path.join(tmp, f"era5land_{year}_{month}_{day}.nc")
- )
-
- merged_data = pd.concat([data, dataJ1, dataJ3, dataJ7], ignore_index=True)
- return merged_data
-
-
-def get_data_era5t_for_predict(date: str) -> pd.DataFrame:
- """
-    Get ERA5T dataframe for a given date using a call to cdsapi
-    and the appropriate class.
-
- Args:
- date: str
-            Date with the following format: "YEAR-MONTH-DAY" e.g. "2020-05-12"
-
- Returns: pd.DataFrame
- Dataframe containing ERA5T data for the requested day.
- """
- with tempfile.TemporaryDirectory() as tmp:
- year, month, day = date.split("-")
- call_era5t(tmp, year, month, day)
- # TODO: make sure that the directory works when on server
- data = ERA5T(source_path=os.path.join(tmp, f"era5t_{year}_{month}_{day}.nc"))
- # Lag J-1
- lag = np.datetime64(f"{year}-{month}-{day}") - np.timedelta64(1, "D")
- year, month, day = str(lag).split("-")
- call_era5t(tmp, year, month, day)
- dataJ1 = ERA5T(source_path=os.path.join(tmp, f"era5t_{year}_{month}_{day}.nc"))
- # Lag J-3
- lag = np.datetime64(f"{year}-{month}-{day}") - np.timedelta64(3, "D")
- year, month, day = str(lag).split("-")
- call_era5t(tmp, year, month, day)
- dataJ3 = ERA5T(source_path=os.path.join(tmp, f"era5t_{year}_{month}_{day}.nc"))
- # Lag J-7
- lag = np.datetime64(f"{year}-{month}-{day}") - np.timedelta64(7, "D")
- year, month, day = str(lag).split("-")
- call_era5t(tmp, year, month, day)
- dataJ7 = ERA5T(source_path=os.path.join(tmp, f"era5t_{year}_{month}_{day}.nc"))
- merged_data = pd.concat([data, dataJ1, dataJ3, dataJ7], ignore_index=True)
- return merged_data
-
-
-class ERA5Land(pd.DataFrame):
- """Provides ERA5-Land clean dataset as a pandas dataframe.
-
- ERA5-Land is a reanalysis dataset providing a consistent view of the evolution of land variables
-    over several decades at an enhanced resolution compared to ERA5. ERA5-Land uses ERA5
-    atmospheric variables, such as air temperature and air humidity, as input to control
-    the simulated land fields. Using cdsapi (https://pypi.org/project/cdsapi/) with an
-    access key, the user can get the dataset at
-    https://cds.climate.copernicus.eu/cdsapp#!/dataset/reanalysis-era5-land?tab=overview
-
-    The provided dataset has to be in netCDF4 format.
-
- Args:
- source_path: str
- Path or URL to your version of the source data
- """
-
- def __init__(self, source_path: Optional[str] = None) -> None:
- """
- Args:
- source_path: Optional[str]
- Path or URL to your version of the source data
- """
- if not isinstance(source_path, str):
- # Download in cache
- logging.warning(
- f"No data source specified for {self.__class__.__name__}, trying fallback."
- )
- source_path = cfg.FR_ERA5LAND_FALLBACK
-
- if source_path.startswith("http"):
- with requests.get(source_path) as resp:
- ds = xr.open_dataset(resp.content)
- data = ds.to_dataframe()
- else:
- ds = xr.open_dataset(source_path)
- data = ds.to_dataframe()
-
- # Drop NaNs which correspond to no land
- data = data.dropna()
- data = data.reset_index()
-
- data["time"] = pd.to_datetime(
- data["time"], format="%Y-%m-%d %H:%M:%S", errors="coerce"
- )
- data["time"] = data["time"].dt.normalize()
-
- # Transform into geopandas dataframe
- geo_data = gpd.GeoDataFrame(
- data,
- geometry=gpd.points_from_xy(data["longitude"], data["latitude"]),
- crs="EPSG:4326",
- )
-
- # Match the polygons using the ones of each predefined country area
- geo_masks = get_french_geom()
- geo_df = gpd.sjoin(geo_masks, geo_data, how="inner")
- super().__init__(geo_df.drop(["index_right", "geometry"], axis=1))
-
-
-class ERA5T(pd.DataFrame):
- """Provides ERA5T clean dataset as a pandas dataframe.
-
-    The provided dataset has to be in netCDF4 format.
-
- Args:
- source_path: str
- Path or URL to your version of the source data
- """
-
- def __init__(self, source_path: Optional[str] = None) -> None:
- """
- Args:
- source_path: Optional[str]
- Path or URL to your version of the source data
- """
- if not isinstance(source_path, str):
- # Download in cache
- logging.warning(
- f"No data source specified for {self.__class__.__name__}, trying fallback."
- )
- source_path = cfg.FR_ERA5T_FALLBACK
-
- if source_path.startswith("http"):
- with requests.get(source_path) as resp:
- ds = xr.open_dataset(resp.content)
- data = ds.to_dataframe()
- else:
- ds = xr.open_dataset(source_path)
- data = ds.to_dataframe()
-
- # Drop columns with NaNs
- data = data.dropna(axis=1)
- data = data.reset_index()
-
- data["time"] = pd.to_datetime(
- data["time"], format="%Y-%m-%d %H:%M:%S", errors="coerce"
- )
- data["time"] = data["time"].dt.normalize()
-
- # Transform into geopandas dataframe
- geo_data = gpd.GeoDataFrame(
- data,
- geometry=gpd.points_from_xy(data["longitude"], data["latitude"]),
- crs="EPSG:4326",
- )
-
- # Match the polygons using the ones of each predefined country area
- geo_masks = get_french_geom()
- geo_df = gpd.sjoin(geo_masks, geo_data, how="inner")
- super().__init__(geo_df.drop(["index_right", "geometry"], axis=1))
diff --git a/pyro_risks/datasets/__init__.py b/pyro_risks/datasets/__init__.py
deleted file mode 100644
index 29ac9cf..0000000
--- a/pyro_risks/datasets/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .nasa_wildfires import *
-from .weather import *
-from .wildfires import *
-from .masks import *
-from .ERA5 import *
-from .era_fwi_viirs import *
-from . import utils
diff --git a/pyro_risks/datasets/datasets_mergers.py b/pyro_risks/datasets/datasets_mergers.py
deleted file mode 100644
index 3401948..0000000
--- a/pyro_risks/datasets/datasets_mergers.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import pandas as pd
-
-from .utils import (
- find_closest_weather_station,
- find_closest_location,
- get_nearest_points,
-)
-
-
-def merge_datasets_by_departements(
- dataframe1: pd.DataFrame,
- time_col1: str,
- geometry_col1: str,
- dataframe2: pd.DataFrame,
- time_col2: str,
- geometry_col2: str,
- how: str,
-) -> pd.DataFrame:
- """
- Merge two datasets containing some kind of geometry and date columns.
-    The merge is done on [time_col1, time_col2] and [geometry_col1, geometry_col2].
-    Here the geometry is based on French departements. Therefore the geometry columns
-    should contain either the code of the departement or its geometry (this should be
-    consistent throughout both datasets).
-
-    Finally the merge is done according to the `how` parameter. Keep in mind that
-    this parameter must be chosen so that the merged dataframe keeps dimensions similar
-    to those of the weather dataframe: with an inner join, we would keep only the days
-    where wildfires were declared. Therefore if the weather dataframe is the left frame,
-    `how` must be "left"; if it is the right frame, `how` must be "right".
-
- Args:
- dataframe1: pd.DataFrame
- First dataframe, containing a time column and a geometry one.
- time_col1: str
- Name of the time column of dataframe1 on which the merge will be done.
- geometry_col1: str
- Name of the geometry column of dataframe1 on which the merge will be done.
- dataframe2: pd.DataFrame
- Second dataframe, containing a time column and a geometry one.
- time_col2: str
- Name of the time column of dataframe2 on which the merge will be done.
- geometry_col2: str
- Name of the geometry column of dataframe2 on which the merge will be done.
-        how: str
-            Parameter of the merge; should correspond to whether the weather dataframe
-            is the left or the right frame.
-
- Returns: pd.DataFrame
- Merged dataset on French departement.
- """
- merged_data = pd.merge(
- dataframe1,
- dataframe2,
- left_on=[time_col1, geometry_col1],
- right_on=[time_col2, geometry_col2],
- how=how,
- )
- return merged_data
-
-
-def merge_datasets_by_closest_weather_station(
- df_weather: pd.DataFrame,
- time_col_weather: str,
- df_fires: pd.DataFrame,
- time_col_fires: str,
-) -> pd.DataFrame:
- """
- Merge two datasets: one of weather conditions and the other of wildfires history data.
- Each dataset must contain a time column, and the weather dataset must have a `STATION`
-    column which uniquely identifies each station. The merge is done by finding the
-    closest weather station to each (lat, lon) point of the wildfires history dataset. The
-    latter is then grouped by date and closest_weather_station, which then allows joining it
-    with the weather conditions dataframe.
-
- Args:
- df_weather: pd.DataFrame
- Weather conditions dataframe. Must have a `STATION` column to identify each
- weather station.
- time_col_weather: str
- Name of the time column in `df_weather`.
- df_fires: pd.DataFrame
- Wildfires history dataset, must have points described by their latitude and
- longitude.
- time_col_fires: str
- Name of the time column in `df_fires`.
-
- Returns: pd.DataFrame
- Merged dataset by weather station proximity.
- """
- # For wildfires dataframe, need to find for each point the closest weather station
- df_fires["closest_weather_station"] = df_fires.apply(
- lambda row: find_closest_weather_station(
- df_weather, row["latitude"], row["longitude"]
- ),
- axis=1,
- )
-
- grouped_fires = (
- df_fires.groupby(["closest_weather_station", "acq_date"], observed=True)
- .first()
- .reset_index()
- )
-
- merged_data = pd.merge(
- df_weather,
- grouped_fires,
- left_on=[time_col_weather, "STATION"],
- right_on=[time_col_fires, "closest_weather_station"],
- how="left",
- )
- return merged_data
-
-
-def merge_datasets_by_closest_weather_point(
- df_weather: pd.DataFrame,
- time_col_weather: str,
- df_fires: pd.DataFrame,
- time_col_fires: str,
-) -> pd.DataFrame:
- """
-    Merge weather and fire datasets when the weather dataset comes from satellite
-    data, such as the ERA5 Land hourly dataset available at
-    https://cds.climate.copernicus.eu/cdsapp#!/dataset/reanalysis-era5-land?tab=form
-    and accessible through cdsapi.
-
- Args:
- df_weather: pd.DataFrame
- Weather conditions dataframe, must have "latitude" and "longitude" columns.
- time_col_weather: str
- Name of the time column in `df_weather`.
- df_fires: pd.DataFrame
- Wildfires history dataset, must have points described by their latitude and
- longitude.
- time_col_fires: str
- Name of the time column in `df_fires`.
-
- Returns: pd.DataFrame
- Merged dataset by weather station proximity.
- """
-    # For wildfires dataframe, need to find for each point the closest weather point
- df_fires["closest_weather_point"] = df_fires.apply(
- lambda row: find_closest_location(
- df_weather, row["latitude"], row["longitude"]
- ),
- axis=1,
- )
-
- grouped_fires = (
- df_fires.groupby(["closest_weather_point", "acq_date"], observed=True)
- .first()
- .reset_index()
- )
-
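-    # closest_weather_point holds (lat, lon) tuples; split them into two columns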
- grouped_fires["weather_lat"], grouped_fires["weather_lon"] = (
- grouped_fires["closest_weather_point"].str[0],
- grouped_fires["closest_weather_point"].str[1],
- )
-
- merged_data = pd.merge(
- df_weather,
- grouped_fires,
- left_on=[time_col_weather, "latitude", "longitude"],
- right_on=[time_col_fires, "weather_lat", "weather_lon"],
- how="left",
- )
- return merged_data
-
-
-def merge_by_proximity(
- df_left: pd.DataFrame,
- time_col_left: str,
- df_right: pd.DataFrame,
- time_col_right: str,
- how: str,
-) -> pd.DataFrame:
- """
-    Merge df_left and df_right by finding, for each point in df_left, the closest point in df_right.
-    For instance, df_left can be a wildfires history dataset and df_right a weather conditions dataset,
-    and we want to match each wildfire with its closest weather point.
-    This can also be used if, for instance, we want to merge the FWI dataset (df_left) with the
-    ERA5/VIIRS dataset (df_right).
-
- Args:
- df_left: pd.DataFrame
- Left dataframe, must have "latitude" and "longitude" columns.
- time_col_left: str
- Name of the time column in `df_left`.
- df_right: pd.DataFrame
- Right dataset, must have points described by their latitude and
- longitude.
- time_col_right: str
- Name of the time column in `df_right`.
- how: str
- How the pandas merge needs to be done.
-
- Returns:
- Merged dataset by point (lat/lon) proximity.
- """
- # get all df_right points in adequate format
- df_tmp = df_right.drop_duplicates(subset=["latitude", "longitude"])
- df_tmp = df_tmp.reset_index(drop=True)
- lat_right = df_tmp["latitude"].values
- lon_right = df_tmp["longitude"].values
- candidates = list(zip(lat_right, lon_right))
-
- df_tmp2 = df_left.drop_duplicates(subset=["latitude", "longitude"])
- source_points = list(zip(df_tmp2["latitude"].values, df_tmp2["longitude"].values))
-
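-    # Index, for each unique df_left point, of its nearest neighbour among the df_right points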
- indices, _ = get_nearest_points(source_points, candidates)
-
- dict_idx_lat_lon = {}
- for idx in set(indices):
- df_tmp3 = df_tmp[df_tmp.index == idx]
- dict_idx_lat_lon[idx] = (
- df_tmp3["latitude"].values[0],
- df_tmp3["longitude"].values[0],
- )
-
- dict_source_idx = dict(zip(source_points, indices))
-
- df_left["point"] = list(zip(df_left["latitude"], df_left["longitude"]))
-
- df_left["corresponding_index"] = df_left["point"].map(dict_source_idx)
-
- df_left["closest_point"] = df_left["corresponding_index"].map(dict_idx_lat_lon)
-
- df_left["closest_lat"], df_left["closest_lon"] = (
- df_left["closest_point"].str[0],
- df_left["closest_point"].str[1],
- )
-
- merged_data = pd.merge(
- df_left,
- df_right,
- left_on=[time_col_left, "closest_lat", "closest_lon"],
- right_on=[time_col_right, "latitude", "longitude"],
- how=how,
- )
-
- merged_data = merged_data.drop(
- ["point", "closest_point", "corresponding_index"], axis=1
- )
- return merged_data
diff --git a/pyro_risks/datasets/era_fwi_viirs.py b/pyro_risks/datasets/era_fwi_viirs.py
deleted file mode 100644
index b866801..0000000
--- a/pyro_risks/datasets/era_fwi_viirs.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import logging
-import pandas as pd
-from typing import Optional
-
-from pyro_risks.datasets import NASAFIRMS_VIIRS, ERA5Land, ERA5T
-from pyro_risks.datasets.utils import get_intersection_range
-from pyro_risks.datasets.fwi import GwisFwi
-from pyro_risks import config as cfg
-
-__all__ = ["MergedEraFwiViirs"]
-
-
-logger = logging.getLogger("uvicorn.info")
-
-
-def process_dataset_to_predict(fwi: pd.DataFrame, era: pd.DataFrame) -> pd.DataFrame:
- """Groupby and merge fwi and era5 datasets for model predictions.
-
- Args:
- fwi (pd.DataFrame): Fwi dataset
- era (pd.DataFrame): Era5 dataset
-
- Returns:
- pd.DataFrame: one line per department and day
- """
- weather = era.copy()
- weather["time"] = pd.to_datetime(
- weather["time"], format="%Y-%m-%d", errors="coerce"
- )
- fwi_df = fwi.copy()
- fwi_df["day"] = pd.to_datetime(fwi_df["day"], format="%Y-%m-%d", errors="coerce")
-
- # Group fwi dataframe by day and department and compute min, max, mean, std
- agg_fwi_df = (
- fwi_df.groupby(["day", "nom"])[cfg.FWI_VARS]
- .agg(["min", "max", "mean", "std"])
- .reset_index()
- )
- agg_fwi_df.columns = ["day", "nom"] + [
- x[0] + "_" + x[1] for x in agg_fwi_df.columns if x[1] != ""
- ]
-
- logger.info("Finished aggregationg of FWI")
-
- # Group weather dataframe by day and department and compute min, max, mean, std
- agg_wth_df = (
- weather.groupby(["time", "nom"])[cfg.WEATHER_ERA5T_VARS]
- .agg(["min", "max", "mean", "std"])
- .reset_index()
- )
- agg_wth_df.columns = ["day", "nom"] + [
- x[0] + "_" + x[1] for x in agg_wth_df.columns if x[1] != ""
- ]
-
- logger.info("Finished aggregationg of weather data")
-
- # Merge fwi and weather together
- res_df = pd.merge(agg_fwi_df, agg_wth_df, on=["day", "nom"], how="inner")
- logger.info("Finished merging")
- return res_df
-
-
-class MergedEraFwiViirs(pd.DataFrame):
- """Create dataframe for modeling described in models/score_v0.py.
-
-    Get weather, NASA FIRMS VIIRS fires and FWI datasets, keep only the lines corresponding
-    to vegetation fires (excluding low-confidence detections), then merge aggregated versions
-    of the dataframes by department and by day.
-    For each of the features of the weather and FWI datasets, min, max, mean and std are computed.
-    Fires are counted for each department and day.
-
- Returns:
- pd.DataFrame
- """
-
- def __init__(
- self,
- era_source_path: Optional[str] = None,
- viirs_source_path: Optional[str] = None,
- fwi_source_path: Optional[str] = None,
- ) -> None:
- """Define the merged era-fwi-viirs dataframe.
-
- Args:
- era_source_path (str, optional): Era5 data source path. Defaults to None.
- viirs_source_path (str, optional): Viirs data source path. Defaults to None.
- fwi_source_path (str, optional): Fwi data source path. Defaults to None.
- """
- weather = ERA5T(era_source_path) # ERA5Land(era_source_path)
- nasa_firms = NASAFIRMS_VIIRS(viirs_source_path)
-
- # Time span selection
- date_range = get_intersection_range(weather.time, nasa_firms.acq_date)
- weather = weather[weather.time.isin(date_range)]
- nasa_firms = nasa_firms[nasa_firms.acq_date.isin(date_range)]
-
- # Keep only vegetation wildfires and remove thermal anomalies with low confidence
- where = (nasa_firms["confidence"] != "l") & (nasa_firms["type"] == 0)
- nasa_firms = nasa_firms[where]
-
-        # Get FWI dataset for year 2019 (September 1st is missing)
- if fwi_source_path is None:
- days = [
- x.strftime("%Y%m%d")
- for x in pd.date_range(start="2019-01-01", end="2019-12-31")
- ]
- days.remove("20190901")
- fwi_df = GwisFwi(days_list=days)
- else:
- fwi_df = pd.read_csv(fwi_source_path)
-
- # Load FWI dataset
- fwi_df["day"] = pd.to_datetime(fwi_df["day"], format="%Y%m%d", errors="coerce")
-
- # Group fwi dataframe by day and department and compute min, max, mean, std
- agg_fwi_df = (
- fwi_df.groupby(["day", "departement"])[cfg.FWI_VARS]
- .agg(["min", "max", "mean", "std"])
- .reset_index()
- )
- agg_fwi_df.columns = ["day", "departement"] + [
- x[0] + "_" + x[1] for x in agg_fwi_df.columns if x[1] != ""
- ]
-
- # Group weather dataframe by day and department and compute min, max, mean, std
- agg_wth_df = (
- weather.groupby(["time", "nom"])[cfg.WEATHER_ERA5T_VARS]
- .agg(["min", "max", "mean", "std"])
- .reset_index()
- )
- agg_wth_df.columns = ["day", "departement"] + [
- x[0] + "_" + x[1] for x in agg_wth_df.columns if x[1] != ""
- ]
-
- # Merge fwi and weather together
- mid_df = pd.merge(
- agg_fwi_df, agg_wth_df, on=["day", "departement"], how="inner"
- )
-
- # Count fires by day and department
- fires_count = (
- nasa_firms.groupby(["acq_date", "nom"])["confidence"]
- .count()
- .to_frame()
- .reset_index()
- )
- fires_count = fires_count.rename({"confidence": "fires"}, axis=1)
-
- # Merge fires
- final_df = pd.merge(
- mid_df,
- fires_count,
- left_on=["day", "departement"],
- right_on=["acq_date", "nom"],
- how="left",
- ).drop(["acq_date", "nom"], axis=1)
-
- # Fill lines with no fires with 0
- final_df["fires"] = final_df["fires"].fillna(0)
- super().__init__(final_df)
diff --git a/pyro_risks/datasets/fwi.py b/pyro_risks/datasets/fwi.py
deleted file mode 100644
index 7a9cbc1..0000000
--- a/pyro_risks/datasets/fwi.py
+++ /dev/null
@@ -1,258 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import pandas as pd
-import numpy as np
-from netCDF4 import Dataset
-import geopandas as gpd
-from typing import Optional, List, Dict, Any
-
-import requests
-import zipfile
-import os
-import urllib.request
-import json
-import logging
-import tempfile
-
-from shapely.geometry import Point
-from shapely import geometry
-
-from pyro_risks import config as cfg
-from pyro_risks.datasets.queries_api import call_fwi
-from pyro_risks.datasets.masks import get_french_geom
-
-
-def load_data(output_path: str) -> None:
- """Load FWI zipped data from github repo and unzip data in folder output_path.
-
- Args:
- output_path (str): absolute, relative or temporary path
- """
- results = requests.get(cfg.FR_FWI_2019_FALLBACK)
-
- os.makedirs(output_path, exist_ok=True)
- with open(os.path.join(output_path, "fwi_folder.zip"), "wb") as f:
- f.write(results.content)
-
- file = zipfile.ZipFile(os.path.join(output_path, "fwi_folder.zip"))
- file.extractall(path=os.path.join(output_path, "fwi_unzipped"))
-
-
-def include_department(row: pd.Series, polygons_json: Dict[str, Any]) -> str:
- """Given a row of a dataframe containing longitude and latitude returns name of french department.
-
-    This function makes use of shapely to check whether a department polygon contains the point.
- Args:
- row (pd.Series): row of dataframe
- polygons_json (dict): dict with polygons of the departments
-
- Returns:
- str: name of department or empty string
- """
- for i_dep in range(len(polygons_json["features"])):
- geom = geometry.shape(polygons_json["features"][i_dep]["geometry"])
- if geom.contains(Point((row["longitude"], row["latitude"]))):
- return polygons_json["features"][i_dep]["properties"]["nom"]
- return ""
-
-
-def get_fwi_from_api(date: str) -> gpd.GeoDataFrame:
- """Call the CDS API and return all fwi variables as a dataframe with geo coordinates and departments.
-
-    When calling the API we get a zip file that must be extracted (in a tmp directory); we then handle
-    each queried variable, which sits in a separate netcdf file. A dataframe is created with all the
-    variables and finally we join codes and departments with geopandas.
-
- Args:
- date (str)
-
- Returns:
- pd.DataFrame
- """
-
- year, month, day = date.split("-")
- date_concat = date.replace("-", "")
- with tempfile.TemporaryDirectory() as tmp:
- call_fwi(tmp, year, month, day)
-
- file = zipfile.ZipFile(os.path.join(tmp, f"fwi_{year}_{month}_{day}.zip"))
- file.extractall(path=os.path.join(tmp, f"fwi_{year}_{month}_{day}"))
-
- df0 = pd.DataFrame({})
- for var_name in ["BUI", "DC", "DMC", "DSR", "FFMC", "FWI", "ISI"]:
- var_path = os.path.join(
- tmp,
- f"fwi_{year}_{month}_{day}/ECMWF_FWI_{var_name}_{date_concat}_1200_hr_v3.1_int.nc",
- )
- nc = Dataset(var_path, "r")
- lats = nc.variables["latitude"][:]
- var = nc.variables[var_name.lower()][:]
- nc.close()
-
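-            # Rebuild longitudes in [-180, 180] and swap the two halves of the grid
-            # accordingly (index 720 corresponds to 180 degrees at 0.25 degree resolution)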
- lons = np.arange(-180, 180.25, 0.25)
- var_cyclic = np.ma.hstack([var[0][:, 720:], var[0][:, :721]])
- lon2d, lat2d = np.meshgrid(lons, lats)
- df = pd.DataFrame(
- {
- "latitude": lat2d.flatten(),
- "longitude": lon2d.flatten(),
- var_name.lower(): var_cyclic.flatten(),
- }
- )
- df = df.dropna(subset=[var_name.lower()])
- df = df.reset_index(drop=True)
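-            # The first variable (BUI) initialises df0; the others are merged on coordinates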
- if var_name == "BUI":
- df0 = pd.concat([df0, df], axis=1)
- else:
- df0 = pd.merge(df0, df, on=["latitude", "longitude"], how="inner")
- geo_data = gpd.GeoDataFrame(
- df0,
- geometry=gpd.points_from_xy(df0["longitude"], df0["latitude"]),
- crs="EPSG:4326",
- )
- geo_masks = get_french_geom()
- geo_df = gpd.sjoin(geo_masks, geo_data, how="inner")
- return geo_df.drop(["index_right", "geometry"], axis=1)
-
-
-def get_fwi_data_for_predict(date: str) -> pd.DataFrame:
- """Run CDS API queries for dates required by the model and return fwi dataset for predict step.
-
- This takes care principally of the lags required for the modelling step.
-
- Args:
- date (str)
-
- Returns:
- pd.DataFrame
- """
- data = get_fwi_from_api(date)
- data["day"] = date
- # Lag J-1
- lag = np.datetime64(date) - np.timedelta64(1, "D")
- dataJ1 = get_fwi_from_api(str(lag))
- dataJ1["day"] = str(lag)
- # Lag J-3
- lag = np.datetime64(date) - np.timedelta64(3, "D")
- dataJ3 = get_fwi_from_api(str(lag))
- dataJ3["day"] = str(lag)
- # Lag J-7
- lag = np.datetime64(date) - np.timedelta64(7, "D")
- dataJ7 = get_fwi_from_api(str(lag))
- dataJ7["day"] = str(lag)
- merged_data = pd.concat([data, dataJ1, dataJ3, dataJ7], ignore_index=True)
- return merged_data
-
-
-def get_fwi_data(source_path: str, day: Optional[str] = "20190101") -> pd.DataFrame:
- """Load and handle netcdf data for selected day.
-
- Return pandas dataframe with longitude, latitude, day and fwi indices
- (fwi, ffmc, dmc, dc, isi, bui, dsr, dr).
- Args:
-        source_path (str): path with unzipped netcdf fwi data, usually obtained from load_data.
- day (str, optional): which day to load. Defaults to '20190101'.
-
- Returns:
- pd.DataFrame: dataframe with all fwi indices for selected day
- """
- nc = Dataset(
- os.path.join(source_path, "fwi_unzipped/JRC_FWI_{}.nc".format(day)), "r"
- )
- try:
- lons = nc.variables["lon"][:]
- lats = nc.variables["lat"][:]
- fwi = nc.variables["fwi"][:]
- ffmc = nc.variables["ffmc"][:]
- dmc = nc.variables["dmc"][:]
- dc = nc.variables["dc"][:]
- isi = nc.variables["isi"][:]
- bui = nc.variables["bui"][:]
- dsr = nc.variables["dsr"][:]
- dr = nc.variables["danger_risk"][:]
- except KeyError:
- print("Some reading error with: ", day)
- nc.close()
-
- lon2d, lat2d = np.meshgrid(lons, lats)
-
- df = pd.DataFrame(
- {
- "latitude": lat2d.flatten(),
- "longitude": lon2d.flatten(),
- "day": day,
- "fwi": fwi[0, :, :].flatten(),
- "ffmc": ffmc[0, :, :].flatten(),
- "dmc": dmc[0, :, :].flatten(),
- "dc": dc[0, :, :].flatten(),
- "isi": isi[0, :, :].flatten(),
- "bui": bui[0, :, :].flatten(),
- "dsr": dsr[0, :, :].flatten(),
- "dr": dr[0, :, :].flatten(),
- }
- )
- df = df.dropna(subset=["fwi", "ffmc", "dmc", "dc", "isi", "bui", "dsr", "dr"])
- df = df.reset_index(drop=True)
- return df
-
-
-def create_departement_df(day_data: pd.DataFrame) -> pd.DataFrame:
- """Create dataframe with lon, lat coordinates and corresponding departments.
-
-    Load the json with the department polygons and run include_department to get the
-    name of the department corresponding to each row of input data, typically one day of FWI data
-    obtained with load_data. This may take a few minutes due to the shapely processing.
- Args:
- day_data (pd.Dataframe): df with longitudes and latitudes
-
- Returns:
- pd.DataFrame: dataframe with lat, lon and department
- """
- df = day_data.copy()
-
- with urllib.request.urlopen(cfg.FR_GEOJSON) as url:
- dep_polygons = json.loads(url.read().decode())
-
- deps = [include_department(df.iloc[i], dep_polygons) for i in range(df.shape[0])]
- df["departement"] = deps
- df = df[df["departement"] != ""]
- dep_geo_df = df[["latitude", "longitude", "departement"]]
- return dep_geo_df
-
-
-class GwisFwi(pd.DataFrame):
- """GWIS FWI dataframe (8 km resolution) on French territory based on 2019-2020 data."""
-
- def __init__(self, days_list: Optional[List[str]] = None) -> None:
- """Create dataframe with fwi indices data corresponding to days_list and join french department.
-
- Args:
- days_list: list of str, format year month day (all concatenated)
- """
- days_list = ["20190101"] if days_list is None else days_list
- fwi_df = pd.DataFrame(
- columns=[
- "latitude",
- "longitude",
- "day",
- "fwi",
- "ffmc",
- "dmc",
- "dc",
- "isi",
- "bui",
- "dsr",
- "dr",
- ]
- )
- with tempfile.TemporaryDirectory() as tmp:
- load_data(output_path=tmp)
- for day in days_list:
- df = get_fwi_data(source_path=tmp, day=day)
- fwi_df = pd.concat([fwi_df, df])
- dep_geo_df = create_departement_df(df)
- fwi_df = pd.merge(fwi_df, dep_geo_df, on=["latitude", "longitude"])
- super().__init__(fwi_df)
diff --git a/pyro_risks/datasets/masks.py b/pyro_risks/datasets/masks.py
deleted file mode 100644
index 17036e1..0000000
--- a/pyro_risks/datasets/masks.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import logging
-import geopandas as gpd
-from typing import Optional
-
-from pyro_risks import config as cfg
-
-
-__all__ = ["get_french_geom"]
-
-
-def get_french_geom(path: Optional[str] = None) -> gpd.GeoDataFrame:
- """Creates the dataframe with the geometry of French departments
-
- Args:
- path: optional path to your local geojson
- """
- if isinstance(path, str):
- return gpd.read_file(path)
- else:
- try:
- return gpd.read_file(cfg.FR_GEOJSON)
- except Exception:
- logging.warning(f"Unable to access {cfg.FR_GEOJSON}, trying fallback.")
- return gpd.read_file(cfg.FR_GEOJSON_FALLBACK)
diff --git a/pyro_risks/datasets/nasa_wildfires.py b/pyro_risks/datasets/nasa_wildfires.py
deleted file mode 100644
index a6ba398..0000000
--- a/pyro_risks/datasets/nasa_wildfires.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import logging
-from typing import List, Optional
-
-import geopandas as gpd
-import pandas as pd
-
-from pyro_risks import config as cfg
-
-__all__ = ["NASAFIRMS", "NASAFIRMS_VIIRS"]
-
-from .masks import get_french_geom
-
-
-class NASAFIRMS(pd.DataFrame):
- """Wildfire history dataset on French territory, using data from
- NASA satellites. Accessible by completing the form at
- https://effis.jrc.ec.europa.eu/applications/data-request-form/
-
-    Be careful when completing the form: you can choose to get the
-    dataset in either json or xlsx format.
-    However, if your source data is in csv format, you can still use
-    this class to clean it by using the parameter `fmt`.
-
- By default, the format is considered to be json.
-
- Args:
- source_path: str
- Path or URL to your version of the source data
- fmt: str
- Format of the source data, can either be "csv", "xlsx"
- or "json". Default is "json".
- use_cols: List[str]
- List of columns to read from the source
- """
-
- kept_cols = [
- "latitude",
- "longitude",
- "acq_date",
- "acq_time",
- "confidence",
- "bright_t31",
- "frp",
- ]
- fmt = "json"
-
- def __init__(
- self,
- source_path: Optional[str] = None,
- fmt: Optional[str] = None,
- use_cols: Optional[List[str]] = None,
- ) -> None:
- """
- Args:
- source_path: Optional[str]
- Path or URL to your version of the source data
- fmt: Optional[str]
- Format of the source data, can either be
- "csv", "xlsx" or "json".
- use_cols: Optional[List[str]]
- List of columns to keep in the dataframe
- """
- if not isinstance(source_path, str):
- # Download in cache
- logging.warning(
- f"No data source specified for {self.__class__.__name__}, trying fallback."
- )
- source_path = cfg.FR_NASA_FIRMS_FALLBACK
- if not isinstance(fmt, str):
- fmt = self.fmt
- if not isinstance(use_cols, list):
- use_cols = self.kept_cols
-
- if fmt == "json":
- data = pd.read_json(source_path, orient="records")
- data = pd.json_normalize(data["features"])
- # remove unnecessary prefix
- data.columns = [col.split(".")[-1] for col in data.columns]
- # keep defined columns
- data = data[use_cols]
-
- elif fmt == "xlsx":
- data = pd.read_excel(source_path, usecols=use_cols)
-
- elif fmt == "csv":
- data = pd.read_csv(source_path, usecols=use_cols)
- # if csv format, the `acq_time` column needs to be changed
-            # the raw data has the format "HHMM", we will transform it
- # so that it has the format "HHMMSS"
- # convert type to str
- data["acq_time"] = data["acq_time"].astype(str)
- # fill with 0
- data["acq_time"] = data["acq_time"].str.ljust(6, "0")
- # prepare for datetime needs
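-            # e.g. "141000" -> "14:10:00"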
- data["acq_time"] = data["acq_time"].apply(
- lambda s: ":".join(map("{}{}".format, *(s[::2], s[1::2])))
- )
-
- else:
- raise ValueError(
- "The given format cannot be read, it should be either csv, xlsx or json."
- )
-
- data["acq_date_time"] = (
- data["acq_date"].astype(str) + " " + data["acq_time"].astype(str)
- )
- data["acq_date"] = pd.to_datetime(
- data["acq_date"], format="%Y-%m-%d", errors="coerce"
- )
- data["acq_date_time"] = pd.to_datetime(
- data["acq_date_time"], format="%Y-%m-%d %H:%M:%S", errors="coerce"
- )
- data["latitude"] = data["latitude"].astype(float)
- data["longitude"] = data["longitude"].astype(float)
- data["bright_t31"] = data["bright_t31"].astype(float)
- data["frp"] = data["frp"].astype(float)
-
- # add departements geometry to allow for departements merging
- geo_data = gpd.GeoDataFrame(
- data,
- geometry=gpd.points_from_xy(data["longitude"], data["latitude"]),
- crs="EPSG:4326",
- )
- # Match the polygons using the ones of each predefined country area
- geo_masks = get_french_geom()
- geo_df = gpd.sjoin(geo_masks, geo_data, how="inner")
- super().__init__(geo_df.drop(["acq_time", "index_right", "geometry"], axis=1))
-
-
-class NASAFIRMS_VIIRS(pd.DataFrame):
- """Wildfire history dataset on French territory, using data from
- VIIRS.
-
- Args:
- source_path: str
- Path or URL to your version of the source data
- fmt: str
- Format of the source data, can either be "csv", "xlsx"
- or "json". Default is "json".
- use_cols: List[str]
- List of columns to read from the source
- """
-
- kept_cols = [
- "latitude",
- "longitude",
- "acq_date",
- "acq_time",
- "confidence",
- "bright_ti4",
- "bright_ti5",
- "frp",
- "type",
- ]
- fmt = "csv"
-
- def __init__(
- self,
- source_path: Optional[str] = None,
- fmt: Optional[str] = None,
- use_cols: Optional[List[str]] = None,
- ) -> None:
- """
- Args:
- source_path: Optional[str]
- Path or URL to your version of the source data
- fmt: Optional[str]
- Format of the source data, can either be
- "csv", "xlsx" or "json".
- use_cols: Optional[List[str]]
- List of columns to keep in the dataframe
- """
- if not isinstance(source_path, str):
- # Download in cache
- logging.warning(
- f"No data source specified for {self.__class__.__name__}, trying fallback."
- )
- source_path = cfg.FR_NASA_VIIRS_FALLBACK
- if not isinstance(fmt, str):
- fmt = self.fmt
- if not isinstance(use_cols, list):
- use_cols = self.kept_cols
-
- if fmt == "json":
- data = pd.read_json(source_path, orient="records")
- data = pd.json_normalize(data["features"])
- # remove unnecessary prefix
- data.columns = [col.split(".")[-1] for col in data.columns]
- # keep defined columns
- data = data[use_cols]
-
- elif fmt == "xlsx":
- data = pd.read_excel(source_path, usecols=use_cols)
-
- elif fmt == "csv":
- data = pd.read_csv(source_path, usecols=use_cols)
- # if csv format, the `acq_time` column needs to be changed
-            # the raw data has the format "HHMM", we will transform it
- # so that it has the format "HHMMSS"
- # convert type to str
- data["acq_time"] = data["acq_time"].astype(str)
- # fill with 0
- data["acq_time"] = data["acq_time"].str.ljust(6, "0")
- # prepare for datetime needs
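-            # e.g. "141000" -> "14:10:00"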
- data["acq_time"] = data["acq_time"].apply(
- lambda s: ":".join(map("{}{}".format, *(s[::2], s[1::2])))
- )
-
- else:
- raise ValueError(
- "The given format cannot be read, it should be either csv, xlsx or json."
- )
-
- data["acq_date_time"] = (
- data["acq_date"].astype(str) + " " + data["acq_time"].astype(str)
- )
- data["acq_date"] = pd.to_datetime(
- data["acq_date"], format="%Y-%m-%d", errors="coerce"
- )
- data["acq_date_time"] = pd.to_datetime(
- data["acq_date_time"], format="%Y-%m-%d %H:%M:%S", errors="coerce"
- )
- data["latitude"] = data["latitude"].astype(float)
- data["longitude"] = data["longitude"].astype(float)
- data["bright_ti4"] = data["bright_ti4"].astype(float)
- data["bright_ti5"] = data["bright_ti5"].astype(float)
- data["frp"] = data["frp"].astype(float)
-
- # add departements geometry to allow for departements merging
- geo_data = gpd.GeoDataFrame(
- data,
- geometry=gpd.points_from_xy(data["longitude"], data["latitude"]),
- crs="EPSG:4326",
- )
- # Match the polygons using the ones of each predefined country area
- geo_masks = get_french_geom()
- geo_df = gpd.sjoin(geo_masks, geo_data, how="inner")
- super().__init__(geo_df.drop(["acq_time", "index_right", "geometry"], axis=1))
diff --git a/pyro_risks/datasets/queries_api.py b/pyro_risks/datasets/queries_api.py
deleted file mode 100644
index 79e43eb..0000000
--- a/pyro_risks/datasets/queries_api.py
+++ /dev/null
@@ -1,464 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import cdsapi
-import os
-import logging
-import urllib3
-
-from pyro_risks import config as cfg
-
-
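-# The CDS client below is created with verify=0, so silence the resulting urllib3 warnings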
-urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-logger = logging.getLogger("uvicorn.info")
-
-
-def call_era5land(output_path: str, year: str, month: str, day: str) -> None:
- """Call cdpaspi to get ERA5Land data as file nc format for given date.
-
- By default "time" = "14:00". It is not an issue since we get these ERA5 Land data
- with a 2 months delay.
-
- Args:
- output_path: str
- year: str
- month: str
- day: str
- """
- file_path = os.path.join(output_path, f"era5land_{year}_{month}_{day}.nc")
-
- if os.path.exists(file_path):
- logger.info(f"Using cached {file_path}")
- return
-
- c = cdsapi.Client(url=cfg.CDS_URL, key=f"{cfg.CDS_UID}:{cfg.CDS_API_KEY}", verify=0)
-
- c.retrieve(
- "reanalysis-era5-land",
- {
- "variable": [
- "10m_u_component_of_wind",
- "10m_v_component_of_wind",
- "2m_dewpoint_temperature",
- "2m_temperature",
- "evaporation_from_bare_soil",
- "evaporation_from_open_water_surfaces_excluding_oceans",
- "evaporation_from_the_top_of_canopy",
- "evaporation_from_vegetation_transpiration",
- "forecast_albedo",
- "lake_bottom_temperature",
- "lake_ice_depth",
- "lake_ice_temperature",
- "lake_mix_layer_depth",
- "lake_mix_layer_temperature",
- "lake_shape_factor",
- "lake_total_layer_temperature",
- "leaf_area_index_high_vegetation",
- "leaf_area_index_low_vegetation",
- "potential_evaporation",
- "runoff",
- "skin_reservoir_content",
- "skin_temperature",
- "snow_albedo",
- "snow_cover",
- "snow_density",
- "snow_depth",
- "snow_depth_water_equivalent",
- "snow_evaporation",
- "snowfall",
- "snowmelt",
- "soil_temperature_level_1",
- "soil_temperature_level_2",
- "soil_temperature_level_3",
- "soil_temperature_level_4",
- "sub_surface_runoff",
- "surface_latent_heat_flux",
- "surface_net_solar_radiation",
- "surface_net_thermal_radiation",
- "surface_pressure",
- "surface_runoff",
- "surface_sensible_heat_flux",
- "surface_solar_radiation_downwards",
- "surface_thermal_radiation_downwards",
- "temperature_of_snow_layer",
- "total_evaporation",
- "total_precipitation",
- "volumetric_soil_water_layer_1",
- "volumetric_soil_water_layer_2",
- "volumetric_soil_water_layer_3",
- "volumetric_soil_water_layer_4",
- ],
- "year": year,
- "month": month,
- "day": day,
- "time": "14:00",
- "area": [
- 51,
- -6,
- 41,
- 10,
- ],
- "format": "netcdf",
- },
- file_path,
- )
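-
-# Illustrative usage sketch (not part of the original module): assuming valid
-# CDS credentials in `cfg`, the call below would fetch the ERA5-Land variables
-# for 5 May 2020 into /tmp:
-#
-#     call_era5land(output_path="/tmp", year="2020", month="05", day="05")
-#     # -> writes /tmp/era5land_2020_05_05.nc, or reuses it if already cached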
-
-
-def call_era5t(output_path: str, year: str, month: str, day: str) -> None:
- """Call cdpaspi to get ERA5T data as file nc format for given date.
-
- Most recent available data is Day -5.
- By default "time" = "14:00". It is not an issue since we get these ERA5T data
- with a 5 days delay.
-
- Args:
- output_path: str
- year: str
- month: str
- day: str
- """
- file_path = os.path.join(output_path, f"era5t_{year}_{month}_{day}.nc")
-
- if os.path.exists(file_path):
- logger.info(f"Using cached {file_path}")
- return
-
- c = cdsapi.Client(url=cfg.CDS_URL, key=f"{cfg.CDS_UID}:{cfg.CDS_API_KEY}", verify=0)
-
- c.retrieve(
- "reanalysis-era5-single-levels",
- {
- "product_type": "reanalysis",
- "variable": [
- "100m_u_component_of_wind",
- "100m_v_component_of_wind",
- "10m_u_component_of_neutral_wind",
- "10m_u_component_of_wind",
- "10m_v_component_of_neutral_wind",
- "10m_v_component_of_wind",
- "10m_wind_gust_since_previous_post_processing",
- "2m_dewpoint_temperature",
- "2m_temperature",
- "air_density_over_the_oceans",
- "altimeter_corrected_wave_height",
- "altimeter_range_relative_correction",
- "altimeter_wave_height",
- "angle_of_sub_gridscale_orography",
- "anisotropy_of_sub_gridscale_orography",
- "benjamin_feir_index",
- "boundary_layer_dissipation",
- "boundary_layer_height",
- "charnock",
- "clear_sky_direct_solar_radiation_at_surface",
- "cloud_base_height",
- "coefficient_of_drag_with_waves",
- "convective_available_potential_energy",
- "convective_inhibition",
- "convective_precipitation",
- "convective_rain_rate",
- "convective_snowfall",
- "convective_snowfall_rate_water_equivalent",
- "downward_uv_radiation_at_the_surface",
- "duct_base_height",
- "eastward_gravity_wave_surface_stress",
- "eastward_turbulent_surface_stress",
- "evaporation",
- "forecast_albedo",
- "forecast_logarithm_of_surface_roughness_for_heat",
- "forecast_surface_roughness",
- "free_convective_velocity_over_the_oceans",
- "friction_velocity",
- "gravity_wave_dissipation",
- "high_cloud_cover",
- "high_vegetation_cover",
- "ice_temperature_layer_1",
- "ice_temperature_layer_2",
- "ice_temperature_layer_3",
- "ice_temperature_layer_4",
- "instantaneous_10m_wind_gust",
- "instantaneous_eastward_turbulent_surface_stress",
- "instantaneous_large_scale_surface_precipitation_fraction",
- "instantaneous_moisture_flux",
- "instantaneous_northward_turbulent_surface_stress",
- "instantaneous_surface_sensible_heat_flux",
- "k_index",
- "lake_bottom_temperature",
- "lake_cover",
- "lake_depth",
- "lake_ice_depth",
- "lake_ice_temperature",
- "lake_mix_layer_depth",
- "lake_mix_layer_temperature",
- "lake_shape_factor",
- "lake_total_layer_temperature",
- "land_sea_mask",
- "large_scale_precipitation",
- "large_scale_precipitation_fraction",
- "large_scale_rain_rate",
- "large_scale_snowfall",
- "large_scale_snowfall_rate_water_equivalent",
- "leaf_area_index_high_vegetation",
- "leaf_area_index_low_vegetation",
- "low_cloud_cover",
- "low_vegetation_cover",
- "maximum_2m_temperature_since_previous_post_processing",
- "maximum_individual_wave_height",
- "maximum_total_precipitation_rate_since_previous_post_processing",
- "mean_boundary_layer_dissipation",
- "mean_convective_precipitation_rate",
- "mean_convective_snowfall_rate",
- "mean_direction_of_total_swell",
- "mean_direction_of_wind_waves",
- "mean_eastward_gravity_wave_surface_stress",
- "mean_eastward_turbulent_surface_stress",
- "mean_evaporation_rate",
- "mean_gravity_wave_dissipation",
- "mean_large_scale_precipitation_fraction",
- "mean_large_scale_precipitation_rate",
- "mean_large_scale_snowfall_rate",
- "mean_northward_gravity_wave_surface_stress",
- "mean_northward_turbulent_surface_stress",
- "mean_period_of_total_swell",
- "mean_period_of_wind_waves",
- "mean_potential_evaporation_rate",
- "mean_runoff_rate",
- "mean_sea_level_pressure",
- "mean_snow_evaporation_rate",
- "mean_snowfall_rate",
- "mean_snowmelt_rate",
- "mean_square_slope_of_waves",
- "mean_sub_surface_runoff_rate",
- "mean_surface_direct_short_wave_radiation_flux",
- "mean_surface_direct_short_wave_radiation_flux_clear_sky",
- "mean_surface_downward_long_wave_radiation_flux",
- "mean_surface_downward_long_wave_radiation_flux_clear_sky",
- "mean_surface_downward_short_wave_radiation_flux",
- "mean_surface_downward_short_wave_radiation_flux_clear_sky",
- "mean_surface_downward_uv_radiation_flux",
- "mean_surface_latent_heat_flux",
- "mean_surface_net_long_wave_radiation_flux",
- "mean_surface_net_long_wave_radiation_flux_clear_sky",
- "mean_surface_net_short_wave_radiation_flux",
- "mean_surface_net_short_wave_radiation_flux_clear_sky",
- "mean_surface_runoff_rate",
- "mean_surface_sensible_heat_flux",
- "mean_top_downward_short_wave_radiation_flux",
- "mean_top_net_long_wave_radiation_flux",
- "mean_top_net_long_wave_radiation_flux_clear_sky",
- "mean_top_net_short_wave_radiation_flux",
- "mean_top_net_short_wave_radiation_flux_clear_sky",
- "mean_total_precipitation_rate",
- "mean_vertical_gradient_of_refractivity_inside_trapping_layer",
- "mean_vertically_integrated_moisture_divergence",
- "mean_wave_direction",
- "mean_wave_direction_of_first_swell_partition",
- "mean_wave_direction_of_second_swell_partition",
- "mean_wave_direction_of_third_swell_partition",
- "mean_wave_period",
- "mean_wave_period_based_on_first_moment",
- "mean_wave_period_based_on_first_moment_for_swell",
- "mean_wave_period_based_on_first_moment_for_wind_waves",
- "mean_wave_period_based_on_second_moment_for_swell",
- "mean_wave_period_based_on_second_moment_for_wind_waves",
- "mean_wave_period_of_first_swell_partition",
- "mean_wave_period_of_second_swell_partition",
- "mean_wave_period_of_third_swell_partition",
- "mean_zero_crossing_wave_period",
- "medium_cloud_cover",
- "minimum_2m_temperature_since_previous_post_processing",
- "minimum_total_precipitation_rate_since_previous_post_processing",
- "minimum_vertical_gradient_of_refractivity_inside_trapping_layer",
- "model_bathymetry",
- "near_ir_albedo_for_diffuse_radiation",
- "near_ir_albedo_for_direct_radiation",
- "normalized_energy_flux_into_ocean",
- "normalized_energy_flux_into_waves",
- "normalized_stress_into_ocean",
- "northward_gravity_wave_surface_stress",
- "northward_turbulent_surface_stress",
- "ocean_surface_stress_equivalent_10m_neutral_wind_direction",
- "ocean_surface_stress_equivalent_10m_neutral_wind_speed",
- "orography",
- "peak_wave_period",
- "period_corresponding_to_maximum_individual_wave_height",
- "potential_evaporation",
- "precipitation_type",
- "runoff",
- "sea_ice_cover",
- "sea_surface_temperature",
- "significant_height_of_combined_wind_waves_and_swell",
- "significant_height_of_total_swell",
- "significant_height_of_wind_waves",
- "significant_wave_height_of_first_swell_partition",
- "significant_wave_height_of_second_swell_partition",
- "significant_wave_height_of_third_swell_partition",
- "skin_reservoir_content",
- "skin_temperature",
- "slope_of_sub_gridscale_orography",
- "snow_albedo",
- "snow_density",
- "snow_depth",
- "snow_evaporation",
- "snowfall",
- "snowmelt",
- "soil_temperature_level_1",
- "soil_temperature_level_2",
- "soil_temperature_level_3",
- "soil_temperature_level_4",
- "soil_type",
- "standard_deviation_of_filtered_subgrid_orography",
- "standard_deviation_of_orography",
- "sub_surface_runoff",
- "surface_latent_heat_flux",
- "surface_net_solar_radiation",
- "surface_net_solar_radiation_clear_sky",
- "surface_net_thermal_radiation",
- "surface_net_thermal_radiation_clear_sky",
- "surface_pressure",
- "surface_runoff",
- "surface_sensible_heat_flux",
- "surface_solar_radiation_downward_clear_sky",
- "surface_solar_radiation_downwards",
- "surface_thermal_radiation_downward_clear_sky",
- "surface_thermal_radiation_downwards",
- "temperature_of_snow_layer",
- "toa_incident_solar_radiation",
- "top_net_solar_radiation",
- "top_net_solar_radiation_clear_sky",
- "top_net_thermal_radiation",
- "top_net_thermal_radiation_clear_sky",
- "total_cloud_cover",
- "total_column_cloud_ice_water",
- "total_column_cloud_liquid_water",
- "total_column_ozone",
- "total_column_rain_water",
- "total_column_snow_water",
- "total_column_supercooled_liquid_water",
- "total_column_water",
- "total_column_water_vapour",
- "total_precipitation",
- "total_sky_direct_solar_radiation_at_surface",
- "total_totals_index",
- "trapping_layer_base_height",
- "trapping_layer_top_height",
- "type_of_high_vegetation",
- "type_of_low_vegetation",
- "u_component_stokes_drift",
- "uv_visible_albedo_for_diffuse_radiation",
- "uv_visible_albedo_for_direct_radiation",
- "v_component_stokes_drift",
- "vertical_integral_of_divergence_of_cloud_frozen_water_flux",
- "vertical_integral_of_divergence_of_cloud_liquid_water_flux",
- "vertical_integral_of_divergence_of_geopotential_flux",
- "vertical_integral_of_divergence_of_kinetic_energy_flux",
- "vertical_integral_of_divergence_of_mass_flux",
- "vertical_integral_of_divergence_of_moisture_flux",
- "vertical_integral_of_divergence_of_ozone_flux",
- "vertical_integral_of_divergence_of_thermal_energy_flux",
- "vertical_integral_of_divergence_of_total_energy_flux",
- "vertical_integral_of_eastward_cloud_frozen_water_flux",
- "vertical_integral_of_eastward_cloud_liquid_water_flux",
- "vertical_integral_of_eastward_geopotential_flux",
- "vertical_integral_of_eastward_heat_flux",
- "vertical_integral_of_eastward_kinetic_energy_flux",
- "vertical_integral_of_eastward_mass_flux",
- "vertical_integral_of_eastward_ozone_flux",
- "vertical_integral_of_eastward_total_energy_flux",
- "vertical_integral_of_eastward_water_vapour_flux",
- "vertical_integral_of_energy_conversion",
- "vertical_integral_of_kinetic_energy",
- "vertical_integral_of_mass_of_atmosphere",
- "vertical_integral_of_mass_tendency",
- "vertical_integral_of_northward_cloud_frozen_water_flux",
- "vertical_integral_of_northward_cloud_liquid_water_flux",
- "vertical_integral_of_northward_geopotential_flux",
- "vertical_integral_of_northward_heat_flux",
- "vertical_integral_of_northward_kinetic_energy_flux",
- "vertical_integral_of_northward_mass_flux",
- "vertical_integral_of_northward_ozone_flux",
- "vertical_integral_of_northward_total_energy_flux",
- "vertical_integral_of_northward_water_vapour_flux",
- "vertical_integral_of_potential_and_internal_energy",
- "vertical_integral_of_potential_internal_and_latent_energy",
- "vertical_integral_of_temperature",
- "vertical_integral_of_thermal_energy",
- "vertical_integral_of_total_energy",
- "vertically_integrated_moisture_divergence",
- "volumetric_soil_water_layer_1",
- "volumetric_soil_water_layer_2",
- "volumetric_soil_water_layer_3",
- "volumetric_soil_water_layer_4",
- "wave_spectral_directional_width",
- "wave_spectral_directional_width_for_swell",
- "wave_spectral_directional_width_for_wind_waves",
- "wave_spectral_kurtosis",
- "wave_spectral_peakedness",
- "wave_spectral_skewness",
- "zero_degree_level",
- ],
- "year": year,
- "month": month,
- "day": day,
- "time": "14:00",
- "area": [
- 51,
- -6,
- 41,
- 10,
- ],
- "format": "netcdf",
- },
- file_path,
- )
-    # TODO: retrieve only the variables needed by the model
-
-
-def call_fwi(output_path: str, year: str, month: str, day: str) -> None:
- """Get data from Fire danger indices historical data from the Copernicus Climate Data Store.
-
- Information on FWI can be found here:
- https://datastore.copernicus-climate.eu/c3s/published-forms/c3sprod/cems-fire-historical/Fire_In_CDS.pdf
-
- Please follow the instructions before using the CDS API: https://cds.climate.copernicus.eu/api-how-to
-    The most recent available data is day -2.
-
- Args:
- output_path: str
- year: str
- month: str
- day: str
- """
-
- file_path = os.path.join(output_path, f"fwi_{year}_{month}_{day}.zip")
-
- if os.path.exists(file_path):
- logger.info(f"Using cached {file_path}")
- return
-
- c = cdsapi.Client(url=cfg.CDS_URL, key=f"{cfg.CDS_UID}:{cfg.CDS_API_KEY}", verify=0)
-
- c.retrieve(
- "cems-fire-historical",
- {
- "format": "zip",
- "dataset": "Intermediate dataset",
- "year": year,
- "month": month,
- "version": "3.1",
- "variable": [
- "build_up_index",
- "danger_risk",
- "drought_code",
- "duff_moisture_code",
- "fine_fuel_moisture_code",
- "fire_daily_severity_rating",
- "fire_weather_index",
- "initial_fire_spread_index",
- ],
- "product_type": "reanalysis",
- "day": day,
- },
- file_path,
- )
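-
-# Illustrative usage sketch (not part of the original module): downloading the
-# fire danger indices for 5 May 2020 into /tmp, assuming a configured CDS account:
-#
-#     call_fwi(output_path="/tmp", year="2020", month="05", day="05")
-#     # -> writes /tmp/fwi_2020_05_05.zip, or reuses it if already cached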
diff --git a/pyro_risks/datasets/utils.py b/pyro_risks/datasets/utils.py
deleted file mode 100644
index c55a4c1..0000000
--- a/pyro_risks/datasets/utils.py
+++ /dev/null
@@ -1,370 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import requests
-import os
-import gzip
-import tarfile
-import shutil
-import warnings
-
-from scipy import spatial
-from typing import Tuple, Optional, List, Any
-
-from io import BytesIO
-from datetime import datetime
-from urllib.parse import urlparse
-from zipfile import ZipFile
-
-import numpy as np
-import pandas as pd
-
-
-def get_intersection_range(ts1: pd.Series, ts2: pd.Series) -> pd.DatetimeIndex:
- """Computes the intersecting date range of two series.
-
- Args:
- ts1: time series
- ts2: time series
- """
- # Time span selection
- time_range1 = max(ts1.min(), ts2.min())
- time_range2 = min(ts1.max(), ts2.max())
- if time_range1 > time_range2:
- raise ValueError("Extracts do not have intersecting date range")
-
- return pd.date_range(time_range1, time_range2)
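-
-# Illustrative example (not in the original module):
-#
-#     >>> ts1 = pd.Series(pd.date_range("2020-01-01", "2020-01-10"))
-#     >>> ts2 = pd.Series(pd.date_range("2020-01-05", "2020-01-20"))
-#     >>> get_intersection_range(ts1, ts2)  # daily index from 2020-01-05 to 2020-01-10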
-
-
-def find_closest_weather_station(
-    df_weather: pd.DataFrame, latitude: float, longitude: float
-) -> int:
- """
-    Find the id of the weather station closest to a given point.
-
-    The weather dataframe must contain a "STATION" column giving the id of
-    each weather station in the dataset.
-
- Args:
- df_weather: pd.DataFrame
- Dataframe of weather conditions
- latitude: float
- Latitude of the point to which we want to find the closest
- weather station
- longitude: float
- Longitude of the point to which we want to find the closest
- weather station
-
- Returns: int
-        Id of the weather station closest to the point (lat, lon)
-
- """
- if "STATION" not in df_weather.columns:
- raise ValueError("STATION column is missing in given weather dataframe.")
-
- weather = df_weather.drop_duplicates(subset=["STATION", "LATITUDE", "LONGITUDE"])
-
- zipped_station_lat_lon = zip(
- weather["STATION"].values.tolist(),
- weather["LATITUDE"].values.tolist(),
- weather["LONGITUDE"].values.tolist(),
- )
- list_station_lat_lon = list(zipped_station_lat_lon)
-
- reference_station = list_station_lat_lon[0][0]
- latitude_0 = list_station_lat_lon[0][1]
- longitude_0 = list_station_lat_lon[0][2]
-
- min_distance = np.sqrt(
- (latitude - latitude_0) ** 2 + (longitude - longitude_0) ** 2
- )
-
- for k in range(1, weather.shape[0]):
- current_latitude = list_station_lat_lon[k][1]
- current_longitude = list_station_lat_lon[k][2]
- current_distance = np.sqrt(
- (latitude - current_latitude) ** 2 + (longitude - current_longitude) ** 2
- )
-
- if current_distance < min_distance:
- min_distance = current_distance
- reference_station = list_station_lat_lon[k][0]
-
- return int(reference_station)
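-
-# Illustrative example (not in the original module), with a toy stations table:
-#
-#     >>> df = pd.DataFrame(
-#     ...     {"STATION": [1, 2], "LATITUDE": [48.8, 43.6], "LONGITUDE": [2.3, 1.4]}
-#     ... )
-#     >>> find_closest_weather_station(df, 48.7, 2.4)
-#     1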
-
-
-def find_closest_location(
- df_weather: pd.DataFrame, latitude: float, longitude: float
-) -> Tuple[float, float]:
- """
- For a given point (`latitude`, `longitude`), get the closest point which exists in `df_weather`.
-    This function is to be used when the user does not use weather station data but
-    satellite data instead, e.g. ERA5 Land variables.
-
- Args:
- df_weather: pd.DataFrame
- Dataframe of land/weather conditions
- latitude: float
- Latitude of the point to which we want to find the closest point in `df_weather`.
- longitude: float
-            Longitude of the point to which we want to find the closest point in `df_weather`.
-
- Returns: Tuple(float, float)
-        Tuple (closest_lat, closest_lon) of the weather point closest to the point (lat, lon)
- """
- if "STATION" in df_weather.columns:
- raise ValueError(
- "STATION is in the columns, should use `find_closest_weather_station`."
- )
-
- weather = df_weather.drop_duplicates(subset=["latitude", "longitude"])
-
- zipped_points_lat_lon = zip(
- weather["latitude"].values.tolist(), weather["longitude"].values.tolist()
- )
- list_station_lat_lon = list(zipped_points_lat_lon)
-
- latitude_0 = list_station_lat_lon[0][0]
- longitude_0 = list_station_lat_lon[0][1]
- reference_point = (latitude_0, longitude_0)
-
- min_distance = np.sqrt(
- (latitude - latitude_0) ** 2 + (longitude - longitude_0) ** 2
- )
-
- for k in range(1, weather.shape[0]):
- current_latitude = list_station_lat_lon[k][0]
- current_longitude = list_station_lat_lon[k][1]
- current_distance = np.sqrt(
- (latitude - current_latitude) ** 2 + (longitude - current_longitude) ** 2
- )
-
- if current_distance < min_distance:
- min_distance = current_distance
- reference_point = (current_latitude, current_longitude)
-
- return reference_point
-
-
-def url_retrieve(url: str, timeout: Optional[float] = None) -> bytes:
- """Retrives and pass the content of an URL request.
-
- Args:
- url: URL to request
-        timeout: number of seconds before the request times out. Defaults to None (no timeout).
-
-    Raises:
-        requests.exceptions.ConnectionError: if the response status code is not 200
-
-    Returns:
-        Content of the response
- """
- response = requests.get(url, timeout=timeout, allow_redirects=True)
- if response.status_code != 200:
- raise requests.exceptions.ConnectionError(
- f"Error code {response.status_code} - could not download {url}"
- )
- return response.content
-
-
-def get_fname(url: str) -> Tuple[str, Optional[str], Optional[str]]:
- """Find file name, extension and compression of an archive located by an URL.
-
- Args:
- url: URL of the compressed archive
-
- Raises:
- ValueError: if URL contains more than one extension
- ValueError: if URL contains more than one compression format
-
- Returns:
- A tuple containing the base file name, extension and compression format
- """
- supported_compressions = ["tar", "gz", "zip"]
- supported_extensions = ["csv", "geojson", "shp", "shx", "nc"]
-
- archive_name = urlparse(url).path.rpartition("/")[-1]
-
- base = archive_name.split(".")[0]
-
- list_extensions = list(set(supported_extensions) & set(archive_name.split(".")))
- list_compressions = list(set(supported_compressions) & set(archive_name.split(".")))
-
- if len(list_extensions) == 0:
- extension = None
- elif len(list_extensions) == 1:
- extension = list_extensions[0]
- else:
- raise ValueError(f"Error {url} contains more than one extension")
-
- if len(list_compressions) == 0:
- compression = None
-
- elif len(list_compressions) == 1:
- compression = list_compressions[0]
-
- elif len(list_compressions) == 2:
- compression = "tar.gz"
-
- else:
- raise ValueError(f"Error {url} contains more than one compression format")
-
- return (base, extension, compression)
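-
-# Illustrative example (not in the original module):
-#
-#     >>> get_fname("https://example.com/data/archive.csv.tar.gz")
-#     ('archive', 'csv', 'tar.gz')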
-
-
-def download(
- url: str,
- default_extension: str,
- unzip: Optional[bool] = True,
- destination: str = "./tmp",
-) -> None:
- """Helper function for downloading, unzipping and saving compressed file from a given URL.
-
- Args:
- url: URL of the compressed archive
- default_extension: extension of the archive
- unzip: whether archive should be unzipped. Defaults to True.
-        destination: folder where the file should be saved. Defaults to './tmp'.
- """
- base, extension, compression = get_fname(url)
- content = url_retrieve(url)
-
- if unzip and compression == "zip":
- os.makedirs(os.path.dirname(destination), exist_ok=True)
- with ZipFile(BytesIO(content)) as zip_file:
- zip_file.extractall(destination)
-
- elif unzip and compression == "tar.gz":
- os.makedirs(os.path.dirname(destination), exist_ok=True)
- with tarfile.open(fileobj=BytesIO(content), mode="r:gz") as tar_file:
- tar_file.extractall(path=destination)
-
- elif unzip and compression == "gz":
- file_name = (
- f"{base}.{extension}"
- if extension is not None
- else f"{base}.{default_extension}"
- )
- full_path = os.path.join(destination, file_name)
- os.makedirs(os.path.dirname(full_path), exist_ok=True)
- with gzip.open(BytesIO(content)) as gzip_file, open(
- full_path, "wb+"
- ) as unzipped_file:
- shutil.copyfileobj(gzip_file, unzipped_file)
-
- elif not unzip and compression is None:
- file_name = (
- f"{base}.{extension}"
- if extension is not None
- else f"{base}.{default_extension}"
- )
- full_path = os.path.join(destination, file_name)
- os.makedirs(os.path.dirname(full_path), exist_ok=True)
- with open(full_path, "wb+") as file:
- file.write(content)
-
- elif not unzip and isinstance(compression, str):
- file_name = f"{base}.{compression}"
- full_path = os.path.join(destination, file_name)
- os.makedirs(os.path.dirname(full_path), exist_ok=True)
- with open(full_path, "wb+") as file:
- file.write(content)
-
- else:
- raise ValueError("If the file is not compressed set unzip to False")
-
-
-def get_ghcn(
- start_year: Optional[int] = None,
- end_year: Optional[int] = None,
- destination: str = "./ghcn",
-) -> None:
- """Download yearly Global Historical Climatology Network - Daily (GHCN-Daily) (.csv) From (NCEI).
-
- Args:
- start_year: first year to be retrieved. Defaults to None.
-        end_year: first year that will not be retrieved. Defaults to None.
- destination: destination directory. Defaults to './ghcn'.
- """
- start_year = datetime.now().year if start_year is None else start_year
- end_year = (
- datetime.now().year + 1
- if end_year is None or start_year == end_year
- else end_year
- )
-
- for year in range(start_year, end_year):
- url = f"https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/by_year/{year}.csv.gz"
- download(url=url, default_extension="csv", unzip=True, destination=destination)
-
-
-def get_modis(
- start_year: Optional[int] = None,
- end_year: Optional[int] = None,
- yearly: Optional[bool] = False,
- destination: str = "./firms",
-) -> None:
- """Download last 24H or yearly France active fires from the FIRMS NASA.
- Args:
- start_year: first year to be retrieved. Defaults to None.
- end_year: first that will not be retrieved. Defaults to None.
- yearly: whether to download yearly active fires or not. Defaults to False.
- destination: destination directory. Defaults to './firms'.]
- """
- if yearly is True:
- start_year = datetime.now().year - 1 if start_year is None else start_year
- end_year = (
- datetime.now().year
- if end_year is None or start_year == end_year
- else end_year
- )
-
- for year in range(start_year, end_year):
-            assert (
-                2000 <= year <= 2019
-            ), "MODIS active fire archives are only available for the years 2000 to 2019"
- url = f"https://firms.modaps.eosdis.nasa.gov/data/country/modis/{year}/modis_{year}_France.csv"
- download(
- url=url, default_extension="csv", unzip=False, destination=destination
- )
-
-    else:
-        if start_year is not None:
-            warnings.warn(
-                "start_year is ignored: only the active fires from the last 24h "
-                "of the MODIS satellite will be downloaded."
-            )
-        url = "https://firms.modaps.eosdis.nasa.gov/data/active_fire/c6/csv/MODIS_C6_Europe_24h.csv"
-        download(
-            url=url, default_extension="csv", unzip=False, destination=destination
-        )
-
-
-def get_nearest_points(
- source_points: List[Tuple[Any, Any]], candidates: List[Tuple[Any, Any]]
-) -> Tuple:
- """
- Find nearest neighbor for all source points from a set of candidate points
-    using the KDTree algorithm.
-
- Args:
- source_points: List[Tuple]
- List of tuples (lat, lon) for which you want to find the closest point in candidates.
- candidates: List[Tuple]
- List of tuples (lat, lon) which are all possible closest points.
-
- Returns: Tuple
- indices : array of integers
- The locations of the neighbors in candidates.
- distances : array of floats
-            The distances to the nearest neighbors.
- """
- # Create tree from the candidate points
- tree = spatial.cKDTree(candidates)
-
- # Find closest points and distances
- distances, indices = tree.query(source_points, k=1)
-
- return indices, distances
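-
-# Illustrative example (not in the original module): match two points of
-# interest to their nearest candidate grid points:
-#
-#     >>> sources = [(48.85, 2.35), (43.60, 1.44)]
-#     >>> candidates = [(48.80, 2.30), (43.70, 1.40), (45.75, 4.85)]
-#     >>> indices, distances = get_nearest_points(sources, candidates)
-#     >>> indices
-#     array([0, 1])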
diff --git a/pyro_risks/datasets/weather.py b/pyro_risks/datasets/weather.py
deleted file mode 100644
index d510abd..0000000
--- a/pyro_risks/datasets/weather.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import logging
-import pandas as pd
-import geopandas as gpd
-from typing import List, Optional
-
-from pyro_risks import config as cfg
-from .masks import get_french_geom
-
-__all__ = ["NOAAWeather"]
-
-
-class NOAAWeather(pd.DataFrame):
- """Weather dataset on French territory, accessible upon request to NOAA. Requests are to be made at:
- https://www.ncdc.noaa.gov/cdo-web.
-
- Args:
- source_path: path or URL to your version of the source data
- use_cols: columns to read from source
- """
-
- kept_cols = [
- "STATION",
- "DATE",
- "LATITUDE",
- "LONGITUDE",
- "ELEVATION",
- "DEWP",
- "DEWP_ATTRIBUTES",
- "FRSHTT",
- "GUST",
- "MAX",
- "MIN",
- "MXSPD",
- "PRCP",
- "SLP",
- "SLP_ATTRIBUTES",
- "SNDP",
- "STP",
- "STP_ATTRIBUTES",
- "TEMP",
- "TEMP_ATTRIBUTES",
- "VISIB",
- "VISIB_ATTRIBUTES",
- "WDSP",
- "WDSP_ATTRIBUTES",
- ]
-
- def __init__(
- self, source_path: Optional[str] = None, use_cols: Optional[List[str]] = None
- ) -> None:
- if not isinstance(source_path, str):
-            # Fall back to the default remote source
- logging.warning(
- f"No data source specified for {self.__class__.__name__}, trying fallback."
- )
- source_path = cfg.FR_WEATHER_FALLBACK
- if not isinstance(use_cols, list):
- use_cols = self.kept_cols
- data = pd.read_csv(source_path, usecols=use_cols)
- geo_df = gpd.GeoDataFrame(
- data,
- geometry=gpd.points_from_xy(data["LONGITUDE"], data["LATITUDE"]),
- crs="EPSG:4326",
- )
-        # Match each point to the polygon of its predefined country area
- geo_masks = get_french_geom()
- geo_data = gpd.sjoin(geo_masks, geo_df, how="inner")
- # Drop NA
- geo_data = geo_data.dropna(axis=1)
- # Convert
- geo_data["DATE"] = pd.to_datetime(
- geo_data["DATE"], format="%Y-%m-%d", errors="coerce"
- )
- # Drop Cols
- super().__init__(geo_data.drop(["index_right", "geometry"], axis=1))
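-
-# Illustrative usage sketch (not part of the original module); the path below
-# is a placeholder for a local copy of the NOAA extract:
-#
-#     weather = NOAAWeather()                           # uses cfg.FR_WEATHER_FALLBACK
-#     weather = NOAAWeather(source_path="weather.csv")  # uses a local file instead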
diff --git a/pyro_risks/datasets/wildfires.py b/pyro_risks/datasets/wildfires.py
deleted file mode 100644
index e8c9f57..0000000
--- a/pyro_risks/datasets/wildfires.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import logging
-import pandas as pd
-from typing import List, Optional
-
-from pyro_risks import config as cfg
-
-
-__all__ = ["BDIFFHistory"]
-
-
-class BDIFFHistory(pd.DataFrame):
- """Wildfire history dataset on French territory, accessible at https://bdiff.agriculture.gouv.fr/.
-
- Args:
- source_path: path or URL to your version of the source data
- use_cols: columns to read from source
- """
-
- kept_cols = ["Date de première alerte", "Département", "Statut"]
-
- def __init__(
- self, source_path: Optional[str] = None, use_cols: Optional[List[str]] = None
- ) -> None:
- if not isinstance(source_path, str):
-            # Fall back to the default remote source
- logging.warning(
- f"No data source specified for {self.__class__.__name__}, trying fallback."
- )
- source_path = cfg.FR_FIRES_FALLBACK
- if not isinstance(use_cols, list):
- use_cols = self.kept_cols
- data = pd.read_csv(source_path, sep=";", usecols=use_cols)
- tmp = pd.to_datetime(
- data["Date de première alerte"], format="%Y-%m-%d %H:%M:%S", errors="coerce"
- )
- data["date"] = tmp.dt.normalize() # Set time to 00:00:00 for each entry
- # Drop Cols
- super().__init__(data.drop(["Date de première alerte"], axis=1))
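-
-# Illustrative usage sketch (not part of the original module):
-#
-#     history = BDIFFHistory()  # falls back to cfg.FR_FIRES_FALLBACK
-#     history[["date", "Département", "Statut"]].head()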
diff --git a/pyro_risks/models/__init__.py b/pyro_risks/models/__init__.py
deleted file mode 100644
index c790e5f..0000000
--- a/pyro_risks/models/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .pipelines import *
-from .transformers import *
-from .utils import *
diff --git a/pyro_risks/models/pipelines.py b/pyro_risks/models/pipelines.py
deleted file mode 100644
index 1a2267b..0000000
--- a/pyro_risks/models/pipelines.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-from imblearn.pipeline import Pipeline
-from .transformers import (
- TargetDiscretizer,
- CategorySelector,
- Imputer,
- LagTransformer,
- FeatureSubsetter,
-)
-from .utils import discretizer
-
-from sklearn.ensemble import RandomForestClassifier
-
-from xgboost import XGBClassifier
-
-import pyro_risks.config as cfg
-
-__all__ = ["rf_pipeline", "xgb_pipeline"]
-
-# pipeline base steps definition
-base_steps = [
- (
- "filter_dep",
- CategorySelector(variable=cfg.ZONE_VAR, category=cfg.SELECTED_DEP),
- ),
- (
- "add_lags",
- LagTransformer(
- date_column=cfg.DATE_VAR,
- zone_column=cfg.ZONE_VAR,
- columns=cfg.LAG_ERA5T_VARS,
- ),
- ),
- ("imputer", Imputer(columns=cfg.MODEL_ERA5T_VARS, strategy="median")),
- ("binarize_target", TargetDiscretizer(discretizer=discretizer)),
- ("subset_features", FeatureSubsetter(columns=cfg.MODEL_ERA5T_VARS)),
-]
-
-# Add estimator to base step lists
-xgb_steps = [*base_steps, ("xgboost", XGBClassifier(**cfg.XGB_PARAMS))]
-rf_steps = [*base_steps, ("random_forest", RandomForestClassifier(**cfg.RF_PARAMS))]
-
-# Define sklearn / imblearn pipelines
-xgb_pipeline = Pipeline(xgb_steps)
-rf_pipeline = Pipeline(rf_steps)
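-
-# Illustrative usage sketch (not part of the original module), assuming X and y
-# come from pyro_risks.pipeline.load.load_dataset():
-#
-#     xgb_pipeline.fit(X, y)
-#     scores = xgb_pipeline.predict_proba(X)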
diff --git a/pyro_risks/models/transformers.py b/pyro_risks/models/transformers.py
deleted file mode 100644
index 395d6df..0000000
--- a/pyro_risks/models/transformers.py
+++ /dev/null
@@ -1,349 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-from typing import List, Union, Optional, Tuple, Callable
-from sklearn.base import BaseEstimator, TransformerMixin
-from sklearn.impute import SimpleImputer
-from .utils import check_xy, check_x
-
-import pandas as pd
-import numpy as np
-
-
-class TargetDiscretizer(BaseEstimator):
- """Discretize numerical target variable.
-
- The `TargetDiscretizer` transformer maps target variable values to discrete values using
- a user defined function.
-
- Parameters:
- discretizer: user defined function.
- """
-
- def __init__(self, discretizer: Callable) -> None:
- if callable(discretizer):
- self.discretizer = discretizer
- else:
- raise TypeError(f"{self.__class__.__name__} constructor expect a callable")
-
- def fit_resample(
- self, X: pd.DataFrame, y: pd.Series
- ) -> Tuple[pd.DataFrame, pd.Series]:
- """Discretize the target variable.
-
- The `fit_resample` method allows for discretizing the target variable.
-        The method does not resample the dataset; the naming convention ensures
-        the compatibility of the transformer with the imbalanced-learn `Pipeline`
-        object.
-
- Args:
- X: Training dataset features
- y: Training dataset target
-
- Returns:
- Training dataset features and target tuple.
- """
- X, y = check_xy(X, y)
-
- y = y.apply(self.discretizer)
-
- return X, y
-
-
-class CategorySelector(BaseEstimator):
- """Select features and targets rows.
-
-    The `CategorySelector` transformer selects feature and target rows
- belonging to given variable categories.
-
- Parameters:
- variable: variable to be used for selection.
- category: modalities to be selected.
- """
-
- def __init__(self, variable: str, category: Union[str, list]) -> None:
- self.variable = variable
- # Catch or prevent key errors
- if isinstance(category, str):
- self.category = [category]
- elif isinstance(category, list):
- self.category = category
- else:
- raise TypeError(
- f"{self.__class__.__name__} constructor category argument expect a string or a list"
- )
-
- def fit_resample(
- self, X: pd.DataFrame, y: Optional[pd.Series] = None
- ) -> Tuple[pd.DataFrame, pd.Series]:
- """Select features and targets rows.
-
- The `fit_resample` method allows for selecting the features and target
-        rows. The method does not resample the dataset; the naming convention ensures
-        the compatibility of the transformer with the imbalanced-learn `Pipeline`
- object.
-
- Args:
- X: Training dataset features
- y: Training dataset target
-
- Returns:
- Training dataset features and target tuple.
- """
- if isinstance(X, pd.DataFrame) and isinstance(y, pd.Series):
- mask = X[self.variable].isin(self.category)
- XR = X[mask].copy()
- yr = y[mask].copy()
-
- else:
- raise TypeError(
- f"{self.__class__.__name__} fit_resample methods expect pd.DataFrame and\
- pd.Series as inputs."
- )
-
- return XR, yr
-
-
-class Imputer(SimpleImputer):
- """Impute missing values.
-
- The `Imputer` transformer wraps scikit-learn SimpleImputer transformer.
-
- Parameters:
- missing_values: the placeholder for the missing values.
- strategy: the imputation strategy (mean, median, most_frequent, constant).
-        fill_value: value used to replace all occurrences of missing_values when strategy is "constant" (defaults to None, i.e. 0 for numerical data).
- verbose: controls the verbosity of the imputer.
- copy: If True, a copy of X will be created.
- add_indicator: If True, a MissingIndicator transform will stack onto output of the imputer’s transform.
- """
-
- def __init__(
- self,
- columns: list,
- missing_values: Union[int, float, str] = np.nan,
- strategy: str = "mean",
-        fill_value: Optional[float] = None,
- verbose: int = 0,
- copy: bool = True,
- add_indicator: bool = False,
- ) -> None:
- super().__init__(
- missing_values=missing_values,
- strategy=strategy,
- fill_value=fill_value,
- copy=copy,
- add_indicator=add_indicator,
- )
-
- self.columns = columns
-
- def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None) -> "Imputer":
- """
- Fit the imputer on X.
-
- Args:
- X: Training dataset features.
- y: Training dataset target.
-
- Returns:
- Transformer.
- """
- X, y = check_xy(X[self.columns], y)
-
- super().fit(X, y)
- return self
-
- def transform(self, X: pd.DataFrame) -> pd.DataFrame:
- """
- Impute X missing values.
-
- Args:
- X: Training dataset features.
-
- Returns:
- Transformed training dataset.
- """
- X = check_x(X)
- XS = check_x(X[self.columns])
-
- X[self.columns] = super().transform(XS)
-
- return X
-
-
-class LagTransformer(BaseEstimator, TransformerMixin):
- """Add lags features of the selected columns.
-
- Lags added correspond to day -1, -3 and -7 and are added to each department separately.
-
- Parameters:
- date_column: date column.
-        zone_column: geographical zoning column.
- columns: columns to add lag.
- """
-
- def __init__(self, date_column: str, zone_column: str, columns: List[str]) -> None:
- self.date_column = date_column
- self.zone_column = zone_column
- self.columns = columns
-
- def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None) -> "LagTransformer":
- """
-        Fit the transformer on X.
-
- Args:
- X: Training dataset features.
- y: Training dataset target.
-
- Returns:
- Transformer.
- """
- X, y = check_xy(X, y)
-
- return self
-
- def transform(self, X: pd.DataFrame) -> pd.DataFrame:
- """
- Create lag features.
-
- Args:
- X: Training dataset features.
-
- Returns:
- Transformed training dataset.
- """
- X = check_x(X)
-
- if X[self.date_column].dtypes != "datetime64[ns]":
- raise TypeError(
- f"{self.__class__.__name__} transforme methods expect date_column of type datetime64[ns]"
- )
-
- for var in self.columns:
- for dep in X[self.zone_column].unique():
- tmp = X[X[self.zone_column] == dep][[self.date_column, var]].set_index(
- self.date_column
- )
- tmp1 = tmp.copy()
- tmp1 = tmp1.join(
- tmp.shift(periods=1, freq="D"), rsuffix="_lag1", how="left"
- )
- tmp1 = tmp1.join(
- tmp.shift(periods=3, freq="D"), rsuffix="_lag3", how="left"
- )
- tmp1 = tmp1.join(
- tmp.shift(periods=7, freq="D"), rsuffix="_lag7", how="left"
- )
- new_vars = [var + "_lag1", var + "_lag3", var + "_lag7"]
- X.loc[X[self.zone_column] == dep, new_vars] = tmp1[new_vars].values
- return X
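-
-# Illustrative example (not in the original module): for a hypothetical column
-# "t2m", the transformer appends "t2m_lag1", "t2m_lag3" and "t2m_lag7" holding
-# the values observed 1, 3 and 7 days earlier within the same zone:
-#
-#     lagger = LagTransformer(
-#         date_column="day", zone_column="departement", columns=["t2m"]
-#     )
-#     X_lagged = lagger.fit(X).transform(X)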
-
-
-class FeatureSelector(BaseEstimator, TransformerMixin):
- """Select features correlated to the target.
-
- Select features with correlation to the target above the threshold.
-
- Parameters:
- exclude: column to exclude from correlation calculation.
- method: correlation matrix calculation method.
-        threshold: minimum absolute correlation with the target for a feature to be kept.
- """
-
- def __init__(
- self, exclude: List[str], method: str = "pearson", threshold: float = 0.15
- ) -> None:
- self.exclude = exclude
- self.method = method
- self.threshold = threshold
-
- def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None) -> "FeatureSelector":
- """Fit the FeatureSelector on X.
-
- Compute the correlation matrix.
-
- Args:
- X: Training dataset features.
- y: Training dataset target.
-
- Returns:
- Transformer.
- """
- X, y = check_xy(X, y)
- self.target_correlation = (
- pd.concat([X, y], axis=1)
- .corr(method=self.method)
- .loc[y.name]
- .apply(abs)
- .sort_values(ascending=False)
- )
- self.target_correlation = self.target_correlation[
- self.target_correlation.index != y.name
- ]
-
- return self
-
- def transform(self, X: pd.DataFrame) -> pd.DataFrame:
- """
-        Select the features correlated to the target.
-
- Args:
- X: Training dataset features.
-
- Returns:
- Transformed training dataset.
- """
- X = check_x(X)
-
- mask = self.target_correlation > self.threshold
- self.selected_features = self.target_correlation[mask].index.tolist()
-
- return X[self.selected_features]
-
-
-class FeatureSubsetter(BaseEstimator, TransformerMixin):
- """Subset dataframe's column.
-
- Subset any given of the dataframe.
-
- Parameters:
- threshold: columns on which to add lags
- """
-
- def __init__(self, columns: List[str]) -> None:
- self.columns = columns
-
- def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None) -> "FeatureSubsetter":
- """Comply with pipeline requirements.
-
-        The method does not fit the dataset; the naming convention ensures
-        the compatibility of the transformer with the scikit-learn `Pipeline`
- object.
-
- Args:
- X: Training dataset features.
- y: Training dataset target.
-
- Returns:
- Transformer.
- """
- X, y = check_xy(X, y)
-
- return self
-
- def transform(self, X: pd.DataFrame) -> pd.DataFrame:
- """
- Select columns.
-
- Args:
- X: Training dataset features.
-
- Returns:
- Training dataset features subset.
- """
- X = check_x(X)
-
- return X[self.columns]
diff --git a/pyro_risks/models/utils.py b/pyro_risks/models/utils.py
deleted file mode 100644
index 3356b5c..0000000
--- a/pyro_risks/models/utils.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-from typing import Tuple
-import pandas as pd
-
-__all__ = ["check_xy", "check_x", "discretizer"]
-
-
-def check_xy(X: pd.DataFrame, y: pd.Series) -> Tuple[pd.DataFrame, pd.Series]:
- """
- Validate inputs for transformers.
-
- Args:
- X: Training dataset features pd.DataFrame.
- y: Training dataset target pd.Series.
-
- Raises:
- TypeError: Transformer methods expect pd.DataFrame and pd.Series as inputs.
-
- Returns:
- Copy of the inputs.
- """
- if isinstance(X, pd.DataFrame) and isinstance(y, pd.Series):
- X = X.copy()
- y = y.copy()
- else:
- raise TypeError(
- "Transformer methods expect pd.DataFrame\
- and pd.Series as inputs."
- )
- return X, y
-
-
-def check_x(X: pd.DataFrame) -> pd.DataFrame:
- """
-    Validate inputs for transformers.
-
- Args:
- X: Training dataset features pd.DataFrame.
-
- Raises:
- TypeError: Transformer methods expect pd.DataFrame as inputs.
-
- Returns:
- Copy of the inputs.
- """
- if isinstance(X, pd.DataFrame):
- X = X.copy()
- else:
- raise TypeError("Transformer methods expect pd.DataFrame as inputs")
- return X
-
-
-def discretizer(x: float) -> int:
- """
- Discretize values.
-
- Args:
- x (float): value to be discretized
-
- Returns:
- int: discretized value
- """
- return 1 if x > 0 else 0
diff --git a/pyro_risks/pipeline/__init__.py b/pyro_risks/pipeline/__init__.py
deleted file mode 100644
index f464890..0000000
--- a/pyro_risks/pipeline/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .load import *
-from .train import *
-from .predict import *
-from .evaluate import *
diff --git a/pyro_risks/pipeline/evaluate.py b/pyro_risks/pipeline/evaluate.py
deleted file mode 100644
index 17434ef..0000000
--- a/pyro_risks/pipeline/evaluate.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-from typing import Union, Optional
-from datetime import datetime
-from sklearn.model_selection import train_test_split
-from sklearn.metrics import classification_report
-from plot_metric.functions import BinaryClassification
-from pyro_risks.models import discretizer
-from pyro_risks.pipeline.load import load_dataset
-
-import matplotlib.pyplot as plt
-import imblearn.pipeline as pp
-import pyro_risks.config as cfg
-
-import pandas as pd
-import numpy as np
-
-import sys
-import os
-import json
-import joblib
-
-__all__ = [
- "save_classification_reports",
- "save_classification_plots",
- "evaluate_pipeline",
-]
-
-
-def save_classification_reports(
- y_true: np.ndarray,
- y_pred: np.ndarray,
- prefix: Optional[str] = None,
- destination: Optional[str] = None,
-) -> None:
- """
- Build and save binary classification metrics reports.
-
- Args:
- y_true: Ground truth (correct) labels.
- y_pred: Predicted labels, as returned by a calibrated classifier.
- prefix: Classification report prefix i.e. pipeline name. Defaults to None.
- destination: Folder where the report should be saved. Defaults to ``METADATA_REGISTRY``.
- """
- destination = cfg.METADATA_REGISTRY if destination is None else destination
- fname = (
- "classification_report" if prefix is None else prefix + "_classification_report"
- )
- json_report_path = os.path.join(destination, fname + ".json")
- csv_report_path = os.path.join(destination, fname + ".csv")
-
- report = classification_report(y_true, y_pred, output_dict=True)
-
- report.pop("accuracy")
- report.pop("macro avg")
- report.pop("weighted avg")
-
- # JSON report for tracking metrics
- with open(json_report_path, "w") as fp:
- json.dump(obj=report, fp=fp)
-
- # CSV report for plotting classification report
-
- pd.DataFrame(report).transpose().round(3).to_csv(csv_report_path)
-
- print(classification_report(y_true, y_pred))
-
-
-def save_classification_plots(
- y_true: np.ndarray,
- y_proba: np.ndarray,
- threshold: np.float64,
- prefix: Optional[str] = None,
- destination: Optional[str] = None,
-) -> None:
- """
- Build and save binary classification performance evaluation plots.
-
- Args:
- y_true: Ground truth (correct) labels.
-        y_proba: Predicted probabilities of the positive class returned by a classifier.
- threshold: Classification pipeline optimal threshold.
- prefix: Classification plots prefix i.e. pipeline name. Defaults to None.
- destination: Folder where the report should be saved. Defaults to ``METADATA_REGISTRY``.
- """
- destination = cfg.METADATA_REGISTRY if destination is None else destination
- fname = (
- "classification_plots.png"
- if prefix is None
- else prefix + "_classification_plots.png"
- )
- path = os.path.join(destination, fname)
-
- bc = BinaryClassification(y_true, y_proba, labels=["No fire", "Fire"])
-
- plt.figure(figsize=(15, 10))
- plt.subplot2grid(shape=(2, 6), loc=(0, 0), colspan=2)
- bc.plot_roc_curve(threshold=threshold)
- plt.subplot2grid((2, 6), (0, 2), colspan=2)
- bc.plot_precision_recall_curve(threshold=threshold)
- plt.subplot2grid((2, 6), (0, 4), colspan=2)
- bc.plot_class_distribution(threshold=threshold)
- plt.subplot2grid((2, 6), (1, 1), colspan=2)
- bc.plot_confusion_matrix(threshold=threshold)
- plt.subplot2grid((2, 6), (1, 3), colspan=2)
- bc.plot_confusion_matrix(threshold=threshold, normalize=True)
-
- plt.savefig(path)
-
-
-def evaluate_pipeline(
- X: pd.DataFrame,
- y: pd.Series,
- pipeline: Union[pp.Pipeline, str],
- threshold: str,
- prefix: Optional[str] = None,
- destination: Optional[str] = None,
-) -> None:
- """
- Build and save binary classification evaluation reports.
-
- Args:
- X: Training dataset features pd.DataFrame.
- y: Training dataset target pd.Series.
- pipeline: imbalanced-learn preprocessing pipeline or path to pipeline.
- threshold: Classification pipeline optimal threshold path.
- prefix: Classification reports prefix i.e. pipeline name. Defaults to None.
- destination: Folder where the report should be saved. Defaults to ``METADATA_REGISTRY``.
- """
- # setup
- _, X_test, _, y_test = train_test_split(
- X, y, test_size=cfg.TEST_SIZE, random_state=cfg.RANDOM_STATE
- )
-
- if not isinstance(pipeline, pp.Pipeline):
- pipeline = joblib.load(pipeline)
-
- y_proba = pipeline.predict_proba(X_test)
-
- with open(threshold, "r") as file:
- optimal_threshold = json.load(file)
-
- def predict(x):
- return 1 if x > optimal_threshold["threshold"] else 0
-
- vpredict = np.vectorize(predict)
- vdiscretizer = np.vectorize(discretizer)
-
- y_pred = vpredict(y_proba[:, 1])
- y_test = vdiscretizer(y_test)
-
- save_classification_reports(
- y_true=y_test, y_pred=y_pred, prefix=prefix, destination=destination
- )
-
- save_classification_plots(
- y_true=y_test,
- y_proba=y_proba[:, 1],
- threshold=optimal_threshold["threshold"],
- prefix=prefix,
- destination=destination,
- )
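-
-# Illustrative usage sketch (not part of the original module); the registry
-# paths below are placeholders:
-#
-#     X, y = load_dataset()
-#     evaluate_pipeline(
-#         X, y,
-#         pipeline="model_registry/RF.joblib",
-#         threshold="model_registry/RF_threshold.json",
-#         prefix="RF",
-#     )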
diff --git a/pyro_risks/pipeline/load.py b/pyro_risks/pipeline/load.py
deleted file mode 100644
index d479699..0000000
--- a/pyro_risks/pipeline/load.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-from typing import Optional, List, Tuple
-from pyro_risks.datasets.utils import download
-from datetime import datetime
-
-import pyro_risks.config as cfg
-import pandas as pd
-import os
-
-__all__ = ["load_dataset"]
-
-
-def load_dataset(
- url: Optional[str] = None,
- path: Optional[str] = None,
- usecols: Optional[List[str]] = None,
- pipeline_cols: Optional[List[str]] = None,
-    destination: Optional[str] = None,
-) -> Tuple[pd.DataFrame, pd.Series]:
- """
- Load Pyro Risks training datasets.
-
- Download and load Pyro Risks training datasets.
-
- Args:
- url: Training dataset URL. Defaults to None.
- path: Dataset full path. Defaults to None.
- usecols: Subset of the dataset columns. Defaults to None.
- pipeline_cols: Subset of the dataset used for training. Defaults to None.
- destination: folder where the dataset should be saved. Defaults to None.
-
- Returns:
- Tuple[pd.DataFrame, pd.Series]
- """
- url = cfg.ERA5T_VIIRS_PIPELINE if url is None else url
- path = os.path.join(cfg.DATA_REGISTRY, cfg.DATASET) if path is None else path
- usecols = (
- [cfg.DATE_VAR, cfg.ZONE_VAR, cfg.TARGET] + cfg.PIPELINE_ERA5T_VARS
- if usecols is None
- else usecols
- )
- pipeline_cols = (
- [cfg.DATE_VAR, cfg.ZONE_VAR] + cfg.PIPELINE_ERA5T_VARS
- if pipeline_cols is None
- else pipeline_cols
- )
- destination = cfg.DATA_REGISTRY if destination is None else destination
-
- if not os.path.isfile(path):
- download(url=url, default_extension="csv", unzip=False, destination=destination)
-
- df = pd.read_csv(path, usecols=usecols)
- df["day"] = df["day"].apply(
- lambda x: datetime.strptime(str(x), "%Y-%m-%d") if not pd.isnull(x) else x
- )
-
- X = df[pipeline_cols]
- y = df[cfg.TARGET]
- return X, y
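-
-# Illustrative usage sketch (not part of the original module): with no
-# arguments the default ERA5T/VIIRS dataset is downloaded and split into the
-# feature matrix and the target:
-#
-#     X, y = load_dataset()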
diff --git a/pyro_risks/pipeline/predict.py b/pyro_risks/pipeline/predict.py
deleted file mode 100644
index 29d210e..0000000
--- a/pyro_risks/pipeline/predict.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-# type: ignore
-from pyro_risks import config as cfg
-from pyro_risks.datasets.fwi import get_fwi_data_for_predict
-from pyro_risks.datasets.ERA5 import get_data_era5t_for_predict
-from pyro_risks.datasets.era_fwi_viirs import process_dataset_to_predict
-from typing import Optional, List
-from io import BytesIO
-
-import pandas as pd
-import dvc.api
-import joblib
-import logging
-import os
-
-
-__all__ = ["PyroRisk"]
-
-
-class PyroRisk(object):
- """
- Pyronear Wildfire Risk Forecaster
-
-    Load a trained pipeline from the pyrorisks remote model registry, download features from publicly
-    available data sources (CDS API) and forecast the local (NUTS 3 level) daily wildfire risks
-    (forest fire danger) in a given country (France).
-
- Args:
- model: Can be 'RF' for random forest or 'XGBOOST' for xgboost. Defaults to 'RF'.
-
- Raises:
- ValueError: Model can be only of type RF or XGBOOST
- """
-
- def __init__(self, model: Optional[str] = "RF") -> None:
- self.inputs = None
- self.model = model
- self.pipeline = None
- self.predictions = None
- self.country = None
- self.zone = None
- self.predictions_registry = cfg.PREDICTIONS_REGISTRY
-
- if self.model == "RF":
- self.model_path = cfg.RFMODEL_ERA5T_PATH # file path
- elif self.model == "XGBOOST":
- self.model_path = cfg.XGBMODEL_ERA5T_PATH # file path
- else:
- raise ValueError("Model can be only of type RF or XGBOOST")
-
- def get_pipeline(
- self, path: Optional[str] = None, destination: Optional[str] = None
- ) -> None:
- """Download trained pipeline from remote model registry.
-
- The `get_pipeline` method downloads the selected trained pipeline from the pyrorisks remote
-        model registry. The downloaded pipeline is persisted in the destination joblib file.
-
- Args:
- path: Location and file name of the pipeline to download, relative to the root of the
- dvc project. Defaults to None (self.model_path).
- destination: Location where the pipeline is downloaded. Defaults to None (self.model_path).
- """
- path = self.model_path if path is None else path
- destination = self.model_path if destination is None else destination
-
- pipeline = joblib.load(
- BytesIO(
- dvc.api.read(
- path=path, repo=cfg.REPO_DIR, remote="artifacts-registry", mode="rb"
- )
- )
- )
- joblib.dump(pipeline, destination)
-
- @staticmethod
- def get_inputs(
- day: str,
- country: Optional[str] = "France",
- dir_destination: Optional[str] = None,
- ) -> None:
- """Download datasets and build features for forecasting daily wildfire risks on a given date.
-
-        The `get_inputs` method downloads datasets from publicly available data sources (CDS API) and
-        builds features for forecasting wildfire risks on a given date. The downloaded inputs are
-        persisted in the destination csv file.
-
- Args:
- day: Date of interest ('%Y-%m-%d') for example '2020-05-05'.
- country: Country of interest. Defaults to 'France'.
-            dir_destination: Directory where the daily inputs are persisted.
-                Defaults to None (cfg.PREDICTIONS_REGISTRY).
- """
- # TODO:
- # Delete get_fwi_data_for_predict variables not available at predict time
- # Create process_era5 function
- # Create MergedEraViir class
- dir_destination = (
- cfg.PREDICTIONS_REGISTRY if dir_destination is None else dir_destination
- )
- fname = f"inputs_{country}_{day}.csv"
- destination = os.path.join(dir_destination, fname)
- fwi = get_fwi_data_for_predict(day)
- era = get_data_era5t_for_predict(day)
- res_test = process_dataset_to_predict(fwi, era)
- res_test = res_test.rename({"nom": "departement"}, axis=1)
- res_test.to_csv(destination)
-
- def load_pipeline(self, path: Optional[str] = None) -> None:
- """Load trained pipeline from local path.
-
- Args:
- path: Location where the pipeline has been downloaded. Defaults to None (self.model_path).
- """
- path = self.model_path if path is None else path
-
- if os.path.isfile(path):
- self.pipeline = joblib.load(path)
- else:
- self.get_pipeline(destination=path)
- self.pipeline = joblib.load(path)
-
- def load_inputs(
- self,
- day: str,
- country: Optional[str] = "France",
- usecols: Optional[List[str]] = None,
- dir_path: Optional[str] = None,
- ) -> None:
- """Load inputs from local path.
-
- Args:
- day: Date of interest ('%Y-%m-%d') for example '2020-05-05'.
-            country: Country of interest. Defaults to 'France'.
-            usecols: Subset of the input columns to load. Defaults to None (cfg.PIPELINE_COLS).
-            dir_path: Location where the daily inputs have been persisted. Defaults to None
- (cfg.PREDICTIONS_REGISTRY).
- """
- dir_path = cfg.PREDICTIONS_REGISTRY if dir_path is None else dir_path
- usecols = cfg.PIPELINE_COLS if usecols is None else usecols
- fname = f"inputs_{country}_{day}.csv"
-
- path = os.path.join(dir_path, fname)
-
- if os.path.isfile(path):
- self.inputs = pd.read_csv(path, usecols=usecols)
- else:
- self.get_inputs(day=day, country=country, dir_destination=dir_path)
- self.inputs = pd.read_csv(path, usecols=usecols)
- self.inputs[cfg.DATE_VAR] = pd.to_datetime(self.inputs[cfg.DATE_VAR])
-
- def predict(
- self,
- day: str,
- country: Optional[str] = "France",
- zone_column: Optional[str] = cfg.ZONE_VAR,
- dir_destination: Optional[str] = None,
- ) -> None:
- """Predict local daily wildfire risks in a given country.
-
- Forecast the local (NUTS 3 level) daily wildfire risks (forest fire danger) in a given
- country (France). Note that predictions on fwi and era5land data queried from CDS API
- will return 93 departments instead of 96 for France.
-
- Args:
- day: Date of interest ('%Y-%m-%d') for example '2020-05-05'.
- country: Country of interest. Defaults to 'France'.
-            dir_destination: Location where the pipeline, inputs and predictions are persisted.
-                Defaults to None (cfg.PREDICTIONS_REGISTRY).
-        """
-        dir_destination = (
-            cfg.PREDICTIONS_REGISTRY if dir_destination is None else dir_destination
-        )
-        path = os.path.join(dir_destination, f"{self.model}.joblib")
-        self.load_pipeline(path=path)
-        self.load_inputs(day=day, country=country, dir_path=dir_destination)
-
-        fname = f"{self.model}_predictions_{country}_{day}.joblib"
-        destination = os.path.join(dir_destination, fname)
-
- if self.model == "RF":
- self.predictions = self.pipeline.predict_proba(self.inputs)
- res = dict(zip(self.inputs[zone_column], self.predictions[:, 1].round(3)))
- elif self.model == "XGBOOST":
- self.predictions = self.pipeline.predict_proba(self.inputs)
- res = dict(zip(self.inputs[zone_column], self.predictions.round(3)))
- logging.info(
- f"Predict {country} local wildfire risks on {day}, using {self.model} pipeline."
- )
- joblib.dump(res, destination)
-
- def get_predictions(
- self,
- day: str,
- country: Optional[str] = "France",
- dir_path: Optional[str] = None,
- dir_destination: Optional[str] = None,
- ) -> None:
- """Download predictions for the day of interest from the remote prediction registry.
-
- The `get_predictions` method downloads the forecasted local wildfire risks at a given
-        day in a given country (France). The downloaded predictions are persisted in the
- destination joblib file.
-
- Args:
- day: Date of interest ('%Y-%m-%d') for example '2020-05-05'.
- country: Country of interest. Defaults to 'France'.
- dir_path: Location of the predictions to download, relative to the root of the dvc project.
- Defaults to None.
- dir_destination: Location where the daily predictions are persisted. Defaults to None.
- """
- dir_path = cfg.PREDICTIONS_REGISTRY if dir_path is None else dir_path
- dir_destination = (
- cfg.PREDICTIONS_REGISTRY if dir_destination is None else dir_destination
- )
- fname = f"{self.model}_predictions_{country}_{day}.joblib"
- destination = os.path.join(dir_destination, fname)
- path = os.path.join(dir_path, fname)
-
- predictions = joblib.load(
- BytesIO(
- dvc.api.read(
- path=path, repo=cfg.REPO_DIR, remote="artifacts-registry", mode="rb"
- )
- )
- )
- joblib.dump(predictions, destination)
-
- def expose_predictions(
- self,
- day: str,
- country: Optional[str] = "France",
- dir_path: Optional[str] = None,
- dir_destination: Optional[str] = None,
- ) -> dict:
- """Serves a prediction for the specified day.
-
- Args:
- day: Date of interest ('%Y-%m-%d'), for example '2020-05-05'.
- country: Country of interest. Defaults to 'France'.
- dir_path: Location of the predictions to download, relative to the root of the dvc project. Defaults to None.
- dir_destination: Location where the downloaded predictions are persisted. Defaults to None.
-
- Returns:
- dict[dict]: keys are departements; each value is a dictionary whose 'score' entry holds
- the predicted probability for label 1 (fire) and whose 'explainability' entry holds
- the feature contributions to the prediction.
- """
- fname = f"{self.model}_predictions_{country}_{day}.joblib"
- path = os.path.join(dir_destination, fname)
-
- if os.path.isfile(path):
- self.predictions = joblib.load(path)
- else:
- self.get_predictions(
- day=day,
- country=country,
- dir_path=dir_path,
- dir_destination=dir_destination,
- )
- self.predictions = joblib.load(path)
- return {
- x: {"score": self.predictions[x], "explainability": None}
- for x in self.predictions
- }
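For reference, the per-department scoring that the removed `PyroRisk.predict` RF branch implemented reduces to the sketch below. The classifier, feature frame, and department labels are synthetic stand-ins for the persisted pipeline and inputs; only the zip/round pattern mirrors the deleted code.

```python
# Minimal sketch of the removed RF scoring branch: class-1 probabilities,
# rounded to 3 digits, keyed by the zone column. All data here is synthetic.
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

X = pd.DataFrame(
    np.random.default_rng(0).random((6, 3)),
    columns=["fwi_mean", "ffmc_max", "isi_mean"],
)
zones = pd.Series(["Aisne", "Cantal", "Savoie", "Gard", "Var", "Nord"])
y = np.array([0, 1, 0, 1, 0, 1])

pipeline = RandomForestClassifier(n_estimators=10, random_state=0).fit(X, y)
predictions = pipeline.predict_proba(X)  # shape (n_samples, 2)
res = dict(zip(zones, predictions[:, 1].round(3)))
print(res)
```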
diff --git a/pyro_risks/pipeline/train.py b/pyro_risks/pipeline/train.py
deleted file mode 100644
index b960546..0000000
--- a/pyro_risks/pipeline/train.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-from typing import Union, Optional
-from sklearn.model_selection import train_test_split
-from sklearn.metrics import precision_recall_curve
-from sklearn.utils import estimator_html_repr
-from pyro_risks.models import xgb_pipeline, rf_pipeline, discretizer
-from pyro_risks.pipeline.load import load_dataset
-from datetime import datetime
-import imblearn.pipeline as pp
-import pyro_risks.config as cfg
-
-import sys
-import pandas as pd
-import numpy as np
-
-import os
-import time
-import json
-import joblib
-
-__all__ = ["calibrate_pipeline", "save_pipeline", "train_pipeline"]
-
-
-def calibrate_pipeline(
- y_test: Union[pd.Series, np.ndarray],
- y_scores: Union[pd.Series, np.ndarray],
- ignore_prints: Optional[bool] = False,
-) -> np.float64:
- """
- Calibrate Classification Pipeline.
-
- Args:
- y_test: Binary test target.
- y_scores: Predicted probabilities from the test set.
- ignore_prints: Whether to print results. Defaults to False.
-
- Returns:
- Threshold maximizing the f1-score.
- """
- precision, recall, thresholds = precision_recall_curve(y_test, y_scores[:, 1])
- fscore = (2 * precision * recall) / (precision + recall)
- ix = np.argmax(fscore)
-
- if not ignore_prints:
- print(f"Best Threshold={thresholds[ix]}, F-Score={fscore[ix]}")
-
- return thresholds[ix]
-
-
-def save_pipeline(
- pipeline: pp.Pipeline,
- model: str,
- optimal_threshold: np.float64,
- destination: Optional[str] = None,
- ignore_html: Optional[bool] = False,
-) -> None:
- """
- Serialize pipeline.
-
- Args:
- pipeline: imbalanced-learn preprocessing pipeline.
- model: model name.
- optimal_threshold: model calibration optimal threshold.
- destination: folder where the pipeline should be saved. Defaults to 'cfg.MODEL_REGISTRY'.
- ignore_html: Whether to skip persisting the pipeline HTML description. Defaults to False.
- """
- threshold = {"threshold": float(optimal_threshold)}
- registry = cfg.MODEL_REGISTRY if destination is None else destination
- pipeline_fname = f"{model}.joblib"
- threshold_fname = f"{model}_threshold.json"
- html_fname = f"{model}_pipeline.html"
-
- if not os.path.exists(registry):
- os.makedirs(registry)
-
- joblib.dump(pipeline, os.path.join(registry, pipeline_fname))
-
- with open(registry + "/" + threshold_fname, "w") as file:
- json.dump(threshold, file)
-
- if not ignore_html:
- with open(registry + "/" + html_fname, "w") as file:
- file.write(estimator_html_repr(pipeline))
-
-
-def train_pipeline(
- X: pd.DataFrame,
- y: pd.Series,
- model: str,
- pipeline: Optional[pp.Pipeline] = None,
- destination: Optional[str] = None,
- ignore_prints: Optional[bool] = False,
- ignore_html: Optional[bool] = False,
-) -> None:
- """
- Train a classification pipeline.
-
- Args:
- X: Training dataset features pd.DataFrame.
- y: Training dataset target pd.Series.
- model: model name.
- pipeline: imbalanced-learn preprocessing pipeline. Defaults to None.
- destination: folder where the pipeline should be saved. Defaults to 'cfg.MODEL_REGISTRY'.
- ignore_prints: Whether to print results. Defaults to False.
- ignore_html: Whether to skip persisting the pipeline HTML description. Defaults to False.
- """
- X_train, X_test, y_train, y_test = train_test_split(
- X, y, test_size=cfg.TEST_SIZE, random_state=cfg.RANDOM_STATE
- )
-
- vdiscretizer = np.vectorize(discretizer)
-
- if model == "RF":
- rf_pipeline.fit(X_train, y_train)
- y_scores = rf_pipeline.predict_proba(X_test)
- optimal_threshold = calibrate_pipeline(
- y_test=vdiscretizer(y_test), y_scores=y_scores, ignore_prints=ignore_prints
- )
- save_pipeline(
- pipeline=rf_pipeline,
- model=model,
- optimal_threshold=optimal_threshold,
- destination=destination,
- ignore_html=ignore_html,
- )
-
- elif model == "XGBOOST":
- xgb_pipeline.fit(
- X_train, y_train, xgboost__eval_metric=cfg.XGB_FIT_PARAMS["eval_metric"]
- )
- y_scores = xgb_pipeline.predict_proba(X_test)
- optimal_threshold = calibrate_pipeline(
- y_test=vdiscretizer(y_test), y_scores=y_scores, ignore_prints=ignore_prints
- )
- save_pipeline(
- pipeline=xgb_pipeline,
- model=model,
- optimal_threshold=optimal_threshold,
- destination=destination,
- ignore_html=ignore_html,
- )
-
- elif model not in ["RF", "XGBOOST"] and pipeline is not None:
- pipeline.fit(X_train, y_train)
- y_scores = pipeline.predict_proba(X_test)
- optimal_threshold = calibrate_pipeline(
- y_test=vdiscretizer(y_test), y_scores=y_scores, ignore_prints=ignore_prints
- )
- save_pipeline(
- pipeline=pipeline,
- model=model,
- optimal_threshold=optimal_threshold,
- destination=destination,
- ignore_html=ignore_html,
- )
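The calibration removed above picks the decision threshold that maximizes F1 along a precision-recall curve. A self-contained sketch of that logic, with two small guards the original omitted (the final precision/recall pair has no associated threshold, and precision + recall can be zero); the data is synthetic:

```python
# Sketch of calibrate_pipeline's core: choose the threshold maximizing F1.
import numpy as np
from sklearn.metrics import precision_recall_curve

y_test = np.array([0, 0, 1, 1, 1, 0])
y_scores = np.array([0.10, 0.40, 0.35, 0.80, 0.70, 0.20])  # P(class 1)

precision, recall, thresholds = precision_recall_curve(y_test, y_scores)
# precision/recall have len(thresholds) + 1 entries; drop the final pair,
# which corresponds to no threshold, and avoid 0/0 where both are zero.
with np.errstate(divide="ignore", invalid="ignore"):
    fscore = np.nan_to_num(2 * precision * recall / (precision + recall))
ix = int(np.argmax(fscore[:-1]))
print(f"Best Threshold={thresholds[ix]}, F-Score={fscore[ix]}")
```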
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 3d61bc4..0000000
--- a/setup.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-#!usr/bin/python
-
-"""
-Package installation setup
-"""
-
-import os
-import subprocess
-from setuptools import setup, find_packages
-
-PACKAGE_NAME = "pyro_risks"
-VERSION = "0.0.1"
-
-
-with open("README.md") as f:
- readme = f.read()
-
-requirements = [
- "boto3==1.28.45",
- "botocore==1.31.45",
- "click==8.1.7",
- "geopandas==0.13.2",
- "pandas==2.1.0",
- "python-dotenv==1.0.0",
- "rasterio==1.3.9",
- "requests==2.31.0",
- "numpy==1.26.4",
-]
-
-setup(
- name=PACKAGE_NAME,
- version=VERSION,
- author="Pyronear Contributors",
- description="Pre-processing pipelines and models for wildfire forecasting and monitoring",
- long_description=readme,
- long_description_content_type="text/markdown",
- url="https://github.com/pyronear/pyro-risks",
- download_url="https://github.com/pyronear/pyro-risks/tags",
- license="GPLv3",
- entry_points={"console_scripts": ["pyrorisks = pyro_risks.main:main"]},
- classifiers=[
- "Development Status :: 2 - Pre-Alpha",
- "Intended Audience :: Developers",
- "Intended Audience :: Science/Research",
- "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
- "Natural Language :: English",
- "Operating System :: OS Independent",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
- "Topic :: Scientific/Engineering",
- "Topic :: Scientific/Engineering :: Mathematics",
- "Topic :: Scientific/Engineering :: Artificial Intelligence",
- "Topic :: Software Development",
- "Topic :: Software Development :: Libraries",
- "Topic :: Software Development :: Libraries :: Python Modules",
- ],
- keywords=["data science", "time series", "machine learning"],
- packages=find_packages(exclude=("test",)),
- zip_safe=True,
- python_requires=">=3.6.0",
- include_package_data=True,
- install_requires=requirements,
- package_data={"": ["LICENSE"]},
-)
From 16467db30a155e4d9d41f1fa1074a248ce16e872 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 16:26:24 +0200
Subject: [PATCH 05/41] refactor: Remove pyro_risks CLI
---
pyro_risks/main.py | 144 ---------------------------------------------
1 file changed, 144 deletions(-)
delete mode 100644 pyro_risks/main.py
diff --git a/pyro_risks/main.py b/pyro_risks/main.py
deleted file mode 100644
index 26bae51..0000000
--- a/pyro_risks/main.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-# type: ignore
-from pyro_risks.datasets.utils import download
-from pyro_risks.pipeline import load_dataset, train_pipeline, evaluate_pipeline
-from pyro_risks.pipeline import PyroRisk
-from datetime import date
-
-import pyro_risks.config as cfg
-import click
-
-
-@click.group()
-def main():
- pass
-
-
-@main.group(name="download")
-def download_main():
- pass
-
-
-@download_main.command(name="dataset")
-@click.option("--url", default=cfg.ERA5T_VIIRS_PIPELINE, help="Dataset URL")
-@click.option(
- "--extension", "default_extension", default="csv", help="Dataset file extension"
-)
-@click.option(
- "--unzip",
- is_flag=True,
- default=False,
- help="Wether the dataset file should be unzip or not",
-)
-@click.option(
- "--destination", default=cfg.DATA_REGISTRY, help="Dataset registry local path"
-)
-def _download_dataset(url: str, default_extension: str, unzip: bool, destination: str):
- click.echo(f"Download {cfg.DATASET} dataset in {destination}")
- download(
- url=url,
- default_extension=default_extension,
- unzip=unzip,
- destination=destination,
- )
-
-
-@download_main.command(name="inputs")
-@click.option("--day", help="Date of interest (%Y-%m-%d) for example 2020-05-05")
-@click.option("--country", default="France", help="Country of interest")
-@click.option(
- "--directory", default=cfg.PREDICTIONS_REGISTRY, help="Dataset registry local path"
-)
-def _download_inputs(day: str, country: str, directory: str):
- day = day if day is not None else date.today().strftime("%Y-%m-%d")
- pyrorisk = PyroRisk()
- location = "default directory" if directory is None else directory
- click.echo(f"Download inputs in {location} to fire risks in {country} on {day}")
- pyrorisk.get_inputs(day=day, country=country, dir_destination=directory)
- click.echo("The fire risks inputs are downloaded")
-
-
-@main.command(name="train")
-@click.option("--model", help="Classification Pipeline name RF, XGBOOST")
-@click.option(
- "--destination",
- default=cfg.MODEL_REGISTRY,
- help="Destination folder for persisting pipeline.",
-)
-@click.option(
- "--ignore_prints/--print", is_flag=True, help="Whether to print results or not."
-)
-@click.option(
- "--ignore_html/--html", is_flag=True, help="Persist pipeline html description."
-)
-def _train_pipeline(
- model: str, destination: str, ignore_prints: bool, ignore_html: bool
-) -> None:
- click.echo(f"Train and save pipeline in {destination}")
- X, y = load_dataset()
- train_pipeline(
- X=X,
- y=y,
- model=model,
- destination=destination,
- ignore_prints=ignore_prints,
- ignore_html=ignore_html,
- )
-
-
-@main.command(name="evaluate")
-@click.option("--pipeline", help="Pipeline location path.")
-@click.option("--threshold", help="Classification pipeline optimal threshold path.")
-@click.option("--prefix", help="Classification reports prefix i.e. pipeline name.")
-@click.option(
- "--destination",
- default=cfg.METADATA_REGISTRY,
- help="Folder where the report should be saved.",
-)
-def _evaluate_pipeline(
- pipeline: str, threshold: str, prefix: str, destination: str
-) -> None:
- click.echo(f"Evaluate and save pipeline performance metrics in {destination}")
- X, y = load_dataset()
- evaluate_pipeline(
- X=X,
- y=y,
- pipeline=pipeline,
- threshold=threshold,
- prefix=prefix,
- destination=destination,
- )
-
-
-@main.command(name="predict")
-@click.option(
- "--model",
- default="RF",
- help="trained pipeline from pyrorisks remote model default to RF",
-)
-@click.option("--day", help="Date of interest (%Y-%m-%d) for example 2020-05-05")
-@click.option("--country", default="France", help="Country of interest")
-@click.option("--zone", default=cfg.ZONE_VAR, help="Territorial unit variable")
-@click.option(
- "--directory",
- default=cfg.PREDICTIONS_REGISTRY,
- help="Predictions registry local path",
-)
-def _predict(model: str, day: str, country: str, zone: str, directory: str):
- day = day if day is not None else date.today().strftime("%Y-%m-%d")
- pyrorisk = PyroRisk(model=model)
- click.echo(f"Start predictions with the trained {pyrorisk.model} pipeline")
- pyrorisk.predict(
- day=day, country=country, zone_column=zone, dir_destination=directory
- )
- click.echo(
- f"Predictions are persisted in {directory}{pyrorisk.model}_prediction_{country}_{day}.joblib"
- )
-
-
-if __name__ == "__main__":
- main()
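The deleted CLI wired its subcommands through nested click groups, so `pyrorisks download inputs --day ...` resolved to a command inside a `download` group. A minimal runnable skeleton of that layout (the command body is a placeholder, not the deleted implementation):

```python
# Skeleton of the removed click CLI: a root group with a nested
# "download" group, mirroring `pyrorisks download inputs --day ...`.
import click


@click.group()
def main():
    pass


@main.group(name="download")
def download_main():
    pass


@download_main.command(name="inputs")
@click.option("--day", default=None, help="Date of interest (%Y-%m-%d)")
def download_inputs(day: str):
    click.echo(f"Would download inputs for {day or 'today'}")


if __name__ == "__main__":
    main()
```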
From f0260de5d585d67bd65714ba4e63d27d82425915 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 16:28:59 +0200
Subject: [PATCH 06/41] refactor: Deprecate example scripts
---
scripts/example_ERA5_FIRMS.py | 116 -------------------------
scripts/example_ERA5_VIIRS.py | 126 ----------------------------
scripts/example_NASA_FIRMS.py | 115 -------------------------
scripts/example_scorev0.py | 41 ---------
scripts/example_weather_wildfire.py | 84 -------------------
5 files changed, 482 deletions(-)
delete mode 100644 scripts/example_ERA5_FIRMS.py
delete mode 100644 scripts/example_ERA5_VIIRS.py
delete mode 100644 scripts/example_NASA_FIRMS.py
delete mode 100644 scripts/example_scorev0.py
delete mode 100644 scripts/example_weather_wildfire.py
diff --git a/scripts/example_ERA5_FIRMS.py b/scripts/example_ERA5_FIRMS.py
deleted file mode 100644
index 8116725..0000000
--- a/scripts/example_ERA5_FIRMS.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-from pyro_risks.datasets import NASAFIRMS, ERA5Land
-from pyro_risks.datasets.datasets_mergers import (
- merge_datasets_by_departements,
- merge_datasets_by_closest_weather_point,
-)
-from pyro_risks.datasets.utils import get_intersection_range
-
-
-def main(args):
- weather = ERA5Land(args.ERA5)
- nasa_firms = NASAFIRMS(args.nasa_firms, args.nasa_firms_type)
- print(weather.shape)
- print(nasa_firms.shape)
-
- # Time span selection
- date_range = get_intersection_range(weather.time, nasa_firms.acq_date)
- weather = weather[weather.time.isin(date_range)]
- nasa_firms = nasa_firms[nasa_firms.acq_date.isin(date_range)]
-
- print(weather.shape)
- print(nasa_firms.shape)
-
- # Merge
- if args.type_of_merged == "departements":
- # drop redundant columns with weather datasets
- nasa_firms = nasa_firms.drop(["nom"], axis=1)
- merged_data = merge_datasets_by_departements(
- weather, "time", "code", nasa_firms, "acq_date", "code", "left"
- )
- to_drop = [
- "acq_date",
- "latitude_y",
- "longitude_y",
- "bright_t31",
- "frp",
- "acq_date_time",
- "confidence",
- ]
-
- else:
- # drop redundant columns with weather datasets
- nasa_firms = nasa_firms.drop(["code", "nom"], axis=1)
- merged_data = merge_datasets_by_closest_weather_point(
- weather, "time", nasa_firms, "acq_date"
- )
- to_drop = [
- "closest_weather_point",
- "acq_date",
- "latitude_y",
- "longitude_y",
- "bright_t31",
- "frp",
- "acq_date_time",
- "confidence",
- "weather_lat",
- "weather_lon",
- ]
-
- final_data = merged_data.copy()
- where = merged_data["confidence"] >= 60
- final_data.loc[where, "Statut"] = 1
- final_data.loc[~where, "Statut"] = 0
- final_data["Statut"] = final_data["Statut"].astype(int)
-
- # drop unnecessary columns
- final_data = final_data.drop(to_drop, axis=1)
-
- print(final_data)
-
-
-def parse_args():
- import argparse
-
- parser = argparse.ArgumentParser(
- description="Pyronear wildfire history example based on NASA FIRMS and ERA5 Land",
- formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- )
-
- parser.add_argument(
- "--ERA5", default=None, type=str, help="path or URL of ERA5 Land source"
- )
-
- parser.add_argument(
- "--nasa_firms",
- default=None,
- type=str,
- help="path or URL of NASA FIRMS data source",
- )
-
- parser.add_argument(
- "--nasa_firms_type",
- default="json",
- type=str,
- help="type of NASA FIRMS data source",
- )
-
- parser.add_argument(
- "--type_of_merged",
- default="proximity",
- type=str,
- help="type of merged between weather and fire datasets: either departements or proximity",
- )
-
- args = parser.parse_args()
-
- return args
-
-
-if __name__ == "__main__":
- args = parse_args()
- main(args)
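The labeling step shared by the FIRMS example scripts maps detection confidence to a binary `Statut` target. In isolation it behaves as follows (synthetic frame; a missing confidence compares False and therefore labels 0):

```python
# Confidence >= 60 becomes a positive fire label, everything else 0.
import numpy as np
import pandas as pd

merged_data = pd.DataFrame({"confidence": [85.0, 40.0, 62.0, np.nan]})
final_data = merged_data.copy()
where = merged_data["confidence"] >= 60  # NaN compares False
final_data.loc[where, "Statut"] = 1
final_data.loc[~where, "Statut"] = 0
final_data["Statut"] = final_data["Statut"].astype(int)
print(final_data["Statut"].tolist())  # [1, 0, 1, 0]
```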
diff --git a/scripts/example_ERA5_VIIRS.py b/scripts/example_ERA5_VIIRS.py
deleted file mode 100644
index 84adf9f..0000000
--- a/scripts/example_ERA5_VIIRS.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-from pyro_risks.datasets import NASAFIRMS_VIIRS, ERA5Land
-from pyro_risks.datasets.datasets_mergers import (
- merge_datasets_by_departements,
- merge_by_proximity,
-)
-from pyro_risks.datasets.utils import get_intersection_range
-
-
-def main(args):
- weather = ERA5Land(args.ERA5)
- nasa_firms = NASAFIRMS_VIIRS(args.nasa_firms, args.nasa_firms_type)
- print(weather.shape)
- print(nasa_firms.shape)
-
- # Time span selection
- date_range = get_intersection_range(weather.time, nasa_firms.acq_date)
- weather = weather[weather.time.isin(date_range)]
- nasa_firms = nasa_firms[nasa_firms.acq_date.isin(date_range)]
-
- print(weather.shape)
- print(nasa_firms.shape)
-
- # Keep only vegetation wildfires and remove thermal anomalies with low confidence
- where = (nasa_firms["confidence"] != "l") & (nasa_firms["type"] == 0)
- nasa_firms = nasa_firms[where]
-
- # Merge
- if args.type_of_merged == "departements":
- # drop redundant columns with weather datasets
- nasa_firms = nasa_firms.drop(["nom"], axis=1)
- merged_data = merge_datasets_by_departements(
- weather, "time", "code", nasa_firms, "acq_date", "code", "left"
- )
- to_drop = [
- "acq_date",
- "latitude_y",
- "longitude_y",
- "bright_ti4",
- "confidence",
- "bright_ti5",
- "frp",
- "type",
- "acq_date_time",
- ]
-
- else:
- # drop redundant columns with weather datasets
- nasa_firms = nasa_firms.drop(["code", "nom"], axis=1)
- merged_data = merge_by_proximity(
- nasa_firms, "acq_date", weather, "time", "right"
- )
- to_drop = [
- "latitude_x",
- "longitude_x",
- "closest_lat",
- "closest_lon",
- "acq_date",
- "bright_ti4",
- "confidence",
- "bright_ti5",
- "frp",
- "type",
- "acq_date_time",
- ]
-
- final_data = merged_data.copy()
- where = merged_data["confidence"].isna()
- final_data.loc[~where, "Statut"] = 1
- final_data.loc[where, "Statut"] = 0
- final_data["Statut"] = final_data["Statut"].astype(int)
-
- # drop unnecessary columns
- final_data = final_data.drop(to_drop, axis=1)
- final_data = final_data.rename(
- columns={"latitude_y": "latitude", "longitude_y": "longitude"}
- )
-
- print(final_data)
-
-
-def parse_args():
- import argparse
-
- parser = argparse.ArgumentParser(
- description="Pyronear wildfire history example based on NASA FIRMS and ERA5 Land",
- formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- )
-
- parser.add_argument(
- "--ERA5", default=None, type=str, help="path or URL of ERA5 Land source"
- )
-
- parser.add_argument(
- "--nasa_firms",
- default=None,
- type=str,
- help="path or URL of NASA FIRMS data source",
- )
-
- parser.add_argument(
- "--nasa_firms_type",
- default="csv",
- type=str,
- help="type of NASA FIRMS data source",
- )
-
- parser.add_argument(
- "--type_of_merged",
- default="proximity",
- type=str,
- help="type of merged between weather and fire datasets: either departements or proximity",
- )
-
- args = parser.parse_args()
-
- return args
-
-
-if __name__ == "__main__":
- args = parse_args()
- main(args)
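Each example begins by restricting both datasets to their overlapping dates with `get_intersection_range`. The helper's implementation is not shown in this series, but the deleted tests further down (30 days for a September window inside a full year) suggest it reduces to the common daily span of two date series, roughly as in this assumed sketch:

```python
# Assumed behavior of get_intersection_range: the daily range shared by
# two date series; raises if they do not overlap.
import pandas as pd


def intersection_range(s1: pd.Series, s2: pd.Series) -> pd.DatetimeIndex:
    start, end = max(s1.min(), s2.min()), min(s1.max(), s2.max())
    if start > end:
        raise ValueError("series do not overlap")
    return pd.date_range(start, end)


s1 = pd.Series(pd.date_range("2020-01-01", "2020-12-31"))
s2 = pd.Series(pd.date_range("2020-09-01", "2020-09-30"))
print(len(intersection_range(s1, s2)))  # 30
```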
diff --git a/scripts/example_NASA_FIRMS.py b/scripts/example_NASA_FIRMS.py
deleted file mode 100644
index a765349..0000000
--- a/scripts/example_NASA_FIRMS.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-from pyro_risks.datasets import NASAFIRMS, NOAAWeather
-from pyro_risks.datasets.datasets_mergers import (
- merge_datasets_by_closest_weather_station,
- merge_datasets_by_departements,
-)
-from pyro_risks.datasets.utils import get_intersection_range
-
-
-def main(args):
- weather = NOAAWeather(args.weather)
- nasa_firms = NASAFIRMS(args.nasa_firms, args.nasa_firms_type)
- print(weather.shape)
- print(nasa_firms.shape)
-
- # Time span selection
- date_range = get_intersection_range(weather.DATE, nasa_firms.acq_date)
- weather = weather[weather.DATE.isin(date_range)]
- nasa_firms = nasa_firms[nasa_firms.acq_date.isin(date_range)]
-
- print(weather.shape)
- print(nasa_firms.shape)
-
- # Merge
- if args.type_of_merged == "departements":
- # drop redundant columns with weather datasets
- nasa_firms = nasa_firms.drop(["nom"], axis=1)
- merged_data = merge_datasets_by_departements(
- weather, "DATE", "code", nasa_firms, "acq_date", "code", "left"
- )
- to_drop = [
- # 'closest_weather_station',
- "acq_date",
- "latitude",
- "longitude",
- "bright_t31",
- "frp",
- "acq_date_time",
- "confidence",
- ]
-
- else:
- # drop redundant columns with weather datasets
- nasa_firms = nasa_firms.drop(["code", "nom"], axis=1)
- merged_data = merge_datasets_by_closest_weather_station(
- weather, "DATE", nasa_firms, "acq_date"
- )
- to_drop = [
- "closest_weather_station",
- "acq_date",
- "latitude",
- "longitude",
- "bright_t31",
- "frp",
- "acq_date_time",
- "confidence",
- ]
-
- final_data = merged_data.copy()
- where = merged_data["confidence"] >= 60
- final_data.loc[where, "Statut"] = 1
- final_data.loc[~where, "Statut"] = 0
- final_data["Statut"] = final_data["Statut"].astype(int)
-
- # drop unnecessary columns
- final_data = final_data.drop(to_drop, axis=1)
-
- print(final_data)
-
-
-def parse_args():
- import argparse
-
- parser = argparse.ArgumentParser(
- description="Pyronear wildfire history example based on NASA FIRMS",
- formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- )
-
- parser.add_argument(
- "--weather", default=None, type=str, help="path or URL of NOAA weather source"
- )
-
- parser.add_argument(
- "--nasa_firms",
- default=None,
- type=str,
- help="path or URL of NASA FIRMS data source",
- )
-
- parser.add_argument(
- "--nasa_firms_type",
- default="json",
- type=str,
- help="type of NASA FIRMS data source",
- )
-
- parser.add_argument(
- "--type_of_merged",
- default="departements",
- type=str,
- help="type of merged between weather and fire datasets: either departements or proximity",
- )
-
- args = parser.parse_args()
-
- return args
-
-
-if __name__ == "__main__":
- args = parse_args()
- main(args)
diff --git a/scripts/example_scorev0.py b/scripts/example_scorev0.py
deleted file mode 100644
index ab64dff..0000000
--- a/scripts/example_scorev0.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-from pyro_risks.datasets import MergedEraFwiViirs
-from pyro_risks.models.score_v0 import (
- add_lags,
- prepare_dataset,
- train_random_forest,
- split_train_test,
- xgb_model,
-)
-
-SELECTED_DEP = [
- "Pyrénées-Atlantiques",
- "Hautes-Pyrénées",
- "Ariège",
- "Haute-Corse",
- "Lozère",
- "Gard",
- "Hérault",
- "Bouches-du-Rhônes",
- "Pyrénées-Orientales",
- "Cantal",
- "Alpes-Maritimes",
- "Aveyron",
-]
-
-
-def run():
- df = MergedEraFwiViirs()
- df_lags = add_lags(df, df.drop(["day", "departement", "fires"], axis=1).columns)
- X, y = prepare_dataset(df_lags, selected_dep=SELECTED_DEP)
- X_train, X_test, y_train, y_test = split_train_test(X, y)
- train_random_forest(X_train, X_test, y_train, y_test, ignore_prints=False)
- xgb_model(X_train, y_train, X_test, y_test, ignore_prints=False)
-
-
-if __name__ == "__main__":
- run()
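`add_lags` (used above) builds lagged copies of the feature columns per departement before training; its exact implementation goes with this patch. The deleted LagTransformer tests later in this series lag by calendar day (lag1/lag3/lag7); the sketch below uses a simple row shift per zone to show the idea, which is an approximation, not the removed helper:

```python
# Illustrative lag features: previous-row value of fwi_mean per zone.
# The removed add_lags may have lagged by calendar day instead.
import pandas as pd

df = pd.DataFrame({
    "departement": ["Cantal"] * 4,
    "day": pd.date_range("2019-07-01", periods=4),
    "fwi_mean": [1.1, 13.3, 0.9, 2.5],
})
df = df.sort_values(["departement", "day"])
df["fwi_mean_lag1"] = df.groupby("departement")["fwi_mean"].shift(1)
print(df[["day", "fwi_mean", "fwi_mean_lag1"]])
```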
diff --git a/scripts/example_weather_wildfire.py b/scripts/example_weather_wildfire.py
deleted file mode 100644
index 1142319..0000000
--- a/scripts/example_weather_wildfire.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-from pyro_risks.datasets import NOAAWeather, BDIFFHistory
-from pyro_risks.datasets.datasets_mergers import merge_datasets_by_departements
-from pyro_risks.datasets.utils import get_intersection_range
-
-
-def main(args):
- weather = NOAAWeather(args.weather)
- history = BDIFFHistory(args.wildfire)
-
- # Time span selection
- date_range = get_intersection_range(weather.DATE, history.date)
- weather = weather[weather.DATE.isin(date_range)]
- history = history[history.date.isin(date_range)]
-
- # Merge
- df = merge_datasets_by_departements(
- weather, "DATE", "code", history, "date", "Département", "left"
- )
-
- # Label data
- df.Statut = 1 - df.Statut.isna().astype(int)
-
- df = df.filter(
- items=[
- "DATE",
- "code",
- "nom",
- "LATITUDE",
- "LONGITUDE",
- "ELEVATION",
- "DEWP",
- "DEWP_ATTRIBUTES",
- "FRSHTT",
- "GUST",
- "MAX",
- "MIN",
- "MXSPD",
- "PRCP",
- "SLP",
- "SLP_ATTRIBUTES",
- "SNDP",
- "STP",
- "STP_ATTRIBUTES",
- "TEMP",
- "TEMP_ATTRIBUTES",
- "VISIB",
- "VISIB_ATTRIBUTES",
- "WDSP",
- "WDSP_ATTRIBUTES",
- "Statut",
- ]
- )
-
- print(df)
-
-
-def parse_args():
- import argparse
-
- parser = argparse.ArgumentParser(
- description="Pyronear weather & wildfire history example",
- formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- )
-
- parser.add_argument(
- "--weather", default=None, type=str, help="path or URL of NOAA weather source"
- )
- parser.add_argument(
- "--wildfire", default=None, type=str, help="path or URL of BDIFF history source"
- )
-
- args = parser.parse_args()
-
- return args
-
-
-if __name__ == "__main__":
- args = parse_args()
- main(args)
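The one-line labeling in this script deserves a gloss: after the left merge, rows without a matching BDIFF record carry NaN in `Statut`, so the presence of a match becomes the binary target. Isolated, with stand-in values:

```python
# 1 where a fire record matched, 0 where the left merge produced NaN.
import pandas as pd

df = pd.DataFrame({"Statut": ["fire", None, "fire", None]})
df.Statut = 1 - df.Statut.isna().astype(int)
print(df.Statut.tolist())  # [1, 0, 1, 0]
```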
From 75f7a64e88d56a06be9eb7a0884341a4c6a58bf8 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 16:32:43 +0200
Subject: [PATCH 07/41] refactor: Deprecate pyro_risks tests
---
test/test_datasets.py | 500 -------------------------------
test/test_evaluate.py | 90 ------
test/test_load.py | 25 --
test/test_main.py | 133 --------
test/test_models_transformers.py | 234 ---------------
test/test_models_utils.py | 29 --
test/test_predict.py | 168 -----------
test/test_train.py | 123 --------
8 files changed, 1302 deletions(-)
delete mode 100644 test/test_datasets.py
delete mode 100644 test/test_evaluate.py
delete mode 100644 test/test_load.py
delete mode 100644 test/test_main.py
delete mode 100644 test/test_models_transformers.py
delete mode 100644 test/test_models_utils.py
delete mode 100644 test/test_predict.py
delete mode 100644 test/test_train.py
diff --git a/test/test_datasets.py b/test/test_datasets.py
deleted file mode 100644
index 29b4379..0000000
--- a/test/test_datasets.py
+++ /dev/null
@@ -1,500 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-import unittest
-
-import numpy as np
-import pandas as pd
-import tempfile
-import requests
-import tarfile
-import gzip
-import csv
-import os
-
-from pandas.testing import assert_frame_equal
-
-from io import BytesIO
-from pathlib import Path
-
-from zipfile import ZipFile
-from unittest.mock import patch
-from geopandas import GeoDataFrame
-
-import urllib.request
-import json
-
-from pyro_risks import config as cfg
-from pyro_risks.datasets import (
- masks,
- weather,
- wildfires,
- utils,
- nasa_wildfires,
- fwi,
- ERA5,
- era_fwi_viirs,
- queries_api,
-)
-from pyro_risks.datasets.datasets_mergers import (
- merge_datasets_by_departements,
- merge_datasets_by_closest_weather_station,
- merge_datasets_by_closest_weather_point,
- merge_by_proximity,
-)
-
-
-class UtilsTester(unittest.TestCase):
- def _test_get_intersection_range(self, s1, s2, expected_len):
- date_range = utils.get_intersection_range(s1, s2)
- self.assertIsInstance(date_range, pd.DatetimeIndex)
- self.assertEqual(len(date_range), expected_len)
-
- # Template unittest
- def test_get_intersection_range(self):
- # Non-intersecting series
- s1 = pd.Series(pd.date_range("2020-01-01", "2020-08-31"))
- s2 = pd.Series(pd.date_range("2020-09-01", "2020-11-01"))
- self.assertRaises(ValueError, utils.get_intersection_range, s1, s2)
-
- # s2 included in s1
- s1 = pd.Series(pd.date_range("2020-01-01", "2020-12-31"))
- s2 = pd.Series(pd.date_range("2020-09-01", "2020-09-30"))
- self._test_get_intersection_range(s1, s2, 30)
-
- # s2 overlaps s1
- s1 = pd.Series(pd.date_range("2020-09-01", "2020-11-01"))
- s2 = pd.Series(pd.date_range("2020-10-01", "2020-12-01"))
- self._test_get_intersection_range(s1, s2, 32)
-
- def test_load_data(self):
- with tempfile.TemporaryDirectory() as destination:
- fwi.load_data(output_path=destination)
- self.assertTrue(
- Path(destination, "fwi_unzipped/JRC_FWI_20190101.nc").is_file()
- )
-
- def test_get_fwi_data(self):
- with tempfile.TemporaryDirectory() as tmp:
- fwi.load_data(output_path=tmp)
- df = fwi.get_fwi_data(source_path=tmp)
- self.assertIsInstance(df, pd.DataFrame)
- self.assertEqual(df.shape, (26538, 11))
-
- def test_create_departement_df(self):
- test_data = pd.DataFrame(
- {
- "latitude": {
- 0: 47.978,
- 1: 46.783,
- 2: 43.760,
- },
- "longitude": {
- 0: 5.132,
- 1: 4.710,
- 2: 1.335,
- },
- "fwi": {0: 6.7, 1: 0.3, 2: 8.9},
- }
- )
- res = fwi.create_departement_df(day_data=test_data)
- true_res = pd.DataFrame(
- {
- "latitude": {0: 47.978, 1: 46.783, 2: 43.76},
- "longitude": {0: 5.132, 1: 4.71, 2: 1.335},
- "departement": {
- 0: "Haute-Marne",
- 1: "Saône-et-Loire",
- 2: "Haute-Garonne",
- },
- }
- )
- assert_frame_equal(res, true_res)
-
- def test_include_departement(self):
- test_row = pd.Series({"latitude": 51.072, "longitude": 2.531, "fwi": 0.0})
- with urllib.request.urlopen(cfg.FR_GEOJSON) as url:
- dep_polygons = json.loads(url.read().decode())
- self.assertEqual(fwi.include_department(test_row, dep_polygons), "Nord")
-
- @patch("pyro_risks.datasets.utils.requests.get")
- def test_url_retrieve(self, mock_get):
- mock_get.return_value.status_code = 200
- mock_get.return_value.content = bytes("WEATHER OR WILDFIRE FILE", "utf-8")
- content = utils.url_retrieve("url")
- self.assertIsInstance(content, bytes)
-
- mock_get.return_value.status_code = 400
- mock_get.return_value.content = bytes("WEATHER OR WILDFIRE FILE", "utf-8")
- self.assertRaises(
- requests.exceptions.ConnectionError, utils.url_retrieve, "url"
- )
-
- def test_get_fname(self):
- url_firms = "https://firms.modaps.eosdis.nasa.gov/data/active_fire/c6/csv/MODIS_C6_Europe_24h.csv"
- url_ghcn = "https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/by_year/2020.csv.gz"
- url_isd = "https://www.ncei.noaa.gov/data/global-hourly/archive/csv/2020.tar.gz"
-
- self.assertEqual(
- utils.get_fname(url_firms), ("MODIS_C6_Europe_24h", "csv", None)
- )
- self.assertEqual(utils.get_fname(url_ghcn), ("2020", "csv", "gz"))
- self.assertEqual(utils.get_fname(url_isd), ("2020", None, "tar.gz"))
-
- @staticmethod
- def _mock_csv(destination, fname):
- unzipped_content = [
- ["col1", "col2", "col3", "col4"],
- ["test", "test", "test", "test"],
- ["test", "test", "test", "test"],
- ["test", "test", "test", "test"],
- ]
-
- full_path = os.path.join(destination, "server/")
-
- os.makedirs(os.path.dirname(full_path), exist_ok=True)
-
- with open(os.path.join(full_path, fname), mode="w") as csvfile:
- writer = csv.writer(csvfile)
- writer.writerows(unzipped_content)
-
- def _make_tarfile(self, destination):
- self._mock_csv(destination, "test_tar.csv")
-
- full_path = os.path.join(destination, "server/")
- out = tarfile.open(os.path.join(full_path, "test.tar.gz"), "w:gz")
- out.add(full_path, arcname=os.path.basename(full_path))
- out.close()
-
- with open(os.path.join(full_path, "test.tar.gz"), "rb") as tar_file:
- memory_file = BytesIO(tar_file.read())
-
- return memory_file
-
- def _make_gzipfile(self, destination):
- self._mock_csv(destination, "test_gz.csv")
-
- full_path = os.path.join(destination, "server/")
- with gzip.GzipFile(os.path.join(full_path, "test.gz"), mode="w") as gz, open(
- os.path.join(full_path, "test_gz.csv"), mode="r"
- ) as csvfile:
- gz.write(csvfile.read().encode())
- gz.close()
-
- with open(os.path.join(full_path, "test.gz"), "rb") as gz_file:
- memory_file = BytesIO(gz_file.read())
-
- return memory_file
-
- def _make_zipfile(self, destination):
- self._mock_csv(destination, "test_zip.csv")
-
- full_path = os.path.join(destination, "server/")
- with ZipFile(os.path.join(full_path, "test.zip"), "w") as zip_file:
- zip_file.write(
- os.path.join(full_path, "test_zip.csv"),
- os.path.basename(os.path.join(full_path, "test_zip.csv")),
- )
-
- with open(os.path.join(full_path, "test.zip"), "rb") as zip_file:
- memory_file = BytesIO(zip_file.read())
-
- return memory_file
-
- def _make_csv(self, destination):
- self._mock_csv(destination, "test_csv.csv")
-
- full_path = os.path.join(destination, "server/")
- with open(os.path.join(full_path, "test_csv.csv"), "rb") as csv_file:
- memory_file = BytesIO(csv_file.read())
-
- return memory_file
-
- @staticmethod
- def _mock_fname(compression):
- if compression == "tar.gz":
- return ("test_tar", "csv", "tar.gz")
-
- elif compression == "zip":
- return ("test_zip", "csv", "zip")
-
- elif compression == "csv":
- return ("test_csv", "csv", None)
-
- else:
- return ("test_gz", "csv", "gz")
-
- @patch("pyro_risks.datasets.utils.get_fname")
- @patch("pyro_risks.datasets.utils.url_retrieve")
- def test_download(self, mock_url_retrieve, mock_fname):
- with tempfile.TemporaryDirectory() as destination:
- full_path = os.path.join(destination, "client/")
-
- mock_fname.return_value = self._mock_fname("tar.gz")
- mock_url_retrieve.return_value = self._make_tarfile(destination).read()
- utils.download(url="url", default_extension="csv", destination=full_path)
- self.assertTrue(Path(full_path, "test_tar.csv").is_file())
-
- mock_fname.return_value = self._mock_fname("zip")
- mock_url_retrieve.return_value = self._make_zipfile(destination).read()
- utils.download(url="url", default_extension="csv", destination=full_path)
- self.assertTrue(Path(full_path, "test_zip.csv").is_file())
-
- mock_fname.return_value = self._mock_fname("gz")
- mock_url_retrieve.return_value = self._make_gzipfile(destination).read()
- utils.download(url="url", default_extension="csv", destination=full_path)
- self.assertTrue(Path(full_path, "test_gz.csv").is_file())
-
- mock_fname.return_value = self._mock_fname("csv")
- mock_url_retrieve.return_value = self._make_csv(destination).read()
- utils.download(
- url="url", default_extension="csv", unzip=False, destination=full_path
- )
- self.assertTrue(Path(full_path, "test_csv.csv").is_file())
-
- mock_fname.return_value = self._mock_fname("gz")
- mock_url_retrieve.return_value = self._make_gzipfile(destination).read()
- utils.download(
- url="url", default_extension="csv", unzip=False, destination=full_path
- )
- self.assertTrue(Path(full_path, "test_gz.gz").is_file())
-
- mock_fname.return_value = self._mock_fname("csv")
- self.assertRaises(ValueError, utils.download, "url", "csv", True, full_path)
- # utils.download(url='url', default_extension="csv", unzip=False, destination=full_path)
-
- def test_get_modis(self):
- with tempfile.TemporaryDirectory() as destination:
- utils.get_modis(
- start_year=2000, end_year=2001, yearly=True, destination=destination
- )
- utils.get_modis(destination=destination)
- self.assertTrue(Path(destination, "modis_2000_France.csv").is_file())
- self.assertTrue(Path(destination, "MODIS_C6_Europe_24h.csv").is_file())
-
- def test_get_ghcn(self):
- with tempfile.TemporaryDirectory() as destination:
- utils.get_ghcn(start_year=2000, end_year=2001, destination=destination)
- self.assertTrue(Path(destination, "2000.csv").is_file())
-
- def test_find_closest_weather_station(self):
- # Dataframe without STATION column
- df = pd.DataFrame(
- np.array([[5.876, 23.875], [8.986, 12.978]]),
- columns=["LATITUDE", "LONGITUDE"],
- )
- self.assertRaises(
- ValueError, utils.find_closest_weather_station, df, 3.871, 11.234
- )
-
- # Dataframe with STATION column
- df = pd.DataFrame(
- np.array(
- [
- [5676499, 5.876, 23.875],
- [4597821, 3.286, 12.978],
- [8767822, 8.564, 10.764],
- ]
- ),
- columns=["STATION", "LATITUDE", "LONGITUDE"],
- )
- ref_station = utils.find_closest_weather_station(df, 3.871, 11.234)
- self.assertIsInstance(ref_station, int)
-
- def test_merge_datasets_by_departements(self):
- df_weather = weather.NOAAWeather()
- df_fires = wildfires.BDIFFHistory()
- df = merge_datasets_by_departements(
- df_weather, "DATE", "code", df_fires, "date", "Département", "left"
- )
- self.assertIsInstance(df, pd.DataFrame)
-
- def test_merge_datasets_by_closest_weather_station(self):
- df_weather = weather.NOAAWeather()
- nasa_firms = nasa_wildfires.NASAFIRMS()
- df = merge_datasets_by_closest_weather_station(
- df_weather, "DATE", nasa_firms, "acq_date"
- )
- self.assertIsInstance(df, pd.DataFrame)
-
- def test_merge_datasets_by_closest_weather_point(self):
- df_weather = pd.DataFrame(
- np.array(
- [
- [5.876, 23.875, "2019-06-24"],
- [3.286, 12.978, "2019-10-02"],
- [8.564, 10.764, "2019-03-12"],
- ]
- ),
- columns=["latitude", "longitude", "time"],
- )
- df_weather["latitude"] = df_weather["latitude"].astype(float)
- df_weather["longitude"] = df_weather["longitude"].astype(float)
- df_weather["time"] = pd.to_datetime(
- df_weather["time"], format="%Y-%m-%d", errors="coerce"
- )
- nasa_firms = nasa_wildfires.NASAFIRMS()
- df = merge_datasets_by_closest_weather_point(
- df_weather, "time", nasa_firms, "acq_date"
- )
- self.assertIsInstance(df, pd.DataFrame)
-
- def test_merge_datasets_by_proximity(self):
- df_weather = pd.DataFrame(
- np.array(
- [
- [5.876, 23.875, "2019-06-24"],
- [3.286, 12.978, "2019-10-02"],
- [8.564, 10.764, "2019-03-12"],
- ]
- ),
- columns=["latitude", "longitude", "time"],
- )
- df_weather["latitude"] = df_weather["latitude"].astype(float)
- df_weather["longitude"] = df_weather["longitude"].astype(float)
- df_weather["time"] = pd.to_datetime(
- df_weather["time"], format="%Y-%m-%d", errors="coerce"
- )
- nasa_firms = nasa_wildfires.NASAFIRMS_VIIRS()
- df = merge_by_proximity(nasa_firms, "acq_date", df_weather, "time", "right")
- self.assertIsInstance(df, pd.DataFrame)
-
-
-class DatasetsTester(unittest.TestCase):
- def test_get_french_geom(self):
- fr_geom = masks.get_french_geom()
- self.assertIsInstance(fr_geom, GeoDataFrame)
- self.assertTrue(
- all(
- v1 == v2 for v1, v2 in zip(fr_geom.columns, ["code", "nom", "geometry"])
- )
- )
-
- def test_noaaweather(self):
- ds = weather.NOAAWeather()
- self.assertIsInstance(ds, pd.DataFrame)
-
- def test_bdiffhistory(self):
- ds = wildfires.BDIFFHistory()
- self.assertIsInstance(ds, pd.DataFrame)
-
- def test_nasafirms_json(self):
- ds = nasa_wildfires.NASAFIRMS()
- self.assertIsInstance(ds, pd.DataFrame)
-
- def test_nasafirms_csv(self):
- ds = nasa_wildfires.NASAFIRMS(
- source_path=cfg.TEST_FR_FIRMS_CSV_FALLBACK, fmt="csv"
- )
- self.assertIsInstance(ds, pd.DataFrame)
-
- def test_nasafirms_xlsx(self):
- ds = nasa_wildfires.NASAFIRMS(
- source_path=cfg.TEST_FR_FIRMS_XLSX_FALLBACK, fmt="xlsx"
- )
- self.assertIsInstance(ds, pd.DataFrame)
-
- def test_nasaviirs_csv(self):
- ds = nasa_wildfires.NASAFIRMS_VIIRS()
- self.assertIsInstance(ds, pd.DataFrame)
-
- def test_nasaviirs_xlsx(self):
- ds = nasa_wildfires.NASAFIRMS_VIIRS(
- source_path=cfg.TEST_FR_VIIRS_XLSX_FALLBACK, fmt="xlsx"
- )
- self.assertIsInstance(ds, pd.DataFrame)
-
- def test_nasaviirs_json(self):
- ds = nasa_wildfires.NASAFIRMS_VIIRS(
- source_path=cfg.TEST_FR_VIIRS_JSON_FALLBACK, fmt="json"
- )
- self.assertIsInstance(ds, pd.DataFrame)
-
- def test_gwisfwi(self):
- ds = fwi.GwisFwi()
- self.assertIsInstance(ds, pd.DataFrame)
-
- def test_era5land(self):
- ds = ERA5.ERA5Land(source_path=cfg.TEST_FR_ERA5LAND_FALLBACK)
- self.assertIsInstance(ds, pd.DataFrame)
-
- def test_era5t(self):
- ds = ERA5.ERA5T(source_path=cfg.TEST_FR_ERA5LAND_FALLBACK)
- self.assertIsInstance(ds, pd.DataFrame)
-
- def test_MergedEraFwiViirs(self):
- ds = era_fwi_viirs.MergedEraFwiViirs(
- era_source_path=cfg.TEST_FR_ERA5T_FALLBACK,
- viirs_source_path=None,
- fwi_source_path=cfg.TEST_FWI_FALLBACK,
- )
- self.assertIsInstance(ds, pd.DataFrame)
- self.assertTrue(len(ds) > 0)
-
- def test_call_era5land(self):
- with tempfile.TemporaryDirectory() as tmp:
- queries_api.call_era5land(tmp, "2020", "07", "15")
- self.assertTrue(os.path.isfile(os.path.join(tmp, "era5land_2020_07_15.nc")))
-
- def test_call_era5t(self):
- with tempfile.TemporaryDirectory() as tmp:
- queries_api.call_era5t(tmp, "2020", "07", "15")
- self.assertTrue(os.path.isfile(os.path.join(tmp, "era5t_2020_07_15.nc")))
-
- def test_call_fwi(self):
- with tempfile.TemporaryDirectory() as tmp:
- queries_api.call_fwi(tmp, "2020", "07", "15")
- self.assertTrue(os.path.isfile(os.path.join(tmp, "fwi_2020_07_15.zip")))
-
- def test_get_fwi_from_api(self):
- res = fwi.get_fwi_from_api("2020-07-15")
- self.assertIsInstance(res, pd.DataFrame)
- self.assertEqual(len(res), 1039)
- self.assertEqual(res.iloc[0]["nom"], "Aisne")
- self.assertEqual(res.iloc[78]["isi"], np.float32(5.120605))
-
- def test_get_fwi_data_for_predict(self):
- res = fwi.get_fwi_data_for_predict("2020-05-05")
- self.assertTrue(
- np.array_equal(
- res.day.unique(),
- np.array(["2020-05-05", "2020-05-04", "2020-05-02", "2020-04-28"]),
- )
- )
-
- def test_get_data_era5land_for_predict(self):
- res = ERA5.get_data_era5land_for_predict("2020-05-05")
- self.assertTrue(
- np.array_equal(
- res.time.unique(),
- np.array(
- ["2020-05-05", "2020-05-04", "2020-05-02", "2020-04-28"],
- dtype="datetime64[ns]",
- ),
- )
- )
- self.assertTrue("evaow" in res.columns)
-
- def test_get_data_era5t_for_predict(self):
- res = ERA5.get_data_era5t_for_predict("2020-07-15")
- self.assertTrue("u10" in res.columns)
- self.assertEqual(len(res), 4156)
-
- def test_process_dataset_to_predict(self):
- fwi = pd.read_csv(cfg.TEST_FWI_TO_PREDICT)
- era = pd.read_csv(cfg.TEST_ERA_TO_PREDICT)
- res = era_fwi_viirs.process_dataset_to_predict(fwi, era)
- self.assertTrue(
- np.array_equal(
- res.loc[res["nom"] == "Vienne", "fwi_max"].values,
- np.array(
- [1.2649848, 0.06888488, 0.74846804, 1.6156918], dtype=np.float64
- ),
- )
- )
-
-
-if __name__ == "__main__":
- unittest.main()
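A pattern worth noting before these tests disappear: network access is faked by patching `requests.get` and stubbing the status and content attributes the code reads, as in `test_url_retrieve` above. Reduced to a self-contained example; the `url_retrieve` here is a simplified stand-in, not the deleted helper:

```python
# Patch requests.get and fake the response attributes the code reads.
from unittest.mock import patch

import requests


def url_retrieve(url: str) -> bytes:
    response = requests.get(url)
    if response.status_code != 200:
        raise requests.exceptions.ConnectionError
    return response.content


with patch("requests.get") as mock_get:
    mock_get.return_value.status_code = 200
    mock_get.return_value.content = b"WEATHER OR WILDFIRE FILE"
    assert url_retrieve("url") == b"WEATHER OR WILDFIRE FILE"
```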
diff --git a/test/test_evaluate.py b/test/test_evaluate.py
deleted file mode 100644
index 1c6c93d..0000000
--- a/test/test_evaluate.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-from collections import namedtuple
-from datetime import datetime
-from imblearn.pipeline import Pipeline
-from sklearn.dummy import DummyClassifier
-from sklearn.model_selection import train_test_split
-from sklearn.datasets import make_classification
-from pyro_risks.pipeline import train_pipeline, save_pipeline
-from pyro_risks.pipeline import (
- save_classification_reports,
- save_classification_plots,
- evaluate_pipeline,
-)
-
-
-import numpy as np
-import pandas as pd
-import pyro_risks.config as cfg
-
-import unittest
-import tempfile
-import glob
-
-
-class EvaluateTester(unittest.TestCase):
- def test_save_classification_reports(self):
- y_true = np.array([0, 0, 1, 1])
- y_pred = np.array([0, 1, 1, 1])
- with tempfile.TemporaryDirectory() as destination:
- save_classification_reports(
- y_true=y_true, y_pred=y_pred, prefix="TEST", destination=destination
- )
- files = glob.glob(destination + "/*")
- self.assertTrue(any([".json" in file for file in files]))
- self.assertTrue(any([".csv" in file for file in files]))
-
- def test_save_classification_plots(self):
- y_true = np.array([0, 0, 1, 1])
- y_proba = np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
- with tempfile.TemporaryDirectory() as destination:
- save_classification_plots(
- y_true=y_true,
- y_proba=y_proba[:, 1],
- threshold=0.35,
- prefix="TEST",
- destination=destination,
- )
- files = glob.glob(destination + "/*")
- self.assertTrue(any([".png" in file for file in files]))
-
- def test_evaluate_pipeline(self):
- X, y = make_classification(
- n_samples=100, n_features=5, n_informative=2, n_redundant=2
- )
- X_train, _, y_train, _ = train_test_split(
- X, y, test_size=cfg.TEST_SIZE, random_state=cfg.RANDOM_STATE
- )
- dummy_pipeline = Pipeline(
- [("dummy_classifier", DummyClassifier(strategy="constant", constant=0))]
- )
- dummy_pipeline.fit(X_train, y_train)
-
- with tempfile.TemporaryDirectory() as destination:
- threshold = destination + "/DUMMY_threshold.json"
- save_pipeline(
- pipeline=dummy_pipeline,
- model="DUMMY",
- optimal_threshold=0,
- destination=destination,
- )
- evaluate_pipeline(
- X=X,
- y=y,
- pipeline=dummy_pipeline,
- threshold=threshold,
- prefix="DUMMY",
- destination=destination,
- )
- files = glob.glob(destination + "/*")
- self.assertTrue(any([".png" in file for file in files]))
- self.assertTrue(any([".json" in file for file in files]))
- self.assertTrue(any([".csv" in file for file in files]))
-
-
-if __name__ == "__main__":
- unittest.main()
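These evaluation tests share one assertion style: write artifacts into a TemporaryDirectory, then glob the directory and check file extensions. Stripped to its core (the report writer below is a stand-in for the real one):

```python
# Assert-on-artifacts pattern: write into a temp dir, glob, check suffixes.
import glob
import json
import tempfile


def save_report(destination: str) -> None:  # stand-in for the real writer
    with open(destination + "/TEST_report.json", "w") as f:
        json.dump({"f1": 0.5}, f)


with tempfile.TemporaryDirectory() as destination:
    save_report(destination)
    files = glob.glob(destination + "/*")
    assert any(".json" in file for file in files)
```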
diff --git a/test/test_load.py b/test/test_load.py
deleted file mode 100644
index 2e2ce2f..0000000
--- a/test/test_load.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-from pyro_risks.pipeline import load_dataset
-from unittest import mock
-
-import pyro_risks.config as cfg
-import unittest
-import tempfile
-import os
-
-
-class LoadTester(unittest.TestCase):
- def test_load(self):
- with tempfile.TemporaryDirectory() as destination:
- with mock.patch("pyro_risks.config.DATA_REGISTRY", destination):
- dataset_path = os.path.join(destination, cfg.DATASET)
- load_dataset()
- self.assertTrue(os.path.isfile(dataset_path))
-
-
-if __name__ == "__main__":
- unittest.main()
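The load test swaps the module-level `DATA_REGISTRY` path for a temp directory with `mock.patch`, so the pipeline writes somewhere disposable. The same pattern in miniature; the `cfg` class is a stand-in for `pyro_risks.config`:

```python
# Temporarily repoint a module-level config constant inside a context.
import tempfile
from unittest import mock


class cfg:  # stand-in for pyro_risks.config
    DATA_REGISTRY = "/data"


with tempfile.TemporaryDirectory() as destination:
    with mock.patch.object(cfg, "DATA_REGISTRY", destination):
        assert cfg.DATA_REGISTRY == destination  # patched inside the block
    assert cfg.DATA_REGISTRY == "/data"  # restored on exit
```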
diff --git a/test/test_main.py b/test/test_main.py
deleted file mode 100644
index 1e8a38a..0000000
--- a/test/test_main.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-from pyro_risks.pipeline import load_dataset
-from pyro_risks.main import main
-from pyro_risks.pipeline import train_pipeline
-from imblearn.pipeline import Pipeline
-from sklearn.dummy import DummyClassifier
-from click.testing import CliRunner
-import pyro_risks.config as cfg
-import requests
-
-import unittest
-import tempfile
-import glob
-import os
-
-
-class MainTester(unittest.TestCase):
- def test_download_dataset(self):
- runner = CliRunner()
- pattern = "/*.csv"
- with tempfile.TemporaryDirectory() as destination:
- runner.invoke(main, ["download", "dataset", "--destination", destination])
- files = glob.glob(destination + pattern)
- self.assertTrue(any([cfg.DATASET in file for file in files]))
-
- def test_download_inputs(self):
- runner = CliRunner()
- pattern = "/*.csv"
- with tempfile.TemporaryDirectory() as directory:
- runner.invoke(
- main,
- ["download", "inputs", "--day", "2020-05-05", "--directory", directory],
- )
- files = glob.glob(directory + pattern)
- self.assertTrue(
- any(["inputs_France_2020-05-05.csv" in file for file in files])
- )
-
- def test_train_pipeline(self):
- runner = CliRunner()
- pattern = "/*.joblib"
- with tempfile.TemporaryDirectory() as destination:
- runner.invoke(
- main, ["train", "--model", "RF", "--destination", destination]
- )
- files = glob.glob(destination + pattern)
- self.assertTrue(any(["RF" in file for file in files]))
-
- def test_evaluate_pipeline(self):
- runner = CliRunner()
- pattern = "/*.joblib"
- X, y = load_dataset()
-
- dummy_pipeline = Pipeline(
- [("dummy_classifier", DummyClassifier(strategy="constant", constant=0))]
- )
-
- with tempfile.TemporaryDirectory() as destination:
- threshold = destination + "/DUMMY_threshold.json"
- train_pipeline(
- X=X,
- y=y,
- model="DUMMY",
- pipeline=dummy_pipeline,
- destination=destination,
- ignore_prints=True,
- ignore_html=True,
- )
- pipeline_path = glob.glob(destination + pattern)
- runner.invoke(
- main,
- [
- "evaluate",
- "--pipeline",
- pipeline_path[0],
- "--threshold",
- threshold,
- "--prefix",
- "DUMMY",
- "--destination",
- destination,
- ],
- )
- files = glob.glob(destination + "/*")
- self.assertTrue(any([".png" in file for file in files]))
- self.assertTrue(any([".json" in file for file in files]))
- self.assertTrue(any([".csv" in file for file in files]))
-
- def test_predict(self):
- # TODO
- # Test with today's date after bugfix
- inputs_fname = "inputs_France_2020-05-05.csv"
- pipeline_fname = "RF.joblib"
- mock_inputs = requests.get(
- url="https://github.com/pyronear/pyro-risks/releases/download/v0.1.0-data/inputs_France_2020-05-05.csv"
- )
- mock_pipeline = requests.get(
- url="https://github.com/pyronear/pyro-risks/releases/download/v0.1.0-data/RF.joblib"
- )
-
- runner = CliRunner()
- with tempfile.TemporaryDirectory() as directory:
- with open(os.path.join(directory, inputs_fname), "wb") as inputs:
- inputs.write(mock_inputs.content)
-
- with open(os.path.join(directory, pipeline_fname), "wb") as pipeline:
- pipeline.write(mock_pipeline.content)
- runner.invoke(
- main, ["predict", "--day", "2020-05-05", "--directory", directory]
- )
-
- files = glob.glob(directory + "/*")
- print(files)
- self.assertTrue(
- any(["inputs_France_2020-05-05.csv" in file for file in files])
- )
- self.assertTrue(
- any(
- [
- "RF_predictions_France_2020-05-05.joblib" in file
- for file in files
- ]
- )
- )
- self.assertTrue(any(["RF.joblib" in file for file in files]))
-
-
-if __name__ == "__main__":
- unittest.main()
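All of the deleted CLI tests follow click's in-process testing pattern: build a `CliRunner`, invoke a command with argv-style arguments, then assert on the output or on files produced in a temp directory. Minimal form (the command here is illustrative, not the deleted one):

```python
# In-process CLI testing with click's CliRunner.
import click
from click.testing import CliRunner


@click.command()
@click.option("--day", default="today")
def predict(day: str):
    click.echo(f"predicting wildfire risks for {day}")


runner = CliRunner()
result = runner.invoke(predict, ["--day", "2020-05-05"])
assert result.exit_code == 0
assert "2020-05-05" in result.output
```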
diff --git a/test/test_models_transformers.py b/test/test_models_transformers.py
deleted file mode 100644
index fb345d7..0000000
--- a/test/test_models_transformers.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-import unittest
-import numpy as np
-import pandas as pd
-
-from pandas.testing import assert_frame_equal, assert_series_equal
-
-from pyro_risks.models import (
- TargetDiscretizer,
- CategorySelector,
- Imputer,
- LagTransformer,
- FeatureSelector,
- FeatureSubsetter,
-)
-
-
-class TransformersTester(unittest.TestCase):
- def test_target_discretizer(self):
- td = TargetDiscretizer(discretizer=lambda x: 1 if x > 0 else 0)
- df = pd.DataFrame(
- {
- "day": ["2019-07-01", "2019-08-02", "2019-06-12"],
- "departement": ["Aisne", "Cantal", "Savoie"],
- "fires": [0, 5, 10],
- "fwi_mean": [13.3, 0.9, 2.5],
- "ffmc_max": [23, 45.3, 109.0],
- }
- )
- X = df.drop(columns=["fires"])
- y = df["fires"]
-
- Xr, yr = td.fit_resample(X, y)
- assert_series_equal(yr, pd.Series([0, 1, 1], name="fires"))
- assert_frame_equal(Xr, X)
- self.assertRaises(TypeError, TargetDiscretizer, [0, 1])
-
- def test_category_selector(self):
- cs = CategorySelector(variable="departement", category=["Aisne", "Cantal"])
- df = pd.DataFrame(
- {
- "day": ["2019-07-01", "2019-08-02", "2019-06-12"],
- "departement": ["Aisne", "Cantal", "Savoie"],
- "fires": [0, 5, 10],
- "fwi_mean": [13.3, 0.9, 2.5],
- "ffmc_max": [23, 45.3, 109.0],
- }
- )
- X = df.drop(columns=["fires"])
- y = df["fires"]
-
- Xr, yr = cs.fit_resample(X, y)
-
- self.assertRaises(TypeError, CategorySelector, "departement", 0)
- assert_frame_equal(Xr, X[X["departement"].isin(["Aisne", "Cantal"])])
- assert_series_equal(yr, y[X["departement"].isin(["Aisne", "Cantal"])])
-
- # pylint: disable=R0201
- def test_imputer(self):
- imp = Imputer(strategy="median", columns=["fwi_mean"])
- df = pd.DataFrame(
- {
- "fires": [0, 5, 10],
- "fwi_mean": [13.3, np.nan, 2.5],
- "ffmc_max": [23, np.nan, 109.0],
- }
- )
-
- X = df.drop(columns=["fires"])
- y = df["fires"]
-
- imp.fit(X, y)
-
- XT = imp.transform(X)
-
- assert_frame_equal(
- XT,
- pd.DataFrame(
- {
- "fwi_mean": [13.3, 7.9, 2.5],
- "ffmc_max": [23, np.nan, 109.0],
- }
- ),
- )
-
- def test_lag_transformer(self):
- lt = LagTransformer(
- date_column="date", zone_column="departement", columns=["fwi_mean"]
- )
- df = pd.DataFrame(
- {
- "date": [
- np.datetime64("2019-07-01"),
- np.datetime64("2019-07-04"),
- np.datetime64("2019-07-07"),
- np.datetime64("2019-07-08"),
- ],
- "departement": ["Cantal", "Cantal", "Cantal", "Cantal"],
- "fwi_mean": [1.1, 13.3, 0.9, 2.5],
- "fires": [0, 5, 10, 10],
- }
- )
- res = pd.DataFrame(
- {
- "date": [
- np.datetime64("2019-07-01"),
- np.datetime64("2019-07-04"),
- np.datetime64("2019-07-07"),
- np.datetime64("2019-07-08"),
- ],
- "departement": ["Cantal", "Cantal", "Cantal", "Cantal"],
- "fwi_mean": [1.1, 13.3, 0.9, 2.5],
- "fwi_mean_lag1": [np.nan, np.nan, np.nan, 0.9],
- "fwi_mean_lag3": [np.nan, 1.1, 13.3, np.nan],
- "fwi_mean_lag7": [np.nan, np.nan, np.nan, 1.1],
- }
- )
-
- X = df.drop(columns=["fires"])
- y = df["fires"]
-
- lt.fit(X, y)
-
- X = lt.transform(X)
-
- pd.DataFrame(
- {
- "day": ["2019-07-01", "2019-08-02", "2019-06-12"],
- "departement": ["Aisne", "Cantal", "Savoie"],
- "fwi_mean": [13.3, 0.9, 2.5],
- "ffmc_max": [23, 45.3, 109.0],
- }
- )
-
- assert_frame_equal(res, X)
- self.assertRaises(
- TypeError,
- LagTransformer.transform,
- pd.DataFrame(
- {
- "day": ["2019-07-01", "2019-08-02", "2019-06-12"],
- "departement": ["Aisne", "Cantal", "Savoie"],
- "fwi_mean": [13.3, 0.9, 2.5],
- "ffmc_max": [23, 45.3, 109.0],
- }
- ),
- )
-
- # pylint: disable=R0201
- def test_feature_selector(self):
- fs = FeatureSelector(
- exclude=["date", "department"], method="pearson", threshold=0.15
- )
- df = pd.DataFrame(
- {
- "date": [
- np.datetime64("2019-07-01"),
- np.datetime64("2019-07-04"),
- np.datetime64("2019-07-06"),
- np.datetime64("2019-07-07"),
- np.datetime64("2019-07-08"),
- ],
- "departement": ["Cantal", "Cantal", "Cantal", "Cantal", "Cantal"],
- "str_mean": [2, 3, 4, 0, 0],
- "ffmc_min": [0, 0, 0, 0, 0],
- "isi_mean": [3, 0, 1, 4, 5],
- "fires": [1, 1, 1, 0, 0],
- }
- )
-
- X = df.drop(columns=["fires"])
- y = df["fires"]
-
- fs.fit(X, y)
- X = fs.transform(X)
-
- res = pd.DataFrame(
- {
- "str_mean": [2, 3, 4, 0, 0],
- "isi_mean": [3, 0, 1, 4, 5],
- }
- )
-
- assert_frame_equal(res, X)
-
- # pylint: disable=R0201
- def test_feature_subsetter(self):
- fs = FeatureSubsetter(columns=["date", "departement", "str_mean"])
- df = pd.DataFrame(
- {
- "date": [
- np.datetime64("2019-07-01"),
- np.datetime64("2019-07-04"),
- np.datetime64("2019-07-06"),
- np.datetime64("2019-07-07"),
- np.datetime64("2019-07-08"),
- ],
- "departement": ["Cantal", "Cantal", "Cantal", "Cantal", "Cantal"],
- "str_mean": [2, 3, 4, 0, 0],
- "ffmc_min": [0, 0, 0, 0, 0],
- "isi_mean": [3, 0, 1, 4, 5],
- "fires": [1, 1, 1, 0, 0],
- }
- )
-
- X = df.drop(columns=["fires"])
- y = df["fires"]
-
- fs.fit(X, y)
- X = fs.transform(X)
-
- res = pd.DataFrame(
- {
- "date": [
- np.datetime64("2019-07-01"),
- np.datetime64("2019-07-04"),
- np.datetime64("2019-07-06"),
- np.datetime64("2019-07-07"),
- np.datetime64("2019-07-08"),
- ],
- "departement": ["Cantal", "Cantal", "Cantal", "Cantal", "Cantal"],
- "str_mean": [2, 3, 4, 0, 0],
- }
- )
-
- assert_frame_equal(res, X)
-
-
-if __name__ == "__main__":
- unittest.main()
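The transformers exercised above follow imbalanced-learn's resampler contract: `fit_resample(X, y)` may change both X and y, which ordinary scikit-learn transformers cannot. A simplified reimplementation of the `TargetDiscretizer` behavior these tests assert, not the deleted class itself:

```python
# A resampler-style target binarizer: X passes through, y is discretized.
import pandas as pd


class TargetDiscretizer:
    def __init__(self, discretizer):
        if not callable(discretizer):
            raise TypeError("discretizer must be callable")
        self.discretizer = discretizer

    def fit_resample(self, X: pd.DataFrame, y: pd.Series):
        return X, y.apply(self.discretizer)


X = pd.DataFrame({"fwi_mean": [13.3, 0.9, 2.5]})
y = pd.Series([0, 5, 10], name="fires")
Xr, yr = TargetDiscretizer(lambda x: 1 if x > 0 else 0).fit_resample(X, y)
print(yr.tolist())  # [0, 1, 1]
```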
diff --git a/test/test_models_utils.py b/test/test_models_utils.py
deleted file mode 100644
index a524e99..0000000
--- a/test/test_models_utils.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to for full license details.
-
-import unittest
-
-import numpy as np
-from pyro_risks.models import check_xy, check_x, discretizer
-
-
-class UtilsTester(unittest.TestCase):
- def test_check_xy(self):
- self.assertRaises(
- TypeError, check_xy, np.array([[0, 0, 0], [0, 0, 0]]), np.array([0, 1])
- )
-
- def test_check_x(self):
- self.assertRaises(
- TypeError, check_x, np.array([[0, 0, 0], [0, 0, 0]]), np.array([0, 1])
- )
-
- def test_discretizer(self):
- self.assertEqual(discretizer(5), 1)
- self.assertEqual(discretizer(0), 0)
-
-
-if __name__ == "__main__":
- unittest.main()
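
The discretizer assertions fix its contract: positive counts map to 1 and zero maps to 0. A one-line equivalent consistent with those two assertions (a sketch, not the actual pyro_risks.models implementation):

    def discretizer(x: float) -> int:
        # discretizer(5) == 1 and discretizer(0) == 0, as asserted above.
        return 1 if x > 0 else 0
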
diff --git a/test/test_predict.py b/test/test_predict.py
deleted file mode 100644
index 8d342da..0000000
--- a/test/test_predict.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-from pyro_risks.pipeline import PyroRisk
-from pyro_risks import config as cfg
-
-import pandas as pd
-
-import requests
-import imblearn
-import unittest
-import tempfile
-import glob
-import os
-
-
-class PredictTester(unittest.TestCase):
- def test_pyrorisk(self):
- pyrorisk_rf = PyroRisk()
- pyrorisk_xgb = PyroRisk(model="XGBOOST")
- self.assertEqual(pyrorisk_rf.model, "RF")
- self.assertEqual(pyrorisk_xgb.model, "XGBOOST")
- self.assertEqual(pyrorisk_rf.model_path, cfg.RFMODEL_ERA5T_PATH)
- self.assertEqual(pyrorisk_xgb.model_path, cfg.XGBMODEL_ERA5T_PATH)
- self.assertEqual(pyrorisk_rf.predictions_registry, cfg.PREDICTIONS_REGISTRY)
- self.assertEqual(pyrorisk_xgb.predictions_registry, cfg.PREDICTIONS_REGISTRY)
- with self.assertRaises(ValueError):
-            PyroRisk(model="Mock")
-
- def test_get_pipeline(self):
- pyrorisk = PyroRisk()
- with tempfile.TemporaryDirectory() as dir_destination:
- destination = f"{dir_destination}/RF.joblib"
- pyrorisk.get_pipeline(destination=destination)
- files = glob.glob(dir_destination + "/*")
- self.assertTrue(any(["RF.joblib" in file for file in files]))
-
- def test_get_inputs(self):
- pyrorisk = PyroRisk()
- country = "France"
- day = "2020-05-05"
- with tempfile.TemporaryDirectory() as dir_destination:
- pyrorisk.get_inputs(
- day=day, country=country, dir_destination=dir_destination
- )
- files = glob.glob(dir_destination + "/*")
- self.assertTrue(
- any([f"inputs_{country}_{day}.csv" in file for file in files])
- )
-
- def test_load_pipeline(self):
- pyrorisk = PyroRisk()
- with tempfile.TemporaryDirectory() as dir_path:
- path = dir_path + "/RF.joblib"
- pyrorisk.load_pipeline(path=path)
- files = glob.glob(dir_path + "/*")
- self.assertTrue(isinstance(pyrorisk.pipeline, imblearn.pipeline.Pipeline))
- self.assertTrue(any(["RF.joblib" in file for file in files]))
- pyrorisk.pipeline = None
- pyrorisk.load_pipeline(path=path)
- self.assertTrue(isinstance(pyrorisk.pipeline, imblearn.pipeline.Pipeline))
-
- def test_load_inputs(self):
- pyrorisk = PyroRisk()
- country = "France"
- day = "2020-05-05"
- with tempfile.TemporaryDirectory() as dir_path:
- pyrorisk.load_inputs(day=day, country=country, dir_path=dir_path)
- files = glob.glob(dir_path + "/*")
- self.assertTrue(isinstance(pyrorisk.inputs, pd.DataFrame))
- self.assertTrue(
- any([f"inputs_{country}_{day}.csv" in file for file in files])
- )
- pyrorisk.inputs = None
- pyrorisk.load_inputs(day=day, country=country, dir_path=dir_path)
- self.assertTrue(isinstance(pyrorisk.inputs, pd.DataFrame))
-
- def test_predict(self):
- pyrorisk_rf = PyroRisk()
- pyrorisk_xgb = PyroRisk(model="XGBOOST")
- country = "France"
- day = "2020-05-05"
- inputs_fname = "inputs_France_2020-05-05.csv"
- rf_pipeline_fname = "RF.joblib"
- xgb_pipeline_fname = "XGBOOST.joblib"
- mock_inputs = requests.get(
- url="https://github.com/pyronear/pyro-risks/releases/download/v0.1.0-data/inputs_France_2020-05-05.csv"
- )
- mock_rf_pipeline = requests.get(
- url="https://github.com/pyronear/pyro-risks/releases/download/v0.1.0-data/RF.joblib"
- )
- mock_xgb_pipeline = requests.get(
- url="https://github.com/pyronear/pyro-risks/releases/download/v0.1.0-data/RF.joblib"
- )
-
- with tempfile.TemporaryDirectory() as dir_destination:
- with open(os.path.join(dir_destination, inputs_fname), "wb") as inputs:
- inputs.write(mock_inputs.content)
-
- with open(
- os.path.join(dir_destination, rf_pipeline_fname), "wb"
- ) as pipeline:
- pipeline.write(mock_rf_pipeline.content)
- with open(
- os.path.join(dir_destination, xgb_pipeline_fname), "wb"
- ) as pipeline:
- pipeline.write(mock_xgb_pipeline.content)
- pyrorisk_rf.predict(
- day=day, country=country, dir_destination=dir_destination
- )
- pyrorisk_xgb.predict(
- day=day, country=country, dir_destination=dir_destination
- )
- files = glob.glob(dir_destination + "/*")
- self.assertTrue(
- any(
- [
- f"{pyrorisk_rf.model}_predictions_{country}_{day}.joblib"
- in file
- for file in files
- ]
- )
- )
- self.assertTrue(
- any(
- [
- f"{pyrorisk_xgb.model}_predictions_{country}_{day}.joblib"
- in file
- for file in files
- ]
- )
- )
-
- def test_get_predictions(self):
- pyrorisk = PyroRisk()
- country = "France"
- day = "2020-05-05"
- with tempfile.TemporaryDirectory() as destination:
- pyrorisk.get_predictions(day=day, dir_destination=destination)
- files = glob.glob(destination + "/*")
- self.assertTrue(
- any(
- [
- f"{pyrorisk.model}_predictions_{country}_{day}.joblib" in file
- for file in files
- ]
- )
- )
-
- def test_expose_predictions(self):
- pyrorisk = PyroRisk()
- day = "2020-05-05"
- with tempfile.TemporaryDirectory() as destination:
- predictions_dict = pyrorisk.expose_predictions(
- day=day, dir_destination=destination
- )
- predictions_load_dict = pyrorisk.expose_predictions(
- day=day, dir_destination=destination
- )
-
- self.assertTrue(isinstance(predictions_dict, dict))
- self.assertTrue(isinstance(predictions_load_dict, dict))
-
-
-if __name__ == "__main__":
- unittest.main()
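
Read together, these deleted tests double as usage documentation for the PyroRisk scoring lifecycle: construct with "RF" (the default) or "XGBOOST" (anything else raises ValueError), run predictions for a day and country, then expose the scores as a dict. A condensed sketch assembled from the calls above (destination path hypothetical):

    from pyro_risks.pipeline import PyroRisk

    pyrorisk = PyroRisk(model="RF")  # or PyroRisk(model="XGBOOST")
    pyrorisk.predict(day="2020-05-05", country="France", dir_destination="/tmp/predictions")
    scores = pyrorisk.expose_predictions(day="2020-05-05", dir_destination="/tmp/predictions")
    # scores is a plain dict, as asserted in test_expose_predictions.
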
diff --git a/test/test_train.py b/test/test_train.py
deleted file mode 100644
index 239930e..0000000
--- a/test/test_train.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-import unittest
-from collections import namedtuple
-import tempfile
-import glob
-
-
-import numpy as np
-import pandas as pd
-import pyro_risks.config as cfg
-
-from datetime import datetime
-from imblearn.pipeline import Pipeline
-from sklearn.dummy import DummyClassifier
-from pyro_risks.models import xgb_pipeline, rf_pipeline
-from pyro_risks.pipeline import (
- calibrate_pipeline,
- save_pipeline,
- train_pipeline,
- load_dataset,
-)
-
-
-class TrainTester(unittest.TestCase):
- def test_calibrate_pipeline(self):
- y_true = np.array([0, 0, 1, 1])
- y_scores = np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
- optimal_threshold = calibrate_pipeline(y_true, y_scores)
- self.assertEqual(optimal_threshold, 0.35)
-
- def test_save_pipeline(self):
- y_true = np.array([0, 0, 1, 1])
- y_scores = np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
- optimal_threshold = calibrate_pipeline(y_true, y_scores)
- model_pattern = "/*.joblib"
- html_pattern = "/*.html"
- registry = "/.model_registry"
-
- with tempfile.TemporaryDirectory() as destination:
- save_pipeline(
- pipeline=xgb_pipeline,
- model="RF",
- optimal_threshold=optimal_threshold,
- destination=destination,
- ignore_html=True,
- )
- save_pipeline(
- pipeline=rf_pipeline,
- model="RF",
- optimal_threshold=optimal_threshold,
- destination=destination,
- ignore_html=False,
- )
- model_files = glob.glob(destination + model_pattern)
- html_files = glob.glob(destination + html_pattern)
- self.assertTrue(any(["RF" in file for file in model_files]))
- self.assertTrue(any(["RF" in file for file in html_files]))
-
- with tempfile.TemporaryDirectory() as destination:
- save_pipeline(
- pipeline=xgb_pipeline,
- model="XGBOOST",
- optimal_threshold=optimal_threshold,
- destination=destination + registry,
- ignore_html=True,
- )
- save_pipeline(
- pipeline=rf_pipeline,
- model="XGBOOST",
- optimal_threshold=optimal_threshold,
- destination=destination + registry,
- ignore_html=False,
- )
- model_files = glob.glob(destination + registry + model_pattern)
- html_files = glob.glob(destination + registry + html_pattern)
- self.assertTrue(any(["XGBOOST" in file for file in model_files]))
- self.assertTrue(any(["XGBOOST" in file for file in html_files]))
-
- def test_train_pipeline(self):
- X, y = load_dataset()
- pattern = "/*.joblib"
-
- dummy_pipeline = Pipeline(
- [("dummy_classifier", DummyClassifier(strategy="constant", constant=0))]
- )
- with tempfile.TemporaryDirectory() as destination:
- train_pipeline(
- X=X,
- y=y,
- model="XGBOOST",
- destination=destination,
- ignore_prints=True,
- ignore_html=True,
- )
- train_pipeline(
- X=X,
- y=y,
- model="RF",
- destination=destination,
- ignore_prints=True,
- ignore_html=True,
- )
- train_pipeline(
- X=X,
- y=y,
- model="DUMMY",
- pipeline=dummy_pipeline,
- destination=destination,
- ignore_prints=True,
- ignore_html=True,
- )
- files = glob.glob(destination + pattern)
- self.assertTrue(any(["RF" in file for file in files]))
- self.assertTrue(any(["XGBOOST" in file for file in files]))
- self.assertTrue(any(["DUMMY" in file for file in files]))
-
-
-if __name__ == "__main__":
- unittest.main()
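
Likewise, the deleted training tests exercised the full train-and-calibrate path; a condensed sketch of those calls (arguments copied from the tests above, destination path hypothetical):

    import numpy as np
    from pyro_risks.pipeline import calibrate_pipeline, load_dataset, train_pipeline

    X, y = load_dataset()
    train_pipeline(X=X, y=y, model="XGBOOST", destination="/tmp/models",
                   ignore_prints=True, ignore_html=True)

    # Threshold calibration on toy scores; 0.35 is the optimum asserted above.
    y_true = np.array([0, 0, 1, 1])
    y_scores = np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
    optimal_threshold = calibrate_pipeline(y_true, y_scores)
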
From 82b70d2e732d1feec56abbdd7fa7150a6bb6d21e Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 16:33:58 +0200
Subject: [PATCH 08/41] refactor: Rename package
---
{pyro_risks => pyrorisks}/__init__.py | 0
{pyro_risks => pyrorisks}/platform_fwi/get_fwi_effis_score.py | 0
{pyro_risks => pyrorisks}/platform_fwi/main.py | 0
{pyro_risks => pyrorisks}/utils/__init__.py | 0
{pyro_risks => pyrorisks}/utils/fwi_helpers.py | 0
{pyro_risks => pyrorisks}/utils/s3.py | 0
{pyro_risks => pyrorisks}/version.py | 0
7 files changed, 0 insertions(+), 0 deletions(-)
rename {pyro_risks => pyrorisks}/__init__.py (100%)
rename {pyro_risks => pyrorisks}/platform_fwi/get_fwi_effis_score.py (100%)
rename {pyro_risks => pyrorisks}/platform_fwi/main.py (100%)
rename {pyro_risks => pyrorisks}/utils/__init__.py (100%)
rename {pyro_risks => pyrorisks}/utils/fwi_helpers.py (100%)
rename {pyro_risks => pyrorisks}/utils/s3.py (100%)
rename {pyro_risks => pyrorisks}/version.py (100%)
diff --git a/pyro_risks/__init__.py b/pyrorisks/__init__.py
similarity index 100%
rename from pyro_risks/__init__.py
rename to pyrorisks/__init__.py
diff --git a/pyro_risks/platform_fwi/get_fwi_effis_score.py b/pyrorisks/platform_fwi/get_fwi_effis_score.py
similarity index 100%
rename from pyro_risks/platform_fwi/get_fwi_effis_score.py
rename to pyrorisks/platform_fwi/get_fwi_effis_score.py
diff --git a/pyro_risks/platform_fwi/main.py b/pyrorisks/platform_fwi/main.py
similarity index 100%
rename from pyro_risks/platform_fwi/main.py
rename to pyrorisks/platform_fwi/main.py
diff --git a/pyro_risks/utils/__init__.py b/pyrorisks/utils/__init__.py
similarity index 100%
rename from pyro_risks/utils/__init__.py
rename to pyrorisks/utils/__init__.py
diff --git a/pyro_risks/utils/fwi_helpers.py b/pyrorisks/utils/fwi_helpers.py
similarity index 100%
rename from pyro_risks/utils/fwi_helpers.py
rename to pyrorisks/utils/fwi_helpers.py
diff --git a/pyro_risks/utils/s3.py b/pyrorisks/utils/s3.py
similarity index 100%
rename from pyro_risks/utils/s3.py
rename to pyrorisks/utils/s3.py
diff --git a/pyro_risks/version.py b/pyrorisks/version.py
similarity index 100%
rename from pyro_risks/version.py
rename to pyrorisks/version.py
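
Because the rename is path-only (100% similarity on every file), downstream code only needs its import root swapped:

    # Before patch 08:
    # from pyro_risks.utils import fwi_helpers, s3
    # After patch 08:
    from pyrorisks.utils import fwi_helpers, s3
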
From 7591d5854be0850aa3fad9bc8bfea064efbcc4c5 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 17:21:08 +0200
Subject: [PATCH 09/41] refactor: Add setup.py
---
setup.py | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 70 insertions(+)
create mode 100644 setup.py
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..3d61bc4
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2021-2022, Pyronear.
+
+# This program is licensed under the Apache License version 2.
+# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
+
+#!/usr/bin/python
+
+"""
+Package installation setup
+"""
+
+import os
+import subprocess
+from setuptools import setup, find_packages
+
+PACKAGE_NAME = "pyro_risks"
+VERSION = "0.0.1"
+
+
+with open("README.md") as f:
+ readme = f.read()
+
+requirements = [
+ "boto3==1.28.45",
+ "botocore==1.31.45",
+ "click==8.1.7",
+ "geopandas==0.13.2",
+ "pandas==2.1.0",
+ "python-dotenv==1.0.0",
+ "rasterio==1.3.9",
+ "requests==2.31.0",
+ "numpy==1.26.4",
+]
+
+setup(
+ name=PACKAGE_NAME,
+ version=VERSION,
+ author="Pyronear Contributors",
+ description="Pre-processing pipelines and models for wildfire forecasting and monitoring",
+ long_description=readme,
+ long_description_content_type="text/markdown",
+ url="https://github.com/pyronear/pyro-risks",
+ download_url="https://github.com/pyronear/pyro-risks/tags",
+ license="GPLv3",
+ entry_points={"console_scripts": ["pyrorisks = pyro_risks.main:main"]},
+ classifiers=[
+ "Development Status :: 2 - Pre-Alpha",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
+ "Natural Language :: English",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Topic :: Scientific/Engineering",
+ "Topic :: Scientific/Engineering :: Mathematics",
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ "Topic :: Software Development",
+ "Topic :: Software Development :: Libraries",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ keywords=["data science", "time series", "machine learning"],
+ packages=find_packages(exclude=("test",)),
+ zip_safe=True,
+ python_requires=">=3.6.0",
+ include_package_data=True,
+ install_requires=requirements,
+ package_data={"": ["LICENSE"]},
+)
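
For reference, the console_scripts entry point declared above wires a pyrorisks command to roughly the following (pyro_risks.main is assumed here; it is not shown in this series, and patch 10 drops the entry point again):

    # Rough runtime equivalent of "pyrorisks = pyro_risks.main:main"
    from pyro_risks.main import main

    if __name__ == "__main__":
        main()
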
From 5cb83260941b49fe64317d05386f7c1516f78869 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 18:09:30 +0200
Subject: [PATCH 10/41] refactor: Patch package version
---
docs/source/conf.py | 9 ++++-----
setup.py | 4 ++--
2 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 44e7639..7f2a3c1 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -20,7 +20,7 @@
# sys.path.insert(0, os.path.abspath('.'))
import os
import sys
-import pyro_risks
+import pyrorisks
import sphinx_rtd_theme
from datetime import datetime
@@ -29,17 +29,16 @@
from sphinx import addnodes
-sys.path.insert(0, os.path.abspath("../../pyro_risks"))
# -- Project information -----------------------------------------------------
master_doc = "index"
-project = "pyro_risks"
+project = "pyrorisks"
copyright = f"{datetime.now().year}, Pyronear Contributors"
author = "Pyronear Contributors"
# The full version, including alpha/beta/rc tags
-version = pyro_risks.__version__
-release = pyro_risks.__version__ + "-git"
+version = "0.1.0"
+release = "0.1.0" + "-git"
# -- General configuration ---------------------------------------------------
diff --git a/setup.py b/setup.py
index 3d61bc4..e13f1d7 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@
import subprocess
from setuptools import setup, find_packages
-PACKAGE_NAME = "pyro_risks"
+PACKAGE_NAME = "pyrorisks"
VERSION = "0.0.1"
@@ -30,6 +30,7 @@
"rasterio==1.3.9",
"requests==2.31.0",
"numpy==1.26.4",
+ "sphinx_rtd_theme",
]
setup(
@@ -42,7 +43,6 @@
url="https://github.com/pyronear/pyro-risks",
download_url="https://github.com/pyronear/pyro-risks/tags",
license="GPLv3",
- entry_points={"console_scripts": ["pyrorisks = pyro_risks.main:main"]},
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
From 6fb76a7051dce1d46b33d1fa2b65d9038071d271 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 18:10:37 +0200
Subject: [PATCH 11/41] refactor: Patch package version
---
docs/requirements.txt | 1 +
pyrorisks/version.py | 1 -
2 files changed, 1 insertion(+), 1 deletion(-)
delete mode 100644 pyrorisks/version.py
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 6459c56..0933295 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -2,3 +2,4 @@ sphinx
sphinx-rtd-theme==0.4.3
myst-parser==0.12.10
sphinx-autobuild==2020.9.1
+Jinja2<3.1
diff --git a/pyrorisks/version.py b/pyrorisks/version.py
deleted file mode 100644
index 8f02035..0000000
--- a/pyrorisks/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.0a0"
From baeb1418e2b0a03677e3c0a409c20de72aae68db Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 18:11:06 +0200
Subject: [PATCH 12/41] refactor: Deprecate module documentation
---
docs/source/index.rst | 13 +------
docs/source/modules/datasets/bdiff.rst | 7 ----
.../modules/datasets/copernicus_era.rst | 7 ----
.../modules/datasets/copernicus_fwi.rst | 7 ----
docs/source/modules/datasets/datasets.rst | 34 -------------------
docs/source/modules/datasets/masks.rst | 7 ----
docs/source/modules/datasets/modules.rst | 18 ----------
docs/source/modules/datasets/nasa_firms.rst | 7 ----
docs/source/modules/datasets/noaa_ncei.rst | 7 ----
docs/source/modules/models/modules.rst | 12 -------
docs/source/modules/models/pipelines.rst | 10 ------
docs/source/modules/models/transformers.rst | 11 ------
docs/source/modules/models/utils.rst | 7 ----
docs/source/modules/pipeline/evaluate.rst | 7 ----
docs/source/modules/pipeline/load.rst | 7 ----
docs/source/modules/pipeline/modules.rst | 13 -------
docs/source/modules/pipeline/predict.rst | 7 ----
docs/source/modules/pipeline/train.rst | 7 ----
18 files changed, 1 insertion(+), 187 deletions(-)
delete mode 100644 docs/source/modules/datasets/bdiff.rst
delete mode 100644 docs/source/modules/datasets/copernicus_era.rst
delete mode 100644 docs/source/modules/datasets/copernicus_fwi.rst
delete mode 100644 docs/source/modules/datasets/datasets.rst
delete mode 100644 docs/source/modules/datasets/masks.rst
delete mode 100644 docs/source/modules/datasets/modules.rst
delete mode 100644 docs/source/modules/datasets/nasa_firms.rst
delete mode 100644 docs/source/modules/datasets/noaa_ncei.rst
delete mode 100644 docs/source/modules/models/modules.rst
delete mode 100644 docs/source/modules/models/pipelines.rst
delete mode 100644 docs/source/modules/models/transformers.rst
delete mode 100644 docs/source/modules/models/utils.rst
delete mode 100644 docs/source/modules/pipeline/evaluate.rst
delete mode 100644 docs/source/modules/pipeline/load.rst
delete mode 100644 docs/source/modules/pipeline/modules.rst
delete mode 100644 docs/source/modules/pipeline/predict.rst
delete mode 100644 docs/source/modules/pipeline/train.rst
diff --git a/docs/source/index.rst b/docs/source/index.rst
index dfff1bf..9efb834 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -17,7 +17,7 @@ Pyronear Wildfire Risk Forecasting Documentation
The pyro-risks project aims at providing the pyronear-platform with a machine learning based wildfire forecasting capability.
-The :mod:`pyro_risks` package aggregates pre-processing pipelines and models for wildfire forecasting.
+The :mod:`pyrorisks` package aggregates pre-processing pipelines and models for wildfire forecasting.
.. toctree::
@@ -35,17 +35,6 @@ The :mod:`pyro_risks` package aggregates pre-processing pipelines and models for
overview/datasets/C3S-ECMWF_ERA5T
overview/datasets/C3S-ECMWF_ERA5LAND
-.. toctree::
- :maxdepth: 1
- :caption: Pyro Risks Package References
-
- modules/datasets/modules
- modules/models/modules
- modules/pipeline/modules
-
-.. automodule:: pyro_risks
- :members:
-
.. toctree::
:maxdepth: 1
diff --git a/docs/source/modules/datasets/bdiff.rst b/docs/source/modules/datasets/bdiff.rst
deleted file mode 100644
index 79cdd2e..0000000
--- a/docs/source/modules/datasets/bdiff.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-BDIFF module
-=============
-
-.. automodule:: pyro_risks.datasets.wildfires
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/source/modules/datasets/copernicus_era.rst b/docs/source/modules/datasets/copernicus_era.rst
deleted file mode 100644
index 5c4b16d..0000000
--- a/docs/source/modules/datasets/copernicus_era.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Copernicus - ERA5 module
-=========================
-
-.. automodule:: pyro_risks.datasets.ERA5
- :members:
- :undoc-members:
- :show-inheritance:
\ No newline at end of file
diff --git a/docs/source/modules/datasets/copernicus_fwi.rst b/docs/source/modules/datasets/copernicus_fwi.rst
deleted file mode 100644
index db02abe..0000000
--- a/docs/source/modules/datasets/copernicus_fwi.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Copernicus - FWI module
-========================
-
-.. automodule:: pyro_risks.datasets.fwi
- :members:
- :undoc-members:
- :show-inheritance:
\ No newline at end of file
diff --git a/docs/source/modules/datasets/datasets.rst b/docs/source/modules/datasets/datasets.rst
deleted file mode 100644
index 655d882..0000000
--- a/docs/source/modules/datasets/datasets.rst
+++ /dev/null
@@ -1,34 +0,0 @@
-
-pyro\_risks.datasets.datasets\_mergers module
----------------------------------------------
-
-.. automodule:: pyro_risks.datasets.datasets_mergers
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-pyro\_risks.datasets.era\_fwi\_viirs module
--------------------------------------------
-
-.. automodule:: pyro_risks.datasets.era_fwi_viirs
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-pyro\_risks.datasets.queries\_api module
-----------------------------------------
-
-.. automodule:: pyro_risks.datasets.queries_api
- :members:
- :undoc-members:
- :show-inheritance:
-
-pyro\_risks.datasets.utils module
----------------------------------
-
-.. automodule:: pyro_risks.datasets.utils
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/source/modules/datasets/masks.rst b/docs/source/modules/datasets/masks.rst
deleted file mode 100644
index ace1bbf..0000000
--- a/docs/source/modules/datasets/masks.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Masks module
-============
-
-.. automodule:: pyro_risks.datasets.masks
- :members:
- :undoc-members:
- :show-inheritance:
\ No newline at end of file
diff --git a/docs/source/modules/datasets/modules.rst b/docs/source/modules/datasets/modules.rst
deleted file mode 100644
index 3273892..0000000
--- a/docs/source/modules/datasets/modules.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-pyro\_risks.datasets
-====================
-
-The datasets subpackage contains modules defining functions for downloading and processing publicly available weather and fire datasets.
-
-The following modules are available:
-
-
-.. toctree::
- :maxdepth: 4
-
- masks
- nasa_firms
- copernicus_fwi
- copernicus_era
- noaa_ncei
- bdiff
- datasets
diff --git a/docs/source/modules/datasets/nasa_firms.rst b/docs/source/modules/datasets/nasa_firms.rst
deleted file mode 100644
index d669c95..0000000
--- a/docs/source/modules/datasets/nasa_firms.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-NASA FIRMS - Active Fire module
-===============================
-
-.. automodule:: pyro_risks.datasets.nasa_wildfires
- :members:
- :undoc-members:
- :show-inheritance:
\ No newline at end of file
diff --git a/docs/source/modules/datasets/noaa_ncei.rst b/docs/source/modules/datasets/noaa_ncei.rst
deleted file mode 100644
index a633a52..0000000
--- a/docs/source/modules/datasets/noaa_ncei.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-NOAA NCEI module
-=================
-
-.. automodule:: pyro_risks.datasets.weather
- :members:
- :undoc-members:
- :show-inheritance:
\ No newline at end of file
diff --git a/docs/source/modules/models/modules.rst b/docs/source/modules/models/modules.rst
deleted file mode 100644
index 29d5929..0000000
--- a/docs/source/modules/models/modules.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-pyro\_risks.models
-==================
-
-The models subpackage contains modules defining steps of the training and scoring sklearn pipelines.
-The following modules are available:
-
-.. toctree::
- :maxdepth: 4
-
- pipelines
- transformers
- utils
\ No newline at end of file
diff --git a/docs/source/modules/models/pipelines.rst b/docs/source/modules/models/pipelines.rst
deleted file mode 100644
index b66c7a5..0000000
--- a/docs/source/modules/models/pipelines.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-Pipelines module
-================
-
-The pipelines module contains the definitions of our scoring pipelines. The risk scoring pipelines are implemented using the `imbalanced-learn <https://imbalanced-learn.org/stable/>`_
-Pipeline, allowing sequences of **resampling, preprocessing and modeling steps to be defined as one estimator**. See the scikit-learn
-`Pipelines and composite estimators <https://scikit-learn.org/stable/modules/compose.html>`_ guide for more information.
-
-
-.. literalinclude:: ../../../../pyro_risks/models/pipelines.py
- :language: PYTHON
\ No newline at end of file
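
The page deleted above documented the central pattern: an imbalanced-learn Pipeline chaining resampling, preprocessing and modeling into one estimator. A generic illustration of that pattern (not the project's actual pipeline definition):

    from imblearn.over_sampling import SMOTE
    from imblearn.pipeline import Pipeline
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.impute import SimpleImputer

    pipeline = Pipeline(
        [
            ("imputer", SimpleImputer(strategy="median")),  # preprocessing
            ("smote", SMOTE()),                             # resampling, applied at fit time only
            ("clf", RandomForestClassifier()),              # modeling
        ]
    )
    # pipeline.fit(X, y) and pipeline.predict_proba(X) behave like a single estimator.
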
diff --git a/docs/source/modules/models/transformers.rst b/docs/source/modules/models/transformers.rst
deleted file mode 100644
index acd461c..0000000
--- a/docs/source/modules/models/transformers.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-Transformers module
-===================
-
-The transformers module contains the definitions of our scikit-learn compliant preprocessing steps, i.e. transformers.
-Transformers are estimators supporting the transform and/or fit_transform methods; see the scikit-learn `Dataset transformations <https://scikit-learn.org/stable/data_transforms.html>`_ guide,
-and the scikit-lego and feature-engine packages for collections of transformers.
-
-.. automodule:: pyro_risks.models.transformers
- :members:
- :undoc-members:
- :show-inheritance:
\ No newline at end of file
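
The transformers page likewise described scikit-learn-compliant transformers, i.e. estimators exposing fit and transform. A minimal example of the convention (illustrative only, not one of the project's transformers):

    from sklearn.base import BaseEstimator, TransformerMixin

    class ColumnDropper(BaseEstimator, TransformerMixin):
        """Drop the given columns from a DataFrame."""

        def __init__(self, columns):
            self.columns = columns

        def fit(self, X, y=None):
            return self  # stateless: nothing to learn

        def transform(self, X):
            return X.drop(columns=self.columns)
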
diff --git a/docs/source/modules/models/utils.rst b/docs/source/modules/models/utils.rst
deleted file mode 100644
index 31c9fb5..0000000
--- a/docs/source/modules/models/utils.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Utils module
-============
-
-.. automodule:: pyro_risks.models.utils
- :members:
- :undoc-members:
- :show-inheritance:
\ No newline at end of file
diff --git a/docs/source/modules/pipeline/evaluate.rst b/docs/source/modules/pipeline/evaluate.rst
deleted file mode 100644
index b03a7ab..0000000
--- a/docs/source/modules/pipeline/evaluate.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Evaluate module
-===============
-
-.. automodule:: pyro_risks.pipeline.evaluate
- :members:
- :undoc-members:
- :show-inheritance:
\ No newline at end of file
diff --git a/docs/source/modules/pipeline/load.rst b/docs/source/modules/pipeline/load.rst
deleted file mode 100644
index 0de015b..0000000
--- a/docs/source/modules/pipeline/load.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Load module
-===============
-
-.. automodule:: pyro_risks.pipeline.load
- :members:
- :undoc-members:
- :show-inheritance:
\ No newline at end of file
diff --git a/docs/source/modules/pipeline/modules.rst b/docs/source/modules/pipeline/modules.rst
deleted file mode 100644
index d787d2e..0000000
--- a/docs/source/modules/pipeline/modules.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-pyro\_risks.pipeline
-====================
-
-The pipeline subpackage contains modules defining the helper functions for each stage of the model lifecycle.
-The following modules are available:
-
-.. toctree::
- :maxdepth: 4
-
- load
- train
- evaluate
- predict
\ No newline at end of file
diff --git a/docs/source/modules/pipeline/predict.rst b/docs/source/modules/pipeline/predict.rst
deleted file mode 100644
index e967c71..0000000
--- a/docs/source/modules/pipeline/predict.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Predict module
-==============
-
-.. automodule:: pyro_risks.pipeline.predict
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/source/modules/pipeline/train.rst b/docs/source/modules/pipeline/train.rst
deleted file mode 100644
index 91be723..0000000
--- a/docs/source/modules/pipeline/train.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Train module
-============
-
-.. automodule:: pyro_risks.pipeline.train
- :members:
- :undoc-members:
- :show-inheritance:
\ No newline at end of file
From dd927147e8f5e033e5d2c278e23cf8cf490b701b Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 18:24:36 +0200
Subject: [PATCH 13/41] chore: Add Mock predictor
---
app/api/inference.py | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/app/api/inference.py b/app/api/inference.py
index 990153f..a813252 100644
--- a/app/api/inference.py
+++ b/app/api/inference.py
@@ -3,10 +3,13 @@
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-from pyro_risks.models.predict import PyroRisk
-
__all__ = ["predictor"]
-predictor = PyroRisk(which="RF")
+class Mock:
+ def predict(self, date):
+ return {"01": 0.5, "02": 0.5}
+
+
+predictor = Mock()
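
The stub keeps the predictor's call surface intact, so consuming route code does not change; a sketch of a consumer (the route function itself is an assumption, not shown here):

    from app.api.inference import predictor

    def get_risk(date: str) -> dict:
        # With the Mock in place this always returns {"01": 0.5, "02": 0.5}.
        return predictor.predict(date)
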
From 2abe79c21e72e95938613aaf53e050aa8722df90 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 18:25:44 +0200
Subject: [PATCH 14/41] fix: Update dockerfile package name
---
Dockerfile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Dockerfile b/Dockerfile
index a4d914b..0d6dc9f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,7 +12,7 @@ COPY ./requirements.txt requirements.txt
COPY ./requirements-app.txt /usr/src/app/requirements-app.txt
COPY ./setup.py setup.py
COPY ./README.md README.md
-COPY ./pyro_risks pyro_risks
+COPY ./pyrorisks pyrorisks
# install dependencies
RUN apt-get update && \
From 58719d1e2bcb9f260aabad7b203d339f03f84e26 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 23:40:13 +0200
Subject: [PATCH 15/41] refactor: Remove setup.py
---
requirements-app.txt | 3 --
requirements.txt | 24 ---------------
setup.py | 70 --------------------------------------------
3 files changed, 97 deletions(-)
delete mode 100644 requirements-app.txt
delete mode 100644 requirements.txt
delete mode 100644 setup.py
diff --git a/requirements-app.txt b/requirements-app.txt
deleted file mode 100644
index 281e37b..0000000
--- a/requirements-app.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-fastapi==0.61.1
-uvicorn>=0.11.1
-pyro_risks
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index ba82c03..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-pandas>=1.1.4
-geopandas>=0.8.1
-Rtree>=0.9.4
-Shapely>=1.7.1
-netCDF4>=1.5.4
-requests>=2.24.0
-xlrd==1.2.0
-boto3==1.26.106
-
-cdsapi
-
-numpy==1.26.4
-xarray>=0.16.1
-scipy>=1.5.4
-
-scikit-learn>=0.23.2
-imbalanced-learn>=0.7.0
-xgboost==1.2.1
-cdsapi==0.4.0
-python-dotenv>=0.15.0
-
-plot-metric==0.0.6
-dvc>=2.0.5
-dvc[gdrive]>=2.0.5
diff --git a/setup.py b/setup.py
deleted file mode 100644
index e13f1d7..0000000
--- a/setup.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (C) 2021-2022, Pyronear.
-
-# This program is licensed under the Apache License version 2.
-# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-#!/usr/bin/python
-
-"""
-Package installation setup
-"""
-
-import os
-import subprocess
-from setuptools import setup, find_packages
-
-PACKAGE_NAME = "pyrorisks"
-VERSION = "0.0.1"
-
-
-with open("README.md") as f:
- readme = f.read()
-
-requirements = [
- "boto3==1.28.45",
- "botocore==1.31.45",
- "click==8.1.7",
- "geopandas==0.13.2",
- "pandas==2.1.0",
- "python-dotenv==1.0.0",
- "rasterio==1.3.9",
- "requests==2.31.0",
- "numpy==1.26.4",
- "sphinx_rtd_theme",
-]
-
-setup(
- name=PACKAGE_NAME,
- version=VERSION,
- author="Pyronear Contributors",
- description="Pre-processing pipelines and models for wildfire forecasting and monitoring",
- long_description=readme,
- long_description_content_type="text/markdown",
- url="https://github.com/pyronear/pyro-risks",
- download_url="https://github.com/pyronear/pyro-risks/tags",
- license="GPLv3",
- classifiers=[
- "Development Status :: 2 - Pre-Alpha",
- "Intended Audience :: Developers",
- "Intended Audience :: Science/Research",
- "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
- "Natural Language :: English",
- "Operating System :: OS Independent",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
- "Topic :: Scientific/Engineering",
- "Topic :: Scientific/Engineering :: Mathematics",
- "Topic :: Scientific/Engineering :: Artificial Intelligence",
- "Topic :: Software Development",
- "Topic :: Software Development :: Libraries",
- "Topic :: Software Development :: Libraries :: Python Modules",
- ],
- keywords=["data science", "time series", "machine learning"],
- packages=find_packages(exclude=("test",)),
- zip_safe=True,
- python_requires=">=3.6.0",
- include_package_data=True,
- install_requires=requirements,
- package_data={"": ["LICENSE"]},
-)
From 34265a99bc1e2bcc1db5ed23c970fc077c7dd26f Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 2 Jul 2024 23:41:55 +0200
Subject: [PATCH 16/41] refactor: Remove config tooling
---
.coveragerc | 2 --
.flake8 | 4 ---
mypy.ini | 76 -----------------------------------------------------
3 files changed, 82 deletions(-)
delete mode 100644 .coveragerc
delete mode 100644 .flake8
delete mode 100644 mypy.ini
diff --git a/.coveragerc b/.coveragerc
deleted file mode 100644
index a044130..0000000
--- a/.coveragerc
+++ /dev/null
@@ -1,2 +0,0 @@
-[run]
-source = pyro_risks
\ No newline at end of file
diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 0385d47..0000000
--- a/.flake8
+++ /dev/null
@@ -1,4 +0,0 @@
-[flake8]
-max-line-length = 120
-ignore = F401, E402, E265, F403, W503, W504, F821, W605
-exclude = .git, venv*, docs, build
\ No newline at end of file
diff --git a/mypy.ini b/mypy.ini
deleted file mode 100644
index 18a36f4..0000000
--- a/mypy.ini
+++ /dev/null
@@ -1,76 +0,0 @@
-[mypy]
-files = pyro_risks/*.py
-show_error_codes = True
-pretty = True
-
-[mypy-dotenv]
-ignore_missing_imports = True
-
-[mypy-xarray]
-ignore_missing_imports = True
-
-[mypy-pandas]
-ignore_missing_imports = True
-
-[mypy-sklearn.ensemble]
-ignore_missing_imports = True
-
-[mypy-sklearn.model_selection]
-ignore_missing_imports = True
-
-[mypy-sklearn.metrics]
-ignore_missing_imports = True
-
-[mypy-sklearn.base]
-ignore_missing_imports = True
-
-[mypy-sklearn.impute]
-ignore_missing_imports = True
-
-[mypy-sklearn.utils]
-ignore_missing_imports = True
-
-[mypy-xgboost]
-ignore_missing_imports = True
-
-[mypy-numpy]
-ignore_missing_imports = True
-
-[mypy-geopandas]
-ignore_missing_imports = True
-
-[mypy-cdsapi]
-ignore_missing_imports = True
-
-[mypy-urllib3]
-ignore_missing_imports = True
-
-[mypy-joblib]
-ignore_missing_imports = True
-
-[mypy-imblearn]
-ignore_missing_imports = True
-
-[mypy-imblearn.pipeline]
-ignore_missing_imports = True
-
-[mypy-matplotlib]
-ignore_missing_imports = True
-
-[mypy-matplotlib.pyplot]
-ignore_missing_imports = True
-
-[mypy-plot_metric.functions]
-ignore_missing_imports = True
-
-[mypy-shapely]
-ignore_missing_imports = True
-
-[mypy-shapely.geometry]
-ignore_missing_imports = True
-
-[mypy-scipy]
-ignore_missing_imports = True
-
-[mypy-netCDF4]
-ignore_missing_imports = True
From b6060f9979bc63d9eae113e6834e59545016d744 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Wed, 3 Jul 2024 00:04:33 +0200
Subject: [PATCH 17/41] refactor: Add poetry
---
poetry.lock | 1304 ++++++++++++++++++++++++++++++++++++++++++++++++
poetry.toml | 2 +
pyproject.toml | 102 ++++
3 files changed, 1408 insertions(+)
create mode 100644 poetry.lock
create mode 100644 poetry.toml
create mode 100644 pyproject.toml
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 0000000..cf193cf
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,1304 @@
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+
+[[package]]
+name = "annotated-types"
+version = "0.7.0"
+description = "Reusable constraint types to use with typing.Annotated"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
+ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
+]
+
+[[package]]
+name = "anyio"
+version = "3.7.1"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"},
+ {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"},
+]
+
+[package.dependencies]
+exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"]
+test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (<0.22)"]
+
+[[package]]
+name = "black"
+version = "23.12.1"
+description = "The uncompromising code formatter."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "black-23.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0aaf6041986767a5e0ce663c7a2f0e9eaf21e6ff87a5f95cbf3675bfd4c41d2"},
+ {file = "black-23.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c88b3711d12905b74206227109272673edce0cb29f27e1385f33b0163c414bba"},
+ {file = "black-23.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a920b569dc6b3472513ba6ddea21f440d4b4c699494d2e972a1753cdc25df7b0"},
+ {file = "black-23.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:3fa4be75ef2a6b96ea8d92b1587dd8cb3a35c7e3d51f0738ced0781c3aa3a5a3"},
+ {file = "black-23.12.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8d4df77958a622f9b5a4c96edb4b8c0034f8434032ab11077ec6c56ae9f384ba"},
+ {file = "black-23.12.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:602cfb1196dc692424c70b6507593a2b29aac0547c1be9a1d1365f0d964c353b"},
+ {file = "black-23.12.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c4352800f14be5b4864016882cdba10755bd50805c95f728011bcb47a4afd59"},
+ {file = "black-23.12.1-cp311-cp311-win_amd64.whl", hash = "sha256:0808494f2b2df923ffc5723ed3c7b096bd76341f6213989759287611e9837d50"},
+ {file = "black-23.12.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:25e57fd232a6d6ff3f4478a6fd0580838e47c93c83eaf1ccc92d4faf27112c4e"},
+ {file = "black-23.12.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2d9e13db441c509a3763a7a3d9a49ccc1b4e974a47be4e08ade2a228876500ec"},
+ {file = "black-23.12.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d1bd9c210f8b109b1762ec9fd36592fdd528485aadb3f5849b2740ef17e674e"},
+ {file = "black-23.12.1-cp312-cp312-win_amd64.whl", hash = "sha256:ae76c22bde5cbb6bfd211ec343ded2163bba7883c7bc77f6b756a1049436fbb9"},
+ {file = "black-23.12.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1fa88a0f74e50e4487477bc0bb900c6781dbddfdfa32691e780bf854c3b4a47f"},
+ {file = "black-23.12.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a4d6a9668e45ad99d2f8ec70d5c8c04ef4f32f648ef39048d010b0689832ec6d"},
+ {file = "black-23.12.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b18fb2ae6c4bb63eebe5be6bd869ba2f14fd0259bda7d18a46b764d8fb86298a"},
+ {file = "black-23.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:c04b6d9d20e9c13f43eee8ea87d44156b8505ca8a3c878773f68b4e4812a421e"},
+ {file = "black-23.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e1b38b3135fd4c025c28c55ddfc236b05af657828a8a6abe5deec419a0b7055"},
+ {file = "black-23.12.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4f0031eaa7b921db76decd73636ef3a12c942ed367d8c3841a0739412b260a54"},
+ {file = "black-23.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97e56155c6b737854e60a9ab1c598ff2533d57e7506d97af5481141671abf3ea"},
+ {file = "black-23.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:dd15245c8b68fe2b6bd0f32c1556509d11bb33aec9b5d0866dd8e2ed3dba09c2"},
+ {file = "black-23.12.1-py3-none-any.whl", hash = "sha256:78baad24af0f033958cad29731e27363183e140962595def56423e626f4bee3e"},
+ {file = "black-23.12.1.tar.gz", hash = "sha256:4ce3ef14ebe8d9509188014d96af1c456a910d5b5cbf434a09fef7e024b3d0d5"},
+]
+
+[package.dependencies]
+click = ">=8.0.0"
+mypy-extensions = ">=0.4.3"
+packaging = ">=22.0"
+pathspec = ">=0.9.0"
+platformdirs = ">=2"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""}
+
+[package.extras]
+colorama = ["colorama (>=0.4.3)"]
+d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"]
+jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
+uvloop = ["uvloop (>=0.15.2)"]
+
+[[package]]
+name = "boto3"
+version = "1.34.138"
+description = "The AWS SDK for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "boto3-1.34.138-py3-none-any.whl", hash = "sha256:81518aa95fad71279411fb5c94da4b4a554a5d53fc876faca62b7b5c8737f1cb"},
+ {file = "boto3-1.34.138.tar.gz", hash = "sha256:f79c15e33eb7706f197d98d828b193cf0891966682ad3ec5e900f6f9e7362e35"},
+]
+
+[package.dependencies]
+botocore = ">=1.34.138,<1.35.0"
+jmespath = ">=0.7.1,<2.0.0"
+s3transfer = ">=0.10.0,<0.11.0"
+
+[package.extras]
+crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
+
+[[package]]
+name = "botocore"
+version = "1.34.138"
+description = "Low-level, data-driven core of boto 3."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "botocore-1.34.138-py3-none-any.whl", hash = "sha256:84e96a954c39a6f09cae4ea95b2ae582b5ae01b5040c92507b60509c9be5377a"},
+ {file = "botocore-1.34.138.tar.gz", hash = "sha256:f558bbea96c4a4abbaeeedc477dabb00902311ba1ca6327974a6819b9f384920"},
+]
+
+[package.dependencies]
+jmespath = ">=0.7.1,<2.0.0"
+python-dateutil = ">=2.1,<3.0.0"
+urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""}
+
+[package.extras]
+crt = ["awscrt (==0.20.11)"]
+
+[[package]]
+name = "certifi"
+version = "2024.6.2"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2024.6.2-py3-none-any.whl", hash = "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56"},
+ {file = "certifi-2024.6.2.tar.gz", hash = "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516"},
+]
+
+[[package]]
+name = "cfgv"
+version = "3.4.0"
+description = "Validate configuration and produce human readable error messages."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"},
+ {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.3.2"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"},
+ {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
+]
+
+[[package]]
+name = "click"
+version = "8.1.7"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+ {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "coverage"
+version = "7.5.4"
+description = "Code coverage measurement for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "coverage-7.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6cfb5a4f556bb51aba274588200a46e4dd6b505fb1a5f8c5ae408222eb416f99"},
+ {file = "coverage-7.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2174e7c23e0a454ffe12267a10732c273243b4f2d50d07544a91198f05c48f47"},
+ {file = "coverage-7.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2214ee920787d85db1b6a0bd9da5f8503ccc8fcd5814d90796c2f2493a2f4d2e"},
+ {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1137f46adb28e3813dec8c01fefadcb8c614f33576f672962e323b5128d9a68d"},
+ {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b385d49609f8e9efc885790a5a0e89f2e3ae042cdf12958b6034cc442de428d3"},
+ {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b4a474f799456e0eb46d78ab07303286a84a3140e9700b9e154cfebc8f527016"},
+ {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5cd64adedf3be66f8ccee418473c2916492d53cbafbfcff851cbec5a8454b136"},
+ {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e564c2cf45d2f44a9da56f4e3a26b2236504a496eb4cb0ca7221cd4cc7a9aca9"},
+ {file = "coverage-7.5.4-cp310-cp310-win32.whl", hash = "sha256:7076b4b3a5f6d2b5d7f1185fde25b1e54eb66e647a1dfef0e2c2bfaf9b4c88c8"},
+ {file = "coverage-7.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:018a12985185038a5b2bcafab04ab833a9a0f2c59995b3cec07e10074c78635f"},
+ {file = "coverage-7.5.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:db14f552ac38f10758ad14dd7b983dbab424e731588d300c7db25b6f89e335b5"},
+ {file = "coverage-7.5.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3257fdd8e574805f27bb5342b77bc65578e98cbc004a92232106344053f319ba"},
+ {file = "coverage-7.5.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a6612c99081d8d6134005b1354191e103ec9705d7ba2754e848211ac8cacc6b"},
+ {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d45d3cbd94159c468b9b8c5a556e3f6b81a8d1af2a92b77320e887c3e7a5d080"},
+ {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed550e7442f278af76d9d65af48069f1fb84c9f745ae249c1a183c1e9d1b025c"},
+ {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7a892be37ca35eb5019ec85402c3371b0f7cda5ab5056023a7f13da0961e60da"},
+ {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8192794d120167e2a64721d88dbd688584675e86e15d0569599257566dec9bf0"},
+ {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:820bc841faa502e727a48311948e0461132a9c8baa42f6b2b84a29ced24cc078"},
+ {file = "coverage-7.5.4-cp311-cp311-win32.whl", hash = "sha256:6aae5cce399a0f065da65c7bb1e8abd5c7a3043da9dceb429ebe1b289bc07806"},
+ {file = "coverage-7.5.4-cp311-cp311-win_amd64.whl", hash = "sha256:d2e344d6adc8ef81c5a233d3a57b3c7d5181f40e79e05e1c143da143ccb6377d"},
+ {file = "coverage-7.5.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:54317c2b806354cbb2dc7ac27e2b93f97096912cc16b18289c5d4e44fc663233"},
+ {file = "coverage-7.5.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:042183de01f8b6d531e10c197f7f0315a61e8d805ab29c5f7b51a01d62782747"},
+ {file = "coverage-7.5.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6bb74ed465d5fb204b2ec41d79bcd28afccf817de721e8a807d5141c3426638"},
+ {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3d45ff86efb129c599a3b287ae2e44c1e281ae0f9a9bad0edc202179bcc3a2e"},
+ {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5013ed890dc917cef2c9f765c4c6a8ae9df983cd60dbb635df8ed9f4ebc9f555"},
+ {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1014fbf665fef86cdfd6cb5b7371496ce35e4d2a00cda501cf9f5b9e6fced69f"},
+ {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3684bc2ff328f935981847082ba4fdc950d58906a40eafa93510d1b54c08a66c"},
+ {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:581ea96f92bf71a5ec0974001f900db495488434a6928a2ca7f01eee20c23805"},
+ {file = "coverage-7.5.4-cp312-cp312-win32.whl", hash = "sha256:73ca8fbc5bc622e54627314c1a6f1dfdd8db69788f3443e752c215f29fa87a0b"},
+ {file = "coverage-7.5.4-cp312-cp312-win_amd64.whl", hash = "sha256:cef4649ec906ea7ea5e9e796e68b987f83fa9a718514fe147f538cfeda76d7a7"},
+ {file = "coverage-7.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdd31315fc20868c194130de9ee6bfd99755cc9565edff98ecc12585b90be882"},
+ {file = "coverage-7.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:02ff6e898197cc1e9fa375581382b72498eb2e6d5fc0b53f03e496cfee3fac6d"},
+ {file = "coverage-7.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d05c16cf4b4c2fc880cb12ba4c9b526e9e5d5bb1d81313d4d732a5b9fe2b9d53"},
+ {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5986ee7ea0795a4095ac4d113cbb3448601efca7f158ec7f7087a6c705304e4"},
+ {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df54843b88901fdc2f598ac06737f03d71168fd1175728054c8f5a2739ac3e4"},
+ {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ab73b35e8d109bffbda9a3e91c64e29fe26e03e49addf5b43d85fc426dde11f9"},
+ {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:aea072a941b033813f5e4814541fc265a5c12ed9720daef11ca516aeacd3bd7f"},
+ {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:16852febd96acd953b0d55fc842ce2dac1710f26729b31c80b940b9afcd9896f"},
+ {file = "coverage-7.5.4-cp38-cp38-win32.whl", hash = "sha256:8f894208794b164e6bd4bba61fc98bf6b06be4d390cf2daacfa6eca0a6d2bb4f"},
+ {file = "coverage-7.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:e2afe743289273209c992075a5a4913e8d007d569a406ffed0bd080ea02b0633"},
+ {file = "coverage-7.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b95c3a8cb0463ba9f77383d0fa8c9194cf91f64445a63fc26fb2327e1e1eb088"},
+ {file = "coverage-7.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d7564cc09dd91b5a6001754a5b3c6ecc4aba6323baf33a12bd751036c998be4"},
+ {file = "coverage-7.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44da56a2589b684813f86d07597fdf8a9c6ce77f58976727329272f5a01f99f7"},
+ {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e16f3d6b491c48c5ae726308e6ab1e18ee830b4cdd6913f2d7f77354b33f91c8"},
+ {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbc5958cb471e5a5af41b0ddaea96a37e74ed289535e8deca404811f6cb0bc3d"},
+ {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a04e990a2a41740b02d6182b498ee9796cf60eefe40cf859b016650147908029"},
+ {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ddbd2f9713a79e8e7242d7c51f1929611e991d855f414ca9996c20e44a895f7c"},
+ {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b1ccf5e728ccf83acd313c89f07c22d70d6c375a9c6f339233dcf792094bcbf7"},
+ {file = "coverage-7.5.4-cp39-cp39-win32.whl", hash = "sha256:56b4eafa21c6c175b3ede004ca12c653a88b6f922494b023aeb1e836df953ace"},
+ {file = "coverage-7.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:65e528e2e921ba8fd67d9055e6b9f9e34b21ebd6768ae1c1723f4ea6ace1234d"},
+ {file = "coverage-7.5.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:79b356f3dd5b26f3ad23b35c75dbdaf1f9e2450b6bcefc6d0825ea0aa3f86ca5"},
+ {file = "coverage-7.5.4.tar.gz", hash = "sha256:a44963520b069e12789d0faea4e9fdb1e410cdc4aab89d94f7f55cbb7fef0353"},
+]
+
+[package.extras]
+toml = ["tomli"]
+
+[[package]]
+name = "distlib"
+version = "0.3.8"
+description = "Distribution utilities"
+optional = false
+python-versions = "*"
+files = [
+ {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"},
+ {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"},
+]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.2.1"
+description = "Backport of PEP 654 (exception groups)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"},
+ {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
+
+[[package]]
+name = "fastapi"
+version = "0.103.2"
+description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "fastapi-0.103.2-py3-none-any.whl", hash = "sha256:3270de872f0fe9ec809d4bd3d4d890c6d5cc7b9611d721d6438f9dacc8c4ef2e"},
+ {file = "fastapi-0.103.2.tar.gz", hash = "sha256:75a11f6bfb8fc4d2bec0bd710c2d5f2829659c0e8c0afd5560fdda6ce25ec653"},
+]
+
+[package.dependencies]
+anyio = ">=3.7.1,<4.0.0"
+pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0"
+starlette = ">=0.27.0,<0.28.0"
+typing-extensions = ">=4.5.0"
+
+[package.extras]
+all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
+
+[[package]]
+name = "filelock"
+version = "3.15.4"
+description = "A platform independent file lock."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"},
+ {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"]
+typing = ["typing-extensions (>=4.8)"]
+
+[[package]]
+name = "geopandas"
+version = "1.0.1"
+description = "Geographic pandas extensions"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "geopandas-1.0.1-py3-none-any.whl", hash = "sha256:01e147d9420cc374d26f51fc23716ac307f32b49406e4bd8462c07e82ed1d3d6"},
+ {file = "geopandas-1.0.1.tar.gz", hash = "sha256:b8bf70a5534588205b7a56646e2082fb1de9a03599651b3d80c99ea4c2ca08ab"},
+]
+
+[package.dependencies]
+numpy = ">=1.22"
+packaging = "*"
+pandas = ">=1.4.0"
+pyogrio = ">=0.7.2"
+pyproj = ">=3.3.0"
+shapely = ">=2.0.0"
+
+[package.extras]
+all = ["GeoAlchemy2", "SQLAlchemy (>=1.3)", "folium", "geopy", "mapclassify", "matplotlib (>=3.5.0)", "psycopg-binary (>=3.1.0)", "pyarrow (>=8.0.0)", "xyzservices"]
+dev = ["black", "codecov", "pre-commit", "pytest (>=3.1.0)", "pytest-cov", "pytest-xdist"]
+
+[[package]]
+name = "h11"
+version = "0.14.0"
+description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
+ {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+]
+
+[[package]]
+name = "identify"
+version = "2.5.36"
+description = "File identification library for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "identify-2.5.36-py2.py3-none-any.whl", hash = "sha256:37d93f380f4de590500d9dba7db359d0d3da95ffe7f9de1753faa159e71e7dfa"},
+ {file = "identify-2.5.36.tar.gz", hash = "sha256:e5e00f54165f9047fbebeb4a560f9acfb8af4c88232be60a488e9b68d122745d"},
+]
+
+[package.extras]
+license = ["ukkonen"]
+
+[[package]]
+name = "idna"
+version = "3.7"
+description = "Internationalized Domain Names in Applications (IDNA)"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
+ {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
+]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "jmespath"
+version = "1.0.1"
+description = "JSON Matching Expressions"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"},
+ {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"},
+]
+
+[[package]]
+name = "mypy"
+version = "1.10.1"
+description = "Optional static typing for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"},
+ {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"},
+ {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"},
+ {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"},
+ {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"},
+ {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"},
+ {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"},
+ {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"},
+ {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"},
+ {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"},
+ {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"},
+ {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"},
+ {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"},
+ {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"},
+ {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"},
+ {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"},
+ {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"},
+ {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"},
+ {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"},
+ {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"},
+ {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"},
+ {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"},
+ {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"},
+ {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"},
+ {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"},
+ {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"},
+ {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"},
+]
+
+[package.dependencies]
+mypy-extensions = ">=1.0.0"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+typing-extensions = ">=4.1.0"
+
+[package.extras]
+dmypy = ["psutil (>=4.0)"]
+install-types = ["pip"]
+mypyc = ["setuptools (>=50)"]
+reports = ["lxml"]
+
+[[package]]
+name = "mypy-extensions"
+version = "1.0.0"
+description = "Type system extensions for programs checked with the mypy type checker."
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
+ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
+]
+
+[[package]]
+name = "nodeenv"
+version = "1.9.1"
+description = "Node.js virtual environment builder"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"},
+ {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"},
+]
+
+[[package]]
+name = "numpy"
+version = "2.0.0"
+description = "Fundamental package for array computing in Python"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "numpy-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:04494f6ec467ccb5369d1808570ae55f6ed9b5809d7f035059000a37b8d7e86f"},
+ {file = "numpy-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2635dbd200c2d6faf2ef9a0d04f0ecc6b13b3cad54f7c67c61155138835515d2"},
+ {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:0a43f0974d501842866cc83471bdb0116ba0dffdbaac33ec05e6afed5b615238"},
+ {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:8d83bb187fb647643bd56e1ae43f273c7f4dbcdf94550d7938cfc32566756514"},
+ {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79e843d186c8fb1b102bef3e2bc35ef81160ffef3194646a7fdd6a73c6b97196"},
+ {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d7696c615765091cc5093f76fd1fa069870304beaccfd58b5dcc69e55ef49c1"},
+ {file = "numpy-2.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b4c76e3d4c56f145d41b7b6751255feefae92edbc9a61e1758a98204200f30fc"},
+ {file = "numpy-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd3a644e4807e73b4e1867b769fbf1ce8c5d80e7caaef0d90dcdc640dfc9787"},
+ {file = "numpy-2.0.0-cp310-cp310-win32.whl", hash = "sha256:cee6cc0584f71adefe2c908856ccc98702baf95ff80092e4ca46061538a2ba98"},
+ {file = "numpy-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:ed08d2703b5972ec736451b818c2eb9da80d66c3e84aed1deeb0c345fefe461b"},
+ {file = "numpy-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad0c86f3455fbd0de6c31a3056eb822fc939f81b1618f10ff3406971893b62a5"},
+ {file = "numpy-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7f387600d424f91576af20518334df3d97bc76a300a755f9a8d6e4f5cadd289"},
+ {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:34f003cb88b1ba38cb9a9a4a3161c1604973d7f9d5552c38bc2f04f829536609"},
+ {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:b6f6a8f45d0313db07d6d1d37bd0b112f887e1369758a5419c0370ba915b3871"},
+ {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f64641b42b2429f56ee08b4f427a4d2daf916ec59686061de751a55aafa22e4"},
+ {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7039a136017eaa92c1848152827e1424701532ca8e8967fe480fe1569dae581"},
+ {file = "numpy-2.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:46e161722e0f619749d1cd892167039015b2c2817296104487cd03ed4a955995"},
+ {file = "numpy-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0e50842b2295ba8414c8c1d9d957083d5dfe9e16828b37de883f51fc53c4016f"},
+ {file = "numpy-2.0.0-cp311-cp311-win32.whl", hash = "sha256:2ce46fd0b8a0c947ae047d222f7136fc4d55538741373107574271bc00e20e8f"},
+ {file = "numpy-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd6acc766814ea6443628f4e6751d0da6593dae29c08c0b2606164db026970c"},
+ {file = "numpy-2.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:354f373279768fa5a584bac997de6a6c9bc535c482592d7a813bb0c09be6c76f"},
+ {file = "numpy-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4d2f62e55a4cd9c58c1d9a1c9edaedcd857a73cb6fda875bf79093f9d9086f85"},
+ {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:1e72728e7501a450288fc8e1f9ebc73d90cfd4671ebbd631f3e7857c39bd16f2"},
+ {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:84554fc53daa8f6abf8e8a66e076aff6ece62de68523d9f665f32d2fc50fd66e"},
+ {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73aafd1afca80afecb22718f8700b40ac7cab927b8abab3c3e337d70e10e5a2"},
+ {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49d9f7d256fbc804391a7f72d4a617302b1afac1112fac19b6c6cec63fe7fe8a"},
+ {file = "numpy-2.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0ec84b9ba0654f3b962802edc91424331f423dcf5d5f926676e0150789cb3d95"},
+ {file = "numpy-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:feff59f27338135776f6d4e2ec7aeeac5d5f7a08a83e80869121ef8164b74af9"},
+ {file = "numpy-2.0.0-cp312-cp312-win32.whl", hash = "sha256:c5a59996dc61835133b56a32ebe4ef3740ea5bc19b3983ac60cc32be5a665d54"},
+ {file = "numpy-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:a356364941fb0593bb899a1076b92dfa2029f6f5b8ba88a14fd0984aaf76d0df"},
+ {file = "numpy-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e61155fae27570692ad1d327e81c6cf27d535a5d7ef97648a17d922224b216de"},
+ {file = "numpy-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4554eb96f0fd263041baf16cf0881b3f5dafae7a59b1049acb9540c4d57bc8cb"},
+ {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:903703372d46bce88b6920a0cd86c3ad82dae2dbef157b5fc01b70ea1cfc430f"},
+ {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:3e8e01233d57639b2e30966c63d36fcea099d17c53bf424d77f088b0f4babd86"},
+ {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cde1753efe513705a0c6d28f5884e22bdc30438bf0085c5c486cdaff40cd67a"},
+ {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821eedb7165ead9eebdb569986968b541f9908979c2da8a4967ecac4439bae3d"},
+ {file = "numpy-2.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a1712c015831da583b21c5bfe15e8684137097969c6d22e8316ba66b5baabe4"},
+ {file = "numpy-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9c27f0946a3536403efb0e1c28def1ae6730a72cd0d5878db38824855e3afc44"},
+ {file = "numpy-2.0.0-cp39-cp39-win32.whl", hash = "sha256:63b92c512d9dbcc37f9d81b123dec99fdb318ba38c8059afc78086fe73820275"},
+ {file = "numpy-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:3f6bed7f840d44c08ebdb73b1825282b801799e325bcbdfa6bc5c370e5aecc65"},
+ {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9416a5c2e92ace094e9f0082c5fd473502c91651fb896bc17690d6fc475128d6"},
+ {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:17067d097ed036636fa79f6a869ac26df7db1ba22039d962422506640314933a"},
+ {file = "numpy-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ecb5b0582cd125f67a629072fed6f83562d9dd04d7e03256c9829bdec027ad"},
+ {file = "numpy-2.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cef04d068f5fb0518a77857953193b6bb94809a806bd0a14983a8f12ada060c9"},
+ {file = "numpy-2.0.0.tar.gz", hash = "sha256:cf5d1c9e6837f8af9f92b6bd3e86d513cdc11f60fd62185cc49ec7d1aba34864"},
+]
+
+[[package]]
+name = "packaging"
+version = "24.1"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
+ {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
+]
+
+[[package]]
+name = "pandas"
+version = "2.2.2"
+description = "Powerful data structures for data analysis, time series, and statistics"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"},
+ {file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"},
+ {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08"},
+ {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0"},
+ {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51"},
+ {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99"},
+ {file = "pandas-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772"},
+ {file = "pandas-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288"},
+ {file = "pandas-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151"},
+ {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b"},
+ {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee"},
+ {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db"},
+ {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1"},
+ {file = "pandas-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24"},
+ {file = "pandas-2.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef"},
+ {file = "pandas-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce"},
+ {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad"},
+ {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad"},
+ {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76"},
+ {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32"},
+ {file = "pandas-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23"},
+ {file = "pandas-2.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2"},
+ {file = "pandas-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9057e6aa78a584bc93a13f0a9bf7e753a5e9770a30b4d758b8d5f2a62a9433cd"},
+ {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863"},
+ {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921"},
+ {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a"},
+ {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92fd6b027924a7e178ac202cfbe25e53368db90d56872d20ffae94b96c7acc57"},
+ {file = "pandas-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:640cef9aa381b60e296db324337a554aeeb883ead99dc8f6c18e81a93942f5f4"},
+ {file = "pandas-2.2.2.tar.gz", hash = "sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54"},
+]
+
+[package.dependencies]
+numpy = [
+ {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
+ {version = ">=1.22.4", markers = "python_version < \"3.11\""},
+ {version = ">=1.23.2", markers = "python_version == \"3.11\""},
+]
+python-dateutil = ">=2.8.2"
+pytz = ">=2020.1"
+tzdata = ">=2022.7"
+
+[package.extras]
+all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"]
+aws = ["s3fs (>=2022.11.0)"]
+clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"]
+compression = ["zstandard (>=0.19.0)"]
+computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"]
+consortium-standard = ["dataframe-api-compat (>=0.1.7)"]
+excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"]
+feather = ["pyarrow (>=10.0.1)"]
+fss = ["fsspec (>=2022.11.0)"]
+gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"]
+hdf5 = ["tables (>=3.8.0)"]
+html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"]
+mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"]
+output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"]
+parquet = ["pyarrow (>=10.0.1)"]
+performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"]
+plot = ["matplotlib (>=3.6.3)"]
+postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"]
+pyarrow = ["pyarrow (>=10.0.1)"]
+spss = ["pyreadstat (>=1.2.0)"]
+sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"]
+test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"]
+xml = ["lxml (>=4.9.2)"]
+
+[[package]]
+name = "pathspec"
+version = "0.12.1"
+description = "Utility library for gitignore style pattern matching of file paths."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
+ {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
+]
+
+[[package]]
+name = "platformdirs"
+version = "4.2.2"
+description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"},
+ {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"]
+type = ["mypy (>=1.8)"]
+
+[[package]]
+name = "pluggy"
+version = "1.5.0"
+description = "plugin and hook calling mechanisms for python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
+ {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
+[[package]]
+name = "pre-commit"
+version = "3.7.1"
+description = "A framework for managing and maintaining multi-language pre-commit hooks."
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "pre_commit-3.7.1-py2.py3-none-any.whl", hash = "sha256:fae36fd1d7ad7d6a5a1c0b0d5adb2ed1a3bda5a21bf6c3e5372073d7a11cd4c5"},
+ {file = "pre_commit-3.7.1.tar.gz", hash = "sha256:8ca3ad567bc78a4972a3f1a477e94a79d4597e8140a6e0b651c5e33899c3654a"},
+]
+
+[package.dependencies]
+cfgv = ">=2.0.0"
+identify = ">=1.0.0"
+nodeenv = ">=0.11.1"
+pyyaml = ">=5.1"
+virtualenv = ">=20.10.0"
+
+[[package]]
+name = "pydantic"
+version = "2.8.0"
+description = "Data validation using Python type hints"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pydantic-2.8.0-py3-none-any.whl", hash = "sha256:ead4f3a1e92386a734ca1411cb25d94147cf8778ed5be6b56749047676d6364e"},
+ {file = "pydantic-2.8.0.tar.gz", hash = "sha256:d970ffb9d030b710795878940bd0489842c638e7252fc4a19c3ae2f7da4d6141"},
+]
+
+[package.dependencies]
+annotated-types = ">=0.4.0"
+pydantic-core = "2.20.0"
+typing-extensions = [
+ {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
+ {version = ">=4.6.1", markers = "python_version < \"3.13\""},
+]
+
+[package.extras]
+email = ["email-validator (>=2.0.0)"]
+
+[[package]]
+name = "pydantic-core"
+version = "2.20.0"
+description = "Core functionality for Pydantic validation and serialization"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pydantic_core-2.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e9dcd7fb34f7bfb239b5fa420033642fff0ad676b765559c3737b91f664d4fa9"},
+ {file = "pydantic_core-2.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:649a764d9b0da29816889424697b2a3746963ad36d3e0968784ceed6e40c6355"},
+ {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7701df088d0b05f3460f7ba15aec81ac8b0fb5690367dfd072a6c38cf5b7fdb5"},
+ {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab760f17c3e792225cdaef31ca23c0aea45c14ce80d8eff62503f86a5ab76bff"},
+ {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb1ad5b4d73cde784cf64580166568074f5ccd2548d765e690546cff3d80937d"},
+ {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b81ec2efc04fc1dbf400647d4357d64fb25543bae38d2d19787d69360aad21c9"},
+ {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4a9732a5cad764ba37f3aa873dccb41b584f69c347a57323eda0930deec8e10"},
+ {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6dc85b9e10cc21d9c1055f15684f76fa4facadddcb6cd63abab702eb93c98943"},
+ {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:21d9f7e24f63fdc7118e6cc49defaab8c1d27570782f7e5256169d77498cf7c7"},
+ {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8b315685832ab9287e6124b5d74fc12dda31e6421d7f6b08525791452844bc2d"},
+ {file = "pydantic_core-2.20.0-cp310-none-win32.whl", hash = "sha256:c3dc8ec8b87c7ad534c75b8855168a08a7036fdb9deeeed5705ba9410721c84d"},
+ {file = "pydantic_core-2.20.0-cp310-none-win_amd64.whl", hash = "sha256:85770b4b37bb36ef93a6122601795231225641003e0318d23c6233c59b424279"},
+ {file = "pydantic_core-2.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:58e251bb5a5998f7226dc90b0b753eeffa720bd66664eba51927c2a7a2d5f32c"},
+ {file = "pydantic_core-2.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:78d584caac52c24240ef9ecd75de64c760bbd0e20dbf6973631815e3ef16ef8b"},
+ {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5084ec9721f82bef5ff7c4d1ee65e1626783abb585f8c0993833490b63fe1792"},
+ {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6d0f52684868db7c218437d260e14d37948b094493f2646f22d3dda7229bbe3f"},
+ {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1def125d59a87fe451212a72ab9ed34c118ff771e5473fef4f2f95d8ede26d75"},
+ {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b34480fd6778ab356abf1e9086a4ced95002a1e195e8d2fd182b0def9d944d11"},
+ {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d42669d319db366cb567c3b444f43caa7ffb779bf9530692c6f244fc635a41eb"},
+ {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53b06aea7a48919a254b32107647be9128c066aaa6ee6d5d08222325f25ef175"},
+ {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1f038156b696a1c39d763b2080aeefa87ddb4162c10aa9fabfefffc3dd8180fa"},
+ {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3f0f3a4a23717280a5ee3ac4fb1f81d6fde604c9ec5100f7f6f987716bb8c137"},
+ {file = "pydantic_core-2.20.0-cp311-none-win32.whl", hash = "sha256:316fe7c3fec017affd916a0c83d6f1ec697cbbbdf1124769fa73328e7907cc2e"},
+ {file = "pydantic_core-2.20.0-cp311-none-win_amd64.whl", hash = "sha256:2d06a7fa437f93782e3f32d739c3ec189f82fca74336c08255f9e20cea1ed378"},
+ {file = "pydantic_core-2.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:d6f8c49657f3eb7720ed4c9b26624063da14937fc94d1812f1e04a2204db3e17"},
+ {file = "pydantic_core-2.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad1bd2f377f56fec11d5cfd0977c30061cd19f4fa199bf138b200ec0d5e27eeb"},
+ {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed741183719a5271f97d93bbcc45ed64619fa38068aaa6e90027d1d17e30dc8d"},
+ {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d82e5ed3a05f2dcb89c6ead2fd0dbff7ac09bc02c1b4028ece2d3a3854d049ce"},
+ {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2ba34a099576234671f2e4274e5bc6813b22e28778c216d680eabd0db3f7dad"},
+ {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:879ae6bb08a063b3e1b7ac8c860096d8fd6b48dd9b2690b7f2738b8c835e744b"},
+ {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0eefc7633a04c0694340aad91fbfd1986fe1a1e0c63a22793ba40a18fcbdc8"},
+ {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73deadd6fd8a23e2f40b412b3ac617a112143c8989a4fe265050fd91ba5c0608"},
+ {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:35681445dc85446fb105943d81ae7569aa7e89de80d1ca4ac3229e05c311bdb1"},
+ {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0f6dd3612a3b9f91f2e63924ea18a4476656c6d01843ca20a4c09e00422195af"},
+ {file = "pydantic_core-2.20.0-cp312-none-win32.whl", hash = "sha256:7e37b6bb6e90c2b8412b06373c6978d9d81e7199a40e24a6ef480e8acdeaf918"},
+ {file = "pydantic_core-2.20.0-cp312-none-win_amd64.whl", hash = "sha256:7d4df13d1c55e84351fab51383520b84f490740a9f1fec905362aa64590b7a5d"},
+ {file = "pydantic_core-2.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:d43e7ab3b65e4dc35a7612cfff7b0fd62dce5bc11a7cd198310b57f39847fd6c"},
+ {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b6a24d7b5893392f2b8e3b7a0031ae3b14c6c1942a4615f0d8794fdeeefb08b"},
+ {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2f13c3e955a087c3ec86f97661d9f72a76e221281b2262956af381224cfc243"},
+ {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72432fd6e868c8d0a6849869e004b8bcae233a3c56383954c228316694920b38"},
+ {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d70a8ff2d4953afb4cbe6211f17268ad29c0b47e73d3372f40e7775904bc28fc"},
+ {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e49524917b8d3c2f42cd0d2df61178e08e50f5f029f9af1f402b3ee64574392"},
+ {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4f0f71653b1c1bad0350bc0b4cc057ab87b438ff18fa6392533811ebd01439c"},
+ {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:16197e6f4fdecb9892ed2436e507e44f0a1aa2cff3b9306d1c879ea2f9200997"},
+ {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:763602504bf640b3ded3bba3f8ed8a1cc2fc6a87b8d55c1c5689f428c49c947e"},
+ {file = "pydantic_core-2.20.0-cp313-none-win32.whl", hash = "sha256:a3f243f318bd9523277fa123b3163f4c005a3e8619d4b867064de02f287a564d"},
+ {file = "pydantic_core-2.20.0-cp313-none-win_amd64.whl", hash = "sha256:03aceaf6a5adaad3bec2233edc5a7905026553916615888e53154807e404545c"},
+ {file = "pydantic_core-2.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d6f2d8b8da1f03f577243b07bbdd3412eee3d37d1f2fd71d1513cbc76a8c1239"},
+ {file = "pydantic_core-2.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a272785a226869416c6b3c1b7e450506152d3844207331f02f27173562c917e0"},
+ {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efbb412d55a4ffe73963fed95c09ccb83647ec63b711c4b3752be10a56f0090b"},
+ {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e4f46189d8740561b43655263a41aac75ff0388febcb2c9ec4f1b60a0ec12f3"},
+ {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3df115f4a3c8c5e4d5acf067d399c6466d7e604fc9ee9acbe6f0c88a0c3cf"},
+ {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a340d2bdebe819d08f605e9705ed551c3feb97e4fd71822d7147c1e4bdbb9508"},
+ {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:616b9c2f882393d422ba11b40e72382fe975e806ad693095e9a3b67c59ea6150"},
+ {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25c46bb2ff6084859bbcfdf4f1a63004b98e88b6d04053e8bf324e115398e9e7"},
+ {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:23425eccef8f2c342f78d3a238c824623836c6c874d93c726673dbf7e56c78c0"},
+ {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:52527e8f223ba29608d999d65b204676398009725007c9336651c2ec2d93cffc"},
+ {file = "pydantic_core-2.20.0-cp38-none-win32.whl", hash = "sha256:1c3c5b7f70dd19a6845292b0775295ea81c61540f68671ae06bfe4421b3222c2"},
+ {file = "pydantic_core-2.20.0-cp38-none-win_amd64.whl", hash = "sha256:8093473d7b9e908af1cef30025609afc8f5fd2a16ff07f97440fd911421e4432"},
+ {file = "pydantic_core-2.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ee7785938e407418795e4399b2bf5b5f3cf6cf728077a7f26973220d58d885cf"},
+ {file = "pydantic_core-2.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e75794883d635071cf6b4ed2a5d7a1e50672ab7a051454c76446ef1ebcdcc91"},
+ {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:344e352c96e53b4f56b53d24728217c69399b8129c16789f70236083c6ceb2ac"},
+ {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:978d4123ad1e605daf1ba5e01d4f235bcf7b6e340ef07e7122e8e9cfe3eb61ab"},
+ {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c05eaf6c863781eb834ab41f5963604ab92855822a2062897958089d1335dad"},
+ {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bc7e43b4a528ffca8c9151b6a2ca34482c2fdc05e6aa24a84b7f475c896fc51d"},
+ {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:658287a29351166510ebbe0a75c373600cc4367a3d9337b964dada8d38bcc0f4"},
+ {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1dacf660d6de692fe351e8c806e7efccf09ee5184865893afbe8e59be4920b4a"},
+ {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3e147fc6e27b9a487320d78515c5f29798b539179f7777018cedf51b7749e4f4"},
+ {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c867230d715a3dd1d962c8d9bef0d3168994ed663e21bf748b6e3a529a129aab"},
+ {file = "pydantic_core-2.20.0-cp39-none-win32.whl", hash = "sha256:22b813baf0dbf612752d8143a2dbf8e33ccb850656b7850e009bad2e101fc377"},
+ {file = "pydantic_core-2.20.0-cp39-none-win_amd64.whl", hash = "sha256:3a7235b46c1bbe201f09b6f0f5e6c36b16bad3d0532a10493742f91fbdc8035f"},
+ {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cafde15a6f7feaec2f570646e2ffc5b73412295d29134a29067e70740ec6ee20"},
+ {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2aec8eeea0b08fd6bc2213d8e86811a07491849fd3d79955b62d83e32fa2ad5f"},
+ {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840200827984f1c4e114008abc2f5ede362d6e11ed0b5931681884dd41852ff1"},
+ {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ea1d8b7df522e5ced34993c423c3bf3735c53df8b2a15688a2f03a7d678800"},
+ {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5b8376a867047bf08910573deb95d3c8dfb976eb014ee24f3b5a61ccc5bee1b"},
+ {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d08264b4460326cefacc179fc1411304d5af388a79910832835e6f641512358b"},
+ {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7a3639011c2e8a9628466f616ed7fb413f30032b891898e10895a0a8b5857d6c"},
+ {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:05e83ce2f7eba29e627dd8066aa6c4c0269b2d4f889c0eba157233a353053cea"},
+ {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:603a843fea76a595c8f661cd4da4d2281dff1e38c4a836a928eac1a2f8fe88e4"},
+ {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ac76f30d5d3454f4c28826d891fe74d25121a346c69523c9810ebba43f3b1cec"},
+ {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e3b1d4b1b3f6082849f9b28427ef147a5b46a6132a3dbaf9ca1baa40c88609"},
+ {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2761f71faed820e25ec62eacba670d1b5c2709bb131a19fcdbfbb09884593e5a"},
+ {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a0586cddbf4380e24569b8a05f234e7305717cc8323f50114dfb2051fcbce2a3"},
+ {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b8c46a8cf53e849eea7090f331ae2202cd0f1ceb090b00f5902c423bd1e11805"},
+ {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b4a085bd04af7245e140d1b95619fe8abb445a3d7fdf219b3f80c940853268ef"},
+ {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:116b326ac82c8b315e7348390f6d30bcfe6e688a7d3f1de50ff7bcc2042a23c2"},
+ {file = "pydantic_core-2.20.0.tar.gz", hash = "sha256:366be8e64e0cb63d87cf79b4e1765c0703dd6313c729b22e7b9e378db6b96877"},
+]
+
+[package.dependencies]
+typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
+
+[[package]]
+name = "pyogrio"
+version = "0.9.0"
+description = "Vectorized spatial vector file format I/O using GDAL/OGR"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pyogrio-0.9.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:1a495ca4fb77c69595747dd688f8f17bb7d2ea9cd86603aa71c7fc98cc8b4174"},
+ {file = "pyogrio-0.9.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:6dc94a67163218581c7df275223488ac9b31dc582ccd756da607c3338908566c"},
+ {file = "pyogrio-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e38c3c6d37cf2cc969407e4d051dcb507cfd948eb26c7b0840c4f7d7d4a71bd4"},
+ {file = "pyogrio-0.9.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:f47c9b6818cc0f420015b672d5dcc488530a5ee63e5ba35a184957b21ea3922a"},
+ {file = "pyogrio-0.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:fb04bd80964428491951766452f0071b0bc37c7d38c45ef02502dbd83e5d74a0"},
+ {file = "pyogrio-0.9.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:f5d80eb846be4fc4e642cbedc1ed0c143e8d241653382ecc76a7620bbd2a5c3a"},
+ {file = "pyogrio-0.9.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:2f2ec57ab74785db9c2bf47c0a6731e5175595a13f8253f06fa84136adb310a9"},
+ {file = "pyogrio-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a289584da6df7ca318947301fe0ba9177e7f863f63110e087c80ac5f3658de8"},
+ {file = "pyogrio-0.9.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:13642608a1cd67797ae8b5d792b0518d8ef3eb76506c8232ab5eaa1ea1159dff"},
+ {file = "pyogrio-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:9440466c0211ac81f3417f274da5903f15546b486f76b2f290e74a56aaf0e737"},
+ {file = "pyogrio-0.9.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2e98913fa183f7597c609e774820a149e9329fd2a0f8d33978252fbd00ae87e6"},
+ {file = "pyogrio-0.9.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:f8bf193269ea9d347ac3ddada960a59f1ab2e4a5c009be95dc70e6505346b2fc"},
+ {file = "pyogrio-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f964002d445521ad5b8e732a6b5ef0e2d2be7fe566768e5075c1d71398da64a"},
+ {file = "pyogrio-0.9.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:083351b258b3e08b6c6085dac560bd321b68de5cb4a66229095da68d5f3d696b"},
+ {file = "pyogrio-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:796e4f6a4e769b2eb6fea9a10546ea4bdee16182d1e29802b4d6349363c3c1d7"},
+ {file = "pyogrio-0.9.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:7fcafed24371fe6e23bcf5abebbb29269f8d79915f1dd818ac85453657ea714a"},
+ {file = "pyogrio-0.9.0-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:30cbeeaedb9bced7012487e7438919aa0c7dfba18ac3d4315182b46eb3139b9d"},
+ {file = "pyogrio-0.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4da0b9deb380bd9a200fee13182c4f95b02b4c554c923e2e0032f32aaf1439ed"},
+ {file = "pyogrio-0.9.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:4e0f90a6c3771ee1f1fea857778b4b6a1b64000d851b819f435f9091b3c38c60"},
+ {file = "pyogrio-0.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:959022f3ad04053f8072dc9a2ad110c46edd9e4f92352061ba835fc91df3ca96"},
+ {file = "pyogrio-0.9.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:2829615cf58b1b24a9f96fea42abedaa1a800dd351c67374cc2f6341138608f3"},
+ {file = "pyogrio-0.9.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:17420febc17651876d5140b54b24749aa751d482b5f9ef6267b8053e6e962876"},
+ {file = "pyogrio-0.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a2fcaa269031dbbc8ebd91243c6452c5d267d6df939c008ab7533413c9cf92d"},
+ {file = "pyogrio-0.9.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:019731a856a9abfe909e86f50eb13f8362f6742337caf757c54b7c8acfe75b89"},
+ {file = "pyogrio-0.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:d668cb10f2bf6ccd7c402f91e8b06290722dd09dbe265ae95b2c13db29ebeba0"},
+ {file = "pyogrio-0.9.0.tar.gz", hash = "sha256:6a6fa2e8cf95b3d4a7c0fac48bce6e5037579e28d3eb33b53349d6e11f15e5a8"},
+]
+
+[package.dependencies]
+certifi = "*"
+numpy = "*"
+packaging = "*"
+
+[package.extras]
+benchmark = ["pytest-benchmark"]
+dev = ["Cython"]
+geopandas = ["geopandas"]
+test = ["pytest", "pytest-cov"]
+
+[[package]]
+name = "pyproj"
+version = "3.6.1"
+description = "Python interface to PROJ (cartographic projections and coordinate transformations library)"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "pyproj-3.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ab7aa4d9ff3c3acf60d4b285ccec134167a948df02347585fdd934ebad8811b4"},
+ {file = "pyproj-3.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4bc0472302919e59114aa140fd7213c2370d848a7249d09704f10f5b062031fe"},
+ {file = "pyproj-3.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5279586013b8d6582e22b6f9e30c49796966770389a9d5b85e25a4223286cd3f"},
+ {file = "pyproj-3.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fafd1f3eb421694857f254a9bdbacd1eb22fc6c24ca74b136679f376f97d35"},
+ {file = "pyproj-3.6.1-cp310-cp310-win32.whl", hash = "sha256:c41e80ddee130450dcb8829af7118f1ab69eaf8169c4bf0ee8d52b72f098dc2f"},
+ {file = "pyproj-3.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:db3aedd458e7f7f21d8176f0a1d924f1ae06d725228302b872885a1c34f3119e"},
+ {file = "pyproj-3.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ebfbdbd0936e178091309f6cd4fcb4decd9eab12aa513cdd9add89efa3ec2882"},
+ {file = "pyproj-3.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:447db19c7efad70ff161e5e46a54ab9cc2399acebb656b6ccf63e4bc4a04b97a"},
+ {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7e13c40183884ec7f94eb8e0f622f08f1d5716150b8d7a134de48c6110fee85"},
+ {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65ad699e0c830e2b8565afe42bd58cc972b47d829b2e0e48ad9638386d994915"},
+ {file = "pyproj-3.6.1-cp311-cp311-win32.whl", hash = "sha256:8b8acc31fb8702c54625f4d5a2a6543557bec3c28a0ef638778b7ab1d1772132"},
+ {file = "pyproj-3.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:38a3361941eb72b82bd9a18f60c78b0df8408416f9340521df442cebfc4306e2"},
+ {file = "pyproj-3.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1e9fbaf920f0f9b4ee62aab832be3ae3968f33f24e2e3f7fbb8c6728ef1d9746"},
+ {file = "pyproj-3.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6d227a865356f225591b6732430b1d1781e946893789a609bb34f59d09b8b0f8"},
+ {file = "pyproj-3.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83039e5ae04e5afc974f7d25ee0870a80a6bd6b7957c3aca5613ccbe0d3e72bf"},
+ {file = "pyproj-3.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb059ba3bced6f6725961ba758649261d85ed6ce670d3e3b0a26e81cf1aa8d"},
+ {file = "pyproj-3.6.1-cp312-cp312-win32.whl", hash = "sha256:2d6ff73cc6dbbce3766b6c0bce70ce070193105d8de17aa2470009463682a8eb"},
+ {file = "pyproj-3.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:7a27151ddad8e1439ba70c9b4b2b617b290c39395fa9ddb7411ebb0eb86d6fb0"},
+ {file = "pyproj-3.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ba1f9b03d04d8cab24d6375609070580a26ce76eaed54631f03bab00a9c737b"},
+ {file = "pyproj-3.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18faa54a3ca475bfe6255156f2f2874e9a1c8917b0004eee9f664b86ccc513d3"},
+ {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd43bd9a9b9239805f406fd82ba6b106bf4838d9ef37c167d3ed70383943ade1"},
+ {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50100b2726a3ca946906cbaa789dd0749f213abf0cbb877e6de72ca7aa50e1ae"},
+ {file = "pyproj-3.6.1-cp39-cp39-win32.whl", hash = "sha256:9274880263256f6292ff644ca92c46d96aa7e57a75c6df3f11d636ce845a1877"},
+ {file = "pyproj-3.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:36b64c2cb6ea1cc091f329c5bd34f9c01bb5da8c8e4492c709bda6a09f96808f"},
+ {file = "pyproj-3.6.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd93c1a0c6c4aedc77c0fe275a9f2aba4d59b8acf88cebfc19fe3c430cfabf4f"},
+ {file = "pyproj-3.6.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6420ea8e7d2a88cb148b124429fba8cd2e0fae700a2d96eab7083c0928a85110"},
+ {file = "pyproj-3.6.1.tar.gz", hash = "sha256:44aa7c704c2b7d8fb3d483bbf75af6cb2350d30a63b144279a09b75fead501bf"},
+]
+
+[package.dependencies]
+certifi = "*"
+
+[[package]]
+name = "pytest"
+version = "7.4.4"
+description = "pytest: simple powerful testing with Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
+ {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<2.0"
+tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
+[[package]]
+name = "python-dateutil"
+version = "2.9.0.post0"
+description = "Extensions to the standard Python datetime module"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+files = [
+ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
+ {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
+]
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
+name = "pytz"
+version = "2024.1"
+description = "World timezone definitions, modern and historical"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"},
+ {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"},
+]
+
+[[package]]
+name = "pyyaml"
+version = "6.0.1"
+description = "YAML parser and emitter for Python"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
+ {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
+ {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
+ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
+]
+
+[[package]]
+name = "requests"
+version = "2.32.3"
+description = "Python HTTP for Humans."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
+ {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "ruff"
+version = "0.0.292"
+description = "An extremely fast Python linter, written in Rust."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "ruff-0.0.292-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:02f29db018c9d474270c704e6c6b13b18ed0ecac82761e4fcf0faa3728430c96"},
+ {file = "ruff-0.0.292-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:69654e564342f507edfa09ee6897883ca76e331d4bbc3676d8a8403838e9fade"},
+ {file = "ruff-0.0.292-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c3c91859a9b845c33778f11902e7b26440d64b9d5110edd4e4fa1726c41e0a4"},
+ {file = "ruff-0.0.292-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f4476f1243af2d8c29da5f235c13dca52177117935e1f9393f9d90f9833f69e4"},
+ {file = "ruff-0.0.292-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be8eb50eaf8648070b8e58ece8e69c9322d34afe367eec4210fdee9a555e4ca7"},
+ {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:9889bac18a0c07018aac75ef6c1e6511d8411724d67cb879103b01758e110a81"},
+ {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bdfabd4334684a4418b99b3118793f2c13bb67bf1540a769d7816410402a205"},
+ {file = "ruff-0.0.292-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7c77c53bfcd75dbcd4d1f42d6cabf2485d2e1ee0678da850f08e1ab13081a8"},
+ {file = "ruff-0.0.292-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e087b24d0d849c5c81516ec740bf4fd48bf363cfb104545464e0fca749b6af9"},
+ {file = "ruff-0.0.292-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f160b5ec26be32362d0774964e218f3fcf0a7da299f7e220ef45ae9e3e67101a"},
+ {file = "ruff-0.0.292-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ac153eee6dd4444501c4bb92bff866491d4bfb01ce26dd2fff7ca472c8df9ad0"},
+ {file = "ruff-0.0.292-py3-none-musllinux_1_2_i686.whl", hash = "sha256:87616771e72820800b8faea82edd858324b29bb99a920d6aa3d3949dd3f88fb0"},
+ {file = "ruff-0.0.292-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b76deb3bdbea2ef97db286cf953488745dd6424c122d275f05836c53f62d4016"},
+ {file = "ruff-0.0.292-py3-none-win32.whl", hash = "sha256:e854b05408f7a8033a027e4b1c7f9889563dd2aca545d13d06711e5c39c3d003"},
+ {file = "ruff-0.0.292-py3-none-win_amd64.whl", hash = "sha256:f27282bedfd04d4c3492e5c3398360c9d86a295be00eccc63914438b4ac8a83c"},
+ {file = "ruff-0.0.292-py3-none-win_arm64.whl", hash = "sha256:7f67a69c8f12fbc8daf6ae6d36705037bde315abf8b82b6e1f4c9e74eb750f68"},
+ {file = "ruff-0.0.292.tar.gz", hash = "sha256:1093449e37dd1e9b813798f6ad70932b57cf614e5c2b5c51005bf67d55db33ac"},
+]
+
+[[package]]
+name = "s3transfer"
+version = "0.10.2"
+description = "An Amazon S3 Transfer Manager"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "s3transfer-0.10.2-py3-none-any.whl", hash = "sha256:eca1c20de70a39daee580aef4986996620f365c4e0fda6a86100231d62f1bf69"},
+ {file = "s3transfer-0.10.2.tar.gz", hash = "sha256:0711534e9356d3cc692fdde846b4a1e4b0cb6519971860796e6bc4c7aea00ef6"},
+]
+
+[package.dependencies]
+botocore = ">=1.33.2,<2.0a.0"
+
+[package.extras]
+crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"]
+
+[[package]]
+name = "shapely"
+version = "2.0.4"
+description = "Manipulation and analysis of geometric objects"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "shapely-2.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:011b77153906030b795791f2fdfa2d68f1a8d7e40bce78b029782ade3afe4f2f"},
+ {file = "shapely-2.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9831816a5d34d5170aa9ed32a64982c3d6f4332e7ecfe62dc97767e163cb0b17"},
+ {file = "shapely-2.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5c4849916f71dc44e19ed370421518c0d86cf73b26e8656192fcfcda08218fbd"},
+ {file = "shapely-2.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:841f93a0e31e4c64d62ea570d81c35de0f6cea224568b2430d832967536308e6"},
+ {file = "shapely-2.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b4431f522b277c79c34b65da128029a9955e4481462cbf7ebec23aab61fc58"},
+ {file = "shapely-2.0.4-cp310-cp310-win32.whl", hash = "sha256:92a41d936f7d6743f343be265ace93b7c57f5b231e21b9605716f5a47c2879e7"},
+ {file = "shapely-2.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:30982f79f21bb0ff7d7d4a4e531e3fcaa39b778584c2ce81a147f95be1cd58c9"},
+ {file = "shapely-2.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de0205cb21ad5ddaef607cda9a3191eadd1e7a62a756ea3a356369675230ac35"},
+ {file = "shapely-2.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7d56ce3e2a6a556b59a288771cf9d091470116867e578bebced8bfc4147fbfd7"},
+ {file = "shapely-2.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:58b0ecc505bbe49a99551eea3f2e8a9b3b24b3edd2a4de1ac0dc17bc75c9ec07"},
+ {file = "shapely-2.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:790a168a808bd00ee42786b8ba883307c0e3684ebb292e0e20009588c426da47"},
+ {file = "shapely-2.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4310b5494271e18580d61022c0857eb85d30510d88606fa3b8314790df7f367d"},
+ {file = "shapely-2.0.4-cp311-cp311-win32.whl", hash = "sha256:63f3a80daf4f867bd80f5c97fbe03314348ac1b3b70fb1c0ad255a69e3749879"},
+ {file = "shapely-2.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:c52ed79f683f721b69a10fb9e3d940a468203f5054927215586c5d49a072de8d"},
+ {file = "shapely-2.0.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5bbd974193e2cc274312da16b189b38f5f128410f3377721cadb76b1e8ca5328"},
+ {file = "shapely-2.0.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:41388321a73ba1a84edd90d86ecc8bfed55e6a1e51882eafb019f45895ec0f65"},
+ {file = "shapely-2.0.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0776c92d584f72f1e584d2e43cfc5542c2f3dd19d53f70df0900fda643f4bae6"},
+ {file = "shapely-2.0.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c75c98380b1ede1cae9a252c6dc247e6279403fae38c77060a5e6186c95073ac"},
+ {file = "shapely-2.0.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3e700abf4a37b7b8b90532fa6ed5c38a9bfc777098bc9fbae5ec8e618ac8f30"},
+ {file = "shapely-2.0.4-cp312-cp312-win32.whl", hash = "sha256:4f2ab0faf8188b9f99e6a273b24b97662194160cc8ca17cf9d1fb6f18d7fb93f"},
+ {file = "shapely-2.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:03152442d311a5e85ac73b39680dd64a9892fa42bb08fd83b3bab4fe6999bfa0"},
+ {file = "shapely-2.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:994c244e004bc3cfbea96257b883c90a86e8cbd76e069718eb4c6b222a56f78b"},
+ {file = "shapely-2.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05ffd6491e9e8958b742b0e2e7c346635033d0a5f1a0ea083547fcc854e5d5cf"},
+ {file = "shapely-2.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbdc1140a7d08faa748256438291394967aa54b40009f54e8d9825e75ef6113"},
+ {file = "shapely-2.0.4-cp37-cp37m-win32.whl", hash = "sha256:5af4cd0d8cf2912bd95f33586600cac9c4b7c5053a036422b97cfe4728d2eb53"},
+ {file = "shapely-2.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:464157509ce4efa5ff285c646a38b49f8c5ef8d4b340f722685b09bb033c5ccf"},
+ {file = "shapely-2.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:489c19152ec1f0e5c5e525356bcbf7e532f311bff630c9b6bc2db6f04da6a8b9"},
+ {file = "shapely-2.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b79bbd648664aa6f44ef018474ff958b6b296fed5c2d42db60078de3cffbc8aa"},
+ {file = "shapely-2.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:674d7baf0015a6037d5758496d550fc1946f34bfc89c1bf247cabdc415d7747e"},
+ {file = "shapely-2.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6cd4ccecc5ea5abd06deeaab52fcdba372f649728050c6143cc405ee0c166679"},
+ {file = "shapely-2.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb5cdcbbe3080181498931b52a91a21a781a35dcb859da741c0345c6402bf00c"},
+ {file = "shapely-2.0.4-cp38-cp38-win32.whl", hash = "sha256:55a38dcd1cee2f298d8c2ebc60fc7d39f3b4535684a1e9e2f39a80ae88b0cea7"},
+ {file = "shapely-2.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:ec555c9d0db12d7fd777ba3f8b75044c73e576c720a851667432fabb7057da6c"},
+ {file = "shapely-2.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9103abd1678cb1b5f7e8e1af565a652e036844166c91ec031eeb25c5ca8af0"},
+ {file = "shapely-2.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:263bcf0c24d7a57c80991e64ab57cba7a3906e31d2e21b455f493d4aab534aaa"},
+ {file = "shapely-2.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ddf4a9bfaac643e62702ed662afc36f6abed2a88a21270e891038f9a19bc08fc"},
+ {file = "shapely-2.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:485246fcdb93336105c29a5cfbff8a226949db37b7473c89caa26c9bae52a242"},
+ {file = "shapely-2.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8de4578e838a9409b5b134a18ee820730e507b2d21700c14b71a2b0757396acc"},
+ {file = "shapely-2.0.4-cp39-cp39-win32.whl", hash = "sha256:9dab4c98acfb5fb85f5a20548b5c0abe9b163ad3525ee28822ffecb5c40e724c"},
+ {file = "shapely-2.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:31c19a668b5a1eadab82ff070b5a260478ac6ddad3a5b62295095174a8d26398"},
+ {file = "shapely-2.0.4.tar.gz", hash = "sha256:5dc736127fac70009b8d309a0eeb74f3e08979e530cf7017f2f507ef62e6cfb8"},
+]
+
+[package.dependencies]
+numpy = ">=1.14,<3"
+
+[package.extras]
+docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"]
+test = ["pytest", "pytest-cov"]
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+
+[[package]]
+name = "sniffio"
+version = "1.3.1"
+description = "Sniff out which async library your code is running under"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
+ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
+]
+
+[[package]]
+name = "starlette"
+version = "0.27.0"
+description = "The little ASGI library that shines."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
+]
+
+[package.dependencies]
+anyio = ">=3.4.0,<5"
+
+[package.extras]
+full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"]
+
+[[package]]
+name = "tomli"
+version = "2.0.1"
+description = "A lil' TOML parser"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
+ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.12.2"
+description = "Backported and Experimental Type Hints for Python 3.8+"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
+ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
+]
+
+[[package]]
+name = "tzdata"
+version = "2024.1"
+description = "Provider of IANA time zone data"
+optional = false
+python-versions = ">=2"
+files = [
+ {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"},
+ {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"},
+]
+
+[[package]]
+name = "urllib3"
+version = "2.2.2"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"},
+ {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"},
+]
+
+[package.extras]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+h2 = ["h2 (>=4,<5)"]
+socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
+zstd = ["zstandard (>=0.18.0)"]
+
+[[package]]
+name = "uvicorn"
+version = "0.23.2"
+description = "The lightning-fast ASGI server."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "uvicorn-0.23.2-py3-none-any.whl", hash = "sha256:1f9be6558f01239d4fdf22ef8126c39cb1ad0addf76c40e760549d2c2f43ab53"},
+ {file = "uvicorn-0.23.2.tar.gz", hash = "sha256:4d3cc12d7727ba72b64d12d3cc7743124074c0a69f7b201512fc50c3e3f1569a"},
+]
+
+[package.dependencies]
+click = ">=7.0"
+h11 = ">=0.8"
+typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"]
+
+[[package]]
+name = "virtualenv"
+version = "20.26.3"
+description = "Virtual Python Environment builder"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"},
+ {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"},
+]
+
+[package.dependencies]
+distlib = ">=0.3.7,<1"
+filelock = ">=3.12.2,<4"
+platformdirs = ">=3.9.1,<5"
+
+[package.extras]
+docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]
+test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"]
+
+[metadata]
+lock-version = "2.0"
+python-versions = "^3.10"
+content-hash = "8fe4154e87c387421fa80ef8d9df954bc92874fb0c6f7017a9008a2a6ee8bc64"
diff --git a/poetry.toml b/poetry.toml
new file mode 100644
index 0000000..efa46ec
--- /dev/null
+++ b/poetry.toml
@@ -0,0 +1,2 @@
+[virtualenvs]
+in-project = true
\ No newline at end of file
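
Note: with virtualenvs.in-project enabled, Poetry creates the environment in a .venv directory at the repository root instead of its shared cache; the Dockerfile later in this series depends on that location via VIRTUAL_ENV=/app/.venv. A minimal sketch (illustrative only, not part of any patch) to confirm the effect from inside the environment:

# Illustrative check: under an in-project Poetry virtualenv, the active
# interpreter prefix is the repository's .venv directory.
import sys
from pathlib import Path

prefix = Path(sys.prefix)
print(prefix)                    # e.g. /app/.venv inside the container
print(prefix.name == ".venv")    # True when the in-project venv is active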
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..b9c9ad2
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,99 @@
+[tool.poetry]
+name = "pyrorisks"
+version = "0.1.0"
+description = "Data pre-processing pipelines and models for wildfire forecasting and monitoring"
+authors = ["Pyronear "]
+license = "Apache-2.0"
+readme = "README.md"
+packages = [{include = "pyrorisks"}]
+classifiers=[
+ "Development Status :: 2 - Pre-Alpha",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
+ "Natural Language :: English",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Topic :: Scientific/Engineering",
+ "Topic :: Scientific/Engineering :: Mathematics",
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ "Topic :: Software Development",
+ "Topic :: Software Development :: Libraries",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+]
+keywords=["data science", "time series", "machine learning"]
+
+
+[tool.poetry.dependencies]
+python = "^3.10"
+requests = "^2.31.0"
+geopandas = "1.0.1"
+boto3 = "^1.28.62"
+
+
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^7.4.2"
+black = "^23.9.1"
+coverage = "^7.3.2"
+pre-commit = "^3.4.0"
+mypy = "^1.6.0"
+ruff = "^0.0.292"
+
+
+[tool.poetry.group.app.dependencies]
+fastapi = "^0.103.2"
+uvicorn = "^0.23.2"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.ruff]
+ignore = [
+ "F401", # line too long, handled by black
+ "E402", # do not perform function calls in argument defaults
+ "E265", # raise from
+ "F403", # too complex
+ "F821", # list comprehension to list()
+ "W605", # list comprehension to list()
+]
+exclude = [".git", "venv", "docs", "build"]
+line-length = 120
+target-version = "py39"
+preview = true
+
+[tool.mypy]
+python_version = "3.10"
+mypy_path = "src/"
+files = ["pyro_risks/*.py", "app/*.py"]
+show_error_codes = true
+pretty = true
+
+[[tool.mypy.overrides]]
+module = [
+ "dotenv",
+ "xarray",
+ "pandas",
+ "sklearn",
+ "sklearn",
+ "sklearn",
+ "sklearn",
+ "xgboost",
+ "numpy",
+ "geopandas",
+ "cdsapi",
+ "urllib3",
+ "joblib",
+ "imblearn",
+ "matplotlib",
+ "plot_metric",
+ "shapely",
+ "scipy",
+ "netCDF4",
+ "pyro_risks.version"
+]
+ignore_missing_imports = true
+
+[tool.black]
+line-length = 120
+target-version = ['py310']
\ No newline at end of file
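
Note: the comments on the ruff ignore list above name each rule code; for readers unfamiliar with the codes, the snippet below (illustrative only, not part of any patch, stdlib only) shows the kind of code each ignored rule would otherwise flag:

# Illustrative examples of the ruff rules ignored in [tool.ruff] above.
import os                   # F401: `os` imported but unused
from math import *          # F403: star import, undefined names undetectable

#comment missing a space    # E265: block comment should start with '# '
pattern = "\d+"             # W605: invalid escape sequence in a plain string
# print(undefined_name)     # F821: undefined name (kept commented out so
                            # the snippet still runs)

import sys                  # E402: module-level import not at top of file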
From 05f4ad6e7955068205304db1f323e5f59da11505 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Wed, 3 Jul 2024 00:05:27 +0200
Subject: [PATCH 18/41] refactor: Update dockerfile
---
Dockerfile | 36 +++++++++++++-----------------------
1 file changed, 13 insertions(+), 23 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index 0d6dc9f..70dbb9d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,28 +1,18 @@
-FROM python:3.8.1
+FROM python:3.10-buster
-# set work directory
-WORKDIR /usr/src/app
+RUN pip install poetry==1.7.1
-# set environment variables
-ENV PYTHONDONTWRITEBYTECODE 1
-ENV PYTHONUNBUFFERED 1
+ENV POETRY_NO_INTERACTION=1 \
+ POETRY_VIRTUALENVS_IN_PROJECT=1 \
+ POETRY_VIRTUALENVS_CREATE=1 \
+ POETRY_CACHE_DIR=/tmp/poetry_cache \
+ VIRTUAL_ENV=/app/.venv \
+ PATH="/app/.venv/bin:$PATH"
-# copy app requirements
-COPY ./requirements.txt requirements.txt
-COPY ./requirements-app.txt /usr/src/app/requirements-app.txt
-COPY ./setup.py setup.py
-COPY ./README.md README.md
-COPY ./pyrorisks pyrorisks
+WORKDIR /app
-# install dependencies
-RUN apt-get update && \
- apt-get install --no-install-recommends -y libspatialindex-dev python3-rtree && \
- pip install --upgrade pip setuptools wheel && \
- pip install -e . && \
- pip install -r /usr/src/app/requirements-app.txt && \
- mkdir /usr/src/app/app && \
- rm -rf /root/.cache/pip && \
- rm -rf /var/lib/apt/lists/*
+COPY pyrorisks ./pyrorisks
+COPY app ./app
+COPY pyproject.toml poetry.lock README.md ./
-# copy project
-COPY app/ /usr/src/app/app/
+RUN poetry install
From db8c3058f1302a092e169309118a0e8b29840917 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Wed, 3 Jul 2024 19:20:47 +0200
Subject: [PATCH 19/41] refactor: Add dependencies explicitly
---
poetry.lock | 166 ++++++++++++++++++++++++++++++++++++++++++++++++-
pyproject.toml | 4 +-
2 files changed, 168 insertions(+), 2 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index cf193cf..c048277 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,5 +1,20 @@
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+[[package]]
+name = "affine"
+version = "2.4.0"
+description = "Matrices describing affine transformation of the plane"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "affine-2.4.0-py3-none-any.whl", hash = "sha256:8a3df80e2b2378aef598a83c1392efd47967afec4242021a0b06b4c7cbc61a92"},
+ {file = "affine-2.4.0.tar.gz", hash = "sha256:a24d818d6a836c131976d22f8c27b8d3ca32d0af64c1d8d29deb7bafa4da1eea"},
+]
+
+[package.extras]
+dev = ["coveralls", "flake8", "pydocstyle"]
+test = ["pytest (>=4.6)", "pytest-cov"]
+
[[package]]
name = "annotated-types"
version = "0.7.0"
@@ -32,6 +47,25 @@ doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-
test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
trio = ["trio (<0.22)"]
+[[package]]
+name = "attrs"
+version = "23.2.0"
+description = "Classes Without Boilerplate"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"},
+ {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"},
+]
+
+[package.extras]
+cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
+dev = ["attrs[tests]", "pre-commit"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
+tests = ["attrs[tests-no-zope]", "zope-interface"]
+tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"]
+tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"]
+
[[package]]
name = "black"
version = "23.12.1"
@@ -251,6 +285,40 @@ files = [
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
+[[package]]
+name = "click-plugins"
+version = "1.1.1"
+description = "An extension module for click to enable registering CLI commands via setuptools entry-points."
+optional = false
+python-versions = "*"
+files = [
+ {file = "click-plugins-1.1.1.tar.gz", hash = "sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b"},
+ {file = "click_plugins-1.1.1-py2.py3-none-any.whl", hash = "sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8"},
+]
+
+[package.dependencies]
+click = ">=4.0"
+
+[package.extras]
+dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"]
+
+[[package]]
+name = "cligj"
+version = "0.7.2"
+description = "Click params for commmand line interfaces to GeoJSON"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4"
+files = [
+ {file = "cligj-0.7.2-py3-none-any.whl", hash = "sha256:c1ca117dbce1fe20a5809dc96f01e1c2840f6dcc939b3ddbb1111bf330ba82df"},
+ {file = "cligj-0.7.2.tar.gz", hash = "sha256:a4bc13d623356b373c2c27c53dbd9c68cae5d526270bfa71f6c6fa69669c6b27"},
+]
+
+[package.dependencies]
+click = ">=4.0"
+
+[package.extras]
+test = ["pytest-cov"]
+
[[package]]
name = "colorama"
version = "0.4.6"
@@ -903,6 +971,20 @@ dev = ["Cython"]
geopandas = ["geopandas"]
test = ["pytest", "pytest-cov"]
+[[package]]
+name = "pyparsing"
+version = "3.1.2"
+description = "pyparsing module - Classes and methods to define and execute parsing grammars"
+optional = false
+python-versions = ">=3.6.8"
+files = [
+ {file = "pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"},
+ {file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"},
+]
+
+[package.extras]
+diagrams = ["jinja2", "railroad-diagrams"]
+
[[package]]
name = "pyproj"
version = "3.6.1"
@@ -1049,6 +1131,55 @@ files = [
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
]
+[[package]]
+name = "rasterio"
+version = "1.3.10"
+description = "Fast and direct raster I/O for use with Numpy and SciPy"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "rasterio-1.3.10-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:2ef27c3eff6f44f8b5d5de228003367c1843593edf648d85c0dc1319c00dc57d"},
+ {file = "rasterio-1.3.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c711b497e9ef0c4f5e1c01e34ba910708e066e1c4a69c25df18d1bcc04481287"},
+ {file = "rasterio-1.3.10-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:d1ac85857144cb8075e332e9d908b65426d30ddc1f59f7a04bcf6ed6fd3c0d47"},
+ {file = "rasterio-1.3.10-cp310-cp310-win_amd64.whl", hash = "sha256:ef8a496740df1e68f7a3d3449aa3be9c3210c22f4bb78a4a9e1c290183abd9b1"},
+ {file = "rasterio-1.3.10-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:97d867cada29f16cb83f1743217f775f8b982676fcdda77671d25abb26698159"},
+ {file = "rasterio-1.3.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:505b3e659eb3b137192c25233bf7954bc4997b1a474bae9e129fbd5ac2619404"},
+ {file = "rasterio-1.3.10-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:30f27e309a14a70c821d10a0ea18b110968dc2e2186b06a900aebd92094f4e00"},
+ {file = "rasterio-1.3.10-cp311-cp311-win_amd64.whl", hash = "sha256:cbb2eea127328302f9e3158a000363a7d9eea22537378dee4f824a7fa2d78c05"},
+ {file = "rasterio-1.3.10-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:3a9c4fb63e050e11bcd23e53f084ca186b445f976df1f70e7abd851c4072837f"},
+ {file = "rasterio-1.3.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7c7ddca79444fd3b933f4cd1a1773e9f7839d0ce5d76e600bdf92ee9a79b95f8"},
+ {file = "rasterio-1.3.10-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:f9cd757e11cfb07ef39b1cc79a32497bf22aff7fec41fe330b868cb3043b4db5"},
+ {file = "rasterio-1.3.10-cp312-cp312-win_amd64.whl", hash = "sha256:7e653968f64840654d277e0f86f8666ed8f3030ba36fa865f420f9bc38d619ee"},
+ {file = "rasterio-1.3.10-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:7a22c0e0cf07dbed6576faf9a49bc4afa1afedd5a14441b64a3d3dd6d10dc274"},
+ {file = "rasterio-1.3.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d29d30c2271fa265913bd3db93fa213d3a0894362ec704e7273cf30443098a90"},
+ {file = "rasterio-1.3.10-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:287e8d0d0472c778aa0b6392e9c00894a80f2bace28fa6eddb76c0a895097947"},
+ {file = "rasterio-1.3.10-cp38-cp38-win_amd64.whl", hash = "sha256:a420e5f25108b1c92c5d071cfd6518b3766f20a6eddb1b322d06c3d46a89fab6"},
+ {file = "rasterio-1.3.10-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:73ea4d0e584f696ef115601bbb97ba8d2b68a67c2bb3b40999414d31b6c7cf89"},
+ {file = "rasterio-1.3.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e6eece6420d7d6ef9b9830633b8fcd15e86b8702cb13419abe251c16ca502cf3"},
+ {file = "rasterio-1.3.10-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:0bbd62b45a35cab53cb7fe72419e823e47ab31ee2d055af8e21dc7f37fe5ed6c"},
+ {file = "rasterio-1.3.10-cp39-cp39-win_amd64.whl", hash = "sha256:450f2bd45335308829da90566fbcbdb8e8aa0251a9d1f6ebb60667855dfb7554"},
+ {file = "rasterio-1.3.10.tar.gz", hash = "sha256:ce182c735b4f9e8735d90600607ecab15ef895eb8aa660bf665751529477e326"},
+]
+
+[package.dependencies]
+affine = "*"
+attrs = "*"
+certifi = "*"
+click = ">=4.0"
+click-plugins = "*"
+cligj = ">=0.5"
+numpy = "*"
+setuptools = "*"
+snuggs = ">=1.4.1"
+
+[package.extras]
+all = ["boto3 (>=1.2.4)", "ghp-import", "hypothesis", "ipython (>=2.0)", "matplotlib", "numpydoc", "packaging", "pytest (>=2.8.2)", "pytest-cov (>=2.2.0)", "shapely", "sphinx", "sphinx-rtd-theme"]
+docs = ["ghp-import", "numpydoc", "sphinx", "sphinx-rtd-theme"]
+ipython = ["ipython (>=2.0)"]
+plot = ["matplotlib"]
+s3 = ["boto3 (>=1.2.4)"]
+test = ["boto3 (>=1.2.4)", "hypothesis", "packaging", "pytest (>=2.8.2)", "pytest-cov (>=2.2.0)", "shapely"]
+
[[package]]
name = "requests"
version = "2.32.3"
@@ -1113,6 +1244,21 @@ botocore = ">=1.33.2,<2.0a.0"
[package.extras]
crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"]
+[[package]]
+name = "setuptools"
+version = "70.2.0"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "setuptools-70.2.0-py3-none-any.whl", hash = "sha256:b8b8060bb426838fbe942479c90296ce976249451118ef566a5a0b7d8b78fb05"},
+ {file = "setuptools-70.2.0.tar.gz", hash = "sha256:bd63e505105011b25c3c11f753f7e3b8465ea739efddaccef8f0efac2137bac1"},
+]
+
+[package.extras]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.10.0)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+
[[package]]
name = "shapely"
version = "2.0.4"
@@ -1192,6 +1338,24 @@ files = [
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
+[[package]]
+name = "snuggs"
+version = "1.4.7"
+description = "Snuggs are s-expressions for Numpy"
+optional = false
+python-versions = "*"
+files = [
+ {file = "snuggs-1.4.7-py3-none-any.whl", hash = "sha256:988dde5d4db88e9d71c99457404773dabcc7a1c45971bfbe81900999942d9f07"},
+ {file = "snuggs-1.4.7.tar.gz", hash = "sha256:501cf113fe3892e14e2fee76da5cd0606b7e149c411c271898e6259ebde2617b"},
+]
+
+[package.dependencies]
+numpy = "*"
+pyparsing = ">=2.1.6"
+
+[package.extras]
+test = ["hypothesis", "pytest"]
+
[[package]]
name = "starlette"
version = "0.27.0"
@@ -1301,4 +1465,4 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
-content-hash = "8fe4154e87c387421fa80ef8d9df954bc92874fb0c6f7017a9008a2a6ee8bc64"
+content-hash = "abe942688e45b4ca0e7afaa5eb75086ecf036333faeae141b572023ce1380293"
diff --git a/pyproject.toml b/pyproject.toml
index b9c9ad2..4730397 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ description = "Data pre-processing pipelines and models for wildfire forecasting
authors = ["Pyronear "]
license = "Apache-2.0"
readme = "README.md"
-packages = [{include = "pyrorisks"}]
+packages = [{include = "pyrorisks"}, {include = "app"}]
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
@@ -31,6 +31,8 @@ python = "^3.10"
requests = "^2.31.0"
geopandas = "1.0.1"
boto3 = "^1.28.62"
+rasterio = "^1.3.10"
+shapely = "^2.0.4"
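
Note: rasterio and shapely are promoted to explicit dependencies here, presumably because the risk pipelines pair raster I/O with vector geometries. A minimal sketch of that pairing (illustrative only; "risk_map.tif" is a hypothetical file name, not one from this repository):

# Illustrative sketch: clip a raster to an area of interest using the two
# dependencies added above.
import rasterio
import rasterio.mask
from shapely.geometry import box

with rasterio.open("risk_map.tif") as src:     # hypothetical raster file
    aoi = box(*src.bounds)                     # shapely polygon (full extent here)
    data, transform = rasterio.mask.mask(src, [aoi], crop=True)
    print(data.shape, transform)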
From 706d910a9bb0ebdbcc9ce19c547bab3ad63f073a Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Wed, 3 Jul 2024 19:22:45 +0200
Subject: [PATCH 20/41] refactor: Update app deployment
---
Dockerfile | 2 +-
app/api/inference.py | 3 ++-
docker-compose.yml | 2 +-
3 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index 70dbb9d..dfc366b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
FROM python:3.10-buster
-RUN pip install poetry==1.7.1
+RUN pip install poetry==1.8.1
ENV POETRY_NO_INTERACTION=1 \
POETRY_VIRTUALENVS_IN_PROJECT=1 \
diff --git a/app/api/inference.py b/app/api/inference.py
index a813252..2c89471 100644
--- a/app/api/inference.py
+++ b/app/api/inference.py
@@ -9,7 +9,8 @@
class Mock:
def predict(self, date):
- return {"01": 0.5, "02": 0.5}
+ _ = date
+ return {"01": {"score": 0.5, "explainability": "weather"}, "02": {"score": 0.5, "explainability": "weather"}}
predictor = Mock()
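
Note: Mock.predict now returns a nested payload per department instead of a bare score. A minimal sketch (hypothetical route path and threshold, assuming the FastAPI dependency already declared in the app group; not the repository's actual endpoint) of how a caller might consume the new shape:

# Hypothetical consumer of the new response shape; the "score" and
# "explainability" keys follow Mock.predict in the diff above.
from datetime import date

from fastapi import FastAPI

app = FastAPI()


class Mock:
    def predict(self, day):
        _ = day
        return {"01": {"score": 0.5, "explainability": "weather"},
                "02": {"score": 0.5, "explainability": "weather"}}


predictor = Mock()


@app.get("/risk/{day}")
def risk(day: date):
    scores = predictor.predict(day)
    # Keep only departments at or above a hypothetical alert threshold.
    return {dep: val for dep, val in scores.items() if val["score"] >= 0.5}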
diff --git a/docker-compose.yml b/docker-compose.yml
index a05ba86..cb1a624 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -5,7 +5,7 @@ services:
build: .
command: uvicorn app.main:app --reload --workers 1 --host 0.0.0.0 --port 8000
volumes:
- - ./app/:/usr/src/app/app/
+ - ./app/:/app/app/
ports:
- ${PORT}:8000
environment:
From 665c8a45cdd76aefd22e0738e0d8d1afbdaa1568 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 9 Jul 2024 10:08:41 +0200
Subject: [PATCH 21/41] chore: Remove docs requirements.txt
---
docs/requirements.txt | 5 -----
1 file changed, 5 deletions(-)
delete mode 100644 docs/requirements.txt
diff --git a/docs/requirements.txt b/docs/requirements.txt
deleted file mode 100644
index 0933295..0000000
--- a/docs/requirements.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-sphinx
-sphinx-rtd-theme==0.4.3
-myst-parser==0.12.10
-sphinx-autobuild==2020.9.1
-Jinja2<3.1
From 639e154dc3fc11c3789cd9cee92af28089ed6d7b Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 9 Jul 2024 10:09:12 +0200
Subject: [PATCH 22/41] chore: Remove CI requirements.txt
---
.github/workflows/requirements.txt | 1 -
1 file changed, 1 deletion(-)
delete mode 100644 .github/workflows/requirements.txt
diff --git a/.github/workflows/requirements.txt b/.github/workflows/requirements.txt
deleted file mode 100644
index 7dd0fc7..0000000
--- a/.github/workflows/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-coverage>=4.5.4
From 529e6b3bd04bda525fc2cd6c9371403c67d19c58 Mon Sep 17 00:00:00 2001
From: Joshua Sant'Anna <45068597+jsakv@users.noreply.github.com>
Date: Tue, 9 Jul 2024 10:13:13 +0200
Subject: [PATCH 23/41] chore: Add rasterio manylinux wheel
---
.gitignore | 2 +-
...-1.3.10-cp39-cp39-manylinux2014_x86_64.whl | Bin 0 -> 21501230 bytes
2 files changed, 1 insertion(+), 1 deletion(-)
create mode 100644 build/rasterio-1.3.10-cp39-cp39-manylinux2014_x86_64.whl
diff --git a/.gitignore b/.gitignore
index 2d5b3f2..1bacf7f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,7 +8,7 @@ __pycache__/
# Distribution / packaging
.Python
-build/
+docs/build/
develop-eggs/
dist/
downloads/
diff --git a/build/rasterio-1.3.10-cp39-cp39-manylinux2014_x86_64.whl b/build/rasterio-1.3.10-cp39-cp39-manylinux2014_x86_64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..80b1beb89e72d3fea0a35256cf635f2c54ddb420
GIT binary patch
literal 21501230
[base85-encoded binary payload omitted: 21,501,230 bytes of rasterio-1.3.10-cp39-cp39-manylinux2014_x86_64.whl, not meaningful as text]
zPTfEWie#=ISGtA(dWht7h%(qJZln2(}0{t}BRWoe>KpLo@?fuk=L(6P(e(!??tkxL!wfkrzl*>RPZS
zw1Eh%f%KDMVs)@CkPWUt17b8#q|O#~kX7Lin4>^QA*E!PL>;^fu7JhUs@=@NENC;p
z=E4#waieS@85;EqNG-2xK!^RnIj~WYG(BQS%-AXDH4NO~6BTeBD|f!xBq
zT~Aoq#EqK`WBq=aeTL>GhFrs~TnVibSjg*y`JwvQmC)`qgQO8+Kw52A5?9@U&*dO;B>vnn{8{z8@OV$(MXcGK!YX`
zgu7IOhY!(mm=o_o5?bc%^G+GyEhpn
ziPL7a*5`_Y4A2G12?NDVa9f9Xe>x(TJANP
zyFVm>-C_obi=04$;Fy+s)+
zUThc!Fh_*|rorzKV%(w?4@}=sB?J-RBur=lE`T2c=OIWOBquD2ATC1Fl*FKIzz@uob3-sFt2k+}1iq1p5hF^^
zsY(scBMewY0OQ>T4*Vc2L_`oOgXD}AfXahFvPAVt^}x|N8@$9=c`MO-$bjmRD<$Ft
zI1{0_0cn)Jf>@e^jst(lB#T2@AXCX(!TnxvHi(y|AfxcAfFEkb7fN%8&j0I2@AQkG
z=A_BR#_N904a7NqV~x2=4cYJ8SXuT#6*1n8bQkO;EgO$gzwVV*flfWAK5y-8Gbo-+
zkMSOFJ@1uy{Q0HKX^&>_7;mZX30|2eo^Q>a-qvgtGqLYwzg_$$nct4dC7Q}Ditjx4
zn??uhcUx|=y?bZ;^`>{EX%7ZMV=(;#lJ7N`?H{m2`7BT0I86+&ZbW^;H`~ajc|h?hin?D#
z%DtLbUkD9~Pkmm`_qv+m?*yi*(!KhSqZ?z|KjNED$)VxPOCAcA0o$4HYk8c2C|NBV3cw6gyMh8AlpUCxf_a`MatTW#hqZ>fJ+b>BxnlE
z_+F~kpV#ue%pm;q+T#^uKnn4|XE;Q)eH5dcN9fQe2gY%k#t$YwAt}g)k$lm~r
zQT{<-OkkM;2>a<90Q7bAt>F4FJrxL|(^#}h!})(`01W^Q^%WW{{xl?Bfu!34ROWOT
zm~|v(03bLZiw9(lv;>M^)JEW1-wXT5-*O301X>*hpshY{0^{+|24FnRl>?yf`zx)`
zV5NaQ*3gT=s7faRAeS45z_=%d45WB1ivh8=RGe)PDZ_ROP{xX-F4nbq2sqV(Ecb*3x%^aXUQ+jO*xb
zVBAQjgK-TV4Wv5SNCwzG##n-->}3eR*RN#>*H>t$e_y3xx=LdkK%hC}q7N
z8#qr%%Pd7`u%v1BWW`jS^}oR)DluLFNSX}A1FEPL%C>+H=`mj5MdcWG@c3}!Copbq
z3<3-8Y77F`l8tO|eYBAct}i#rf$KdnN+tN_pE0B0+Ac;3ylWw&2R7v^x`ItPyJ~hCWw)eRt?C_CCwVJ5RMfeWdsl#$@qa;dWyqeM1Fz%P3OVp2C{xDqf9P%>>X19
z#yv3=0H#$;1pp`;QvtRJh{1sSA7ktRl|3<-RT95fNnln~t9bz`9rw7C
z1Q7(%Zv&!Lq>R?U#tG4kCGAX5>%2B1fG4~*AOM71MZ{{ALV)DbRzy%%s#G0FrxwLJ
zkdrMN?}F{@WxT-nGe!@9$t>hF@gtlbyOP}cYehh!
zy5n3>8$fca^U2LKsOReD9$tq!cb&tZ&@#;mSnC#c=V?~Q(I*GZw|`shDC)8vEeFqQ
zoU`QrPe*x%ySodR>rOIJt>I|uK{fD`!3NnUo{JX#yQ?$$U*nxS*=ijjo83(GDXR@d
zD4RAd25Ru{pP}W*(d-za#`N{Utb;-_wHygQ6HAAh8tl7>MF4lLj<)-iQOu&A9!vg
z;|TIqPqDZ|MDpiv`v10_;M4#2x0AO2dplvQ&{$0xqt&ixwAvNPz=63Na2cSrXbb}D
zp5HN;x!M&KR=VO*(0H;>e3k;&HlMXXhm7;z2O5kFy$8sNY-9qV%w@^IG8kr1kAE-go9Z&vUrfzMjC=cD+SEo89d%fN3BMq35}*(?}%L#5Fi7%Vjg52{bb##x}3k;Wjr2}=Mg8puKbwATi8Fy<<%gA$UvL4Ea%qP}`Y
z5&aJhKtR^KwF`0
zwo;BQj;6S0P~^@3c&DD6g>u%&W3;!&ea5#2bd&D-vij&HG;zF
zj%-{hnbKP8%|IooJ01cm3HA}|+!8(;NCnPlnN}ey$CBemfs*Gn%~ssB(<~N@8fKtg
zoZn$aSP6z15F|mX1_+XUB4p=^r;dW`Q$G~~s)<<)>pIvJPS*mPq8hCMzS)?xk5CXu
z@Re@36qB}^;46?1$dXojqAMt+MjAD7E3pVM1i&A&wu3eH(pUPUD!mM>B-?1UN@A*S
zg~SwCNcyXM4LBqK5}KQ1ch&&;&ui3xUV45<4d9-j
zS#xiNL?#FnYG)0o@(H^$LAVGSH9$Z06Cp;xHw2bC0H95;1zTep`F)TwXzUZj0G`mIHucO81#Y?$gur_@~mkAi7C}jZ)$+2*4(5K!i(}J|*C}WB
z77VuR)P&@fg}a5A6xR#hQ#tzSN$&iPPptG4^XWeQlLqpOUJuVzz514aQX!Zz<$w&-Mf-4W&01j;P~|^z@#g6aRgv9}k4i!nfIzeGs}{o3D6oZZp2|
z-)CfPK|b}>tx9KE*8pm=+{aeKW0mT=9RDQyoD1|!9&|}d^|{^VQujI^Iy06aha1qu
zOcy%bwnRJ)|F1)NmWL_5>Y3>5pE`c}d7x$If0a=eguL*xv(62%v+03DzJYE!Mi0iK
z&lxZ0zvVk>}N3zhMqt9e4;VRTi{ZSTtdT^QcscJOlv<82v%8Gd<_K5>JA4U~|v)a>#$8+*HbmBQvb
zlj_&n7BwrQ&KBR>Q*qR~z3ka!(yUJ9yXeu`=Yf8I##~QUcJAW5+0z?0mVF`65$X=~
zdmiZJmp|$JXi&U?G-S8wg3}A_E95Y{?~0y-UZM4wH_d<9G%V!Lp8qyd_4<1iim1Pa
zD}1LwlUjG}T&m%Sh_GWMWK_a9{7g;JL%*7$Xm8z+3ExaLwE0`@!g9_Q_%*uEXB8^r
zQQ_Ag`!?s0;|_9i5?D5ItY7kKZZ_L(jB^VW)%i*q*tU58arrHFQjh!-Xxg{iW0(n-
zZhA3)GcctuMd__<4maEkkEuhkIfj@@=gqHoet7%!+r3$F#*U%G5ViYv;2|S(7);sx
zA%zYG{A6rUE^C~Kc*YT9{`&X(q)loUdys++cD{80k=ieB1!47pPHhTsB(kV^bRr<)t;eCoE6lP!1xM}YJwb}Hd
zgX20CjiZ>#EeTYIB3AA*9rry7%Aq?Lc|A3IRiKHwu;xkjPhSGX0J9(Ag+4|MLVcKK
zN&jIqj6R?=RQ_Qco-<~}bbAr-^7a)~3FNp?&+s}g<+RI<$U$gPGGAHIC
zLZ3ZTMs68N=bL<~YFWJMKlUPDWbT{=~oIoHeI$W{ksnL>-Cll@9wx0MZPWTi)7w9WR`~QDI%OJ>TfB%
zB$pS?Gkm+4Pjl)$SG4v>(arbW38m|FilW~*T=uSH{#W$iUC*QT{Mj3?$}IgO2$%jG
zD!4S_@}uhdIo(;SG^Z)V_sZW1AA7{?eR}PR7VmXMOy~ca+!exbu%8Obxa8Bde82Q2
zdWpO4;uX=mOP2NpFS~?A{|%*PyjM^!Qn4($;)FR?u-W@3TYko8%C6@{kh9N_*{ibM
z)?2hOcXIR^PT<;pE5fr*-nO1crF79g2)PB8FC5N~pA=%^EycVDA@V}_Q0Bu`)
zr@hy&tTTwd!yv!8yXLEsN{-nL_MNG%A^D+~nUYHrd~$o+@7)a)^bzMkp_`9Qz~$e0
z77tkOZ*`7`zcF1p7`rrNqxR>4MU@u$X%%K%rwArRYCoj?*EZvld)M-_(!HKjkBpy=
z?cG;dqjF`o)*|eB-T0+${O&-8WX00AEQCkO0A|zJx~GHs6{(|z&G74$I`*?}61?
z%_j$1wJvVOjwJ5s{k>Z$A#hXti@}C>gAS9=ZIZ8UvDa-(-glrIwz0BB5|6RMG&@f=
zeJ@VW3iyVYjW;6sdkuy;1UHNK51WkA-FrGi=q0%Z?_3%>kIuY&0TpMksp3toC?y9k
zGaJORdg*p(a_u(a-D8;McSzO3>WR5cI2?39^XW^
z`r<98N|SPvT}ws|xPQ_40(r%O-P}`z%WdKXmuuX9_Wj!Pk=^|5?r-*!8^I(GrPrNv
zE#hTR>-dFIsP)J7p*Qn)DyH$0zuAZXxNUu>_Sb#qYq|0JTaW0gTtDyXmv6E!BN%5{
zLb*y4?~S~;?@u&V_1^lAJIP0nl~PIdiZkjTlH*rDdFY!*`SMi4_VCM
zizg21&gKfKq20|^r&TZRHEG&@)X#B^ELufoH(H5pi?AC`pkv5zR!K7<97Pz
zqgNp#X43(A^(A{cK@WDH+qFG>0?FERyD)U%
z+b=eIa7)Oo3l;6R1!oos_q~#@&9T>^wq`N~_=Uc25rco}X;ztkiJsvTf~obKKBLs1
zkkue!Ktver;*)EuS`d11qn&v&Z8GXuh4aLFL&bNx4>u3p5=OT3x+Xp;>iK1pe%-<^
z`gI>}NbJqk4V%UI`E&HjW;Kkye|UT|f;&A8b#Ay;@;H3b^+-R`5;I+D{-AX@w|@*f
z**o~b!{c${5og26Xv^B(QwXg9{M};=q}SSr#{VuF4tW
zrFTpVetn@BtyjH4)7kQ1`?H0mcl-`@1+(!)g_@(_8)@q&-^}L@Ye1MAf}v@+@jj1>
z3;Gw3Sd#W)P-Sy9)-@ph>QK%OTu13U5rM7FMxtq5(%_F$<;&AwCw~96m04PUA4L!H
zYdbNIePFz~jS>1F+SISY(9co#_;CJqatBTW)kC%~5oOthq+COwh2&*!>CjCfrN?Qo
z>~RldKPVt6TixM&lKLrs?|GdyXRhL8z)$Zj4eimn*&>2WTb_*i=U~YLI?>MB#?hxX
zH`KcH$mn`lNREG(IYPV`XQ&lD9!=ztWOmPt$SglsoIYcq@h?wvnTl_DEUEx$kE`z1YrJYwwJ?X+MP0*`%DC
zoVOHhe?EPv`h(Z`ZT`)a?rO7BT{novoA5;@hBj*%DUWVUiW6?9Xy!Y_KN*9%&sUoE
zUs#s%&C_=}YucHrj=p{QPxbGc-ojHK?wC6`FHe0gd5XEIwcy#W#e-SoGmZkrBr)V`krW0Voc)E*Gk
z@%xY;#~9TY3y2imNaCDnDe=KZsAE$bg&p0J#YH1GGfp`jzNmWYqG~_mj_UEvE&b=V
zSU#52Opr4z3{W+L1#CVvdO0=kPWu^E11Y8Xjj=(doS}xfQiD;a4?_rhSU;Xr
zv^+aW)jYdwEju1ZBN#xM%F-2qoA;rnbmr|hTNC{Chp%|=(sU~@jDBvgp&{phiL&t2
z-1MO>$6*c92|`Or-;0uq9WuE76Aa?sXwshOW6qK(2cu!%WoF@*;<9!{-)exXvWHbo
zG44Ure;VEPxBYT6J9DVSvh~U0oHy+e5KoD8#
zzsS?c+r~rSq?*{Y-PD!)g8OvQxIIn*U|LN($gTy&yQFaa5N(^LjYxBTi|zyZgG>
zL!-$Gqir`#AjF%*<9MCtXKl`c)qS?cl-&Mw{@CS6}W=>31KRJz_T5
zKby+1Lr)Ic4bSQt=a*ME!SY)gZn8si*}IMOJwH@YfXTjlfDe`~tq3I8Hue&Kpt
zth|wJU9u5rrOdYl;)-
zZdeQrRV$JHnKS%NUbU$~5r1e+qdYGh5gjkyZbM@AwNQ1-ytB7LQ^QCi>)X7wP>o|cGNm|iE`=5`apP}w-7pfB!a?zs+I(it=!`ab|^K$gFf`LpXhvuMMkX66TMH#5I~hc)xp
zGd#25p#z-x83#D?UmZ}He{-L=%&dO+TPAKYWuX^NrnKC0WgXd=$)&T5FiEzlW!f
zr#y+@Dv4k2u9?KSD00Mn{<&7qqK|ef?_Kw)TwE30cAuFGIzTy+%qG--8lMur2?q9GSq9pZk}>zTFsd!
zXNT^btvR>cn-Z<8b~A*zK(cHiXZWm$Qug8?XnSUt@K4;1D<~Z2Z!_
zB8{_{xXMcpoWF+i{@X#F_rt4o4;+xDyr0{xa`BDegS#!c_&Cv%i){zZTwF~tbMe+z
zYc9Un=kfGTI_JVaopW()pUTDZUEXr>0(`Dmy2{1fb(o7|tF5`%aS(Fxa237(ypwbB
zy0=_RUCp`Ze?aFVZI#YN#7^b?=q{CuaKWj&EV*d0#*>Q|_e(CeyU%QvT-^?3+&@u?Ac4Xxa*#q=GotQI)7#OT>gDj{atrXlKBd0?i&Hdr{kLposMKxH%F@9{{&V7C*hq-OP
z4znbA+q8q*xz|dZIdn%lv)iMxeUIu$OV@Ho%2_DgA;n{sqEdHi-g@O7o$?W
z4U+=!IrnbmEh9OERjg^mL4+>1HyLHk!#OmTXN%on4+LdwMq~zn<
z4s-E79p*pF4QKPVamVPt!du7KndVVhvP-3I!Ww(W_%XqnzE0_ozKP3q$9R34@;+yW
z%0+F#f9|m4qSOjcE@JnYxmdcv%tii9)?7RbQ5M?8xkyjrTue?=x%gqbEf>klITtZ|
zbuO;O8@ZUWm2*+dYc4kI^r+DD5vq6OYI`oeT5j9_?8E&}yv~I>@4Dumv|Z&QK=6p|
zmRuA`@Z@6RUNaZ5>&;x;-C)f{8Hn;9-fx%Mx|4G;W~a)
zF2-)*T+B}JmW$=79+e3@RW1guvghK{cxx_N?1fwmTBdVRZHw~$`!O%%a?x(9w+Z9G4$tS-q$+naSK_+*
z%QCC8@ArVSIZKW7y`Q4q+o5*3ek&YyxyP2+
z_CLFD|Fc-{a$}QKmWystS&tU{fLCMZ6PkWYIQ;f$`g~;JVtW5InZxfwPsPG)0W{wQ
z=Og_W>+_K+A~-Td1yEdYU&8YKx2>j!Y54v(=^o$2y6?~fmaN4bS@KTv{qM!*``?>X
z-4#V^uHYxxtnCS)fkV`T!$CB>`M8oY9;xaNH|t-vh~9tMq)ul(U!lGt(SETt<`9f|
z;3D1WpOpwkZ&ndKEx7+?m;lOhdr^-x`dj^=9p(f;>yb
zms+!xdt~HRH35Vrn-f4;&jgUM#q+t{U(G+4~_*=c3F;<$Z9n%0(f;-zVq&-)D7KEHuWUuVdZig<^
zeX*w0Jzlh)5&X-hy#M1VW3gfMH#yx87jnJFx0~;Olk(y9tUP+9dda7-_ve&V-5b@X
zXPBS9vVfj{s87F3&!na;w7ML%16*!eK>2d_v6bnHWu_WsvU|vDIE~jUYx6cL{F2~_
zf-4FBRq#`Sf71I8Nks2D?#uJ7HZ!+@
z&0-WpQj&VBH=X1+%;&sK->UO=d7eIZix#m5>y`Kr!I##XLtwB??AvY7@Uccs?st>8
z1)f-IXnmlx>TcDYEpnb70?$d^V@0d9;6YleuKQ`5RzB2fKw77bn>AzZGrC)Kk~=ex
z*Z_Ks_H9?+{YGNFmDtCo*qpgK
zk1eJ0gmp@+kzhVwRe5Y^6RVD5?MbZQTFzrglA%>ZX=SHqt)RI&k0%pV-OWX7uiysj
zR4)sV_g|K(o@+j5SUs1koS0%h|C>Y4{}Rqsryl)()v~K)kITks7>n!)S741X1C>?>Q)}@ot<^^APFkb1nhK6vV`{x=(|QrLI+NCuN~=6+6<1o<
zH)^e5Q|rP?Rrh+IBGQ~t?LP_Rf4o`&s6)L^;)Z%sg;%>^PgxX39h=zawnL*TY*ZQM0KWJW)9!-<}8$B=^Xd%8P;r_T@TqRJ42tc
zY+Nqmf2E9n!KGJv#=otm$yn3g=`{YAbGWCs()gdD#{W8V{7*N>zto+yLdL(~xD}r9
zZ_|1awK|j5lS*qjX%$ym*OScgZ)#nLm+>!JhXptGXdRet8~^KY{7<9tAFtkUA+5R7
z)%Z8H4o)-2ztnvuLB_w}j0Dg4w`qNhTBAs-zS8PTTCXatXH2bbrq)wZ_fMkrXqk+E
zkJdlaY~w!($3JQ9T*g|RNNdkDHU8I{|w`mPTt%RvG{*_i;
z((0tNnwVNqrd9)~d&Y7Z|ANPQw4SwT6-TY+q;-8MYdu3+x2LM{pJZCy}x_Pv^?{KA$+a8eZSTHGVj0n%xlB~H4*sB5M8js7^22)
z$OZSz?86-Onq)3x#cU^T)!1;?yVKdTzPJ~
zE$SoYtTn;>RR0yq$QLyAsn`A68TTmex29Ozw{9YQHLB}m<1VI++%y+iq8jXnWeVS4
ztZ*kee;t{gT_}%5^3wVL(@MTdFp|GLC*A4bWUE}t8t6`UCm8|7F6Qo(L(9p$$(*@g
zSL^O{Xp-(u{YCxkQWZ@P!3UO_THS0~ZBc79X*EzFH(0D%eJeR3D-}u#u~R8w&$n*(kD{G
zMfu7S6-jTwCzgooSKK{qHHELn??3-V?|&N~x>3x(V5l*avp?KH-+xdsR$8UKe@lvo
z`TS25`io+I!9A9kd3M)UX!|KFIHMF2>!#PRl}xL8ns%H)}4i{
zb$2_x|2L7fGFND=nx@vig{tm`qP0oz^B%4B6K(Ck62JdHfxiE*wEiKju}bSZQ)|Nn
z^ZWl&_pU`s>qEio7MWV1Hm$~}^&@FjQd)VWRaI&EnOYy1T6Y(yx;u*2MZxcTw9ZYi
zYGtedtwN-guzM!_goT=5vrqva-rW!T5n=7qS
zq}5Vs)iSmEnp%~m?%zbKnBbq|^8WARzdL_&orjm}v0j~2)9175KvF&Ur)~aAfN_5J
z4}JVMeZEpGC=rYie0$-4M9@n`@R1q8>_6!ITSYLHB4}faAQ&U)ZAMUD6o)QU5j-yV
z$AxCT3)^bCvmEmM0_h%}$GQtiH}emUEGb^+yT~6p-^=Eyy32^xOu>Z~=6U_=Bg=Rk
z!CxO4r0V|Mtb0kcp&q8xgHjFk&;GE++7e?O9Iahf7v(VvR3xPY4_u&D&w|q|%FntB
zDCKj@wDQZOd~&Xu-H(v;>1eC;zIc59Fe`6us>mMMrQ`$8%8q
zmzwWCMd`PFBc#@&^OQ=s;M94hN(Y-tD^!VOm0F}yU#XNbRl1leMWxoBqViz2!ks-T
zccZK-=a+&?Iab+9r(gS`SY_D~t@3D`R+%HUJ|QZ93Vt|Ob--bhZ5?o^s_P50u7q*4
z|DUZIs~ZL6sO*7WK)GI*Ab!LE+up!P1-
zK~*#jZi!X3z91?q1eez;FS+B#Syg5%0hI$|jrs@0s-qNgH$-W5HnoCAVu{xQ@XDO+-MS8bj
zIvGBv#=0rMsn>Ch$`el(0Dt(Mj=1)@V$=OP!tKhw}O_eWfDj%TA
zudEVCDz7P(@}^36Q^j9u?IS8K!QDJ6|BkV$@~;{S~XED+~TD
z*8BQXXP#~SdGtGna&n;#NV;8AWN(|1{V`gPj#s7bS+kT@O~K=5=>?~Z$}s&^eO_?3
zyXXGO3r^{|yx?4%tIpSE#_(I9bOdHBICs^5y6;Eff}{V~N+I`kzLmZrFE4Bv%^I^8
z=m2kwqVK=YP_3A}Q@y+}eYDl|=!IZ9d6d3A>N-REe~hx!R`A9cGkqW0YHETt{Y2VT
zW^no@r5PEhuA(bwM)%PuoxTUtW&RhfOM=^It-9`uqpVto7Jyb!(poiLzvViLo551OjE9pO|C1-O&v-5UsJ9J$-SfGo;T%wGUX~r^}|H&
z3BkQQaz$)%cjtp#ZIU}OP0O_=xuYXlE^(fgD`d(onWm~QC34dR7xc(Y8EKUpfpQy0
z7;@cAxfe<9YbDphl$$g{%Qcqj=gm-ZFA1JF!<2i$Cie`=y+?BQr)s&UNbbQ1mdl)L
z-v60$`=+Yu8;aZ}!Owf-){n5t#m&R}zu)No&y>4H1ADZR>tV_z{ifx@rTWzAN-k7z
z;&fB)U7Or%C>KF;{;#wi&NBP|Fm3JnU()|Y>$Kn&TC1M>
zvEJ{qxS&z$@1cg?QL6tJsXu$N
z()&U1vB`Sf?x=!|iZR#iD}#95{(L&G+jVBBb^D?m+^JVUd$)8qu*F#|7IMDN(`G>&|3C8HJU)t|c^ux&0Re$oU$%xpp=Q`1t+2
zf4tA8{H>h3|e{&YDl>-RxFXk#13VIqiAo@@PuKihd4!n@w7axifmp56>k8T`#Nq
zUvqH1vy4bv@Rb@8T51kHABY{l%R24=9hbc-*PCmkh4mHv`VU$zdxO@0OBwEr-$4KI
z_qNX&Xz}q}jTZY~Qfbk9kxGl#{YF|8t<-4o?^Q&LtW`vdlvRutb(iaDF)2?^ive?p
z7VTbAXpuV`XwiBp(V~8(wCK4~qeZJ#M2qTkjI_A;l8zR4&Q@qqx`g4LyqwXZfWId!
zSC#P>vyCmS)37871nu=sM8qKrrJws$RKef8(>pDznVOgcGg
zWhi3ONe};BO*-jdzqiFrA$xd6Lm)8ulW|!Cy@6EyK8xwU7ZiEE36Us%mPyrgl&T%G
zj5u$2Q76#9&s3yp$+HZDKbA6#=JEI6mNE##b6GA~VZ>C(ZKCF~oD5}w+99@0wTd;Q
zMB&~lCo`DsFDP@)V>2=T2Zwpl*8p>aFqorX)JN4UoBy1J|cyoc|U6_
z>3IdsIoR7>`3%i>zk%*rg#p*(1s$6A&rs03yqIW#?m;@%$%?)$_5+M^UCXg>^Fgj6NF|
zv!QbG_ce?48l+?A(Mt$2AKv)l;gBuk`g6u&7Hsao3CouzV=Yj7;~XPI>6j>aV@3dV
z8!b_TCqyaNu@MKn-Za)g$Hl5lluk1u_T>s4V&|s?Sq2KO@VVOF6%7m4BK<1J!Q{4U}Uk3!gaZH_~G0vl=a&
zOSthSKh1UT_-7b38qQIuQOltb!$ZWV`K*~3)uyuVTQPuZ{68gZ{$H-2
z|0m=8zmSE69K-p43Y-6zsq_D2b^hmSZ`UF||MU0H7a8%Iu~@?k_eT(~HC(0UFJgGn
zd{_ZqEf<^dN|?;;-$g23zfLydwQrdYuQbiugjh6-zt+z$6~+S7EfO}0;Gl|-dd9Oy~;#>HWu(yxC#!}_twU2%_O
z5pq9Sn1c-FOBdST!QV5~&qe99`~?ZzsWL9rvlyXvFIH!fT3IC3GoK-;?!B0)-)ak$
zdB?-PcuD)>@C5pz7yF_K|KeN8IPbJY{gHCaTbuAl(zGE^ehm}{)lm+4jiY>OiEfl{
zfkOG~`83LZ{R*J$@e(K>Jgbbd#jA|+Tk{$D>nvoW{2YG|N{mC_B8qo|K^+Ps#)f)m
zOctKynQCE!ymXO)F-sBo#gjt%Sh7wI9Fx-|zvTa6yae1WzlkQ?w@Um=7+MYWX!V+e
z@h?vkS`7o}|C~f<9nM$%pOe)1m#0}r?IniRX8wK$yyqkLwwh^}-2p&u%x>cv{ptcn
zfH$Aw1n_wXcd&9n-6kRdk~smE_zYa0j|6yTB7pKTN9+DcMzp#-t3%5RHRa|Etutkq
z|Bs;+t)kU;qJmaAv$y0C3m955f6rJDim=!x`ld~1;+j5(RlRM2%0!<|G=jAzUkBE)
zCjl(`4WzhdCHK<}DSA-+fOrAfjim$)Fblf^xgIkZUs_PWhE%LY)wZljs;y&(&?b1W^2v
zWrSkyGrGe<)w7hGCGY02UK?;IUgGbuPcxGEr?K&AH;X~_#B>JL$Ky1pZj+~Eg?Jue
zf4jbZI>h(J1U&zqgZBNUzz@+@i`>@$5wG2m#*5;=-~*QMwK0cbZXScF27iAtkHK_h
zDud}&T2IYzIOP@a6nHemiVySRd8kr;z+blWU$SOW7rxksB-FiLP-S@Kfnt)BOtkAC3ovhtFdRnm%#FhVt+Up^#EFWf2j^H(rTw
zOq$2=T9wQ2yqUjCxrTgr%D5_ACgyZfQa
ze*&}0gXc0Lbgc=aK9qfe6W{&(D|eRe^d|D=y^C~`Jr;lpHFyss^LXGZ
zpu+-pDWSbd@|7oytbWp`TR1ks!kn#k=I0K4fiAf1PY~E^7AlL;%i|P3>PuWd#^$iT
z{QUjmJhi)pVckvj>E!#}aTxzKM{(Q6-T)*R%(_cis3?vFYIm)8^Uuv=-8JLyPb=NE
zly3^_u7)1F`>?wJF5+IZK~HDKG4n)H5Nn1#eUB|~FI&1S;vZ-`@p5!Pu*r$pc9j`hft#7hr>QHQC@Mf5^K?rkBa4JW`4
z0lS*K9zH+~#x6I}V&NrSZ@9k$?L$q{hw2>|W!w$$5m8fa$T4gU82&n+G&f@S{$#~L
zK>AhmLApbtPaAfGiVUD%rg#xvJ#r8_SsqhG?;K0@nvex2o%V$@n&^#5jffZp(>e#diJxd&$`fKR0MN
z9UlGdra)=nHJDm6^LhR3Wl-P3>d)2cpLmMuKLDkHiLAcAb7OS9_+xLxKPly?gJbL9
zCwT?l8lmGtb7UY(5&sMI152oppYTtMpeUUc3r}|u3(KQLYq7W|=&cV8T%5-mhzlKO
zcqre`>AVTPvt+j(Am((QJU0qQ@mFsxXtpQ7CZ~_Vcm&|c;@D@Qo^@DJSpNaQDU{ht
zXpY$vESCHMqI9tzA?<5wnTz)ZM5jZ)38EDANG0;uXoUGX;!-GT5^2)7A;SU>j0%N#
zdvdSQI2_Tuq%)LcYRzZn_k(uV%)w#ph(c6L-icCL5{;!piZb&9cTF*T5m-0I_+B>B
z@D8*YjLk_w(U%;U^_ZbhXpZG6RDO+W`f&i?<%7E>ao(2KZih%!I$8
zyw^n2_|FWZ&*(=C^f@%s(Eex>{^8#b8f*A|m8pD;p?q3+dBPKh^1B@N_hFo
zhYaP74e(EY!i4_}1OGf~8h=CiSQGzknxT&0_YX%Hzg1d!t5K20-_ZVDCjK34DDPc7VZyt$+zpgxzejBWnpN^>i
zrt9(hbG!+EL-}1M`M3fTiCX-QQT^p3{!s3cw(G=i!vu9cK>VN5%qGnQbb+GR@HcgO7#vf5=$=_}H-WP17{|
zBaP20t^B=6^qDrzpr0n2_+R_J_xMQoYvrvXwcps#{%0onxiZzjA7f4OZz#WMh)MpZ
zYVtEW68?to$3AG9{|x0m^ZaipA2P-y|AzAELrwgz!S`NP1o*B@shqxtwDR%8BFVp2
z{!OIsS83(nN0fi9JUA|r{OjP$j5PjJ!u0>aVMhJ0egCpI68@9b@-HLt?;)*x{G*ZZ
z*UIZW7zuyf_YXzFf3i+K7mYW`=U{z#r-w}CDZ2J2MB;y~d{!j;;!3tQK9T5iDBHlF
z!%h6Jeg9=7{#s@Dex1p_oMtFD+i!ynN59`knDw7m{eJl1Nceln
z{?c>Ui6J`sYm+yO|JF=0+CN@yAF|S~Muh3lX@sl)32k(qb6TD;e<(&qZ
z%H5L;{Akk0EwuiJKOUj~8ajMNL?{nT)b!Z{5z2RH<-H@dzg8=cXzxiAD?saxNDYK4CS12Yx)J|#bB_e>qUtFrX=nOVP2)0gk+YnK14
z%IA-iEUur_Ms)rPKo~&&ZkP&>w15eB1{(y||@mOmppFS$QTr!rMz8_;KA3HF--2Fs_^896_@o22!^KvA5Tp6dW
zZzI|7hsLq-PkMrnzgE9hMx^nFa;Na+cf8KRhr;H2X`Ep_V$$DZcz<=dz8$Rf7aST%
z{tfNj@IZKbjg9a;o*4$um9aX0s+D2lr$hSkmS+2N(^!pP`$w|>^xw}g&*#&|GJHQ6
z%kUkewKu+RB>o?3z^6l>Fnn5!4Y$Yo8SJqdrt)+{dEjwA-lG{jhaT7DVL$|UHa#Bh
zKQsClSA~~P%`p1EwD!M`WG@VUTt~kqLrwHc(Y}8<(tO`o|NV=@Oy6G_qv?Z4_RgU(
z`tQe>$6x!tcfUyEKgKX#&yNnbf5$L-&mF-a(nc@D0p@#N1Wt!Jl1BPnrt5unX^{Z(@SeC@863gPh+Ym
zcR#AGS0b)IT0F}2cV905{Jkz;@6_P$u7~*gXB4l`%LDwqa1hu3qYU_%{1clDc^|6?E4Pbc~31Y0%H_kd0G^@uws
z)Q#B!FLwB`yZRO{=I!;?#Gh>Jr+-DD{Xd%18bw$~VGO)q7wF3_!m2Q=nJadq?9t
z01$^T&Y4};0^LUuXrjL>!qPPk;qcx_u)%M3{LO~H&6#bpcD5C3pC6%v`FoL)4I3W|6C8a2D%2grK_&~
z18_$?!Q-0}=keVkZXUq?7weJEdRiQ(oqFN@LNTRW$W6iQjd=x|a0w|&7eyg$yJ+1m
zEGhJ~I3awpL(JPD$l0xT&2{($qt~QrYw|;xKl-l0rN%_^=
z&LjQdk&b8PLvb7+Xcu#~*oC@1WzmO-_AD1uwq^e;`qN~BNRD>;byKAyl!(@Y7+DwR
z_CI6~+!#W6|ExSj|C?P_B>Fp_9%Z%22i-b-{QtZ0=+Ij?9!)d;$?=$walPX)d?bvA
zC~D)uMu88(AkjBG0i>~M=4cSRj>F@HoIk916g%h!T1)%WrLCDlLa9sIohh8zLGyjO
zuUcAeNXY#K>LEJY@Sif#mzDr{Z4+AHc|nxTPSG$HDcgkHd*A`O7>Qcp9{BGGe3)s0
zcDH7x3JJyMgrO(S2@h>MmnsY^ftNGqT*A--c-RL2LHDf-h57BF4fvoCz8s6nZaQca
zTeclOv8CCd;8$#EH+?KTbmCl^Fl;-vau)yjk##WiS7_=-VgAu5Y6^ZG1dX@aF3i6g
zg#g}P0B`$1A*DK;g6|u|PH0^!Jbe^s=EgHh?v6lrA@?gPncxL9bBcjuCd=%Mg4Dg-
zEnziZCx^!ejPa?$_a0yGvjA>pxe^>i?7cV^S9j|$e6Mn>To*V~H$Il6ak0YDlIM2j!oSq;|$C(6g%3+Sf;*}u}q*|koZ2*DOWsSWBR(+gs?JZ0L_Wyd{p>HL-ATrFXR>y)-w70vMGnLt{7ay@
z)(*TE1h)QVd=LGFsHe7<11GN{IE4Ibq5_$McLqQg`1LQt{O%g)mcBzZev#`C8|o0z
z*F7FtBIgk+vTiD|Lv{GC7c|y-yD+b_m451juQv$uTCvw&c6jY$6XrGJ#rWO}-s?~j
z<*^B3-YFs1g)b?`gxqqXOF!sozmQYL>=XDZo%dnY^^tB7=KYs(WxCSIj~1x`
zI@|YX0w`-C_bkeJ%I`w%XKYa8-6>mzocAoqMTCz_uz4KR)5IarN{_fqm(V)KF|jw6
zq&l$C9=3^2EV5%!s>3$1jT`iI0?KFZ8`xROPu}J@OGqa~36BB35A2d#klhCRqe$<}
ze0R=PkjJf)?m_~VN%?LlBD){t6e&FK^&lRS0711Wf?9+$QTZ`v8`RrBsRX|dd>>T&
z+HPMRF{M|0_6AWJlCTk{)3VRpzG`AhT733SQF?;^_HU@?PVqS6g}jL{iKI)T9K6KO
z+A)m3C|&Jm82)IhN-t2&799f|<%7W);A@M<05}$3=*9{e;9VMHH&F|?eSiQ=wXur^MOm`{t?yWlq`{6%_!
zJ(TOf#_0ijlQ*ZZ5LK|$(8d>~ZGoCb_fWoXK7OB9;Jsb+&Bw3BlKD0T4QT`aMFFEI
zKdGR|uwQ5RKAzQ1(7xxtBv$ynV$p(eDj)B~4rxPc4LuTAi6!!|9%}gXk{%d`eVn+W
zNL}w4`bWS;X@#A28mEDQ1&og>g+GCPpUA$C*S=4v@Oup6y&*6;P0sZ`*pT=u!{ChUT1s9t7xy9$$vJ0D)2^Y`W928JF??Q
zBvOgLpm8}Hw3*sUYrVe{KD%oG`wm*6){NgDkdx_S_D&U)h6aR#qw-*@kl?()B7+b4&&p_3BmZ>$wrnO2AAZa_tE%>
zzNYi~M3GgJHhBf{9RPp5J+@O!y;L{#(x8vZ1@4?6^m4(%dpU
zUfd}^2zec_c^J5OI+W&dB*-g0Fq8PlIT9#K+G#nVGvkE>2EW8BzvLdH
z)m`V@KExf>tYO^oWsq(FCf;b=S>cT}LEU9f6~0FYjG71zA_Y(z(7z=+=3k@yJs
zrJ4(xu?G$XBk>D1K%@vHFndM#B+WwnCD1#-c)T|rh?#7FuQli1<^>
zRRV?`c91T0a+B?F5|o7(k%#XS&oLer*rGt`AngpBJDYUwbc`52)q!LH|hy;!tGP
zt^|wSgwu+Zq9
z914Q%>n3~rqpg&GpqpY@2)V5W5Hud?H&Oat-qjHaGaZji{nrBqVtd^^BC9)-`2=io
zd6am@C8CUPPUq3-NcU#GZsq=rkkqd{uVD6nkmiD19Xcs{i|JENpKj%4
zKd>BMfg1*JS^hR~+Pptb@{5G&PpqIwuG6=IBzJFiZ2-wRg!yotM4Nc@cEuNiiB9gk
zL~KgVabhnhPu?L!Ecw}cRTbE-=zitM>Rs-TA-B$vUO*K$teBK@!1=^Nxe5!Zz4Q5k
z7CU=Y`^?0@Ih~yzq*G&GM!COdND|BQ_Rd~SnpOS*)PC8rPdSM)%FBMC{6+f$Gy1df
zgU10z5)pk+Kez@m{if4yPIdOFt3b&Y6i!RdPgbImTe=`hJBe25(l(V`-dOy}O4O9a
zE}ANO2W27el(z{CUb+vn6g_@75f)9<&39wPGyv*|*UdTdlDxJPGU_;o9abp7kt3{F
z)r-SH8ni!W$DdFYbp{H!{WFp9kMzWCjOgrP8tsMLwm=%9e@w?P`V(emeL=@h-6d&k
zFvtxIz`e&$oRz%^xJI@D=wox0si5lIhv^#=aA9bS{f#~R-Vh_PfQo#_l
zhfc;G@+*gTY8CK4c#ZI8$8n(YdE?Sfx$m9idZLqte8J50lL6kxBEWlX?LX4xTH2rV
zx7(k1P4LhD+wI@j@><&W{q6SqcDr|Aow_{I);;*^<`
z*4O$UoVecRkUZO;>>G8xVK8Jf6flI`Eo8M`Agi@4nUK
zQ;3r`0Jdie;|j_5FL&a$W;{=7NsEu{hXLsf!~BFl;5aCXT^Rs?!i`WM0<_o&cn#|X
zT5l}qW+!(R=o6qH*JGWxr=p&}S7#T_UO;XZ7zKG$8$KR^Cz(;YGYu@aXA%{^hR&`l
zLHJ6#ky+@G?&?~cdKZ|5r-JH=q+dUEMUrU@KR|yEx9{x8Nn>=jop&hmD6&W>|18lL
z(<n;_)hJFS+XWmYCZ!`YRNp)!?tlyW~v}eGMM&
zig7UQqTo-`4*A2jjEMk(dYCCA|18a=Knt!H)J_-m)5U*W6MtnmP0aPny&7}9ZD6jq
zTdT~q+X>8dcQ|u#KB|zvGF&G9`ke9C8Q_Mp^%e2gLgcSX#zUm>!0+_q!QMG(J~EAm
zfj&a+OpCIA(QzZ{2;;94`I*RAt%q;3iaP>pDTBvYw
z!Q%ODig13mD|(gSDO^Xfdr>#%5-T!#n?T#ZlHnke)DCGWv+D^rJZM8*M=%c6BTZFv17QBz9HD4PS%hn{?K&0K6Ge>Co0H5r9sv1<^>7A7>
zoux1iV$0KVx4X1eMK}%mbO9R7RA>Nt$3rWX(pI#djaQi$G>SZg0f4#W*YvOg_{Hah
z;W#5iK|x@g;vdj=z5O;QnZxE6Y){Yk%6<)}Yv3uW=Cok;qB$&K5iT7T_f;RqX^&58
zkD1!zkiPo(u500sf48cbKkm8jAL5TLeO3PG*;nHaEk0!K@8~YZ7%~U^
z{uPDx2&SWwF1n=eT+-JbDU>P|$*x;M@b?lX*7*b|?#j6uEi8VdAO1Mbk(%=-y!{tC
zmWE>D$Nr9HNtZU)rFpaI9i5^qY%7{2L-lvF(LMebx}Sg`D}CMR7${?e#l%t8_svh
z`2A?b+e&$f`q@RvCcn}yWJr}Q>;c4Emyx$VryfxF3`xp;v0rvD
zq(2@lXQLBg?$(1OWXE-WSdg2t6FNS8Q`MWllzl_;*ZFJ*@=5-Ls$BD_2#bFspX4Q3
zN&s;^7G!b=^$S@2dExb+=Jf|-{b^eL{0_{YsINPN*S#C-PSD45U+J7bWM}H$=NsQC(Nunklk8Bj#DctE>}+8P+(!wKwxK23
zu8;cIRePMLJ^s}jpm{Pw$*UANq&H6g9zox~9!|uktW4j>Gkq`Z1O=al`PZ8$>Ru(`=H{G)^D=lK
zf-iRZy=Web(r-+`dx&&MPTcihLyC$iHmVN_CN1w2{d^*|5If0l$1RMK{b9wt&|dMlW&$y?+nNlW1MNtgmYcL`W}SNdB0FSVJA?fkW{2q4e-Z7G9VR(X>!ZqfMwq@T
z34>SEyd{fh>g;aEDceR-veG_~wc7RXqdA^!B498p%+biycKN}Up-|GLydzNR6qXmu
zJ#SNW+G+WvJ1e%lHnjzeTZdyTn$}TbBz$+8@IBC5#rLP)Mtmz7pS-<_l_)IVB7Cw}
zzA3(9h^r&ScULq*o?rEID;j~b^vmrhx!7Gq3;M@K
zIzd`ii6CVq0r>Z6;KN)(c7)!>WV4&zO~GJ7J@=CipU+vY)4(8Y6gp^+&BGDW@L8!a
z?^l+v32u{xtsoS<0`WS$-btt!ua${W+%6S6Hr>}c$^s|~c~9fuOW8J<`*tUyhX~9B
zhAy|B6UDq!-iD%YW&+!`4K!z2Qh#h7;#rvWwl`*5HPC6Lflf%D-xT@)7#;vaDu!8`
zUDgxYNB&k$p8Rw^^!o#Hy87!v{rOPMU$Au!k!xo-xjqde*W-8S$kn+aTWrLc7aNba
zMrti%)OxcOr`AfM)?^!!N#tB{%G(I2V=-nMm$x)yV
z@eNHX=8t-J{6qXvtEb8zje7o{@yCk|RsJY!R3(2zX@eAfW5xU-+#ZoX@*70pkDD5*
z{4u(fojky`XZ~=z)yLbl$Gf!0c^RcdB;D*A6G<`KVsb1
zr{5l{UCI96KLy_r4Zag%Rs0(p&cC}f{?#$>EY7^yQ{|uQamL*y$2F*oakDwsoU6^2
z0(SFK;GtUq=WJ!)0T5^Wdi=%+{5rOt%C8&mLVn%G`SnI92xKe#ianOzgm;~&ad4IL
z={${3%Y2HC5>+0wX^#!G$C}z>sE4}VzlQzaO6IqIxBNrnFZWQ#|9lSvf2#3MVelzY
zM~R4*ufdNuQ#?bPo6%v5mhJI+Jmymn=_*THg>`Bi3RMOlz!P{w4D&yBQRAN!DrsR@
zy+sUjzbJQWgfwdDzyQ$4Bhcxtn{l1M)(nru!}D!y%@E@-o{fYTT^v$ATNfPE^a-cm
zCN<7I+fL&|n}u@cW_+YPcz2(YTfieBTtKN&Av~80?A-L7cqEoHPE0wNJr)e^Fve
z%ihuAs}LWmyFI8bZ^mk@kln+yzF_An^`nuj`S0}WxzZcd^&CmGvyx@UqUN|R4QJzI
z#hlc|Q*TX<#LwGJ{CtOoL>rBt4X1UGxhJcz_Xmp3Y=%L|w2^!n4N)J)k66z
zDcQkRJYR_yvz`p)+wd2DS5tnzmEgOk_<7VD`#5+)4>gqRLie!vF&-}zhy9O;%ghfy
zZzoCvXVn+^IcC2jw*(9rRukim7@?P-3ct+y|h7+^o7m`vj_s
z*AWkvQ8j%+5%38^F0K8M!Z%U&@0I;=<`}hU-|M
zOL%%Nry0KSI(g&pZKO`yUHKP|U4xzsy4P!K(5=ux70N#t0i+7on*i6rz)d|XrdI@b
z+V&)P#xQt3{TIPgPX|xEo_ctWUpG7}dK&jb-nW>}bI?qyF#U}@YmpKvnW{znF6)Up
z_3kek6RdUqMK>qm^m|%Z23$s>Ucv0|^$*eKTWY@D{zf|b01)yF
z9SG~8j(kHcHB3K@Hgin;82HhYpYiX)=kySK?R4APACl5G+q;^TAoHI`X_cppIeUG}vR|
z@Eiy<4cqVSQ;GdPkb4}_fSN<`1I&h>9KlF@3;@keXEyxn_0j(r(T3;qlP%(T0OZ`cPb6^NUzAAk9@u)yN=0;@7|gl~R$2
zZj%#?@jjXpj1}@;;UEFF#c~NMWx6+99|K5udB2k@{v=kN5ekO50!Rq6n~m{;
zY#mxGcjRq{#}~5GtHtuVIw5`@*iepsiEQXL(AKE(tK3iFn<;bvLX>`!Unmb&xCowa
z`v>DR_G$YS-y8Q+?irvzCem~>>NL5V1*)@zKf?_27k)17YA-{+YQ8=VjD&%}e4yrh
z4Bnema@3Os#*n1z<6}(uW%TtMvg(-?)z`CGGG)$s8-6Dxg?a$_JTiwxpx
za%kx0pQHlO-|ii}zjwj~n({RV2S5M!Z$xPW!FnQ
zj8NEgP!M0!#o7Axiow64$o;+C-wE~G>Fe|IvR7383p5CYl=J=xBBkUd0m_o{gIo9(og$ZRLr14Al-A;tRp!NmkS;yF8HBqKFv$Go8YeYfWP&^TV=SGyfjZ@09lgdVE
z_xAj^rqP<48^f%*ls}M}w}I7H=1uhYo}fAUv}y}>j7^3Qy3m97uU!$KsImG_1#@2F*@K@vE5*%Q5l0+Z)tjlkRgckBJsYBVb`G_CsEJCtQ
z5JTGy`VT1{IHB6vs?A@vT;x_%5)f~gEzsvGyASmO(En^W>T?N}nMgW@VXm$+TE
z7f+QHa?oGxu4=ZoO#WzqvYx7PYhWfv#XeBup*kAui+YF#1NPt50odBU?lvtx6R|5D
zW|-f3*DJB>d_1?c2Ij_%t&%L3;@H!TFdwfJ`z!hx{ITD_AE8BT3H(vc*ZN}@!blY9
z>@)9JSATN)aKBod!LJraxmnZKa30l3=a)MFWp3P49L0)(<$j^#2
zm=F_Xkq!xUopHF7d$r%WRhRaV0U@39_?!uD
zAL#C4m@sq}0@?$c6R^*Q7f}<{?gQo{bUP@9ox{V9p)<@4Zj%l{H}Dd#eroiW6zS~=
zBJtVOu=Q=u?_w%UUok|I3*j5?KU*2l`v`aGqQ~E92s=)Ri`96R=DYA@w3^}v7JowS
zDfF;_2`~U>U8$s}zg9BVj2HdgU`{HJ!nrw7l*)ytZ=k9DFc7kVy*4s>LhVHQ3%%|6
zP4w6AD;E9~BU=Apa!0A53lfF88OMdX;#o1JH2Zh>T$H{SbAAfuoUpP?(=jDt%06!!
z+>O7n7RzNU)Wxd^KtJHX+Tmk7yNSaSjPlli*F=1E`j8Nq&CKObjg3kpDXo|J-Yu+0vNImd&d@IDk>1k1?#FQ}hRxoa_TwiEGF
z(ILJeCj;imK#yZq0`B5vXo0GBHm@a|BkNWSJG(=4#dZA8IDf80ntfjr7_RVmv`!-n
zwqfWC?*a5Hv5VGiLfwaCF=co52@0jj$Kj8s;aB;f1kbow*!pK6)amL^wMHQ!^F(+{
z3i0*+A813pbS4-IB$2ZDKG2;~2?ye|faWz_)tTZyU0FUi1OF21UeU2`5Ls7dteYl%
zjog$b?bcueJF1sI^$82(VZfBkOtQPUZ-r*WeQs%&{47qk=w!kCdc?%mVz)ny^D$;z
zm$nN}r$kX467q3IO~%KgYewMXuPWwa%8PP_`1qv4$A#{c{lv$$d)BaW>Bn&pVH`rl
z$4=y95%@SneC$L%7RevQvJ}@urzm9%e3^vuv?zTAOTj&-#ljOYnY!^!jm3XQ#iBAp
z1|5p;4P&B(x?M}*H6}XKCEVB~MyUJX@l2t3tJNhmOpT$_0#E}mIGg<`%>Kx^?v!ob
zI{@ZdMVRb3W-DNi38?#T1GQg&55U(GdG|(k%?293*TlYK&O~Bgoga$MUsTM8Wy_Fn
zh!QVYh!RNF^VLyPYZT$_0mA(iN+fL2(IH_Ad{qd~$B8D0K2T$eHB+cx$mmdi!Bb3?h3?-g#jQW$IG`Gi4tj2#s^?a*H=R6cos(rj7lXeDNixB-9}sWw
zH^=M!&9R%VTG^4jeJ<%63eHFaOPEf276{9M5P3ULZ{TBH+TW0AK5ua4Pn@0GJ^mL<
z^|wRLI1^MiIJQz9`2QRtV8__8P)*1ihQs9R0n3WnHo8y*^WUXGqvX#K{ZnLAHp*QM
zHN_UiV&Q4LpWz%%NqIq>G(Tla=TLL;=I>yUD~K(&0J)n9Pani3KLWTLSM_^|&cLiG
zqI3Wl8pfp1E*72yLGIL1O!;ogFEEaik@Z2{l#15#z7m*#b2U`JhC99~tkp_)z8C)11=omuTd&
zL#V9$ZP}<{^TZJ3$LQ
zau!~&^tVi*!3p>e6kWo%ba9E$AOOEZ#f0tnyHIxw=)U@3r#uW3kQ)f<0&LD@FYv(7
zDg^b{FQq#`jst2%?v(99?ss&JgDr{^wW~^0x_4=aol$l@tHl^Ir-*))&*Avt6JAo{
zD#xdCv^TyB%!TUtI&%9n65KZfH2Z*^5{bAerNYuJuDCryZU~0Tol_nsJbe+L_F%J3
zEm26r!~P`5A61u&P{_91h@
z`*FZHd!vqnQ7Tce6gETd!n7cI4jV$u6(^<~5OUZ4MP#7)921M!oEQl^BmrOJ1HQlb
zJsK*VY$|oms?<3i82J~!59CCsG%%1g*|d(;s@;DOna)Fq#AmUQBY|f8yYDQz@1pQ7
zBKQuW>V|irbUmxI(6wCO!h!5Pfunz_^VN_J5%VoI1X;%GrZIxdBW(#~OeKAy=BrbK
zo#x~BNjm~9&IL993FXZRG-N-e@;LB7HT9jZ!D)-BquS>kX?RQfrl^V*2xW308pnW=SZ^K@&yA#lbsymJG
zJNkacc8K5)>1fIg*jP)wWz(|$@%R)Q+fxbmcg*OZINvR8hau}8P{sq^*^u26p#2+sry;wQy(_yb08Oe&&YFC>PqnZd
z*}DKLCE}2DwTmT%09hjV5KxvgH-Cu188V_nW$_b4gG@twM_h#X4!87my0n#Sr|Qyo
zIuPhW5fEn;8k-7@v8hyxN;mq6L)WSU^BJkI+^KC$Y565H5~XX%quRTJ7JpeqdxNf}
zy~jIf@#7uUMk~nQxleBJTQIc1kh;_4f5;{Nt1{kH*^e^h0Ms^NW-EbduXdooG{<>z
zkz}8@YB6Tpe^u--uU%euHW(?s4D;jZzghOHX1jHVrsbtSME;KaE82No+_#<9!FV5l
zSzd1zec2XW*vDi9Dx%zK^xD;d=jZg^YVdc~7JXghc0UJ0e19!`|J2ek%pUgkQk0eo
zPYW=6&;5Z^=)$zs38oFf*p+*JONfW*L9;%=jL8+k_vYCO;Y&8bS2ew!J5V*fJe9#S
z;BbZT%=sJe+y*Q85*5URIv-I}2{Z&~`(*he8h&BTB5ArKqmPa2ChI;#P
zd3lcKfE~q`kn*X6xb*`|d03~zYM`to-XDAx}*Sl#)B82-b7qWTo3O~lxOFv<<
z$)J6`R|cjr$lko!MoXG;FjCRq1w(8{+~4l~
zbF_iumbS_-!q^5}9n}5j&uB7Viz&gB8x9Pv7xE(nyc8RTJ6%1G{v`Eka(&D~tQaM+OXVRy#fKl|dpX`B$L!dUdkR(jL8y8ia
z@yOuwIO0j)C<-D`zaN5m%wT1euoJy92g~p(!&wEa!GtaNTdCUel()+EV7x$LWc{0y
z5;+FWpmBC0dZTIoJGL#cM
z1mhcEZl=cjO`sGtHWiY=10QIf#e1VX*CAYwfZ@|p>o?4o_H+u?tn*~%c
zc)muxlH%+ALE1BqJrdc+Xu*CiX9@$`y?%kCh$Ofqdyg;}iaD|lcn{PGsd9pT$@k#<
z1w$S8NC)?n@$8Q?9Pv5lVIfu$cexbb5^{dFGB2p~+k#X_n>nK#_gLYdHrC=)N0Jr3
zPLy{X2~u7=d!U=vKHFd25;z&u&a-yZ&a)0k)=oBSkL|U`mfGXZ$zkVNhlJUCI7ok;
zBfHP08I7uaEKl9XY$vHmKJ^RREQ7J#kDtAH4nH5>O|=*C$Mxs5E0CHeN6!D>NWTZn
z&c{q0M)sD&xb9eL>-W$~O6lneP{dtv9?yJPdV`8
zjB>3c*ZQAo&0yc(%_g3oahCB7uoOGd
z+txFVc0KT)9dubhU5lm%jN?+>$WKbV>G^LX#+&Bu6_$6m6=fRukM7F}<3IJxs*sCp
z | | |