From 689bb8d14987fa7ea9cacd89b0e518254d1f7d7a Mon Sep 17 00:00:00 2001
From: Andrei Rusu
Date: Wed, 12 Jun 2024 13:32:47 +0200
Subject: [PATCH] Add support for cloud storage

---
 environment.yml                           |  2 +-
 oceanstream/cli/main.py                   | 18 ++++--
 oceanstream/core.py                       | 24 ++++++--
 oceanstream/plot/echogram.py              | 59 +++++++++++++-----
 oceanstream/process/combine_zarr.py       |  5 +-
 oceanstream/process/file_processor.py     | 74 ++++++++++++++++-----
 oceanstream/process/folder_processor.py   | 64 +++++++++++++-------
 oceanstream/settings/defaults.config.json |  4 +-
 pyproject.toml                            | 47 --------------
 setup.cfg                                 |  3 +-
 10 files changed, 181 insertions(+), 119 deletions(-)

diff --git a/environment.yml b/environment.yml
index e4c4126..8ea5c78 100644
--- a/environment.yml
+++ b/environment.yml
@@ -3,7 +3,7 @@ channels:
   - defaults
   - conda-forge
 dependencies:
-  - python=3.11
+  - python=3.10
   - jinja2
   - netcdf4
   - numpy
diff --git a/oceanstream/cli/main.py b/oceanstream/cli/main.py
index 73803b2..959b189 100644
--- a/oceanstream/cli/main.py
+++ b/oceanstream/cli/main.py
@@ -11,7 +11,7 @@ from rich import print
 from rich.traceback import install, Traceback
 from oceanstream.settings import load_config
-from dask.distributed import LocalCluster, Client
+from dask.distributed import LocalCluster, Client, Variable
 from rich.console import Console
@@ -195,6 +195,7 @@ def compute_sv(
     sonar_model: str = typer.Option(DEFAULT_SONAR_MODEL, help="Sonar model used to collect the data",
                                     show_choices=["AZFP", "EK60", "ES70", "EK80", "ES80", "EA640", "AD2CP"]),
     plot_echogram: bool = typer.Option(False, help="Plot the echogram after processing"),
+    use_dask: bool = typer.Option(False, help="Start a Local Dask cluster for parallel processing (always enabled for multiple files)"),
     depth_offset: float = typer.Option(0.0, help="Depth offset for the echogram plot"),
     waveform_mode: str = typer.Option("CW", help="Waveform mode, can be either CW or BB", show_choices=["CW", "BB"]),
@@ -213,11 +214,14 @@
     file_path = Path(source)
     config_data = initialize(settings_dict, file_path, log_level=log_level)
+    client = None
     console = Console()
+    single_file = file_path.is_dir() and source.endswith(".zarr")
 
     with console.status("Processing...", spinner="dots") as status:
         status.start()
-        cluster = LocalCluster(n_workers=workers_count, threads_per_worker=1)
-        client = Client(cluster)
+        if use_dask or not single_file:
+            cluster = LocalCluster(n_workers=workers_count, threads_per_worker=1)
+            client = Client(cluster)
 
         try:
             if file_path.is_dir() and source.endswith(".zarr"):
@@ -235,13 +239,14 @@
                 f"[blue] Processing zarr files in {file_path}...[/blue] – navigate to "
                 f"http://localhost:8787/status for progress")
             from oceanstream.process import process_zarr_files
-
+            processed_count_var = Variable('processed_count', client)
             process_zarr_files(config_data,
                                workers_count=workers_count,
                                status=status,
                                chunks=config_data.get('base_chunk_sizes'),
                                plot_echogram=plot_echogram,
                                waveform_mode=waveform_mode,
+                               processed_count_var=processed_count_var,
                                depth_offset=depth_offset)
         else:
             print(f"[red]❌ The provided path '{source}' is not a valid Zarr root.[/red]")
@@ -252,8 +257,9 @@
             logging.exception("Error while processing %s", config_data['raw_path'])
             print(Traceback())
     finally:
-        client.close()
-        cluster.close()
+        if client is not None:
+            client.close()
+            cluster.close()
         status.stop()
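
Note: the CLI now creates the Dask cluster lazily: a single .zarr store is processed inline unless --use-dask is passed, while a directory of stores always gets a LocalCluster. The distributed Variable seeded here is the shared progress counter read and bumped by the workers in process_single_zarr_file() further down. A minimal sketch of that counter pattern (local cluster assumed; the increment is a read-modify-write, so counts from concurrent workers are best-effort rather than exact):

    from dask.distributed import Client, LocalCluster, Variable

    cluster = LocalCluster(n_workers=2, threads_per_worker=1)
    client = Client(cluster)

    counter = Variable('processed_count', client)
    counter.set(0)

    def bump():
        # get() and set() are separate round-trips; two workers can interleave here
        counter.set(counter.get() + 1)
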
diff --git a/oceanstream/core.py b/oceanstream/core.py
index 3c6635c..da6d5af 100644
--- a/oceanstream/core.py
+++ b/oceanstream/core.py
@@ -14,6 +14,9 @@
 def initialize(settings, file_path, log_level=None):
     logging.debug(f"Initializing with settings: {settings}, file path: {file_path}, log level: {log_level}")
 
+    if "config" not in settings:
+        settings["config"] = ""
+
     config_data = load_config(settings["config"])
     config_data["raw_path"] = file_path
@@ -27,6 +30,9 @@ def initialize(settings, file_path, log_level=None):
     if settings["output_folder"] is not None:
         config_data["output_folder"] = settings["output_folder"]
 
+    if settings.get('cloud_storage') is not None:
+        config_data['cloud_storage'] = settings['cloud_storage']
+
     return config_data
@@ -103,8 +109,7 @@ def combine(source, output=None, config=None, log_level="WARNING", chunks=None):
     file_name = f"{Path(dir_path).stem}-combined.zarr"
     zarr_output_file = os.path.join(config_data['output_folder'], file_name)
-    logging.info(
-        f"Combining Zarr files to {zarr_output_file}; navigate to http://localhost:8787/status for progress")
+    logging.info(f"Combining Zarr files to {zarr_output_file}")
     combine_zarr_files(dir_path, zarr_output_file=zarr_output_file, chunks=chunks)
     logging.info("Zarr files have been combined successfully.")
@@ -113,18 +118,24 @@
 def compute_sv(source, output=None, workers_count=None, sonar_model=DEFAULT_SONAR_MODEL, plot_echogram=False,
-               depth_offset=0.0, waveform_mode="CW", config=None, log_level="WARNING", chunks=None):
-    logging.debug("Starting compute_sv function")
+               depth_offset=0.0, waveform_mode="CW", log_level="WARNING", chunks=None, config=None,
+               processed_count_var=None):
     settings_dict = {
-        "config": config,
         "sonar_model": sonar_model,
         "output_folder": output or DEFAULT_OUTPUT_FOLDER
     }
+
+    if config is not None:
+        settings_dict.update(config)
+
     file_path = Path(source)
     config_data = initialize(settings_dict, file_path, log_level=log_level)
 
     if chunks:
         config_data['chunks'] = chunks
+    else:
+        config_data['chunks'] = config_data.get('base_chunk_sizes', None)
 
     if file_path.is_dir() and source.endswith(".zarr"):
         logging.debug(f"Computing Sv for Zarr root file: {file_path}")
@@ -136,7 +147,8 @@
         logging.debug(f"Processing Zarr files in directory: {file_path}")
         from oceanstream.process import process_zarr_files
-        process_zarr_files(config_data, workers_count=workers_count, chunks=chunks, plot_echogram=plot_echogram,
+        process_zarr_files(config_data, workers_count=workers_count, chunks=chunks,
+                           processed_count_var=processed_count_var, plot_echogram=plot_echogram,
                            waveform_mode=waveform_mode, depth_offset=depth_offset)
     else:
         logging.error(f"The provided path '{source}' is not a valid Zarr root.")
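
Note: initialize() folds an optional cloud_storage block into the runtime config, and compute_sv() now accepts a config dict rather than a config-file path. A hypothetical settings dict matching the keys that upload_to_cloud_storage() and get_chunk_store() expect (Azure is the only backend wired up in this patch; the container name and connection string are placeholders):

    config = {
        "cloud_storage": {
            "storage_type": "azure",
            "container_name": "echograms",
            "storage_options": {
                "connection_string": "DefaultEndpointsProtocol=https;AccountName=...;AccountKey=..."
            }
        }
    }
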
diff --git a/oceanstream/plot/echogram.py b/oceanstream/plot/echogram.py
index 622fafd..83dca6a 100644
--- a/oceanstream/plot/echogram.py
+++ b/oceanstream/plot/echogram.py
@@ -1,7 +1,10 @@
 import asyncio
 import os
+import tempfile
 import xarray as xr
 import numpy as np
+import logging
+
 from matplotlib.colors import LinearSegmentedColormap, Colormap
 import matplotlib.pyplot as plt
 import matplotlib.dates as mdates
@@ -287,18 +290,13 @@ def plot_individual_channel_image_only(ds_Sv, channel, output_path, file_base_name,
     plt.close()
 
 
-def plot_individual_channel_simplified(ds_Sv, channel, output_path, file_base_name,
-                                       cmap='viridis',
-                                       regions2d=None,
-                                       region_ids=None, region_class=None):
+def plot_individual_channel_simplified(ds_Sv, channel, output_path, file_base_name, echogram_path=None,
+                                       config_data=None, cmap='ocean_r'):
     """Plot and save echogram for a single channel with optional regions and enhancements."""
     full_channel_name = ds_Sv.channel.values[channel]
     channel_name = "_".join(full_channel_name.split()[:3])
     plt.figure(figsize=(30, 18))
-    echogram_output_path = os.path.join(output_path, f"{file_base_name}_{channel_name}.png")
-
-    # Apply the same preprocessing steps from _plot_echogram
     ds = ds_Sv
     filtered_ds = ds['Sv']
@@ -332,7 +330,7 @@
         yincrease=False,
         vmin=-80,
         vmax=-50,
-        cmap='ocean_r',
+        cmap=cmap,
         cbar_kwargs={'label': 'Volume backscattering strength (Sv re 1 m-1)'}
     )
@@ -340,8 +338,20 @@
     plt.xlabel('Ping time', fontsize=14)
     plt.ylabel('Depth', fontsize=14)
     plt.title(f'Echogram for Channel {channel_name}', fontsize=16, fontweight='bold')
-    plt.savefig(echogram_output_path, dpi=300, bbox_inches='tight')
-    plt.close()
+
+    echogram_file_name = f"{file_base_name}_{channel_name}.png"
+
+    if config_data and 'cloud_storage' in config_data:
+        with tempfile.NamedTemporaryFile(delete=False, suffix='.png') as temp_file:
+            plt.savefig(temp_file.name, dpi=300, bbox_inches='tight')
+            plt.close()
+            echogram_output_path = os.path.join(echogram_path, echogram_file_name)
+            upload_to_cloud_storage(temp_file.name, echogram_output_path, config_data['cloud_storage'])
+            os.remove(temp_file.name)
+    else:
+        echogram_output_path = os.path.join(output_path, echogram_file_name)
+        plt.savefig(echogram_output_path, dpi=300, bbox_inches='tight')
+        plt.close()
 
 
 def plot_sv_data_parallel(ds_Sv, file_base_name=None, output_path=None, cmap=None, client=None):
@@ -360,15 +370,15 @@
     wait(futures)
 
 
-def plot_sv_data(ds_Sv, file_base_name=None, output_path=None, cmap=None, regions2d=None, region_ids=None,
-                 region_class=None):
+def plot_sv_data(ds_Sv, file_base_name=None, output_path=None, echogram_path=None, config_data=None, cmap=None):
     """Plot the echogram data and the regions."""
     if not plt.isinteractive():
         plt.switch_backend('Agg')
 
     for channel in range(ds_Sv.dims['channel']):
-        plot_individual_channel_simplified(ds_Sv, channel, output_path, file_base_name, cmap, regions2d, region_ids,
-                                           region_class)
+        plot_individual_channel_simplified(ds_Sv, channel, output_path, file_base_name, echogram_path=echogram_path,
+                                           config_data=config_data,
+                                           cmap=cmap or 'ocean_r')
         # plot_individual_channel_image_only(ds_Sv, channel, output_path, file_base_name, cmap)
         # plot_individual_channel_shaders(ds_Sv=ds_Sv, channel=channel, output_path=output_path,
         #                                 file_base_name=file_base_name, cmap='ocean_r')
@@ -540,3 +550,24 @@ def _plot_region_ids(colors, ds_Sv, idx, labels_added, region_ids, regions2d):
         labels_added.add(label)
         idx += 1
     return idx
+
+
+def upload_to_cloud_storage(local_path, remote_path, cloud_storage_config):
+    storage_type = cloud_storage_config['storage_type']
+    container_name = cloud_storage_config['container_name']
+    storage_options = cloud_storage_config['storage_options']
+
+    if storage_type == 'azure':
+        upload_to_azure_blob(local_path, remote_path, container_name, storage_options)
+    else:
+        raise ValueError(f"Unsupported storage type: {storage_type}")
+
+
+def upload_to_azure_blob(local_path, remote_path, container_name, storage_options):
+    from azure.storage.blob import BlobServiceClient
+    blob_service_client = BlobServiceClient.from_connection_string(storage_options['connection_string'])
+    blob_client = blob_service_client.get_blob_client(container=container_name, blob=remote_path)
+
+    with open(local_path, "rb") as data:
+        blob_client.upload_blob(data, overwrite=True)
+        logging.info(f"Uploaded {local_path} to Azure Blob Storage as {remote_path}")
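
Note: when a cloud_storage block is present, each echogram is rendered to a NamedTemporaryFile, uploaded, then deleted locally; otherwise it is saved next to the Sv zarr output as before. A usage sketch for the dispatcher above (paths and credentials are placeholders; any storage_type other than 'azure' raises ValueError):

    upload_to_cloud_storage(
        "/tmp/tmpab12cd.png",                    # local PNG produced by plt.savefig
        "transect-01/file_ES38B_channel.png",    # blob name inside the container
        {
            "storage_type": "azure",
            "container_name": "echograms",
            "storage_options": {"connection_string": "DefaultEndpointsProtocol=..."},
        },
    )
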
diff --git a/oceanstream/process/combine_zarr.py b/oceanstream/process/combine_zarr.py
index 7e5768c..a6010f5 100644
--- a/oceanstream/process/combine_zarr.py
+++ b/oceanstream/process/combine_zarr.py
@@ -5,7 +5,6 @@
 from datetime import datetime
 from rich import print
 from pathlib import Path
-import psutil
 
 from oceanstream.echodata import check_reversed_time, fix_time_reversions
@@ -21,7 +20,7 @@ def read_zarr_files(input_folder):
         logging.error("Input folder does not exist: %s", input_folder)
         return
 
-    zarr_files = list(input_path.glob("*.zarr"))
+    zarr_files = list(input_path.rglob("*.zarr"))
     if not zarr_files:
         logging.error("No .zarr files found in directory: %s", input_folder)
         return
@@ -47,7 +46,7 @@ def fix_time(ed):
     return ed
 
 
-def combine_zarr_files(input_folder, zarr_output_file=None, chunks=None, log_level=logging.DEBUG):
+def combine_zarr_files(input_folder, zarr_output_file=None, chunks=None):
     start_time = time.time()
     logging.debug("Starting to combine Zarr files from folder: %s", input_folder)
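
Note: switching read_zarr_files() from glob() to rglob() means converted stores written into per-transect subfolders (see the base_path handling in convert_raw_file() below) are still discovered. The difference in one line:

    from pathlib import Path

    top_level = list(Path("output").glob("*.zarr"))   # old: matches one level only
    any_depth = list(Path("output").rglob("*.zarr"))  # new: walks subdirectories too
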
diff --git a/oceanstream/process/file_processor.py b/oceanstream/process/file_processor.py
index 9a1e49f..fdaf7d0 100644
--- a/oceanstream/process/file_processor.py
+++ b/oceanstream/process/file_processor.py
@@ -15,7 +15,6 @@
 from oceanstream.echodata import get_campaign_metadata
 from .process import compute_sv, process_file_with_progress, read_file_with_progress
-
 install(show_locals=True, width=120)
@@ -23,13 +22,14 @@ def get_chunk_sizes(var_dims, chunk_sizes):
     return {dim: chunk_sizes[dim] for dim in var_dims if dim in chunk_sizes}
 
 
-def compute_Sv_to_zarr(echodata, config_data, chunks=None, plot_echogram=False, **kwargs):
+def compute_Sv_to_zarr(echodata, config_data, base_path=None, chunks=None, plot_echogram=False, **kwargs):
     """
     Compute Sv from echodata and save to zarr file.
 
     Args:
         echodata:
         config_data:
+        base_path:
         chunks:
         plot_echogram:
         **kwargs:
@@ -42,11 +42,21 @@
     encode_mode = waveform_mode == "CW" and "power" or "complex"
     Sv = compute_sv(echodata, encode_mode=encode_mode, **kwargs)
 
-    parent_folder = os.path.join(Path(config_data["output_folder"]), file_path.stem)
-    if not os.path.exists(parent_folder):
-        os.makedirs(parent_folder)
+    if base_path:
+        relative_path = file_path.relative_to(base_path)
+
+        if relative_path.parent != Path("."):
+            zarr_path = Path(relative_path.parent) / file_path.stem
+        else:
+            zarr_path = relative_path.stem
+    else:
+        zarr_path = file_path.stem
 
-    output_path = os.path.join(parent_folder, f"{file_path.stem}_Sv.zarr")
+    output_path = Path(config_data["output_folder"]) / zarr_path
+    output_path.mkdir(parents=True, exist_ok=True)
+
+    echogram_path = zarr_path
+    zarr_file_name = f"{file_path.stem}_Sv.zarr"
 
     if chunks is not None:
         for var in Sv.data_vars:
@@ -56,14 +66,15 @@
             if 'chunks' in Sv[var].encoding:
                 del Sv[var].encoding['chunks']
 
-    print("Removing background noise...")
     ds_processed = Sv
+    # ds_processed = apply_background_noise_removal(Sv, config=config_data)
 
-    ds_processed.to_zarr(output_path)
+    write_zarr_file(zarr_path, zarr_file_name, ds_processed, config_data, output_path)
 
     if plot_echogram:
         try:
-            plot_sv_data(ds_processed, file_base_name=file_path.stem, output_path=parent_folder)
+            plot_sv_data(ds_processed, file_base_name=file_path.stem, output_path=output_path,
+                         echogram_path=echogram_path, config_data=config_data)
         except Exception as e:
             logging.exception(f"Error plotting echogram for {file_path}:")
             raise e
@@ -71,13 +82,22 @@
     return output_path
 
 
+def write_zarr_file(zarr_path, zarr_file_name, ds_processed, config_data=None, output_path=None):
+    if config_data and 'cloud_storage' in config_data:
+        store = get_chunk_store(config_data['cloud_storage'], Path(zarr_path) / zarr_file_name)
+    else:
+        store = os.path.join(output_path, zarr_file_name)
+
+    ds_processed.to_zarr(store, mode='w')
+
+
 async def process_raw_file_with_progress(config_data, plot_echogram, waveform_mode="CW", depth_offset=0):
     try:
         with Progress(
-            TextColumn("[progress.description]{task.description}"),
-            BarColumn(),
-            TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
-            TimeElapsedColumn()
+                TextColumn("[progress.description]{task.description}"),
+                BarColumn(),
+                TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+                TimeElapsedColumn()
         ) as progress:
             print(f"[green] Processing file: {config_data['raw_path']}[/green]")
             read_task = progress.add_task("[cyan]Reading raw file data...", total=100)
@@ -117,14 +137,25 @@
         logging.exception(f"Error processing file {config_data['raw_path']}: {e}")
 
 
-def convert_raw_file(file_path, config_data, progress_queue=None):
+def convert_raw_file(file_path, config_data, base_path=None, progress_queue=None):
     logging.debug("Starting processing of file: %s", file_path)
     from oceanstream.echodata import read_file
 
     try:
-        file_config_data = {**config_data, 'raw_path': Path(file_path)}
+        file_path_obj = Path(file_path)
+        file_config_data = {**config_data, 'raw_path': file_path_obj}
+
+        if base_path:
+            relative_path = file_path_obj.relative_to(base_path)
+            relative_path = relative_path.parent
+        else:
+            relative_path = file_path_obj.name
+
+        output_path = Path(config_data["output_folder"]) / relative_path
+        output_path.mkdir(parents=True, exist_ok=True)
+
         echodata, encode_mode = read_file(file_config_data, use_swap=True, skip_integrity_check=True)
-        echodata.to_zarr(save_path=config_data["output_folder"], overwrite=True, parallel=False)
+        echodata.to_zarr(save_path=output_path, overwrite=True, parallel=False)
 
         if progress_queue:
             progress_queue.put(file_path)
@@ -133,6 +164,17 @@
             print(Traceback())
 
 
+def get_chunk_store(storage_config, path):
+    if storage_config['storage_type'] == 'azure':
+        from adlfs import AzureBlobFileSystem
+        azfs = AzureBlobFileSystem(**storage_config['storage_options'])
+
+        return azfs.get_mapper(f"{storage_config['container_name']}/{path}")
+
+    else:
+        raise ValueError(f"Unsupported storage type: {storage_config['storage_type']}")
+
+
 def compute_single_file(config_data, **kwargs):
     file_path = config_data["raw_path"]
     start_time = time.time()
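
Note: get_chunk_store() returns an fsspec mapping, so to_zarr() streams chunks straight into Blob Storage with no local staging copy. A minimal sketch under the same assumptions as the patch (adlfs installed, connection-string auth, placeholder names):

    import xarray as xr
    from adlfs import AzureBlobFileSystem

    azfs = AzureBlobFileSystem(connection_string="DefaultEndpointsProtocol=...")
    store = azfs.get_mapper("echograms/transect-01/file_Sv.zarr")

    ds = xr.Dataset({"Sv": ("ping_time", [1.0, 2.0])})
    ds.to_zarr(store, mode="w")
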
diff --git a/oceanstream/process/folder_processor.py b/oceanstream/process/folder_processor.py
index c7bda09..cbec95a 100644
--- a/oceanstream/process/folder_processor.py
+++ b/oceanstream/process/folder_processor.py
@@ -7,6 +7,7 @@
 import traceback
 import warnings
 
+from dask import delayed, compute
 from pathlib import Path
 from datetime import datetime
 from rich import print
@@ -21,8 +22,10 @@
 from .processed_data_io import write_processed
 from .file_processor import convert_raw_file, compute_single_file
 
-logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
+# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
 warnings.filterwarnings("ignore", module="echopype")
+warnings.filterwarnings("ignore", category=UserWarning)
+warnings.filterwarnings("ignore", category=FutureWarning)
 
 pool = None
@@ -174,7 +177,8 @@ def convert_raw_files(config_data, workers_count=os.cpu_count()):
         pool = Pool(processes=workers_count)
 
         # Partial function with config_data, progress_queue and other arguments
-        process_func = partial(convert_raw_file, config_data=config_data, progress_queue=progress_queue)
+        process_func = partial(convert_raw_file, config_data=config_data, progress_queue=progress_queue,
+                               base_path=dir_path)
 
         # Run the progress updater in a separate process
         progress_updater = Process(target=update_progress, args=(progress_queue, len(sorted_files), log_level))
@@ -200,14 +204,25 @@
         logging.exception("Error processing folder %s: %s", config_data['raw_path'], e)
 
 
-def process_single_zarr_file(file_path, config_data, chunks=None, plot_echogram=False, waveform_mode="CW",
-                             depth_offset=0):
+def process_single_zarr_file(file_path, config_data, base_path=None, chunks=None, plot_echogram=False,
+                             waveform_mode="CW", depth_offset=0, total_files=1, processed_count_var=None):
     file_config_data = {**config_data, 'raw_path': Path(file_path)}
 
-    compute_single_file(file_config_data, chunks=chunks, plot_echogram=plot_echogram,
+    processed_count = None
+
+    if processed_count_var:
+        processed_count = processed_count_var.get() + 1
+        processed_count_var.set(processed_count)
+
+    compute_single_file(file_config_data, base_path=base_path, chunks=chunks, plot_echogram=plot_echogram,
                         waveform_mode=waveform_mode, depth_offset=depth_offset)
 
+    if processed_count:
+        print(f"Processed file: {file_path}. {processed_count}/{total_files}")
 
-def process_zarr_files(config_data, workers_count=os.cpu_count(), status=None, chunks=None, plot_echogram=False,
+
+def process_zarr_files(config_data, workers_count=os.cpu_count(), status=None, chunks=None, processed_count_var=None,
+                       plot_echogram=False,
                        waveform_mode="CW", depth_offset=0):
     dir_path = config_data['raw_path']
     zarr_files = read_zarr_files(dir_path)
@@ -216,24 +231,26 @@
         logging.error("No valid .zarr files with creation time found in directory: %s", dir_path)
         return
 
-    logging.info(f"Found {len(zarr_files)} Zarr files in directory: {dir_path}")
-
-    # def wrapped_process_single_zarr_file(file_path):
-    #     print(f"Computing Sv for {file_path}...\n")
-    #     return process_single_zarr_file(file_path, config_data,
-    #                                     chunks=chunks, plot_echogram=plot_echogram, waveform_mode=waveform_mode,
-    #                                     depth_offset=depth_offset)
-    #
-    # # Use Dask bag to parallelize the processing
-    # bag = db.from_sequence(zarr_files, npartitions=workers_count)
-    # bag = bag.map(wrapped_process_single_zarr_file)
-    #
-    # # Compute the results
-    # results = bag.compute()
+    if status:
+        status.update(f"Found {len(zarr_files)} Zarr files in directory: {dir_path}\n")
+
+    tasks = []
+    total_files = len(zarr_files)
+
+    if processed_count_var:
+        processed_count_var.set(0)
+
     for file_path in zarr_files:
-        status.update(f"Computing Sv for {file_path}...\n")
-        process_single_zarr_file(file_path, config_data, chunks=chunks, plot_echogram=plot_echogram,
-                                 waveform_mode=waveform_mode, depth_offset=depth_offset)
+        if status:
+            status.update(f"Computing Sv for {file_path}...\n")
+
+        task = delayed(process_single_zarr_file)(file_path, config_data, chunks=chunks, base_path=dir_path,
+                                                 plot_echogram=plot_echogram,
+                                                 waveform_mode=waveform_mode, depth_offset=depth_offset,
+                                                 total_files=total_files, processed_count_var=processed_count_var)
+        tasks.append(task)
+
+    # Execute all tasks in parallel
+    compute(*tasks)
 
     logging.info("✅ All files have been processed")
@@ -251,4 +268,5 @@ def from_filename(file_name):
     return None
 
 
+
 signal.signal(signal.SIGINT, signal_handler)
diff --git a/oceanstream/settings/defaults.config.json b/oceanstream/settings/defaults.config.json
index ba00831..e67b02e 100644
--- a/oceanstream/settings/defaults.config.json
+++ b/oceanstream/settings/defaults.config.json
@@ -111,8 +111,8 @@
   },
   "profile": false,
   "base_chunk_sizes": {
-    "ping_time": 50,
-    "range_sample": 50
+    "ping_time": 100,
+    "range_sample": 100
   },
   "sonar_model": null,
   "output_folder": "output",
diff --git a/pyproject.toml b/pyproject.toml
index 6f9ffe7..2cbcbc9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,50 +6,3 @@ requires = [
     "wheel >= 0.29.0"
 ]
 build-backend = "setuptools.build_meta"
-
-#[project]
-#name = "oceanstream"
-#version = "0.0.5-dev"
-#description = "OceanStream: process raw sonar data at scale"
-#readme = "README.md"
-#requires-python = ">=3.11"
-#authors = [
-# { name="Pineview Labs", email="hello@pineview.io" }
-#]
-#license = { file="LICENSE" }
-#classifiers = [
-# "Programming Language :: Python :: 3",
-# "Programming Language :: Python :: 3.10",
-# "Programming Language :: Python :: 3.11",
-# "License :: OSI Approved :: MIT License",
-# "Operating System :: OS Independent"
-#]
-#
-#dependencies = [
-# "git+https://github.com/OceanStreamIO/echopype-dev.git@oceanstream#egg=echopype"
-#]
-#
-#[project.optional-dependencies]
-#cli = [
-# "typer",
-# "rich"
-#]
-#plot = [
-# "matplotlib"
-#]
-#echodata = [
-# "pydantic"
-#]
-#
-#denoise = []
-#exports = []
-#process = []
-#complete = [
-# "typer",
-# "rich",
-# "matplotlib",
-# "pydantic"
-#]
-#
-#[project.scripts]
-#oceanstream = "oceanstream.cli.main:main"
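
Note: the larger base_chunk_sizes defaults above feed get_chunk_sizes() in file_processor.py, which re-chunks each variable only on the dimensions it actually has. A sketch of that application (Sv stands for any xarray Dataset with ping_time/range_sample dimensions):

    chunk_sizes = {"ping_time": 100, "range_sample": 100}

    def get_chunk_sizes(var_dims, chunk_sizes):
        return {dim: chunk_sizes[dim] for dim in var_dims if dim in chunk_sizes}

    for var in Sv.data_vars:
        Sv[var] = Sv[var].chunk(get_chunk_sizes(Sv[var].dims, chunk_sizes))
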
diff --git a/setup.cfg b/setup.cfg
index b2c68b6..0c20321 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -13,6 +13,7 @@ license_files = LICENSE
 url = https://oceanstream.io
 classifiers =
     Programming Language :: Python :: 3
+    Programming Language :: Python :: 3.10
     Programming Language :: Python :: 3.11
     License :: OSI Approved :: MIT License
    Operating System :: OS Independent
@@ -20,7 +21,7 @@ classifiers =
 [options]
 packages = find:
 platforms = any
-python_requires = >=3.11
+python_requires = >=3.10
 include_package_data = True
 setup_requires = setuptools_scm
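
Note: a hypothetical end-to-end run mirroring the CLI path (import path and argument values are illustrative; compute_sv() is the entry point patched in oceanstream/core.py above):

    from oceanstream.core import compute_sv

    compute_sv(
        "data/transects",        # directory of converted .zarr stores
        output="output",
        sonar_model="EK60",
        plot_echogram=True,
        config={
            "cloud_storage": {
                "storage_type": "azure",
                "container_name": "echograms",
                "storage_options": {"connection_string": "DefaultEndpointsProtocol=..."},
            }
        },
    )
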