diff --git a/docker/install/conda_env/torch_cpu_pip.txt b/docker/install/conda_env/torch_cpu_pip.txt
index 9d2675a897c4..d600201767bd 100644
--- a/docker/install/conda_env/torch_cpu_pip.txt
+++ b/docker/install/conda_env/torch_cpu_pip.txt
@@ -18,7 +18,6 @@ requests[security]==2.28
 scikit-learn
 scipy
 torch==2.3.0+cpu
-torchdata
 torcheval
 torchmetrics
 torch_geometric
diff --git a/docker/install/conda_env/torch_gpu_pip.txt b/docker/install/conda_env/torch_gpu_pip.txt
index 38225aa1570d..e1c439c96bbb 100644
--- a/docker/install/conda_env/torch_gpu_pip.txt
+++ b/docker/install/conda_env/torch_gpu_pip.txt
@@ -16,7 +16,6 @@ requests[security]==2.28
 scikit-learn
 scipy
 torch==2.3.0+cu121
-torchdata
 torcheval
 torchmetrics
 torch_geometric
diff --git a/docs/source/guide/minibatch-parallelism.rst b/docs/source/guide/minibatch-parallelism.rst
index 661d29538e19..5b7e88f890c3 100644
--- a/docs/source/guide/minibatch-parallelism.rst
+++ b/docs/source/guide/minibatch-parallelism.rst
@@ -24,7 +24,7 @@ generate a minibatch, including:
 
     dataloader = gb.DataLoader(datapipe)
 
 All these stages are implemented in separate
-`IterableDataPipe `__
+`IterableDataPipe `__
 and stacked together with `PyTorch DataLoader `__.
 This design allows us to easily customize the data loading process by
@@ -44,7 +44,7 @@ data movement overheads between processes.
 
 What's more, in order to overlap the data movement and model computation, we
 wrap data pipes before ``copy_to`` with
-`torchdata.datapipes.iter.Perfetcher `__ which
+`torchdata.datapipes.iter.Perfetcher `__ which
 prefetches elements from previous data pipes and puts them into a buffer. Such
 prefetching is totally transparent to users and requires no extra code. It
diff --git a/python/setup.py b/python/setup.py
index 0a4439cf0c6f..0f7fcac9b89f 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -226,14 +226,11 @@ def get_lib_file_path(lib_name, backend=""):
     "requests>=2.19.0",
     "tqdm",
     "psutil>=5.8.0",
-    "torchdata>=0.5.0",
     "pandas",
     "packaging",
     "pyyaml",
     "pydantic>=2.0",
 ]
-if "DGLBACKEND" in os.environ and os.environ["DGLBACKEND"] != "pytorch":
-    install_requires.pop(install_requires.index("torchdata>=0.5.0"))
 
 setup(
     name="dgl" + os.getenv("DGL_PACKAGE_SUFFIX", ""),
diff --git a/script/dgl_dev.yml.template b/script/dgl_dev.yml.template
index 708df84aa1e5..d6aeed663652 100644
--- a/script/dgl_dev.yml.template
+++ b/script/dgl_dev.yml.template
@@ -31,7 +31,6 @@ dependencies:
       - scikit-learn
       - scipy
       - torch==__TORCH_VERSION__
-      - torchdata>=0.5.0
       - torcheval
       - torchmetrics
       - torch_geometric
diff --git a/tests/lint/pylintrc b/tests/lint/pylintrc
index cd24357b182e..d7ecd0100c27 100644
--- a/tests/lint/pylintrc
+++ b/tests/lint/pylintrc
@@ -213,7 +213,7 @@ function-naming-style=snake_case
 # op - operators
 # ty - type
 # A, B, C, W - for tensor operators like matmul
-# dp - DataPipes (see https://pytorch.org/data/main/torchdata.datapipes.iter.html)
+# dp - DataPipes (see https://pytorch.org/data/0.7/torchdata.datapipes.iter.html)
 # it - iterators
 good-names=f,i,j,k,u,v,e,n,m,w,x,y,z,s,d,t,r,g,G,hg,sg,fn,ex,Run,_,us,vs,gs,es,op,ty,A,B,C,W,a,b,N,D1,D2,R,dp,it
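
Note (editor's sketch, not part of the diff above): the ``minibatch-parallelism.rst`` hunks describe how GraphBolt chains its data loading stages into a single datapipe and hands it to ``gb.DataLoader``, which prefetches from earlier stages so that the ``copy_to`` host-to-device transfer overlaps model computation. The snippet below illustrates such a pipeline under stated assumptions: the stage names follow DGL's public GraphBolt guide, and ``train_set``, ``graph``, ``features``, and the feature key ``"feat"`` are placeholder inputs, not anything introduced by this change.

    # Editor's illustration only. `train_set` (a GraphBolt item set), `graph`,
    # and `features` (a feature store) are assumed placeholder inputs.
    import torch
    import dgl.graphbolt as gb


    def build_dataloader(train_set, graph, features, device):
        # Each stage below is a separate IterableDataPipe; stacking them lets
        # the DataLoader prefetch from earlier stages while the model computes.
        datapipe = gb.ItemSampler(train_set, batch_size=1024, shuffle=True)
        datapipe = datapipe.sample_neighbor(graph, [10, 10])  # neighbor sampling
        datapipe = datapipe.fetch_feature(features, node_feature_keys=["feat"])
        datapipe = datapipe.copy_to(device)  # host -> device copy, overlapped
        return gb.DataLoader(datapipe)


    # Usage with the assumed placeholders: minibatches arrive already on `device`.
    # dataloader = build_dataloader(train_set, graph, features, torch.device("cuda"))
    # for minibatch in dataloader:
    #     ...  # forward/backward pass on `minibatch`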