Trim line length in the documentation/examples for the new theme
- the new nvidia-theme recommends a line length of 80 characters for
  code snippets, not 100 as used in DALI

Signed-off-by: Janusz Lisiecki <jlisiecki@nvidia.com>
JanuszL committed May 23, 2024
1 parent 708af60 commit d904786
Showing 131 changed files with 17,802 additions and 1,282 deletions.
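
The lint steps below point at a pyproject_docs.toml added elsewhere in this
commit. Its exact contents are not shown on this page; given the commit
message it is presumably little more than a Black section with the tighter
limit. A hypothetical sketch:

    # pyproject_docs.toml -- hypothetical sketch; only the 80-character
    # line limit is implied by the commit message, the rest is assumed
    [tool.black]
    line-length = 80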
6 changes: 4 additions & 2 deletions .github/workflows/lint.yml
@@ -7,7 +7,8 @@ on:
# TODO(klecki): Deduplicate this list of directories with `lint.cmake` file
env:
PYTHON_SECURITY_LINT_PATHS: "./tools ./dali/python ./dali_tf_plugin"
PYTHON_LINT_PATHS: "./dali ./docs ./internal_tools ./qa"
PYTHON_LINT_DOCS_PATHS: "./docs"
PYTHON_LINT_PATHS: "./dali ./internal_tools ./qa"
AUTOGRAPH_LINT_PATHS: "./dali/python/nvidia/dali/_autograph ./dali/test/python/autograph/"

jobs:
@@ -20,7 +21,8 @@ jobs:
python-version: '3.10'
- run: pip install flake8 bandit "black[jupyter]"==24.4.2
- run: black --check --verbose ${{ env.PYTHON_SECURITY_LINT_PATHS }} ${{ env.PYTHON_LINT_PATHS }} ${{ env.AUTOGRAPH_LINT_PATHS }}
- run: flake8 --config=.flake8 ${{ env.PYTHON_SECURITY_LINT_PATHS }} ${{ env.PYTHON_LINT_PATHS }}
- run: black --check --config pyproject_docs.toml --verbose ${{ env.PYTHON_LINT_DOCS_PATHS }}
- run: flake8 --config=.flake8 ${{ env.PYTHON_SECURITY_LINT_PATHS }} ${{ env.PYTHON_LINT_PATHS }} ${{ env.PYTHON_LINT_DOCS_PATHS }}
- run: flake8 --config=.flake8.ag ${{ env.AUTOGRAPH_LINT_PATHS }}
- run: bandit --config bandit.yml -r ${{ env.PYTHON_SECURITY_LINT_PATHS }}
cpp:
8 changes: 6 additions & 2 deletions cmake/lint.cmake
@@ -25,10 +25,12 @@ set(PYTHON_SECURITY_LINT_PATHS
${PROJECT_SOURCE_DIR}/dali_tf_plugin
)

set (PYTHON_LINT_DOCS_PATHS
${PROJECT_SOURCE_DIR}/docs
)
set(PYTHON_LINT_PATHS
${PYTHON_SECURITY_LINT_PATHS}
${PROJECT_SOURCE_DIR}/dali
${PROJECT_SOURCE_DIR}/docs
${PROJECT_SOURCE_DIR}/qa
${PROJECT_SOURCE_DIR}/internal_tools
)
@@ -41,6 +43,8 @@ set(AUTOGRAPH_LINT_PATHS
add_custom_target(lint-python-black
COMMAND
black --check --config ${PROJECT_SOURCE_DIR}/pyproject.toml ${PYTHON_LINT_PATHS} ${AUTOGRAPH_LINT_PATHS}
COMMAND
black --check --config ${PROJECT_SOURCE_DIR}/pyproject_docs.toml ${PYTHON_LINT_DOCS_PATHS}
COMMENT
"Performing black Python formatting check"
)
@@ -55,7 +59,7 @@ add_custom_target(lint-python-bandit

add_custom_target(lint-python-flake
COMMAND
flake8 --config=${PROJECT_SOURCE_DIR}/.flake8 ${PYTHON_LINT_PATHS}
flake8 --config=${PROJECT_SOURCE_DIR}/.flake8 ${PYTHON_LINT_PATHS} ${PYTHON_LINT_DOCS_PATHS}
COMMAND
flake8 --config=${PROJECT_SOURCE_DIR}/.flake8.ag ${AUTOGRAPH_LINT_PATHS}
COMMENT
8 changes: 4 additions & 4 deletions dali/operators/decoder/inflate/inflate.cc
@@ -40,11 +40,11 @@ concatenating compressed frames from the corresponding sequences.::
@pipeline_def
def inflate_sequence_pipeline():
compressed_seq, uncompressed_hwc_shape, compressed_chunk_sizes = fn.external_source(...)
compres_seq, uncompres_hwc_shape, compres_chunk_sizes = fn.external_source(...)
sequences = fn.experimental.inflate(
compressed_seq.gpu(),
chunk_sizes=compressed_chunk_sizes, # refers to sizes in ``compressed_seq``
shape=uncompressed_hwc_shape,
compres_seq.gpu(),
chunk_sizes=compres_chunk_sizes, # refers to sizes in ``compres_seq``
shape=uncompres_hwc_shape,
layout="HWC",
sequence_axis_name="F")
return sequences
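
For context, chunk_sizes holds the byte size of each compressed frame before
the frames are concatenated into one buffer. A sketch of how an external
source callback might assemble such inputs (hypothetical helper, not a DALI
API; assumes each frame is already compressed into a bytes object):

    import numpy as np

    def pack_sequence(compressed_frames):
        # byte size of each compressed chunk, in concatenation order
        chunk_sizes = np.array([len(c) for c in compressed_frames],
                               dtype=np.int32)
        # one flat uint8 buffer with all chunks back to back
        packed = np.frombuffer(b"".join(compressed_frames), dtype=np.uint8)
        return packed, chunk_sizes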
14 changes: 10 additions & 4 deletions dali/operators/generic/erase/erase.cc
@@ -77,8 +77,11 @@ A fill value is provided for all the channels. The coordinates can be transformed
multiplying by the input shape.
Which gives::
output[y, x, c] = 100 if 0.15 * 300 <= x < (0.3 + 0.15) * 300 and 0.15 * 300 <= y < (0.3 + 0.15) * 300
output[y, x, c] = input[y, x, c] otherwise
if 0.15 * 300 <= x < (0.3 + 0.15) * 300 and
0.15 * 300 <= y < (0.3 + 0.15) * 300:
output[y, x, c] = 100
else:
output[y, x, c] = input[y, x, c]
**Example 4:**
``anchor`` = (0.15, 0.15), ``shape`` = (20, 30), ``normalized_anchor`` = True, ``normalized_shape`` = False
@@ -90,8 +93,11 @@ coordinates. Since no axis_names is provided, the anchor and shape must contain
except "C" (channels).
Which gives::
output[y, x, c] = 0 if 0.15 * 300 <= x < (0.15 * 300) + 20 and (0.15 * 300) <= y < (0.15 * 300) + 30
output[y, x, c] = input[y, x, c] otherwise
if 0.15 * 300 <= x < (0.15 * 300) + 20 and
(0.15 * 300) <= y < (0.15 * 300) + 30:
output[y, x, c] = 0
else:
output[y, x, c] = input[y, x, c]
)code")
.NumInput(1)
.NumOutput(1)
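
A minimal NumPy sketch of the erase semantics documented above: a
hypothetical re-implementation for illustration, not the DALI operator
itself. The Example 3 values (300x300x3 image, anchor (0.15, 0.15), shape
(0.3, 0.3), both normalized, fill value 100) are inferred from the condition
shown above:

    import numpy as np

    def erase_region(img, anchor, shape, fill=0,
                     normalized_anchor=False, normalized_shape=False):
        # Hypothetical helper mirroring the documented semantics.
        h, w = img.shape[:2]
        ay, ax = anchor
        sy, sx = shape
        if normalized_anchor:   # normalized coords scale with the input
            ay, ax = ay * h, ax * w
        if normalized_shape:
            sy, sx = sy * h, sx * w
        out = img.copy()
        out[int(ay):int(ay + sy), int(ax):int(ax + sx), :] = fill
        return out

    # Example 3: region [45, 135) x [45, 135) is filled with 100
    out = erase_region(np.zeros((300, 300, 3), np.uint8),
                       (0.15, 0.15), (0.3, 0.3), fill=100,
                       normalized_anchor=True, normalized_shape=True)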
3 changes: 2 additions & 1 deletion dali/operators/image/color/brightness_contrast.cc
@@ -79,7 +79,8 @@ DALI_SCHEMA(BrightnessContrast)
The brightness and contrast are adjusted based on the following formula::
out = brightness_shift * output_range + brightness * (contrast_center + contrast * (in - contrast_center))
out = brightness_shift * output_range + brightness *
(contrast_center + contrast * (in - contrast_center))
Where the output_range is 1 for float outputs or the maximum positive value for integral types.
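
A worked instance of the formula, sketched in Python under the assumption of
float data (so output_range is 1):

    import numpy as np

    inp = np.array([0.2, 0.5, 0.8], dtype=np.float32)
    brightness, brightness_shift = 1.0, 0.1
    contrast, contrast_center = 1.5, 0.5
    output_range = 1.0  # float output

    out = brightness_shift * output_range + brightness * (
        contrast_center + contrast * (inp - contrast_center)
    )
    # e.g. 0.2 -> 0.1 + (0.5 + 1.5 * (0.2 - 0.5)) = 0.15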
14 changes: 9 additions & 5 deletions dali/operators/input/video_input_cpu.cc
@@ -60,21 +60,25 @@ will be partial and the last sequence in this batch will be determined using
-------------------------------------------------------------------
User decided that there shall be 5 frames per sequence and the last_sequence_policy='partial':
User decided that there shall be 5 frames per sequence and
the last_sequence_policy='partial':
-------------------------------------------------------------------
[ ][ ][ ][ ][ ][ ][ ][ ][ ][ ][ ][ ][ ][]
-------------------------------------------------------------------
Since there are not enough frames, the last sequence comprises 2 frames.
Since there are not enough frames, the last sequence comprises 2 frames.
The Pipeline has max_batch_size=3, therefore the operator will return 5 batches of sequences.
First 4 batches comprise 3 sequences and the last batch is partial and comprises 2 sequences.
The Pipeline has max_batch_size=3, therefore the operator will return
5 batches of sequences.
First 4 batches comprise 3 sequences and the last batch is partial and
comprises 2 sequences.
--------------- --------------- --------------- --------------- -------
[ ][ ][ ] [ ][ ][ ] [ ][ ][ ] [ ][ ][ ] [ ][]
--------------- --------------- --------------- --------------- -------
With the last_sequence_policy='pad', the last sequence of the last batch will be padded with 0:
With the last_sequence_policy='pad', the last sequence of the last batch
will be padded with 0:
--------------- --------------- --------------- --------------- -------000
[ ][ ][ ] [ ][ ][ ] [ ][ ][ ] [ ][ ][ ] [ ][ ]
--------------- --------------- --------------- --------------- -------000
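
The bookkeeping behind these diagrams can be sketched in a few lines of
Python. The total of 67 frames is an assumption chosen to match the pictures
(13 full sequences plus a 2-frame remainder):

    import math

    num_frames = 67             # assumed; 13 * 5 + 2, matching the diagrams
    frames_per_sequence = 5
    max_batch_size = 3

    # last_sequence_policy='partial': the trailing short sequence is kept
    num_sequences = math.ceil(num_frames / frames_per_sequence)          # 14
    last_sequence = num_frames - (num_sequences - 1) * frames_per_sequence  # 2

    num_batches = math.ceil(num_sequences / max_batch_size)              # 5
    last_batch = num_sequences - (num_batches - 1) * max_batch_size      # 2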
3 changes: 2 additions & 1 deletion dali/operators/reader/coco_reader_op.cc
@@ -41,7 +41,8 @@ images and annotation JSON files.
This reader produces the following outputs::
images, bounding_boxes, labels, ((polygons, vertices) | (pixelwise_masks)), (image_ids)
images, bounding_boxes, labels, ((polygons, vertices) | (pixelwise_masks)),
(image_ids)
* **images**
Each sample contains image data with layout ``HWC`` (height, width, channels).
19 changes: 16 additions & 3 deletions dali/operators/reader/nemo_asr_reader_op.cc
@@ -38,9 +38,22 @@ NVIDIA NeMo compatible manifest.
Example manifest file::
{"audio_filepath": "path/to/audio1.wav", "duration": 3.45, "text": "this is a nemo tutorial"}
{"audio_filepath": "path/to/audio1.wav", "offset": 3.45, "duration": 1.45, "text": "same audio file but using offset"}
{"audio_filepath": "path/to/audio2.wav", "duration": 3.45, "text": "third transcript in this example"}
{
"audio_filepath": "path/to/audio1.wav",
"duration": 3.45,
"text": "this is a nemo tutorial"
}
{
"audio_filepath": "path/to/audio1.wav",
"offset": 3.45,
"duration": 1.45,
"text": "same audio file but using offset"
}
{
"audio_filepath": "path/to/audio2.wav",
"duration": 3.45,
"text": "third transcript in this example"
}
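
In an actual manifest each JSON object sits on a single line (JSONL); the
multi-line layout above is only for readability. A small sketch of reading
such a file outside DALI (file name assumed):

    import json

    # Sketch: parse a NeMo-style JSONL manifest, one object per line.
    with open("manifest.json") as f:
        entries = [json.loads(line) for line in f if line.strip()]

    for e in entries:
        # 'offset' and 'text' are optional; 'audio_filepath' is mandatory
        print(e["audio_filepath"], e.get("offset", 0.0), e.get("text", ""))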
.. note::
Only the ``audio_filepath`` field is mandatory. If ``duration`` is not specified, the whole audio file will be used. A missing ``text`` field
3 changes: 2 additions & 1 deletion dali/operators/segmentation/select_masks.cc
@@ -40,7 +40,8 @@ masks are present.
Let us assume the following input mask, where symbolic coordinates are used for a clearer example::
polygons = [[0, 0, 3], [1, 3, 7], [2, 7, 10]]
vertices = [[x0, y0], [x1, y1], [x2, y2], [x3, y3], [x4, y4], [x5, y5], [x6, y6], [x7, y7], [x8, y8], [x9, y9]]
vertices = [[x0, y0], [x1, y1], [x2, y2], [x3, y3], [x4, y4], [x5, y5],
[x6, y6], [x7, y7], [x8, y8], [x9, y9]]
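
As a plain-Python illustration of this encoding (a sketch only; each polygon
entry is [mask_id, start_vertex, end_vertex], indexing into the flat
vertices list):

    # Sketch: gather the vertices belonging to one mask id.
    polygons = [[0, 0, 3], [1, 3, 7], [2, 7, 10]]
    vertices = [("x%d" % i, "y%d" % i) for i in range(10)]  # symbolic stand-ins

    selected = [vertices[start:end]
                for mask_id, start, end in polygons if mask_id == 1]
    # -> vertices 3..6, i.e. [(x3, y3), (x4, y4), (x5, y5), (x6, y6)]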
Example 1: Selecting a single mask with id ``1``, maintaining the original id::
3 changes: 2 additions & 1 deletion dali/python/nvidia/dali/plugin/mxnet.py
@@ -481,7 +481,8 @@ class DALIClassificationIterator(DALIGenericIterator):
.. code-block:: python
DALIClassificationIterator(pipelines, reader_name, data_name, label_name, data_layout)
DALIClassificationIterator(pipelines, reader_name, data_name, label_name,
data_layout)
is equivalent to calling
6 changes: 4 additions & 2 deletions docs/advanced_topics_sharding.rst
@@ -62,12 +62,14 @@ Shard calculation

Here is the formula to calculate the shard size for a shard ID::

floor((id + 1) * dataset_size / num_shards) - floor(id * dataset_size / num_shards)
floor((id + 1) * dataset_size / num_shards) -
floor(id * dataset_size / num_shards)

When the pipeline advances through the epochs and the reader moves to the next shard, the formula
needs to be extended to reflect this change::

floor(((id + epoch_num) % num_shards + 1) * dataset_size / num_shards) - floor(((id + epoch_num) % num_shards) * dataset_size / num_shards)
floor(((id + epoch_num) % num_shards + 1) * dataset_size / num_shards) -
floor(((id + epoch_num) % num_shards) * dataset_size / num_shards)

When the second formula is used, providing a size value once at the beginning of the training works
only when the ``stick_to_shard`` reader option is enabled and prevents DALI from rotating shards.
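
Both formulas translate directly to Python (a sketch; for non-negative
integers, floor division matches floor):

    def shard_size(shard_id, num_shards, dataset_size):
        # First formula: size of a fixed shard
        return ((shard_id + 1) * dataset_size // num_shards
                - shard_id * dataset_size // num_shards)

    def shard_size_in_epoch(shard_id, num_shards, dataset_size, epoch_num):
        # Second formula: shards rotate as epochs advance
        eid = (shard_id + epoch_num) % num_shards
        return ((eid + 1) * dataset_size // num_shards
                - eid * dataset_size // num_shards)

    # e.g. 10 samples over 3 shards -> sizes [3, 3, 4]
    sizes = [shard_size(i, 3, 10) for i in range(3)]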
3 changes: 2 additions & 1 deletion docs/auto_aug/auto_augment.rst
@@ -60,7 +60,8 @@ The best way is to wrap your policy creation into a function::

def my_custom_policy() -> Policy:
"""
Creates a simple AutoAugment policy with 3 sub-policies using custom magnitude ranges.
Creates a simple AutoAugment policy with 3 sub-policies using custom
magnitude ranges.
"""

shear_x = augmentations.shear_x.augmentation((0, 0.5), True)
28 changes: 21 additions & 7 deletions docs/autodoc_submodules.py
@@ -45,7 +45,8 @@
exclude_fn_members = {}

installation_page_url = (
"https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html"
"https://docs.nvidia.com/deeplearning/dali/user-guide/"
"docs/installation.html"
)

mod_aditional_doc = {
@@ -87,11 +88,15 @@ def get_functions(module):
or hidden members. No nested modules would be reported."""
result = []
# Take all public members of given module
public_members = list(filter(lambda x: not str(x).startswith("_"), dir(module)))
public_members = list(
filter(lambda x: not str(x).startswith("_"), dir(module))
)
for member_name in public_members:
member = getattr(module, member_name)
# Just user-defined functions
if inspect.isfunction(member) and not member.__module__.endswith("hidden"):
if inspect.isfunction(member) and not member.__module__.endswith(
"hidden"
):
result.append(member_name)
return result

@@ -157,7 +162,9 @@ def single_module_file(module, funs_in_module, references):
result += "\n"

result += f"The following table lists all operations available in ``{module}`` module:\n"
result += operations_table.operations_table_str(get_schema_names(module, funs_in_module))
result += operations_table.operations_table_str(
get_schema_names(module, funs_in_module)
)
result += "\n\n"

result += ".. toctree::\n :hidden:\n\n"
@@ -185,15 +192,22 @@ def fn_autodoc(out_filename, generated_path, references):
# the rest is within the same directory, so there is no need for that
all_modules_str += f" {generated_path / module}\n"

single_module_str = single_module_file(module, funs_in_module, references)
single_module_str = single_module_file(
module, funs_in_module, references
)
with open(generated_path / (module + ".rst"), "w") as module_file:
module_file.write(single_module_str)

for fun in funs_in_module:
full_name = f"{module}.{fun}"
if module in exclude_fn_members and fun in exclude_fn_members[module]:
if (
module in exclude_fn_members
and fun in exclude_fn_members[module]
):
continue
with open(generated_path / (full_name + ".rst"), "w") as function_file:
with open(
generated_path / (full_name + ".rst"), "w"
) as function_file:
single_file_str = single_fun_file(full_name, references)
function_file.write(single_file_str)
