Workaround for iterative validation
TheChymera committed Feb 6, 2023
1 parent 4926477 commit a072601
Showing 2 changed files with 13 additions and 1 deletion.
2 changes: 1 addition & 1 deletion dandi/tests/test_validate.py
@@ -82,7 +82,7 @@ def test_validate_bids_errors(bids_error_examples, dataset):
     # ideally make a list and erode etc.
 
     selected_dataset = os.path.join(bids_error_examples, dataset)
-    validation_result = validate_bids(selected_dataset, report=True)
+    validation_result = validate(selected_dataset)
     with open(os.path.join(selected_dataset, ".ERRORS.json")) as f:
         expected_errors = json.load(f)
     for i in validation_result:
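
The test now exercises the generic entry point instead of the BIDS-specific validate_bids(..., report=True). A minimal usage sketch, assuming a BIDS error fixture at ./bids-error-example (hypothetical path), of how validate() is consumed: it is a generator, so results arrive one file at a time as you iterate, which is what the test's `for i in validation_result:` loop relies on:

    from dandi.validate import validate

    # validate() yields validation errors lazily, one result at a time.
    for result in validate("./bids-error-example"):
        print(result)
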
12 changes: 12 additions & 0 deletions dandi/validate.py
@@ -168,6 +168,18 @@ def validate(
         for df in find_dandi_files(
             p, dandiset_path=dandiset_path, allow_all=allow_any_path
         ):
+            # This is pretty awkward: if validation is called once on just the base
+            # files, the results are somehow cached, and subsequent validation attempts
+            # on the same BIDS dataset will just return the errors of the first run, which is to say, none.
+            if df.path in [
+                "dataset_description.json",
+                "README",
+                "README.md",
+                "README.txt",
+                "README.rst",
+            ]:
+                print("🤔🤔🤔🤔🤔")
+                continue
             yield from df.get_validation_errors(
                 schema_version=schema_version, devel_debug=devel_debug
             )
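
The skip list above keys off the dataset-level "base" files so they never enter the per-file validation pass that would otherwise prime the cached result. A hypothetical, self-contained sketch of the same filtering logic (BASE_FILES and iter_validatable are illustrative names, not part of dandi's API):

    BASE_FILES = [
        "dataset_description.json",
        "README",
        "README.md",
        "README.txt",
        "README.rst",
    ]

    def iter_validatable(paths):
        """Yield only the paths that should go through per-file validation."""
        for path in paths:
            if path in BASE_FILES:
                continue  # validating these alone would mask later errors
            yield path

    print(list(iter_validatable(["README", "sub-01/anat/sub-01_T1w.nii.gz"])))
    # -> ['sub-01/anat/sub-01_T1w.nii.gz']
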
