Bump black from 22.6.0 to 24.3.0 (#6820)
Dependabot upgrade #6802 failed because the Python files need to be reformatted after the `black` upgrade; this PR bumps the `black` version and reformats the Python files accordingly.

#oncall
yatbear committed Apr 3, 2024
1 parent 81f3d1d commit c6c3545
Showing 36 changed files with 101 additions and 98 deletions.
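
Almost all of the churn in this diff comes from stable-style changes in newer `black` releases, visible in the hunks below: redundant parentheses around tuple targets in `for` statements are removed; an over-long subscript assignment is now split by parenthesizing its right-hand side rather than breaking inside the subscript; and a blank line is inserted in a couple of places before a comment-plus-definition block. A minimal runnable sketch of the first two rules (the names `index` and `convert` and the data are hypothetical, not from this repo; `black` only splits an assignment like this when the real line exceeds its line-length limit):

```python
# Hypothetical data, for illustration only.
index = {"run_a": {"loss": [0.9, 0.5], "acc": [0.1, 0.7]}}


def convert(values):
    # Stand-in for a real conversion such as taking the last datum.
    return values[-1]


result = {}
for run, tags_for_run in index.items():  # old: for (run, tags_for_run) in ...
    result[run] = {}
    for tag, values in tags_for_run.items():  # old: for (tag, values) in ...
        # Old style broke inside the subscript:
        #     result[run][
        #         tag
        #     ] = convert(values)
        # New style parenthesizes the right-hand side instead:
        result[run][tag] = (
            convert(values)
        )

print(result)  # {'run_a': {'loss': 0.5, 'acc': 0.7}}
```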
28 changes: 14 additions & 14 deletions tensorboard/backend/event_processing/data_provider.py
@@ -152,13 +152,13 @@ def read_last_scalars(
             plugin_name, run_tag_filter, summary_pb2.DATA_CLASS_SCALAR
         )
         run_tag_to_last_scalar_datum = collections.defaultdict(dict)
-        for (run, tags_for_run) in index.items():
-            for (tag, metadata) in tags_for_run.items():
+        for run, tags_for_run in index.items():
+            for tag, metadata in tags_for_run.items():
                 events = self._multiplexer.Tensors(run, tag)
                 if events:
-                    run_tag_to_last_scalar_datum[run][
-                        tag
-                    ] = _convert_scalar_event(events[-1])
+                    run_tag_to_last_scalar_datum[run][tag] = (
+                        _convert_scalar_event(events[-1])
+                    )

         return run_tag_to_last_scalar_datum

@@ -222,11 +222,11 @@ def _index(self, plugin_name, run_tag_filter, data_class_filter):
         all_metadata = self._multiplexer.AllSummaryMetadata()

         result = {}
-        for (run, tag_to_metadata) in all_metadata.items():
+        for run, tag_to_metadata in all_metadata.items():
             if runs is not None and run not in runs:
                 continue
             result_for_run = {}
-            for (tag, metadata) in tag_to_metadata.items():
+            for tag, metadata in tag_to_metadata.items():
                 if tags is not None and tag not in tags:
                     continue
                 if metadata.data_class != data_class_filter:
@@ -250,10 +250,10 @@ def _list(self, construct_time_series, index):
           suitable to be returned from `list_scalars` or `list_tensors`.
         """
         result = {}
-        for (run, tag_to_metadata) in index.items():
+        for run, tag_to_metadata in index.items():
             result_for_run = {}
             result[run] = result_for_run
-            for (tag, summary_metadata) in tag_to_metadata.items():
+            for tag, summary_metadata in tag_to_metadata.items():
                 max_step = None
                 max_wall_time = None
                 for event in self._multiplexer.Tensors(run, tag):
@@ -286,10 +286,10 @@ def _read(self, convert_event, index, downsample):
           suitable to be returned from `read_scalars` or `read_tensors`.
         """
         result = {}
-        for (run, tags_for_run) in index.items():
+        for run, tags_for_run in index.items():
             result_for_run = {}
             result[run] = result_for_run
-            for (tag, metadata) in tags_for_run.items():
+            for tag, metadata in tags_for_run.items():
                 events = self._multiplexer.Tensors(run, tag)
                 data = [convert_event(e) for e in events]
                 result_for_run[tag] = _downsample(data, downsample)
@@ -304,10 +304,10 @@ def list_blob_sequences(
             plugin_name, run_tag_filter, summary_pb2.DATA_CLASS_BLOB_SEQUENCE
         )
         result = {}
-        for (run, tag_to_metadata) in index.items():
+        for run, tag_to_metadata in index.items():
             result_for_run = {}
             result[run] = result_for_run
-            for (tag, metadata) in tag_to_metadata.items():
+            for tag, metadata in tag_to_metadata.items():
                 max_step = None
                 max_wall_time = None
                 max_length = None
@@ -345,7 +345,7 @@ def read_blob_sequences(
             plugin_name, run_tag_filter, summary_pb2.DATA_CLASS_BLOB_SEQUENCE
         )
         result = {}
-        for (run, tags) in index.items():
+        for run, tags in index.items():
             result_for_run = {}
             result[run] = result_for_run
             for tag in tags:
8 changes: 4 additions & 4 deletions tensorboard/backend/event_processing/data_provider_test.py
@@ -83,7 +83,7 @@ def setUp(self):
             ("very smooth", (0.0, 0.25, 0.5, 0.75, 1.0), "uniform"),
             ("very smoothn't", (0.0, 0.01, 0.99, 1.0), "bimodal"),
         ]
-        for (description, distribution, name) in data:
+        for description, distribution, name in data:
             tensor = tf.constant([distribution], dtype=tf.float64)
             for i in range(1, 11):
                 histogram_summary.histogram(
@@ -97,7 +97,7 @@ def setUp(self):
             ("blue", (1, 91, 158), "bottom-left"),
             ("yellow", (239, 220, 111), "bottom-right"),
         ]
-        for (name, color, description) in data:
+        for name, color, description in data:
             image_1x1 = tf.constant([[[color]]], dtype=tf.uint8)
             for i in range(1, 11):
                 # Use a non-monotonic sequence of sample sizes to
@@ -289,7 +289,7 @@ def test_read_scalars(self):
             for tag in result[run]:
                 tensor_events = multiplexer.Tensors(run, tag)
                 self.assertLen(result[run][tag], len(tensor_events))
-                for (datum, event) in zip(result[run][tag], tensor_events):
+                for datum, event in zip(result[run][tag], tensor_events):
                     self.assertEqual(datum.step, event.step)
                     self.assertEqual(datum.wall_time, event.wall_time)
                     self.assertEqual(
@@ -424,7 +424,7 @@ def test_read_tensors(self):
             for tag in result[run]:
                 tensor_events = multiplexer.Tensors(run, tag)
                 self.assertLen(result[run][tag], len(tensor_events))
-                for (datum, event) in zip(result[run][tag], tensor_events):
+                for datum, event in zip(result[run][tag], tensor_events):
                     self.assertEqual(datum.step, event.step)
                     self.assertEqual(datum.wall_time, event.wall_time)
                     np.testing.assert_equal(
2 changes: 1 addition & 1 deletion tensorboard/backend/event_processing/event_multiplexer.py
@@ -96,7 +96,7 @@ def __init__(
                 "Event Multplexer doing initialization load for %s",
                 run_path_map,
             )
-            for (run, path) in run_path_map.items():
+            for run, path in run_path_map.items():
                 self.AddRun(path, run)
         logger.info("Event Multiplexer done initializing")

Expand Down
[changes in an additional file; file name not captured in this view]
@@ -122,7 +122,7 @@ def __init__(
                 "Event Multplexer doing initialization load for %s",
                 run_path_map,
             )
-            for (run, path) in run_path_map.items():
+            for run, path in run_path_map.items():
                 self.AddRun(path, run)
         logger.info("Event Multiplexer done initializing")

8 changes: 5 additions & 3 deletions tensorboard/compat/tensorflow_stub/io/gfile.py
@@ -602,9 +602,11 @@ def glob(self, filename):
         prefix = self._get_chain_protocol_prefix(filename)

         return [
-            file
-            if (self.SEPARATOR in file or self.CHAIN_SEPARATOR in file)
-            else prefix + file
+            (
+                file
+                if (self.SEPARATOR in file or self.CHAIN_SEPARATOR in file)
+                else prefix + file
+            )
             for file in files
         ]
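
The `glob` hunk above shows a third new-style rule: a conditional expression that spans multiple lines inside a comprehension now gets its own enclosing parentheses. A self-contained sketch of the same shape, with hypothetical separators and file names:

```python
SEPARATOR = "/"
CHAIN_SEPARATOR = "::"
prefix = "s3" + CHAIN_SEPARATOR
files = ["a.txt", "dir/b.txt", "gs::c.txt"]

# Each element's conditional expression is wrapped in its own
# parentheses, which makes the per-item logic easier to scan.
resolved = [
    (
        file
        if (SEPARATOR in file or CHAIN_SEPARATOR in file)
        else prefix + file
    )
    for file in files
]
print(resolved)  # ['s3::a.txt', 'dir/b.txt', 'gs::c.txt']
```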

6 changes: 3 additions & 3 deletions tensorboard/compat/tensorflow_stub/io/gfile_tf_test.py
@@ -158,7 +158,7 @@ def testWalkInOrder(self):
         all_dirs = []
         all_subdirs = []
         all_files = []
-        for (w_dir, w_subdirs, w_files) in gfile.walk(dir_path, topdown=True):
+        for w_dir, w_subdirs, w_files in gfile.walk(dir_path, topdown=True):
             all_dirs.append(w_dir)
             all_subdirs.append(w_subdirs)
             all_files.append(w_files)
@@ -198,7 +198,7 @@ def testWalkPostOrder(self):
         all_dirs = []
         all_subdirs = []
         all_files = []
-        for (w_dir, w_subdirs, w_files) in gfile.walk(dir_path, topdown=False):
+        for w_dir, w_subdirs, w_files in gfile.walk(dir_path, topdown=False):
             all_dirs.append(w_dir)
             all_subdirs.append(w_subdirs)
             all_files.append(w_files)
@@ -237,7 +237,7 @@ def testWalkFailure(self):
         all_dirs = []
         all_subdirs = []
         all_files = []
-        for (w_dir, w_subdirs, w_files) in gfile.walk(dir_path, topdown=False):
+        for w_dir, w_subdirs, w_files in gfile.walk(dir_path, topdown=False):
             all_dirs.append(w_dir)
             all_subdirs.append(w_subdirs)
             all_files.append(w_files)
20 changes: 10 additions & 10 deletions tensorboard/data/grpc_provider.py
@@ -140,7 +140,7 @@ def read_scalars(
                     series = []
                     tags[tag_entry.tag_name] = series
                     d = tag_entry.data
-                    for (step, wt, value) in zip(d.step, d.wall_time, d.value):
+                    for step, wt, value in zip(d.step, d.wall_time, d.value):
                         point = provider.ScalarDatum(
                             step=step,
                             wall_time=wt,
@@ -177,13 +177,13 @@ def read_last_scalars(
                 d = tag_entry.data
                 # There should be no more than one datum in
                 # `tag_entry.data` since downsample was set to 1.
-                for (step, wt, value) in zip(d.step, d.wall_time, d.value):
-                    result[run_name][
-                        tag_entry.tag_name
-                    ] = provider.ScalarDatum(
-                        step=step,
-                        wall_time=wt,
-                        value=value,
+                for step, wt, value in zip(d.step, d.wall_time, d.value):
+                    result[run_name][tag_entry.tag_name] = (
+                        provider.ScalarDatum(
+                            step=step,
+                            wall_time=wt,
+                            value=value,
+                        )
                     )
         return result

@@ -243,7 +243,7 @@ def read_tensors(
                     series = []
                     tags[tag_entry.tag_name] = series
                     d = tag_entry.data
-                    for (step, wt, value) in zip(d.step, d.wall_time, d.value):
+                    for step, wt, value in zip(d.step, d.wall_time, d.value):
                         point = provider.TensorDatum(
                             step=step,
                             wall_time=wt,
@@ -308,7 +308,7 @@ def read_blob_sequences(
                     series = []
                     tags[tag_entry.tag_name] = series
                     d = tag_entry.data
-                    for (step, wt, blob_sequence) in zip(
+                    for step, wt, blob_sequence in zip(
                         d.step, d.wall_time, d.values
                     ):
                         values = []
2 changes: 1 addition & 1 deletion tensorboard/data/grpc_provider_test.py
@@ -513,7 +513,7 @@ def test_rpc_error(self):
             (grpc.StatusCode.NOT_FOUND, errors.NotFoundError),
             (grpc.StatusCode.PERMISSION_DENIED, errors.PermissionDeniedError),
         ]
-        for (code, error_type) in cases:
+        for code, error_type in cases:
             with self.subTest(code.name):
                 msg = "my favorite cause"
                 e = _grpc_error(code, msg)
1 change: 1 addition & 0 deletions tensorboard/data/server_ingester_test.py
@@ -64,6 +64,7 @@ def test(self):
         error_file = os.path.join(tmpdir.name, "startup_error")

         real_popen = subprocess.Popen
+
         # Stub out `subprocess.Popen` to write the port file.
         def fake_popen(subprocess_args, *args, **kwargs):
             def target():
[changes in an additional file; file name not captured in this view]
@@ -74,8 +74,8 @@ def _serve_tags(self, request):
         )

         result = {run: {} for run in mapping}
-        for (run, tag_to_timeseries) in mapping.items():
-            for (tag, timeseries) in tag_to_timeseries.items():
+        for run, tag_to_timeseries in mapping.items():
+            for tag, timeseries in tag_to_timeseries.items():
                 result[run][tag] = {
                     "description": timeseries.description,
                 }
2 changes: 1 addition & 1 deletion tensorboard/functionaltests/core_test.py
@@ -116,7 +116,7 @@ def testDashboardSelection(self):
             "reload_button": "paper-icon-button#reload-button",
         }
         elements = {}
-        for (name, selector) in selectors.items():
+        for name, selector in selectors.items():
             locator = (by.By.CSS_SELECTOR, selector)
             self.wait.until(
                 expected_conditions.presence_of_element_located(locator)
4 changes: 2 additions & 2 deletions tensorboard/notebook.py
@@ -349,7 +349,7 @@ def _display_colab(port, height, display_handle):
         ("%PORT%", "%d" % port),
         ("%HEIGHT%", "%d" % height),
     ]
-    for (k, v) in replacements:
+    for k, v in replacements:
         shell = shell.replace(k, v)
     script = IPython.display.Javascript(shell)

@@ -398,7 +398,7 @@ def _display_ipython(port, height, display_handle):
         ("%URL%", json.dumps("/")),
     ]

-    for (k, v) in replacements:
+    for k, v in replacements:
         shell = shell.replace(k, v)
     iframe = IPython.display.HTML(shell)
     if display_handle:
2 changes: 1 addition & 1 deletion tensorboard/pip_package/requirements_dev.txt
@@ -25,7 +25,7 @@ moto==1.3.7
 fsspec>=2021.06.0

 # For linting
-black==22.6.0
+black==24.3.0
 flake8==3.7.8
 yamllint==1.17.0

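To reproduce this reformatting locally, one would typically install the pinned version and rerun it over the tree, e.g. `pip install black==24.3.0` followed by `black .` from the repository root (the exact invocation wired into this repo's lint tooling is an assumption here).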
1 change: 1 addition & 0 deletions tensorboard/plugin_util.py
@@ -60,6 +60,7 @@
     "th",
 ]

+
 # Cache Markdown converter to avoid expensive initialization at each
 # call to `markdown_to_safe_html`. Cache a different instance per thread.
 class _MarkdownStore(threading.local):
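
The comment in the hunk above describes a per-thread cache built on `threading.local`. A minimal sketch of that pattern, with a hypothetical stand-in for the expensive Markdown converter:

```python
import threading


def _expensive_converter():
    # Stand-in for constructing a costly object such as a Markdown
    # converter; hypothetical, for illustration only.
    return {"initialized": True}


class _Store(threading.local):
    def __init__(self):
        # threading.local runs __init__ once per thread on first
        # access, so each thread builds and then reuses its own copy.
        self.converter = _expensive_converter()


_store = _Store()


def render(text):
    # No locks needed: each thread only ever sees its own converter.
    return (_store.converter, text)


print(render("*hi*")[0])  # {'initialized': True}
```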
4 changes: 2 additions & 2 deletions tensorboard/plugins/audio/audio_plugin.py
@@ -101,8 +101,8 @@ def _index_impl(self, ctx, experiment):
             plugin_name=metadata.PLUGIN_NAME,
         )
         result = {run: {} for run in mapping}
-        for (run, tag_to_time_series) in mapping.items():
-            for (tag, time_series) in tag_to_time_series.items():
+        for run, tag_to_time_series in mapping.items():
+            for tag, time_series in tag_to_time_series.items():
                 md = metadata.parse_plugin_metadata(time_series.plugin_content)
                 if not self._version_checker.ok(md.version, run, tag):
                     continue
[changes in an additional file; file name not captured in this view]
@@ -177,9 +177,9 @@ def createPlugin(self, logdir):
             custom_scalars_plugin_instance,
         ]
         for plugin_instance in plugin_instances:
-            plugin_name_to_instance[
-                plugin_instance.plugin_name
-            ] = plugin_instance
+            plugin_name_to_instance[plugin_instance.plugin_name] = (
+                plugin_instance
+            )
         return custom_scalars_plugin_instance

     def test_download_url_json(self):
1 change: 1 addition & 0 deletions tensorboard/plugins/debugger_v2/debugger_v2_plugin_test.py
@@ -116,6 +116,7 @@ def setUp(self):
         self.plugin = debugger_v2_plugin.DebuggerV2Plugin(context)
         wsgi_app = application.TensorBoardWSGI([self.plugin])
         self.server = werkzeug_test.Client(wsgi_app, wrappers.Response)
+
         # The multiplexer reads data asynchronously on a separate thread, so
         # as not to block the main thread of the TensorBoard backend. During
         # unit test, we disable the asynchronous behavior, so that we can
16 changes: 8 additions & 8 deletions tensorboard/plugins/graph/graphs_plugin.py
@@ -108,8 +108,8 @@ def add_row_item(run, tag=None):
             experiment_id=experiment,
             plugin_name=metadata.PLUGIN_NAME_RUN_METADATA_WITH_GRAPH,
         )
-        for (run_name, tags) in mapping.items():
-            for (tag, tag_data) in tags.items():
+        for run_name, tags in mapping.items():
+            for tag, tag_data in tags.items():
                 # The Summary op is defined in TensorFlow and does not use a stringified proto
                 # as a content of plugin data. It contains single string that denotes a version.
                 # https://github.com/tensorflow/tensorflow/blob/11f4ecb54708865ec757ca64e4805957b05d7570/tensorflow/python/ops/summary_ops_v2.py#L789-L790
@@ -128,8 +128,8 @@ def add_row_item(run, tag=None):
             experiment_id=experiment,
             plugin_name=metadata.PLUGIN_NAME_RUN_METADATA,
         )
-        for (run_name, tags) in mapping.items():
-            for (tag, tag_data) in tags.items():
+        for run_name, tags in mapping.items():
+            for tag, tag_data in tags.items():
                 if tag_data.plugin_content != b"1":
                     logger.warning(
                         "Ignoring unrecognizable version of RunMetadata."
@@ -146,8 +146,8 @@ def add_row_item(run, tag=None):
             experiment_id=experiment,
             plugin_name=metadata.PLUGIN_NAME_KERAS_MODEL,
         )
-        for (run_name, tags) in mapping.items():
-            for (tag, tag_data) in tags.items():
+        for run_name, tags in mapping.items():
+            for tag, tag_data in tags.items():
                 if tag_data.plugin_content != b"1":
                     logger.warning(
                         "Ignoring unrecognizable version of RunMetadata."
@@ -161,7 +161,7 @@ def add_row_item(run, tag=None):
             experiment_id=experiment,
             plugin_name=metadata.PLUGIN_NAME,
         )
-        for (run_name, tags) in mapping.items():
+        for run_name, tags in mapping.items():
             if metadata.RUN_GRAPH_NAME in tags:
                 (run_item, _) = add_row_item(run_name, None)
                 run_item["run_graph"] = True
@@ -172,7 +172,7 @@ def add_row_item(run, tag=None):
             experiment_id=experiment,
             plugin_name=metadata.PLUGIN_NAME_TAGGED_RUN_METADATA,
        )
-        for (run_name, tags) in mapping.items():
+        for run_name, tags in mapping.items():
             for tag in tags:
                 (_, tag_item) = add_row_item(run_name, tag)
                 tag_item["profile"] = True
[Diffs for the remaining changed files were not loaded in this view.]
