From d5349bb409cc94eaca6a6646ca5df9010e3f94db Mon Sep 17 00:00:00 2001
From: Glib <71976818+GLEF1X@users.noreply.github.com>
Date: Tue, 10 Jan 2023 09:03:18 -0500
Subject: [PATCH] Remove obsolete/unused code (#155)

* remove obsolete/unused code

* remove more obsolete/unused code
---
 doc/conf.py                     | 4 ----
 doc/examples/bench_time_func.py | 2 +-
 pyperf/__main__.py              | 5 ++---
 pyperf/_bench.py                | 3 +--
 pyperf/_cli.py                  | 8 ++------
 pyperf/_collect_metadata.py     | 5 +----
 pyperf/_compare.py              | 4 +---
 pyperf/_process_time.py         | 4 +---
 pyperf/_runner.py               | 8 ++++----
 pyperf/_system.py               | 2 +-
 pyperf/_timeit.py               | 2 +-
 pyperf/tests/test_bench.py      | 2 +-
 pyperf/tests/test_examples.py   | 2 +-
 13 files changed, 17 insertions(+), 34 deletions(-)

diff --git a/doc/conf.py b/doc/conf.py
index 6ef7b59c..9f5a50a3 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -12,10 +12,6 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys
-import os
-import shlex
-
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
diff --git a/doc/examples/bench_time_func.py b/doc/examples/bench_time_func.py
index 1c126287..e9569d49 100755
--- a/doc/examples/bench_time_func.py
+++ b/doc/examples/bench_time_func.py
@@ -6,7 +6,7 @@ def bench_dict(loops, mydict):
     range_it = range(loops)
     t0 = pyperf.perf_counter()
 
-    for loops in range_it:
+    for _ in range_it:
         mydict['0']
         mydict['100']
         mydict['200']
diff --git a/pyperf/__main__.py b/pyperf/__main__.py
index ddc5e453..aee19d18 100644
--- a/pyperf/__main__.py
+++ b/pyperf/__main__.py
@@ -357,7 +357,7 @@ def group_by_name_ignored(self):
                 yield (suite, ignored)
 
 
-def load_benchmarks(args, name=True):
+def load_benchmarks(args):
     data = Benchmarks()
     data.load_benchmark_suites(args.filenames)
     if getattr(args, 'benchmarks', None):
@@ -681,7 +681,6 @@ def cmd_convert(args):
                       file=sys.stderr)
                 sys.exit(1)
             except TypeError:
-                raise
                 print("ERROR: Metadata %r of benchmark %r is not an integer"
                       % (name, benchmark.get_name()),
                       file=sys.stderr)
@@ -699,7 +698,7 @@
 
 
 def cmd_slowest(args):
-    data = load_benchmarks(args, name=False)
+    data = load_benchmarks(args)
     nslowest = args.n
 
     use_title = (data.get_nsuite() > 1)
diff --git a/pyperf/_bench.py b/pyperf/_bench.py
index 18b57951..fae94bbf 100644
--- a/pyperf/_bench.py
+++ b/pyperf/_bench.py
@@ -521,8 +521,7 @@ def _as_json(self, suite_metadata):
         metadata = self._get_common_metadata()
         common_metadata = dict(metadata, **suite_metadata)
 
-        data = {}
-        data['runs'] = [run._as_json(common_metadata) for run in self._runs]
+        data = {'runs': [run._as_json(common_metadata) for run in self._runs]}
         metadata = _exclude_common_metadata(metadata, suite_metadata)
         if metadata:
             data['metadata'] = metadata
diff --git a/pyperf/_cli.py b/pyperf/_cli.py
index 4da8cc60..f8017018 100644
--- a/pyperf/_cli.py
+++ b/pyperf/_cli.py
@@ -87,7 +87,6 @@ def format_run(bench, run_index, run, common_metadata=None, raw=False,
             loops, value = warmup
             raw_value = value * (loops * inner_loops)
             if raw:
-                text = format_value(raw_value)
                 text = ("%s (loops: %s)"
                         % (format_value(raw_value),
                            format_number(loops)))
@@ -273,8 +272,7 @@ def format_stats(bench, lines):
     lines.append('')
 
     # Minimum
-    table = []
-    table.append(("Minimum", bench.format_value(min(values))))
+    table = [("Minimum", bench.format_value(min(values)))]
 
     # Median +- MAD
     median = bench.median()
@@ -382,8 +380,6 @@ def value_bucket(value):
 
     value_width = max([len(bench.format_value(bucket * value_k))
                        for bucket in range(bucket_min, bucket_max + 1)])
-    width = columns - value_width
-
     line = ': %s #' % count_max
     width = columns - (value_width + len(line))
     if not extend:
@@ -517,7 +513,7 @@ def format_result_value(bench):
     return _format_result_value(bench)
 
 
-def format_result(bench, prefix=True):
+def format_result(bench):
     loops = None
     warmups = None
     for run in bench._runs:
diff --git a/pyperf/_collect_metadata.py b/pyperf/_collect_metadata.py
index 3fa79058..a0146848 100644
--- a/pyperf/_collect_metadata.py
+++ b/pyperf/_collect_metadata.py
@@ -233,7 +233,6 @@ def collect_cpu_freq(metadata, cpus):
             # Example: "processor 0: version = 00, identification = [...]"
            match = re.match(r'^processor ([0-9]+): ', line)
            if match is None:
-                raise Exception
                # unknown /proc/cpuinfo format: silently ignore and exit
                return
 
@@ -410,9 +409,7 @@ def collect_cpu_metadata(metadata):
 
 
 def collect_metadata(process=True):
-    metadata = {}
-    metadata['perf_version'] = pyperf.__version__
-    metadata['date'] = format_datetime(datetime.datetime.now())
+    metadata = {'perf_version': pyperf.__version__, 'date': format_datetime(datetime.datetime.now())}
 
     collect_system_metadata(metadata)
     collect_cpu_metadata(metadata)
diff --git a/pyperf/_compare.py b/pyperf/_compare.py
index 31703ecb..dc2afdc9 100644
--- a/pyperf/_compare.py
+++ b/pyperf/_compare.py
@@ -284,9 +284,7 @@ def sort_key(results):
         for item in self.all_results[0]:
             headers.append(item.changed.name)
 
-        all_norm_means = []
-        for column in headers[2:]:
-            all_norm_means.append([])
+        all_norm_means = [[] for _ in range(len(headers[2:]))]
 
         rows = []
         not_significant = []
diff --git a/pyperf/_process_time.py b/pyperf/_process_time.py
index 7010195d..90fd2dff 100644
--- a/pyperf/_process_time.py
+++ b/pyperf/_process_time.py
@@ -60,7 +60,7 @@ def bench_process(loops, args, kw, profile_filename=None):
         temp_profile_filename = tempfile.mktemp()
         args = [args[0], "-m", "cProfile", "-o", temp_profile_filename] + args[1:]
 
-    for loop in range_it:
+    for _ in range_it:
         start_rss = get_max_rss()
 
         proc = subprocess.Popen(args, **kw)
@@ -75,8 +75,6 @@ def bench_process(loops, args, kw, profile_filename=None):
                 os.unlink(temp_profile_filename)
             sys.exit(exitcode)
 
-        proc = None
-
         rss = get_max_rss() - start_rss
         max_rss = max(max_rss, rss)
 
diff --git a/pyperf/_runner.py b/pyperf/_runner.py
index 4142ced8..6c344cb1 100644
--- a/pyperf/_runner.py
+++ b/pyperf/_runner.py
@@ -71,7 +71,7 @@ class Runner:
 
     # Default parameters are chosen to have approximatively a run of 0.5 second
     # and so a total duration of 5 seconds by default
-    def __init__(self, values=None, warmups=None, processes=None,
+    def __init__(self, values=None, processes=None,
                  loops=0, min_time=0.1,
                  metadata=None, show_name=True,
                  program_args=None, add_cmdline_args=None,
@@ -485,7 +485,7 @@ def bench_time_func(self, name, time_func, *args, **kwargs):
         if self.args.profile:
             profiler, time_func = profiling_wrapper(time_func)
 
-        def task_func(task, loops):
+        def task_func(_, loops):
             return time_func(loops, *args)
 
         task = WorkerProcessTask(self, name, task_func, metadata)
@@ -514,7 +514,7 @@ def bench_func(self, name, func, *args, **kwargs):
         if self.args.profile:
             profiler, func = profiling_wrapper(func)
 
-        def task_func(task, loops):
+        def task_func(_, loops):
             # use fast local variables
             local_timer = time.perf_counter
             local_func = func
@@ -557,7 +557,7 @@ def bench_async_func(self, name, func, *args, **kwargs):
         if self.args.profile:
             profiler, func = profiling_wrapper(func)
 
-        def task_func(task, loops):
+        def task_func(_, loops):
             if loops != 1:
                 async def main():
                     # use fast local variables
diff --git a/pyperf/_system.py b/pyperf/_system.py
index f127c94f..38c830e1 100644
--- a/pyperf/_system.py
+++ b/pyperf/_system.py
@@ -213,7 +213,7 @@ def write_msr(self, cpu, reg_num, value):
         fd = os.open(path, os.O_WRONLY)
         try:
             if hasattr(os, 'pwrite'):
-                data = os.pwrite(fd, data, reg_num)
+                os.pwrite(fd, data, reg_num)
             else:
                 os.lseek(fd, reg_num, os.SEEK_SET)
                 os.write(fd, data)
diff --git a/pyperf/_timeit.py b/pyperf/_timeit.py
index 9e70c7e3..64da6a51 100644
--- a/pyperf/_timeit.py
+++ b/pyperf/_timeit.py
@@ -108,7 +108,7 @@ def make_inner(self):
         exec(code, global_ns, local_ns)
         return local_ns["inner"]
 
-    def update_linecache(self, file=None):
+    def update_linecache(self):
         import linecache
 
         linecache.cache[self.filename] = (len(self.src),
diff --git a/pyperf/tests/test_bench.py b/pyperf/tests/test_bench.py
index b14c61bc..8fd4e2a1 100644
--- a/pyperf/tests/test_bench.py
+++ b/pyperf/tests/test_bench.py
@@ -375,7 +375,7 @@ def test_stats(self):
         self.assertEqual(bench.median_abs_dev(), 24.0)
 
     def test_stats_same(self):
-        values = [5.0 for i in range(10)]
+        values = [5.0 for _ in range(10)]
         run = create_run(values)
         bench = pyperf.Benchmark([run])
         self.assertEqual(bench.mean(), 5.0)
diff --git a/pyperf/tests/test_examples.py b/pyperf/tests/test_examples.py
index 12c1c492..1ef79a89 100644
--- a/pyperf/tests/test_examples.py
+++ b/pyperf/tests/test_examples.py
@@ -23,7 +23,7 @@ def tearDownClass(cls):
         if not_tested:
             raise Exception("not tested scripts: %s" % sorted(not_tested))
 
-    def check_command(self, script, args, nproc=3):
+    def check_command(self, script, args):
         self.TESTED.add(script)
 
         script = os.path.join(EXAMPLES_DIR, script)