diff --git a/packages/vitest/src/defaults.ts b/packages/vitest/src/defaults.ts index 8e06bfb70146..31612e838bc0 100644 --- a/packages/vitest/src/defaults.ts +++ b/packages/vitest/src/defaults.ts @@ -24,6 +24,7 @@ export const benchmarkConfigDefaults: Required< exclude: defaultExclude, includeSource: [], reporters: ['default'], + includeSamples: false, } const defaultCoverageExcludes = [ diff --git a/packages/vitest/src/node/config/serializeConfig.ts b/packages/vitest/src/node/config/serializeConfig.ts index 03e78489c54a..249f4bbdaa1a 100644 --- a/packages/vitest/src/node/config/serializeConfig.ts +++ b/packages/vitest/src/node/config/serializeConfig.ts @@ -160,5 +160,8 @@ export function serializeConfig( standalone: config.standalone, printConsoleTrace: config.printConsoleTrace ?? coreConfig.printConsoleTrace, + benchmark: config.benchmark && { + includeSamples: config.benchmark.includeSamples, + }, } } diff --git a/packages/vitest/src/node/reporters/benchmark/table/index.ts b/packages/vitest/src/node/reporters/benchmark/table/index.ts index 99dae30d86a2..866a5a1d9019 100644 --- a/packages/vitest/src/node/reporters/benchmark/table/index.ts +++ b/packages/vitest/src/node/reporters/benchmark/table/index.ts @@ -167,10 +167,8 @@ interface FormattedBenchmarkGroup { benchmarks: FormattedBenchmarkResult[] } -export type FormattedBenchmarkResult = Omit<BenchmarkResult, 'samples'> & { +export type FormattedBenchmarkResult = BenchmarkResult & { id: string - sampleCount: number - median: number } function createFormattedBenchmarkReport(files: File[]) { @@ -183,18 +181,7 @@ function createFormattedBenchmarkReport(files: File[]) { for (const t of task.tasks) { const benchmark = t.meta.benchmark && t.result?.benchmark if (benchmark) { - const { samples, ...rest } = benchmark - benchmarks.push({ - id: t.id, - sampleCount: samples.length, - median: - samples.length % 2 - ?
samples[Math.floor(samples.length / 2)] - : (samples[samples.length / 2] - + samples[samples.length / 2 - 1]) - / 2, - ...rest, - }) + benchmarks.push({ id: t.id, ...benchmark, samples: [] }) } } if (benchmarks.length) { diff --git a/packages/vitest/src/node/reporters/benchmark/table/tableRender.ts b/packages/vitest/src/node/reporters/benchmark/table/tableRender.ts index 8c9b21ba0790..483454956874 100644 --- a/packages/vitest/src/node/reporters/benchmark/table/tableRender.ts +++ b/packages/vitest/src/node/reporters/benchmark/table/tableRender.ts @@ -68,7 +68,7 @@ function renderBenchmarkItems(result: BenchmarkResult) { formatNumber(result.p995 || 0), formatNumber(result.p999 || 0), `±${(result.rme || 0).toFixed(2)}%`, - result.samples.length.toString(), + (result.sampleCount || 0).toString(), ] } @@ -124,10 +124,7 @@ export function renderTree( } const baseline = options.compare?.[t.id] if (baseline) { - benchMap[t.id].baseline = { - ...baseline, - samples: Array.from({ length: baseline.sampleCount }), - } + benchMap[t.id].baseline = baseline } } } diff --git a/packages/vitest/src/node/types/benchmark.ts b/packages/vitest/src/node/types/benchmark.ts index e2ee6c53a884..46796b8f4657 100644 --- a/packages/vitest/src/node/types/benchmark.ts +++ b/packages/vitest/src/node/types/benchmark.ts @@ -50,4 +50,11 @@ export interface BenchmarkUserOptions { * benchmark output file */ outputJson?: string + + /** + * Include `samples` array of benchmark results for API or custom reporter usages. + * This is disabled by default to reduce memory usage.
+ * @default false + */ + includeSamples?: boolean } diff --git a/packages/vitest/src/runtime/config.ts b/packages/vitest/src/runtime/config.ts index 0ecbf4cd1628..14ebe708c67b 100644 --- a/packages/vitest/src/runtime/config.ts +++ b/packages/vitest/src/runtime/config.ts @@ -129,6 +129,9 @@ export interface SerializedConfig { standalone: boolean logHeapUsage: boolean | undefined coverage: SerializedCoverageConfig + benchmark?: { + includeSamples: boolean + } } export interface SerializedCoverageConfig { diff --git a/packages/vitest/src/runtime/runners/benchmark.ts b/packages/vitest/src/runtime/runners/benchmark.ts index cc6edb95c824..9dad217ed277 100644 --- a/packages/vitest/src/runtime/runners/benchmark.ts +++ b/packages/vitest/src/runtime/runners/benchmark.ts @@ -72,6 +72,15 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) { const taskRes = task.result! const result = benchmark.result!.benchmark! Object.assign(result, taskRes) + // compute extra stats and free raw samples as early as possible + const samples = result.samples + result.sampleCount = samples.length + result.median = samples.length % 2 + ?
samples[Math.floor(samples.length / 2)] + : (samples[samples.length / 2] + samples[samples.length / 2 - 1]) / 2 + if (!runner.config.benchmark?.includeSamples) { + result.samples.length = 0 + } updateTask(benchmark) }, { diff --git a/packages/vitest/src/runtime/types/benchmark.ts b/packages/vitest/src/runtime/types/benchmark.ts index fb83535b3f56..34535bce7582 100644 --- a/packages/vitest/src/runtime/types/benchmark.ts +++ b/packages/vitest/src/runtime/types/benchmark.ts @@ -18,6 +18,8 @@ export interface Benchmark extends Custom { export interface BenchmarkResult extends TinybenchResult { name: string rank: number + sampleCount: number + median: number } export type BenchFunction = (this: BenchFactory) => Promise<void> | void diff --git a/test/benchmark/test/reporter.test.ts b/test/benchmark/test/reporter.test.ts index 35ee2e496a91..0e3ddebe2dd1 100644 --- a/test/benchmark/test/reporter.test.ts +++ b/test/benchmark/test/reporter.test.ts @@ -1,4 +1,4 @@ -import { expect, it } from 'vitest' +import { assert, expect, it } from 'vitest' import * as pathe from 'pathe' import { runVitest } from '../../test-utils' @@ -27,3 +27,24 @@ it('non-tty', async () => { ` expect(lines).toMatchObject(expected.trim().split('\n').map(s => expect.stringContaining(s))) }) + +it.for([true, false])('includeSamples %s', async (includeSamples) => { + const result = await runVitest( + { + root: pathe.join(import.meta.dirname, '../fixtures/reporter'), + benchmark: { includeSamples }, + }, + ['summary.bench.ts'], + 'benchmark', + ) + assert(result.ctx) + const allSamples = [...result.ctx.state.idMap.values()] + .filter(t => t.meta.benchmark) + .map(t => t.result?.benchmark?.samples) + if (includeSamples) { + expect(allSamples[0]).not.toEqual([]) + } + else { + expect(allSamples[0]).toEqual([]) + } +})