diff --git a/.cargo/config.toml b/.cargo/config.toml index 3ee08c0c61ee..32a3fc10a4ac 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -2,6 +2,7 @@ rustflags = [ "-Zproc-macro-backtrace", "-Wunused_qualifications", + "-Wclippy::upper_case_acronyms", # Flag to make build.rs scripts generate docs. Should only be used in this repository # internally, not by dependants. '--cfg=HYDROFLOW_GENERATE_DOCS', @@ -10,6 +11,9 @@ rustflags = [ # "-Aclippy::uninlined-format-args", ] +[target.x86_64-apple-darwin] +linker = "rust-lld" + [target.x86_64-unknown-linux-musl] linker = "rust-lld" diff --git a/.github/workflows/build-cli.yml b/.github/workflows/build-cli.yml index 037a310d4af7..5c2190246757 100644 --- a/.github/workflows/build-cli.yml +++ b/.github/workflows/build-cli.yml @@ -4,12 +4,13 @@ on: push: branches: - main + - feature/** tags: - 'hydro_cli-v[0-9]+.[0-9]+.[0-9]+' env: PACKAGE_NAME: hydro_deploy - PYTHON_VERSION: "3.7" # to build abi3 wheels + PYTHON_VERSION: "3.10" # to build abi3 wheels # based on Ruff's CI jobs: @@ -33,16 +34,17 @@ jobs: - universal2-apple-darwin steps: - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ env.PYTHON_VERSION }} - architecture: x64 - name: "Build wheels" uses: PyO3/maturin-action@v1 with: working-directory: hydro_deploy/hydro_cli target: ${{ matrix.target }} args: --release --out dist + env: + CARGO_TARGET_X86_64_APPLE_DARWIN_LINKER: clang - name: "Install built wheel" run: | pip install hydro_deploy/hydro_cli/dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall @@ -63,7 +65,7 @@ jobs: - i686-unknown-linux-gnu steps: - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ env.PYTHON_VERSION }} architecture: x64 @@ -108,7 +110,7 @@ jobs: arch: armv7 steps: - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ 
env.PYTHON_VERSION }} - name: "Build wheels" @@ -139,7 +141,7 @@ jobs: arch: x64 steps: - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ env.PYTHON_VERSION }} architecture: ${{ matrix.platform.arch }} @@ -173,7 +175,7 @@ jobs: - uses: actions/download-artifact@v3 with: name: wheels - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 - name: "Publish to PyPi" env: TWINE_USERNAME: __token__ diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index acb4efc0cc93..01f55673a43b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,7 +2,9 @@ name: CI on: push: - branches: [main] + branches: + - main + - feature/** pull_request: schedule: - cron: "35 03 * * *" # Daily at 8:35 PM PDT, 7:35 PM PST. @@ -27,7 +29,7 @@ jobs: check: name: Lint and Check if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.event_name != 'pull_request' }} - timeout-minutes: 15 + timeout-minutes: 20 needs: pre_job runs-on: ${{ matrix.os }} strategy: @@ -63,38 +65,28 @@ jobs: - name: Run sccache-cache if: matrix.rust_release == 'pinned-nightly' - uses: mozilla-actions/sccache-action@v0.0.3 + uses: mozilla-actions/sccache-action@v0.0.4 - name: Set Rust caching env vars if: matrix.rust_release == 'pinned-nightly' - run: | - echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV - echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV + uses: actions/github-script@v6 + with: + script: | + core.exportVariable('SCCACHE_GHA_ENABLED', 'true'); + core.exportVariable('RUSTC_WRAPPER', 'sccache'); - name: Run cargo fmt if: ${{ matrix.os == 'ubuntu-latest' }} - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all -- --check + run: cargo fmt --all -- --check - name: Run cargo clippy - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --all-targets --features python -- -D warnings + run: cargo clippy --all-targets --features python -- -D warnings - name: Run cargo check - uses: 
actions-rs/cargo@v1 - with: - command: check - args: --all-targets --features python + run: cargo check --all-targets --features python - name: Run cargo check (no default features) - uses: actions-rs/cargo@v1 - with: - command: check - args: --all-targets --no-default-features + run: cargo check --all-targets --no-default-features check-wasm: name: Check WebAssembly @@ -133,7 +125,7 @@ jobs: test: name: Test Suite if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.event_name != 'pull_request' }} - timeout-minutes: 25 + timeout-minutes: 35 needs: pre_job runs-on: ${{ matrix.os }} strategy: @@ -169,13 +161,15 @@ jobs: - name: Run sccache-cache if: matrix.rust_release == 'pinned-nightly' - uses: mozilla-actions/sccache-action@v0.0.3 + uses: mozilla-actions/sccache-action@v0.0.4 - name: Set Rust caching env vars if: matrix.rust_release == 'pinned-nightly' - run: | - echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV - echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV + uses: actions/github-script@v6 + with: + script: | + core.exportVariable('SCCACHE_GHA_ENABLED', 'true'); + core.exportVariable('RUSTC_WRAPPER', 'sccache'); - name: Install cargo-nextest (linux) if: ${{ matrix.os == 'ubuntu-latest' }} @@ -187,16 +181,10 @@ jobs: run: curl -LsSf https://get.nexte.st/latest/windows-tar | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin - name: Run cargo nextest on all targets - uses: actions-rs/cargo@v1 - with: - command: nextest - args: run --no-fail-fast --features python --all-targets + run: cargo nextest run --no-fail-fast --features python --all-targets - name: Run doctests - uses: actions-rs/cargo@v1 - with: - command: test - args: --no-fail-fast --features python --doc + run: cargo test --no-fail-fast --features python --doc - name: Install Python uses: actions/setup-python@v4 diff --git a/.github/workflows/conventional_commits.yml b/.github/workflows/conventional_commits.yml index 90fa3ae37418..4bd1fe057a2e 100644 --- a/.github/workflows/conventional_commits.yml +++ 
b/.github/workflows/conventional_commits.yml @@ -1,13 +1,20 @@ name: Conventional Commits on: - pull_request: - branches: [ main ] + pull_request_target: + types: + - opened + - edited + - synchronize + +permissions: + pull-requests: read jobs: - build: + main: name: Conventional Commits runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: webiny/action-conventional-commits@v1.1.0 + - uses: amannn/action-semantic-pull-request@v5 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/examples-container.yml b/.github/workflows/examples-container.yml index 932167e22669..66dcfd76bf68 100644 --- a/.github/workflows/examples-container.yml +++ b/.github/workflows/examples-container.yml @@ -3,7 +3,9 @@ name: examples-container-build on: push: - branches: [main] + branches: + - main + - feature/** jobs: pre_job: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a504cb752788..1189a4dc800e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -26,6 +26,7 @@ jobs: timeout-minutes: 20 runs-on: ubuntu-latest steps: + # https://github.com/orgs/community/discussions/25305#discussioncomment-8256560 # Unfortunately branch protection means that this workflow can't push the updated changelogs # and tags to `main` in the normal way. Instead we have an app: # https://github.com/organizations/hydro-project/settings/apps/hydro-project-bot @@ -35,10 +36,10 @@ jobs: # `APP_PRIVATE_KEY` ("Generate a private key"). 
- name: Generate token id: generate_token - uses: tibdex/github-app-token@v1 + uses: actions/create-github-app-token@v1 with: - app_id: ${{ secrets.APP_ID }} - private_key: ${{ secrets.APP_PRIVATE_KEY }} + app-id: ${{ secrets.APP_ID }} + private-key: ${{ secrets.APP_PRIVATE_KEY }} - run: | echo "Version bump: $BUMP" @@ -54,7 +55,7 @@ jobs: git config --global user.email "132423234+hydro-project-bot[bot]@users.noreply.github.com" - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Fetch all commit history so smart-release can generate changelogs. fetch-depth: 0 @@ -87,9 +88,12 @@ jobs: ${{ inputs.execute && '--execute' || '--no-publish' }} hydroflow hydroflow_lang hydroflow_macro hydroflow_plus hydroflow_datalog hydroflow_datalog_core - hydro_deploy hydro_cli hydroflow_cli_integration - hydroflow_plus_cli_integration + hydro_deploy hydro_cli hydroflow_deploy_integration + hydroflow_plus_deploy stageleft stageleft_macro stageleft_tool + multiplatform_test env: # Make sure to set this so the `gh` CLI works using our token. GH_TOKEN: ${{ steps.generate_token.outputs.token }} + # Show `cargo-smart-release`'s stack trace on error. + RUST_BACKTRACE: 1 diff --git a/.github/workflows/template.yml b/.github/workflows/template.yml new file mode 100644 index 000000000000..431935e730e6 --- /dev/null +++ b/.github/workflows/template.yml @@ -0,0 +1,154 @@ +name: Template + +on: + push: + branches: [main] + pull_request: + schedule: + - cron: "14 04 * * *" # Daily at 9:14 PM PDT, 8:14 PM PST. 
+ +jobs: + pre_job: + runs-on: ubuntu-latest + outputs: + should_skip: ${{ steps.skip_check.outputs.should_skip }} + steps: + - id: skip_check + uses: fkirc/skip-duplicate-actions@v3.4.0 + with: + cancel_others: "true" + + test_hydroflow: + name: Test hydroflow + if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.event_name != 'pull_request' }} + timeout-minutes: 10 + needs: pre_job + runs-on: ubuntu-latest + + steps: + - name: Checkout sources + uses: actions/checkout@v2 + + - name: Install nightly toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + components: rustfmt, clippy + + - name: Action cargo-generate + uses: cargo-generate/cargo-generate-action@v0.20.0 + with: + name: generated + template: template/hydroflow + arguments: "-d hydroflow_git=${{ github.event.pull_request.head.repo.clone_url }} -d hydroflow_branch=${{ github.event.pull_request.head.ref }}" + - name: Move generated project + run: | + mv generated ${{ runner.temp }}/ + + - name: Run cargo check + uses: actions-rs/cargo@v1 + with: + command: check + args: --manifest-path "${{ runner.temp }}/generated/Cargo.toml" --all-targets + + - name: Run cargo fmt + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --manifest-path "${{ runner.temp }}/generated/Cargo.toml" --all -- --check + + - name: Run cargo clippy + uses: actions-rs/cargo@v1 + with: + command: clippy + args: --manifest-path "${{ runner.temp }}/generated/Cargo.toml" --all --all-targets -- -D warnings + + - name: Run cargo test + uses: actions-rs/cargo@v1 + with: + command: test + args: --manifest-path "${{ runner.temp }}/generated/Cargo.toml" --all-targets --no-fail-fast + + - name: Run cargo build + uses: actions-rs/cargo@v1 + with: + command: build + args: --manifest-path "${{ runner.temp }}/generated/Cargo.toml" --all-targets + + - name: test template example + run: | + ECHO_STRING='hello this is a test' + + trap 'rm client-input && rm client-output && kill $(jobs -p)' EXIT + + 
echo "$ECHO_STRING" >client-input + + "${{ runner.temp }}/generated/target/debug/hydroflow-template" --role server --address 127.0.0.100:2048 & + "${{ runner.temp }}/generated/target/debug/hydroflow-template" --role client --address 127.0.0.100:2048 client-output & + + sleep 1 + + if cat client-output | grep -q "$ECHO_STRING"; then + exit 0 + else + exit -1 + fi + + test_hydroflow_plus: + name: Test hydroflow_plus + if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.event_name != 'pull_request' }} + timeout-minutes: 10 + needs: pre_job + runs-on: ubuntu-latest + + steps: + - name: Checkout sources + uses: actions/checkout@v2 + + - name: Install nightly toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + components: rustfmt, clippy + + - name: Action cargo-generate + uses: cargo-generate/cargo-generate-action@v0.20.0 + with: + name: generated + template: template/hydroflow_plus + arguments: "-d hydroflow_git=${{ github.event.pull_request.head.repo.clone_url }} -d hydroflow_branch=${{ github.event.pull_request.head.ref }}" + - name: Move generated project + run: | + mv generated ${{ runner.temp }}/ + + - name: Run cargo check + uses: actions-rs/cargo@v1 + with: + command: check + args: --manifest-path "${{ runner.temp }}/generated/Cargo.toml" --all-targets + + - name: Run cargo fmt + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --manifest-path "${{ runner.temp }}/generated/Cargo.toml" --all -- --check + + - name: Run cargo clippy + uses: actions-rs/cargo@v1 + with: + command: clippy + args: --manifest-path "${{ runner.temp }}/generated/Cargo.toml" --all --all-targets -- -D warnings + + - name: Run cargo test + uses: actions-rs/cargo@v1 + with: + command: test + args: --manifest-path "${{ runner.temp }}/generated/Cargo.toml" --all-targets --no-fail-fast + + - name: Run cargo build + uses: actions-rs/cargo@v1 + with: + command: build + args: --manifest-path "${{ runner.temp }}/generated/Cargo.toml" 
--all-targets diff --git a/.gitignore b/.gitignore index add32060e2be..0bef5c8eb91f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,13 @@ -/target* +/target /rust.git /*.dot __pycache__/ **/.DS_Store +**/target-bisector-* # Profiling related outputs of the perf binary and cargo-flamegraph perf.data perf.data.old flamegraph.svg + +/rustc-ice-*.txt diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 000000000000..26d33521af10 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,3 @@ +# Default ignored files +/shelf/ +/workspace.xml diff --git a/.idea/hydroflow.iml b/.idea/hydroflow.iml new file mode 100644 index 000000000000..fc353ac331a9 --- /dev/null +++ b/.idea/hydroflow.iml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 000000000000..07115cdf15dd --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 000000000000..50f2925099c1 --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 000000000000..35eb1ddfbbc0 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index d462ae195686..1057a431c935 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,15 +1,22 @@ { "rust-analyzer.runnableEnv": [ { - // Set output levels for `tracing` logging. "env": { + // Stack backtraces. "RUST_BACKTRACE": "full", + // Set output levels for `tracing` logging. "RUST_LOG": "debug,hydroflow=trace", - "INSTA_FORCE_PASS": "1" + // Make sure all snapshots are written instead of just the first failure. 
+ "INSTA_FORCE_PASS": "1", + "INSTA_UPDATE": "always", + "TRYBUILD": "overwrite", } } ], "files.watcherExclude": { "**/target": true - } + }, + "rust-analyzer.cargo.features": [ + "python" + ] } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000000..77f6fe59cc41 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,120 @@ +# Contributing to Hydroflow + +Thanks for your interest in contributing to Hydroflow! This is an experimental, research-driven +project which can make getting started a bit tricky. This guide will explain the project structure, +code style, commit messages, testing setups, and more to help you get started. + +## Repository Structure + +The Hydroflow repo is set up as a monorepo and [Cargo workspace](https://doc.rust-lang.org/book/ch14-03-cargo-workspaces.html). +Relative to the repository root: + +* `hydroflow` is the main Hydroflow package, containing the Hydroflow runtime. It re-exports the + surface syntax macros in `hydroflow_macro` and `hydroflow_lang`. The runtime is the "scheduled + layer" while the surface syntax compiler is the "compiled layer". +* `hydroflow_plus` and related packages contain Hydroflow+, which is a functional syntax built on + top of `hydroflow`. +* `hydroflow_datalog` provides a datalog compiler, based on top of the Hydroflow surface syntax. +* `docs` is the [Hydro.run](https://hydro.run/) website. `website_playground` contains the + playground portion of the website, used for compiling Hydroflow in-browser via WASM. +* `benches` contains some microbenchmarks for Hydroflow and other frameworks. +* `design_docs` contains old point-in-time design docs for Hydroflow's architecture. + +There are several subpackages included that are used by Hydroflow but are more general-purpose: + +* `stageleft` is a framework for staged programming in Rust, used by `hydroflow_plus`. +* `lattices` is an abstract algebra library, originally for lattice types.
+* `variadics` is a crate for emulating variadic generics using tuple lists. +* `pusherator` is a rudimentary library providing push-based iterators. +* `multiplatform_test` provides a convenience macro for specifying and initializing tests on + various platforms. + +There are auxiliary repositories as well: + +* [`hydro-project/rust-sitter`](https://github.com/hydro-project/rust-sitter) provides a + [Tree-sitter](https://tree-sitter.github.io/tree-sitter/)-based parser generator interface, used + by `hydroflow_datalog`. + +## Rust + +Hydroflow should build on latest stable releases of Rust. However we develop on a pinned nightly +version, bumped up every month or two. The version is in `rust-toolchain.toml` which is +automatically detected by `cargo`, so no special setup should be needed. + +## Python + +Some parts of the Hydroflow repo require a relatively recent version of Python 3, maybe 3.10 or +later. On Mac, installing directly from python.org may work if `brew install` doesn't. + +### `wasm-bindgen` + +[`wasm-bindgen`](https://github.com/rustwasm/wasm-bindgen) is required for running WASM tests. +```shell +cargo install wasm-bindgen-cli +``` + +## Submitting Changes + +### Feature Branches +Prototypes should be committed to feature branches, rather than main. To create a feature branch: + +```shell +git fetch origin +git checkout -b feature/$FEATURE_NAME origin/main +git push origin HEAD +``` + +To add changes on top of feature branches: +```shell +git checkout -b $BRANCH_NAME `feature/$FEATURE_NAME` +.. make changes .. +git add ... # Add all changes +git commit # Commit changes +git push origin HEAD +``` + +### Commit Messages + +Pull request title and body should follow [Conventional Commits specification](https://www.conventionalcommits.org/). +The repository defaults to Squash+Merge commits, so individual commits are only useful for showing code evolution +during code-reviews. + +Pull request title and body are used to generate changelogs.
See [Releasing](#releasing) for more. + +### Pull Requests and `precheck.bash` + +CI runs a comprehensive set of tests on PRs before they are merged. This includes format and lint +checks. To run some checks locally, you can run `./precheck.bash` (or `./precheck.bash --quick` for +a quicker subset of the checks). Note that this will overwrite any changed snapshot tests instead of +failing-- you should double-check that the snapshot diff matches what you expect. + +## Snapshot Testing + +Hydroflow uses two types of snapshot testing: [`insta`](https://insta.rs/) and [`trybuild`](https://github.com/dtolnay/trybuild). +Insta provides general snapshot testing in Rust, and we mainly use it to test the Hydroflow graphs +generated from the surface syntax. These snapshots are of the [Mermaid](https://mermaid.js.org/) or +[DOT](https://graphviz.org/) graph visualizations rather than the graph datastructures themselves; +see `hydroflow/tests/snapshots`. The snapshots can be useful not just to track changes but also as +a quick reference to view the visualizations (i.e. by pasting into [mermaid.live](https://mermaid.live/)). +`trybuild` is used to test the error messages in Hydroflow's surface syntax; see `hydroflow/tests/compile-fail`. + +`insta` provides a CLI, `cargo insta` to run tests and review changes: +```shell +cargo install cargo-insta +cargo insta test # or cargo test --all-targets --no-fail-fast +cargo insta review +``` +Environment variables [`INSTA_FORCE_PASS=1` and `INSTA_UPDATE=always`](https://insta.rs/docs/advanced/#disabling-assertion-failure) +can be used instead, to update `insta` snapshots. `TRYBUILD=overwrite` can be used to update +`trybuild` snapshots. `precheck.bash` uses these, and they are also set when running code with +`rust-analyzer` (see `.vscode/settings.json`). + +## CI Testing + +The CI runs the same tests that are done on PRs, but also runs some tests on the latest +nightly. Sometimes these tests fail when the PR tests pass.
Most often this is due to new lints +in the latest version of `clippy`. See [Setup#Rust](#rust) above. + +## Releasing + +See [`RELEASING.md`](https://github.com/hydro-project/hydroflow/blob/main/RELEASING.md). diff --git a/Cargo.lock b/Cargo.lock index fc26165e1563..28c15037ea84 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21,9 +21,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] @@ -34,11 +34,24 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "getrandom", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -70,52 +83,53 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = 
"64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -123,38 +137,32 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" dependencies = [ "backtrace", ] [[package]] -name = "ascii" -version = "1.1.0" +name = "arrayvec" +version = "0.7.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] -name = "async-channel" -version = "1.9.0" +name = "ascii" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener 2.5.3", - "futures-core", -] +checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16" [[package]] name = "async-channel" -version = "2.1.1" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", - "event-listener 4.0.1", "event-listener-strategy", "futures-core", "pin-project-lite", @@ -162,120 +170,88 @@ dependencies = [ [[package]] name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "cfg-if 1.0.0", - "concurrent-queue", - "futures-lite 1.13.0", - "log", - "parking", - "polling 2.8.0", - "rustix 0.37.27", - "slab", - "socket2 0.4.10", - "waker-fn", -] - -[[package]] -name = "async-io" -version = "2.2.2" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6afaa937395a620e33dc6a742c593c01aced20aa376ffb0f628121198578ccc7" +checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ - "async-lock 3.2.0", - "cfg-if 1.0.0", + "async-lock", + "cfg-if", "concurrent-queue", "futures-io", - "futures-lite 2.1.0", + "futures-lite", "parking", - "polling 
3.3.1", - "rustix 0.38.28", + "polling", + "rustix", "slab", "tracing", - "windows-sys 0.52.0", -] - -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", + "windows-sys 0.59.0", ] [[package]] name = "async-lock" -version = "3.2.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 4.0.1", + "event-listener", "event-listener-strategy", "pin-project-lite", ] -[[package]] -name = "async-once-cell" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9338790e78aa95a416786ec8389546c4b6a1dfc3dc36071ed9518a9413a542eb" - [[package]] name = "async-process" -version = "1.8.1" +version = "2.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6438ba0a08d81529c69b36700fa2f95837bfe3e776ab39cde9c14d9149da88" +checksum = "a8a07789659a4d385b79b18b9127fc27e1a59e1e89117c78c5ea3b806f016374" dependencies = [ - "async-io 1.13.0", - "async-lock 2.8.0", + "async-channel", + "async-io", + "async-lock", "async-signal", + "async-task", "blocking", - "cfg-if 1.0.0", - "event-listener 3.1.0", - "futures-lite 1.13.0", - "rustix 0.38.28", - "windows-sys 0.48.0", + "cfg-if", + "event-listener", + "futures-lite", + "rustix", + "tracing", + "windows-sys 0.59.0", ] [[package]] name = "async-recursion" -version = "1.0.5" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] name = "async-signal" -version = "0.2.5" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5" +checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" dependencies = [ - "async-io 2.2.2", - "async-lock 2.8.0", + "async-io", + "async-lock", "atomic-waker", - "cfg-if 1.0.0", + "cfg-if", "futures-core", "futures-io", - "rustix 0.38.28", + "rustix", "signal-hook-registry", "slab", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] name = "async-ssh2-lite" -version = "0.4.7" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cb43eaa75050ebe27dfd16e6de7078d9796a251f03c77d7a24c05aa9037c29b" +checksum = "e00ee230bcd18bf15547e674950ace76b13fcda3ea1a33acca27e2eab245b58a" dependencies = [ "async-trait", "futures-util", @@ -286,19 +262,19 @@ dependencies = [ [[package]] name = "async-task" -version = "4.6.0" +version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d90cd0b264dfdd8eb5bad0a2c217c1f88fa96a8573f40e7b12de23fb468f46" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] @@ -307,71 +283,38 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi 0.3.9", -] - [[package]] name = "auto_impl" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ - "proc-macro-error", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.75", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", ] -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - -[[package]] -name = "base64" -version = "0.21.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" - -[[package]] -name = "basic-toml" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f2139706359229bfa8f19142ac1155b4b80beafb7a60471ac5dd109d4a19778" -dependencies = [ - "serde", -] - [[package]] name = "benches" version = "0.0.0" @@ -379,8 +322,7 @@ dependencies = [ "criterion", 
"differential-dataflow-master", "hydroflow", - "lazy_static", - "rand 0.8.5", + "rand", "rand_distr", "seq-macro", "static_assertions", @@ -405,9 +347,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "block-buffer" @@ -418,27 +360,54 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block2" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c132eebf10f5cad5289222520a4a058514204aed6d791f1cf4fe8088b82d15f" +dependencies = [ + "objc2", +] + [[package]] name = "blocking" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel 2.1.1", - "async-lock 3.2.0", + "async-channel", "async-task", - "fastrand 2.0.1", "futures-io", - "futures-lite 2.1.0", + "futures-lite", "piper", - "tracing", +] + +[[package]] +name = "buildstructor" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3907aac66c65520545ae3cb3c195306e20d5ed5c90bfbb992e061cf12a104d0" +dependencies = [ + "lazy_static", + "proc-macro2", + "quote", + "str_inflector", + "syn 2.0.75", + "thiserror", + "try_match", ] [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name 
= "bytemuck" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fd4c6dcc3b0aea2f5c0b4b82c2b15fe39ddbc76041a310848f4706edf76bb31" [[package]] name = "byteorder" @@ -448,9 +417,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "c2rust-bitfields" @@ -474,31 +443,31 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.6" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" dependencies = [ "serde", ] [[package]] name = "cargo-platform" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e34637b3140142bdf929fb439e8aa4ebad7651ebf7b1080b3930aa16ac1459ff" +checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" dependencies = [ "serde", ] [[package]] name = "cargo_metadata" -version = "0.15.4" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" +checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver 1.0.20", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -512,11 +481,11 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" dependencies = [ - "libc", + "shlex", ] [[package]] @@ -533,27 +502,27 @@ checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" [[package]] name = "cfg-if" -version = "0.1.10" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] -name = "cfg-if" -version = "1.0.0" +name = "cfg_aliases" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -564,9 +533,9 @@ checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901" [[package]] name = "ciborium" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", @@ -575,15 +544,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", @@ -591,24 +560,9 @@ dependencies = [ [[package]] name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags 1.3.2", - "strsim 0.8.0", - "textwrap", - "unicode-width", - "vec_map", -] - -[[package]] -name = "clap" -version = "4.4.11" +version = "4.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" +checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" dependencies = [ "clap_builder", "clap_derive", @@ -616,39 +570,39 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.11" +version = "4.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" +checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.10.0", + "strsim", ] [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.42", 
+ "syn 2.0.75", ] [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] name = "colored" @@ -671,9 +625,9 @@ dependencies = [ [[package]] name = "combine" -version = "4.6.6" +version = "4.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" dependencies = [ "bytes", "memchr", @@ -681,24 +635,24 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] [[package]] name = "console" -version = "0.15.7" +version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" +checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" dependencies = [ "encode_unicode", "lazy_static", "libc", "unicode-width", - "windows-sys 0.45.0", + "windows-sys 0.52.0", ] [[package]] @@ -707,7 +661,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen", ] @@ -723,40 +677,19 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" - -[[package]] -name = "core_affinity" -version = "0.5.10" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f8a03115cc34fb0d7c321dd154a3914b3ca082ccc5c11d91bf7117dbbe7171f" -dependencies = [ - "kernel32-sys", - "libc", - "num_cpus", - "winapi 0.2.8", -] +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.11" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" dependencies = [ "libc", ] -[[package]] -name = "crc32fast" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" -dependencies = [ - "cfg-if 1.0.0", -] - [[package]] name = "criterion" version = "0.5.1" @@ -766,7 +699,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.11", + "clap", "criterion-plot", "futures", "is-terminal", @@ -797,45 +730,43 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.9" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c3242926edf34aec4ac3a77108ad4854bffaa2e4ddc1824124ce59231302d5" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ - "cfg-if 1.0.0", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.4" +version = "0.8.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if 1.0.0", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.16" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2fe95351b870527a5d09bf563ed3c97c0cffb87cf1c78a591bf48bb218d9aa" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if 1.0.0", "crossbeam-utils", - "memoffset 0.9.0", ] [[package]] name = "crossbeam-utils" -version = "0.8.17" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-common" @@ -848,33 +779,34 @@ dependencies = [ ] [[package]] -name = "ctor" -version = "0.2.6" +name = "ctrlc" +version = "3.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d2b3721e861707777e3195b0158f950ae6dc4a27e4d02ff9f67e3eb3de199e" +checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" dependencies = [ - "quote", - "syn 2.0.42", + "nix", + "windows-sys 0.59.0", ] [[package]] name = "dashmap" -version = "5.5.3" +version = "6.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +checksum = "804c8821570c3f8b70230c2ba75ffa5c0f9a4189b9a432b6656c536712acae28" 
dependencies = [ - "cfg-if 1.0.0", - "hashbrown 0.14.3", + "cfg-if", + "crossbeam-utils", + "hashbrown", "lock_api", "once_cell", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] name = "data-encoding" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "datadriven" @@ -888,9 +820,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] @@ -926,41 +858,42 @@ dependencies = [ [[package]] name = "dirs" -version = "3.0.2" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30baa043103c9d0c2a57cf537cc2f35623889dc0d405e6c3cccfadbc81c71309" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" dependencies = [ "dirs-sys", ] [[package]] name = "dirs-sys" -version = "0.3.7" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" dependencies = [ "libc", + "option-ext", "redox_users", - "winapi 0.3.9", + "windows-sys 0.48.0", ] [[package]] name = "dunce" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] name = "dyn-clone" -version = "1.0.16" +version = 
"1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "either" -version = "1.9.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encode_unicode" @@ -968,17 +901,27 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +[[package]] +name = "env_filter" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f2c92ceda6ceec50f43169f9ee8424fe2db276791afde7b2cd8bc084cb376ab" +dependencies = [ + "log", + "regex", +] + [[package]] name = "env_logger" -version = "0.10.1" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece" +checksum = "e13fa619b91fb2381732789fc5de83b45675e882f66623b7d8cb4f643017018d" dependencies = [ + "anstream", + "anstyle", + "env_filter", "humantime", - "is-terminal", "log", - "regex", - "termcolor", ] [[package]] @@ -989,9 +932,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -999,26 +942,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "2.5.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" - -[[package]] -name = "event-listener" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener" -version = "4.0.1" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f2cdcf274580f2d63697192d744727b3198894b1bf02923643bf59e2c26712" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", @@ -1027,37 +953,30 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 4.0.1", + "event-listener", "pin-project-lite", ] [[package]] name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fastrand" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] -name = "flate2" -version = "1.0.28" +name = "filetime" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = 
"bf401df4a4e3872c4fe8151134cf483738e74b67fc934d6532c882b3d24a4550" dependencies = [ - "crc32fast", - "miniz_oxide", + "cfg-if", + "libc", + "libredox", + "windows-sys 0.59.0", ] [[package]] @@ -1075,11 +994,21 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs4" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e180ac76c23b45e767bd7ae9579bc0bb458618c4bc71835926e098e61d15f8" +dependencies = [ + "rustix", + "windows-sys 0.52.0", +] + [[package]] name = "futures" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1092,9 +1021,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1102,15 +1031,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1119,63 +1048,51 @@ dependencies = [ [[package]] name 
= "futures-io" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" -version = "1.13.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 1.9.0", + "fastrand", "futures-core", "futures-io", - "memchr", "parking", "pin-project-lite", - "waker-fn", -] - -[[package]] -name = "futures-lite" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeee267a1883f7ebef3700f262d2d54de95dfaf38189015a74fdc4e0c7ad8143" -dependencies = [ - "futures-core", - "pin-project-lite", ] [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = 
"futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1210,33 +1127,22 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.11" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] [[package]] name = "gimli" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "glob" @@ -1246,34 +1152,21 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "half" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" - -[[package]] -name = "hashbrown" -version = "0.12.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = 
"6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] [[package]] name = "hashbrown" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" - -[[package]] -name = "hdrhistogram" -version = "7.5.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ - "base64 0.21.5", - "byteorder", - "crossbeam-channel", - "flate2", - "nom 7.1.3", - "num-traits", + "ahash", ] [[package]] @@ -1283,19 +1176,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] -name = "hermit-abi" -version = "0.1.19" +name = "heck" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.3.3" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "hex" @@ -1323,9 +1219,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -1334,9 +1230,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -1352,17 +1248,15 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hydro_cli" -version = "0.6.0" +version = "0.9.0" dependencies = [ "anyhow", - "async-channel 1.9.0", "async-ssh2-lite", "bytes", - "clap 4.4.11", + "clap", "futures", "hydro_deploy", - "hydroflow_cli_integration", - "once_cell", + "hydroflow_deploy_integration", "pyo3", "pyo3-asyncio", "pythonize", @@ -1373,12 +1267,10 @@ dependencies = [ name = "hydro_cli_examples" version = "0.0.0" dependencies = [ - "dashmap", "futures", "hydroflow", "hydroflow_datalog", - "procinfo", - "rand 0.8.5", + "rand", "serde", "serde_json", "tokio", @@ -1387,62 +1279,60 @@ dependencies = [ [[package]] name = "hydro_deploy" -version = "0.6.0" +version = "0.9.0" dependencies = [ "anyhow", - "async-channel 1.9.0", - "async-once-cell", "async-process", "async-recursion", "async-ssh2-lite", "async-trait", + "buildstructor", "bytes", "cargo_metadata", "dunce", "dyn-clone", "futures", - "futures-core", - "hydroflow_cli_integration", + "hydroflow_deploy_integration", "indicatif", + "inferno", + "itertools", + "memo-map", + "nameof", "nanoid", "nix", - "once_cell", "serde", "serde_json", "shell-escape", "tempfile", "tokio", + "tokio-stream", "tokio-util", ] [[package]] name = "hydroflow" -version = "0.6.0" +version = "0.9.0" dependencies = [ "bincode", "byteorder", "bytes", "chrono", - "clap 4.4.11", + "clap", "colored", - "core_affinity", 
"criterion", - "ctor", "futures", - "getrandom 0.2.11", - "hdrhistogram", - "hydroflow_cli_integration", + "getrandom", "hydroflow_datalog", + "hydroflow_deploy_integration", "hydroflow_lang", "hydroflow_macro", "insta", - "instant", "itertools", "lattices", "multiplatform_test", "pusherator", "pyo3", - "rand 0.8.5", + "rand", "rand_distr", "ref-cast", "regex", @@ -1453,7 +1343,6 @@ dependencies = [ "slotmap", "smallvec", "static_assertions", - "textnonce", "time", "tokio", "tokio-stream", @@ -1463,111 +1352,116 @@ dependencies = [ "trybuild", "variadics", "wasm-bindgen-test", + "web-time", "zipf", ] -[[package]] -name = "hydroflow_cli_integration" -version = "0.5.1" -dependencies = [ - "async-recursion", - "async-trait", - "bytes", - "futures", - "pin-project", - "serde", - "tempfile", - "tokio", - "tokio-util", -] - [[package]] name = "hydroflow_datalog" -version = "0.6.0" +version = "0.9.0" dependencies = [ "hydroflow_datalog_core", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] name = "hydroflow_datalog_core" -version = "0.6.0" +version = "0.9.0" dependencies = [ "hydroflow_lang", "insta", "prettyplease", - "proc-macro-crate", "proc-macro2", "quote", "rust-sitter", "rust-sitter-tool", "slotmap", - "syn 2.0.42", + "syn 2.0.75", "tempfile", ] +[[package]] +name = "hydroflow_deploy_integration" +version = "0.9.0" +dependencies = [ + "async-recursion", + "async-trait", + "bytes", + "futures", + "pin-project", + "serde", + "tempfile", + "tokio", + "tokio-stream", + "tokio-util", +] + [[package]] name = "hydroflow_lang" -version = "0.6.0" +version = "0.9.0" dependencies = [ "auto_impl", - "clap 4.4.11", + "clap", "data-encoding", "itertools", "prettyplease", "proc-macro2", "quote", - "regex", "serde", "serde_json", "slotmap", - "syn 2.0.42", + "syn 2.0.75", "webbrowser", ] [[package]] name = "hydroflow_macro" -version = "0.6.0" +version = "0.9.0" dependencies = [ "hydroflow_lang", "itertools", "proc-macro-crate", 
"proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] name = "hydroflow_plus" -version = "0.6.0" +version = "0.9.0" dependencies = [ "bincode", "hydroflow", "hydroflow_lang", + "insta", "proc-macro-crate", "proc-macro2", "quote", "serde", "stageleft", "stageleft_tool", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] -name = "hydroflow_plus_cli_integration" -version = "0.6.0" +name = "hydroflow_plus_deploy" +version = "0.9.0" dependencies = [ - "async-channel 1.9.0", "hydro_deploy", "hydroflow_plus", + "nameof", + "prettyplease", "serde", + "sha2", "stageleft", "stageleft_tool", - "syn 2.0.42", + "syn 2.0.75", "tokio", + "toml", + "trybuild-internals-api", ] [[package]] @@ -1577,32 +1471,44 @@ dependencies = [ "futures", "hydro_deploy", "hydroflow_plus", - "hydroflow_plus_cli_integration", - "hydroflow_plus_test_macro", + "hydroflow_plus_deploy", "insta", - "rand 0.8.5", + "rand", + "serde", "stageleft", "stageleft_tool", "tokio", ] [[package]] -name = "hydroflow_plus_test_macro" +name = "hydroflow_plus_test_local" version = "0.0.0" dependencies = [ + "futures", + "hydroflow", "hydroflow_plus", - "hydroflow_plus_cli_integration", - "rand 0.8.5", + "hydroflow_plus_test_local_macro", + "insta", + "rand", + "stageleft", + "stageleft_tool", +] + +[[package]] +name = "hydroflow_plus_test_local_macro" +version = "0.0.0" +dependencies = [ + "hydroflow_plus", + "rand", "stageleft", "stageleft_tool", - "tokio", ] [[package]] name = "iana-time-zone" -version = "0.1.58" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1633,29 +1539,19 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - -[[package]] -name = "indexmap" -version = "2.1.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown", ] [[package]] name = "indicatif" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25" +checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" dependencies = [ "console", "instant", @@ -1666,56 +1562,70 @@ dependencies = [ [[package]] name = "indoc" -version = "1.0.9" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa799dd5ed20a7e349f3b4639aa80d74549c81716d9ec4f994c9b5815598306" +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" + +[[package]] +name = "inferno" +version = "0.11.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" +dependencies = [ + "ahash", + "clap", + "crossbeam-channel", + "crossbeam-utils", + "dashmap", + "env_logger", + "indexmap", + "is-terminal", + "itoa", + "log", + "num-format", + "once_cell", + "quick-xml", + "rgb", + "str_stack", +] [[package]] name = "insta" -version = "1.34.0" +version = "1.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d64600be34b2fcfc267740a243fa7744441bb4947a619ac4e5bb6507f35fbfc" +checksum = "810ae6042d48e2c9e9215043563a58a80b877bc863228a74cf10c49d4620a6f5" dependencies = [ "console", "lazy_static", "linked-hash-map", "similar", - "yaml-rust", ] [[package]] 
name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "wasm-bindgen", - "web-sys", + "cfg-if", ] [[package]] -name = "io-lifetimes" -version = "1.0.11" +name = "is-terminal" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ - "hermit-abi 0.3.3", + "hermit-abi 0.4.0", "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] -name = "is-terminal" -version = "0.4.9" +name = "is_terminal_polyfill" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" -dependencies = [ - "hermit-abi 0.3.3", - "rustix 0.38.28", - "windows-sys 0.48.0", -] +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" @@ -1728,9 +1638,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jni" @@ -1739,7 +1649,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" dependencies = [ "cesu8", - "cfg-if 1.0.0", + "cfg-if", "combine", "jni-sys", "log", @@ -1756,52 +1666,56 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = 
"js-sys" -version = "0.3.66" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "lattices" -version = "0.5.3" +version = "0.5.7" dependencies = [ "cc-traits", + "lattices_macro", "sealed", "serde", + "trybuild", +] + +[[package]] +name = "lattices_macro" +version = "0.5.6" +dependencies = [ + "insta", + "prettyplease", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.75", ] [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.151" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libloading" -version = "0.7.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ - "cfg-if 1.0.0", - "winapi 0.3.9", + "cfg-if", + "windows-targets 0.52.6", ] [[package]] @@ -1812,13 +1726,13 @@ checksum = 
"4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libredox" -version = "0.0.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "libc", - "redox_syscall 0.4.1", + "redox_syscall 0.5.3", ] [[package]] @@ -1837,9 +1751,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "fdc53a7799a7496ebc9fd29f31f7df80e83c9bda5299768af5f9e59eeea74647" dependencies = [ "cc", "libc", @@ -1855,21 +1769,15 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - -[[package]] -name = "linux-raw-sys" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -1877,18 +1785,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" - -[[package]] -name = "malloc_buf" -version = "0.0.6" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" -dependencies = [ - "libc", -] +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "matchers" @@ -1901,72 +1800,59 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] -name = "memoffset" -version = "0.7.1" +name = "memo-map" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" -dependencies = [ - "autocfg", -] +checksum = "38d1115007560874e373613744c6fba374c17688327a71c1476d1a5954cc857b" [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" dependencies = [ "autocfg", ] [[package]] -name = "memoffset" -version = "0.9.0" +name = "minicov" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +checksum = "5c71e683cd655513b99affab7d317deb690528255a0d5f717f1024093c12b169" dependencies = [ - "autocfg", + "cc", + "walkdir", ] -[[package]] -name = "memory_units" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.10" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ + "hermit-abi 0.3.9", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", + "wasi", + "windows-sys 0.52.0", ] [[package]] name = "multiplatform_test" -version = "0.0.0" +version = "0.2.0" dependencies = [ "env_logger", "log", @@ -1977,13 +1863,19 @@ dependencies = [ "wasm-bindgen-test", ] +[[package]] +name = "nameof" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce8b389a86cabeb0d8b33a61e60f3cbb4de38914342fe274e69111f3b5d9c44" + [[package]] name = "nanoid" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" dependencies = [ - "rand 0.8.5", + "rand", ] [[package]] @@ -1994,15 +1886,14 @@ checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b" [[package]] name = "nix" -version = "0.26.4" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +checksum = 
"71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 1.3.2", - "cfg-if 1.0.0", + "bitflags 2.6.0", + "cfg-if", + "cfg_aliases", "libc", - "memoffset 0.7.1", - "pin-utils", ] [[package]] @@ -2012,65 +1903,86 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" [[package]] -name = "nom" -version = "7.1.3" +name = "nu-ansi-term" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ - "memchr", - "minimal-lexical", + "overload", + "winapi", ] [[package]] -name = "nu-ansi-term" -version = "0.46.0" +name = "num-conv" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-format" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" dependencies = [ - "overload", - "winapi 0.3.9", + "arrayvec", + "itoa", ] [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", ] [[package]] -name = "num_cpus" -version = "1.16.0" +name = "number_prefix" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = 
"830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" + +[[package]] +name = "objc-sys" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb91bdd390c7ce1a8607f35f3ca7151b65afc0ff5ff3b34fa350f7d7c7e4310" + +[[package]] +name = "objc2" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46a785d4eeff09c14c487497c162e92766fbb3e4059a71840cecc03d9a50b804" dependencies = [ - "hermit-abi 0.3.3", - "libc", + "objc-sys", + "objc2-encode", ] [[package]] -name = "number_prefix" -version = "0.4.0" +name = "objc2-encode" +version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" +checksum = "7891e71393cd1f227313c9379a26a584ff3d7e6e7159e988851f0934c993f0f8" [[package]] -name = "objc" -version = "0.2.7" +name = "objc2-foundation" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +checksum = "0ee638a5da3799329310ad4cfa62fbf045d5f56e3ef5ba4149e7452dcf89d5a8" dependencies = [ - "malloc_buf", + "bitflags 2.6.0", + "block2", + "libc", + "objc2", ] [[package]] name = "object" -version = "0.32.1" +version = "0.36.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" dependencies = [ "memchr", ] @@ -2083,24 +1995,24 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" -version = "11.1.3" +version = "11.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = 
"b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "openssl-src" -version = "300.2.1+3.2.0" +version = "300.3.1+3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fe476c29791a5ca0d1273c697e96085bbabbbea2ef7afd5617e78a4b40332d3" +checksum = "7259953d42a81bf137fbbd73bd30a8e1914d6dce43c2b90ed575783a22608b91" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.97" +version = "0.9.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3eaad34cdd97d81de97964fc7f29e2d104f483840d906ef56daa1912338460b" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" dependencies = [ "cc", "libc", @@ -2109,6 +2021,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "overload" version = "0.1.1" @@ -2134,12 +2052,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] @@ -2148,32 +2066,32 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall 0.2.16", "smallvec", - "winapi 0.3.9", + "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall 0.5.3", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "percent-encoding" @@ -2183,29 +2101,29 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -2215,26 +2133,26 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.1" +version = "0.2.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.0.1", + "fastrand", "futures-io", ] [[package]] name = "pkg-config" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "plotters" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" dependencies = [ "num-traits", "plotters-backend", @@ -2245,54 +2163,39 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" +checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" [[package]] name = "plotters-svg" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" dependencies = [ "plotters-backend", ] [[package]] name = "polling" -version = "2.8.0" +version = "3.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if 1.0.0", - 
"concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.48.0", -] - -[[package]] -name = "polling" -version = "3.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf63fa624ab313c11656b4cda960bfc46c410187ad493c41f6ba2d8c1e991c9e" -dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "concurrent-queue", + "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.28", + "rustix", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "portable-atomic" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" [[package]] name = "powerfmt" @@ -2302,18 +2205,21 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] @@ -2323,38 +2229,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", + "toml_edit 0.19.15", ] [[package]] name = "proc-macro2" -version = "1.0.70" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -2367,13 +2249,13 @@ checksum = "6ab1427f3d2635891f842892dda177883dca0639e05fe66796a62c9d2f23b49c" dependencies = [ "byteorder", "libc", - "nom 2.2.1", + "nom", "rustc_version", ] [[package]] name = "pusherator" -version = "0.0.5" +version = "0.0.8" dependencies = [ "either", "variadics", @@ -2381,15 +2263,16 @@ dependencies = [ [[package]] name = "pyo3" -version = "0.18.3" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b1ac5b3731ba34fdaa9785f8d74d17448cd18f30cf19e0c7e7b1fdb5272109" +checksum = "53bdbb96d49157e65d45cc287af5f32ffadd5f4761438b527b055fb0d4bb8233" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "indoc", "libc", - "memoffset 0.8.0", - "parking_lot 0.12.1", + "memoffset", + "parking_lot 0.12.3", + "portable-atomic", "pyo3-build-config", "pyo3-ffi", "pyo3-macros", @@ -2398,9 +2281,9 @@ dependencies = [ [[package]] name = "pyo3-asyncio" -version = "0.18.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3564762e37035cfc486228e10b0528460fa026d681b5763873c693aa0d5c260" +checksum = 
"6ea6b68e93db3622f3bb3bf363246cf948ed5375afe7abff98ccbdd50b184995" dependencies = [ "futures", "once_cell", @@ -2412,9 +2295,9 @@ dependencies = [ [[package]] name = "pyo3-asyncio-macros" -version = "0.18.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be72d4cd43a27530306bd0d20d3932182fbdd072c6b98d3638bc37efb9d559dd" +checksum = "56c467178e1da6252c95c29ecf898b133f742e9181dca5def15dc24e19d45a39" dependencies = [ "proc-macro2", "quote", @@ -2423,9 +2306,9 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.18.3" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cb946f5ac61bb61a5014924910d936ebd2b23b705f7a4a3c40b05c720b079a3" +checksum = "deaa5745de3f5231ce10517a1f5dd97d53e5a2fd77aa6b5842292085831d48d7" dependencies = [ "once_cell", "target-lexicon", @@ -2433,9 +2316,9 @@ dependencies = [ [[package]] name = "pyo3-ffi" -version = "0.18.3" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd4d7c5337821916ea2a1d21d1092e8443cf34879e53a0ac653fbb98f44ff65c" +checksum = "62b42531d03e08d4ef1f6e85a2ed422eb678b8cd62b762e53891c05faf0d4afa" dependencies = [ "libc", "pyo3-build-config", @@ -2443,57 +2326,55 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.18.3" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9d39c55dab3fc5a4b25bbd1ac10a2da452c4aca13bb450f22818a002e29648d" +checksum = "7305c720fa01b8055ec95e484a6eca7a83c841267f0dd5280f0c8b8551d2c158" dependencies = [ "proc-macro2", "pyo3-macros-backend", "quote", - "syn 1.0.109", + "syn 2.0.75", ] [[package]] name = "pyo3-macros-backend" -version = "0.18.3" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97daff08a4c48320587b5224cc98d609e3c27b6d437315bd40b605c98eeb5918" +checksum = 
"7c7e9b68bb9c3149c5b0cade5d07f953d6d125eb4337723c4ccdb665f1f96185" dependencies = [ + "heck 0.4.1", "proc-macro2", + "pyo3-build-config", "quote", - "syn 1.0.109", + "syn 2.0.75", ] [[package]] name = "pythonize" -version = "0.18.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a0e1bbcd2a3856284bf4f4ef09ccb1157e9467847792754556f153ea3fe6b42" +checksum = "ffd1c3ef39c725d63db5f9bc455461bafd80540cb7824c61afb823501921a850" dependencies = [ "pyo3", "serde", ] [[package]] -name = "quote" -version = "1.0.33" +name = "quick-xml" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd" dependencies = [ - "proc-macro2", + "memchr", ] [[package]] -name = "rand" -version = "0.7.3" +name = "quote" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", + "proc-macro2", ] [[package]] @@ -2503,18 +2384,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -2524,16 +2395,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -2542,7 +2404,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.11", + "getrandom", ] [[package]] @@ -2552,29 +2414,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "rand", ] -[[package]] -name = "raw-window-handle" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2ff9a1f06a88b01621b7ae906ef0211290d1c8a168a15542486a8f61c0833b9" - [[package]] name = "rayon" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -2582,9 +2429,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +checksum = 
"1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -2601,54 +2448,54 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.11", + "getrandom", "libredox", "thiserror", ] [[package]] name = "ref-cast" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53313ec9f12686aeeffb43462c3ac77aa25f590a5f630eb2cde0de59417b29c7" +checksum = "ccf0a6f84d5f1d581da8b41b47ec8600871962f2a528115b542b362d4b744931" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2566c4bf6845f2c2e83b27043c3f5dfcd5ba8f2937d6c00dc009bfb51a079dc4" +checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] name = "regex" -version = "1.10.2" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.3", - "regex-syntax 0.8.2", + 
"regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] @@ -2662,13 +2509,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.4", ] [[package]] @@ -2679,9 +2526,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "relalg" @@ -2693,14 +2540,23 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", +] + +[[package]] +name = "rgb" +version = "0.8.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f86ae463694029097b846d8f99fd5536740602ae00022c0c50c5600720b2f71" +dependencies = [ + "bytemuck", ] [[package]] name = "rust-sitter" -version = "0.3.4" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a0f365b4eb9591dd3e685791389a932041b0dc6ccf5db1ec3d8913f67279365" +checksum = "f69b9a5d53b74db5166799a0024c2849e144c652dd6253c5bf58dfe086798cbc" dependencies = [ "rust-sitter-macro", "tree-sitter-c2rust", @@ -2708,9 +2564,9 @@ dependencies = [ [[package]] name = "rust-sitter-common" -version = "0.3.4" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c0a0b1da7317031274502b7c52cbb7cf529e7d1e1f3e23876519372b173a94" +checksum = "b559ebfd4114d398a36dfe25d7221bf84839fc3ef1309a6b7f4d1eece78dc690" dependencies 
= [ "quote", "syn 1.0.109", @@ -2718,9 +2574,9 @@ dependencies = [ [[package]] name = "rust-sitter-macro" -version = "0.3.4" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25e213e40efa00713547cc0f3529694aca547cfceb0839bbc9406632e14d410" +checksum = "8238447de92f7104ddbda8b5fd38a9be055229373283ef42b774b340d8117def" dependencies = [ "proc-macro2", "quote", @@ -2730,9 +2586,9 @@ dependencies = [ [[package]] name = "rust-sitter-tool" -version = "0.3.4" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "803c6596476a188a4dd18106eb927a926a202e00077cdaa5648dd620262af158" +checksum = "b840052f42d08fb67d13f68b72f1c41f99865d83239f4edff8fa1c6fd6fa0a12" dependencies = [ "cc", "rust-sitter-common", @@ -2747,9 +2603,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -2768,36 +2624,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustix" -version = "0.38.28" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.12", + "linux-raw-sys", "windows-sys 0.52.0", ] [[package]] 
name = "ryu" -version = "1.0.16" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "same-file" @@ -2826,10 +2668,10 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4a8caec23b7800fb97971a1c6ae365b6239aaeddfb934d6265f8505e795699d" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] @@ -2843,9 +2685,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -2864,9 +2706,9 @@ checksum = "5a9f47faea3cad316faa914d013d24f471cd90bfca1a0c70f05a3f42c6441e99" [[package]] name = "serde" -version = "1.0.193" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" dependencies = [ "serde_derive", ] @@ -2884,34 +2726,44 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" dependencies = [ - "indexmap 2.1.0", + "indexmap", "itoa", + "memchr", "ryu", "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +dependencies = [ + "serde", +] + [[package]] name = "sha1" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest", ] @@ -2922,16 +2774,16 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sha256" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7895c8ae88588ccead14ff438b939b0c569cd619116f14b4d13fdff7b8333386" +checksum = "18278f6a914fa3070aa316493f7d2ddfb9ac86ebc06fa3b83bffda487e9065b0" dependencies = [ "async-trait", "bytes", @@ -2955,20 +2807,26 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45bb67a18fa91266cc7807181f62f9178a6873bfad7dc788c42e6430db40184f" +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = 
"a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] [[package]] name = "similar" -version = "2.3.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" [[package]] name = "slab" @@ -2991,34 +2849,24 @@ dependencies = [ [[package]] name = "smallbitvec" -version = "2.5.1" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75ce4f9dc4a41b4c3476cc925f1efb11b66df373a8fde5d4b8915fa91b5d995e" +checksum = "fcc3fc564a4b53fd1e8589628efafe57602d91bde78be18186b5f61e8faea470" [[package]] name = "smallvec" -version = "1.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" - -[[package]] -name = "socket2" -version = "0.4.10" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi 0.3.9", -] +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3035,27 +2883,26 @@ dependencies = [ [[package]] name = "stageleft" -version = "0.2.0" +version = "0.4.0" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", "stageleft_macro", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] name = "stageleft_macro" -version = "0.1.0" +version = "0.3.0" dependencies = [ 
"insta", - "lazy_static", "prettyplease", "proc-macro-crate", "proc-macro2", "quote", "sha256", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] @@ -3077,12 +2924,12 @@ dependencies = [ [[package]] name = "stageleft_tool" -version = "0.1.0" +version = "0.3.0" dependencies = [ "proc-macro2", "quote", "sha256", - "syn 2.0.42", + "syn 2.0.75", "syn-inline-mod 0.6.0", ] @@ -3093,16 +2940,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] -name = "strsim" -version = "0.8.0" +name = "str_inflector" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0b848d5a7695b33ad1be00f84a3c079fe85c9278a325ff9159e6c99cef4ef7" +dependencies = [ + "lazy_static", + "regex", +] + +[[package]] +name = "str_stack" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb" [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "syn" @@ -3117,9 +2974,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.42" +version = "2.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b7d0a2c048d661a1a59fcd7355baa232f7ed34e0ee4df2eef3c1c1c0d3852d8" +checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" dependencies = [ "proc-macro2", "quote", @@ -3143,7 +3000,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fa6dca1fdb7b2ed46dd534a326725419d4fb10f23d8c85a8b2860e5eb25d0f9" dependencies = [ 
"proc-macro2", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] @@ -3160,88 +3017,70 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.12.12" +version = "0.12.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c39fd04924ca3a864207c66fc2cd7d22d7c016007f9ce846cbb9326331930a" +checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tempfile" -version = "3.8.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ - "cfg-if 1.0.0", - "fastrand 2.0.1", - "redox_syscall 0.4.1", - "rustix 0.38.28", - "windows-sys 0.48.0", + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.59.0", ] [[package]] name = "termcolor" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] -[[package]] -name = "textnonce" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f8d70cd784ed1dc33106a18998d77758d281dc40dc3e6d050cf0f5286683" -dependencies = [ - "base64 0.12.3", - "rand 0.7.3", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - [[package]] name = "thiserror" -version = "1.0.51" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" +checksum = 
"c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.51" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] [[package]] name = "time" -version = "0.3.31" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", + "num-conv", "powerfmt", "serde", "time-core", @@ -3334,9 +3173,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -3349,39 +3188,38 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.1" +version = "1.39.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" dependencies = [ 
"backtrace", "bytes", "libc", "mio", - "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", "pin-project-lite", @@ -3403,9 +3241,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", @@ -3413,23 +3251,28 @@ dependencies = [ "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] name = "toml" -version = "0.5.11" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.22.20", ] [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] [[package]] name = "toml_edit" @@ -3437,21 +3280,33 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.1.0", + "indexmap", + "toml_datetime", + "winnow 0.5.40", +] + +[[package]] +name = "toml_edit" +version = "0.22.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.6.18", ] [[package]] name = "topolotree" version = "0.0.0" dependencies = [ - "dashmap", "futures", "hydroflow", "hydroflow_datalog", "procinfo", - "rand 0.8.5", + "rand", "serde", "serde_json", "tokio", @@ -3476,7 +3331,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", ] [[package]] @@ -3520,9 +3375,9 @@ dependencies = [ [[package]] name = "tree-sitter" -version = "0.20.10" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e747b1f9b7b931ed39a548c1fae149101497de3c1fc8d9e18c62c1a66c683d3d" +checksum = "df7cc499ceadd4dcdf7ec6d4cbc34ece92c3fa07821e287aedecd4416c516dca" dependencies = [ "cc", "regex", @@ -3530,9 +3385,9 @@ dependencies = [ [[package]] name = "tree-sitter-c2rust" -version = "0.20.10" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee40a4d9cf5a30c199935f346887588239daceae4d1418d81b789276fffb8d91" +checksum = "62cd4f1075a82f3c4ae5e93dc39d4e0765d132a6d2773b3d86214fcc54e6d1e9" dependencies = [ "c2rust-bitfields", "once_cell", @@ -3541,45 +3396,50 @@ dependencies = 
[ [[package]] name = "tree-sitter-cli" -version = "0.20.8" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae7e9d844d4d38e511a7b93fe8ced79f2a364c32fdea10d04546f1c8317d5a0c" +checksum = "f7437ac48e37e5014007527ed9281c00c333c9ad0731e1c8489c0eff667b99d5" dependencies = [ "ansi_term", + "anstyle", "anyhow", - "atty", - "clap 2.34.0", + "clap", + "ctrlc", "difference", "dirs", + "filetime", "glob", + "heck 0.5.0", "html-escape", - "indexmap 1.9.3", + "indexmap", + "indoc", "lazy_static", "log", + "memchr", "regex", - "regex-syntax 0.6.29", + "regex-syntax 0.8.4", "rustc-hash", - "semver 1.0.20", + "semver 1.0.23", "serde", + "serde_derive", "serde_json", "smallbitvec", "tiny_http", - "toml", "tree-sitter", "tree-sitter-config", "tree-sitter-highlight", "tree-sitter-loader", "tree-sitter-tags", "walkdir", + "wasmparser", "webbrowser", - "which", ] [[package]] name = "tree-sitter-config" -version = "0.19.0" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fec4cb27f052ead2246631b332dba0cb6af9a54ce012badee59c4b0ded5e03" +checksum = "5d64b4608a1d822f56e3afcecabfa4915a768ea92bc44abad1ae32cd4c607ebd" dependencies = [ "anyhow", "dirs", @@ -3589,10 +3449,11 @@ dependencies = [ [[package]] name = "tree-sitter-highlight" -version = "0.20.1" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "042342584c5a7a0b833d9fc4e2bdab3f9868ddc6c4b339a1e01451c6720868bc" +checksum = "eaca0fe34fa96eec6aaa8e63308dbe1bafe65a6317487c287f93938959b21907" dependencies = [ + "lazy_static", "regex", "thiserror", "tree-sitter", @@ -3600,18 +3461,21 @@ dependencies = [ [[package]] name = "tree-sitter-loader" -version = "0.20.0" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0b17eef4833c7c139abed66d562dfa23228e97e647597baf246fd56c21bbfaf" +checksum = 
"73c9b13749644fbe22ec25c79861dc1e637ef4ab9e469fd820fcb30b10091293" dependencies = [ "anyhow", "cc", "dirs", + "fs4", + "indoc", "libloading", "once_cell", "regex", "serde", "serde_json", + "tempfile", "tree-sitter", "tree-sitter-highlight", "tree-sitter-tags", @@ -3619,9 +3483,9 @@ dependencies = [ [[package]] name = "tree-sitter-tags" -version = "0.20.2" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb3f1376219530a37a809751ecf65aa35fd8b9c1c4ab6d4faf5f6a9eeda2c05" +checksum = "34380416097ab36d1b4cd83f887d9e150ea4feaeb6ee9a5ecfe53d26839acc69" dependencies = [ "memchr", "regex", @@ -3629,19 +3493,52 @@ dependencies = [ "tree-sitter", ] +[[package]] +name = "try_match" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b065c869a3f832418e279aa4c1d7088f9d5d323bde15a60a08e20c2cd4549082" +dependencies = [ + "try_match_inner", +] + +[[package]] +name = "try_match_inner" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9c81686f7ab4065ccac3df7a910c4249f8c0f3fb70421d6ddec19b9311f63f9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.75", +] + [[package]] name = "trybuild" -version = "1.0.86" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419ecd263363827c5730386f418715766f584e2f874d32c23c5b00bd9727e7e" +checksum = "207aa50d36c4be8d8c6ea829478be44a372c6a77669937bb39c698e52f1491e8" +dependencies = [ + "glob", + "serde", + "serde_derive", + "serde_json", + "termcolor", + "toml", +] + +[[package]] +name = "trybuild-internals-api" +version = "1.0.99" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bde7ac6ae8a3798bbd277b668363eec49497db8f7beb14070b3eaf09e6884a8" dependencies = [ - "basic-toml", "glob", - "once_cell", "serde", "serde_derive", "serde_json", "termcolor", + "toml", ] [[package]] @@ -3656,7 +3553,7 @@ dependencies 
= [ "http", "httparse", "log", - "rand 0.8.5", + "rand", "sha1", "thiserror", "url", @@ -3671,9 +3568,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicode-bidi" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" @@ -3683,36 +3580,36 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" [[package]] name = "unicode-xid" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" [[package]] name = "unindent" -version = "0.1.11" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1766d682d402817b5ac4490b3c3002d91dfa0d22812f341609f97b08757359c" +checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce" [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -3733,9 +3630,9 @@ checksum = "86bd8d4e895da8537e5315b8254664e6b769c4ff3db18321b297a1e7004392e3" [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "valuable" @@ -3745,7 +3642,7 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "variadics" -version = "0.0.4" +version = "0.0.6" dependencies = [ "sealed", "trybuild", @@ -3757,40 +3654,22 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - [[package]] name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "waker-fn" -version = "1.1.1" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = 
"29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -3799,36 +3678,37 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.89" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.89" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.39" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -3836,9 +3716,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.89" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +checksum 
= "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3846,31 +3726,32 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.89" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.89" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-bindgen-test" -version = "0.3.39" +version = "0.3.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cf9242c0d27999b831eae4767b2a146feb0b27d332d553e605864acd2afd403" +checksum = "68497a05fb21143a08a7d24fc81763384a3072ee43c44e86aad1744d6adef9d9" dependencies = [ "console_error_panic_hook", "js-sys", + "minicov", "scoped-tls", "wasm-bindgen", "wasm-bindgen-futures", @@ -3879,20 +3760,43 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.39" +version = "0.3.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "794645f5408c9a039fd09f4d113cdfb2e7eba5ff1956b07bcf701cf4b394fe89" +checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" dependencies = [ "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.75", +] + +[[package]] +name = "wasmparser" +version = "0.206.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39192edb55d55b41963db40fd49b0b542156f04447b5b512744a91d38567bdbc" +dependencies = [ + 
"ahash", + "bitflags 2.6.0", + "hashbrown", + "indexmap", + "semver 1.0.23", ] [[package]] name = "web-sys" -version = "0.3.66" +version = "0.3.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", @@ -3900,17 +3804,18 @@ dependencies = [ [[package]] name = "webbrowser" -version = "0.8.12" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b2391658b02c27719fc5a0a73d6e696285138e8b12fba9d4baa70451023c71" +checksum = "425ba64c1e13b1c6e8c5d2541c8fac10022ca584f33da781db01b5756aef1f4e" dependencies = [ + "block2", "core-foundation", "home", "jni", "log", "ndk-context", - "objc", - "raw-window-handle", + "objc2", + "objc2-foundation", "url", "web-sys", ] @@ -3929,44 +3834,13 @@ dependencies = [ "quote", "serde", "serde-wasm-bindgen", - "syn 2.0.42", + "syn 2.0.75", "tokio", "wasm-bindgen", "wasm-bindgen-test", "web-sys", - "wee_alloc", -] - -[[package]] -name = "wee_alloc" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb3b5a6b2bb17cb6ad44a2e68a43e8d2722c997da10e928665c72ec6c0a0b8e" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "memory_units", - "winapi 0.3.9", -] - -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix 0.38.28", ] -[[package]] -name = "winapi" -version = "0.2.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -3977,12 +3851,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -3991,11 +3859,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "winapi 0.3.9", + "windows-sys 0.59.0", ] [[package]] @@ -4006,11 +3874,11 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.51.1" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -4037,7 +3905,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + 
"windows-targets 0.52.6", ] [[package]] @@ -4072,17 +3949,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -4099,9 +3977,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -4117,9 +3995,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -4135,9 +4013,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -4153,9 +4037,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -4171,9 +4055,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -4189,9 +4073,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -4207,26 +4091,47 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = 
"0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.5.30" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b5c3db89721d50d0e2a673f5043fc4722f76dcc352d7b1ab8b8288bed4ed2c5" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ "memchr", ] [[package]] -name = "yaml-rust" -version = "0.4.5" +name = "winnow" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" dependencies = [ - "linked-hash-map", + "memchr", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.75", ] [[package]] @@ -4235,5 +4140,5 @@ version = "7.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "390e51da0ed8cc3ade001d15fa5ba6f966b99c858fb466ec6b06d1682f1f94dd" dependencies = [ - "rand 0.8.5", + "rand", ] diff --git a/Cargo.toml b/Cargo.toml index cc1568a37cdc..53af7d2e836a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,8 +6,8 @@ members = [ "hydro_deploy/core", "hydro_deploy/hydro_cli", "hydro_deploy/hydro_cli_examples", - "hydro_deploy/hydroflow_cli_integration", - 
"hydro_deploy/hydroflow_plus_cli_integration", + "hydro_deploy/hydroflow_deploy_integration", + "hydro_deploy/hydroflow_plus_deploy", "hydroflow", "hydroflow_datalog", "hydroflow_datalog_core", @@ -15,8 +15,10 @@ members = [ "hydroflow_macro", "hydroflow_plus", "hydroflow_plus_test", - "hydroflow_plus_test_macro", + "hydroflow_plus_test_local", + "hydroflow_plus_test_local_macro", "lattices", + "lattices_macro", "multiplatform_test", "pusherator", "relalg", @@ -43,6 +45,7 @@ lto = "fat" [profile.profile] inherits = "release" debug = 2 +lto = "off" strip = "none" [profile.dev.package.website_playground] diff --git a/README.md b/README.md index a97230b747ca..da04f8a3587f 100644 --- a/README.md +++ b/README.md @@ -40,17 +40,38 @@ For more, check out the [surface syntax section of the Hydroflow book](https://h We provide a `cargo-generate` template for you to get started from a simple working example. To install `cargo-generate`, run the following: -```bash, ignore +```bash,ignore cargo install cargo-generate ``` -Then run -```bash, ignore -cargo generate gh:hydro-project/hydroflow-template +Then run: +```bash,ignore +cargo generate gh:hydro-project/hydroflow template/hydroflow ``` -and you will get a well-formed Hydroflow/Rust project to use as a starting point. It provides a simple Echo Server and Client, and advice + +`cd` into the generated folder, ensure the correct nightly version of rust is installed, and test the generated project: +```bash +#shell-command-next-line +cd +#shell-command-next-line +rustup update +#shell-command-next-line +cargo test +``` + +And you will get a well-formed Hydroflow/Rust project to use as a starting point. It provides a simple Echo Server and Client, and advice for adapting it to other uses. +### Enable IDE Support for Ligatures +Since flow edges `->` appear frequently in flows described using the Hydroflow surface syntax, enabling ligature support +in your IDE may improve your code reading experience. 
This has no impact on code functionality or performance. + +Instructions to enable this for the `Fira Code` font: +- [VSCode](https://github.com/tonsky/FiraCode/wiki/VS-Code-Instructions) +- [IntelliJ](https://github.com/tonsky/FiraCode/wiki/IntelliJ-products-instructions) + +More font options are available [here](https://github.com/tonsky/FiraCode?tab=readme-ov-file#alternatives). + ## Dev Setup See the [setup section of the book](https://hydro.run/docs/hydroflow/quickstart/setup). diff --git a/RELEASING.md b/RELEASING.md index f8063a6e5633..8b96d3e48e79 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -18,6 +18,8 @@ messages, but requires manually intervention to do so in some situations. ```sh cargo install cargo-smart-release ``` +Re-run this command before each release to update the tool before testing locally, as the CI will +always use the latest version. ## Dry run to ensure changelogs can be generated @@ -41,8 +43,8 @@ showing that all the changelogs can be modified. Make sure the version bumps loo ```log [INFO ] Updating crates-io index -[WARN ] Refused to publish 'hydroflow_cli_integration' as as it didn't change. -[INFO ] Will not publish or alter 3 dependent crates: unchanged = 'hydroflow_cli_integration', 'variadics', 'pusherator' +[WARN ] Refused to publish 'hydroflow_deploy_integration' as as it didn't change. 
+[INFO ] Will not publish or alter 3 dependent crates: unchanged = 'hydroflow_deploy_integration', 'variadics', 'pusherator' [INFO ] WOULD auto-bump dependent package 'hydroflow_lang' from 0.4.0 to 0.5.0 for publishing [INFO ] WOULD auto-bump dependent package 'hydroflow_datalog_core' from 0.4.0 to 0.5.0 for publishing, for SAFETY due to breaking package 'hydroflow_lang' [INFO ] WOULD auto-bump dependent package 'hydroflow_datalog' from 0.4.0 to 0.5.0 for publishing, for SAFETY due to breaking package 'hydroflow_datalog_core' @@ -118,13 +120,51 @@ On the commit immediately _before_ you move the package(s) and run the following ``` cargo changelog --write ... ``` -Then, before committing the changes, go through the modified `CHANGELOG.md` files and add a prefix -to the `Commit Statistics` and `Commit Details` headers, for example: `Pre-Move Commit Statistics`/`Pre-Move Commit Details`. +(This command is provided by `cargo install cargo-smart-release`; don't use any other `cargo changelog` command) + +Next (even if there are no changes), go through the modified `CHANGELOG.md` files and add a prefix +to **all** (not just the new) the `Commit Statistics` and `Commit Details` headers, for example: +`Pre-Move Commit Statistics`/`Pre-Move Commit Details`. This is necessary because otherwise `cargo-smart-release` will treat those sections as auto-generated and will not preserve them, but then won't regenerate them due to the package moving. Commit the updated changelogs and cherry-pick that commit to the latest version if you went back in history. The changelogs should now be safely preserved by future releases. +## Addendum: Renaming crates + +First, follow the [steps above for moving crates](#addendum-moving-crates). + +After renaming a crate, `cargo-smart-release` will see it as a brand new crate with no published +versions on crates.io, and will therefore not bump the version. 
This is not desired behavior, and +generating the changelog will fail unintelligibly due to the conflicting versions: +```log +BUG: User segments are never auto-generated: ... +``` + +To fix this, before releasing, manually bump the version of the renamed crate. `Cargo.toml`: +```toml +name = "crate_old_name" +publish = true +version = "0.8.0" +# becomes +name = "crate_new_name" +publish = true +version = "0.9.0" +``` +(In this case, bumping the minor version) + +You will also need to manually update any crates that depend on the renamed crate as well: +```toml +crate_old_name = { path = "../crate_old_path", version = "^0.8.0" } +# becomes +crate_new_name = { path = "../crate_new_path", version = "^0.9.0" } +``` + +Commit those changes, then continue as normal. + +(There may be other issues with the `git tag`s `cargo-smart-release` uses to track versions if you +are renaming a crate _back to an old name_). + ## Addendum: The GitHub App account So... `cargo smart-release` wants to push to `hydro-project/hydroflow`'s `main` branch. 
However, diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 8f2087ef178a..e37a0825cb8b 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -10,17 +10,15 @@ license = "Apache-2.0" [dependencies] [dev-dependencies] -criterion = { version = "0.5", features = [ "async_tokio", "html_reports" ] } -hydroflow = { path = "../hydroflow" } -lazy_static = "1.4.0" -# pprof = { version = "0.6", features = [ "flamegraph", "criterion" ] } -rand = "0.8.4" +criterion = { version = "0.5.0", features = [ "async_tokio", "html_reports" ] } +hydroflow = { path = "../hydroflow", features = [ "debugging" ] } +rand = "0.8.0" rand_distr = "0.4.3" -seq-macro = "0.2" +seq-macro = "0.2.0" timely = { package = "timely-master", version = "0.13.0-dev.1" } differential-dataflow = { package = "differential-dataflow-master", version = "0.13.0-dev.1" } # git = "https://github.com/TimelyDataflow/differential-dataflow.git", rev = "7bc5338a977fe1d95b96a9ba84ba8cd460e0cdd7" } # "0.12" -tokio = { version = "1.0", features = [ "rt-multi-thread" ] } -static_assertions = "1.1.0" +tokio = { version = "1.29.0", features = [ "rt-multi-thread" ] } +static_assertions = "1.0.0" [[bench]] name = "arithmetic" diff --git a/benches/README.md b/benches/README.md new file mode 100644 index 000000000000..394f876531d1 --- /dev/null +++ b/benches/README.md @@ -0,0 +1,13 @@ +# Microbenchmarks + +Of Hydroflow and other crates. 
+ +Run all benchmarks: +``` +cargo bench -p benches +``` + +Run specific benchmarks: +``` +cargo bench -p benches --bench reachability +``` diff --git a/benches/benches/.gitignore b/benches/benches/.gitignore new file mode 100644 index 000000000000..1d8d6d755406 --- /dev/null +++ b/benches/benches/.gitignore @@ -0,0 +1 @@ +fork_join_*.hf diff --git a/benches/benches/fork_join.rs b/benches/benches/fork_join.rs index 58ca87868ca3..62ca9b7b60de 100644 --- a/benches/benches/fork_join.rs +++ b/benches/benches/fork_join.rs @@ -1,4 +1,5 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use hydroflow::hydroflow_syntax; use hydroflow::scheduled::graph::Hydroflow; use hydroflow::scheduled::graph_ext::GraphExt; use hydroflow::scheduled::handoff::{Iter, VecHandoff}; @@ -82,6 +83,17 @@ fn benchmark_hydroflow(c: &mut Criterion) { }); } +fn benchmark_hydroflow_surface(c: &mut Criterion) { + c.bench_function("fork_join/hydroflow/surface", |b| { + b.iter(|| { + let mut hf = hydroflow_syntax! 
{ + source_iter(0..NUM_INTS) -> import!("fork_join_20.hf") -> for_each(|x| { black_box(x); }); + }; + hf.run_available(); + }) + }); +} + fn benchmark_hydroflow_builder(c: &mut Criterion) { c.bench_function("fork_join/hydroflow_builder", |b| { b.iter(|| { @@ -151,157 +163,12 @@ fn benchmark_timely(c: &mut Criterion) { }); } -// fn benchmark_spinach(c: &mut Criterion) { -// c.bench_function("spinach", |b| { -// b.to_async( -// tokio::runtime::Builder::new_current_thread() -// .build() -// .unwrap(), -// ) -// .iter(|| { -// async { -// use spinachflow::comp::Comp; - -// type MyLatRepr = -// spinachflow::lattice::set_union::SetUnionRepr; -// let op = >::new((0..NUM_INTS).collect()); - -// struct Even(); -// impl spinachflow::func::unary::Morphism for Even { -// type InLatRepr = MyLatRepr; -// type OutLatRepr = MyLatRepr; -// fn call( -// &self, -// item: spinachflow::hide::Hide, -// ) -> spinachflow::hide::Hide { -// item.filter(|i| 0 == i % 2) -// } -// } - -// struct Odds(); -// impl spinachflow::func::unary::Morphism for Odds { -// type InLatRepr = MyLatRepr; -// type OutLatRepr = MyLatRepr; -// fn call( -// &self, -// item: spinachflow::hide::Hide, -// ) -> spinachflow::hide::Hide { -// item.filter(|i| 1 == i % 2) -// } -// } - -// ///// MAGIC NUMBER!!!!!!!! 
is NUM_OPS -// seq_macro::seq!(N in 0..20 { -// let [ op_even, op_odds ] = spinachflow::op::fixed_split::<_, 2>(op); -// let op_even = spinachflow::op::MorphismOp::new(op_even, Even()); -// let op_odds = spinachflow::op::MorphismOp::new(op_odds, Odds()); -// let op = spinachflow::op::MergeOp::new(op_even, op_odds); -// let op = spinachflow::op::DynOpDelta::new(Box::new(op)); -// }); - -// let comp = spinachflow::comp::NullComp::new(op); -// spinachflow::comp::CompExt::run(&comp).await.unwrap_err(); -// } -// }); -// }); -// } - -// fn benchmark_spinach_switch(c: &mut Criterion) { -// c.bench_function("spinach w/ switch", |b| { -// b.to_async( -// tokio::runtime::Builder::new_current_thread() -// .build() -// .unwrap(), -// ) -// .iter(|| { -// async { -// use spinachflow::comp::Comp; - -// type MyLatRepr = -// spinachflow::lattice::set_union::SetUnionRepr; -// let op = >::new((0..NUM_INTS).collect()); - -// struct SwitchEvenOdd(); -// impl spinachflow::func::unary::Morphism for SwitchEvenOdd { -// type InLatRepr = MyLatRepr; -// type OutLatRepr = spinachflow::lattice::pair::PairRepr; -// fn call( -// &self, -// item: spinachflow::hide::Hide, -// ) -> spinachflow::hide::Hide { -// let (a, b) = item.switch(|i| 0 == i % 2); -// spinachflow::hide::Hide::zip(a, b) -// } -// } - -// ///// MAGIC NUMBER!!!!!!!! 
is NUM_OPS -// seq_macro::seq!(N in 0..20 { -// let op = spinachflow::op::MorphismOp::new(op, SwitchEvenOdd()); -// let ( op_even, op_odds ) = spinachflow::op::SwitchOp::new(op); -// let op = spinachflow::op::MergeOp::new(op_even, op_odds); -// let op = spinachflow::op::DynOpDelta::new(Box::new(op)); -// }); - -// let comp = spinachflow::comp::NullComp::new(op); -// spinachflow::comp::CompExt::run(&comp).await.unwrap_err(); -// } -// }); -// }); -// } - -// fn benchmark_spinachflow_symm(c: &mut Criterion) { -// c.bench_function("spinachflow (symmetric)", |b| { -// b.to_async( -// tokio::runtime::Builder::new_current_thread() -// .build() -// .unwrap(), -// ) -// .iter(|| { -// async { -// use spinachflow::futures::StreamExt; -// use spinachflow::futures::future::ready; - -// let stream = spinachflow::futures::stream::iter(0..NUM_INTS); - -// ///// MAGIC NUMBER!!!!!!!! is NUM_OPS -// seq_macro::seq!(N in 0..20 { -// let splitter = spinachflow::stream::Splitter::new(stream); -// let mut i = 0; -// let splits = [(); BRANCH_FACTOR].map(|_| { -// let j = i; -// i += 1; -// splitter.add_split().filter(move |x| ready(j == x % BRANCH_FACTOR)) -// }); -// let stream = spinachflow::stream::SelectArr::new(splits); -// let stream: std::pin::Pin>> = Box::pin(stream); -// }); - -// let mut stream = stream; -// loop { -// let item = stream.next().await; -// if item.is_none() { -// break; -// } -// } -// } -// }); -// }); -// } - -// criterion_group!( -// name = fork_join_dataflow; -// config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); -// targets = benchmark_babyflow -// ); -// criterion_group!(fork_join_dataflow, benchmark_timely,); criterion_group!( fork_join_dataflow, benchmark_hydroflow, + benchmark_hydroflow_surface, benchmark_hydroflow_builder, benchmark_timely, benchmark_raw, - // benchmark_spinach, - // benchmark_spinach_switch, - // benchmark_spinachflow_symm, ); criterion_main!(fork_join_dataflow); diff --git 
a/benches/benches/micro_ops.rs b/benches/benches/micro_ops.rs index 684eb839011c..ce32f2308e6a 100644 --- a/benches/benches/micro_ops.rs +++ b/benches/benches/micro_ops.rs @@ -273,7 +273,7 @@ fn ops(c: &mut Criterion) { const DATA: [u64; 1024] = [0; 1024]; let mut df = hydroflow_syntax! { - source_iter(black_box(DATA)) -> persist() + source_iter(black_box(DATA)) -> persist::<'static>() -> map(black_box) -> defer_tick() -> map(black_box) @@ -307,7 +307,7 @@ fn ops(c: &mut Criterion) { const DATA: [[u8; 8192]; 1] = [[0; 8192]; 1]; let mut df = hydroflow_syntax! { - source_iter(black_box(DATA)) -> persist() + source_iter(black_box(DATA)) -> persist::<'static>() -> defer_tick() -> map(black_box) -> defer_tick() diff --git a/benches/benches/reachability.rs b/benches/benches/reachability.rs index 7d68e6787b7f..3537300baa7b 100644 --- a/benches/benches/reachability.rs +++ b/benches/benches/reachability.rs @@ -2,6 +2,7 @@ use std::cell::RefCell; use std::collections::{HashMap, HashSet}; use std::io::{BufRead, BufReader, Cursor}; use std::rc::Rc; +use std::sync::LazyLock; use criterion::{criterion_group, criterion_main, Criterion}; use differential_dataflow::input::Input; @@ -9,44 +10,45 @@ use differential_dataflow::operators::{Iterate, Join, Threshold}; use hydroflow::hydroflow_syntax; use hydroflow::scheduled::graph_ext::GraphExt; -lazy_static::lazy_static! 
{ - static ref EDGES: HashMap> = { - let cursor = Cursor::new(include_bytes!("reachability_edges.txt")); - let reader = BufReader::new(cursor); - - let mut edges = HashMap::<_, Vec<_>>::new(); - for line in reader.lines() { - let line = line.unwrap(); - let mut nums = line.split_whitespace(); - let a = nums.next().unwrap().parse().unwrap(); - let b = nums.next().unwrap().parse().unwrap(); - assert!(nums.next().is_none()); - edges.entry(a).or_default().push(b); - } - edges - }; - static ref EDGE_VEC: Vec<(usize, usize)> = { - let cursor = Cursor::new(include_bytes!("reachability_edges.txt")); - let reader = BufReader::new(cursor); - - reader.lines().map(|line| { +static EDGES: LazyLock>> = LazyLock::new(|| { + let cursor = Cursor::new(include_bytes!("reachability_edges.txt")); + let reader = BufReader::new(cursor); + + let mut edges = HashMap::<_, Vec<_>>::new(); + for line in reader.lines() { + let line = line.unwrap(); + let mut nums = line.split_whitespace(); + let a = nums.next().unwrap().parse().unwrap(); + let b = nums.next().unwrap().parse().unwrap(); + assert!(nums.next().is_none()); + edges.entry(a).or_default().push(b); + } + edges +}); +static EDGE_VEC: LazyLock> = LazyLock::new(|| { + let cursor = Cursor::new(include_bytes!("reachability_edges.txt")); + let reader = BufReader::new(cursor); + + reader + .lines() + .map(|line| { let line = line.unwrap(); let mut v = line.split_whitespace().map(|n| n.parse::().unwrap()); (v.next().unwrap(), v.next().unwrap()) - }).collect() - }; - static ref REACHABLE: HashSet = { - let cursor = Cursor::new(include_bytes!("reachability_reachable.txt")); - let reader = BufReader::new(cursor); - - let mut set = HashSet::new(); - for line in reader.lines() { - let line = line.unwrap(); - set.insert(line.parse().unwrap()); - } - set - }; -} + }) + .collect() +}); +static REACHABLE: LazyLock> = LazyLock::new(|| { + let cursor = Cursor::new(include_bytes!("reachability_reachable.txt")); + let reader = BufReader::new(cursor); + + 
let mut set = HashSet::new(); + for line in reader.lines() { + let line = line.unwrap(); + set.insert(line.parse().unwrap()); + } + set +}); fn benchmark_timely(c: &mut Criterion) { use timely::dataflow::operators::{ diff --git a/benches/build.rs b/benches/build.rs new file mode 100644 index 000000000000..90ded262aeaa --- /dev/null +++ b/benches/build.rs @@ -0,0 +1,37 @@ +use std::fs::File; +use std::io::{BufWriter, Write}; +use std::path::PathBuf; + +const NUM_OPS: usize = 20; + +pub fn main() { + if let Err(err) = fork_join() { + eprintln!("benches/build.rs error: {:?}", err); + } +} + +pub fn fork_join() -> std::io::Result<()> { + let path = PathBuf::from_iter([ + env!("CARGO_MANIFEST_DIR"), + "benches", + &format!("fork_join_{}.hf", NUM_OPS), + ]); + let file = File::create(path)?; + let mut write = BufWriter::new(file); + + writeln!(write, "a0 = mod -> tee();")?; + + for i in 0..NUM_OPS { + if i > 0 { + writeln!(write, "a{} = union() -> tee();", i)?; + } + writeln!(write, "a{} -> filter(|x| x % 2 == 0) -> a{};", i, i + 1)?; + writeln!(write, "a{} -> filter(|x| x % 2 == 1) -> a{};", i, i + 1)?; + } + + writeln!(write, "a{} = union() -> mod;", NUM_OPS)?; + + write.flush()?; + + Ok(()) +} diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 000000000000..e988714bc327 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,2 @@ +upper-case-acronyms-aggressive = true +avoid-breaking-exported-api = false diff --git a/design_docs/2021-10_architecture_design_doc.md b/design_docs/2021-10_architecture_design_doc.md index bf3f8daf88e5..b20752e30c77 100644 --- a/design_docs/2021-10_architecture_design_doc.md +++ b/design_docs/2021-10_architecture_design_doc.md @@ -20,7 +20,7 @@ We need to support both of them. ## Performance/Optimization - Monomorphization: compiler should inline operators in sequence & remove dead code, as much as possible. -- Avoid unneccesary copying. +- Avoid unnecessary copying. - Keep cost of scheduling low and thread-local when possible. 
Support batching to avoid scheduler and context-switching overhead. # Benchmark Conclusions diff --git a/design_docs/2023-02_hydro_cli.md b/design_docs/2023-02_hydro_cli.md index 87c4a31baaea..5560f3e5b505 100644 --- a/design_docs/2023-02_hydro_cli.md +++ b/design_docs/2023-02_hydro_cli.md @@ -36,19 +36,19 @@ module main( Assume we also have a `src/echo_client.hf` file that periodically sends `EchoMsg`s to an `outbound` channel and logs echos from the `inbound` channel. We can deploy this program to a cloud machine using the following `echo.hydro.py` config file: ```python from hydro import Deployment -from hydro.gcp import GCPMachine # keys are automatically loaded from somewhere +from hydro.gcp import GcpMachine # keys are automatically loaded from somewhere async def main(): deployment = Deployment() # Specify the GCP instances we want to deploy to - server_machine = deployment.GCPMachine( + server_machine = deployment.GcpMachine( project="hydro-1234", zone="us-west1-a", type="e2-micro", ) - client_machine = deployment.GCPMachine( + client_machine = deployment.GcpMachine( project="hydro-1234", zone="us-west1-a", type="e2-micro", diff --git a/docs/docs/deploy/deployments-hosts-and-services.md b/docs/docs/deploy/deployments-hosts-and-services.md index ec1a72b558d8..4baa62c7e5fb 100644 --- a/docs/docs/deploy/deployments-hosts-and-services.md +++ b/docs/docs/deploy/deployments-hosts-and-services.md @@ -30,16 +30,16 @@ Hydro Deploy also supports deploying to cloud VMs, currently supporting Google C ### Google Cloud Platform To deploy to Google Cloud Platform, you will need to install Terraform and the Google Cloud SDK (see [install](./install)). You will also need to create a Google Cloud project. -The first step is to create a VPC, which will enable network connections for our services. We can do this by creating a `hydro.GCPNetwork` object: +The first step is to create a VPC, which will enable network connections for our services. 
We can do this by creating a `hydro.GcpNetwork` object: ```python -network = deployment.GCPNetwork( +network = deployment.GcpNetwork( project="my-project" ) ``` -Then, we can launch a VM on this network using `hydro.GCPComputeEngineHost`: +Then, we can launch a VM on this network using `hydro.GcpComputeEngineHost`: ```python -host = deployment.GCPComputeEngineHost( +host = deployment.GcpComputeEngineHost( name="my-host", network=network, machine_type="e2-micro", diff --git a/docs/docs/deploy/your-first-deploy.md b/docs/docs/deploy/your-first-deploy.md index c4498abaebc8..1c727172341d 100644 --- a/docs/docs/deploy/your-first-deploy.md +++ b/docs/docs/deploy/your-first-deploy.md @@ -13,15 +13,25 @@ First, we need to write the Hydroflow application, which will intergrate with Hy cargo install --locked cargo-generate #shell-command-next-line -cargo generate hydro-project/hydroflow-template +cargo generate gh:hydro-project/hydroflow template/hydroflow ``` -We'll need to add an additional dependency for `hydroflow_cli_integration` to our `Cargo.toml`: +`cd` into the generated folder, ensure the correct nightly version of rust is installed, and test the generated project: +```bash +#shell-command-next-line +cd +#shell-command-next-line +rustup update +#shell-command-next-line +cargo test +``` + +We'll need to add an additional dependency for `hydroflow_deploy_integration` to our `Cargo.toml`: ```toml [dependencies] # ... -hydroflow_cli_integration = "0.1.1" +hydroflow_deploy_integration = "0.1.1" ``` Let's open up `src/main.rs` in the generated project and write a new `main` function that initializes Hydro Deploy: @@ -29,7 +39,7 @@ Let's open up `src/main.rs` in the generated project and write a new `main` func ```rust #[hydroflow::main] async fn main() { - let ports = hydroflow::util::cli::init().await; + let ports = hydroflow::util::deploy::init().await; } ``` @@ -67,27 +77,27 @@ Now, we need to wire up the ports. 
Hydro Deploy uses _named ports_, which can th Returning briefly to our Hydroflow code, we can then load these ports and use them to send and receive packets: ```rust -use hydroflow_cli_integration::ConnectedDirect; +use hydroflow_deploy_integration::ConnectedDirect; use hydroflow::hydroflow_syntax; #[hydroflow::main] async fn main() { - let ports = hydroflow::util::cli::init().await; + let ports = hydroflow::util::deploy::init().await; let input_recv = ports .port("input") // connect to the port with a single recipient - .connect::() + .connect::() .await .into_source(); let output_send = ports .port("output") - .connect::() + .connect::() .await .into_sink(); - hydroflow::util::cli::launch_flow(hydroflow_syntax! { + hydroflow::util::deploy::launch_flow(hydroflow_syntax! { source_iter(["hello".to_string()]) -> dest_sink(output_send); input = source_stream(input_recv) -> tee(); input -> dest_sink(output_send); diff --git a/docs/docs/hydroflow/concepts/index.md b/docs/docs/hydroflow/concepts/index.md index 6c7ba4d4b930..854e8d528b6c 100644 --- a/docs/docs/hydroflow/concepts/index.md +++ b/docs/docs/hydroflow/concepts/index.md @@ -2,12 +2,12 @@ Hydroflow is different from other distributed systems infrastructure, so you probably have questions, like: What is Hydroflow? How does it work? What is special about it, and what are the key concepts to understand? -This chapter covers those basic questions. We start simple describing Hydroflow, and build up to an understanding of +This chapter covers those basic questions. We start simple describing Hydroflow, and build up to an understanding of what makes Hydroflow uniquely powerful. But in case you want a preview of the Hydroflow goodies, here are the main themes: 1. **Distributed Correctness**: Hydroflow's type system can prevent distributed system bugs at compile time. (One example: will your code -produce the same results if you deploy on a single machine or replicate on a distributed cluster of machines?) 
+produce the same results if you deploy on a single machine or replicate on a distributed cluster of machines?) 2. **Local Efficiency**: Hydroflow compiles your dataflow programs into efficient, low-latency, single-threaded executables. Taken together, Hydroflow provides a high-efficiency substrate on which to build powerful languages for distributed computing. @@ -20,36 +20,36 @@ Setting fanfare aside, what *is* Hydroflow? Hydroflow is a library that can be used in any Rust program. It includes two main components: 1. A runtime library that executes low-latency, reactive dataflow programs written in Rust. (The *core API*.) -2. A domain-specific language (DSL) for specifying dataflow programs. (The Hydroflow *surface syntax*.) +2. A domain-specific language (DSL) for specifying dataflow programs. (The Hydroflow *surface syntax*.) -Hydroflow's surface syntax must be embedded in a Rust program; the Rust compiler takes that Hydroflow syntax and -compiles it into an efficient binary executable. +Hydroflow's surface syntax must be embedded in a Rust program; the Rust compiler takes that Hydroflow syntax and +compiles it into an efficient binary executable. > We call a running Hydroflow binary a *transducer*. -In typical usage, a developer writes a transducer as a single-threaded Rust program that is mostly composed of -Hydroflow surface syntax. Each transducer is typically responsible for a single +In typical usage, a developer writes a transducer as a single-threaded Rust program that is mostly composed of +Hydroflow surface syntax. Each transducer is typically responsible for a single "node" (a machine, or a core) in a distributed system composed of many such transducers, which send and receive flows of data to each other. -> Hydroflow itself does not generate distributed code. It is a library for specifying the transducers (individual nodes) that -> participate in a distributed system. +> Hydroflow itself does not generate distributed code. 
It is a library for specifying the transducers (individual nodes) that +> participate in a distributed system. > -> In the [Hydro Project](https://hydro.run), higher-level languages are being built on top of Hydroflow to generate -> distributed code in the form of multiple transducers. -> Meanwhile, you can use Hydroflow to write your own distributed code, by writing individual transducers that work together, +> In the [Hydro Project](https://hydro.run), higher-level languages are being built on top of Hydroflow to generate +> distributed code in the form of multiple transducers. +> Meanwhile, you can use Hydroflow to write your own distributed code, by writing individual transducers that work together, > and deploying them manually or with a tool like [Hydroplane](https://github.com/hydro-project/hydroplane). See the [Hydro Ecosystem](../ecosystem) for more on this. ### So how might a human write distributed systems with Hydroflow? -As an illustration of how you can work at the Hydroflow layer, consider the -[Chat Server example](../quickstart/example_8_chat_server). If you run that binary +As an illustration of how you can work at the Hydroflow layer, consider the +[Chat Server example](https://github.com/hydro-project/hydroflow/tree/main/hydroflow/examples/chat). If you run that binary with the command-line argument `--role server` it will start a single transducer that is responsible for a chat server: receiving membership requests and messages from clients, and forwarding messages from individual clients to all other clients. -If you run that binary with the argument `--role client` it will start a transducer that is responsible for a chat client, which -forwards chat messages from stdin to the server, and prints out messages sent by the server. 
As a distributed system, the chat +If you run that binary with the argument `--role client` it will start a transducer that is responsible for a chat client, which +forwards chat messages from stdin to the server, and prints out messages sent by the server. As a distributed system, the chat service would typically consist of many client transducers and a single server transducer. -Note that this is an example of an extremely simple distributed system in a "star" or "hub-and spokes" topology: the multiple client transducers are completely independent of each other, and each talks only with the central server transducer. +Note that this is an example of an extremely simple distributed system in a "star" or "hub-and-spokes" topology: the multiple client transducers are completely independent of each other, and each talks only with the central server transducer.
@@ -65,8 +65,8 @@ graph TD; ```
- If we wanted something more interesting, we could consider deploying a cluster of multiple server transducers, say for fault tolerance or geo-distribution, perhaps like this: - + If we wanted something more interesting, we could consider deploying a cluster of multiple server transducers, say for fault tolerance or geo-distribution, perhaps like this: +
```mermaid @@ -84,5 +84,5 @@ graph LR; ```
- We'd need to change the server code to get servers sharing their state in a correct and reliable manner. But that's a topic for another section **TODO**; for now, let's stay focused on the basics. + We'd need to change the server code to get servers sharing their state in a correct and reliable manner. But that's a topic for another section **TODO**; for now, let's stay focused on the basics. diff --git a/docs/docs/hydroflow/lattices_crate/index.mdx b/docs/docs/hydroflow/lattices_crate/index.mdx index 6d92a57c3cd8..e043a670affa 100644 --- a/docs/docs/hydroflow/lattices_crate/index.mdx +++ b/docs/docs/hydroflow/lattices_crate/index.mdx @@ -1,8 +1,11 @@ import CrateDocs from '../../../../lattices/README.md' +import MacroDocs from '../../../../lattices_macro/README.md' # The `lattices` Crate - + + + ## Next diff --git a/docs/docs/hydroflow/quickstart/example_1_simplest.mdx b/docs/docs/hydroflow/quickstart/example_1_simplest.mdx index 980ad8f6f62d..4a3f72c2bb34 100644 --- a/docs/docs/hydroflow/quickstart/example_1_simplest.mdx +++ b/docs/docs/hydroflow/quickstart/example_1_simplest.mdx @@ -20,10 +20,10 @@ the numbers in `0..10`. Create a clean template project: ```console #shell-command-next-line -cargo generate hydro-project/hydroflow-template -⚠️ Favorite `hydro-project/hydroflow-template` not found in config, using it as a git repository: https://github.com/hydro-project/hydroflow-template.git +cargo generate gh:hydro-project/hydroflow template/hydroflow +⚠️ Favorite `gh:hydro-project/hydroflow` not found in config, using it as a git repository: https://github.com/hydro-project/hydroflow.git 🤷 Project Name: simple -🔧 Destination: /Users/jmh/code/sussudio/simple ... +🔧 Destination: /Users/me/code/simple ... 🔧 project-name: simple ... 🔧 Generating template ... [11/11] Done: src @@ -32,7 +32,15 @@ cargo generate hydro-project/hydroflow-template ✨ Done! New project created /simple ``` -Change directory into the resulting `simple` folder or open it in your IDE. 
Then edit the `src/main.rs` file, replacing +After `cd`ing into the generated folder, ensure the correct nightly version of rust is installed, and test the generated project: +```bash +#shell-command-next-line +rustup update +#shell-command-next-line +cargo test +``` + +Then edit the `src/main.rs` file, replacing *all* of its contents with the following code: {exampleCode} @@ -43,9 +51,11 @@ And then run the program: ## Understanding the Code Although this is a trivial program, it's useful to go through it line by line. + {getLines(exampleCode, 1)} + This import gives you everything you need from Hydroflow to write code with Hydroflow's -[_surface syntax_](../syntax/index). +[_surface syntax_](../syntax). Next, inside the main method we specify a flow by calling the `hydroflow_syntax!` macro. We assign the resulting `Hydroflow` instance to @@ -69,10 +79,11 @@ We can run this flow from within Rust via the [`run_available()` method](https:/ Note that `run_available()` runs the Hydroflow graph until no more work is immediately available. In this example flow, running the graph drains the iterator completely, so no more work will *ever* be available. In future examples we will use external inputs such as -network ingress, in which case more work might appear at any time. In those examples we may need a different method than `run_available()`, -e.g. the [`run_async()`](https://hydro-project.github.io/hydroflow/doc/hydroflow/scheduled/graph/struct.Hydroflow.html#method.run_async) method, -which we'll see -in [the EchoServer example](./example_7_echo_server). +network ingress, in which case more work might appear at any time. + +In server applications that use network ingress for inputs, new work can appear at any time. In these applications +we may need a different method than `run_available()`, e.g.
the [`run_async()`](https://hydro-project.github.io/hydroflow/doc/hydroflow/scheduled/graph/struct.Hydroflow.html#method.run_async) method, check [the networking +examples](./example_7_networking) for more. ### A Note on Project Structure The template project is intended to be a starting point for your own Hydroflow project, and you can add files and directories as you see fit. The only requirement is that the `src/main.rs` file exists and contains a `main()` function. diff --git a/docs/docs/hydroflow/quickstart/example_2_simple.mdx b/docs/docs/hydroflow/quickstart/example_2_simple.mdx index 5e8cf286b711..830f73b3fb12 100644 --- a/docs/docs/hydroflow/quickstart/example_2_simple.mdx +++ b/docs/docs/hydroflow/quickstart/example_2_simple.mdx @@ -64,7 +64,7 @@ Replace the contents of `src/main.rs` with the following: Here the `filter_map` operator takes a map closure that returns a Rust [`Option`](https://doc.rust-lang.org/std/option/enum.Option.html). If the value is `Some(...)`, it is passed to the output; if it is `None` it is filtered. -The `flat_map` operator takes a map closure that generates a collection type (in this case a `Vec`) +The `flat_map` operator takes a map closure that generates an iterable type (in this case a `RangeInclusive`) which is flattened. Results: diff --git a/docs/docs/hydroflow/quickstart/example_5_reachability.mdx b/docs/docs/hydroflow/quickstart/example_5_reachability.mdx index 364ff45c4f19..6a652b3bb697 100644 --- a/docs/docs/hydroflow/quickstart/example_5_reachability.mdx +++ b/docs/docs/hydroflow/quickstart/example_5_reachability.mdx @@ -64,7 +64,7 @@ addition of the `reached_vertices` variable, which uses the [union()](../syntax/ op to union the output of two operators into one. 
We route the `origin` vertex into it as one input right away: -{getLines(exampleCode, 8, 12)} +{getLines(exampleCode, 19, 19)} Note the square-bracket syntax for assigning index names to the multiple inputs to `union()`; this is similar to the indexes for `join()`, except that (a) union can have an arbitrary number of inputs, (b) the index names can be arbitrary strings, and (c) the indexes are optional can be omitted entirely. (By contrast, recall that @@ -81,7 +81,7 @@ Finally, we process the output of the `join` as passed through the `tee`. One branch pushes reached vertices back up into the `reached_vertices` variable (which begins with a `union`), while the other prints out all the reached vertices as in the simple program. -{getLines(exampleCode, 14, 17)} +{getLines(exampleCode, 12, 15)} Below is the diagram rendered by [mermaid](https://mermaid-js.github.io/) showing the structure of the full flow: diff --git a/docs/docs/hydroflow/quickstart/example_7_echo_server.mdx b/docs/docs/hydroflow/quickstart/example_7_echo_server.mdx deleted file mode 100644 index 48ef12b3e315..000000000000 --- a/docs/docs/hydroflow/quickstart/example_7_echo_server.mdx +++ /dev/null @@ -1,142 +0,0 @@ ---- -sidebar_position: 8 ---- -import CodeBlock from '@theme/CodeBlock'; -import main from '!!raw-loader!../../../../hydroflow/examples/echoserver/main.rs'; -import protocol from '!!raw-loader!../../../../hydroflow/examples/echoserver/protocol.rs'; -import server from '!!raw-loader!../../../../hydroflow/examples/echoserver/server.rs'; -import client from '!!raw-loader!../../../../hydroflow/examples/echoserver/client.rs'; -import { getLines } from '../../../src/util'; - -# Networked Services 1: EchoServer -> In this example we cover: -> * The standard project template for networked Hydroflow services. -> * Rust's `clap` crate for command-line options -> * Defining message types -> * Destination operators (e.g. 
for sending data to a network) -> * Network sources and dests with built-in serde (`source_stream_serde`, `dest_sink_serde`) -> * The `source_stdin` source -> * Long-running services via `run_async` - -Our examples up to now have been simple single-node programs, to get us comfortable with Hydroflow's -surface syntax. But the whole point of Hydroflow is to help us write distributed programs or services that run on a cluster of machines! - -In this example we'll study the "hello, world" of distributed systems -- a simple echo server. It will listen on a UDP port, -and send back a copy of any message it receives, with a timestamp. We will also look at a client to -accept strings from the command line, send them to the echo server, and print responses. - -We will use a fresh `hydroflow-template` project template to get started. Change to the directory where you'd like to put your project, and once again run: -```bash -cargo generate hydro-project/hydroflow-template -``` -Then change directory into the resulting project. - -The Hydroflow template project provides *this example* as its default, so there's no code for us to change. -The `README.md` for the template project is a good place to start. It contains a brief overview of the project structure, and how to build and run the example. Here we'll spend more time learning from the code. - -### `main.rs` -We start with a `main` function that parses command-line options, and invokes the appropriate -role-specific service. -After a prelude of imports, we start by defining a Rust `enum` for the `Role`s that the service supports. - -{getLines(main, 1, 17)} - -Following that, we use Rust's [`clap`](https://docs.rs/clap/latest/clap/) (Command Line Argument Parser) crate to parse command-line options: - -{getLines(main, 19, 27)} - -This sets up 3 command-line flags: `role`, `addr`, and `server_addr`. 
Note how the `addr` and `server_addr` flags are made optional via wrapping in a Rust `Option`; by contrast, the `role` option is required. The `clap` crate will parse the command-line options and populate the `Opts` struct with the values. `clap` handles parsing the command line strings into the associated Rust types -- the `value_parser` attribute tells `clap` to use Hydroflow's `ipv4_resolve` helper function to parse a string like "127.0.0.1:6552" into a `SocketAddr`. - -This brings us to the `main` function itself. It is prefaced by a `#[hydroflow::main]` attribute, which is a macro that sets up the tokio runtime for Hydroflow. It is also an async function. This is necessary because Hydroflow uses the tokio runtime for asynchronous execution as a service. - -{getLines(main, 29, 40)} - -After parsing the command line arguments we set up some Rust-based networking. Specifically, for either client or server roles we will need to allocate a UDP socket that is used for both sending and receiving messages. We do this by calling the async `bind_udp_bytes` function, which is defined in the `hydroflow/src/util` module. As an async function it returns a `Future`, so requires appending `.await`; the function returns a triple of type `(UdpSink, UdpSource, SocketAddr)`. The first two are the types that we'll use in Hydroflow to send and receive messages, respectively. (Note: your IDE might expand out the `UdpSink` and `UdpSource` traits to their more verbose definitions. That is fine; you can ignore those.) The SocketAddr is there in case you specified port 0 in your `addr` argument, in which case this return value tells you what port the OS has assigned for you. - -All that's left is to fire up the code for the appropriate role! - -{getLines(main, 42, 50)} - -### `protocol.rs` -As a design pattern, it is natural in distributed Hydroflow programs to define various message types in a `protocol.rs` file with structures shared for use by all the Hydroflow logic across roles. 
In this simple example, we define only one message type: `EchoMsg`, and a simple struct with two fields: `payload` and `ts` (timestamp). The `payload` field is a string, and the `ts` field is a `DateTime`, which is a type from the [`chrono`](https://docs.rs/chrono/latest/chrono/) crate. Note the various derived traits on `EchoMsg`—specifically `Serialize` and `Deserialize`—these are required for structs that we send over the network. - -{protocol} - -### `server.rs` -Things get interesting when we look at the `run_server` function. This function is the main entry point for the server. It takes as arguments the `outbound` and `inbound` sockets from `main`, and the `Opts` (which are ignored). - -After printing a cheery message, we get into the Hydroflow code for the server, consisting of three short pipelines. - -{server} - -Lets take the Hydroflow code one statement at a time. - -{getLines(server, 14, 15)} - -The first pipeline statement, `inbound_chan`, uses a source operator we have not seen before, [`source_stream_serde()`](../syntax/surface_ops_gen.md#source_stream_serde). This is a streaming source like `source_stream`, but for network streams. It takes a `UdpSource` as an argument, and has a particular output type: a stream of `(T, SocketAddr)` pairs where `T` is some type that implements the `Serialize` and `Deserialize` traits (together known as "serde"), and `SocketAddr` is the network address of the sender of the item. In this case, `T` is `EchoMsg`, which we defined in `protocol.rs`, and the `SocketAddr` is the address of the client that sent the message. We pipe the result into a `tee()` for reuse. - ---- - -The second statement is a simple `for_each` to print the messages received at the server. - -{getLines(server, 17, 19)} - ---- - -The third and final pipeline statement constructs a response `EchoMsg` with the local timestamp copied in. It then pipes the result into a `dest_XXX` operator—the first that we've seen! 
A dest is the opposite of a `source_XXX` operator: it can go at the end of a pipeline and sends data out on a tokio channel. The specific operator used here is [`dest_sink_serde()`](../syntax/surface_ops_gen.md#dest_sink_serde). This is a dest operator like `dest_sink`, but for network streams. It takes a `UdpSink` as an argument, and requires a particular input type: a stream of `(T, SocketAddr)` pairs where `T` is some type that implements the `Serialize` and `Deserialize` traits, and `SocketAddr` is the network address of the destination. In this case, `T` is once again `EchoMsg`, and the `SocketAddr` is the address of the client that sent the original message. - -{getLines(server, 21, 23)} - ---- - -The remaining line of code runs the server. The `run_async()` function is a method on the `Hydroflow` type. It is an async function, so we append `.await` to the call. The program will block on this call until the server is terminated. - -{getLines(server, 26, 27)} - -### `client.rs` -The client begins by making sure the user specified a server address at the command line. After printing a message to the terminal, it constructs a hydroflow graph. - -{client} - -Again, we start the hydroflow code defining shared inbound and outbound channels. The code here is simplified compared -to the server because the `inbound_chan` and `outbound_chan` are each referenced only once, so they do not require `tee` or `union` operators, respectively (they have been commented out). - -The `inbound_chan` drives a pipeline that prints messages to the screen. - -Only the last pipeline is novel for us by now. It uses another new source operator [`source_stdin()`](../syntax/surface_ops_gen.md#source_stdin), which does what you might expect: it streams lines of text as they arrive from `stdin` (i.e. as they are typed into a terminal). It then uses a `map` to construct an `EchoMsg` with each line of text and the current timestamp. 
The result is piped into a sink operator [`dest_sink_serde()`](../syntax/surface_ops_gen.md#dest_sink_serde), which sends the message to the server. - -{getLines(client, 28, 30)} - -<>The client logic ends by launching the flow graph with {getLines(client, 33)}. - -## Running the example -As described in the [`README.md` file](https://github.com/hydro-project/hydroflow/tree/main/hydroflow/examples/echoserver#readme), we can run the server in one terminal, and the client in another. The server will print the messages it receives, and the client will print the messages it receives back from the server. The client and servers' `--server-addr' arguments need to match or this won't work! - -<>{/* TODO(mingwei): test this code somehow*/} - -Fire up the server in terminal 1: -```console -#shell-command-next-line -cargo run -- --role server --addr localhost:12347 -``` - -Then start the client in terminal 2 and type some messages! -```console -#shell-command-next-line -cargo run -- --role client --server-addr localhost:12347 -Listening on 127.0.0.1:51778 -Connecting to server at 127.0.0.1:12347 -Client live! -This is a test -2023-05-31 23:13:26.717165 UTC: Got Echo { payload: "This is a test", ts: 2023-05-31T23:13:26.715997Z } from 127.0.0.1:12347 -This is the rest -2023-05-31 23:13:47.117957 UTC: Got Echo { payload: "This is the rest", ts: 2023-05-31T23:13:47.117236Z } from 127.0.0.1:12347 -``` - -And have a look back at the server console! -```console -Listening on 127.0.0.1:12347 -Server live! 
-2023-05-31 23:13:26.715063 UTC: Got Echo { payload: "This is a test", ts: 2023-05-31T23:13:26.713063Z } from 127.0.0.1:51778 -2023-05-31 23:13:47.117165 UTC: Got Echo { payload: "This is the rest", ts: 2023-05-31T23:13:47.115551Z } from 127.0.0.1:51778 -``` diff --git a/docs/docs/hydroflow/quickstart/example_7_networking.mdx b/docs/docs/hydroflow/quickstart/example_7_networking.mdx new file mode 100644 index 000000000000..027804991d86 --- /dev/null +++ b/docs/docs/hydroflow/quickstart/example_7_networking.mdx @@ -0,0 +1,47 @@ +--- +sidebar_position: 8 +--- +import CodeBlock from '@theme/CodeBlock'; +import exampleCode from '!!raw-loader!../../../../hydroflow/examples/example_6_unreachability.rs'; +import exampleOutput from '!!raw-loader!../../../../hydroflow/tests/snapshots/surface_examples__example_6_unreachability.snap'; +import { getLines, extractOutput, extractMermaid } from '../../../src/util'; + +# Networked Services + +Hydroflow provides a robust framework for developing networked services. The best way to get started is to use +the Hydroflow template. + +```bash +cargo generate gh:hydro-project/hydroflow template/hydroflow +``` + +`cd` into the generated folder, ensure the correct nightly version of rust is installed, and test the generated project: +```bash +#shell-command-next-line +cd +#shell-command-next-line +rustup update +#shell-command-next-line +cargo test +``` + +The template contains a chat server & client implementation using UDP. You can start multiple instances of +the client and connect to the same server. + +## Things to Check +* [README](https://github.com/hydro-project/hydroflow/tree/main/template/hydroflow#readme) - Contains instructions on building the template and +running the client and server examples. +* [server.rs](https://github.com/hydro-project/hydroflow/blob/main/template/hydroflow/src/server.rs) - Contains the chat server +implementation. 
The inline rust documentation explains the networking-related hydroflow operators and their usage. +* [client.rs](https://github.com/hydro-project/hydroflow/blob/main/template/hydroflow/src/client.rs) - Contains the chat client +implementation. The inline rust documentation explains the networking-related hydroflow operators and their usage. + +Hydroflow also supports TCP for reliable, connection-oriented communication. Understanding the differences and use +cases of these protocols can help you choose the right one for your networked service. + +## Additional Examples + +The Hydroflow examples directory contains additional examples that deal with networking. For example, the [Randomized +Gossip Server](https://github.com/hydro-project/hydroflow/blob/main/hydroflow/examples/chat/randomized_gossiping_server.rs) +example extends the chat example to work with a cluster of redundant servers that use gossip for sharing chat-related +state between themselves. \ No newline at end of file diff --git a/docs/docs/hydroflow/quickstart/example_8_chat_server.mdx b/docs/docs/hydroflow/quickstart/example_8_chat_server.mdx deleted file mode 100644 index 90dec2b370b9..000000000000 --- a/docs/docs/hydroflow/quickstart/example_8_chat_server.mdx +++ /dev/null @@ -1,298 +0,0 @@ ---- -sidebar_position: 9 ---- -import CodeBlock from '@theme/CodeBlock'; -import main from '!!raw-loader!../../../../hydroflow/examples/chat/main.rs'; -import protocol from '!!raw-loader!../../../../hydroflow/examples/chat/protocol.rs'; -import server from '!!raw-loader!../../../../hydroflow/examples/chat/server.rs'; -import client from '!!raw-loader!../../../../hydroflow/examples/chat/client.rs'; -import { getLines } from '../../../src/util'; - -# Networked Services 2: Chat Server -> In this example we cover: -> * Multiple message types and the [`demux`](../syntax/surface_ops_gen.md#demux) operator. -> * A broadcast pattern via the [`cross_join`](../syntax/surface_ops_gen.md#cross_join) operator. 
-> * One-time bootstrapping pipelines -> * A "gated buffer" using [`defer_signal`](../syntax/surface_ops_gen.md#defer_signal) and [`persist`](../syntax/surface_ops_gen.md#persist) operators - -Our previous [echo server](./example_7_echo_server) example was admittedly simplistic. In this example, we'll build something a bit more useful: a simple chat server. We will again have two roles: a `Client` and a `Server`. `Clients` will register their presence with the `Server`, which maintains a list of clients. Each `Client` sends messages to the `Server`, which will then broadcast those messages to all other clients. - -## Project files - -### `Cargo.toml` -We will use a text-coloring crate called `colored` in this example. -To follow along, add the following line to the bottom of the `Cargo.toml` file -that appears at that root of your template: - -```toml -colored = "2.0.0" -``` - -### `main.rs` -The `main.rs` file here is very similar to that of the echo server, just with two new command-line arguments: one called `name` for a "nickname" in the chatroom, and another optional argument `graph` for printing a dataflow graph if desired. To follow along, you can copy the contents of this file into the `src/main.rs` file of your template. - -{main} - -### `protocol.rs` -Our protocol file here expands upon what we saw with the echoserver by defining multiple message types. -Replace the template contents of `src/protocol.rs` with the following: - -{protocol} - -Note how we use a single Rust `enum` to represent all varieties of message types; this allows us to handle `Message`s of different types with a single Rust network channel. We will use the [`demux`](../syntax/surface_ops_gen.md#demux) operator to separate out these different message types on the receiving end. - -The `ConnectRequest` and `ConnectResponse` messages have no payload; -the address of the sender and the type of the message will be sufficient information. 
The `ChatMsg` message type has a `nickname` field, a `message` field, and a `ts` -field for the timestamp. Once again we use the `chrono` crate to represent timestamps. - -### `server.rs` -The chat server is nearly as simple as the echo server. The main differences are (a) we need to handle multiple message types, -(b) we need to keep track of the list of clients, and (c) we need to broadcast messages to all clients. - -To follow along, replace the contents of `src/server.rs` with the code below: - -{getLines(server, 1, 24)} - -After a short prelude, we have the Hydroflow code near the top of `run_server()`. It begins by defining `outbound_chan` as a `union`ed destination sink for network messages. Then we get to the -more interesting `inbound_chan` definition. - -The `inbound` channel is a source stream that will carry many -types of `Message`s. -We first use a `map` operator to `unwrap` the Rust `Result` type that comes from deserializing the input -from `source_stream_serde`. -Then we use the [`demux`](../syntax/surface_ops_gen.md#demux) operator to partition the stream objects into three channels. The `clients` channel -will carry the addresses of clients that have connected to the server. The `msgs` channel will carry the `ChatMsg` messages that clients send to the server. -The `errs` channel will carry any other messages that clients send to the server. - -Note the structure of the `demux` operator: it takes a closure on -`(Message, SocketAddr)` pairs, and a variadic tuple (`var_args!`) of the output channel names—in this case `clients`, `msgs`, and `errs`. The closure is basically a big -Rust pattern [`match`](https://doc.rust-lang.org/book/ch06-02-match.html), with one arm for each output channel name given in the variadic tuple. Note -that each output channel can have its own message type! Note also that we destructure the incoming `Message` types into component fields. 
(If we didn't we'd have to write boilerplate code to handle every possible `Message` type in every downstream pipeline!) - -The remainder of the server consists of two independent pipelines, the code to print out the flow graph, -and the code to run the flow graph. To follow along, paste the following into the bottom of your `src/server.rs` file: - -{getLines(server, 26, 55)} - -The first pipeline is one line long, -and is responsible for acknowledging requests from `clients`: it takes the address of the incoming `Message::ConnectRequest` -and sends a `ConnectResponse` back to that address. The second pipeline is responsible for broadcasting -all chat messages to all clients. This all-to-all pairing corresponds to the notion of a cartesian product -or [`cross_join`](../syntax/surface_ops_gen.md#cross_join) in Hydroflow. The `cross_join` operator takes two input -channels and produces a single output channel with a tuple for each pair of inputs, in this case it produces -`(Message, SocketAddr)` pairs. Conveniently, that is exactly the structure needed for sending to the `outbound_chan` sink! -We call the cross-join pipeline `broadcast` because it effectively broadcasts all messages to all clients. - -The mermaid graph for the server is below. The three branches of the `demux` are very clear toward the top. Note also the `tee` of the `clients` channel -for both `ClientResponse` and broadcasting, and the `union` of all outbound messages into `dest_sink_serde`. 
- -<>{/* TODO(mingwei): automate this mermaid generation */} - -```mermaid -%%{init:{'theme':'base','themeVariables':{'clusterBkg':'#ddd','clusterBorder':'#888'}}}%% -flowchart TD -classDef pullClass fill:#8af,stroke:#000,text-align:left,white-space:pre -classDef pushClass fill:#ff8,stroke:#000,text-align:left,white-space:pre -linkStyle default stroke:#aaa,stroke-width:4px,color:red,font-size:1.5em; -subgraph sg_1v1 ["sg_1v1 stratum 0"] - 8v1[\"(8v1) map(|addr| (Message::ConnectResponse, addr))"/]:::pullClass - 9v1[\"(9v1) cross_join()"/]:::pullClass - 1v1[\"(1v1) union()"/]:::pullClass - 2v1[/"(2v1) dest_sink_serde(outbound)"\]:::pushClass - 8v1--0--->1v1 - 9v1--1--->1v1 - 1v1--->2v1 - subgraph sg_1v1_var_broadcast ["var broadcast"] - 9v1 - end - subgraph sg_1v1_var_outbound_chan ["var outbound_chan"] - 1v1 - 2v1 - end -end -subgraph sg_2v1 ["sg_2v1 stratum 0"] - 3v1[\"(3v1) source_stream_serde(inbound)"/]:::pullClass - 4v1[\"(4v1) map(Result::unwrap)"/]:::pullClass - 5v1[/"
(5v1)
demux(|(msg, addr), var_args!(clients, msgs, errs)| match msg {
Message::ConnectRequest => clients.give(addr),
Message::ChatMsg { .. } => msgs.give(msg),
_ => errs.give(msg),
})
"\]:::pushClass - 6v1[/"(6v1) tee()"\]:::pushClass - 7v1[/"(7v1) for_each(|m| println!("Received unexpected message type: {:?}", m))"\]:::pushClass - 3v1--->4v1 - 4v1--->5v1 - 5v1--clients--->6v1 - 5v1--errs--->7v1 - subgraph sg_2v1_var_clients ["var clients"] - 6v1 - end - subgraph sg_2v1_var_inbound_chan ["var inbound_chan"] - 3v1 - 4v1 - 5v1 - end -end -5v1--msgs--->11v1 -6v1--0--->10v1 -6v1--1--->12v1 -10v1["(10v1) handoff"]:::otherClass -10v1--->8v1 -11v1["(11v1) handoff"]:::otherClass -11v1--0--->9v1 -12v1["(12v1) handoff"]:::otherClass -12v1--1--->9v1 -``` - -### `client.rs` -The chat client is not very different from the echo server client, with two new design patterns: - 1. a `initialize` operator that runs once to "bootstrap" action in the first tick - 2. the use of `defer_signal` and `persist` as a "gated buffer" to postpone sending messages. - -We also include a Rust helper routine `pretty_print_msg` for formatting output. - -The prelude of the file is almost the same as the echo server client, with the addition of the crate for -handling `colored` text output. This is followed by the `pretty_print_msg` function, which is fairly self-explanatory. -To follow along, start by replacing the contents of `src/client.rs` with the following: - -{getLines(client, 1, 27)} - -This brings us to the `run_client` function. As in `run_server` we begin by ensuring the server address -is supplied. We then have the hydroflow code starting with a standard pattern of a `union`ed `outbound_chan`, -and a `demux`ed `inbound_chan`. The client handles only two inbound `Message` types: `Message::ConnectResponse` and `Message::ChatMsg`. - -Paste the following to the bottom of `src/client.rs`: - -{getLines(client, 29, 45)} - -The core logic of the client consists of three dataflow pipelines shown below. Paste this into the -bottom of your `src/client.rs` file. - -{getLines(client, 47, 64)} - -1. The first pipeline is the "bootstrap" alluded to above. 
-It starts with the `initialize` operator that emits a single, opaque "unit" (`()`) value. This value is emitted when the client begins, which means -this pipeline runs once, immediately on startup, and generates a single `ConnectRequest` message which is sent to the server. - -2. The second pipeline reads from `source_stdin` and sends messages to the server. It differs from our echo-server example in the use of the [`defer_signal`](../syntax/surface_ops_gen.md#defer_signal) operator, which buffers up messages until a `ConnectResponse` is received. The flow assigned to the `lines` -variable takes chat messages from stdin and passes them to the `[input]` channel of the `defer_signal`. -The `defer_signal` operator buffers these messages until it gets an input on its `[signal]` channel. Then all `[input]` data buffered from previous ticks is passed along to the output, along with any data that streams in during the current tick. -In our chat example, we want messages to be sent to the server in *all subsequent ticks* after `ConnectResponse` is received! To enforce this, we need to send something on the `[signal]` channel of `defer_signal` every subsequent tick. We achieve this by interposing a `persist` between `inbound_chan[acks]` and `[signal]msg_send`. The [`persist`](../syntax/surface_ops_gen.md#persist) operator stores its input data in order across time, and replays its current contents -each tick. In this case it is storing `ConnectResponse` messages, of which we expect only one. The -`persist` op will replay this signal every tick after it is received, so the client will always send its messages to the server once connected. - -3. The final pipeline simply pretty-prints the messages received from the server. 
- -Finish up the file by pasting the code below for optionally generating the graph and running the flow: - -{getLines(client, 65, 85)} - -The client's mermaid graph looks a bit different than the server's, mostly because it routes some data to -the screen rather than to an outbound network channel. - -<>{/* TODO(mingwei): automate this mermaid generation */} - -```mermaid -%%{init:{'theme':'base','themeVariables':{'clusterBkg':'#ddd','clusterBorder':'#888'}}}%% -flowchart TD -classDef pullClass fill:#8af,stroke:#000,text-align:left,white-space:pre -classDef pushClass fill:#ff8,stroke:#000,text-align:left,white-space:pre -linkStyle default stroke:#aaa,stroke-width:4px,color:red,font-size:1.5em; -subgraph sg_1v1 ["sg_1v1 stratum 0"] - 8v1[\"(8v1) source_iter([()])"/]:::pullClass - 9v1[\"(9v1) map(|_m| (Message::ConnectRequest, server_addr))"/]:::pullClass - 12v1[\"(12v1) source_stdin()"/]:::pullClass - 13v1[\"
(13v1)
map(|l| Message::ChatMsg {
nickname: opts.name.clone(),
message: l.unwrap(),
ts: Utc::now(),
})
"/]:::pullClass - 10v1[\"(10v1) cross_join()"/]:::pullClass - 11v1[\"(11v1) map(|(msg, _)| (msg, server_addr))"/]:::pullClass - 1v1[\"(1v1) union()"/]:::pullClass - 2v1[/"(2v1) dest_sink_serde(outbound)"\]:::pushClass - 8v1--->9v1 - 9v1--0--->1v1 - 12v1--->13v1 - 13v1--0--->10v1 - 10v1--->11v1 - 11v1--1--->1v1 - 1v1--->2v1 - subgraph sg_1v1_var_lines ["var lines"] - 12v1 - 13v1 - end - subgraph sg_1v1_var_msg_send ["var msg_send"] - 10v1 - 11v1 - end - subgraph sg_1v1_var_outbound_chan ["var outbound_chan"] - 1v1 - 2v1 - end -end -subgraph sg_2v1 ["sg_2v1 stratum 0"] - 3v1[\"(3v1) source_stream_serde(inbound)"/]:::pullClass - 4v1[\"(4v1) map(Result::unwrap)"/]:::pullClass - 5v1[\"(5v1) map(|(m, _)| m)"/]:::pullClass - 6v1[/"
(6v1)
demux(|m, var_args!(acks, msgs, errs)| match m {
Message::ConnectResponse => acks.give(m),
Message::ChatMsg { .. } => msgs.give(m),
_ => errs.give(m),
})
"\]:::pushClass - 7v1[/"(7v1) for_each(|m| println!("Received unexpected message type: {:?}", m))"\]:::pushClass - 14v1[/"(14v1) for_each(pretty_print_msg)"\]:::pushClass - 3v1--->4v1 - 4v1--->5v1 - 5v1--->6v1 - 6v1--errs--->7v1 - 6v1--msgs--->14v1 - subgraph sg_2v1_var_inbound_chan ["var inbound_chan"] - 3v1 - 4v1 - 5v1 - 6v1 - end -end -6v1--acks--->15v1 -15v1["(15v1) handoff"]:::otherClass -15v1--1--->10v1 -``` - -## Running the example -As described in `hydroflow/hydroflow/example/chat/README.md`, we can run the server in one terminal, and run clients in additional terminals. -The server's `addr` and the client's `server-addr` need to agree or this won't work! - -<>{/* TODO(mingwei): automate this example output */} - -Fire up the server in terminal 1: -```console -#shell-command-next-line -cargo run -- --name "_" --role server --addr 127.0.0.1:12347 -``` - -Start client "alice" in terminal 2 and type some messages, and you'll see them -echoed back to you. This will appear in colored fonts in most terminals -(but unfortunately not in this markdown-based book!) -```console -#shell-command-next-line -cargo run -- --name "alice" --role client --server-addr 127.0.0.1:12347 -Listening on 127.0.0.1:50460 -Client live! -Hello (hello hello) ... is there anybody in here? -May 31, 5:12:23 alice: Hello (hello hello) ... is there anybody in here? -Just nod if you can hear me. -May 31, 5:12:36 alice: Just nod if you can hear me. -Is there anyone home? -May 31, 5:12:40 alice: Is there anyone home? -``` - -Now start client "bob" in terminal 3, and notice how he instantly receives the backlog of Alice's messages from the server's `cross_join`. -(The messages may not be printed in the same order as they were timestamped! The `cross_join` operator is not guaranteed to preserve order, nor -is the udp network. Fixing these issues requires extra client logic (perhaps using the [`sort()`](../syntax/surface_ops_gen#sort) operator) that we leave as an exercise to the reader.) 
-```console -#shell-command-next-line -cargo run -- --name "bob" --role client --server-addr 127.0.0.1:12347 -Listening on 127.0.0.1:49298 -Client live! -May 31, 5:12:23 alice: Hello (hello hello) ... is there anybody in here? -May 31, 5:12:36 alice: Just nod if you can hear me. -May 31, 5:12:40 alice: Is there anyone home? -``` -Now in terminal 3, Bob can respond: -```console -*nods* -May 31, 5:13:43 bob: *nods* -``` -and if we go back to terminal 2 we can see that Alice gets the message too: -```console -May 31, 5:13:43 bob: *nods* -``` diff --git a/docs/docs/hydroflow/quickstart/setup.md b/docs/docs/hydroflow/quickstart/setup.md index f3a2c477688d..027665899ba9 100644 --- a/docs/docs/hydroflow/quickstart/setup.md +++ b/docs/docs/hydroflow/quickstart/setup.md @@ -41,6 +41,7 @@ In this book we will be using the Hydroflow template generator, which we recomme as a starting point for your Hydroflow projects. For this purpose you will need to install the `cargo-generate` tool: ```bash +#shell-command-next-line cargo install cargo-generate ``` @@ -50,26 +51,35 @@ We recommend using VS Code with the `rust-analyzer` extension (and NOT the `Rust` extension). ## Setting up a Hydroflow Project -The easiest way to get started with Hydroflow is to begin with a template project. +The easiest way to get started with Hydroflow is to begin with a template project. Create a directory where you'd like to put that project, direct your terminal there and run: ```bash -cargo generate hydro-project/hydroflow-template +#shell-command-next-line +cargo generate gh:hydro-project/hydroflow template/hydroflow +``` +You will be prompted to name your project. The `cargo generate` command will create a subdirectory +with the relevant files and folders. + +`cd` into the generated folder, ensure the correct nightly version of rust is installed: +```bash +#shell-command-next-line +cd +#shell-command-next-line +rustup update ``` -You will be prompted to name your project. 
The `cargo generate` command will create a subdirectory -with the relevant files and folders. As part of generating the project, the `hydroflow` library will be downloaded as a dependency. You can then open the project in VS Code or IDE of your choice, or you can simply build the template project with `cargo build`. ```bash -cd +#shell-command-next-line cargo build ``` This should return successfully. The template provides a simple working example of a Hydroflow program. As a sort of "hello, world" of distributed systems, it implements an "echo server" that -simply echoes back the messages you sent it; it also implements a client to test the server. +simply echoes back the messages you sent it; it also implements a client to test the server. We will replace the code in that example with our own, but it's a good idea to run it first to make sure everything is working. :::note @@ -104,7 +114,7 @@ Hello! This book will assume you are using the template project, but some Rust experts may want to get started with Hydroflow by cloning and working in the -repository directly. +repository directly. You should fork the repository if you want to push your changes. @@ -120,7 +130,7 @@ will provide inline type and error messages, code completion, etc. To work with the repository, it's best to start with an "example", found in the [`hydroflow/examples` folder](https://github.com/hydro-project/hydroflow/tree/main/hydroflow/examples). -The simplest example is the +The simplest example is the ['hello world'](https://github.com/hydro-project/hydroflow/blob/main/hydroflow/examples/hello_world/main.rs) example; the simplest example with networking is the [`echo server`](https://github.com/hydro-project/hydroflow/blob/main/hydroflow/examples/echoserver/main.rs). @@ -129,4 +139,4 @@ The Hydroflow repository is set up as a [workspace](https://doc.rust-lang.org/bo i.e. a repo containing a bunch of separate packages, `hydroflow` is just the main one. 
So if you want to work in a proper separate cargo package, you can create one and add it into the [root `Cargo.toml`](https://github.com/hydro-project/hydroflow/blob/main/Cargo.toml), -much like the [provided template](https://github.com/hydro-project/hydroflow-template/blob/main/Cargo.toml). +much like the [provided template](https://github.com/hydro-project/hydroflow/tree/main/template/hydroflow#readme). diff --git a/docs/docs/hydroflow/syntax/surface_data.mdx b/docs/docs/hydroflow/syntax/surface_data.mdx index ae4e2b47094a..db75e77d692a 100644 --- a/docs/docs/hydroflow/syntax/surface_data.mdx +++ b/docs/docs/hydroflow/syntax/surface_data.mdx @@ -49,7 +49,7 @@ in from outside the flow: {exampleCodeInput} -Sometimes we want to trigger activity based on timing, not data. To achieve this, we can use the [`source_interval()`](./surface_ops_gen.md#source_interval) operator, which takes a `Duration` `d` as an argument, and outputs a Tokio time Instant after every `d` units of time pass. +Sometimes we want to trigger activity based on timing, not data. To achieve this, we can use the [`source_interval()`](./surface_ops_gen.md#source_interval) operator, which takes a `Duration` `d` as an argument, and outputs a unit `()` after every `d` units of time pass. ## Destinations As duals to our data source operators, we also have data destination operators. 
The dest operators you'll likely use diff --git a/docs/docs/hydroflow/syntax/surface_flows.md b/docs/docs/hydroflow/syntax/surface_flows.mdx similarity index 61% rename from docs/docs/hydroflow/syntax/surface_flows.md rename to docs/docs/hydroflow/syntax/surface_flows.mdx index 1d55b2c756de..804c44683f0e 100644 --- a/docs/docs/hydroflow/syntax/surface_flows.md +++ b/docs/docs/hydroflow/syntax/surface_flows.mdx @@ -1,22 +1,25 @@ --- sidebar_position: 2 --- +import CodeBlock from '@theme/CodeBlock'; +import example1Code from '!!raw-loader!../../../../hydroflow/examples/example_surface_flows_1_basic.rs'; +import example2Code from '!!raw-loader!../../../../hydroflow/examples/example_surface_flows_2_varname.rs'; +import example3Code from '!!raw-loader!../../../../hydroflow/examples/example_surface_flows_3_ports.rs'; +import example3Out from '!!raw-loader!../../../../hydroflow/tests/snapshots/surface_examples__example_surface_flows_3_ports.snap'; +import example4Code from '!!raw-loader!../../../../hydroflow/examples/example_surface_flows_4_context.rs'; +import example4Out from '!!raw-loader!../../../../hydroflow/tests/snapshots/surface_examples__example_surface_flows_4_context.snap'; +import { getLines, extractMermaid, extractOutput } from '../../../src/util'; # Flow Syntax Flows consist of named _operators_ that are connected via flow _edges_ denoted by `->`. The example below uses the [`source_iter`](./surface_ops_gen.md#source_iter) operator to generate two strings from a Rust `vec`, the [`map`](./surface_ops_gen.md#map) operator to apply some Rust code to uppercase each string, and the [`for_each`](./surface_ops_gen.md#for_each) operator to print each string to stdout. -```rust,ignore -source_iter(vec!["Hello", "world"]) - -> map(|x| x.to_uppercase()) -> for_each(|x| println!("{}", x)); -``` +{getLines(example1Code, 5, 6)} Flows can be assigned to variable names for convenience. 
E.g, the above can be rewritten as follows: -```rust,ignore -source_iter(vec!["Hello", "world"]) -> upper_print; -upper_print = map(|x| x.to_uppercase()) -> for_each(|x| println!("{}", x)); -``` +{getLines(example2Code, 5, 6)} + Note that the order of the statements (lines) doesn't matter. In this example, `upper_print` is referenced before it is assigned, and that is completely OK and better matches the flow of data, making the program more understandable. @@ -35,49 +38,12 @@ allow you to choose arbitrary strings, which help you make your code and dataflo (e.g. `my_tee[print]` and `my_tee[continue]`). Here is an example that tees one flow into two, handles each separately, and then unions them to print out the contents in both lowercase and uppercase: -```rust,ignore -my_tee = source_iter(vec!["Hello", "world"]) -> tee(); -my_tee -> map(|x| x.to_uppercase()) -> [low_road]my_union; -my_tee -> map(|x| x.to_lowercase()) -> [high_road]my_union; -my_union = union() -> for_each(|x| println!("{}", x)); -``` +{getLines(example3Code, 5, 8)} + Here is a visualization of the flow that was generated. Note that the outbound labels to `my_tee` were auto-generated, but the inbound labels to `my_union` were specified by the code above: -```mermaid -%%{init:{'theme':'base','themeVariables':{'clusterBkg':'#ddd','clusterBorder':'#888'}}}%% -flowchart TD -classDef pullClass fill:#02f,color:#fff,stroke:#000 -classDef pushClass fill:#ff0,stroke:#000 -linkStyle default stroke:#aaa,stroke-width:4px,color:red,font-size:1.5em; -subgraph sg_1v1 ["sg_1v1 stratum 0"] - 1v1[\"(1v1) source_iter(vec! ["Hello", "world"])"/]:::pullClass - 2v1[/"(2v1) tee()"\]:::pushClass - 1v1--->2v1 - subgraph sg_1v1_var_my_tee ["var my_tee"] - 1v1 - 2v1 - end -end -subgraph sg_2v1 ["sg_2v1 stratum 0"] - 3v1[\"(3v1) map(| x : & str | x.to_uppercase())"/]:::pullClass - 4v1[\"(4v1) map(| x : & str | x.to_lowercase())"/]:::pullClass - 5v1[\"(5v1) union()"/]:::pullClass - 6v1[/"(6v1) for_each(| x | println! 
("{}", x))"\]:::pushClass - 3v1--low road--->5v1 - 4v1--high road--->5v1 - 5v1--->6v1 - subgraph sg_2v1_var_my_union ["var my_union"] - 5v1 - 6v1 - end -end -2v1--0--->7v1 -2v1--1--->8v1 -7v1["(7v1) handoff"]:::otherClass -7v1--->3v1 -8v1["(8v1) handoff"]:::otherClass -8v1--->4v1 -``` + + Hydroflow compiled this flow into two subgraphs called _compiled components_, connected by _handoffs_. You can ignore these details unless you are interested in low-level performance tuning; they are explained in the discussion of [in-out trees](../architecture/in-out_trees.md). @@ -87,9 +53,7 @@ Closures inside surface syntax operators have access to a special `context` obje access to scheduling, timing, and state APIs. The object is accessible as a shared reference (`&Context`) via the special name `context`. [Here is the full API documentation for `Context`](https://hydro-project.github.io/hydroflow/doc/hydroflow/scheduled/context/struct.Context.html). +{getLines(example4Code, 5, 6)} -```rust,ignore -source_iter([()]) - -> for_each(|()| println!("Current tick: {}, stratum: {}", context.current_tick(), context.current_stratum())); -// Current tick: 0, stratum: 0 -``` +Output: +{extractOutput(example4Out, true)} diff --git a/docs/docs/hydroflow_plus/aggregations.mdx b/docs/docs/hydroflow_plus/aggregations.mdx index 2951c1d097f5..ae61ad9611ea 100644 --- a/docs/docs/hydroflow_plus/aggregations.mdx +++ b/docs/docs/hydroflow_plus/aggregations.mdx @@ -21,7 +21,7 @@ To specify this **window**, Hydroflow+ offers two operators, `tick_batch()` and For example, consider a pipelined aggregation across two processes. 
We can sum up elements on the first process in a batched manner using `tick_batch()`, then sum up the results on the second process in an unbounded manner using `all_ticks()`: ```rust -let root_stream = process.source_stream(q!(1..=10)); +let root_stream = flow.source_stream(&process, q!(1..=10)); root_stream .tick_batch() .fold(q!(|| 0), q!(|acc, x| *acc += x)) diff --git a/docs/docs/hydroflow_plus/clusters.mdx b/docs/docs/hydroflow_plus/clusters.mdx index c85e165a61a1..98033ddd541b 100644 --- a/docs/docs/hydroflow_plus/clusters.mdx +++ b/docs/docs/hydroflow_plus/clusters.mdx @@ -11,7 +11,7 @@ Clusters solve this by providing a nearly-identical API to processes, but repre Instantiating clusters is done using the `cluster` method on `FlowBuilder`, taking a `ClusterSpec`: ```rust pub fn my_flow<'a, D: Deploy<'a>>( - flow: &'a FlowBuilder<'a, D>, + flow: &FlowBuilder<'a, D>, cluster_spec: &impl ClusterSpec<'a, D> ) { let cluster = flow.cluster(cluster_spec); @@ -23,7 +23,7 @@ This API follows the same pattern as processes, where a cluster spec represents Instantiating streams on clusters uses the same APIs as streams: `source_iter` and `source_stream` are both available. But when using these APIs, the root streams will be instantiated on _all_ instances in the cluster. ```rust -let stream = cluster.source_iter(q!(vec![1, 2, 3])); +let stream = flow.source_iter(&cluster, q!(vec![1, 2, 3])); stream.for_each(q!(|x| println!("{}", x))) // will print 1, 2, 3 on **each** instance @@ -36,7 +36,7 @@ Elements in a cluster are identified by a **cluster ID** (a `u32`). To get the I This can then be passed into `source_iter` to load the IDs into the graph. ```rust -let stream = process.source_iter(cluster.ids()).cloned(); +let stream = flow.source_iter(&process, flow.cluster_members(&cluster)).cloned(); ``` ### One-to-Many @@ -44,8 +44,8 @@ When sending data from a process to a cluster, the source must be a stream of tu This is useful for partitioning data across instances. 
For example, we can partition a stream of elements in a round-robin fashion by using `enumerate` to add a sequence number to each element, then using `send_bincode` to send each element to the instance with the matching sequence number: ```rust -let cluster_ids = cluster.ids(); -let stream = process.source_iter(q!(vec![123, 456, 789])) +let cluster_ids = flow.cluster_members(&cluster); +let stream = flow.source_iter(&process, q!(vec![123, 456, 789])) .enumerate() .map(q!(|(i, x)| ( i % cluster_ids.len() as u32, @@ -57,7 +57,7 @@ let stream = process.source_iter(q!(vec![123, 456, 789])) To broadcast data to all instances in a cluster, use `broadcast_{bincode,bytes}`, which acts as a shortcut for the cross product. ```rust -let stream = process.source_iter(q!(vec![123, 456, 789])) +let stream = flow.source_iter(&process, q!(vec![123, 456, 789])) .broadcast_bincode(cluster); ``` @@ -66,14 +66,14 @@ In the other direction, sending data from a cluster to a process, we have a stre This is useful for aggregating data from multiple instances into a single stream. For example, we can use `send_bincode` to send data from all instances to a single process, and then print them all out: ```rust -let stream = cluster.source_iter(q!(vec![123, 456, 789])) +let stream = flow.source_iter(&cluster, q!(vec![123, 456, 789])) .send_bincode(process) .for_each(q!(|(id, x)| println!("{}: {}", id, x))); ``` If you don't care which instance sent the data, you can use `send_{bincode,bytes}_interleaved`, where the recipient receives a stream of `T` elements, but the elements received from different instances will be interleaved. ```rust -let stream = cluster.source_iter(q!(vec![123, 456, 789])) +let stream = flow.source_iter(&cluster, q!(vec![123, 456, 789])) .send_bincode_interleaved(process) .for_each(q!(|x| println!("{}", x))); ``` @@ -83,7 +83,7 @@ Finally, when sending data from one cluster to another (or to itself as in distr We can use the same shortcuts as before. 
For example, we can use `broadcast_bincode_interleaved` to send data from all instances in a cluster to all instances in another cluster, and then print them all out: ```rust -let stream = cluster1.source_iter(q!(vec![123, 456, 789])) +let stream = flow.source_iter(&cluster1, q!(vec![123, 456, 789])) .broadcast_bincode_interleaved(cluster2) .for_each(q!(|x| println!("{}", x))); ``` diff --git a/docs/docs/hydroflow_plus/cycles.mdx b/docs/docs/hydroflow_plus/cycles.mdx index fd393c0db093..2682cc01dd8d 100644 --- a/docs/docs/hydroflow_plus/cycles.mdx +++ b/docs/docs/hydroflow_plus/cycles.mdx @@ -7,10 +7,10 @@ Hydroflow+ supports cyclic graphs, which are useful for iterative computations o Because streams are represented as values when constructing a Hydroflow+ graph, we can't directly create cycles since that would require a forward reference. Instead, Hydroflow+ offers an API to create a cycle by using a _placeholder_ stream, which is a stream that can be used as a placeholder for a stream that will be created later. -We can create a cycle by using the `cycle` method on a process or cluster. This returns a tuple of two values: a `HfCycle` value that can be used to complete the cycle later and the placeholder stream. +We can create a cycle by using the `cycle` method on flow with a process or cluster. This returns a tuple of two values: a `HfCycle` value that can be used to complete the cycle later and the placeholder stream. ```rust -let (complete_cycle, cycle_placeholder) = process.cycle(); +let (complete_cycle, cycle_placeholder) = flow.cycle(&process); ``` For example, consider the classic graph reachability problem, which computes the nodes reachable from a given set of roots in a directed graph. This can be modeled as an iterative fixpoint computation where we start with the roots, then repeatedly add the children of each node to the set of reachable nodes until we reach a fixpoint. 
@@ -18,10 +18,10 @@ For example, consider the classic graph reachability problem, which computes the In Hydroflow+, we can implement this using cycles: ```rust -let roots = process.source_stream(roots); -let edges = process.source_stream(edges); +let roots = flow.source_stream(&process, roots); +let edges = flow.source_stream(&process, edges); -let (complete_reached_nodes, reached_nodes) = process.cycle(); +let (complete_reached_nodes, reached_nodes) = flow.cycle(&process); let reach_iteration = roots .union(&reached_nodes) diff --git a/docs/docs/hydroflow_plus/index.mdx b/docs/docs/hydroflow_plus/index.mdx index dde0e1896cac..fe0edfd2bef4 100644 --- a/docs/docs/hydroflow_plus/index.mdx +++ b/docs/docs/hydroflow_plus/index.mdx @@ -14,12 +14,15 @@ Hydroflow+ requires a particular workspace setup, as any crate that uses Hydrofl #shell-command-next-line cargo install cargo-generate #shell-command-next-line -cargo generate hydro-project/hydroflow-plus-template +cargo generate hydro-project/hydroflow template/hydroflow_plus ``` -Then, you can test the example dataflow: - +`cd` into the generated folder, ensure the correct nightly version of rust is installed, and test the generated project: ```bash #shell-command-next-line +cd +#shell-command-next-line +rustup update +#shell-command-next-line cargo test ``` diff --git a/docs/docs/hydroflow_plus/process_streams.mdx b/docs/docs/hydroflow_plus/process_streams.mdx index ffb8082171da..e99c6b49971e 100644 --- a/docs/docs/hydroflow_plus/process_streams.mdx +++ b/docs/docs/hydroflow_plus/process_streams.mdx @@ -16,7 +16,7 @@ To create a process, we must take a `ProcessSpec` as an argument to our function ```rust pub fn my_flow<'a, D: Deploy<'a>>( - flow: &'a FlowBuilder<'a, D>, + flow: &FlowBuilder<'a, D>, process_spec: &impl ProcessSpec<'a, D> ) { ... @@ -41,7 +41,7 @@ Root streams are created using methods available on an an instantiated process. To create a stream from a Rust iterator, use `source_iter`. 
This is useful for loading static data into the graph. Each element of the iterator will be emitted _exactly once_ in the _first tick_ of execution (see [Aggregations and Ticks](./aggregations.mdx)). ```rust -let stream = process.source_iter(q!(vec![1, 2, 3])); +let stream = flow.source_iter(&process, q!(vec![1, 2, 3])); ``` #### `source_stream` @@ -52,7 +52,7 @@ pub fn my_flow<'a, D: Deploy<'a>>( ..., my_stream: RuntimeData> ) { - let stream = process.source_stream(my_stream); + let stream = flow.source_stream(&process, my_stream); ... } ``` @@ -66,13 +66,13 @@ If sending a type that supports serialization using `serde`, use `send_bincode`, let process0 = flow.process(process_spec); let process1 = flow.process(process_spec); -let stream0 = process0.source_iter(...); +let stream0 = flow.source_iter(&process0, ...); let stream1 = stream0.send_bincode(process1); ``` To use custom serializers, you can use the `send_bytes` method to send a stream of `Bytes` values. ```rust -let stream0 = process0.source_iter(...); +let stream0 = flow.source_iter(&process0, ...); let stream1 = stream0.send_bytes(process1); ``` diff --git a/docs/docs/hydroflow_plus/quickstart/clusters.mdx b/docs/docs/hydroflow_plus/quickstart/clusters.mdx index d3e922ab4019..454c636a2883 100644 --- a/docs/docs/hydroflow_plus/quickstart/clusters.mdx +++ b/docs/docs/hydroflow_plus/quickstart/clusters.mdx @@ -12,26 +12,31 @@ We start with the standard architecture, with a flow graph and a runtime entrypo :::tip -If you have been following along with the Hydroflow+ template, you'll now need to declare a new module for this example. Create a new file at `flow/src/broadcast.rs` and add the following to `flow/src/lib.rs`: +If you have been following along with the Hydroflow+ template, you'll now need to declare a new module for this example. 
Create a new file at `src/broadcast.rs` and add the following to `src/lib.rs`: -```rust title="flow/src/lib.rs" +```rust title="src/lib.rs" pub mod broadcast; ``` ::: -```rust title="flow/src/broadcast.rs" +```rust title="src/broadcast.rs" use hydroflow_plus::*; use stageleft::*; -pub fn broadcast<'a, D: Deploy<'a>>( - flow: &'a FlowBuilder<'a, D>, - process_spec: &impl ProcessSpec<'a, D>, - cluster_spec: &impl ClusterSpec<'a, D> -) { - let leader = flow.process(process_spec); - let workers = flow.cluster(cluster_spec); +pub struct Leader {} +pub struct Workers {} + +pub fn broadcast( + flow: &FlowBuilder, +) -> (Process, Cluster) { + let leader = flow.process(); + let workers = flow.cluster(); + + // ... + + (leader, workers) } ``` @@ -39,7 +44,7 @@ pub fn broadcast<'a, D: Deploy<'a>>( When sending data between individual processes, we used the `send_bincode` operator. When sending data from a process to a cluster, we can use the `broadcast_bincode` operator instead. ```rust -let data = leader.source_iter(q!(0..10)); +let data = flow.source_iter(&leader, q!(0..10)); data .broadcast_bincode(&workers) .for_each(q!(|n| println!("{}", n))); @@ -48,83 +53,30 @@ data The `Stream` returned by `broadcast_bincode` represents the data received on _each_ process in the cluster. Because all processes in a cluster run the exact same computation, we can then use the `for_each` operator directly on that stream to print the data on each process. ## Deploying Graphs with Clusters -To deploy this application, we must set up the runtime entrypoint and the Hydro Deploy configuration. The entrypoint looks similar to before, but now uses the CLI data to instantiate the cluster as well. 
- -```rust title="flow/src/broadcast.rs" -use hydroflow_plus::util::cli::HydroCLI; -use hydroflow_plus_cli_integration::{CLIRuntime, HydroflowPlusMeta}; - -#[stageleft::entry] -pub fn broadcast_runtime<'a>( - flow: &'a FlowBuilder<'a, CLIRuntime>, - cli: RuntimeData<&'a HydroCLI>, -) -> impl Quoted<'a, Hydroflow<'a>> { - broadcast(flow, &cli, &cli); - flow.build(q!(cli.meta.subgraph_id)) -} -``` - -Our binary (`src/bin/broadcast.rs`) looks similar to before: +To deploy this application, we must set up the Hydro Deploy configuration as before. Our deployment script (`examples/broadcast.rs`) instantiates multiple services for the leader process and the workers. Since this script defines the physical deployment, we explicitly instantiate multiple services for the cluster spec, returning a `Vec` of services. We also set a display name for each service so that we can tell them apart in the logs. -```rust title="flow/src/bin/broadcast.rs" -#[tokio::main] -async fn main() { - let ports = hydroflow_plus::util::cli::init().await; - - hydroflow_plus::util::cli::launch_flow( - flow::broadcast::broadcast_runtime!(&ports) - ).await; -} -``` - -Finally, our deployment script (`examples/broadcast.rs`) instantiates multiple services for the leader process and the workers. Because we are sharing the deployment across multiple builders, we wrap it in a `RefCell`. Since this script defines the physical deployment, we explicitly instantiate multiple services for the cluster spec, returning a `Vec` of services. We also set a display name for each service so that we can tell them apart in the logs. 
- -```rust title="flow/examples/broadcast.rs" use std::cell::RefCell; use hydro_deploy::{Deployment, HydroflowCrate}; -use hydroflow_plus_cli_integration::{DeployProcessSpec, DeployClusterSpec}; +use hydroflow_plus_deploy::TrybuildHost; #[tokio::main] async fn main() { - let deployment = RefCell::new(Deployment::new()); - let localhost = deployment.borrow_mut().Localhost(); - let profile = "dev"; + let mut deployment = Deployment::new(); let builder = hydroflow_plus::FlowBuilder::new(); - flow::broadcast::broadcast( - &builder, - &DeployProcessSpec::new(|| { - let mut deployment = deployment.borrow_mut(); - deployment.add_service( - HydroflowCrate::new(".", localhost.clone()) - .bin("broadcast") - .profile(profile) - .display_name("leader"), - ) - }), - &DeployClusterSpec::new(|| { - let mut deployment = deployment.borrow_mut(); - (0..2) - .map(|idx| { - deployment.add_service( - HydroflowCrate::new(".", localhost.clone()) - .bin("broadcast") - .profile(profile) - .display_name(format!("worker/{}", idx)), - ) - }) - .collect() - }), - ); - - let mut deployment = deployment.into_inner(); - - deployment.deploy().await.unwrap(); - - deployment.start().await.unwrap(); - - tokio::signal::ctrl_c().await.unwrap() + let (leader, workers) = flow::broadcast::broadcast(&builder); + + builder.with_default_optimize() + .with_process(&leader, TrybuildHost::new(deployment.Localhost())) + .with_cluster(&workers, (0..2) + .map(|_idx| TrybuildHost::new(deployment.Localhost())) + .collect::<Vec<_>>() + ) + .deploy(&mut deployment); + + deployment.run_ctrl_c().await.unwrap(); } ``` @@ -132,7 +84,7 @@ If we run this script, we should see the following output: ```bash #shell-command-next-line -cargo run -p flow --example broadcast +cargo run --example broadcast [worker/0] 0 [worker/1] 0 [worker/0] 1 diff --git a/docs/docs/hydroflow_plus/quickstart/distributed.mdx b/docs/docs/hydroflow_plus/quickstart/distributed.mdx index a7f4af0b430f..a8ae364060bd 100644 --- 
a/docs/docs/hydroflow_plus/quickstart/distributed.mdx +++ b/docs/docs/hydroflow_plus/quickstart/distributed.mdx @@ -1,115 +1,27 @@ --- sidebar_position: 2 --- +import CodeBlock from '@theme/CodeBlock'; +import firstTenDistSrc from '!!raw-loader!../../../../template/hydroflow_plus/src/first_ten_distributed.rs'; +import firstTenDistExample from '!!raw-loader!../../../../template/hydroflow_plus/examples/first_ten_distributed.rs'; +import { getLines, extractOutput } from '../../../src/util'; # Adding Distribution -Continuing from our previous example, we will now look at how to extend our program to run on multiple processes. Recall that our previous flow graph looked like this: +Continuing from our previous example, we will now look at how to deploy our program to run on multiple processes. -```rust title="flow/src/first_ten.rs" -use hydroflow_plus::*; -use stageleft::*; +We achieve this by using [Hydro Deploy](../../deploy/index.md). Hydroflow+ integrates with Hydro Deploy to automatically construct the topology based on the flow graph. We can create a new file `examples/first_ten_distributed.rs` with the following contents: -pub fn first_ten<'a, D: LocalDeploy<'a>>( - flow: &'a FlowBuilder<'a, D>, - process_spec: &impl ProcessSpec<'a, D> -) { - let process = flow.process(process_spec); - let numbers = process.source_iter(q!(0..10)); - numbers.for_each(q!(|n| println!("{}", n))); -} -``` - -## The Flow Graph -Let's extend this example to print the numbers on a separate process. First, we need to specify that our flow graph will involve the network. We do this by replacing the `LocalDeploy<'a>` trait bound with the general `Deploy<'a>`. 
Then, we can use the `process_spec` to create a second process: -```rust title="flow/src/first_ten_distributed.rs" -use hydroflow_plus::*; -use stageleft::*; - -pub fn first_ten_distributed<'a, D: Deploy<'a>>( - flow: &'a FlowBuilder<'a, D>, - process_spec: &impl ProcessSpec<'a, D> -) { - let process = flow.process(process_spec); - let second_process = flow.process(process_spec); -} -``` - -Now, we can distribute our dataflow by using the `send_bincode` operator to mark where the data should be sent using bincode serialization. - -```rust -let numbers = process.source_iter(q!(0..10)); -numbers - .send_bincode(&second_process) - .for_each(q!(|n| println!("{}", n))); -``` - -## The Runtime -Now that our graph spans multiple processes, our runtime entrypoint will involve multiple subgraphs. This means we can't get away with `build_single`. Instead, we must take the subgraph ID as a runtime parameter (`subgraph_id`) to select the appropriate graph. In addition, our dataflow involves the network, so we take a `HydroCLI` runtime parameter (`cli`) so that processes can look up their network connections and instantiate the flow graph with access to it. 
- -```rust title="flow/src/first_ten_distributed.rs" -use hydroflow_plus::util::cli::HydroCLI; -use hydroflow_plus_cli_integration::{CLIRuntime, HydroflowPlusMeta}; +{firstTenDistExample} -#[stageleft::entry] -pub fn first_ten_distributed_runtime<'a>( - flow: &'a FlowBuilder<'a, CLIRuntime>, - cli: RuntimeData<&'a HydroCLI>, -) -> impl Quoted<'a, Hydroflow<'a>> { - first_ten_distributed(flow, &cli); - flow.build(q!(cli.meta.subgraph_id)) -} -``` - -The corresponding binary in `src/bin/first_ten_distributed.rs` then instantiates the CLI and reads the process ID from the command line arguments: - -```rust title="flow/src/bin/first_ten_distributed.rs" -#[tokio::main] -async fn main() { - hydroflow_plus::util::cli::launch( - |ports| flow::first_ten_distributed_runtime!(ports) - ).await; -} -``` - -## The Deployment -Finally, we need to deploy our dataflow with the appropriate network topology. We achieve this by using [Hydro Deploy](../../deploy/index.md). Hydroflow+ integrates with Hydro Deploy to automatically construct the topology based on the flow graph. 
We can create a new file `examples/first_ten_distributed.rs` with the following contents: - -```rust title="flow/examples/first_ten_distributed.rs" -use hydro_deploy::{Deployment, HydroflowCrate}; -use hydroflow_plus_cli_integration::DeployProcessSpec; - -#[tokio::main] -async fn main() { - let mut deployment = Deployment::new(); - let localhost = deployment.Localhost(); - - let builder = hydroflow_plus::FlowBuilder::new(); - flow::first_ten::first_ten_distributed( - &builder, - &DeployProcessSpec::new(|| { - deployment.add_service( - HydroflowCrate::new(".", localhost.clone()) - .bin("first_ten_distributed") - .profile("dev"), - ) - }), - ); - - deployment.deploy().await.unwrap(); - - deployment.start().await.unwrap(); - - tokio::signal::ctrl_c().await.unwrap() -} -``` - -Most importantly, we specify a `DeployProcessSpec`, which takes a closure that constructs a Hydro Deploy service for each process in the flow graph. In our case, we use the `HydroflowCrate` service type, which deploys a Hydroflow+ binary. We also specify the process ID as a command line argument, which is read by our runtime binary. +Most importantly, we specify a deployment spec for each process in the flow graph. In our case, we use the `TrybuildHost` service type, which compiles and deploys a Hydroflow+ graph. We can then run our distributed dataflow with: +<>{/* TODO(mingwei): grab this output from a tested snapshot file */} + ```bash #shell-command-next-line -cargo run -p flow --example first_ten_distributed +cargo run --example first_ten_distributed [service/1] 0 [service/1] 1 [service/1] 2 diff --git a/docs/docs/hydroflow_plus/quickstart/index.mdx b/docs/docs/hydroflow_plus/quickstart/index.mdx index 361cbf7ad6f7..d366e2f61046 100644 --- a/docs/docs/hydroflow_plus/quickstart/index.mdx +++ b/docs/docs/hydroflow_plus/quickstart/index.mdx @@ -7,8 +7,16 @@ To get started with a new project, we'll use the Hydroflow+ template. 
The templa #shell-command-next-line cargo install cargo-generate #shell-command-next-line -cargo generate hydro-project/hydroflow-plus-template +cargo generate gh:hydro-project/hydroflow template/hydroflow_plus # Project Name: my-example-project #shell-command-next-line cd my-example-project ``` + +After `cd`ing into the generated folder, ensure the correct nightly version of rust is installed and test the generated project: +```bash +#shell-command-next-line +rustup update +#shell-command-next-line +cargo test +``` \ No newline at end of file diff --git a/docs/docs/hydroflow_plus/quickstart/structure.mdx b/docs/docs/hydroflow_plus/quickstart/structure.mdx index 038bde1941df..54c13f1588a0 100644 --- a/docs/docs/hydroflow_plus/quickstart/structure.mdx +++ b/docs/docs/hydroflow_plus/quickstart/structure.mdx @@ -1,12 +1,14 @@ --- sidebar_position: 1 --- +import CodeBlock from '@theme/CodeBlock'; +import firstTenSrc from '!!raw-loader!../../../../template/hydroflow_plus/src/first_ten_distributed.rs'; +import { getLines, extractOutput } from '../../../src/util'; # Your First Dataflow -Hydroflow+ programs require special structure to support code generation and distributed deployments. There are three main components of a Hydroflow+ program: +Hydroflow+ programs require special structure to support code generation and distributed deployments. There are two main components of a Hydroflow+ program: - The **flow graph** describes the dataflow logic of the program. -- The **runtime** wraps the dataflow in an executable Rust binary. -- The **deployment** describes how to map the flow graph to instances of the runtime. This is only needed for distributed deployments. +- The **deployment** describes how to map the flow graph to a physical deployment. :::tip @@ -16,7 +18,17 @@ We recommend using the Hydroflow+ template to get started with a new project. 
Th #shell-command-next-line cargo install cargo-generate #shell-command-next-line -cargo generate hydro-project/hydroflow-plus-template +cargo generate gh:hydro-project/hydroflow template/hydroflow_plus +``` + +`cd` into the generated folder, ensure the correct nightly version of rust is installed, and test the generated project: +```bash +#shell-command-next-line +cd +#shell-command-next-line +rustup update +#shell-command-next-line +cargo test ``` ::: @@ -26,84 +38,21 @@ Let's look a minimal example of a Hydroflow+ program. We'll start with a simple ## The Flow Graph -```rust title="flow/src/first_ten.rs" -use hydroflow_plus::*; -use stageleft::*; - -pub fn first_ten<'a, D: LocalDeploy<'a>>( - flow: &'a FlowBuilder<'a, D>, - process_spec: &impl ProcessSpec<'a, D> -) {} -``` -To build a Hydroflow+ application, we need to define a dataflow that spans multiple processes. The `FlowBuilder` parameter captures the global dataflow, while the `process_spec` variable defines how to construct the processes where the dataflow will be executed. For now, we will only use the `ProcessSpec` once, to add a single process to our global dataflow. +{getLines(firstTenSrc, 1, 17)} -```rust -pub fn first_ten<'a, D: LocalDeploy<'a>>( - flow: &'a FlowBuilder<'a, D>, - process_spec: &impl ProcessSpec<'a, D> -) { - let process = flow.process(process_spec); -} -``` +To build a Hydroflow+ application, we need to define a dataflow that spans multiple processes. The `FlowBuilder` parameter captures the global dataflow, and we can instantiate processes to define boundaries between distributed logic. When defining a process, we also pass in a type parameter to a "tag" that identifies the process. When transforming streams, the Rust type system will guarantee that we are operating on streams on the same process. + +{getLines(firstTenSrc, 8, 9)} Now, we can build out the dataflow to run on this process. Every dataflow starts at a source that is bound to a specific process. 
First, we instantiate a stream that emits the first 10 natural numbers. -```rust -let numbers = process.source_iter(q!(0..10)); // : Stream<_, i32, _, _> -``` +{getLines(firstTenSrc, 11)} In Hydroflow+, whenever there are snippets of Rust code passed to operators (like `source_iter`, `map`, or `for_each`), we use the `q!` macro to mark them. For example, we may use Rust snippets to define static sources of data or closures that transform them. To print out these numbers, we can use the `for_each` operator (note that the body of `for_each` is a closure wrapped in `q!`): -```rust -numbers.for_each(q!(|n| println!("{}", n))); -``` - -## The Runtime -Next, we need to instantiate our dataflow into a runnable Rust binary. We do this by defining a [Stageleft entrypoint](../stageleft.mdx) for the graph, and then invoking the entrypoint inside a separate Rust binary. - -To define the entrypoint, we use the `#[stageleft::entry]` macro, which takes the graph being built and returns a generated Hydroflow program. We define this as a new function `first_ten_runtime`. In this first example, we assume there is a single process so that we do not need to specify how the graph is laid out across processes. We specify this by using the `SingleProcessGraph` type parameter on `FlowBuilder`. - -Having done that, we can use some simple defaults for "distributing" this single-process deployment. First, we use `()` as an argument to `first_ten` to choose the default process spec. Then we use the `build_single` method to generate the Hydroflow program. - - -```rust title="flow/src/first_ten.rs" -#[stageleft::entry] -pub fn first_ten_runtime<'a>( - flow: &'a FlowBuilder<'a, SingleProcessGraph> -) -> impl Quoted<'a, Hydroflow<'a>> { - first_ten(flow, &() /* for a single process graph */); - flow.build_single() // : impl Quoted<'a, Hydroflow<'a>> -} -``` - -Finally, it's time to write our `main` function. -Stageleft entries are usable as macros from other programs. 
In our case, we will instantiate our entrypoint from the Rust binary for our dataflow. We can create a new file `src/bin/first_ten.rs` with the following contents. Note that Hydroflow+ requires that we use `tokio` and its `async` function specification: - -```rust title="flow/src/bin/first_ten.rs" -#[tokio::main] -async fn main() { - flow::first_ten::first_ten_runtime!().run_async().await; -} -``` - -We can now run this binary to see the output of our dataflow: - -```bash -#shell-command-next-line -cargo run -p flow --bin first_ten -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -``` +{getLines(firstTenSrc, 12, 14)} -In the next section, we will look at how to extend this program to run on multiple processs. +In the next section, we will look at how to deploy this program to run on multiple processes. diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index 2af9169ec9b4..6ce7eea1c1be 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -229,6 +229,22 @@ const config = { indexName: 'hydro' }, }), + scripts: [ + { + id: "runllm-widget-script", + type: "module", + src: "https://widget.runllm.com", + "runllm-server-address": "https://api.runllm.com", + "runllm-assistant-id": "136", + "runllm-position": "BOTTOM_RIGHT", + "runllm-keyboard-shortcut": "Mod+j", + "runllm-preset": "docusaurus", + "runllm-slack-community-url": "", + "runllm-name": "Hydro", + "runllm-theme-color": "#005EEC", + async: true, + }, + ], }; module.exports = config; diff --git a/docs/src/pages/img/papers/auto-compartmentalization-src.png b/docs/src/pages/img/papers/auto-compartmentalization-src.png index 125efc23ffe7..8785f09babac 100644 Binary files a/docs/src/pages/img/papers/auto-compartmentalization-src.png and b/docs/src/pages/img/papers/auto-compartmentalization-src.png differ diff --git a/docs/src/pages/img/papers/conor-papoc-2024.png b/docs/src/pages/img/papers/conor-papoc-2024.png new file mode 100644 index 000000000000..f0c3ce84da1e Binary files /dev/null and 
b/docs/src/pages/img/papers/conor-papoc-2024.png differ diff --git a/docs/src/pages/img/papers/david-papoc-2024.png b/docs/src/pages/img/papers/david-papoc-2024.png new file mode 100644 index 000000000000..c73169aa34e8 Binary files /dev/null and b/docs/src/pages/img/papers/david-papoc-2024.png differ diff --git a/docs/src/pages/img/papers/david-sigmod-2024.png b/docs/src/pages/img/papers/david-sigmod-2024.png new file mode 100644 index 000000000000..f6751f75f505 Binary files /dev/null and b/docs/src/pages/img/papers/david-sigmod-2024.png differ diff --git a/docs/src/pages/img/papers/hydroflow-thesis.png b/docs/src/pages/img/papers/hydroflow-thesis.png index 0355feb0083b..40515491a2c3 100644 Binary files a/docs/src/pages/img/papers/hydroflow-thesis.png and b/docs/src/pages/img/papers/hydroflow-thesis.png differ diff --git a/docs/src/pages/img/papers/joe-applied-2023.png b/docs/src/pages/img/papers/joe-applied-2023.png new file mode 100644 index 000000000000..8c2d958ec16b Binary files /dev/null and b/docs/src/pages/img/papers/joe-applied-2023.png differ diff --git a/docs/src/pages/img/papers/katara.png b/docs/src/pages/img/papers/katara.png index 88ffd89316fd..eea111ce35ee 100644 Binary files a/docs/src/pages/img/papers/katara.png and b/docs/src/pages/img/papers/katara.png differ diff --git a/docs/src/pages/img/papers/keep-calm-and-crdt-on.png b/docs/src/pages/img/papers/keep-calm-and-crdt-on.png index 9c6f48d1e3ea..714e5c493656 100644 Binary files a/docs/src/pages/img/papers/keep-calm-and-crdt-on.png and b/docs/src/pages/img/papers/keep-calm-and-crdt-on.png differ diff --git a/docs/src/pages/img/papers/new-directions.png b/docs/src/pages/img/papers/new-directions.png index 298c43004439..a6d419e779d1 100644 Binary files a/docs/src/pages/img/papers/new-directions.png and b/docs/src/pages/img/papers/new-directions.png differ diff --git a/docs/src/pages/img/papers/tiemo-cidr-2024.png b/docs/src/pages/img/papers/tiemo-cidr-2024.png new file mode 100644 index 
000000000000..0c0a9378f96d Binary files /dev/null and b/docs/src/pages/img/papers/tiemo-cidr-2024.png differ diff --git a/docs/src/pages/img/papers/tiemo-sigmod-2024.png b/docs/src/pages/img/papers/tiemo-sigmod-2024.png new file mode 100644 index 000000000000..bbddb6449653 Binary files /dev/null and b/docs/src/pages/img/papers/tiemo-sigmod-2024.png differ diff --git a/docs/src/pages/playground.js b/docs/src/pages/playground.js index b736d417a585..e1f706d3f5a1 100644 --- a/docs/src/pages/playground.js +++ b/docs/src/pages/playground.js @@ -66,7 +66,7 @@ source_iter(0..10) -> for_each(|n| println!("Howdy {}", n));`, "Chat Server": `\ -// https://hydro.run/docs/hydroflow/quickstart/example_8_chat_server +// https://github.com/hydro-project/hydroflow/blob/main/template/hydroflow/src/server.rs // Define shared inbound and outbound channels outbound_chan = union() -> dest_sink_serde(outbound); inbound_chan = source_stream_serde(inbound) @@ -87,7 +87,7 @@ inbound_chan[msgs] -> [0]broadcast; clients[1] -> [1]broadcast;`, "Chat Client": `\ -// https://hydro.run/docs/hydroflow/quickstart/example_8_chat_server +// https://github.com/hydro-project/hydroflow/blob/main/template/hydroflow/src/client.rs // set up channels outbound_chan = union() -> dest_sink_serde(outbound); inbound_chan = source_stream_serde(inbound) -> map(|(m, _)| m) @@ -200,6 +200,7 @@ export function EditorDemo({ compileFn, examples, mermaidId }) { noVarnames: false, noPullPush: false, noHandoffs: false, + noReferences: false, opShortText: false, }); const writeGraphConfigOnChange = (name) => { diff --git a/docs/src/pages/research.js b/docs/src/pages/research.js index ce6ab746c039..6c01039f536b 100644 --- a/docs/src/pages/research.js +++ b/docs/src/pages/research.js @@ -6,6 +6,61 @@ import Image from '@theme/IdealImage'; import styles from './research.module.css'; const papers = [ + { + title: "Optimizing Distributed Protocols with Query Rewrites", + pdf: "pathname:///papers/david-sigmod-2024.pdf", + 
thumb: require("./img/papers/david-sigmod-2024.png"), + authors: <>David Chu, Rithvik Panchapakesan, Shadaj Laddad, Lucky Katahanas, Chris Liu, Kaushik Shivakumar, Natacha Crooks, Joseph M. Hellerstein, & Heidi Howard, + description: [ + <>Distributed protocols such as 2PC and Paxos lie at the core of many systems in the cloud, but standard implementations do not scale. New scalable distributed protocols are developed through careful analysis and rewrites, but this process is ad hoc and error-prone. This paper presents an approach for scaling any distributed protocol by applying rule-driven rewrites, borrowing from query optimization. Distributed protocol rewrites entail a new burden: reasoning about spatiotemporal correctness. We leverage order-insensitivity and data dependency analysis to systematically identify correct coordination-free scaling opportunities. We apply this analysis to create preconditions and mechanisms for coordination-free decoupling and partitioning, two fundamental vertical and horizontal scaling techniques. Manual rule-driven applications of decoupling and partitioning improve the throughput of 2PC by 5x and Paxos by 3x, and match state-of-the-art throughput in recent work. These results point the way toward automated optimizers for distributed protocols based on correct-by-construction rewrite rules. + ], + conf: "SIGMOD 2024", + links: <>PDF / Tech Report / GitHub + }, + { + title: "SkyPIE: A Fast & Accurate Oracle for Object Placement", + pdf: "pathname:///papers/tiemo-sigmod-2024.pdf", + thumb: require("./img/papers/tiemo-sigmod-2024.png"), + authors: <>Tiemo Bang, Chris Douglas, Natacha Crooks and Joseph M. Hellerstein, + description: [ + <>Cloud object stores offer vastly different price points for object storage as a function of workload and geography. Poor object placement can thus lead to significant cost overheads. 
Prior cost-saving techniques attempt to optimize placement policies on the fly, deciding object placements for each object individually. In practice, these techniques do not scale to the size of the modern cloud. In this work, we leverage the static nature and pay-per-use pricing model of cloud environments to explore a different approach. Rather than computing object placements on the fly, we precompute a SkyPIE oracle---a lookup structure representing all possible placement policies and the workloads for which they are optimal. Internally, SkyPIE represents placement policies as a matrix of cost-hyperplanes, which we effectively precompute through pruning and convex optimization. By leveraging a fast geometric algorithm, online queries then are 1 to 8 orders of magnitude faster but as accurate as Integer-Linear-Programming. This makes exact optimization tractable for real workloads and we show >10x cost savings compared to state-of-the-art heuristic approaches. + ], + conf: "SIGMOD 2024", + links: <>PDF / GitHub + }, + { + title: "Bigger, not Badder: Safely Scaling BFT Protocols", + pdf: "pathname:///papers/david-papoc-2024.pdf", + thumb: require("./img/papers/david-papoc-2024.png"), + authors: <>David Chu, Chris Liu, Natacha Crooks, Joseph M. Hellerstein, & Heidi Howard, + description: [ + <>Byzantine Fault Tolerant (BFT) protocols provide powerful guarantees in the presence of arbitrary machine failures, yet they do not scale. The process of creating new, scalable BFT protocols requires expert analysis and is often error-prone. Recent work suggests that localized, rule-driven rewrites can be mechanically applied to scale existing (non-BFT) protocols, including Paxos. We modify these rewrites--- decoupling and partitioning---so they can be safely applied to BFT protocols, and apply these rewrites to the critical path of PBFT, improving its throughput by 5x. 
We prove the correctness of the modified rewrites on any BFT protocol by formally modeling the arbitrary logic of a Byzantine node. We define the Borgesian simulator, a theoretical node that simulates a Byzantine node through randomness, and show that in any BFT protocol, the messages that a Borgesian simulator can generate before and after optimization are the same. Our initial results point the way towards an automatic optimizer for BFT protocols. + ], + conf: "PaPoC 2024", + links: <>PDF / GitHub + }, + { + title: "Wrapping Rings in Lattices: An Algebraic Symbiosis of Incremental View Maintenance and Eventual Consistency", + pdf: "pathname:///papers/conor-papoc-2024.pdf", + thumb: require("./img/papers/conor-papoc-2024.png"), + authors: <>Conor Power, Saikrishna Achalla, Ryan Cottone, Nathaniel Macasaet & Joseph M. Hellerstein, + description: [ + <>We reconcile the use of semi-lattices in CRDTs and the use of groups and rings in incremental view maintenance to construct systems with strong eventual consistency, incremental computation, and database query optimization. + ], + conf: "PaPoC 2024", + links: <>PDF + }, + { + title: "Optimizing the cloud? Don't train models. Build oracles!", + pdf: "pathname:///papers/tiemo-cidr-2024.pdf", + thumb: require("./img/papers/tiemo-cidr-2024.png"), + authors: <>Tiemo Bang, Conor Power, Siavash Ameli, Natacha Crooks & Joseph M. Hellerstein, + description: [ + <>We propose cloud oracles as an alternative to machine learning for online optimization of cloud configurations. Our cloud oracle approach guarantees complete accuracy and explainability of decisions for problems that can be formulated as parametric convex optimizations. We give experimental evidence of this technique’s efficacy and share a vision of research directions for expanding its applicability. 
+ ], + conf: "CIDR 2024", + links: <>PDF + }, { title: "Keep CALM and CRDT On", pdf: "pathname:///papers/keep-calm-and-crdt-on.pdf", @@ -18,6 +73,17 @@ const papers = [ conf: "VLDB 2023", links: <>PDF / arXiv }, + { + title: "Invited Paper: Initial Steps Toward a Compiler for Distributed Programs", + pdf: "pathname:///papers/joe-applied-2023.pdf", + thumb: require("./img/papers/joe-applied-2023.png"), + authors: <>Joseph M. Hellerstein, Shadaj Laddad, Mae Milano, Conor Power & Mingwei Samuel, + description: [ + <>In the Hydro project we are designing a compiler toolkit that can optimize for the concerns of distributed systems, including scale-up and scale-down, availability, and consistency of outcomes across replicas. This invited paper overviews the project, and provides an early walk-through of the kind of optimization that is possible. We illustrate how type transformations as well as local program transformations can combine, step by step, to convert a single- node program into a variety of distributed design points that offer the same semantics with different performance and deployment characteristics. + ], + conf: "ApPLIED 2023", + links: <>PDF / arXiv + }, { title: "Katara: Synthesizing CRDTs with Verified Lifting", pdf: "pathname:///papers/katara.pdf", @@ -74,8 +140,8 @@ const linkIcon = ( top: "50%", transform: "translate(-50%, -50%)", }}> - - + + ); diff --git a/docs/src/util.ts b/docs/src/util.ts index d8be71bc6627..4cd8eec79eb9 100644 --- a/docs/src/util.ts +++ b/docs/src/util.ts @@ -1,24 +1,52 @@ +/// Grabs the specified lines `[lineStart, lineEnd]` from the string. +/// +/// Lines are one-indexed (start with `1`). Both `lineStart` and `lineEnd` are inclusive. 
export function getLines(str: string, lineStart: number, lineEnd?: number): string { - return str.split('\n').slice(lineStart - 1, lineEnd || lineStart).join('\n'); + let lines = str.split('\n').slice(lineStart - 1, lineEnd || lineStart); + const leadingWhitespace = Math.min(...lines.filter(line => 0 !== line.length).map(line => line.search(/\S/)).map(Number)); + if (0 < leadingWhitespace) { + lines = lines.map(line => line.slice(leadingWhitespace)); + } + return lines.join('\n'); } -export function extractOutput(output: string): string { +/// Extract the output from the stdout snapshots created by `surface_examples.rs`. +/// +/// This hides the graph output. Use `extractMermaid` to extract the graph output. +/// +/// If `short` is false (default), will include code to show the `cargo run` console call. +/// If `short` is true, returns only the stdout output. +export function extractOutput(output: string, short = false): string { const outputLines = output.replace(/\n$/, '').split('\n'); + // Delete the first four lines, which are the snapshot front matter. outputLines.splice(0, 4); + // Mermaid graph starts with double-percent signs. if (outputLines[0].startsWith('%%')) { + // Continues until double newline (a blank line). const count = outputLines.findIndex(line => 0 === line.length); + // Hide mermaid output. outputLines.splice(0, count + 1, ''); } const stdOut = outputLines.join('\n'); + if (short) { + return stdOut; + } return `#shell-command-next-line cargo run ${stdOut}`; } +/// Extract the mermaid graph logged to stdout from the snapshots created by `surface_examples.rs`. export function extractMermaid(output: string): string { const outputLines = output.split('\n'); + // Delete the first four lines, which are the snapshot front matter. outputLines.splice(0, 4); + // Mermaid graph starts with double-percent signs. 
+ if (!outputLines[0].startsWith('%%')) { + console.error('Snapshot output may be missing mermaid graph.'); + } + // Continues until double newline (a blank line). const count = outputLines.findIndex(line => 0 === line.length); outputLines.length = count; return outputLines.join('\n'); diff --git a/docs/static/papers/conor-papoc-2024.pdf b/docs/static/papers/conor-papoc-2024.pdf new file mode 100644 index 000000000000..1c0f73c53e41 Binary files /dev/null and b/docs/static/papers/conor-papoc-2024.pdf differ diff --git a/docs/static/papers/david-papoc-2024.pdf b/docs/static/papers/david-papoc-2024.pdf new file mode 100644 index 000000000000..f07569ab468c Binary files /dev/null and b/docs/static/papers/david-papoc-2024.pdf differ diff --git a/docs/static/papers/david-sigmod-2024.pdf b/docs/static/papers/david-sigmod-2024.pdf new file mode 100644 index 000000000000..8e647c27f94c Binary files /dev/null and b/docs/static/papers/david-sigmod-2024.pdf differ diff --git a/docs/static/papers/joe-applied-2023.pdf b/docs/static/papers/joe-applied-2023.pdf new file mode 100644 index 000000000000..7392a035bb23 Binary files /dev/null and b/docs/static/papers/joe-applied-2023.pdf differ diff --git a/docs/static/papers/tiemo-cidr-2024.pdf b/docs/static/papers/tiemo-cidr-2024.pdf new file mode 100644 index 000000000000..bcebe0dc2836 Binary files /dev/null and b/docs/static/papers/tiemo-cidr-2024.pdf differ diff --git a/docs/static/papers/tiemo-sigmod-2024.pdf b/docs/static/papers/tiemo-sigmod-2024.pdf new file mode 100644 index 000000000000..a4c52b3b7e70 Binary files /dev/null and b/docs/static/papers/tiemo-sigmod-2024.pdf differ diff --git a/hydro_deploy/core/CHANGELOG.md b/hydro_deploy/core/CHANGELOG.md index dd00beeb4015..26f8b6922b0e 100644 --- a/hydro_deploy/core/CHANGELOG.md +++ b/hydro_deploy/core/CHANGELOG.md @@ -5,8 +5,298 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## v0.9.0 (2024-08-30) + +### Chore + + - manually set versions for crates renamed in #1413 + - lower min dependency versions where possible, update `Cargo.lock` + Moved from #1418 + + --------- + +### Documentation + + - cleanup doc comments for clippy latest + +### New Features + + - Add end-to-end flamegraph generation for macos and linux localhost, fix #1351 + - improve progress UX by collapsing nested groups + Now, when a group only has a single active task, we skip printing a line + for the group itself and instead collapse its information into the line + for the inner task (recursively as necessary). This allows us to show + more fine grained progress without overflowing the console. + - use trybuild to compile subgraph binaries + +### Bug Fixes + + - only record usermode events in perf + When kernel stacks are included, the DWARF traces can become corrupted / + overflown leading to flamegraphs with broken parents. We only are + interested in usermode, anyways, and can measure I/O overhead through + other methods. + - only instantiate `Localhost` once + - avoid Terraform crashing on empty provider block + +### Refactor + + - adjust `ProgressTracker::println` + A small refactor pulled out of the perf tracing work, barely related to + #1359 + - cleanup handling of arc `Weak` in `deployment.rs` + +### Style + + - use `name_of!` macro + +### New Features (BREAKING) + + - Perf works over SSH + See documentation on how to use in + [Notion](https://www.notion.so/hydro-project/perf-Measuring-CPU-usage-6135b6ce56a94af38eeeba0a55deef9c). + +### Refactor (BREAKING) + + - rename integration crates to drop CLI references + - simplify process/cluster specs + --- + [//]: # (BEGIN SAPLING FOOTER) + Stack created with [Sapling](https://sapling-scm.com). 
Best reviewed + with + [ReviewStack](https://reviewstack.dev/hydro-project/hydroflow/pull/1394). + * #1395 + * __->__ #1394 + - end-to-end flamegraph generation, fix #1365 + Depends on #1370 + - `Deployment.stop()` for graceful shutdown including updated `perf` profile downloading + * `perf` profile downloading moved from the `drop()` impl to `async fn + stop()` + * download perf data via stdout + * update async-ssh2-lite to 0.5 to cleanup tokio compat issues + + WIP for #1365 + - use `buildstructor` to handle excessive `Deployment` method arguments, fix #1364 + Adds new method `Deployment::AzureHost` + +### Commit Statistics + + + + - 19 commits contributed to the release. + - 18 commits were understood as [conventional](https://www.conventionalcommits.org). + - 17 unique issues were worked on: [#1313](https://github.com/hydro-project/hydroflow/issues/1313), [#1360](https://github.com/hydro-project/hydroflow/issues/1360), [#1366](https://github.com/hydro-project/hydroflow/issues/1366), [#1369](https://github.com/hydro-project/hydroflow/issues/1369), [#1370](https://github.com/hydro-project/hydroflow/issues/1370), [#1372](https://github.com/hydro-project/hydroflow/issues/1372), [#1378](https://github.com/hydro-project/hydroflow/issues/1378), [#1394](https://github.com/hydro-project/hydroflow/issues/1394), [#1396](https://github.com/hydro-project/hydroflow/issues/1396), [#1398](https://github.com/hydro-project/hydroflow/issues/1398), [#1403](https://github.com/hydro-project/hydroflow/issues/1403), [#1411](https://github.com/hydro-project/hydroflow/issues/1411), [#1413](https://github.com/hydro-project/hydroflow/issues/1413), [#1423](https://github.com/hydro-project/hydroflow/issues/1423), [#1428](https://github.com/hydro-project/hydroflow/issues/1428), [#1429](https://github.com/hydro-project/hydroflow/issues/1429), [#1431](https://github.com/hydro-project/hydroflow/issues/1431) + +### Commit 
Details + + + +
view details + + * **[#1313](https://github.com/hydro-project/hydroflow/issues/1313)** + - Fixup! feat(hydro_deploy)!: Perf works over SSH ([`220b5bc`](https://github.com/hydro-project/hydroflow/commit/220b5bce4fbf1af5e14ebe5aa73da7a7e668fea7)) + - Perf works over SSH ([`749a103`](https://github.com/hydro-project/hydroflow/commit/749a10307f4eff2a46a1056735e84ed94d44b39e)) + * **[#1360](https://github.com/hydro-project/hydroflow/issues/1360)** + - Avoid Terraform crashing on empty provider block ([`654b77d`](https://github.com/hydro-project/hydroflow/commit/654b77d8f65ae6eb62c164a2d736168ff96cb168)) + * **[#1366](https://github.com/hydro-project/hydroflow/issues/1366)** + - Use `buildstructor` to handle excessive `Deployment` method arguments, fix #1364 ([`8856c85`](https://github.com/hydro-project/hydroflow/commit/8856c8596d5ad9d5f24a46467690bfac1549fae2)) + * **[#1369](https://github.com/hydro-project/hydroflow/issues/1369)** + - Cleanup handling of arc `Weak` in `deployment.rs` ([`77246e7`](https://github.com/hydro-project/hydroflow/commit/77246e77df47a0006dcb3eaeeb76882efacfd25c)) + * **[#1370](https://github.com/hydro-project/hydroflow/issues/1370)** + - `Deployment.stop()` for graceful shutdown including updated `perf` profile downloading ([`a214786`](https://github.com/hydro-project/hydroflow/commit/a2147864b24110c9ae2c1553e9e8b55bd5065f15)) + * **[#1372](https://github.com/hydro-project/hydroflow/issues/1372)** + - End-to-end flamegraph generation, fix #1365 ([`bb081d3`](https://github.com/hydro-project/hydroflow/commit/bb081d3b0af6dbce9630e23dfe8b7d1363751c2b)) + * **[#1378](https://github.com/hydro-project/hydroflow/issues/1378)** + - Adjust `ProgressTracker::println` ([`a88a550`](https://github.com/hydro-project/hydroflow/commit/a88a550cefde3a56790859127edc6a4e27e07090)) + * **[#1394](https://github.com/hydro-project/hydroflow/issues/1394)** + - Simplify process/cluster specs 
([`128aaec`](https://github.com/hydro-project/hydroflow/commit/128aaecd40edce57dc254afdcd61ecd5b9948d71)) + * **[#1396](https://github.com/hydro-project/hydroflow/issues/1396)** + - Add end-to-end flamegraph generation for macos and linux localhost, fix #1351 ([`6568263`](https://github.com/hydro-project/hydroflow/commit/6568263e03899d4e96837690e6e59284c194d7ff)) + * **[#1398](https://github.com/hydro-project/hydroflow/issues/1398)** + - Use trybuild to compile subgraph binaries ([`46a8a2c`](https://github.com/hydro-project/hydroflow/commit/46a8a2cb08732bb21096e824bc4542d208c68fb2)) + * **[#1403](https://github.com/hydro-project/hydroflow/issues/1403)** + - Only instantiate `Localhost` once ([`63b528f`](https://github.com/hydro-project/hydroflow/commit/63b528feeb2e6dac2ed12c02b2e39e0d42133a74)) + * **[#1411](https://github.com/hydro-project/hydroflow/issues/1411)** + - Improve progress UX by collapsing nested groups ([`fedd3ef`](https://github.com/hydro-project/hydroflow/commit/fedd3ef60fe16ab862244d816f7973269a7295e8)) + * **[#1413](https://github.com/hydro-project/hydroflow/issues/1413)** + - Rename integration crates to drop CLI references ([`0a465e5`](https://github.com/hydro-project/hydroflow/commit/0a465e55dd39c76bc1aefb020460a639d792fe87)) + * **[#1423](https://github.com/hydro-project/hydroflow/issues/1423)** + - Lower min dependency versions where possible, update `Cargo.lock` ([`11af328`](https://github.com/hydro-project/hydroflow/commit/11af32828bab6e4a4264d2635ff71a12bb0bb778)) + * **[#1428](https://github.com/hydro-project/hydroflow/issues/1428)** + - Cleanup doc comments for clippy latest ([`f5f1eb0`](https://github.com/hydro-project/hydroflow/commit/f5f1eb0c612f5c0c1752360d972ef6853c5e12f0)) + * **[#1429](https://github.com/hydro-project/hydroflow/issues/1429)** + - Use `name_of!` macro 
([`3fde68d`](https://github.com/hydro-project/hydroflow/commit/3fde68d0db0414017cfb771a218b14b8f57d1686)) + * **[#1431](https://github.com/hydro-project/hydroflow/issues/1431)** + - Only record usermode events in perf ([`c4683ca`](https://github.com/hydro-project/hydroflow/commit/c4683caca43f2927694c920b43ef35a6d1629eaa)) + * **Uncategorized** + - Manually set versions for crates renamed in #1413 ([`a2ec110`](https://github.com/hydro-project/hydroflow/commit/a2ec110ccadb97e293b19d83a155d98d94224bba)) +
+ +## v0.8.0 (2024-07-23) + + + + + + + + + + + + +### Refactor + + - remove unneeded `Arc Curious if there was any intention behind why it was `Arc I think before some refactors we took the I/O handles instead of using broadcast channels. + - build cache cleanup + * Replace mystery tuple with new `struct BuildOutput` + * Replace `Mutex` and `Arc`-infested `HashMap` with `memo-map` crate, + greatly simplifying build cache typing + * Remove redundant build caching in `HydroflowCrateService`, expose and + use cache parameters as `BuildParams` + * Remove `once_cell` and `async-once-cell` dependencies, use `std`'s + `OnceLock` + * Add `Failed to execute command: {}` context to `perf` error message + * Cleanup some repeated `format!` expressions + +### Style + + - rename `SSH` -> `Ssh` + +### Refactor (BREAKING) + + - make `Service::collect_resources` take `&self` instead of `&mut self` + #430 but still has `RwLock` wrapping + + Depends on #1347 + - make `Host` trait use `&self` interior mutability to remove `RwLock` wrappings #430 + Depends on #1346 + - Make `Host::provision` not async anymore + I noticed that none of the method impls have any `await`s + - make `HydroflowSource`, `HydroflowSink` traits use `&self` interior mutability to remove `RwLock` wrappings #430 + Depends on #1339 + - replace `async-channel` with `tokio::sync::mpsc::unbounded_channel` + Depends on #1339 + + We could make the publicly facing `stdout`, `stderr` APIs return `impl Stream` in the future, maybe + - replace some uses of `tokio::sync::RwLock` with `std::sync::Mutex` #430 (3/3) + +### Style (BREAKING) + + - enable clippy `upper-case-acronyms-aggressive` + * rename `GCP` -> `Gcp`, `NodeID` -> `NodeId` + * update CI `cargo-generate` template testing to use PR's branch instead + of whatever `main` happens to be + +### Commit Statistics + + + + - 11 commits contributed to the release. + - 10 commits were understood as [conventional](https://www.conventionalcommits.org). 
+ - 10 unique issues were worked on: [#1334](https://github.com/hydro-project/hydroflow/issues/1334), [#1338](https://github.com/hydro-project/hydroflow/issues/1338), [#1339](https://github.com/hydro-project/hydroflow/issues/1339), [#1340](https://github.com/hydro-project/hydroflow/issues/1340), [#1343](https://github.com/hydro-project/hydroflow/issues/1343), [#1345](https://github.com/hydro-project/hydroflow/issues/1345), [#1346](https://github.com/hydro-project/hydroflow/issues/1346), [#1347](https://github.com/hydro-project/hydroflow/issues/1347), [#1348](https://github.com/hydro-project/hydroflow/issues/1348), [#1356](https://github.com/hydro-project/hydroflow/issues/1356) + +### Commit Details + + + +
view details + + * **[#1334](https://github.com/hydro-project/hydroflow/issues/1334)** + - Build cache cleanup ([`0feae74`](https://github.com/hydro-project/hydroflow/commit/0feae7454e4674eea1f3308b3d6d4e9d459cda67)) + * **[#1338](https://github.com/hydro-project/hydroflow/issues/1338)** + - Remove unneeded `Arc `Ssh` ([`947ebc1`](https://github.com/hydro-project/hydroflow/commit/947ebc1cb21a07fbfacae4ac956dbd0015a8a418)) + * **[#1343](https://github.com/hydro-project/hydroflow/issues/1343)** + - Make `Host::provision` not async anymore ([`f536ecc`](https://github.com/hydro-project/hydroflow/commit/f536eccf7297be8185108b60897e92ad0efffe4a)) + * **[#1345](https://github.com/hydro-project/hydroflow/issues/1345)** + - Enable clippy `upper-case-acronyms-aggressive` ([`12b8ba5`](https://github.com/hydro-project/hydroflow/commit/12b8ba53f28eb9de1318b41cdf1e23282f6f0eb6)) + * **[#1346](https://github.com/hydro-project/hydroflow/issues/1346)** + - Make `HydroflowSource`, `HydroflowSink` traits use `&self` interior mutability to remove `RwLock` wrappings #430 ([`057a0a5`](https://github.com/hydro-project/hydroflow/commit/057a0a510568cf81932368c8c65e056f91af7202)) + * **[#1347](https://github.com/hydro-project/hydroflow/issues/1347)** + - Make `Host` trait use `&self` interior mutability to remove `RwLock` wrappings #430 ([`c5a8de2`](https://github.com/hydro-project/hydroflow/commit/c5a8de28e7844b3c29d58116d8340967f2e6bcc4)) + * **[#1348](https://github.com/hydro-project/hydroflow/issues/1348)** + - Make `Service::collect_resources` take `&self` instead of `&mut self` ([`2286558`](https://github.com/hydro-project/hydroflow/commit/22865583a4260fe401c28aa39a74987478edc73d)) + * **[#1356](https://github.com/hydro-project/hydroflow/issues/1356)** + - Replace `async-channel` with `tokio::sync::mpsc::unbounded_channel` 
([`6039078`](https://github.com/hydro-project/hydroflow/commit/60390782dd7dcec18d193c800af716843a944dba)) + * **Uncategorized** + - Release hydroflow_lang v0.8.0, hydroflow_datalog_core v0.8.0, hydroflow_datalog v0.8.0, hydroflow_macro v0.8.0, lattices_macro v0.5.5, lattices v0.5.6, variadics v0.0.5, pusherator v0.0.7, hydroflow v0.8.0, hydroflow_plus v0.8.0, hydro_deploy v0.8.0, hydro_cli v0.8.0, hydroflow_plus_cli_integration v0.8.0, safety bump 7 crates ([`ca6c16b`](https://github.com/hydro-project/hydroflow/commit/ca6c16b4a7ce35e155fe7fc6c7d1676c37c9e4de)) +
+ +## v0.7.0 (2024-05-24) + +### New Features + + - add support for collecting counts and running perf + +### Bug Fixes + + - end processes with SIGTERM instead of SIGKILL + fix(hydro_deploy): end processes with SIGTERM instead of SIGKILL + +### Commit Statistics + + + + - 3 commits contributed to the release. + - 2 commits were understood as [conventional](https://www.conventionalcommits.org). + - 2 unique issues were worked on: [#1129](https://github.com/hydro-project/hydroflow/issues/1129), [#1157](https://github.com/hydro-project/hydroflow/issues/1157) + +### Commit Details + + + +
view details + + * **[#1129](https://github.com/hydro-project/hydroflow/issues/1129)** + - End processes with SIGTERM instead of SIGKILL ([`92c72ba`](https://github.com/hydro-project/hydroflow/commit/92c72ba9527241f88dfb23f64b999c8e4bd2b26c)) + * **[#1157](https://github.com/hydro-project/hydroflow/issues/1157)** + - Add support for collecting counts and running perf ([`29a263f`](https://github.com/hydro-project/hydroflow/commit/29a263fb564c5ce4bc495ea4e9d20b8b2621b645)) + * **Uncategorized** + - Release hydroflow_lang v0.7.0, hydroflow_datalog_core v0.7.0, hydroflow_datalog v0.7.0, hydroflow_macro v0.7.0, lattices v0.5.5, multiplatform_test v0.1.0, pusherator v0.0.6, hydroflow v0.7.0, stageleft_macro v0.2.0, stageleft v0.3.0, stageleft_tool v0.2.0, hydroflow_plus v0.7.0, hydro_deploy v0.7.0, hydro_cli v0.7.0, hydroflow_plus_cli_integration v0.7.0, safety bump 8 crates ([`2852147`](https://github.com/hydro-project/hydroflow/commit/285214740627685e911781793e05d234ab2ad2bd)) +
+ +## v0.6.1 (2024-04-09) + + + +### Style + + - qualified path cleanups for clippy + +### Commit Statistics + + + + - 4 commits contributed to the release. + - 1 commit was understood as [conventional](https://www.conventionalcommits.org). + - 1 unique issue was worked on: [#1090](https://github.com/hydro-project/hydroflow/issues/1090) + +### Commit Details + + + +
view details + + * **[#1090](https://github.com/hydro-project/hydroflow/issues/1090)** + - Qualified path cleanups for clippy ([`7958fb0`](https://github.com/hydro-project/hydroflow/commit/7958fb0d900be8fe7359326abfa11dcb8fb35e8a)) + * **Uncategorized** + - Release hydroflow_plus v0.6.1, hydro_deploy v0.6.1, hydro_cli v0.6.1, hydroflow_plus_cli_integration v0.6.1 ([`c385c13`](https://github.com/hydro-project/hydroflow/commit/c385c132c9733d1bace82156aa14216b8e7fef9f)) + - Release hydroflow_lang v0.6.2, hydroflow v0.6.2, hydroflow_plus v0.6.1, hydro_deploy v0.6.1, hydro_cli v0.6.1, hydroflow_plus_cli_integration v0.6.1, stageleft_tool v0.1.1 ([`23cfe08`](https://github.com/hydro-project/hydroflow/commit/23cfe0839079aa17d042bbd3976f6d188689d290)) + - Release hydroflow_cli_integration v0.5.2, hydroflow_lang v0.6.1, hydroflow_datalog_core v0.6.1, lattices v0.5.4, hydroflow v0.6.1, stageleft_macro v0.1.1, stageleft v0.2.1, hydroflow_plus v0.6.1, hydro_deploy v0.6.1, hydro_cli v0.6.1, hydroflow_plus_cli_integration v0.6.1, stageleft_tool v0.1.1 ([`cd63f22`](https://github.com/hydro-project/hydroflow/commit/cd63f2258c961a40f0e5dbef20ac329a2d570ad0)) +
+ ## v0.6.0 (2024-03-02) + + + ### Chore - appease various clippy lints @@ -25,8 +315,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 3 commits contributed to the release over the course of 7 calendar days. - - 32 days passed between releases. + - 4 commits contributed to the release. - 3 commits were understood as [conventional](https://www.conventionalcommits.org). - 3 unique issues were worked on: [#1015](https://github.com/hydro-project/hydroflow/issues/1015), [#1043](https://github.com/hydro-project/hydroflow/issues/1043), [#1084](https://github.com/hydro-project/hydroflow/issues/1084) @@ -42,6 +331,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add support for azure ([`fcf43bf`](https://github.com/hydro-project/hydroflow/commit/fcf43bf86fe550247dffa4641a9ce3aff3b9afc3)) * **[#1084](https://github.com/hydro-project/hydroflow/issues/1084)** - Appease various clippy lints ([`39ab8b0`](https://github.com/hydro-project/hydroflow/commit/39ab8b0278e9e3fe96552ace0a4ae768a6bc10d8)) + * **Uncategorized** + - Release hydroflow_lang v0.6.0, hydroflow_datalog_core v0.6.0, hydroflow_datalog v0.6.0, hydroflow_macro v0.6.0, lattices v0.5.3, variadics v0.0.4, pusherator v0.0.5, hydroflow v0.6.0, stageleft v0.2.0, hydroflow_plus v0.6.0, hydro_deploy v0.6.0, hydro_cli v0.6.0, hydroflow_plus_cli_integration v0.6.0, safety bump 7 crates ([`09ea65f`](https://github.com/hydro-project/hydroflow/commit/09ea65fe9cd45c357c43bffca30e60243fa45cc8)) ## v0.5.1 (2024-01-29) @@ -83,7 +374,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 12 commits contributed to the release over the course of 39 calendar days. + - 12 commits contributed to the release. - 11 commits were understood as [conventional](https://www.conventionalcommits.org). 
- 9 unique issues were worked on: [#1010](https://github.com/hydro-project/hydroflow/issues/1010), [#1014](https://github.com/hydro-project/hydroflow/issues/1014), [#986](https://github.com/hydro-project/hydroflow/issues/986), [#987](https://github.com/hydro-project/hydroflow/issues/987), [#992](https://github.com/hydro-project/hydroflow/issues/992), [#994](https://github.com/hydro-project/hydroflow/issues/994), [#995](https://github.com/hydro-project/hydroflow/issues/995), [#996](https://github.com/hydro-project/hydroflow/issues/996), [#999](https://github.com/hydro-project/hydroflow/issues/999) diff --git a/hydro_deploy/core/Cargo.toml b/hydro_deploy/core/Cargo.toml index 99fc1e741d3c..94a858ef919d 100644 --- a/hydro_deploy/core/Cargo.toml +++ b/hydro_deploy/core/Cargo.toml @@ -1,34 +1,36 @@ [package] name = "hydro_deploy" publish = true -version = "0.6.0" +version = "0.9.0" edition = "2021" license = "Apache-2.0" documentation = "https://docs.rs/hydro_deploy/" description = "Hydro Deploy" [dependencies] -anyhow = { version = "1.0.69", features = [ "backtrace" ] } -async-channel = "1.8.0" -async-once-cell = "0.5.3" -async-process = "1.6.0" -async-recursion = "1.0" -async-ssh2-lite = { version = "0.4.2", features = [ "tokio" ] } -async-trait = "0.1.64" +anyhow = { version = "1.0.82", features = [ "backtrace" ] } +async-process = "2.0.0" +async-recursion = "1.0.0" +async-ssh2-lite = { version = "0.5.0", features = [ "tokio" ] } +async-trait = "0.1.54" +buildstructor = "0.5.0" bytes = "1.1.0" -cargo_metadata = "0.15.4" -dunce = "1.0.4" -dyn-clone = "1" -futures = "0.3.26" -futures-core = "0.3.26" -hydroflow_cli_integration = { path = "../hydroflow_cli_integration", version = "^0.5.1" } -indicatif = "0.17.6" +cargo_metadata = "0.18.0" +dunce = "1.0.0" +dyn-clone = "1.0.0" +futures = "0.3.0" +hydroflow_deploy_integration = { path = "../hydroflow_deploy_integration", version = "^0.9.0" } +indicatif = "0.17.0" +inferno = "0.11.0" 
+itertools = "0.10.0" # TODO(mingwei): remove when `iter_intersperse` is stabilized. +memo-map = "0.3.0" +nameof = "1.0.0" nanoid = "0.4.0" -nix = "0.26.2" -once_cell = "1.17" -serde = { version = "1", features = [ "derive" ] } -serde_json = "1" -shell-escape = "0.1.5" -tempfile = "3.3.0" -tokio = { version = "1.16", features = [ "full" ] } -tokio-util = { version = "0.7.7", features=[ "compat" ] } +nix = { version = "0.29.0", features = [ "signal" ] } +serde = { version = "1.0.197", features = [ "derive" ] } +serde_json = "1.0.115" +shell-escape = "0.1.0" +tempfile = "3.0.0" +tokio = { version = "1.29.0", features = [ "full" ] } +tokio-stream = { version = "0.1.3", default-features = false } +tokio-util = { version = "0.7.5", features = [ "compat", "io-util" ] } diff --git a/hydro_deploy/core/src/azure.rs b/hydro_deploy/core/src/azure.rs index 2e3ba8987c36..83b3754ad4b2 100644 --- a/hydro_deploy/core/src/azure.rs +++ b/hydro_deploy/core/src/azure.rs @@ -1,5 +1,5 @@ use std::collections::HashMap; -use std::sync::Arc; +use std::sync::{Arc, Mutex, OnceLock}; use anyhow::Result; use async_trait::async_trait; @@ -11,7 +11,8 @@ use super::{ ClientStrategy, Host, HostTargetType, LaunchedHost, ResourceBatch, ResourceResult, ServerStrategy, }; -use crate::ssh::LaunchedSSHHost; +use crate::ssh::LaunchedSshHost; +use crate::HostStrategyGetter; pub struct LaunchedVirtualMachine { resource_result: Arc, @@ -20,7 +21,7 @@ pub struct LaunchedVirtualMachine { pub external_ip: Option, } -impl LaunchedSSHHost for LaunchedVirtualMachine { +impl LaunchedSshHost for LaunchedVirtualMachine { fn get_external_ip(&self) -> Option { self.external_ip.clone() } @@ -43,15 +44,17 @@ impl LaunchedSSHHost for LaunchedVirtualMachine { } pub struct AzureHost { - pub id: usize, - pub project: String, - pub os_type: String, // linux or windows - pub machine_size: String, - pub image: Option>, - pub region: String, - pub user: Option, - pub launched: Option>, - external_ports: Vec, + /// ID from 
[`crate::Deployment::add_host`]. + id: usize, + + project: String, + os_type: String, // linux or windows + machine_size: String, + image: Option>, + region: String, + user: Option, + pub launched: OnceLock>, // TODO(mingwei): fix pub + external_ports: Mutex>, } impl AzureHost { @@ -72,8 +75,8 @@ impl AzureHost { image, region, user, - launched: None, - external_ports: vec![], + launched: OnceLock::new(), + external_ports: Mutex::new(Vec::new()), } } } @@ -84,17 +87,17 @@ impl Host for AzureHost { HostTargetType::Linux(crate::LinuxArchitecture::AARCH64) } - fn request_port(&mut self, bind_type: &ServerStrategy) { + fn request_port(&self, bind_type: &ServerStrategy) { match bind_type { ServerStrategy::UnixSocket => {} ServerStrategy::InternalTcpPort => {} ServerStrategy::ExternalTcpPort(port) => { - if !self.external_ports.contains(port) { - if self.launched.is_some() { + let mut external_ports = self.external_ports.lock().unwrap(); + if !external_ports.contains(port) { + if self.launched.get().is_some() { todo!("Cannot adjust firewall after host has been launched"); } - - self.external_ports.push(*port); + external_ports.push(*port); } } ServerStrategy::Demux(demux) => { @@ -114,7 +117,7 @@ impl Host for AzureHost { } } - fn request_custom_binary(&mut self) { + fn request_custom_binary(&self) { self.request_port(&ServerStrategy::ExternalTcpPort(22)); } @@ -126,12 +129,8 @@ impl Host for AzureHost { self } - fn as_any_mut(&mut self) -> &mut dyn std::any::Any { - self - } - fn collect_resources(&self, resource_batch: &mut ResourceBatch) { - if self.launched.is_some() { + if self.launched.get().is_some() { return; } @@ -398,46 +397,43 @@ impl Host for AzureHost { fn launched(&self) -> Option> { self.launched - .as_ref() + .get() .map(|a| a.clone() as Arc) } - async fn provision(&mut self, resource_result: &Arc) -> Arc { - if self.launched.is_none() { - let id = self.id; - - let internal_ip = resource_result - .terraform - .outputs - 
.get(&format!("vm-instance-{id}-internal-ip")) - .unwrap() - .value - .clone(); - - let external_ip = resource_result - .terraform - .outputs - .get(&format!("vm-instance-{id}-public-ip")) - .map(|v| v.value.clone()); - - self.launched = Some(Arc::new(LaunchedVirtualMachine { - resource_result: resource_result.clone(), - user: self.user.as_ref().cloned().unwrap_or("hydro".to_string()), - internal_ip, - external_ip, - })) - } - - self.launched.as_ref().unwrap().clone() + fn provision(&self, resource_result: &Arc) -> Arc { + self.launched + .get_or_init(|| { + let id = self.id; + + let internal_ip = resource_result + .terraform + .outputs + .get(&format!("vm-instance-{id}-internal-ip")) + .unwrap() + .value + .clone(); + + let external_ip = resource_result + .terraform + .outputs + .get(&format!("vm-instance-{id}-public-ip")) + .map(|v| v.value.clone()); + + Arc::new(LaunchedVirtualMachine { + resource_result: resource_result.clone(), + user: self.user.as_ref().cloned().unwrap_or("hydro".to_string()), + internal_ip, + external_ip, + }) + }) + .clone() } fn strategy_as_server<'a>( &'a self, client_host: &dyn Host, - ) -> Result<( - ClientStrategy<'a>, - Box ServerStrategy>, - )> { + ) -> Result<(ClientStrategy<'a>, HostStrategyGetter)> { if client_host.can_connect_to(ClientStrategy::UnixSocket(self.id)) { Ok(( ClientStrategy::UnixSocket(self.id), @@ -452,7 +448,7 @@ impl Host for AzureHost { Ok(( ClientStrategy::ForwardedTcpPort(self), Box::new(|me| { - me.downcast_mut::() + me.downcast_ref::() .unwrap() .request_port(&ServerStrategy::ExternalTcpPort(22)); // needed to forward ServerStrategy::InternalTcpPort diff --git a/hydro_deploy/core/src/custom_service.rs b/hydro_deploy/core/src/custom_service.rs index 2a9d707838f8..947b2e8410fc 100644 --- a/hydro_deploy/core/src/custom_service.rs +++ b/hydro_deploy/core/src/custom_service.rs @@ -1,21 +1,22 @@ use std::any::Any; use std::ops::Deref; -use std::sync::{Arc, Weak}; +use std::sync::{Arc, OnceLock, Weak}; use 
anyhow::{bail, Result}; use async_trait::async_trait; -use hydroflow_cli_integration::{ConnectedDirect, ServerPort}; +use hydroflow_deploy_integration::{ConnectedDirect, ServerPort}; use tokio::sync::RwLock; use super::hydroflow_crate::ports::{ HydroflowServer, HydroflowSink, HydroflowSource, ServerConfig, SourcePath, }; use super::{Host, LaunchedHost, ResourceBatch, ResourceResult, ServerStrategy, Service}; +use crate::hydroflow_crate::ports::ReverseSinkInstantiator; /// Represents an unknown, third-party service that is not part of the Hydroflow ecosystem. pub struct CustomService { _id: usize, - on: Arc>, + on: Arc, /// The ports that the service wishes to expose to the public internet. external_ports: Vec, @@ -24,7 +25,7 @@ pub struct CustomService { } impl CustomService { - pub fn new(id: usize, on: Arc>, external_ports: Vec) -> Self { + pub fn new(id: usize, on: Arc, external_ports: Vec) -> Self { Self { _id: id, on, @@ -40,15 +41,12 @@ impl CustomService { #[async_trait] impl Service for CustomService { - fn collect_resources(&mut self, _resource_batch: &mut ResourceBatch) { + fn collect_resources(&self, _resource_batch: &mut ResourceBatch) { if self.launched_host.is_some() { return; } - let mut host = self - .on - .try_write() - .expect("No one should be reading/writing the host while resources are collected"); + let host = &self.on; for port in self.external_ports.iter() { host.request_port(&ServerStrategy::ExternalTcpPort(*port)); @@ -60,9 +58,9 @@ impl Service for CustomService { return Ok(()); } - let mut host_write = self.on.write().await; - let launched = host_write.provision(resource_result); - self.launched_host = Some(launched.await); + let host = &self.on; + let launched = host.provision(resource_result); + self.launched_host = Some(launched); Ok(()) } @@ -81,20 +79,20 @@ impl Service for CustomService { pub struct CustomClientPort { pub on: Weak>, - client_port: Option, + client_port: OnceLock, } impl CustomClientPort { pub fn new(on: Weak>) -> 
Self { Self { on, - client_port: None, + client_port: OnceLock::new(), } } pub async fn server_port(&self) -> ServerPort { self.client_port - .as_ref() + .get() .unwrap() .load_instantiated(&|p| p) .await @@ -102,7 +100,7 @@ impl CustomClientPort { pub async fn connect(&self) -> ConnectedDirect { self.client_port - .as_ref() + .get() .unwrap() .load_instantiated(&|p| p) .await @@ -117,7 +115,7 @@ impl HydroflowSource for CustomClientPort { SourcePath::Direct(self.on.upgrade().unwrap().try_read().unwrap().on.clone()) } - fn host(&self) -> Arc> { + fn host(&self) -> Arc { panic!("Custom services cannot be used as the server") } @@ -125,17 +123,20 @@ impl HydroflowSource for CustomClientPort { panic!("Custom services cannot be used as the server") } - fn record_server_config(&mut self, config: ServerConfig) { - self.client_port = Some(config); + fn record_server_config(&self, config: ServerConfig) { + self.client_port + .set(config) + .map_err(drop) // `ServerConfig` doesn't implement `Debug` for `.expect()`. 
+ .expect("Cannot call `record_server_config()` multiple times."); } - fn record_server_strategy(&mut self, _config: ServerStrategy) { + fn record_server_strategy(&self, _config: ServerStrategy) { panic!("Custom services cannot be used as the server") } } impl HydroflowSink for CustomClientPort { - fn as_any_mut(&mut self) -> &mut dyn Any { + fn as_any(&self) -> &dyn Any { self } @@ -145,26 +146,24 @@ impl HydroflowSink for CustomClientPort { fn instantiate_reverse( &self, - server_host: &Arc>, + server_host: &Arc, server_sink: Arc, wrap_client_port: &dyn Fn(ServerConfig) -> ServerConfig, - ) -> Result ServerStrategy>> { + ) -> Result { let client = self.on.upgrade().unwrap(); let client_read = client.try_read().unwrap(); - let server_host_clone = server_host.clone(); - let server_host = server_host_clone.try_read().unwrap(); + let server_host = server_host.clone(); - let (conn_type, bind_type) = - server_host.strategy_as_server(client_read.on.try_read().unwrap().deref())?; + let (conn_type, bind_type) = server_host.strategy_as_server(client_read.on.deref())?; let client_port = wrap_client_port(ServerConfig::from_strategy(&conn_type, server_sink)); - let server_host_clone = server_host_clone.clone(); Ok(Box::new(move |me| { - let mut server_host = server_host_clone.try_write().unwrap(); - me.downcast_mut::().unwrap().client_port = Some(client_port); - bind_type(server_host.as_any_mut()) + me.downcast_ref::() + .unwrap() + .record_server_config(client_port); + bind_type(server_host.as_any()) })) } } diff --git a/hydro_deploy/core/src/deployment.rs b/hydro_deploy/core/src/deployment.rs index 4570f28d8489..cbc00b58f22a 100644 --- a/hydro_deploy/core/src/deployment.rs +++ b/hydro_deploy/core/src/deployment.rs @@ -1,132 +1,134 @@ +use std::collections::HashMap; +use std::future::Future; use std::sync::{Arc, Weak}; use anyhow::Result; -use futures::{StreamExt, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use tokio::sync::RwLock; -use 
super::gcp::GCPNetwork; +use super::gcp::GcpNetwork; use super::{ - progress, CustomService, GCPComputeEngineHost, Host, LocalhostHost, ResourcePool, + progress, CustomService, GcpComputeEngineHost, Host, LocalhostHost, ResourcePool, ResourceResult, Service, }; -use crate::ServiceBuilder; +use crate::{AzureHost, ServiceBuilder}; -#[derive(Default)] pub struct Deployment { - pub hosts: Vec>>, + pub hosts: Vec>, pub services: Vec>>, pub resource_pool: ResourcePool, + localhost_host: Option>, last_resource_result: Option>, next_host_id: usize, next_service_id: usize, } -impl Deployment { - pub fn new() -> Self { - Self::default() +impl Default for Deployment { + fn default() -> Self { + Self::new() } +} - #[allow(non_snake_case)] - pub fn Localhost(&mut self) -> Arc> { - self.add_host(LocalhostHost::new) +impl Deployment { + pub fn new() -> Self { + let mut ret = Self { + hosts: Vec::new(), + services: Vec::new(), + resource_pool: ResourcePool::default(), + localhost_host: None, + last_resource_result: None, + next_host_id: 0, + next_service_id: 0, + }; + + ret.localhost_host = Some(ret.add_host(LocalhostHost::new)); + ret } #[allow(non_snake_case)] - pub fn GCPComputeEngineHost( - &mut self, - project: impl Into, - machine_type: impl Into, - image: impl Into, - region: impl Into, - network: Arc>, - user: Option, - ) -> Arc> { - self.add_host(|id| { - GCPComputeEngineHost::new(id, project, machine_type, image, region, network, user) - }) + pub fn Localhost(&self) -> Arc { + self.localhost_host.clone().unwrap() } #[allow(non_snake_case)] pub fn CustomService( &mut self, - on: Arc>, + on: Arc, external_ports: Vec, ) -> Arc> { self.add_service(|id| CustomService::new(id, on, external_ports)) } + /// Runs `deploy()`, and `start()`, waits for the trigger future, then runs `stop()`. + pub async fn run_until(&mut self, trigger: impl Future) -> Result<()> { + // TODO(mingwei): should `trigger` interrupt `deploy()` and `start()`? If so make sure shutdown works as expected. 
+ self.deploy().await?; + self.start().await?; + trigger.await; + self.stop().await?; + Ok(()) + } + + /// Runs `deploy()`, and `start()`, waits for CTRL+C, then runs `stop()`. + pub async fn run_ctrl_c(&mut self) -> Result<()> { + self.run_until(tokio::signal::ctrl_c().map(|_| ())).await + } + pub async fn deploy(&mut self) -> Result<()> { - progress::ProgressTracker::with_group("deploy", None, || async { + self.services.retain(|weak| weak.strong_count() > 0); + + progress::ProgressTracker::with_group("deploy", Some(3), || async { let mut resource_batch = super::ResourceBatch::new(); - let active_services = self - .services - .iter() - .filter(|service| service.upgrade().is_some()) - .cloned() - .collect::>(); - self.services = active_services; - - for service in self.services.iter_mut() { - service - .upgrade() - .unwrap() - .write() - .await - .collect_resources(&mut resource_batch); + + for service in self.services.iter().filter_map(Weak::upgrade) { + service.read().await.collect_resources(&mut resource_batch); } - for host in self.hosts.iter_mut() { - host.write().await.collect_resources(&mut resource_batch); + for host in self.hosts.iter().filter_map(Weak::upgrade) { + host.collect_resources(&mut resource_batch); } - let result = Arc::new( - progress::ProgressTracker::with_group("provision", None, || async { + let resource_result = Arc::new( + progress::ProgressTracker::with_group("provision", Some(1), || async { resource_batch .provision(&mut self.resource_pool, self.last_resource_result.clone()) .await }) .await?, ); - self.last_resource_result = Some(result.clone()); - - progress::ProgressTracker::with_group("provision", None, || { - let hosts_provisioned = - self.hosts - .iter_mut() - .map(|host: &mut Arc>| async { - host.write().await.provision(&result).await; - }); - futures::future::join_all(hosts_provisioned) - }) - .await; - - progress::ProgressTracker::with_group("deploy", None, || { - let services_future = self - .services - .iter_mut() - 
.map(|service: &mut Weak>| async { - service - .upgrade() - .unwrap() - .write() - .await - .deploy(&result) - .await + self.last_resource_result = Some(resource_result.clone()); + + for host in self.hosts.iter().filter_map(Weak::upgrade) { + host.provision(&resource_result); + } + + let upgraded_services = self + .services + .iter() + .filter_map(Weak::upgrade) + .collect::>(); + + progress::ProgressTracker::with_group("prepare", Some(upgraded_services.len()), || { + let services_future = upgraded_services + .iter() + .map(|service: &Arc>| { + let resource_result = &resource_result; + async move { service.write().await.deploy(resource_result).await } }) .collect::>(); futures::stream::iter(services_future) - .buffer_unordered(8) + .buffer_unordered(16) .try_fold((), |_, _| async { Ok(()) }) }) .await?; - progress::ProgressTracker::with_group("ready", None, || { + progress::ProgressTracker::with_group("ready", Some(upgraded_services.len()), || { let all_services_ready = - self.services + upgraded_services .iter() - .map(|service: &Weak>| async { - service.upgrade().unwrap().write().await.ready().await?; + .map(|service: &Arc>| async move { + service.write().await.ready().await?; Ok(()) as Result<()> }); @@ -140,22 +142,15 @@ impl Deployment { } pub async fn start(&mut self) -> Result<()> { - let active_services = self - .services - .iter() - .filter(|service| service.upgrade().is_some()) - .cloned() - .collect::>(); - self.services = active_services; + self.services.retain(|weak| weak.strong_count() > 0); progress::ProgressTracker::with_group("start", None, || { - let all_services_start = - self.services - .iter() - .map(|service: &Weak>| async { - service.upgrade().unwrap().write().await.start().await?; - Ok(()) as Result<()> - }); + let all_services_start = self.services.iter().filter_map(Weak::upgrade).map( + |service: Arc>| async move { + service.write().await.start().await?; + Ok(()) as Result<()> + }, + ); futures::future::try_join_all(all_services_start) }) 
@@ -163,14 +158,30 @@ impl Deployment { Ok(()) } - pub fn add_host T>( - &mut self, - host: F, - ) -> Arc> { - let arc = Arc::new(RwLock::new(host(self.next_host_id))); + pub async fn stop(&mut self) -> Result<()> { + self.services.retain(|weak| weak.strong_count() > 0); + + progress::ProgressTracker::with_group("stop", None, || { + let all_services_stop = self.services.iter().filter_map(Weak::upgrade).map( + |service: Arc>| async move { + service.write().await.stop().await?; + Ok(()) as Result<()> + }, + ); + + futures::future::try_join_all(all_services_stop) + }) + .await?; + Ok(()) + } +} + +impl Deployment { + pub fn add_host T>(&mut self, host: F) -> Arc { + let arc = Arc::new(host(self.next_host_id)); self.next_host_id += 1; - self.hosts.push(arc.clone()); + self.hosts.push(Arc::downgrade(&arc) as Weak); arc } @@ -181,8 +192,52 @@ impl Deployment { let arc = Arc::new(RwLock::new(service.build(self.next_service_id))); self.next_service_id += 1; - let dyn_arc: Arc> = arc.clone(); - self.services.push(Arc::downgrade(&dyn_arc)); + self.services + .push(Arc::downgrade(&arc) as Weak>); arc } } + +/// Buildstructor methods. 
+#[buildstructor::buildstructor] +impl Deployment { + #[allow(clippy::too_many_arguments)] + #[builder(entry = "GcpComputeEngineHost", exit = "add")] + pub fn add_gcp_compute_engine_host( + &mut self, + project: String, + machine_type: String, + image: String, + region: String, + network: Arc>, + user: Option, + startup_script: Option, + ) -> Arc { + self.add_host(|id| { + GcpComputeEngineHost::new( + id, + project, + machine_type, + image, + region, + network, + user, + startup_script, + ) + }) + } + + #[allow(clippy::too_many_arguments)] + #[builder(entry = "AzureHost", exit = "add")] + pub fn add_azure_host( + &mut self, + project: String, + os_type: String, // linux or windows + machine_size: String, + image: Option>, + region: String, + user: Option, + ) -> Arc { + self.add_host(|id| AzureHost::new(id, project, os_type, machine_size, image, region, user)) + } +} diff --git a/hydro_deploy/core/src/gcp.rs b/hydro_deploy/core/src/gcp.rs index ea9d1dd064b9..e827e30b68d4 100644 --- a/hydro_deploy/core/src/gcp.rs +++ b/hydro_deploy/core/src/gcp.rs @@ -1,5 +1,5 @@ use std::collections::HashMap; -use std::sync::Arc; +use std::sync::{Arc, Mutex, OnceLock}; use anyhow::Result; use async_trait::async_trait; @@ -12,7 +12,8 @@ use super::{ ClientStrategy, Host, HostTargetType, LaunchedHost, ResourceBatch, ResourceResult, ServerStrategy, }; -use crate::ssh::LaunchedSSHHost; +use crate::ssh::LaunchedSshHost; +use crate::HostStrategyGetter; pub struct LaunchedComputeEngine { resource_result: Arc, @@ -21,7 +22,7 @@ pub struct LaunchedComputeEngine { pub external_ip: Option, } -impl LaunchedSSHHost for LaunchedComputeEngine { +impl LaunchedSshHost for LaunchedComputeEngine { fn get_external_ip(&self) -> Option { self.external_ip.clone() } @@ -44,13 +45,13 @@ impl LaunchedSSHHost for LaunchedComputeEngine { } #[derive(Debug)] -pub struct GCPNetwork { +pub struct GcpNetwork { pub project: String, pub existing_vpc: Option, id: String, } -impl GCPNetwork { +impl GcpNetwork { pub fn 
new(project: impl Into, existing_vpc: Option) -> Self { Self { project: project.into(), @@ -167,27 +168,32 @@ impl GCPNetwork { } } -pub struct GCPComputeEngineHost { - pub id: usize, - pub project: String, - pub machine_type: String, - pub image: String, - pub region: String, - pub network: Arc>, - pub user: Option, - pub launched: Option>, - external_ports: Vec, +pub struct GcpComputeEngineHost { + /// ID from [`crate::Deployment::add_host`]. + id: usize, + + project: String, + machine_type: String, + image: String, + region: String, + network: Arc>, + user: Option, + startup_script: Option, + pub launched: OnceLock>, // TODO(mingwei): fix pub + external_ports: Mutex>, } -impl GCPComputeEngineHost { +impl GcpComputeEngineHost { + #[allow(clippy::too_many_arguments)] // TODO(mingwei) pub fn new( id: usize, project: impl Into, machine_type: impl Into, image: impl Into, region: impl Into, - network: Arc>, + network: Arc>, user: Option, + startup_script: Option, ) -> Self { Self { id, @@ -197,29 +203,30 @@ impl GCPComputeEngineHost { region: region.into(), network, user, - launched: None, - external_ports: vec![], + startup_script, + launched: OnceLock::new(), + external_ports: Mutex::new(Vec::new()), } } } #[async_trait] -impl Host for GCPComputeEngineHost { +impl Host for GcpComputeEngineHost { fn target_type(&self) -> HostTargetType { HostTargetType::Linux(crate::LinuxArchitecture::AARCH64) } - fn request_port(&mut self, bind_type: &ServerStrategy) { + fn request_port(&self, bind_type: &ServerStrategy) { match bind_type { ServerStrategy::UnixSocket => {} ServerStrategy::InternalTcpPort => {} ServerStrategy::ExternalTcpPort(port) => { - if !self.external_ports.contains(port) { - if self.launched.is_some() { + let mut external_ports = self.external_ports.lock().unwrap(); + if !external_ports.contains(port) { + if self.launched.get().is_some() { todo!("Cannot adjust firewall after host has been launched"); } - - self.external_ports.push(*port); + 
external_ports.push(*port); } } ServerStrategy::Demux(demux) => { @@ -239,7 +246,7 @@ impl Host for GCPComputeEngineHost { } } - fn request_custom_binary(&mut self) { + fn request_custom_binary(&self) { self.request_port(&ServerStrategy::ExternalTcpPort(22)); } @@ -251,12 +258,8 @@ impl Host for GCPComputeEngineHost { self } - fn as_any_mut(&mut self) -> &mut dyn std::any::Any { - self - } - fn collect_resources(&self, resource_batch: &mut ResourceBatch) { - if self.launched.is_some() { + if self.launched.get().is_some() { return; } @@ -339,7 +342,8 @@ impl Host for GCPComputeEngineHost { let mut tags = vec![]; let mut external_interfaces = vec![]; - if self.external_ports.is_empty() { + let external_ports = self.external_ports.lock().unwrap(); + if external_ports.is_empty() { external_interfaces.push(json!({ "network": format!("${{{vpc_path}.self_link}}") })); } else { external_interfaces.push(json!({ @@ -352,7 +356,7 @@ impl Host for GCPComputeEngineHost { })); // open the external ports that were requested - let my_external_tags = self.external_ports.iter().map(|port| { + let my_external_tags = external_ports.iter().map(|port| { let rule_id = nanoid!(8, &TERRAFORM_ALPHABET); let firewall_rule = resource_batch .terraform @@ -386,6 +390,7 @@ impl Host for GCPComputeEngineHost { } ); } + drop(external_ports); // Drop the lock as soon as possible. 
let user = self.user.as_ref().cloned().unwrap_or("hydro".to_string()); resource_batch @@ -413,7 +418,8 @@ impl Host for GCPComputeEngineHost { ] } ], - "network_interface": external_interfaces + "network_interface": external_interfaces, + "metadata_startup_script": self.startup_script, }), ); @@ -429,46 +435,43 @@ impl Host for GCPComputeEngineHost { fn launched(&self) -> Option> { self.launched - .as_ref() + .get() .map(|a| a.clone() as Arc) } - async fn provision(&mut self, resource_result: &Arc) -> Arc { - if self.launched.is_none() { - let id = self.id; - - let internal_ip = resource_result - .terraform - .outputs - .get(&format!("vm-instance-{id}-internal-ip")) - .unwrap() - .value - .clone(); + fn provision(&self, resource_result: &Arc) -> Arc { + self.launched + .get_or_init(|| { + let id = self.id; - let external_ip = resource_result - .terraform - .outputs - .get(&format!("vm-instance-{id}-public-ip")) - .map(|v| v.value.clone()); - - self.launched = Some(Arc::new(LaunchedComputeEngine { - resource_result: resource_result.clone(), - user: self.user.as_ref().cloned().unwrap_or("hydro".to_string()), - internal_ip, - external_ip, - })) - } + let internal_ip = resource_result + .terraform + .outputs + .get(&format!("vm-instance-{id}-internal-ip")) + .unwrap() + .value + .clone(); - self.launched.as_ref().unwrap().clone() + let external_ip = resource_result + .terraform + .outputs + .get(&format!("vm-instance-{id}-public-ip")) + .map(|v| v.value.clone()); + + Arc::new(LaunchedComputeEngine { + resource_result: resource_result.clone(), + user: self.user.as_ref().cloned().unwrap_or("hydro".to_string()), + internal_ip, + external_ip, + }) + }) + .clone() } fn strategy_as_server<'a>( &'a self, client_host: &dyn Host, - ) -> Result<( - ClientStrategy<'a>, - Box ServerStrategy>, - )> { + ) -> Result<(ClientStrategy<'a>, HostStrategyGetter)> { if client_host.can_connect_to(ClientStrategy::UnixSocket(self.id)) { Ok(( ClientStrategy::UnixSocket(self.id), @@ -483,7 
+486,7 @@ impl Host for GCPComputeEngineHost { Ok(( ClientStrategy::ForwardedTcpPort(self), Box::new(|me| { - me.downcast_mut::() + me.downcast_ref::() .unwrap() .request_port(&ServerStrategy::ExternalTcpPort(22)); // needed to forward ServerStrategy::InternalTcpPort @@ -510,7 +513,7 @@ impl Host for GCPComputeEngineHost { } ClientStrategy::InternalTcpPort(target_host) => { if let Some(gcp_target) = - target_host.as_any().downcast_ref::() + target_host.as_any().downcast_ref::() { self.project == gcp_target.project } else { diff --git a/hydro_deploy/core/src/hydroflow_crate/build.rs b/hydro_deploy/core/src/hydroflow_crate/build.rs index a692ed07b9f8..51d98d3fbb03 100644 --- a/hydro_deploy/core/src/hydroflow_crate/build.rs +++ b/hydro_deploy/core/src/hydroflow_crate/build.rs @@ -1,85 +1,110 @@ -use std::collections::HashMap; use std::error::Error; use std::fmt::Display; use std::io::BufRead; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; -use std::sync::{Arc, Mutex}; +use std::sync::OnceLock; use cargo_metadata::diagnostic::Diagnostic; +use memo_map::MemoMap; use nanoid::nanoid; -use once_cell::sync::Lazy; use tokio::sync::OnceCell; use crate::progress::ProgressTracker; use crate::{HostTargetType, LinuxArchitecture}; -#[derive(PartialEq, Eq, Hash)] -struct CacheKey { +/// Build parameters for [`build_crate_memoized`]. +#[derive(PartialEq, Eq, Hash, Clone)] +pub struct BuildParams { + /// The working directory for the build, where the `cargo build` command will be run. Crate root. + /// [`Self::new`] canonicalizes this path. src: PathBuf, + /// `--bin` binary name parameter. bin: Option, + /// `--example` parameter. example: Option, + /// `--profile` parameter. profile: Option, + rustflags: Option, + target_dir: Option, + no_default_features: bool, + /// `--target ` if cross-compiling for linux ([`HostTargetType::Linux`]). target_type: HostTargetType, + /// `--features` flags, will be comma-delimited. 
features: Option>, } +impl BuildParams { + /// Creates a new `BuildParams` and canonicalizes the `src` path. + #[allow(clippy::too_many_arguments)] + pub fn new( + src: impl AsRef, + bin: Option, + example: Option, + profile: Option, + rustflags: Option, + target_dir: Option, + no_default_features: bool, + target_type: HostTargetType, + features: Option>, + ) -> Self { + // `fs::canonicalize` prepends windows paths with the `r"\\?\"` + // https://stackoverflow.com/questions/21194530/what-does-mean-when-prepended-to-a-file-path + // However, this breaks the `include!(concat!(env!("OUT_DIR"), "/my/forward/slash/path.rs"))` + // Rust codegen pattern on windows. To help mitigate this happening in third party crates, we + // instead use `dunce::canonicalize` which is the same as `fs::canonicalize` but avoids the + // `\\?\` prefix when possible. + let src = dunce::canonicalize(src).expect("Failed to canonicalize path for build."); + + BuildParams { + src, + bin, + example, + profile, + rustflags, + target_dir, + no_default_features, + target_type, + features, + } + } +} -pub type BuiltCrate = Arc<(String, Vec, PathBuf)>; +/// Information about a built crate. See [`build_crate`]. +pub struct BuildOutput { + /// A unique but meaningless id. + pub unique_id: String, + /// The binary contents as a byte array. + pub bin_data: Vec, + /// The path to the binary file. [`Self::bin_data`] has a copy of the content. + pub bin_path: PathBuf, +} -static BUILDS: Lazy>>>> = - Lazy::new(Default::default); +/// Build memoization cache. 
+static BUILDS: OnceLock>> = OnceLock::new(); -pub async fn build_crate( - src: impl AsRef, - bin: Option, - example: Option, - profile: Option, - target_type: HostTargetType, - features: Option>, -) -> Result { - // `fs::canonicalize` prepends windows paths with the `r"\\?\"` - // https://stackoverflow.com/questions/21194530/what-does-mean-when-prepended-to-a-file-path - // However, this breaks the `include!(concat!(env!("OUT_DIR"), "/my/forward/slash/path.rs"))` - // Rust codegen pattern on windows. To help mitigate this happening in third party crates, we - // instead use `dunce::canonicalize` which is the same as `fs::canonicalize` but avoids the - // `\\?\` prefix when possible. - let src = dunce::canonicalize(src).expect("Failed to canonicalize path for build."); - - let key = CacheKey { - src: src.clone(), - bin: bin.clone(), - example: example.clone(), - profile: profile.clone(), - target_type, - features: features.clone(), - }; - - let unit_of_work = { - let mut builds = BUILDS.lock().unwrap(); - builds.entry(key).or_default().clone() - // Release BUILDS table lock here. 
- }; - - unit_of_work +pub async fn build_crate_memoized(params: BuildParams) -> Result<&'static BuildOutput, BuildError> { + BUILDS + .get_or_init(MemoMap::new) + .get_or_insert(¶ms, Default::default) .get_or_try_init(move || { - ProgressTracker::rich_leaf("build".to_string(), move |_, set_msg| async move { + ProgressTracker::rich_leaf("build", move |set_msg| async move { tokio::task::spawn_blocking(move || { let mut command = Command::new("cargo"); - command.args([ - "build".to_string(), - "--profile".to_string(), - profile.unwrap_or("release".to_string()), - ]); + command.args(["build"]); - if let Some(bin) = bin.as_ref() { + if let Some(profile) = params.profile.as_ref() { + command.args(["--profile", profile]); + } + + if let Some(bin) = params.bin.as_ref() { command.args(["--bin", bin]); } - if let Some(example) = example.as_ref() { + if let Some(example) = params.example.as_ref() { command.args(["--example", example]); } - match target_type { + match params.target_type { HostTargetType::Local => {} HostTargetType::Linux(LinuxArchitecture::X86_64) => { command.args(["--target", "x86_64-unknown-linux-musl"]); @@ -89,14 +114,26 @@ pub async fn build_crate( } } - if let Some(features) = features { + if params.no_default_features { + command.arg("--no-default-features"); + } + + if let Some(features) = params.features { command.args(["--features", &features.join(",")]); } command.arg("--message-format=json-diagnostic-rendered-ansi"); + if let Some(rustflags) = params.rustflags.as_ref() { + command.env("RUSTFLAGS", rustflags); + } + + if let Some(target_dir) = params.target_dir.as_ref() { + command.env("CARGO_TARGET_DIR", target_dir); + } + let mut spawned = command - .current_dir(&src) + .current_dir(¶ms.src) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .stdin(Stdio::null()) @@ -122,7 +159,7 @@ pub async fn build_crate( for message in cargo_metadata::Message::parse_stream(reader) { match message.unwrap() { cargo_metadata::Message::CompilerArtifact(artifact) => 
{ - let is_output = if example.is_some() { + let is_output = if params.example.is_some() { artifact.target.kind.contains(&"example".to_string()) } else { artifact.target.kind.contains(&"bin".to_string()) @@ -133,7 +170,11 @@ pub async fn build_crate( let path_buf: PathBuf = path.clone().into(); let path = path.into_string(); let data = std::fs::read(path).unwrap(); - return Ok(Arc::new((nanoid!(8), data, path_buf))); + return Ok(BuildOutput { + unique_id: nanoid!(8), + bin_data: data, + bin_path: path_buf, + }); } } cargo_metadata::Message::CompilerMessage(msg) => { @@ -155,7 +196,6 @@ pub async fn build_crate( }) }) .await - .cloned() } #[derive(Clone, Debug)] diff --git a/hydro_deploy/core/src/hydroflow_crate/flamegraph.rs b/hydro_deploy/core/src/hydroflow_crate/flamegraph.rs new file mode 100644 index 000000000000..3eed5077bc93 --- /dev/null +++ b/hydro_deploy/core/src/hydroflow_crate/flamegraph.rs @@ -0,0 +1,89 @@ +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; + +use anyhow::{Error, Result}; +use futures::stream::FuturesUnordered; +use tokio_stream::StreamExt; + +use super::tracing_options::TracingOptions; + +pub async fn handle_fold_data( + tracing: &TracingOptions, + fold_data: impl Into>, +) -> Result<()> { + // Wrap in Arc to allow sharing data across multiple outputs. 
+ let fold_data = &fold_data.into(); + let output_tasks = + FuturesUnordered::> + Send + Sync>>>::new(); + + // fold_outfile + if let Some(fold_outfile) = tracing.fold_outfile.clone() { + let fold_data = Arc::clone(fold_data); + output_tasks.push(Box::pin(async move { + let mut reader = &*fold_data; + let mut writer = tokio::fs::File::create(fold_outfile).await?; + tokio::io::copy_buf(&mut reader, &mut writer).await?; + Ok(()) + })); + }; + + // flamegraph_outfile + if let Some(flamegraph_outfile) = tracing.flamegraph_outfile.clone() { + let mut options = tracing + .flamegraph_options + .map(|f| (f)()) + .unwrap_or_default(); + output_tasks.push(Box::pin(async move { + let writer = tokio::fs::File::create(flamegraph_outfile) + .await? + .into_std() + .await; + let fold_data = Arc::clone(fold_data); + tokio::task::spawn_blocking(move || { + inferno::flamegraph::from_lines( + &mut options, + fold_data + .split(|&b| b == b'\n') + .map(std::str::from_utf8) + .map(Result::unwrap), + writer, + ) + }) + .await??; + Ok(()) + })); + }; + + let errors = output_tasks + .filter_map(Result::err) + .collect::>() + .await; + if !errors.is_empty() { + Err(MultipleErrors { errors })?; + }; + + Ok(()) +} + +#[derive(Debug)] +struct MultipleErrors { + errors: Vec, +} +impl std::fmt::Display for MultipleErrors { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if 1 == self.errors.len() { + self.errors.first().unwrap().fmt(f) + } else { + writeln!(f, "({}) errors occured:", self.errors.len())?; + writeln!(f)?; + for (i, error) in self.errors.iter().enumerate() { + write!(f, "({}/{}):", i + 1, self.errors.len())?; + error.fmt(f)?; + writeln!(f)?; + } + Ok(()) + } + } +} +impl std::error::Error for MultipleErrors {} diff --git a/hydro_deploy/core/src/hydroflow_crate/mod.rs b/hydro_deploy/core/src/hydroflow_crate/mod.rs index ff500694e904..adc0dc724708 100644 --- a/hydro_deploy/core/src/hydroflow_crate/mod.rs +++ b/hydro_deploy/core/src/hydroflow_crate/mod.rs @@ 
-1,31 +1,41 @@ use std::path::PathBuf; use std::sync::Arc; -use tokio::sync::RwLock; +use nameof::name_of; +use tracing_options::TracingOptions; use super::Host; use crate::ServiceBuilder; -mod build; +pub(crate) mod build; pub mod ports; pub mod service; pub use service::*; -#[derive(PartialEq)] +pub(crate) mod flamegraph; +pub mod tracing_options; + +#[derive(PartialEq, Clone)] pub enum CrateTarget { Default, Bin(String), Example(String), } -/// Specifies a crate that uses `hydroflow_cli_integration` to be +/// Specifies a crate that uses `hydroflow_deploy_integration` to be /// deployed as a service. +#[derive(Clone)] pub struct HydroflowCrate { src: PathBuf, target: CrateTarget, - on: Arc>, + on: Arc, profile: Option, + rustflags: Option, + target_dir: Option, + no_default_features: bool, + features: Option>, + tracing: Option, args: Vec, display_name: Option, } @@ -34,12 +44,17 @@ impl HydroflowCrate { /// Creates a new `HydroflowCrate` that will be deployed on the given host. /// The `src` argument is the path to the crate's directory, and the `on` /// argument is the host that the crate will be deployed on. - pub fn new(src: impl Into, on: Arc>) -> Self { + pub fn new(src: impl Into, on: Arc) -> Self { Self { src: src.into(), target: CrateTarget::Default, on, profile: None, + rustflags: None, + target_dir: None, + no_default_features: false, + features: None, + tracing: None, args: vec![], display_name: None, } @@ -49,7 +64,7 @@ impl HydroflowCrate { /// equivalent to `cargo run --bin `. pub fn bin(mut self, bin: impl Into) -> Self { if self.target != CrateTarget::Default { - panic!("target already set"); + panic!("{} already set", name_of!(target in Self)); } self.target = CrateTarget::Bin(bin.into()); @@ -60,7 +75,7 @@ impl HydroflowCrate { /// equivalent to `cargo run --example `. 
pub fn example(mut self, example: impl Into) -> Self { if self.target != CrateTarget::Default { - panic!("target already set"); + panic!("{} already set", name_of!(target in Self)); } self.target = CrateTarget::Example(example.into()); @@ -71,13 +86,54 @@ impl HydroflowCrate { /// Equivalent to `cargo run --profile `. pub fn profile(mut self, profile: impl Into) -> Self { if self.profile.is_some() { - panic!("profile already set"); + panic!("{} already set", name_of!(profile in Self)); } self.profile = Some(profile.into()); self } + pub fn rustflags(mut self, rustflags: impl Into) -> Self { + if self.rustflags.is_some() { + panic!("{} already set", name_of!(rustflags in Self)); + } + + self.rustflags = Some(rustflags.into()); + self + } + + pub fn target_dir(mut self, target_dir: impl Into) -> Self { + if self.target_dir.is_some() { + panic!("{} already set", name_of!(target_dir in Self)); + } + + self.target_dir = Some(target_dir.into()); + self + } + + pub fn no_default_features(mut self) -> Self { + self.no_default_features = true; + self + } + + pub fn features(mut self, features: impl IntoIterator>) -> Self { + if self.features.is_some() { + panic!("{} already set", name_of!(features in Self)); + } + + self.features = Some(features.into_iter().map(|s| s.into()).collect()); + self + } + + pub fn tracing(mut self, perf: impl Into) -> Self { + if self.tracing.is_some() { + panic!("{} already set", name_of!(tracing in Self)); + } + + self.tracing = Some(perf.into()); + self + } + /// Sets the arguments to be passed to the binary when it is launched. pub fn args(mut self, args: impl IntoIterator>) -> Self { self.args.extend(args.into_iter().map(|s| s.into())); @@ -87,7 +143,7 @@ impl HydroflowCrate { /// Sets the display name for this service, which will be used in logging. 
pub fn display_name(mut self, display_name: impl Into) -> Self { if self.display_name.is_some() { - panic!("display_name already set"); + panic!("{} already set", name_of!(display_name in Self)); } self.display_name = Some(display_name.into()); @@ -111,7 +167,11 @@ impl ServiceBuilder for HydroflowCrate { bin, example, self.profile, - None, + self.rustflags, + self.target_dir, + self.no_default_features, + self.tracing, + self.features, Some(self.args), self.display_name, vec![], @@ -128,22 +188,20 @@ mod tests { async fn test_crate_panic() { let mut deployment = deployment::Deployment::new(); - let localhost = deployment.Localhost(); - let service = deployment.add_service( - HydroflowCrate::new("../hydro_cli_examples", localhost.clone()) + HydroflowCrate::new("../hydro_cli_examples", deployment.Localhost()) .example("panic_program") .profile("dev"), ); deployment.deploy().await.unwrap(); - let stdout = service.try_read().unwrap().stdout().await; + let mut stdout = service.try_read().unwrap().stdout(); deployment.start().await.unwrap(); assert_eq!(stdout.recv().await.unwrap(), "hello!"); - assert!(stdout.recv().await.is_err()); + assert!(stdout.recv().await.is_none()); } } diff --git a/hydro_deploy/core/src/hydroflow_crate/ports.rs b/hydro_deploy/core/src/hydroflow_crate/ports.rs index b72a8130ae06..2d346ba61b4c 100644 --- a/hydro_deploy/core/src/hydroflow_crate/ports.rs +++ b/hydro_deploy/core/src/hydroflow_crate/ports.rs @@ -7,7 +7,7 @@ use anyhow::Result; use async_recursion::async_recursion; use async_trait::async_trait; use dyn_clone::DynClone; -use hydroflow_cli_integration::ServerPort; +use hydroflow_deploy_integration::ServerPort; use tokio::sync::RwLock; use super::HydroflowCrateService; @@ -15,17 +15,17 @@ use crate::{ClientStrategy, Host, HostStrategyGetter, LaunchedHost, ServerStrate pub trait HydroflowSource: Send + Sync { fn source_path(&self) -> SourcePath; - fn record_server_config(&mut self, config: ServerConfig); + fn record_server_config(&self, 
config: ServerConfig); - fn host(&self) -> Arc>; + fn host(&self) -> Arc; fn server(&self) -> Arc; - fn record_server_strategy(&mut self, config: ServerStrategy); + fn record_server_strategy(&self, config: ServerStrategy); fn wrap_reverse_server_config(&self, config: ServerConfig) -> ServerConfig { config } - fn send_to(&mut self, sink: &mut dyn HydroflowSink) { + fn send_to(&self, sink: &dyn HydroflowSink) { let forward_res = sink.instantiate(&self.source_path()); if let Ok(instantiated) = forward_res { self.record_server_config(instantiated()); @@ -36,21 +36,20 @@ pub trait HydroflowSource: Send + Sync { self.wrap_reverse_server_config(p) }) .unwrap(); - self.record_server_strategy(instantiated(sink.as_any_mut())); + self.record_server_strategy(instantiated(sink.as_any())); } } } -#[async_trait] pub trait HydroflowServer: DynClone + Send + Sync { fn get_port(&self) -> ServerPort; - async fn launched_host(&self) -> Arc; + fn launched_host(&self) -> Arc; } -pub type ReverseSinkInstantiator = Box ServerStrategy>; +pub type ReverseSinkInstantiator = Box ServerStrategy>; pub trait HydroflowSink: Send + Sync { - fn as_any_mut(&mut self) -> &mut dyn Any; + fn as_any(&self) -> &dyn Any; /// Instantiate the sink as the source host connecting to the sink host. /// Returns a thunk that can be called to perform mutations that instantiate the sink. @@ -60,49 +59,40 @@ pub trait HydroflowSink: Send + Sync { /// Returns a thunk that can be called to perform mutations that instantiate the sink, taking a mutable reference to this sink. 
fn instantiate_reverse( &self, - server_host: &Arc>, + server_host: &Arc, server_sink: Arc, wrap_client_port: &dyn Fn(ServerConfig) -> ServerConfig, ) -> Result; } pub struct TaggedSource { - pub source: Arc>, + pub source: Arc, pub tag: u32, } impl HydroflowSource for TaggedSource { fn source_path(&self) -> SourcePath { - SourcePath::Tagged( - Box::new(self.source.try_read().unwrap().source_path()), - self.tag, - ) + SourcePath::Tagged(Box::new(self.source.source_path()), self.tag) } - fn record_server_config(&mut self, config: ServerConfig) { - self.source - .try_write() - .unwrap() - .record_server_config(config); + fn record_server_config(&self, config: ServerConfig) { + self.source.record_server_config(config); } - fn host(&self) -> Arc> { - self.source.try_read().unwrap().host() + fn host(&self) -> Arc { + self.source.host() } fn server(&self) -> Arc { - self.source.try_read().unwrap().server() + self.source.server() } fn wrap_reverse_server_config(&self, config: ServerConfig) -> ServerConfig { ServerConfig::Tagged(Box::new(config), self.tag) } - fn record_server_strategy(&mut self, config: ServerStrategy) { - self.source - .try_write() - .unwrap() - .record_server_strategy(config); + fn record_server_strategy(&self, config: ServerStrategy) { + self.source.record_server_strategy(config); } } @@ -113,7 +103,7 @@ impl HydroflowSource for NullSourceSink { SourcePath::Null } - fn host(&self) -> Arc> { + fn host(&self) -> Arc { panic!("null source has no host") } @@ -121,12 +111,12 @@ impl HydroflowSource for NullSourceSink { panic!("null source has no server") } - fn record_server_config(&mut self, _config: ServerConfig) {} - fn record_server_strategy(&mut self, _config: ServerStrategy) {} + fn record_server_config(&self, _config: ServerConfig) {} + fn record_server_strategy(&self, _config: ServerStrategy) {} } impl HydroflowSink for NullSourceSink { - fn as_any_mut(&mut self) -> &mut dyn Any { + fn as_any(&self) -> &dyn Any { self } @@ -136,7 +126,7 @@ impl 
HydroflowSink for NullSourceSink { fn instantiate_reverse( &self, - _server_host: &Arc>, + _server_host: &Arc, _server_sink: Arc, _wrap_client_port: &dyn Fn(ServerConfig) -> ServerConfig, ) -> Result { @@ -145,18 +135,18 @@ impl HydroflowSink for NullSourceSink { } pub struct DemuxSink { - pub demux: HashMap>>, + pub demux: HashMap>, } impl HydroflowSink for DemuxSink { - fn as_any_mut(&mut self) -> &mut dyn Any { + fn as_any(&self) -> &dyn Any { self } fn instantiate(&self, client_host: &SourcePath) -> Result ServerConfig>> { let mut thunk_map = HashMap::new(); for (key, target) in &self.demux { - thunk_map.insert(*key, target.try_read().unwrap().instantiate(client_host)?); + thunk_map.insert(*key, target.instantiate(client_host)?); } Ok(Box::new(move || { @@ -171,15 +161,15 @@ impl HydroflowSink for DemuxSink { fn instantiate_reverse( &self, - server_host: &Arc>, + server_host: &Arc, server_sink: Arc, wrap_client_port: &dyn Fn(ServerConfig) -> ServerConfig, - ) -> Result ServerStrategy>> { + ) -> Result { let mut thunk_map = HashMap::new(); for (key, target) in &self.demux { thunk_map.insert( *key, - target.try_write().unwrap().instantiate_reverse( + target.instantiate_reverse( server_host, server_sink.clone(), // the parent wrapper selects the demux port for the parent defn, so do that first @@ -189,22 +179,10 @@ impl HydroflowSink for DemuxSink { } Ok(Box::new(move |me| { - let me = me.downcast_mut::().unwrap(); + let me = me.downcast_ref::().unwrap(); let instantiated_map = thunk_map .into_iter() - .map(|(key, thunk)| { - ( - key, - thunk( - me.demux - .get_mut(&key) - .unwrap() - .try_write() - .unwrap() - .as_any_mut(), - ), - ) - }) + .map(|(key, thunk)| (key, thunk(me.demux.get(&key).unwrap().as_any()))) .collect(); ServerStrategy::Demux(instantiated_map) @@ -215,7 +193,7 @@ impl HydroflowSink for DemuxSink { #[derive(Clone)] pub struct HydroflowPortConfig { pub service: Weak>, - pub service_host: Arc>, + pub service_host: Arc, pub service_server_defns: 
Arc>>, pub port: String, pub merge: bool, @@ -246,7 +224,7 @@ impl HydroflowSource for HydroflowPortConfig { ) } - fn host(&self) -> Arc> { + fn host(&self) -> Arc { self.service_host.clone() } @@ -263,7 +241,7 @@ impl HydroflowSource for HydroflowPortConfig { }) } - fn record_server_config(&mut self, config: ServerConfig) { + fn record_server_config(&self, config: ServerConfig) { let from = self.service.upgrade().unwrap(); let mut from_write = from.try_write().unwrap(); @@ -272,7 +250,7 @@ impl HydroflowSource for HydroflowPortConfig { from_write.port_to_server.insert(self.port.clone(), config); } - fn record_server_strategy(&mut self, config: ServerStrategy) { + fn record_server_strategy(&self, config: ServerStrategy) { let from = self.service.upgrade().unwrap(); let mut from_write = from.try_write().unwrap(); @@ -289,14 +267,14 @@ impl HydroflowServer for HydroflowPortConfig { server_defns.get(&self.port).unwrap().clone() } - async fn launched_host(&self) -> Arc { - self.service_host.read().await.launched().unwrap() + fn launched_host(&self) -> Arc { + self.service_host.launched().unwrap() } } pub enum SourcePath { Null, - Direct(Arc>), + Direct(Arc), Tagged(Box, u32), } @@ -308,7 +286,6 @@ impl SourcePath { ) -> Result<(HostStrategyGetter, ServerConfig)> { match self { SourcePath::Direct(client_host) => { - let client_host = client_host.try_read().unwrap(); let (conn_type, bind_type) = server_host.strategy_as_server(client_host.deref())?; let base_config = ServerConfig::from_strategy(&conn_type, Arc::new(server.clone())); Ok((bind_type, base_config)) @@ -334,7 +311,7 @@ impl SourcePath { } impl HydroflowSink for HydroflowPortConfig { - fn as_any_mut(&mut self) -> &mut dyn Any { + fn as_any(&self) -> &dyn Any { self } @@ -342,8 +319,7 @@ impl HydroflowSink for HydroflowPortConfig { let server = self.service.upgrade().unwrap(); let server_read = server.try_read().unwrap(); - let server_host_clone = server_read.on.clone(); - let server_host = 
server_host_clone.try_read().unwrap(); + let server_host = server_read.on.clone(); let (bind_type, base_config) = client_path.plan(self, server_host.deref())?; @@ -352,7 +328,7 @@ impl HydroflowSink for HydroflowPortConfig { let port = self.port.clone(); Ok(Box::new(move || { let mut server_write = server.try_write().unwrap(); - let bind_type = bind_type(server_write.on.try_write().unwrap().as_any_mut()); + let bind_type = (bind_type)(server_write.on.as_any()); if merge { let merge_config = server_write @@ -377,18 +353,16 @@ impl HydroflowSink for HydroflowPortConfig { fn instantiate_reverse( &self, - server_host: &Arc>, + server_host: &Arc, server_sink: Arc, wrap_client_port: &dyn Fn(ServerConfig) -> ServerConfig, - ) -> Result ServerStrategy>> { + ) -> Result { let client = self.service.upgrade().unwrap(); let client_read = client.try_read().unwrap(); - let server_host_clone = server_host.clone(); - let server_host = server_host_clone.try_read().unwrap(); + let server_host = server_host.clone(); - let (conn_type, bind_type) = - server_host.strategy_as_server(client_read.on.try_read().unwrap().deref())?; + let (conn_type, bind_type) = server_host.strategy_as_server(client_read.on.deref())?; let client_port = wrap_client_port(ServerConfig::from_strategy(&conn_type, server_sink)); let client = client.clone(); @@ -415,8 +389,7 @@ impl HydroflowSink for HydroflowPortConfig { .insert(port.clone(), client_port); }; - let mut server_host = client_write.on.try_write().unwrap(); - bind_type(server_host.as_any_mut()) + (bind_type)(client_write.on.as_any()) })) } } @@ -489,7 +462,7 @@ impl ServerConfig { ServerConfig::Forwarded(server) => { let selected = select(server.get_port()); - forward_connection(&selected, server.launched_host().await.as_ref()).await + forward_connection(&selected, server.launched_host().as_ref()).await } ServerConfig::Demux(demux) => { diff --git a/hydro_deploy/core/src/hydroflow_crate/service.rs b/hydro_deploy/core/src/hydroflow_crate/service.rs 
index 9e3bdfa5936b..08c73d5e03ea 100644 --- a/hydro_deploy/core/src/hydroflow_crate/service.rs +++ b/hydro_deploy/core/src/hydroflow_crate/service.rs @@ -3,45 +3,38 @@ use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; -use anyhow::{bail, Result}; -use async_channel::Receiver; +use anyhow::{bail, Context, Result}; use async_trait::async_trait; -use futures_core::Future; -use hydroflow_cli_integration::{InitConfig, ServerPort}; +use futures::Future; +use hydroflow_deploy_integration::{InitConfig, ServerPort}; use serde::Serialize; -use tokio::sync::RwLock; +use tokio::sync::{mpsc, RwLock}; -use super::build::{build_crate, BuildError, BuiltCrate}; +use super::build::{build_crate_memoized, BuildError, BuildOutput, BuildParams}; use super::ports::{self, HydroflowPortConfig, HydroflowSink, SourcePath}; +use super::tracing_options::TracingOptions; use crate::progress::ProgressTracker; use crate::{ - Host, HostTargetType, LaunchedBinary, LaunchedHost, ResourceBatch, ResourceResult, - ServerStrategy, Service, + Host, LaunchedBinary, LaunchedHost, ResourceBatch, ResourceResult, ServerStrategy, Service, }; pub struct HydroflowCrateService { id: usize, - src: PathBuf, - pub(super) on: Arc>, - bin: Option, - example: Option, - profile: Option, - features: Option>, + pub(super) on: Arc, + build_params: BuildParams, + tracing: Option, args: Option>, display_id: Option, external_ports: Vec, meta: Option, - target_type: HostTargetType, - /// Configuration for the ports this service will connect to as a client. pub(super) port_to_server: HashMap, /// Configuration for the ports that this service will listen on a port for. pub(super) port_to_bind: HashMap, - built_binary: Arc>>, launched_host: Option>, /// A map of port names to config for how other services can connect to this one. @@ -49,7 +42,7 @@ pub struct HydroflowCrateService { /// in `server_ports`. 
pub(super) server_defns: Arc>>, - launched_binary: Option>>, + launched_binary: Option>, started: bool, } @@ -58,33 +51,44 @@ impl HydroflowCrateService { pub fn new( id: usize, src: PathBuf, - on: Arc>, + on: Arc, bin: Option, example: Option, profile: Option, + rustflags: Option, + target_dir: Option, + no_default_features: bool, + tracing: Option, features: Option>, args: Option>, display_id: Option, external_ports: Vec, ) -> Self { - let target_type = on.try_read().unwrap().target_type(); + let target_type = on.target_type(); - Self { - id, + let build_params = BuildParams::new( src, bin, - on, example, profile, + rustflags, + target_dir, + no_default_features, + target_type, features, + ); + + Self { + id, + on, + build_params, + tracing, args, display_id, - target_type, external_ports, meta: None, port_to_server: HashMap::new(), port_to_bind: HashMap::new(), - built_binary: Arc::new(async_once_cell::OnceCell::new()), launched_host: None, server_defns: Arc::new(RwLock::new(HashMap::new())), launched_binary: None, @@ -118,7 +122,7 @@ impl HydroflowCrateService { &mut self, self_arc: &Arc>, my_port: String, - sink: &mut dyn HydroflowSink, + sink: &dyn HydroflowSink, ) -> Result<()> { let forward_res = sink.instantiate(&SourcePath::Direct(self.on.clone())); if let Ok(instantiated) = forward_res { @@ -142,80 +146,40 @@ impl HydroflowCrateService { assert!(!self.port_to_bind.contains_key(&my_port)); self.port_to_bind - .insert(my_port, instantiated(sink.as_any_mut())); + .insert(my_port, instantiated(sink.as_any())); Ok(()) } } - pub async fn stdout(&self) -> Receiver { - self.launched_binary - .as_ref() - .unwrap() - .read() - .await - .stdout() - .await + pub fn stdout(&self) -> mpsc::UnboundedReceiver { + self.launched_binary.as_ref().unwrap().stdout() } - pub async fn stderr(&self) -> Receiver { - self.launched_binary - .as_ref() - .unwrap() - .read() - .await - .stderr() - .await + pub fn stderr(&self) -> mpsc::UnboundedReceiver { + 
self.launched_binary.as_ref().unwrap().stderr() } - pub async fn exit_code(&self) -> Option { - self.launched_binary - .as_ref() - .unwrap() - .read() - .await - .exit_code() - .await + pub fn exit_code(&self) -> Option { + self.launched_binary.as_ref().unwrap().exit_code() } - fn build(&self) -> impl Future> { - let src_cloned = self.src.clone(); - let bin_cloned = self.bin.clone(); - let example_cloned = self.example.clone(); - let features_cloned = self.features.clone(); - let profile_cloned = self.profile.clone(); - let target_type = self.target_type; - let built_binary_cloned = self.built_binary.clone(); - - async move { - built_binary_cloned - .get_or_init(build_crate( - src_cloned, - bin_cloned, - example_cloned, - profile_cloned, - target_type, - features_cloned, - )) - .await - .clone() - } + fn build(&self) -> impl Future> { + // Memoized, so no caching in `self` is needed. + build_crate_memoized(self.build_params.clone()) } } #[async_trait] impl Service for HydroflowCrateService { - fn collect_resources(&mut self, _resource_batch: &mut ResourceBatch) { + fn collect_resources(&self, _resource_batch: &mut ResourceBatch) { if self.launched_host.is_some() { return; } tokio::task::spawn(self.build()); - let mut host = self - .on - .try_write() - .expect("No one should be reading/writing the host while resources are collected"); + let host = &self.on; host.request_custom_binary(); for (_, bind_type) in self.port_to_bind.iter() { @@ -233,18 +197,17 @@ impl Service for HydroflowCrateService { } ProgressTracker::with_group( - &self - .display_id + self.display_id .clone() .unwrap_or_else(|| format!("service/{}", self.id)), None, || async { - let built = self.build().await?; + let built = ProgressTracker::leaf("build", self.build()).await?; - let mut host_write = self.on.write().await; - let launched = host_write.provision(resource_result).await; + let host = &self.on; + let launched = host.provision(resource_result); - launched.copy_binary(built.clone()).await?; 
+ launched.copy_binary(built).await?; self.launched_host = Some(launched); Ok(()) @@ -259,8 +222,7 @@ impl Service for HydroflowCrateService { } ProgressTracker::with_group( - &self - .display_id + self.display_id .clone() .unwrap_or_else(|| format!("service/{}", self.id)), None, @@ -275,8 +237,9 @@ impl Service for HydroflowCrateService { self.display_id .clone() .unwrap_or_else(|| format!("service/{}", self.id)), - built.clone(), + built, &args, + self.tracing.clone(), ) .await?; @@ -289,21 +252,17 @@ impl Service for HydroflowCrateService { serde_json::to_string::(&(bind_config, self.meta.clone())).unwrap(); // request stdout before sending config so we don't miss the "ready" response - let stdout_receiver = binary.write().await.cli_stdout().await; - - binary - .write() - .await - .stdin() - .await - .send(format!("{formatted_bind_config}\n")) - .await?; + let stdout_receiver = binary.deploy_stdout(); + + binary.stdin().send(format!("{formatted_bind_config}\n"))?; let ready_line = ProgressTracker::leaf( - "waiting for ready".to_string(), + "waiting for ready", tokio::time::timeout(Duration::from_secs(60), stdout_receiver), ) - .await??; + .await + .context("Timed out waiting for ready")? 
+ .context("Program unexpectedly quit")?; if ready_line.starts_with("ready: ") { *self.server_defns.try_write().unwrap() = serde_json::from_str(ready_line.trim_start_matches("ready: ")).unwrap(); @@ -331,28 +290,20 @@ impl Service for HydroflowCrateService { let formatted_defns = serde_json::to_string(&sink_ports).unwrap(); - let stdout_receiver = self - .launched_binary - .as_mut() - .unwrap() - .write() - .await - .cli_stdout() - .await; + let stdout_receiver = self.launched_binary.as_ref().unwrap().deploy_stdout(); self.launched_binary - .as_mut() + .as_ref() .unwrap() - .write() - .await .stdin() - .await .send(format!("start: {formatted_defns}\n")) - .await .unwrap(); let start_ack_line = ProgressTracker::leaf( - "waiting for ack start".to_string(), + self.display_id + .clone() + .unwrap_or_else(|| format!("service/{}", self.id)) + + " / waiting for ack start", tokio::time::timeout(Duration::from_secs(60), stdout_receiver), ) .await??; @@ -365,24 +316,30 @@ impl Service for HydroflowCrateService { } async fn stop(&mut self) -> Result<()> { - self.launched_binary - .as_mut() - .unwrap() - .write() - .await - .stdin() - .await - .send("stop\n".to_string()) - .await?; + ProgressTracker::with_group( + self.display_id + .clone() + .unwrap_or_else(|| format!("service/{}", self.id)), + None, + || async { + let launched_binary = self.launched_binary.as_mut().unwrap(); + launched_binary.stdin().send("stop\n".to_string())?; - self.launched_binary - .as_mut() - .unwrap() - .write() - .await - .wait() - .await; + let timeout_result = ProgressTracker::leaf( + "waiting for exit", + tokio::time::timeout(Duration::from_secs(60), launched_binary.wait()), + ) + .await; + match timeout_result { + Err(_timeout) => {} // `wait()` timed out, but stop will force quit. + Ok(Err(unexpected_error)) => return Err(unexpected_error), // `wait()` errored. 
+ Ok(Ok(_exit_status)) => {} + } + launched_binary.stop().await?; - Ok(()) + Ok(()) + }, + ) + .await } } diff --git a/hydro_deploy/core/src/hydroflow_crate/tracing_options.rs b/hydro_deploy/core/src/hydroflow_crate/tracing_options.rs new file mode 100644 index 000000000000..c7f2957a6200 --- /dev/null +++ b/hydro_deploy/core/src/hydroflow_crate/tracing_options.rs @@ -0,0 +1,29 @@ +use std::path::PathBuf; + +use inferno::collapse::dtrace::Options as DtraceOptions; +use inferno::collapse::perf::Options as PerfOptions; + +type FlamegraphOptions = inferno::flamegraph::Options<'static>; + +#[derive(Clone, buildstructor::Builder)] +#[non_exhaustive] // Prevent direct construction. +pub struct TracingOptions { + /// Samples per second. + pub frequency: u32, + + /// Output filename for `dtrace`. Example: `my_worker.stacks`. + pub dtrace_outfile: Option, + + /// Output filename for the raw data emitted by `perf record`. Example: `my_worker.perf.data`. + pub perf_raw_outfile: Option, + // /// Output filename for `perf script -i <`[`Self::perf_raw_outfile`]`>`. Example: `my_worker.perf`. + // pub perf_script_outfile: Option, + /// If set, what the write the folded output to. + pub fold_outfile: Option, + pub fold_dtrace_options: Option, + pub fold_perf_options: Option, + /// If set, what to write the output flamegraph SVG file to. + pub flamegraph_outfile: Option, + // This type is super annoying and isn't `clone` and has a lifetime... so wrap in fn pointer for now. 
+ pub flamegraph_options: Option FlamegraphOptions>, +} diff --git a/hydro_deploy/core/src/lib.rs b/hydro_deploy/core/src/lib.rs index 31f8f3f00ce4..72b5b01af6ea 100644 --- a/hydro_deploy/core/src/lib.rs +++ b/hydro_deploy/core/src/lib.rs @@ -1,13 +1,13 @@ +#![allow(clippy::let_and_return)] + use std::collections::HashMap; use std::net::SocketAddr; -use std::path::PathBuf; use std::sync::Arc; use anyhow::Result; -use async_channel::{Receiver, Sender}; use async_trait::async_trait; -use hydroflow_cli_integration::ServerBindConfig; -use tokio::sync::RwLock; +use hydroflow_crate::tracing_options::TracingOptions; +use hydroflow_deploy_integration::ServerBindConfig; pub mod deployment; pub use deployment::Deployment; @@ -20,7 +20,7 @@ pub use localhost::LocalhostHost; pub mod ssh; pub mod gcp; -pub use gcp::GCPComputeEngineHost; +pub use gcp::GcpComputeEngineHost; pub mod azure; pub use azure::AzureHost; @@ -30,6 +30,9 @@ pub use hydroflow_crate::HydroflowCrate; pub mod custom_service; pub use custom_service::CustomService; +use tokio::sync::{mpsc, oneshot}; + +use crate::hydroflow_crate::build::BuildOutput; pub mod terraform; @@ -71,20 +74,23 @@ pub struct ResourceResult { #[async_trait] pub trait LaunchedBinary: Send + Sync { - async fn stdin(&self) -> Sender; + fn stdin(&self) -> mpsc::UnboundedSender; - /// Provides a oneshot channel for the CLI to handshake with the binary, - /// with the guarantee that as long as the CLI is holding on + /// Provides a oneshot channel to handshake with the binary, + /// with the guarantee that as long as deploy is holding on /// to a handle, none of the messages will also be broadcast /// to the user-facing [`LaunchedBinary::stdout`] channel. 
- async fn cli_stdout(&self) -> tokio::sync::oneshot::Receiver; + fn deploy_stdout(&self) -> oneshot::Receiver; - async fn stdout(&self) -> Receiver; - async fn stderr(&self) -> Receiver; + fn stdout(&self) -> mpsc::UnboundedReceiver; + fn stderr(&self) -> mpsc::UnboundedReceiver; - async fn exit_code(&self) -> Option; + fn exit_code(&self) -> Option; - async fn wait(&mut self) -> Option; + /// Wait for the process to stop on its own. Returns the exit code. + async fn wait(&mut self) -> Result; + /// If the process is still running, force stop it. Then run post-run tasks. + async fn stop(&mut self) -> Result<()>; } #[async_trait] @@ -93,14 +99,15 @@ pub trait LaunchedHost: Send + Sync { /// to listen to network connections (such as the IP address to bind to). fn server_config(&self, strategy: &ServerStrategy) -> ServerBindConfig; - async fn copy_binary(&self, binary: Arc<(String, Vec, PathBuf)>) -> Result<()>; + async fn copy_binary(&self, binary: &BuildOutput) -> Result<()>; async fn launch_binary( &self, id: String, - binary: Arc<(String, Vec, PathBuf)>, + binary: &BuildOutput, args: &[String], - ) -> Result>>; + perf: Option, + ) -> Result>; async fn forward_port(&self, addr: &SocketAddr) -> Result; } @@ -148,31 +155,28 @@ pub enum HostTargetType { Linux(LinuxArchitecture), } -pub type HostStrategyGetter = Box ServerStrategy>; +pub type HostStrategyGetter = Box ServerStrategy>; -#[async_trait] pub trait Host: Send + Sync { fn target_type(&self) -> HostTargetType; - fn request_port(&mut self, bind_type: &ServerStrategy); + fn request_port(&self, bind_type: &ServerStrategy); /// An identifier for this host, which is unique within a deployment. fn id(&self) -> usize; - /// Returns a reference to the host as a trait object. - fn as_any(&self) -> &dyn std::any::Any; - - /// Returns a reference to the host as a trait object. - fn as_any_mut(&mut self) -> &mut dyn std::any::Any; - /// Configures the host to support copying and running a custom binary. 
- fn request_custom_binary(&mut self); + fn request_custom_binary(&self); /// Makes requests for physical resources (servers) that this host needs to run. + /// + /// This should be called before `provision` is called. fn collect_resources(&self, resource_batch: &mut ResourceBatch); /// Connects to the acquired resources and prepares the host to run services. - async fn provision(&mut self, resource_result: &Arc) -> Arc; + /// + /// This should be called after `collect_resources` is called. + fn provision(&self, resource_result: &Arc) -> Arc; fn launched(&self) -> Option>; @@ -185,6 +189,9 @@ pub trait Host: Send + Sync { /// Determines whether this host can connect to another host using the given strategy. fn can_connect_to(&self, typ: ClientStrategy) -> bool; + + /// Returns a reference to the host as a trait object. + fn as_any(&self) -> &dyn std::any::Any; } #[async_trait] @@ -195,7 +202,7 @@ pub trait Service: Send + Sync { /// /// This should also perform any "free", non-blocking computations (compilations), /// because the `deploy` method will be called after these resources are allocated. - fn collect_resources(&mut self, resource_batch: &mut ResourceBatch); + fn collect_resources(&self, resource_batch: &mut ResourceBatch); /// Connects to the acquired resources and prepares the service to be launched. 
async fn deploy(&mut self, resource_result: &Arc) -> Result<()>; diff --git a/hydro_deploy/core/src/localhost/launched_binary.rs b/hydro_deploy/core/src/localhost/launched_binary.rs index 5ae98861a547..90c40f4927ae 100644 --- a/hydro_deploy/core/src/localhost/launched_binary.rs +++ b/hydro_deploy/core/src/localhost/launched_binary.rs @@ -1,30 +1,66 @@ #[cfg(unix)] use std::os::unix::process::ExitStatusExt; -use std::sync::Arc; +use std::process::{ExitStatus, Stdio}; +use std::sync::{Arc, Mutex}; -use async_channel::{Receiver, Sender}; +use anyhow::{anyhow, bail, Result}; +use async_process::Command; use async_trait::async_trait; -use futures::io::BufReader; -use futures::{AsyncBufReadExt, AsyncWriteExt, StreamExt}; -use tokio::sync::RwLock; +use futures::io::BufReader as FuturesBufReader; +use futures::{AsyncBufReadExt as _, AsyncWriteExt as _}; +use inferno::collapse::dtrace::Folder as DtraceFolder; +use inferno::collapse::perf::Folder as PerfFolder; +use inferno::collapse::Collapse; +use nameof::name_of; +use tokio::io::{AsyncBufReadExt as _, BufReader as TokioBufReader}; +use tokio::sync::{mpsc, oneshot}; +use tokio_util::compat::FuturesAsyncReadCompatExt; +use tokio_util::io::SyncIoBridge; +use crate::hydroflow_crate::flamegraph::handle_fold_data; +use crate::hydroflow_crate::tracing_options::TracingOptions; +use crate::progress::ProgressTracker; use crate::util::prioritized_broadcast; use crate::LaunchedBinary; pub struct LaunchedLocalhostBinary { - child: RwLock, - stdin_sender: Sender, - stdout_cli_receivers: Arc>>>, - stdout_receivers: Arc>>>, - stderr_receivers: Arc>>>, + child: Mutex, + tracing: Option, + stdin_sender: mpsc::UnboundedSender, + stdout_deploy_receivers: Arc>>>, + stdout_receivers: Arc>>>, + stderr_receivers: Arc>>>, +} + +#[cfg(unix)] +impl Drop for LaunchedLocalhostBinary { + fn drop(&mut self) { + let mut child = self.child.lock().unwrap(); + + if let Ok(Some(_)) = child.try_status() { + return; + } + + let pid = child.id(); + if let 
Err(e) = nix::sys::signal::kill( + nix::unistd::Pid::from_raw(pid as i32), + nix::sys::signal::SIGTERM, + ) { + ProgressTracker::println(format!("Failed to SIGTERM process {}: {}", pid, e)); + } + } } impl LaunchedLocalhostBinary { - pub fn new(mut child: async_process::Child, id: String) -> Self { - let (stdin_sender, mut stdin_receiver) = async_channel::unbounded::(); + pub fn new( + mut child: async_process::Child, + id: String, + tracing: Option, + ) -> Self { + let (stdin_sender, mut stdin_receiver) = mpsc::unbounded_channel::(); let mut stdin = child.stdin.take().unwrap(); tokio::spawn(async move { - while let Some(line) = stdin_receiver.next().await { + while let Some(line) = stdin_receiver.recv().await { if stdin.write_all(line.as_bytes()).await.is_err() { break; } @@ -34,19 +70,20 @@ impl LaunchedLocalhostBinary { }); let id_clone = id.clone(); - let (stdout_cli_receivers, stdout_receivers) = prioritized_broadcast( - BufReader::new(child.stdout.take().unwrap()).lines(), - move |s| println!("[{id_clone}] {s}"), + let (stdout_deploy_receivers, stdout_receivers) = prioritized_broadcast( + FuturesBufReader::new(child.stdout.take().unwrap()).lines(), + move |s| ProgressTracker::println(format!("[{id_clone}] {s}")), ); let (_, stderr_receivers) = prioritized_broadcast( - BufReader::new(child.stderr.take().unwrap()).lines(), - move |s| eprintln!("[{id}] {s}"), + FuturesBufReader::new(child.stderr.take().unwrap()).lines(), + move |s| ProgressTracker::println(format!("[{id} stderr] {s}")), ); Self { - child: RwLock::new(child), + child: Mutex::new(child), + tracing, stdin_sender, - stdout_cli_receivers, + stdout_deploy_receivers, stdout_receivers, stderr_receivers, } @@ -55,53 +92,145 @@ impl LaunchedLocalhostBinary { #[async_trait] impl LaunchedBinary for LaunchedLocalhostBinary { - async fn stdin(&self) -> Sender { + fn stdin(&self) -> mpsc::UnboundedSender { self.stdin_sender.clone() } - async fn cli_stdout(&self) -> tokio::sync::oneshot::Receiver { - let mut 
receivers = self.stdout_cli_receivers.write().await; + fn deploy_stdout(&self) -> oneshot::Receiver { + let mut receivers = self.stdout_deploy_receivers.lock().unwrap(); if receivers.is_some() { - panic!("Only one CLI stdout receiver is allowed at a time"); + panic!("Only one deploy stdout receiver is allowed at a time"); } - let (sender, receiver) = tokio::sync::oneshot::channel::(); + let (sender, receiver) = oneshot::channel::(); *receivers = Some(sender); receiver } - async fn stdout(&self) -> Receiver { - let mut receivers = self.stdout_receivers.write().await; - let (sender, receiver) = async_channel::unbounded::(); + fn stdout(&self) -> mpsc::UnboundedReceiver { + let mut receivers = self.stdout_receivers.lock().unwrap(); + let (sender, receiver) = mpsc::unbounded_channel::(); receivers.push(sender); receiver } - async fn stderr(&self) -> Receiver { - let mut receivers = self.stderr_receivers.write().await; - let (sender, receiver) = async_channel::unbounded::(); + fn stderr(&self) -> mpsc::UnboundedReceiver { + let mut receivers = self.stderr_receivers.lock().unwrap(); + let (sender, receiver) = mpsc::unbounded_channel::(); receivers.push(sender); receiver } - async fn exit_code(&self) -> Option { + fn exit_code(&self) -> Option { self.child - .write() - .await + .lock() + .unwrap() .try_status() .ok() .flatten() - .and_then(|c| { - #[cfg(unix)] - return c.code().or(c.signal()); - #[cfg(not(unix))] - return c.code(); - }) + .map(exit_code) } - async fn wait(&mut self) -> Option { - let _ = self.child.get_mut().status().await; - self.exit_code().await + async fn wait(&mut self) -> Result { + Ok(exit_code(self.child.get_mut().unwrap().status().await?)) + } + + async fn stop(&mut self) -> Result<()> { + if let Err(err) = self.child.get_mut().unwrap().kill() { + if !matches!(err.kind(), std::io::ErrorKind::InvalidInput) { + Err(err)?; + } + } + + // Run perf post-processing and download perf output. 
+ if let Some(tracing) = self.tracing.as_ref() { + let fold_data = if cfg!(target_os = "macos") || cfg!(target_family = "windows") { + let dtrace_outfile = tracing.dtrace_outfile.as_ref().ok_or_else(|| { + anyhow!( + "`{}` must be set for `dtrace` on localhost.", + name_of!(dtrace_outfile in TracingOptions) + ) + })?; + let mut fold_er = + DtraceFolder::from(tracing.fold_dtrace_options.clone().unwrap_or_default()); + + let fold_data = + ProgressTracker::leaf("fold dtrace output".to_owned(), async move { + let mut fold_data = Vec::new(); + fold_er.collapse_file(Some(dtrace_outfile), &mut fold_data)?; + Result::<_>::Ok(fold_data) + }) + .await?; + fold_data + } else if cfg!(target_family = "unix") { + let perf_raw_outfile = tracing.perf_raw_outfile.as_ref().ok_or_else(|| { + anyhow!( + "`{}` must be set for `perf` on localhost.", + name_of!(perf_raw_outfile in TracingOptions) + ) + })?; + + // Run perf script. + let mut perf_script = Command::new("perf") + .args(["script", "--symfs=/", "-i"]) + .arg(perf_raw_outfile) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn()?; + + let stdout = perf_script.stdout.take().unwrap().compat(); + let mut stderr_lines = + TokioBufReader::new(perf_script.stderr.take().unwrap().compat()).lines(); + + let mut fold_er = + PerfFolder::from(tracing.fold_perf_options.clone().unwrap_or_default()); + + // Pattern on `()` to make sure no `Result`s are ignored. + let ((), fold_data, ()) = tokio::try_join!( + async move { + // Log stderr. + while let Ok(Some(s)) = stderr_lines.next_line().await { + ProgressTracker::println(format!("[perf script stderr] {s}")); + } + Result::<_>::Ok(()) + }, + async move { + // Stream `perf script` stdout and fold. + tokio::task::spawn_blocking(move || { + let mut fold_data = Vec::new(); + fold_er.collapse( + SyncIoBridge::new(tokio::io::BufReader::new(stdout)), + &mut fold_data, + )?; + Ok(fold_data) + }) + .await? + }, + async move { + // Close stdin and wait for command exit. 
+ perf_script.status().await?; + Ok(()) + }, + )?; + fold_data + } else { + bail!( + "Unknown OS for perf/dtrace tracing: {}", + std::env::consts::OS + ); + }; + + handle_fold_data(tracing, fold_data).await?; + }; + + Ok(()) } } + +fn exit_code(c: ExitStatus) -> i32 { + #[cfg(unix)] + return c.code().or(c.signal()).unwrap(); + #[cfg(not(unix))] + return c.code().unwrap(); +} diff --git a/hydro_deploy/core/src/localhost/mod.rs b/hydro_deploy/core/src/localhost/mod.rs index 6bde6ca3b6bb..a15632af030e 100644 --- a/hydro_deploy/core/src/localhost/mod.rs +++ b/hydro_deploy/core/src/localhost/mod.rs @@ -1,18 +1,23 @@ +use std::borrow::Cow; use std::collections::HashMap; use std::net::SocketAddr; -use std::path::PathBuf; +use std::ops::Deref; use std::sync::Arc; -use anyhow::Result; +use anyhow::{anyhow, bail, Context, Result}; use async_process::{Command, Stdio}; use async_trait::async_trait; -use hydroflow_cli_integration::ServerBindConfig; -use tokio::sync::RwLock; +use hydroflow_deploy_integration::ServerBindConfig; +use nameof::name_of; use super::{ ClientStrategy, Host, HostTargetType, LaunchedBinary, LaunchedHost, ResourceBatch, ResourceResult, ServerStrategy, }; +use crate::hydroflow_crate::build::BuildOutput; +use crate::hydroflow_crate::tracing_options::TracingOptions; +use crate::progress::ProgressTracker; +use crate::HostStrategyGetter; pub mod launched_binary; pub use launched_binary::*; @@ -45,9 +50,9 @@ impl Host for LocalhostHost { HostTargetType::Local } - fn request_port(&mut self, _bind_type: &ServerStrategy) {} + fn request_port(&self, _bind_type: &ServerStrategy) {} fn collect_resources(&self, _resource_batch: &mut ResourceBatch) {} - fn request_custom_binary(&mut self) {} + fn request_custom_binary(&self) {} fn id(&self) -> usize { self.id @@ -57,25 +62,18 @@ impl Host for LocalhostHost { self } - fn as_any_mut(&mut self) -> &mut dyn std::any::Any { - self - } - fn launched(&self) -> Option> { - Some(Arc::new(LaunchedLocalhost {})) + 
Some(Arc::new(LaunchedLocalhost)) } - async fn provision(&mut self, _resource_result: &Arc) -> Arc { - Arc::new(LaunchedLocalhost {}) + fn provision(&self, _resource_result: &Arc) -> Arc { + Arc::new(LaunchedLocalhost) } fn strategy_as_server<'a>( &'a self, connection_from: &dyn Host, - ) -> Result<( - ClientStrategy<'a>, - Box ServerStrategy>, - )> { + ) -> Result<(ClientStrategy<'a>, HostStrategyGetter)> { if self.client_only { anyhow::bail!("Localhost cannot be a server if it is client only") } @@ -115,7 +113,7 @@ impl Host for LocalhostHost { } } -struct LaunchedLocalhost {} +struct LaunchedLocalhost; #[async_trait] impl LaunchedHost for LaunchedLocalhost { @@ -147,27 +145,127 @@ impl LaunchedHost for LaunchedLocalhost { } } - async fn copy_binary(&self, _binary: Arc<(String, Vec, PathBuf)>) -> Result<()> { + async fn copy_binary(&self, _binary: &BuildOutput) -> Result<()> { Ok(()) } async fn launch_binary( &self, id: String, - binary: Arc<(String, Vec, PathBuf)>, + binary: &BuildOutput, args: &[String], - ) -> Result>> { - let child = Command::new(&binary.2) - .args(args) - .kill_on_drop(true) + tracing: Option, + ) -> Result> { + let mut command = if let Some(tracing) = tracing.as_ref() { + if cfg!(target_os = "macos") || cfg!(target_family = "windows") { + // dtrace + ProgressTracker::println( + format!("[{id} tracing] Profiling binary with `dtrace`.",), + ); + let dtrace_outfile = tracing.dtrace_outfile.as_ref().ok_or_else(|| { + anyhow!( + "`{}` must be set for `dtrace` on localhost.", + name_of!(dtrace_outfile in TracingOptions) + ) + })?; + + let mut command = Command::new("dtrace"); + command + .arg("-o") + .arg(dtrace_outfile) + .arg("-n") + .arg(format!( + "profile-{} /pid == $target/ {{ @[ustack()] = count(); }}", + tracing.frequency + )) + .arg("-c") + .arg({ + // TODO(mingwei): use std `intersperse` when stabilized. 
+ let inner_command = itertools::Itertools::intersperse( + std::iter::once(binary.bin_path.to_str().unwrap()) + .chain(args.iter().map(Deref::deref)) + .map(|s| shell_escape::unix::escape(s.into())), + Cow::Borrowed(" "), + ) + .collect::(); + &*shell_escape::unix::escape(inner_command.into()) + }); + command + } + // else if cfg!(target_family = "windows") { + // // blondie_dtrace + // ProgressTracker::println(&format!( + // "[{id} tracing] Profiling binary with `blondie`. `TracingOptions::frequency` is ignored. Ensure that this is run as admin.", + // )); + // ProgressTracker::println(&format!( + // "[{id} tracing] Install `blondie` via `cargo install blondie --all-features`.", + // )); + // let _ = tracing; + // let mut command = Command::new("blondie"); + // command + // .arg("-o") + // .arg(format!( + // "./blondie-{}.stacks", + // nanoid::nanoid!(5), // TODO! + // )) + // .arg("folded-text") + // .arg(&binary.bin_path) + // .args(args); + // command + // } + else if cfg!(target_family = "unix") { + // perf + ProgressTracker::println(format!("[{} tracing] Tracing binary with `perf`.", id)); + let perf_outfile = tracing.perf_raw_outfile.as_ref().ok_or_else(|| { + anyhow!( + "`{}` must be set for `perf` on localhost.", + name_of!(perf_raw_outfile in TracingOptions) + ) + })?; + + let mut command = Command::new("perf"); + command + .args([ + "record", + "-F", + &tracing.frequency.to_string(), + "-e", + "cycles:u", + "--call-graph", + "dwarf,65528", + "-o", + ]) + .arg(perf_outfile) + .arg(&binary.bin_path) + .args(args); + command + } else { + bail!( + "Unknown OS for perf/dtrace tracing: {}", + std::env::consts::OS + ); + } + } else { + let mut command = Command::new(&binary.bin_path); + command.args(args); + command + }; + + command .stdin(Stdio::piped()) .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn()?; + .stderr(Stdio::piped()); + + #[cfg(not(target_family = "unix"))] + command.kill_on_drop(true); + + ProgressTracker::println(format!("[{}] 
running command: `{:?}`", id, command)); + + let child = command + .spawn() + .with_context(|| format!("Failed to execute command: {:?}", command))?; - Ok(Arc::new(RwLock::new(LaunchedLocalhostBinary::new( - child, id, - )))) + Ok(Box::new(LaunchedLocalhostBinary::new(child, id, tracing))) } async fn forward_port(&self, addr: &SocketAddr) -> Result { diff --git a/hydro_deploy/core/src/progress.rs b/hydro_deploy/core/src/progress.rs index 8e02c199310f..e4f8bdd6b4f6 100644 --- a/hydro_deploy/core/src/progress.rs +++ b/hydro_deploy/core/src/progress.rs @@ -64,9 +64,6 @@ impl BarTree { } } BarTree::Group(name, pb, children, anticipated_total) => { - let mut path_with_group = cur_path.to_vec(); - path_with_group.push(name.clone()); - let finished_count = children .iter() .filter(|child| child.status() == LeafStatus::Finished) @@ -78,23 +75,46 @@ impl BarTree { let queued_count = anticipated_total.map(|total| total - finished_count - started_count); - match queued_count { - Some(queued_count) => { - pb.set_prefix(format!( - "{} ({}/{}/{})", - path_with_group.join(" / "), - finished_count, - started_count, - queued_count - )); - } - None => pb.set_prefix(format!( - "{} ({}/{})", - path_with_group.join(" / "), - finished_count, - started_count - )), + let progress_str = + if anticipated_total.iter().any(|v| *v == 1) && started_count == 1 { + "".to_string() + } else { + match queued_count { + Some(queued_count) => { + format!( + " ({}/{}/{})", + finished_count, + started_count, + queued_count + finished_count + started_count + ) + } + None => format!(" ({}/{}/?)", finished_count, started_count), + } + }; + + if cur_path.is_empty() { + pb.set_prefix(format!("{}{}", name, progress_str)); + } else { + pb.set_prefix(format!( + "{} / {}{}", + cur_path.join(" / "), + name, + progress_str, + )); } + + let mut path_with_group = cur_path.to_vec(); + let non_finished_count = children + .iter() + .filter(|child| child.status() != LeafStatus::Finished) + .count(); + + if 
non_finished_count == 1 { + path_with_group.push(format!("{}{}", name, progress_str)); + } else { + path_with_group.push(name.clone()); + } + for child in children { child.refresh_prefix(&path_with_group); } @@ -108,7 +128,7 @@ impl BarTree { } } - fn find_node(&mut self, path: &[usize]) -> &mut BarTree { + fn find_node(&self, path: &[usize]) -> &BarTree { if path.is_empty() { return self; } @@ -120,12 +140,26 @@ impl BarTree { _ => panic!(), } } + + fn find_node_mut(&mut self, path: &[usize]) -> &mut BarTree { + if path.is_empty() { + return self; + } + + match self { + BarTree::Root(children) | BarTree::Group(_, _, children, _) => { + children[path[0]].find_node_mut(&path[1..]) + } + _ => panic!(), + } + } } pub struct ProgressTracker { pub(crate) multi_progress: MultiProgress, tree: BarTree, pub(crate) current_count: usize, + progress_list: Vec<(Arc, bool)>, } impl ProgressTracker { @@ -134,6 +168,7 @@ impl ProgressTracker { multi_progress: MultiProgress::new(), tree: BarTree::Root(vec![]), current_count: 0, + progress_list: vec![], } } @@ -152,6 +187,49 @@ impl ProgressTracker { _ => panic!(), }; + if let Some(surrounding_pb) = &surrounding_pb { + let non_finished_count = surrounding_children + .iter() + .filter(|child| child.status() != LeafStatus::Finished) + .count(); + if non_finished_count == 0 { + self.multi_progress.remove(surrounding_pb.as_ref()); + let surrounding_idx = self + .progress_list + .iter() + .position(|(pb, _)| Arc::ptr_eq(pb, surrounding_pb)) + .unwrap(); + self.progress_list[surrounding_idx].1 = false; + } else if non_finished_count == 1 { + let self_idx = self + .progress_list + .iter() + .position(|(pb, _)| Arc::ptr_eq(pb, surrounding_pb)) + .unwrap(); + let last_visible_before = self.progress_list[..self_idx] + .iter() + .rposition(|(_, visible)| *visible); + if let Some(last_visible_before) = last_visible_before { + self.multi_progress.insert_after( + &self.progress_list[last_visible_before].0, + surrounding_pb.as_ref().clone(), + ); 
+ } else { + self.multi_progress + .insert(0, surrounding_pb.as_ref().clone()); + } + + self.progress_list[self_idx].1 = true; + } + } + + let surrounding = self.tree.find_node_mut(&under_path); + let (surrounding_children, surrounding_pb) = match surrounding { + BarTree::Root(children) => (children, None), + BarTree::Group(_, pb, children, _) => (children, Some(pb)), + _ => panic!(), + }; + self.current_count += 1; let core_bar = indicatif::ProgressBar::new(100); @@ -160,15 +238,45 @@ impl ProgressTracker { .rev() .flat_map(|c| c.get_pb()) .next(); - let created_bar = if let Some(previous_bar) = previous_bar { - self.multi_progress.insert_after(previous_bar, core_bar) + + let index_to_insert = if let Some(previous_bar) = previous_bar { + let index_of_prev = self + .progress_list + .iter() + .position(|pb| Arc::ptr_eq(&pb.0, previous_bar)) + .unwrap(); + index_of_prev + 1 } else if let Some(group_pb) = surrounding_pb { - self.multi_progress.insert_after(group_pb, core_bar) + let index_of_group = self + .progress_list + .iter() + .position(|pb| Arc::ptr_eq(&pb.0, group_pb)) + .unwrap(); + index_of_group + 1 + } else if !self.progress_list.is_empty() { + self.progress_list.len() + } else { + 0 + }; + + let last_visible = if !self.progress_list.is_empty() { + self.progress_list[..index_to_insert] + .iter() + .rposition(|(_, visible)| *visible) + } else { + None + }; + + let created_bar = if let Some(last_visible) = last_visible { + self.multi_progress + .insert_after(&self.progress_list[last_visible].0, core_bar) } else { - self.multi_progress.add(core_bar) + self.multi_progress.insert(0, core_bar) }; let pb = Arc::new(created_bar); + self.progress_list + .insert(index_to_insert, (pb.clone(), true)); if group { surrounding_children.push(BarTree::Group(name, pb.clone(), vec![], anticipated_total)); } else { @@ -197,11 +305,50 @@ impl ProgressTracker { } pub fn end_task(&mut self, path: Vec) { - match self.tree.find_node(&path[0..path.len() - 1]) { + let parent = 
self.tree.find_node_mut(&path[0..path.len() - 1]); + match parent { BarTree::Root(children) | BarTree::Group(_, _, children, _) => { let removed = children[*path.last().unwrap()].get_pb().unwrap().clone(); children[*path.last().unwrap()] = BarTree::Finished; self.multi_progress.remove(&removed); + self.progress_list + .retain(|(pb, _)| !Arc::ptr_eq(pb, &removed)); + + let non_finished_count = children + .iter() + .filter(|child| child.status() != LeafStatus::Finished) + .count(); + if let BarTree::Group(_, pb, _, _) = parent { + if non_finished_count == 1 { + self.multi_progress.remove(pb.as_ref()); + self.progress_list + .iter_mut() + .find(|(pb2, _)| Arc::ptr_eq(pb2, pb)) + .unwrap() + .1 = false; + } else if non_finished_count == 0 { + let self_idx = self + .progress_list + .iter() + .position(|(pb2, _)| Arc::ptr_eq(pb2, pb)) + .unwrap(); + + let last_visible_before = self.progress_list[..self_idx] + .iter() + .rposition(|(_, visible)| *visible); + + if let Some(last_visible_before) = last_visible_before { + self.multi_progress.insert_after( + &self.progress_list[last_visible_before].0, + pb.as_ref().clone(), + ); + } else { + self.multi_progress.insert(0, pb.as_ref().clone()); + } + + self.progress_list[self_idx].1 = true; + } + } } _ => panic!(), @@ -217,19 +364,19 @@ impl ProgressTracker { } impl ProgressTracker { - pub fn println(msg: &str) { + pub fn println(msg: impl AsRef) { let progress_bar = PROGRESS_TRACKER .get_or_init(|| Mutex::new(ProgressTracker::new())) .lock() .unwrap(); - if progress_bar.multi_progress.println(msg).is_err() { - println!("{}", msg); + if progress_bar.multi_progress.println(msg.as_ref()).is_err() { + println!("{}", msg.as_ref()); } } pub fn with_group<'a, T, F: Future>( - name: &str, + name: impl Into, anticipated_total: Option, f: impl FnOnce() -> F + 'a, ) -> impl Future + 'a { @@ -242,13 +389,7 @@ impl ProgressTracker { .get_or_init(|| Mutex::new(ProgressTracker::new())) .lock() .unwrap(); - progress_bar.start_task( - 
group.clone(), - name.to_string(), - true, - anticipated_total, - false, - ) + progress_bar.start_task(group.clone(), name.into(), true, anticipated_total, false) }; group.push(group_i); @@ -264,7 +405,10 @@ impl ProgressTracker { }) } - pub fn leaf>(name: String, f: F) -> impl Future { + pub fn leaf>( + name: impl Into, + f: F, + ) -> impl Future { let mut group = CURRENT_GROUP .try_with(|cur| cur.clone()) .unwrap_or_default(); @@ -274,7 +418,7 @@ impl ProgressTracker { .get_or_init(|| Mutex::new(ProgressTracker::new())) .lock() .unwrap(); - progress_bar.start_task(group.clone(), name, false, None, false) + progress_bar.start_task(group.clone(), name.into(), false, None, false) }; group.push(leaf_i); @@ -291,7 +435,40 @@ impl ProgressTracker { } pub fn rich_leaf<'a, T, F: Future>( - name: String, + name: impl Into, + f: impl FnOnce(Box) -> F + 'a, + ) -> impl Future + 'a { + let mut group = CURRENT_GROUP + .try_with(|cur| cur.clone()) + .unwrap_or_default(); + + let (leaf_i, bar) = { + let mut progress_bar = PROGRESS_TRACKER + .get_or_init(|| Mutex::new(ProgressTracker::new())) + .lock() + .unwrap(); + progress_bar.start_task(group.clone(), name.into(), false, None, false) + }; + + group.push(leaf_i); + + async move { + let my_bar = bar.clone(); + let out = f(Box::new(move |msg| { + my_bar.set_message(msg); + })) + .await; + let mut progress_bar = PROGRESS_TRACKER + .get_or_init(|| Mutex::new(ProgressTracker::new())) + .lock() + .unwrap(); + progress_bar.end_task(group); + out + } + } + + pub fn progress_leaf<'a, T, F: Future>( + name: impl Into, f: impl FnOnce(Box, Box) -> F + 'a, ) -> impl Future + 'a { let mut group = CURRENT_GROUP @@ -303,7 +480,7 @@ impl ProgressTracker { .get_or_init(|| Mutex::new(ProgressTracker::new())) .lock() .unwrap(); - progress_bar.start_task(group.clone(), name, false, None, true) + progress_bar.start_task(group.clone(), name.into(), false, None, true) }; group.push(leaf_i); diff --git a/hydro_deploy/core/src/ssh.rs 
b/hydro_deploy/core/src/ssh.rs index cd75e7100415..dd315f06fe18 100644 --- a/hydro_deploy/core/src/ssh.rs +++ b/hydro_deploy/core/src/ssh.rs @@ -2,69 +2,80 @@ use std::borrow::Cow; use std::collections::HashMap; use std::net::SocketAddr; use std::path::PathBuf; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use std::time::Duration; -use anyhow::{Context, Result}; -use async_channel::{Receiver, Sender}; +use anyhow::{Context as _, Result}; use async_ssh2_lite::ssh2::ErrorCode; -use async_ssh2_lite::{AsyncChannel, AsyncSession, Error, SessionConfiguration}; +use async_ssh2_lite::{AsyncChannel, AsyncSession, SessionConfiguration}; use async_trait::async_trait; -use futures::io::BufReader; -use futures::{AsyncBufReadExt, AsyncWriteExt, StreamExt}; -use hydroflow_cli_integration::ServerBindConfig; +use futures::io::BufReader as FuturesBufReader; +use futures::{AsyncBufReadExt, AsyncWriteExt}; +use hydroflow_deploy_integration::ServerBindConfig; +use inferno::collapse::perf::Folder; +use inferno::collapse::Collapse; use nanoid::nanoid; +use tokio::io::BufReader as TokioBufReader; use tokio::net::{TcpListener, TcpStream}; -use tokio::sync::RwLock; +use tokio::runtime::Handle; +use tokio::sync::{mpsc, oneshot}; +use tokio_stream::StreamExt; +use tokio_util::io::SyncIoBridge; use super::progress::ProgressTracker; use super::util::async_retry; use super::{LaunchedBinary, LaunchedHost, ResourceResult, ServerStrategy}; +use crate::hydroflow_crate::build::BuildOutput; +use crate::hydroflow_crate::flamegraph::handle_fold_data; +use crate::hydroflow_crate::tracing_options::TracingOptions; use crate::util::prioritized_broadcast; -struct LaunchedSSHBinary { +const PERF_OUTFILE: &str = "__profile.perf.data"; + +struct LaunchedSshBinary { _resource_result: Arc, session: Option>, channel: AsyncChannel, - stdin_sender: Sender, - stdout_receivers: Arc>>>, - stdout_cli_receivers: Arc>>>, - stderr_receivers: Arc>>>, + stdin_sender: mpsc::UnboundedSender, + stdout_receivers: Arc>>>, + 
stdout_deploy_receivers: Arc>>>, + stderr_receivers: Arc>>>, + tracing: Option, } #[async_trait] -impl LaunchedBinary for LaunchedSSHBinary { - async fn stdin(&self) -> Sender { +impl LaunchedBinary for LaunchedSshBinary { + fn stdin(&self) -> mpsc::UnboundedSender { self.stdin_sender.clone() } - async fn cli_stdout(&self) -> tokio::sync::oneshot::Receiver { - let mut receivers = self.stdout_cli_receivers.write().await; + fn deploy_stdout(&self) -> oneshot::Receiver { + let mut receivers = self.stdout_deploy_receivers.lock().unwrap(); if receivers.is_some() { - panic!("Only one CLI stdout receiver is allowed at a time"); + panic!("Only one deploy stdout receiver is allowed at a time"); } - let (sender, receiver) = tokio::sync::oneshot::channel::(); + let (sender, receiver) = oneshot::channel::(); *receivers = Some(sender); receiver } - async fn stdout(&self) -> Receiver { - let mut receivers = self.stdout_receivers.write().await; - let (sender, receiver) = async_channel::unbounded::(); + fn stdout(&self) -> mpsc::UnboundedReceiver { + let mut receivers = self.stdout_receivers.lock().unwrap(); + let (sender, receiver) = mpsc::unbounded_channel::(); receivers.push(sender); receiver } - async fn stderr(&self) -> Receiver { - let mut receivers = self.stderr_receivers.write().await; - let (sender, receiver) = async_channel::unbounded::(); + fn stderr(&self) -> mpsc::UnboundedReceiver { + let mut receivers = self.stderr_receivers.lock().unwrap(); + let (sender, receiver) = mpsc::unbounded_channel::(); receivers.push(sender); receiver } - async fn exit_code(&self) -> Option { + fn exit_code(&self) -> Option { // until the program exits, the exit status is meaningless if self.channel.eof() { self.channel.exit_status().ok() @@ -73,32 +84,89 @@ impl LaunchedBinary for LaunchedSSHBinary { } } - async fn wait(&mut self) -> Option { + async fn wait(&mut self) -> Result { self.channel.wait_eof().await.unwrap(); - let ret = self.exit_code().await; + let exit_code = 
self.channel.exit_status()?; self.channel.wait_close().await.unwrap(); - ret + + Ok(exit_code) + } + + async fn stop(&mut self) -> Result<()> { + if !self.channel.eof() { + ProgressTracker::leaf("force stopping", async { + self.channel.write_all(b"\x03").await?; // `^C` + self.channel.send_eof().await?; + self.channel.wait_eof().await?; + // `exit_status()` + self.channel.wait_close().await?; + Result::<_>::Ok(()) + }) + .await?; + } + + // Run perf post-processing and download perf output. + if let Some(tracing) = self.tracing.as_ref() { + let mut script_channel = self.session.as_ref().unwrap().channel_session().await?; + let mut fold_er = Folder::from(tracing.fold_perf_options.clone().unwrap_or_default()); + + let fold_data = ProgressTracker::leaf("perf script & folding", async move { + let mut stderr_lines = FuturesBufReader::new(script_channel.stderr()).lines(); + let stdout = script_channel.stream(0); + + // Pattern on `()` to make sure no `Result`s are ignored. + let ((), fold_data, ()) = tokio::try_join!( + async move { + // Log stderr. + while let Some(Ok(s)) = stderr_lines.next().await { + ProgressTracker::println(format!("[perf stderr] {s}")); + } + Result::<_>::Ok(()) + }, + async move { + // Download perf output and fold. + tokio::task::spawn_blocking(move || { + let mut fold_data = Vec::new(); + fold_er.collapse( + SyncIoBridge::new(TokioBufReader::new(stdout)), + &mut fold_data, + )?; + Ok(fold_data) + }) + .await? + }, + async move { + // Run command (last!). 
+ script_channel + .exec(&format!("perf script --symfs=/ -i {PERF_OUTFILE}")) + .await?; + Ok(()) + }, + )?; + Result::<_>::Ok(fold_data) + }) + .await?; + + handle_fold_data(tracing, fold_data).await?; + }; + + Ok(()) } } -impl Drop for LaunchedSSHBinary { +impl Drop for LaunchedSshBinary { fn drop(&mut self) { - let session = self.session.take().unwrap(); - std::thread::scope(|s| { - s.spawn(|| { - let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); - runtime - .block_on(session.disconnect(None, "", None)) - .unwrap(); + if let Some(session) = self.session.take() { + tokio::task::block_in_place(|| { + Handle::current().block_on(session.disconnect(None, "", None)) }) - .join() .unwrap(); - }); + } } } #[async_trait] -pub trait LaunchedSSHHost: Send + Sync { +pub trait LaunchedSshHost: Send + Sync { fn get_internal_ip(&self) -> String; fn get_external_ip(&self) -> Option; fn get_cloud_provider(&self) -> String; @@ -126,7 +194,7 @@ pub trait LaunchedSSHHost: Send + Sync { ServerStrategy::Demux(demux) => { let mut config_map = HashMap::new(); for (key, underlying) in demux { - config_map.insert(*key, LaunchedSSHHost::server_config(self, underlying)); + config_map.insert(*key, LaunchedSshHost::server_config(self, underlying)); } ServerBindConfig::Demux(config_map) @@ -134,13 +202,13 @@ pub trait LaunchedSSHHost: Send + Sync { ServerStrategy::Merge(merge) => { let mut configs = vec![]; for underlying in merge { - configs.push(LaunchedSSHHost::server_config(self, underlying)); + configs.push(LaunchedSshHost::server_config(self, underlying)); } ServerBindConfig::Merge(configs) } ServerStrategy::Tagged(underlying, id) => ServerBindConfig::Tagged( - Box::new(LaunchedSSHHost::server_config(self, underlying)), + Box::new(LaunchedSshHost::server_config(self, underlying)), *id, ), ServerStrategy::Null => ServerBindConfig::Null, @@ -197,12 +265,12 @@ pub trait LaunchedSSHHost: Send + Sync { } #[async_trait] -impl LaunchedHost for T { +impl LaunchedHost 
for T { fn server_config(&self, bind_type: &ServerStrategy) -> ServerBindConfig { - LaunchedSSHHost::server_config(self, bind_type) + LaunchedSshHost::server_config(self, bind_type) } - async fn copy_binary(&self, binary: Arc<(String, Vec, PathBuf)>) -> Result<()> { + async fn copy_binary(&self, binary: &BuildOutput) -> Result<()> { let session = self.open_ssh_session().await?; let sftp = async_retry( @@ -213,7 +281,7 @@ impl LaunchedHost for T { .await?; // we may be deploying multiple binaries, so give each a unique name - let unique_name = &binary.0; + let unique_name = &binary.unique_id; let user = self.ssh_user(); let binary_path = PathBuf::from(format!("/home/{user}/hydro-{unique_name}")); @@ -223,8 +291,8 @@ impl LaunchedHost for T { let temp_path = PathBuf::from(format!("/home/{user}/hydro-{random}")); let sftp = &sftp; - ProgressTracker::rich_leaf( - format!("uploading binary to /home/{user}/hydro-{unique_name}"), + ProgressTracker::progress_leaf( + format!("uploading binary to {}", binary_path.display()), |set_progress, _| { let binary = &binary; let binary_path = &binary_path; @@ -232,15 +300,17 @@ impl LaunchedHost for T { let mut created_file = sftp.create(&temp_path).await?; let mut index = 0; - while index < binary.1.len() { + while index < binary.bin_data.len() { let written = created_file .write( - &binary.1 - [index..std::cmp::min(index + 128 * 1024, binary.1.len())], + &binary.bin_data[index + ..std::cmp::min(index + 128 * 1024, binary.bin_data.len())], ) .await?; index += written; - set_progress(((index as f64 / binary.1.len() as f64) * 100.0) as u64); + set_progress( + ((index as f64 / binary.bin_data.len() as f64) * 100.0) as u64, + ); } let mut orig_file_stat = sftp.stat(&temp_path).await?; orig_file_stat.perm = Some(0o755); // allow the copied binary to be executed by anyone @@ -250,7 +320,9 @@ impl LaunchedHost for T { match sftp.rename(&temp_path, binary_path, None).await { Ok(_) => {} - Err(Error::Ssh2(e)) if e.code() == 
ErrorCode::SFTP(4) => { + Err(async_ssh2_lite::Error::Ssh2(e)) + if e.code() == ErrorCode::SFTP(4) => + { // file already exists sftp.unlink(&temp_path).await?; } @@ -271,18 +343,19 @@ impl LaunchedHost for T { async fn launch_binary( &self, id: String, - binary: Arc<(String, Vec, PathBuf)>, + binary: &BuildOutput, args: &[String], - ) -> Result>> { + tracing: Option, + ) -> Result> { let session = self.open_ssh_session().await?; - let unique_name = &binary.0; + let unique_name = &binary.unique_id; let user = self.ssh_user(); let binary_path = PathBuf::from(format!("/home/{user}/hydro-{unique_name}")); let channel = ProgressTracker::leaf( - format!("launching binary /home/{user}/hydro-{unique_name}"), + format!("launching binary {}", binary_path.display()), async { let mut channel = async_retry( @@ -297,24 +370,29 @@ impl LaunchedHost for T { Duration::from_secs(1), ) .await?; - let binary_path_string = binary_path.to_str().unwrap(); - let args_string = args - .iter() - .map(|s| shell_escape::unix::escape(Cow::from(s))) - .fold("".to_string(), |acc, v| format!("{acc} {v}")); - channel - .exec(&format!("{binary_path_string}{args_string}")) - .await?; + let mut command = binary_path.to_str().unwrap().to_owned(); + for arg in args{ + command.push(' '); + command.push_str(&shell_escape::unix::escape(Cow::Borrowed(arg))) + } + // Launch with perf if specified, also copy local binary to expected place for perf report to work + if let Some(TracingOptions { frequency, .. 
}) = tracing.clone() { + // Attach perf to the command + command = format!( + "perf record -F {frequency} -e cycles:u --call-graph dwarf,65528 -o {PERF_OUTFILE} {command}", + ); + } + channel.exec(&command).await?; anyhow::Ok(channel) }, ) .await?; - let (stdin_sender, mut stdin_receiver) = async_channel::unbounded::(); + let (stdin_sender, mut stdin_receiver) = mpsc::unbounded_channel::(); let mut stdin = channel.stream(0); // stream 0 is stdout/stdin, we use it for stdin tokio::spawn(async move { - while let Some(line) = stdin_receiver.next().await { + while let Some(line) = stdin_receiver.recv().await { if stdin.write_all(line.as_bytes()).await.is_err() { break; } @@ -324,24 +402,25 @@ impl LaunchedHost for T { }); let id_clone = id.clone(); - let (stdout_cli_receivers, stdout_receivers) = - prioritized_broadcast(BufReader::new(channel.stream(0)).lines(), move |s| { - println!("[{id_clone}] {s}") + let (stdout_deploy_receivers, stdout_receivers) = + prioritized_broadcast(FuturesBufReader::new(channel.stream(0)).lines(), move |s| { + ProgressTracker::println(format!("[{id_clone}] {s}")); }); let (_, stderr_receivers) = - prioritized_broadcast(BufReader::new(channel.stderr()).lines(), move |s| { - eprintln!("[{id}] {s}") + prioritized_broadcast(FuturesBufReader::new(channel.stderr()).lines(), move |s| { + ProgressTracker::println(format!("[{id} stderr] {s}")); }); - Ok(Arc::new(RwLock::new(LaunchedSSHBinary { + Ok(Box::new(LaunchedSshBinary { _resource_result: self.resource_result().clone(), session: Some(session), channel, stdin_sender, - stdout_cli_receivers, + stdout_deploy_receivers, stdout_receivers, stderr_receivers, - }))) + tracing, + })) } async fn forward_port(&self, addr: &SocketAddr) -> Result { diff --git a/hydro_deploy/core/src/terraform.rs b/hydro_deploy/core/src/terraform.rs index 342b4181e435..73be99d697b0 100644 --- a/hydro_deploy/core/src/terraform.rs +++ b/hydro_deploy/core/src/terraform.rs @@ -79,6 +79,7 @@ impl Drop for TerraformPool { 
#[derive(Serialize, Deserialize)] pub struct TerraformBatch { pub terraform: TerraformConfig, + #[serde(skip_serializing_if = "HashMap::is_empty")] pub provider: HashMap, #[serde(skip_serializing_if = "HashMap::is_empty")] pub data: HashMap>, @@ -118,7 +119,7 @@ impl TerraformBatch { }); } - ProgressTracker::with_group("terraform", None, || async { + ProgressTracker::with_group("terraform", Some(1), || async { let dothydro_folder = std::env::current_dir().unwrap().join(".hydro"); std::fs::create_dir_all(&dothydro_folder).unwrap(); let deployment_folder = tempfile::tempdir_in(dothydro_folder).unwrap(); @@ -189,7 +190,9 @@ async fn display_apply_outputs(stdout: &mut ChildStdout) { ( channel_send, tokio::task::spawn(ProgressTracker::leaf(id, async move { - channel_recv.await.unwrap(); + // `Err(RecvError)` means send side was dropped due to another error. + // Ignore here to prevent spurious panic stack traces. + let _result = channel_recv.await; })), ), ); @@ -243,7 +246,7 @@ impl TerraformApply { let stderr_loop = tokio::task::spawn_blocking(move || { let mut lines = BufReader::new(stderr).lines(); while let Some(Ok(line)) = lines.next() { - ProgressTracker::println(&format!("[terraform] {}", line)); + ProgressTracker::println(format!("[terraform] {}", line)); } }); @@ -254,7 +257,7 @@ impl TerraformApply { self.child = None; if !status.unwrap().success() { - bail!("Terraform deployment failed"); + bail!("Terraform deployment failed, see `[terraform]` logs above."); } let mut output_command = Command::new("terraform"); diff --git a/hydro_deploy/core/src/util.rs b/hydro_deploy/core/src/util.rs index 3adf46dee848..3a3c243bafb4 100644 --- a/hydro_deploy/core/src/util.rs +++ b/hydro_deploy/core/src/util.rs @@ -1,12 +1,10 @@ use std::io; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use std::time::Duration; use anyhow::Result; -use async_channel::Sender; -use futures::{Future, StreamExt}; -use futures_core::Stream; -use tokio::sync::RwLock; +use futures::{Future, 
Stream, StreamExt}; +use tokio::sync::{mpsc, oneshot}; pub async fn async_retry>>( mut thunk: impl FnMut() -> F, @@ -26,60 +24,60 @@ pub async fn async_retry>>( } type PriorityBroadcacst = ( - Arc>>>, - Arc>>>, + Arc>>>, + Arc>>>, ); pub fn prioritized_broadcast> + Send + Unpin + 'static>( mut lines: T, default: impl Fn(String) + Send + 'static, ) -> PriorityBroadcacst { - let priority_receivers = Arc::new(RwLock::new(None::>)); - let receivers = Arc::new(RwLock::new(Vec::>::new())); + let priority_receivers = Arc::new(Mutex::new(None::>)); + let receivers = Arc::new(Mutex::new(Vec::>::new())); let weak_priority_receivers = Arc::downgrade(&priority_receivers); let weak_receivers = Arc::downgrade(&receivers); tokio::spawn(async move { - 'line_loop: while let Some(Result::Ok(line)) = lines.next().await { - if let Some(cli_receivers) = weak_priority_receivers.upgrade() { - let mut cli_receivers = cli_receivers.write().await; + while let Some(Result::Ok(line)) = lines.next().await { + if let Some(deploy_receivers) = weak_priority_receivers.upgrade() { + let mut deploy_receivers = deploy_receivers.lock().unwrap(); - let successful_send = if let Some(r) = cli_receivers.take() { + let successful_send = if let Some(r) = deploy_receivers.take() { r.send(line.clone()).is_ok() } else { false }; + drop(deploy_receivers); if successful_send { - continue 'line_loop; + continue; } } if let Some(receivers) = weak_receivers.upgrade() { - let mut receivers = receivers.write().await; + let mut receivers = receivers.lock().unwrap(); + receivers.retain(|receiver| !receiver.is_closed()); + let mut successful_send = false; - for r in receivers.iter() { - successful_send |= r.send(line.clone()).await.is_ok(); + for receiver in receivers.iter() { + successful_send |= receiver.send(line.clone()).is_ok(); } - - receivers.retain(|r| !r.is_closed()); - if !successful_send { - default(line); + (default)(line); } } else { break; } } - if let Some(cli_receivers) = 
weak_priority_receivers.upgrade() { - let mut cli_receivers = cli_receivers.write().await; - drop(cli_receivers.take()); + if let Some(deploy_receivers) = weak_priority_receivers.upgrade() { + let mut deploy_receivers = deploy_receivers.lock().unwrap(); + drop(deploy_receivers.take()); } if let Some(receivers) = weak_receivers.upgrade() { - let mut receivers = receivers.write().await; + let mut receivers = receivers.lock().unwrap(); receivers.clear(); } }); @@ -89,21 +87,23 @@ pub fn prioritized_broadcast> + Send + Unpin #[cfg(test)] mod test { - use futures::StreamExt; + use tokio_stream::wrappers::UnboundedReceiverStream; + + use super::*; #[tokio::test] async fn broadcast_listeners_close_when_source_does() { - let (tx, rx) = async_channel::unbounded::<_>(); - let (_, receivers) = super::prioritized_broadcast(rx, |_| {}); + let (tx, rx) = mpsc::unbounded_channel(); + let (_, receivers) = prioritized_broadcast(UnboundedReceiverStream::new(rx), |_| {}); - let (tx2, mut rx2) = async_channel::unbounded::<_>(); + let (tx2, mut rx2) = mpsc::unbounded_channel(); - receivers.try_write().unwrap().push(tx2); + receivers.lock().unwrap().push(tx2); - tx.send(Ok("hello".to_string())).await.unwrap(); - assert_eq!(rx2.next().await, Some("hello".to_string())); + tx.send(Ok("hello".to_string())).unwrap(); + assert_eq!(rx2.recv().await, Some("hello".to_string())); - let wait_again = tokio::spawn(async move { rx2.next().await }); + let wait_again = tokio::spawn(async move { rx2.recv().await }); drop(tx); diff --git a/hydro_deploy/hydro_cli/CHANGELOG.md b/hydro_deploy/hydro_cli/CHANGELOG.md index e4c718ff8ecc..6e2875acbf5d 100644 --- a/hydro_deploy/hydro_cli/CHANGELOG.md +++ b/hydro_deploy/hydro_cli/CHANGELOG.md @@ -5,8 +5,228 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## 0.9.0 (2024-08-30) + +### Chore + + - manually set versions for crates renamed in #1413 + - lower min dependency versions where possible, update `Cargo.lock` + Moved from #1418 + + --------- + +### New Features + + - use trybuild to compile subgraph binaries + +### Bug Fixes + + - only instantiate `Localhost` once + +### New Features (BREAKING) + + - Perf works over SSH + See documentation on how to use in + [Notion](https://www.notion.so/hydro-project/perf-Measuring-CPU-usage-6135b6ce56a94af38eeeba0a55deef9c). + +### Refactor (BREAKING) + + - rename integration crates to drop CLI references + - `Deployment.stop()` for graceful shutdown including updated `perf` profile downloading + * `perf` profile downloading moved from the `drop()` impl to `async fn + stop()` + * download perf data via stdout + * update async-ssh2-lite to 0.5 to cleanup tokio compat issues + + WIP for #1365 + - use `buildstructor` to handle excessive `Deployment` method arguments, fix #1364 + Adds new method `Deployment::AzureHost` + +### Commit Statistics + + + + - 8 commits contributed to the release. + - 8 commits were understood as [conventional](https://www.conventionalcommits.org). + - 7 unique issues were worked on: [#1313](https://github.com/hydro-project/hydroflow/issues/1313), [#1366](https://github.com/hydro-project/hydroflow/issues/1366), [#1370](https://github.com/hydro-project/hydroflow/issues/1370), [#1398](https://github.com/hydro-project/hydroflow/issues/1398), [#1403](https://github.com/hydro-project/hydroflow/issues/1403), [#1413](https://github.com/hydro-project/hydroflow/issues/1413), [#1423](https://github.com/hydro-project/hydroflow/issues/1423) + +### Commit Details + + + +
view details + + * **[#1313](https://github.com/hydro-project/hydroflow/issues/1313)** + - Perf works over SSH ([`749a103`](https://github.com/hydro-project/hydroflow/commit/749a10307f4eff2a46a1056735e84ed94d44b39e)) + * **[#1366](https://github.com/hydro-project/hydroflow/issues/1366)** + - Use `buildstructor` to handle excessive `Deployment` method arguments, fix #1364 ([`8856c85`](https://github.com/hydro-project/hydroflow/commit/8856c8596d5ad9d5f24a46467690bfac1549fae2)) + * **[#1370](https://github.com/hydro-project/hydroflow/issues/1370)** + - `Deployment.stop()` for graceful shutdown including updated `perf` profile downloading ([`a214786`](https://github.com/hydro-project/hydroflow/commit/a2147864b24110c9ae2c1553e9e8b55bd5065f15)) + * **[#1398](https://github.com/hydro-project/hydroflow/issues/1398)** + - Use trybuild to compile subgraph binaries ([`46a8a2c`](https://github.com/hydro-project/hydroflow/commit/46a8a2cb08732bb21096e824bc4542d208c68fb2)) + * **[#1403](https://github.com/hydro-project/hydroflow/issues/1403)** + - Only instantiate `Localhost` once ([`63b528f`](https://github.com/hydro-project/hydroflow/commit/63b528feeb2e6dac2ed12c02b2e39e0d42133a74)) + * **[#1413](https://github.com/hydro-project/hydroflow/issues/1413)** + - Rename integration crates to drop CLI references ([`0a465e5`](https://github.com/hydro-project/hydroflow/commit/0a465e55dd39c76bc1aefb020460a639d792fe87)) + * **[#1423](https://github.com/hydro-project/hydroflow/issues/1423)** + - Lower min dependency versions where possible, update `Cargo.lock` ([`11af328`](https://github.com/hydro-project/hydroflow/commit/11af32828bab6e4a4264d2635ff71a12bb0bb778)) + * **Uncategorized** + - Manually set versions for crates renamed in #1413 ([`a2ec110`](https://github.com/hydro-project/hydroflow/commit/a2ec110ccadb97e293b19d83a155d98d94224bba)) +
+ +## 0.8.0 (2024-07-23) + + + + + + + + + + +### Chore + + - update pinned rust version to 2024-06-17 + +### Refactor + + - build cache cleanup + * Replace mystery tuple with new `struct BuildOutput` + * Replace `Mutex` and `Arc`-infested `HashMap` with `memo-map` crate, + greatly simplifying build cache typing + * Remove redundant build caching in `HydroflowCrateService`, expose and + use cache parameters as `BuildParams` + * Remove `once_cell` and `async-once-cell` dependencies, use `std`'s + `OnceLock` + * Add `Failed to execute command: {}` context to `perf` error message + * Cleanup some repeated `format!` expressions + +### Style + + - rename `SSH` -> `Ssh` + +### Refactor (BREAKING) + + - make `Host` trait use `&self` interior mutability to remove `RwLock` wrappings #430 + Depends on #1346 + - make `HydroflowSource`, `HydroflowSink` traits use `&self` interior mutability to remove `RwLock` wrappings #430 + Depends on #1339 + - replace `async-channel` with `tokio::sync::mpsc::unbounded_channel` + Depends on #1339 + + We could make the publicly facing `stdout`, `stderr` APIs return `impl Stream` in the future, maybe + - replace some uses of `tokio::sync::RwLock` with `std::sync::Mutex` #430 (3/3) + +### Style (BREAKING) + + - enable clippy `upper-case-acronyms-aggressive` + * rename `GCP` -> `Gcp`, `NodeID` -> `NodeId` + * update CI `cargo-generate` template testing to use PR's branch instead + of whatever `main` happens to be + +### Commit Statistics + + + + - 9 commits contributed to the release. + - 8 commits were understood as [conventional](https://www.conventionalcommits.org). 
+ - 8 unique issues were worked on: [#1309](https://github.com/hydro-project/hydroflow/issues/1309), [#1334](https://github.com/hydro-project/hydroflow/issues/1334), [#1339](https://github.com/hydro-project/hydroflow/issues/1339), [#1340](https://github.com/hydro-project/hydroflow/issues/1340), [#1345](https://github.com/hydro-project/hydroflow/issues/1345), [#1346](https://github.com/hydro-project/hydroflow/issues/1346), [#1347](https://github.com/hydro-project/hydroflow/issues/1347), [#1356](https://github.com/hydro-project/hydroflow/issues/1356) + +### Commit Details + + + +
view details + + * **[#1309](https://github.com/hydro-project/hydroflow/issues/1309)** + - Update pinned rust version to 2024-06-17 ([`3098f77`](https://github.com/hydro-project/hydroflow/commit/3098f77fd99882aae23c4b31017aa4b761306197)) + * **[#1334](https://github.com/hydro-project/hydroflow/issues/1334)** + - Build cache cleanup ([`0feae74`](https://github.com/hydro-project/hydroflow/commit/0feae7454e4674eea1f3308b3d6d4e9d459cda67)) + * **[#1339](https://github.com/hydro-project/hydroflow/issues/1339)** + - Replace some uses of `tokio::sync::RwLock` with `std::sync::Mutex` #430 (3/3) ([`141eae1`](https://github.com/hydro-project/hydroflow/commit/141eae1c3a1869fa42756250618a21ea2a2c7e34)) + * **[#1340](https://github.com/hydro-project/hydroflow/issues/1340)** + - Rename `SSH` -> `Ssh` ([`947ebc1`](https://github.com/hydro-project/hydroflow/commit/947ebc1cb21a07fbfacae4ac956dbd0015a8a418)) + * **[#1345](https://github.com/hydro-project/hydroflow/issues/1345)** + - Enable clippy `upper-case-acronyms-aggressive` ([`12b8ba5`](https://github.com/hydro-project/hydroflow/commit/12b8ba53f28eb9de1318b41cdf1e23282f6f0eb6)) + * **[#1346](https://github.com/hydro-project/hydroflow/issues/1346)** + - Make `HydroflowSource`, `HydroflowSink` traits use `&self` interior mutability to remove `RwLock` wrappings #430 ([`057a0a5`](https://github.com/hydro-project/hydroflow/commit/057a0a510568cf81932368c8c65e056f91af7202)) + * **[#1347](https://github.com/hydro-project/hydroflow/issues/1347)** + - Make `Host` trait use `&self` interior mutability to remove `RwLock` wrappings #430 ([`c5a8de2`](https://github.com/hydro-project/hydroflow/commit/c5a8de28e7844b3c29d58116d8340967f2e6bcc4)) + * **[#1356](https://github.com/hydro-project/hydroflow/issues/1356)** + - Replace `async-channel` with `tokio::sync::mpsc::unbounded_channel` 
([`6039078`](https://github.com/hydro-project/hydroflow/commit/60390782dd7dcec18d193c800af716843a944dba)) + * **Uncategorized** + - Release hydroflow_lang v0.8.0, hydroflow_datalog_core v0.8.0, hydroflow_datalog v0.8.0, hydroflow_macro v0.8.0, lattices_macro v0.5.5, lattices v0.5.6, variadics v0.0.5, pusherator v0.0.7, hydroflow v0.8.0, hydroflow_plus v0.8.0, hydro_deploy v0.8.0, hydro_cli v0.8.0, hydroflow_plus_cli_integration v0.8.0, safety bump 7 crates ([`ca6c16b`](https://github.com/hydro-project/hydroflow/commit/ca6c16b4a7ce35e155fe7fc6c7d1676c37c9e4de)) +
+ +## 0.7.0 (2024-05-24) + + + +### Chore + + - update pyo3, silence warnings in generated code + +### New Features + + - add support for collecting counts and running perf + +### Commit Statistics + + + + - 3 commits contributed to the release. + - 2 commits were understood as [conventional](https://www.conventionalcommits.org). + - 2 unique issues were worked on: [#1152](https://github.com/hydro-project/hydroflow/issues/1152), [#1157](https://github.com/hydro-project/hydroflow/issues/1157) + +### Commit Details + + + +
view details + + * **[#1152](https://github.com/hydro-project/hydroflow/issues/1152)** + - Update pyo3, silence warnings in generated code ([`1801502`](https://github.com/hydro-project/hydroflow/commit/18015029a725b068696ed9edefd1097583c858a6)) + * **[#1157](https://github.com/hydro-project/hydroflow/issues/1157)** + - Add support for collecting counts and running perf ([`29a263f`](https://github.com/hydro-project/hydroflow/commit/29a263fb564c5ce4bc495ea4e9d20b8b2621b645)) + * **Uncategorized** + - Release hydroflow_lang v0.7.0, hydroflow_datalog_core v0.7.0, hydroflow_datalog v0.7.0, hydroflow_macro v0.7.0, lattices v0.5.5, multiplatform_test v0.1.0, pusherator v0.0.6, hydroflow v0.7.0, stageleft_macro v0.2.0, stageleft v0.3.0, stageleft_tool v0.2.0, hydroflow_plus v0.7.0, hydro_deploy v0.7.0, hydro_cli v0.7.0, hydroflow_plus_cli_integration v0.7.0, safety bump 8 crates ([`2852147`](https://github.com/hydro-project/hydroflow/commit/285214740627685e911781793e05d234ab2ad2bd)) +
+ +## 0.6.1 (2024-04-09) + + + +### Style + + - qualified path cleanups for clippy + +### Commit Statistics + + + + - 4 commits contributed to the release. + - 1 commit was understood as [conventional](https://www.conventionalcommits.org). + - 1 unique issue was worked on: [#1090](https://github.com/hydro-project/hydroflow/issues/1090) + +### Commit Details + + + +
view details + + * **[#1090](https://github.com/hydro-project/hydroflow/issues/1090)** + - Qualified path cleanups for clippy ([`7958fb0`](https://github.com/hydro-project/hydroflow/commit/7958fb0d900be8fe7359326abfa11dcb8fb35e8a)) + * **Uncategorized** + - Release hydroflow_plus v0.6.1, hydro_deploy v0.6.1, hydro_cli v0.6.1, hydroflow_plus_cli_integration v0.6.1 ([`c385c13`](https://github.com/hydro-project/hydroflow/commit/c385c132c9733d1bace82156aa14216b8e7fef9f)) + - Release hydroflow_lang v0.6.2, hydroflow v0.6.2, hydroflow_plus v0.6.1, hydro_deploy v0.6.1, hydro_cli v0.6.1, hydroflow_plus_cli_integration v0.6.1, stageleft_tool v0.1.1 ([`23cfe08`](https://github.com/hydro-project/hydroflow/commit/23cfe0839079aa17d042bbd3976f6d188689d290)) + - Release hydroflow_cli_integration v0.5.2, hydroflow_lang v0.6.1, hydroflow_datalog_core v0.6.1, lattices v0.5.4, hydroflow v0.6.1, stageleft_macro v0.1.1, stageleft v0.2.1, hydroflow_plus v0.6.1, hydro_deploy v0.6.1, hydro_cli v0.6.1, hydroflow_plus_cli_integration v0.6.1, stageleft_tool v0.1.1 ([`cd63f22`](https://github.com/hydro-project/hydroflow/commit/cd63f2258c961a40f0e5dbef20ac329a2d570ad0)) +
+ ## 0.6.0 (2024-03-02) + + ### New Features - Add support for azure @@ -21,8 +241,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 2 commits contributed to the release over the course of 7 calendar days. - - 28 days passed between releases. + - 3 commits contributed to the release. - 2 commits were understood as [conventional](https://www.conventionalcommits.org). - 2 unique issues were worked on: [#1015](https://github.com/hydro-project/hydroflow/issues/1015), [#1043](https://github.com/hydro-project/hydroflow/issues/1043) @@ -36,6 +255,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Consolidate tasks and use sccache and nextest ([`e9639f6`](https://github.com/hydro-project/hydroflow/commit/e9639f608f8dafd3f384837067800a66951b25df)) * **[#1043](https://github.com/hydro-project/hydroflow/issues/1043)** - Add support for azure ([`fcf43bf`](https://github.com/hydro-project/hydroflow/commit/fcf43bf86fe550247dffa4641a9ce3aff3b9afc3)) + * **Uncategorized** + - Release hydroflow_lang v0.6.0, hydroflow_datalog_core v0.6.0, hydroflow_datalog v0.6.0, hydroflow_macro v0.6.0, lattices v0.5.3, variadics v0.0.4, pusherator v0.0.5, hydroflow v0.6.0, stageleft v0.2.0, hydroflow_plus v0.6.0, hydro_deploy v0.6.0, hydro_cli v0.6.0, hydroflow_plus_cli_integration v0.6.0, safety bump 7 crates ([`09ea65f`](https://github.com/hydro-project/hydroflow/commit/09ea65fe9cd45c357c43bffca30e60243fa45cc8)) ## 0.5.1 (2024-02-02) @@ -104,8 +325,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 7 commits contributed to the release over the course of 43 calendar days. - - 114 days passed between releases. + - 7 commits contributed to the release. - 5 commits were understood as [conventional](https://www.conventionalcommits.org). 
- 4 unique issues were worked on: [#1046](https://github.com/hydro-project/hydroflow/issues/1046), [#986](https://github.com/hydro-project/hydroflow/issues/986), [#987](https://github.com/hydro-project/hydroflow/issues/987), [#994](https://github.com/hydro-project/hydroflow/issues/994) @@ -259,7 +479,7 @@ Unchanged from previous release. - Manually bump versions for v0.2.0 release ([`fd896fb`](https://github.com/hydro-project/hydroflow/commit/fd896fbe925fbd8ef1d16be7206ac20ba585081a)) -## v0.1.0 (2023-05-30) +## v0.1.0 (2023-05-29) diff --git a/hydro_deploy/hydro_cli/Cargo.toml b/hydro_deploy/hydro_cli/Cargo.toml index 4047a5983c35..ed127a4f4049 100644 --- a/hydro_deploy/hydro_cli/Cargo.toml +++ b/hydro_deploy/hydro_cli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "hydro_cli" publish = true -version = "0.6.0" +version = "0.9.0" edition = "2021" license = "Apache-2.0" documentation = "https://docs.rs/hydro_cli/" @@ -13,18 +13,16 @@ name = "hydro_cli" crate-type = ["cdylib"] [dependencies] -hydro_deploy = { path = "../core", version = "^0.6.0" } -tokio = { version = "1.16", features = [ "full" ] } -once_cell = "1.17" -anyhow = { version = "1.0.69", features = [ "backtrace" ] } -clap = { version = "4.1.8", features = ["derive"] } -pyo3 = { version = "0.18", features = ["abi3-py37"] } -pyo3-asyncio = { version = "0.18", features = ["attributes", "tokio-runtime"] } -pythonize = "0.18" -futures = "0.3.26" -async-channel = "1.8.0" +hydro_deploy = { path = "../core", version = "^0.9.0" } +tokio = { version = "1.29.0", features = [ "full" ] } +anyhow = { version = "1.0.82", features = [ "backtrace" ] } +clap = { version = "4.5.4", features = ["derive"] } +pyo3 = { version = "0.20.0", features = ["abi3-py37"] } +pyo3-asyncio = { version = "0.20.0", features = ["attributes", "tokio-runtime"] } +pythonize = "0.20.0" +futures = "0.3.0" bytes = "1.1.0" -hydroflow_cli_integration = { path = "../hydroflow_cli_integration", version = "^0.5.1" } 
+hydroflow_deploy_integration = { path = "../hydroflow_deploy_integration", version = "^0.9.0" } # request vendored openssl -async-ssh2-lite = { version = "0.4.2", features = [ "vendored-openssl" ] } +async-ssh2-lite = { version = "0.5.0", features = [ "vendored-openssl" ] } diff --git a/hydro_deploy/hydro_cli/hydro/_core.pyi b/hydro_deploy/hydro_cli/hydro/_core.pyi index 5f561ce846a3..1e46fc653642 100644 --- a/hydro_deploy/hydro_cli/hydro/_core.pyi +++ b/hydro_deploy/hydro_cli/hydro/_core.pyi @@ -16,7 +16,7 @@ class Deployment(object): def Localhost(self) -> "LocalhostHost": ... - def GCPComputeEngineHost(self, project: str, machine_type: str, image: str, region: str, network: "GCPNetwork", user: Optional[str] = None) -> "GCPComputeEngineHost": ... + def GcpComputeEngineHost(self, project: str, machine_type: str, image: str, region: str, network: "GcpNetwork", user: Optional[str] = None) -> "GcpComputeEngineHost": ... def CustomService(self, on: "Host", external_ports: List[int]) -> "CustomService": ... @@ -32,10 +32,10 @@ class Host(object): class LocalhostHost(Host): def client_only() -> "LocalhostHost": ... -class GCPNetwork(object): +class GcpNetwork(object): def __init__(self, project: str, existing: Optional[str] = None) -> None: ... -class GCPComputeEngineHost(Host): +class GcpComputeEngineHost(Host): internal_ip: str external_ip: Optional[str] ssh_key_path: str diff --git a/hydro_deploy/hydro_cli/src/lib.rs b/hydro_deploy/hydro_cli/src/lib.rs index 6e2fcb793cba..b9af3d7b8067 100644 --- a/hydro_deploy/hydro_cli/src/lib.rs +++ b/hydro_deploy/hydro_cli/src/lib.rs @@ -1,13 +1,16 @@ +// TODO(mingwei): For pyo3 generated code. 
+#![allow(unused_qualifications, non_local_definitions)] + use core::hydroflow_crate::ports::HydroflowSource; +use std::cell::OnceCell; use std::collections::HashMap; -use std::ops::DerefMut; +use std::ops::Deref; use std::pin::Pin; -use std::sync::Arc; +use std::sync::{Arc, OnceLock}; -use async_channel::Receiver; use bytes::Bytes; use futures::{Future, SinkExt, StreamExt}; -use hydroflow_cli_integration::{ +use hydroflow_deploy_integration::{ ConnectedDirect, ConnectedSink, ConnectedSource, DynSink, DynStream, ServerOrBound, }; use pyo3::exceptions::{PyException, PyStopAsyncIteration}; @@ -17,11 +20,11 @@ use pyo3::{create_exception, wrap_pymodule}; use pyo3_asyncio::TaskLocals; use pythonize::pythonize; use tokio::sync::oneshot::Sender; -use tokio::sync::RwLock; +use tokio::sync::{Mutex, RwLock}; mod cli; -use hydro_deploy as core; -use hydro_deploy::ssh::LaunchedSSHHost; +use hydro_deploy::ssh::LaunchedSshHost; +use hydro_deploy::{self as core}; static TOKIO_RUNTIME: std::sync::RwLock> = std::sync::RwLock::new(None); @@ -53,7 +56,7 @@ impl pyo3_asyncio::generic::Runtime for TokioRuntime { } tokio::task_local! 
{ - static TASK_LOCALS: once_cell::unsync::OnceCell; + static TASK_LOCALS: OnceCell; } impl pyo3_asyncio::generic::ContextExt for TokioRuntime { @@ -61,17 +64,16 @@ impl pyo3_asyncio::generic::ContextExt for TokioRuntime { where F: Future + Send + 'static, { - let cell = once_cell::unsync::OnceCell::new(); + let cell = OnceCell::new(); cell.set(locals).unwrap(); Box::pin(TASK_LOCALS.scope(cell, fut)) } fn get_task_locals() -> Option { - match TASK_LOCALS.try_with(|c| c.get().cloned()) { - Ok(locals) => locals, - Err(_) => None, - } + TASK_LOCALS + .try_with(|c| c.get().cloned()) + .unwrap_or_default() } } @@ -91,8 +93,7 @@ impl SafeCancelToken { } } -static CONVERTERS_MODULE: once_cell::sync::OnceCell> = - once_cell::sync::OnceCell::new(); +static CONVERTERS_MODULE: OnceLock> = OnceLock::new(); fn interruptible_future_to_py(py: Python<'_>, fut: F) -> PyResult<&PyAny> where @@ -141,7 +142,7 @@ impl AnyhowWrapper { #[pyclass(subclass)] #[derive(Clone)] struct HydroflowSink { - underlying: Arc>, + underlying: Arc, } #[pyclass(name = "Deployment")] @@ -154,13 +155,13 @@ impl Deployment { #[new] fn new() -> Self { Deployment { - underlying: Arc::new(RwLock::new(core::Deployment::default())), + underlying: Arc::new(RwLock::new(core::Deployment::new())), } } #[allow(non_snake_case)] fn Localhost(&self, py: Python<'_>) -> PyResult> { - let arc = self.underlying.blocking_write().Localhost(); + let arc = self.underlying.blocking_read().Localhost(); Ok(Py::new( py, @@ -173,18 +174,19 @@ impl Deployment { } #[allow(non_snake_case, clippy::too_many_arguments)] - fn GCPComputeEngineHost( + fn GcpComputeEngineHost( &self, py: Python<'_>, project: String, machine_type: String, image: String, region: String, - network: GCPNetwork, + network: GcpNetwork, user: Option, + startup_script: Option, ) -> PyResult> { let arc = self.underlying.blocking_write().add_host(|id| { - core::GCPComputeEngineHost::new( + core::GcpComputeEngineHost::new( id, project, machine_type, @@ -192,6 +194,7 @@ 
impl Deployment { region, network.underlying, user, + startup_script, ) }); @@ -200,7 +203,7 @@ impl Deployment { PyClassInitializer::from(Host { underlying: arc.clone(), }) - .add_subclass(GCPComputeEngineHost { underlying: arc }), + .add_subclass(GcpComputeEngineHost { underlying: arc }), )? .into_py(py)) } @@ -276,6 +279,10 @@ impl Deployment { bin, example, profile, + None, // Python API doesn't support rustflags + None, // Python API doesn't support target_dir + false, // Python API doesn't support no_default_features + None, // Python API doesn't support perf features, args, display_id, @@ -324,20 +331,18 @@ impl Deployment { #[pyclass(subclass)] pub struct Host { - underlying: Arc>, + underlying: Arc, } #[pyclass(extends=Host, subclass)] struct LocalhostHost { - underlying: Arc>, + underlying: Arc, } #[pymethods] impl LocalhostHost { fn client_only(&self, py: Python<'_>) -> PyResult> { - let arc = Arc::new(RwLock::new( - self.underlying.try_read().unwrap().client_only(), - )); + let arc = Arc::new(self.underlying.client_only()); Ok(Py::new( py, @@ -352,55 +357,42 @@ impl LocalhostHost { #[pyclass] #[derive(Clone)] -struct GCPNetwork { - underlying: Arc>, +struct GcpNetwork { + underlying: Arc>, } #[pymethods] -impl GCPNetwork { +impl GcpNetwork { #[new] fn new(project: String, existing: Option) -> Self { - GCPNetwork { - underlying: Arc::new(RwLock::new(core::gcp::GCPNetwork::new(project, existing))), + GcpNetwork { + underlying: Arc::new(RwLock::new(core::gcp::GcpNetwork::new(project, existing))), } } } #[pyclass(extends=Host, subclass)] -struct GCPComputeEngineHost { - underlying: Arc>, +struct GcpComputeEngineHost { + underlying: Arc, } #[pymethods] -impl GCPComputeEngineHost { +impl GcpComputeEngineHost { #[getter] fn internal_ip(&self) -> String { - self.underlying - .blocking_read() - .launched - .as_ref() - .unwrap() - .internal_ip - .clone() + self.underlying.launched.get().unwrap().internal_ip.clone() } #[getter] fn external_ip(&self) -> Option { - 
self.underlying - .blocking_read() - .launched - .as_ref() - .unwrap() - .external_ip - .clone() + self.underlying.launched.get().unwrap().external_ip.clone() } #[getter] fn ssh_key_path(&self) -> String { self.underlying - .blocking_read() .launched - .as_ref() + .get() .unwrap() .ssh_key_path() .to_str() @@ -411,39 +403,26 @@ impl GCPComputeEngineHost { #[pyclass(extends=Host, subclass)] struct AzureHost { - underlying: Arc>, + underlying: Arc, } #[pymethods] impl AzureHost { #[getter] fn internal_ip(&self) -> String { - self.underlying - .blocking_read() - .launched - .as_ref() - .unwrap() - .internal_ip - .clone() + self.underlying.launched.get().unwrap().internal_ip.clone() } #[getter] fn external_ip(&self) -> Option { - self.underlying - .blocking_read() - .launched - .as_ref() - .unwrap() - .external_ip - .clone() + self.underlying.launched.get().unwrap().external_ip.clone() } #[getter] fn ssh_key_path(&self) -> String { self.underlying - .blocking_read() .launched - .as_ref() + .get() .unwrap() .ssh_key_path() .to_str() @@ -471,7 +450,7 @@ impl Service { #[pyclass] struct PyReceiver { - receiver: Arc>, + receiver: Arc>>, } #[pymethods] @@ -481,13 +460,15 @@ impl PyReceiver { } fn __anext__<'p>(&self, py: Python<'p>) -> Option<&'p PyAny> { - let my_receiver = self.receiver.clone(); + let receiver = self.receiver.clone(); Some( interruptible_future_to_py(py, async move { - let underlying = my_receiver.recv(); - underlying + receiver + .lock() .await - .map_err(|_| PyStopAsyncIteration::new_err(())) + .recv() + .await + .ok_or_else(|| PyStopAsyncIteration::new_err(())) }) .unwrap(), ) @@ -502,8 +483,8 @@ struct CustomService { #[pymethods] impl CustomService { fn client_port(&self, py: Python<'_>) -> PyResult> { - let arc = Arc::new(RwLock::new(core::custom_service::CustomClientPort::new( - Arc::downgrade(&self.underlying), + let arc = Arc::new(core::custom_service::CustomClientPort::new(Arc::downgrade( + &self.underlying, ))); Ok(Py::new( @@ -520,31 +501,27 
@@ impl CustomService { #[pyclass(extends=HydroflowSink, subclass)] #[derive(Clone)] struct CustomClientPort { - underlying: Arc>, + underlying: Arc, } #[pymethods] impl CustomClientPort { - fn send_to(&mut self, to: &HydroflowSink) { - self.underlying - .try_write() - .unwrap() - .send_to(to.underlying.try_write().unwrap().deref_mut()); + fn send_to(&self, to: &HydroflowSink) { + self.underlying.send_to(to.underlying.deref()); } fn tagged(&self, tag: u32) -> TaggedSource { TaggedSource { - underlying: Arc::new(RwLock::new(core::hydroflow_crate::ports::TaggedSource { + underlying: Arc::new(core::hydroflow_crate::ports::TaggedSource { source: self.underlying.clone(), tag, - })), + }), } } fn server_port<'p>(&self, py: Python<'p>) -> PyResult<&'p PyAny> { let underlying = self.underlying.clone(); interruptible_future_to_py(py, async move { - let underlying = underlying.read().await; Ok(ServerPort { underlying: underlying.server_port().await, }) @@ -564,7 +541,7 @@ impl HydroflowCrate { interruptible_future_to_py(py, async move { let underlying = underlying.read().await; Ok(PyReceiver { - receiver: Arc::new(underlying.stdout().await), + receiver: Arc::new(Mutex::new(underlying.stdout())), }) }) } @@ -574,7 +551,7 @@ impl HydroflowCrate { interruptible_future_to_py(py, async move { let underlying = underlying.read().await; Ok(PyReceiver { - receiver: Arc::new(underlying.stderr().await), + receiver: Arc::new(Mutex::new(underlying.stderr())), }) }) } @@ -583,7 +560,7 @@ impl HydroflowCrate { let underlying = self.underlying.clone(); interruptible_future_to_py(py, async move { let underlying = underlying.read().await; - Ok(underlying.exit_code().await) + Ok(underlying.exit_code()) }) } @@ -604,12 +581,12 @@ struct HydroflowCratePorts { #[pymethods] impl HydroflowCratePorts { fn __getattribute__(&self, name: String, py: Python<'_>) -> PyResult> { - let arc = Arc::new(RwLock::new( + let arc = Arc::new( self.underlying .try_read() .unwrap() .get_port(name, &self.underlying), 
- )); + ); Ok(Py::new( py, @@ -625,15 +602,13 @@ impl HydroflowCratePorts { #[pyclass(extends=HydroflowSink, subclass)] #[derive(Clone)] struct HydroflowCratePort { - underlying: Arc>, + underlying: Arc, } #[pymethods] impl HydroflowCratePort { fn merge(&self, py: Python<'_>) -> PyResult> { - let arc = Arc::new(RwLock::new( - self.underlying.try_read().unwrap().clone().merge(), - )); + let arc = Arc::new(self.underlying.clone().merge()); Ok(Py::new( py, @@ -645,19 +620,16 @@ impl HydroflowCratePort { .into_py(py)) } - fn send_to(&mut self, to: &HydroflowSink) { - self.underlying - .try_write() - .unwrap() - .send_to(to.underlying.try_write().unwrap().deref_mut()); + fn send_to(&self, to: &HydroflowSink) { + self.underlying.send_to(to.underlying.deref()); } fn tagged(&self, tag: u32) -> TaggedSource { TaggedSource { - underlying: Arc::new(RwLock::new(core::hydroflow_crate::ports::TaggedSource { + underlying: Arc::new(core::hydroflow_crate::ports::TaggedSource { source: self.underlying.clone(), tag, - })), + }), } } } @@ -665,7 +637,7 @@ impl HydroflowCratePort { #[pyfunction] fn demux(mapping: &PyDict) -> HydroflowSink { HydroflowSink { - underlying: Arc::new(RwLock::new(core::hydroflow_crate::ports::DemuxSink { + underlying: Arc::new(core::hydroflow_crate::ports::DemuxSink { demux: mapping .into_iter() .map(|(k, v)| { @@ -674,31 +646,28 @@ fn demux(mapping: &PyDict) -> HydroflowSink { (k, v.underlying) }) .collect(), - })), + }), } } #[pyclass(subclass)] #[derive(Clone)] struct TaggedSource { - underlying: Arc>, + underlying: Arc, } #[pymethods] impl TaggedSource { - fn send_to(&mut self, to: &HydroflowSink) { - self.underlying - .try_write() - .unwrap() - .send_to(to.underlying.try_write().unwrap().deref_mut()); + fn send_to(&self, to: &HydroflowSink) { + self.underlying.send_to(to.underlying.deref()); } fn tagged(&self, tag: u32) -> TaggedSource { TaggedSource { - underlying: Arc::new(RwLock::new(core::hydroflow_crate::ports::TaggedSource { + underlying: 
Arc::new(core::hydroflow_crate::ports::TaggedSource { source: self.underlying.clone(), tag, - })), + }), } } } @@ -706,31 +675,28 @@ impl TaggedSource { #[pyclass(extends=HydroflowSink, subclass)] #[derive(Clone)] struct HydroflowNull { - underlying: Arc>, + underlying: Arc, } #[pymethods] impl HydroflowNull { - fn send_to(&mut self, to: &HydroflowSink) { - self.underlying - .try_write() - .unwrap() - .send_to(to.underlying.try_write().unwrap().deref_mut()); + fn send_to(&self, to: &HydroflowSink) { + self.underlying.send_to(to.underlying.deref()); } fn tagged(&self, tag: u32) -> TaggedSource { TaggedSource { - underlying: Arc::new(RwLock::new(core::hydroflow_crate::ports::TaggedSource { + underlying: Arc::new(core::hydroflow_crate::ports::TaggedSource { source: self.underlying.clone(), tag, - })), + }), } } } #[pyfunction] fn null(py: Python<'_>) -> PyResult> { - let arc = Arc::new(RwLock::new(core::hydroflow_crate::ports::NullSourceSink)); + let arc = Arc::new(core::hydroflow_crate::ports::NullSourceSink); Ok(Py::new( py, @@ -744,7 +710,7 @@ fn null(py: Python<'_>) -> PyResult> { #[pyclass] struct ServerPort { - underlying: hydroflow_cli_integration::ServerPort, + underlying: hydroflow_deploy_integration::ServerPort, } fn with_tokio_runtime(f: impl Fn() -> T) -> T { @@ -760,7 +726,7 @@ impl ServerPort { } #[allow(clippy::wrong_self_convention)] - fn into_source<'p>(&mut self, py: Python<'p>) -> PyResult<&'p PyAny> { + fn into_source<'p>(&self, py: Python<'p>) -> PyResult<&'p PyAny> { let realized = with_tokio_runtime(|| ServerOrBound::Server((&self.underlying).into())); interruptible_future_to_py(py, async move { @@ -773,7 +739,7 @@ impl ServerPort { } #[allow(clippy::wrong_self_convention)] - fn into_sink<'p>(&mut self, py: Python<'p>) -> PyResult<&'p PyAny> { + fn into_sink<'p>(&self, py: Python<'p>) -> PyResult<&'p PyAny> { let realized = with_tokio_runtime(|| ServerOrBound::Server((&self.underlying).into())); interruptible_future_to_py(py, async move { @@ 
-794,7 +760,7 @@ struct PythonSink { #[pymethods] impl PythonSink { - fn send<'p>(&mut self, data: Py, py: Python<'p>) -> PyResult<&'p PyAny> { + fn send<'p>(&self, data: Py, py: Python<'p>) -> PyResult<&'p PyAny> { let underlying = self.underlying.clone(); let bytes = Bytes::from(data.as_bytes(py).to_vec()); interruptible_future_to_py(py, async move { @@ -871,8 +837,8 @@ async def coroutine_to_safely_cancellable(c, cancel_token): module.add_class::()?; module.add_class::()?; - module.add_class::()?; - module.add_class::()?; + module.add_class::()?; + module.add_class::()?; module.add_class::()?; module.add_class::()?; diff --git a/hydro_deploy/hydro_cli_examples/2pc.hydro.py b/hydro_deploy/hydro_cli_examples/2pc.hydro.py index e03528a42bca..144e36049118 100644 --- a/hydro_deploy/hydro_cli_examples/2pc.hydro.py +++ b/hydro_deploy/hydro_cli_examples/2pc.hydro.py @@ -9,14 +9,14 @@ async def main(args): deployment = hydro.Deployment() localhost_machine = deployment.Localhost() - machine1 = deployment.GCPComputeEngineHost( + machine1 = deployment.GcpComputeEngineHost( project="autocompartmentalization", machine_type="e2-micro", image="debian-cloud/debian-11", region="us-west1-a" ) if machine_1_gcp else localhost_machine - machine2 = deployment.GCPComputeEngineHost( + machine2 = deployment.GcpComputeEngineHost( project="autocompartmentalization", machine_type="e2-micro", image="debian-cloud/debian-11", diff --git a/hydro_deploy/hydro_cli_examples/Cargo.toml b/hydro_deploy/hydro_cli_examples/Cargo.toml index 0e15058ef8a3..88a88ecbb1e8 100644 --- a/hydro_deploy/hydro_cli_examples/Cargo.toml +++ b/hydro_deploy/hydro_cli_examples/Cargo.toml @@ -35,18 +35,14 @@ name = "dedalus_2pc_participant" name = "ws_chat_server" [dev-dependencies] -hydroflow = { path = "../../hydroflow", features = [ "cli_integration" ] } +hydroflow = { path = "../../hydroflow", features = [ "deploy_integration" ] } hydroflow_datalog = { path = "../../hydroflow_datalog" } -tokio = { version = "1.16", 
features = [ "full" ] } -serde = { version = "1", features = ["rc"] } -serde_json = "1" -rand = "0.8.5" -dashmap = "5.4.0" +tokio = { version = "1.29.0", features = [ "full" ] } +serde = { version = "1.0.197", features = ["rc"] } +serde_json = "1.0.115" +rand = "0.8.0" -futures = "0.3.28" +futures = "0.3.0" tokio-tungstenite = "0.20.0" - -[target.'cfg(target_os = "linux")'.dev-dependencies] -procinfo = "0.4.2" diff --git a/hydro_deploy/hydro_cli_examples/dedalus_python_receiver.hydro.py b/hydro_deploy/hydro_cli_examples/dedalus_python_receiver.hydro.py index 5337ecbdf965..789bbfa2bb42 100644 --- a/hydro_deploy/hydro_cli_examples/dedalus_python_receiver.hydro.py +++ b/hydro_deploy/hydro_cli_examples/dedalus_python_receiver.hydro.py @@ -9,11 +9,11 @@ async def main(args): deployment = hydro.Deployment() localhost_machine = deployment.Localhost() - gcp_vpc = hydro.GCPNetwork( + gcp_vpc = hydro.GcpNetwork( project="autocompartmentalization", ) - machine2 = deployment.GCPComputeEngineHost( + machine2 = deployment.GcpComputeEngineHost( project="autocompartmentalization", machine_type="e2-micro", image="debian-cloud/debian-11", diff --git a/hydro_deploy/hydro_cli_examples/examples/dedalus_2pc_coordinator/main.rs b/hydro_deploy/hydro_cli_examples/examples/dedalus_2pc_coordinator/main.rs index 055df4f205af..57eff93ca75f 100644 --- a/hydro_deploy/hydro_cli_examples/examples/dedalus_2pc_coordinator/main.rs +++ b/hydro_deploy/hydro_cli_examples/examples/dedalus_2pc_coordinator/main.rs @@ -1,10 +1,10 @@ -use hydroflow::util::cli::{ConnectedDemux, ConnectedDirect, ConnectedSink, ConnectedSource}; +use hydroflow::util::deploy::{ConnectedDemux, ConnectedDirect, ConnectedSink, ConnectedSource}; use hydroflow::util::{deserialize_from_bytes, serialize_to_bytes}; use hydroflow_datalog::datalog; #[hydroflow::main] async fn main() { - let ports = hydroflow::util::cli::init::<()>().await; + let ports = hydroflow::util::deploy::init::<()>().await; let vote_to_participant_port = ports 
.port("vote_to_participant") .connect::>() @@ -34,16 +34,16 @@ async fn main() { let mut df = datalog!( r#" - .input clientIn `source_iter([("vote".to_string(),),]) -> persist()` + .input clientIn `source_iter([("vote".to_string(),),]) -> persist::<'static>()` .output clientOut `for_each(|(i,msg):(u32,String,)| println!("committed {:?}: {:?}", i, msg))` # EDBs - .input startIndex `source_iter([(1u32,),]) -> persist()` - .input participants `source_iter(peers.clone()) -> map(|p| (p,)) -> persist()` - .input success `source_iter([(true,),]) -> persist()` - .input reject `source_iter([(false,),]) -> persist()` - .input commitInstruct `source_iter([(true,),]) -> persist()` - .input rollbackInstruct `source_iter([(false,),]) -> persist()` + .input startIndex `source_iter([(1u32,),]) -> persist::<'static>()` + .input participants `source_iter(peers.clone()) -> map(|p| (p,)) -> persist::<'static>()` + .input success `source_iter([(true,),]) -> persist::<'static>()` + .input reject `source_iter([(false,),]) -> persist::<'static>()` + .input commitInstruct `source_iter([(true,),]) -> persist::<'static>()` + .input rollbackInstruct `source_iter([(false,),]) -> persist::<'static>()` .async voteToParticipant `map(|(node_id, v):(u32,(u32,String))| (node_id, serialize_to_bytes(v))) -> dest_sink(vote_to_participant_sink)` `null::<(u32,String,)>()` .async voteFromParticipant `null::<(u32,String,bool,u32,)>()` `source_stream(vote_from_participant_source) -> map(|v| deserialize_from_bytes::<(u32,String,bool,u32,)>(v.unwrap()).unwrap())` diff --git a/hydro_deploy/hydro_cli_examples/examples/dedalus_2pc_participant/main.rs b/hydro_deploy/hydro_cli_examples/examples/dedalus_2pc_participant/main.rs index a96a325262cd..97a806db7c9d 100644 --- a/hydro_deploy/hydro_cli_examples/examples/dedalus_2pc_participant/main.rs +++ b/hydro_deploy/hydro_cli_examples/examples/dedalus_2pc_participant/main.rs @@ -1,10 +1,10 @@ -use hydroflow::util::cli::{ConnectedDemux, ConnectedDirect, ConnectedSink, 
ConnectedSource}; +use hydroflow::util::deploy::{ConnectedDemux, ConnectedDirect, ConnectedSink, ConnectedSource}; use hydroflow::util::{deserialize_from_bytes, serialize_to_bytes}; use hydroflow_datalog::datalog; #[hydroflow::main] async fn main() { - let ports = hydroflow::util::cli::init::<()>().await; + let ports = hydroflow::util::deploy::init::<()>().await; let vote_to_participant_source = ports .port("vote_to_participant") .connect::() @@ -37,24 +37,24 @@ async fn main() { let mut df = datalog!( r#" - .input myID `source_iter(my_id.clone()) -> persist() -> map(|p| (p,))` - .input coordinator `source_iter(peers.clone()) -> persist() -> map(|p| (p,))` - .input verdict `source_iter([(true,),]) -> persist()` + .input myID `source_iter(my_id.clone()) -> persist::<'static>() -> map(|p| (p,))` + .input coordinator `source_iter(peers.clone()) -> persist::<'static>() -> map(|p| (p,))` + .input verdict `source_iter([(true,),]) -> persist::<'static>()` // .output voteOut `for_each(|(i,myID):(u32,u32,)| println!("participant {:?}: message {:?}", myID, i))` - + .async voteToParticipant `null::<(u32,String,)>()` `source_stream(vote_to_participant_source) -> map(|x| deserialize_from_bytes::<(u32,String,)>(x.unwrap()).unwrap())` .async voteFromParticipant `map(|(node_id, v)| (node_id, serialize_to_bytes(v))) -> dest_sink(vote_from_participant_sink)` `null::<(u32,String,)>()` .async instructToParticipant `null::<(u32,String,bool,)>()` `source_stream(instruct_to_participant_source) -> map(|x| deserialize_from_bytes::<(u32,String,bool,)>(x.unwrap()).unwrap())` .async ackFromParticipant `map(|(node_id, v)| (node_id, serialize_to_bytes(v))) -> dest_sink(ack_from_participant_sink)` `null::<(u32,String,u32,)>()` - - # .output verdictRequest + + # .output verdictRequest # .output log - + # verdictRequest(i, msg) :- voteToParticipant(i, msg) voteFromParticipant@addr(i, msg, res, l_from) :~ voteToParticipant(i, msg), coordinator(addr), myID(l_from), verdict(res) 
ackFromParticipant@addr(i, msg, l_from) :~ instructToParticipant(i, msg, b), coordinator(addr), myID(l_from) // voteOut(i, l) :- voteToParticipant(i, msg), myID(l) - + # log(i, msg, type) :- instructToParticipant(i, msg, type) # the log channel will sort everything out "# ); diff --git a/hydro_deploy/hydro_cli_examples/examples/dedalus_receiver/main.rs b/hydro_deploy/hydro_cli_examples/examples/dedalus_receiver/main.rs index a1fb8f98d827..ae37b42e8b71 100644 --- a/hydro_deploy/hydro_cli_examples/examples/dedalus_receiver/main.rs +++ b/hydro_deploy/hydro_cli_examples/examples/dedalus_receiver/main.rs @@ -1,10 +1,10 @@ -use hydroflow::util::cli::{ConnectedDirect, ConnectedSource}; +use hydroflow::util::deploy::{ConnectedDirect, ConnectedSource}; use hydroflow::util::deserialize_from_bytes; use hydroflow_datalog::datalog; #[hydroflow::main] async fn main() { - let ports = hydroflow::util::cli::init::<()>().await; + let ports = hydroflow::util::deploy::init::<()>().await; let broadcast_recv = ports .port("broadcast") .connect::() @@ -20,5 +20,5 @@ async fn main() { "# ); - hydroflow::util::cli::launch_flow(df).await; + hydroflow::util::deploy::launch_flow(df).await; } diff --git a/hydro_deploy/hydro_cli_examples/examples/dedalus_sender/main.rs b/hydro_deploy/hydro_cli_examples/examples/dedalus_sender/main.rs index 1ba3dad412f6..b9e2c9d43f26 100644 --- a/hydro_deploy/hydro_cli_examples/examples/dedalus_sender/main.rs +++ b/hydro_deploy/hydro_cli_examples/examples/dedalus_sender/main.rs @@ -1,11 +1,11 @@ use hydroflow::tokio_stream::wrappers::IntervalStream; -use hydroflow::util::cli::{ConnectedDemux, ConnectedDirect, ConnectedSink}; +use hydroflow::util::deploy::{ConnectedDemux, ConnectedDirect, ConnectedSink}; use hydroflow::util::serialize_to_bytes; use hydroflow_datalog::datalog; #[hydroflow::main] async fn main() { - let ports = hydroflow::util::cli::init::<()>().await; + let ports = hydroflow::util::deploy::init::<()>().await; let broadcast_port = ports 
.port("broadcast") .connect::>() @@ -25,12 +25,12 @@ async fn main() { r#" .input repeated `spin() -> flat_map(|_| to_repeat.iter().cloned())` .input periodic `source_stream(periodic) -> map(|_| ())` - .input peers `source_iter(peers.clone()) -> persist() -> map(|p| (p,))` + .input peers `source_iter(peers.clone()) -> persist::<'static>() -> map(|p| (p,))` .async broadcast `map(|(node_id, v)| (node_id, serialize_to_bytes(v))) -> dest_sink(broadcast_sink)` `null::<(String,)>()` broadcast@n(x) :~ repeated(x), periodic(), peers(n) "# ); - hydroflow::util::cli::launch_flow(df).await; + hydroflow::util::deploy::launch_flow(df).await; } diff --git a/hydro_deploy/hydro_cli_examples/examples/dedalus_vote_leader/main.rs b/hydro_deploy/hydro_cli_examples/examples/dedalus_vote_leader/main.rs index 22ae1b349c31..7e21656bba5e 100644 --- a/hydro_deploy/hydro_cli_examples/examples/dedalus_vote_leader/main.rs +++ b/hydro_deploy/hydro_cli_examples/examples/dedalus_vote_leader/main.rs @@ -1,10 +1,10 @@ -use hydroflow::util::cli::{ConnectedDemux, ConnectedDirect, ConnectedSink, ConnectedSource}; +use hydroflow::util::deploy::{ConnectedDemux, ConnectedDirect, ConnectedSink, ConnectedSource}; use hydroflow::util::{deserialize_from_bytes, serialize_to_bytes}; use hydroflow_datalog::datalog; #[hydroflow::main] async fn main() { - let ports = hydroflow::util::cli::init::<()>().await; + let ports = hydroflow::util::deploy::init::<()>().await; let to_replica_port = ports .port("to_replica") .connect::>() @@ -22,13 +22,13 @@ async fn main() { let mut df = datalog!( r#" - .input clientIn `source_iter([("vote".to_string(),),]) -> persist()` + .input clientIn `source_iter([("vote".to_string(),),]) -> persist::<'static>()` .output stdout `for_each(|_:(String,)| println!("voted"))` - .input replicas `source_iter(peers.clone()) -> persist() -> map(|p| (p,))` + .input replicas `source_iter(peers.clone()) -> persist::<'static>() -> map(|p| (p,))` .async voteToReplica `map(|(node_id, v)| (node_id, 
serialize_to_bytes(v))) -> dest_sink(to_replica_sink)` `null::<(String,)>()` .async voteFromReplica `null::<(u32,String,)>()` `source_stream(from_replica_source) -> map(|v| deserialize_from_bytes::<(u32,String,)>(v.unwrap()).unwrap())` - + voteToReplica@addr(v) :~ clientIn(v), replicas(addr) allVotes(s, v) :- voteFromReplica(s, v) allVotes(s, v) :+ allVotes(s, v) diff --git a/hydro_deploy/hydro_cli_examples/examples/dedalus_vote_participant/main.rs b/hydro_deploy/hydro_cli_examples/examples/dedalus_vote_participant/main.rs index 262be0dfd7ef..eaa142a39945 100644 --- a/hydro_deploy/hydro_cli_examples/examples/dedalus_vote_participant/main.rs +++ b/hydro_deploy/hydro_cli_examples/examples/dedalus_vote_participant/main.rs @@ -1,10 +1,10 @@ -use hydroflow::util::cli::{ConnectedDemux, ConnectedDirect, ConnectedSink, ConnectedSource}; +use hydroflow::util::deploy::{ConnectedDemux, ConnectedDirect, ConnectedSink, ConnectedSource}; use hydroflow::util::{deserialize_from_bytes, serialize_to_bytes}; use hydroflow_datalog::datalog; #[hydroflow::main] async fn main() { - let ports = hydroflow::util::cli::init::<()>().await; + let ports = hydroflow::util::deploy::init::<()>().await; let to_replica_source = ports .port("to_replica") .connect::() @@ -24,11 +24,11 @@ async fn main() { let mut df = datalog!( r#" - .input myID `source_iter(my_id.clone()) -> persist() -> map(|p| (p,))` - .input leader `source_iter(peers.clone()) -> persist() -> map(|p| (p,))` + .input myID `source_iter(my_id.clone()) -> persist::<'static>() -> map(|p| (p,))` + .input leader `source_iter(peers.clone()) -> persist::<'static>() -> map(|p| (p,))` .async voteToReplica `null::<(String,)>()` `source_stream(to_replica_source) -> map(|x| deserialize_from_bytes::<(String,)>(x.unwrap()).unwrap())` .async voteFromReplica `map(|(node_id, v)| (node_id, serialize_to_bytes(v))) -> dest_sink(from_replica_sink)` `null::<(u32,String,)>()` - + voteFromReplica@addr(i, v) :~ voteToReplica(v), leader(addr), myID(i) "# ); 
diff --git a/hydro_deploy/hydro_cli_examples/examples/empty_program/main.rs b/hydro_deploy/hydro_cli_examples/examples/empty_program/main.rs index 0794176236f5..19f3e1f285af 100644 --- a/hydro_deploy/hydro_cli_examples/examples/empty_program/main.rs +++ b/hydro_deploy/hydro_cli_examples/examples/empty_program/main.rs @@ -1,6 +1,6 @@ #[hydroflow::main] async fn main() { - let _ = hydroflow::util::cli::init::<()>().await; + let _ = hydroflow::util::deploy::init::<()>().await; loop { tokio::time::sleep(std::time::Duration::from_secs(1)).await; } diff --git a/hydro_deploy/hydro_cli_examples/examples/panic_program/main.rs b/hydro_deploy/hydro_cli_examples/examples/panic_program/main.rs index 08d5f1914bfe..3b57b0680932 100644 --- a/hydro_deploy/hydro_cli_examples/examples/panic_program/main.rs +++ b/hydro_deploy/hydro_cli_examples/examples/panic_program/main.rs @@ -2,7 +2,7 @@ use std::io::Write; #[hydroflow::main] async fn main() { - let _ = hydroflow::util::cli::init::<()>().await; + let _ = hydroflow::util::deploy::init::<()>().await; println!("hello!"); std::io::stdout().flush().unwrap(); diff --git a/hydro_deploy/hydro_cli_examples/examples/stdout_receiver/main.rs b/hydro_deploy/hydro_cli_examples/examples/stdout_receiver/main.rs index 3a575a8854cd..e92794c4d5ba 100644 --- a/hydro_deploy/hydro_cli_examples/examples/stdout_receiver/main.rs +++ b/hydro_deploy/hydro_cli_examples/examples/stdout_receiver/main.rs @@ -1,9 +1,9 @@ use hydroflow::hydroflow_syntax; -use hydroflow::util::cli::{ConnectedDirect, ConnectedSource}; +use hydroflow::util::deploy::{ConnectedDirect, ConnectedSource}; #[hydroflow::main] async fn main() { - let ports = hydroflow::util::cli::init::<()>().await; + let ports = hydroflow::util::deploy::init::<()>().await; let echo_recv = ports .port("echo") .connect::() @@ -16,5 +16,5 @@ async fn main() { for_each(|x| println!("echo {:?}", x)); }; - hydroflow::util::cli::launch_flow(df).await; + hydroflow::util::deploy::launch_flow(df).await; } diff --git 
a/hydro_deploy/hydro_cli_examples/examples/tagged_stdout_receiver/main.rs b/hydro_deploy/hydro_cli_examples/examples/tagged_stdout_receiver/main.rs index 4859c59e4584..36c0dc19154b 100644 --- a/hydro_deploy/hydro_cli_examples/examples/tagged_stdout_receiver/main.rs +++ b/hydro_deploy/hydro_cli_examples/examples/tagged_stdout_receiver/main.rs @@ -1,9 +1,9 @@ use hydroflow::hydroflow_syntax; -use hydroflow::util::cli::{ConnectedDirect, ConnectedSource, ConnectedTagged}; +use hydroflow::util::deploy::{ConnectedDirect, ConnectedSource, ConnectedTagged}; #[hydroflow::main] async fn main() { - let ports = hydroflow::util::cli::init::<()>().await; + let ports = hydroflow::util::deploy::init::<()>().await; let echo_recv = ports .port("echo") .connect::>() @@ -19,5 +19,5 @@ async fn main() { for_each(|x| println!("echo {:?}", x)); }; - hydroflow::util::cli::launch_flow(df).await; + hydroflow::util::deploy::launch_flow(df).await; } diff --git a/hydro_deploy/hydro_cli_examples/examples/ws_chat_server/main.rs b/hydro_deploy/hydro_cli_examples/examples/ws_chat_server/main.rs index fc8ac3b4d787..3525c81f1428 100644 --- a/hydro_deploy/hydro_cli_examples/examples/ws_chat_server/main.rs +++ b/hydro_deploy/hydro_cli_examples/examples/ws_chat_server/main.rs @@ -1,6 +1,6 @@ use hydroflow::compiled::pull::HalfMultisetJoinState; use hydroflow::hydroflow_syntax; -use hydroflow::util::cli::{ConnectedSink, ConnectedSource}; +use hydroflow::util::deploy::{ConnectedSink, ConnectedSource}; use hydroflow::util::{deserialize_from_bytes, serialize_to_bytes}; use serde::{Deserialize, Serialize}; use tokio::net::TcpListener; @@ -29,17 +29,17 @@ struct ChatMessage { #[hydroflow::main] async fn main() { - let ports = hydroflow::util::cli::init::<()>().await; + let ports = hydroflow::util::deploy::init::<()>().await; let from_peer = ports .port("from_peer") - .connect::() + .connect::() .await .into_source(); let to_peer = ports .port("to_peer") - .connect::>() + .connect::>() .await .into_sink(); @@ 
-54,7 +54,7 @@ async fn main() { util::ws_server(ws_port).await; let df = hydroflow_syntax! { - all_peers = source_iter((0..number_of_nodes).filter(move |&i| i != self_node_id)) -> persist(); + all_peers = source_iter((0..number_of_nodes).filter(move |&i| i != self_node_id)) -> persist::<'static>(); // networking from_peer = source_stream(from_peer) -> map(|b| deserialize_from_bytes::(b.unwrap()).unwrap()); @@ -68,16 +68,16 @@ async fn main() { // helpers peer_broadcast = cross_join::<'tick, 'tick, HalfMultisetJoinState>() -> to_peer; - all_peers -> [0] peer_broadcast; - to_peers = [1] peer_broadcast; + all_peers -> [0]peer_broadcast; + to_peers = [1]peer_broadcast; names = from_client -> filter_map(|(client, msg)| if let FromClient::Name(name) = msg { Some((client, name)) } else { None }); messages = from_client -> filter_map(|(client, msg)| if let FromClient::Message { id, text } = msg { Some((client, (id, text))) } else { None }); - clients_connect -> persist() -> [pos] active_clients; - clients_disconnect -> persist() -> [neg] active_clients; + clients_connect -> persist::<'static>() -> [pos]active_clients; + clients_disconnect -> persist::<'static>() -> [neg]active_clients; active_clients = difference() -> null(); // logic @@ -88,8 +88,8 @@ async fn main() { // })) -> to_client; // replicated chat - messages -> [0] local_messages; - names -> persist() -> [1] local_messages; + messages -> [0]local_messages; + names -> persist::<'static>() -> [1]local_messages; local_messages = join::<'tick, 'tick, HalfMultisetJoinState>() -> tee(); local_messages -> map(|(client_id, ((msg_id, text), name))| (ChatMessage { @@ -108,13 +108,13 @@ async fn main() { from_peer -> map(|p| (p.msg, p.node_id, p.client_id, p.msg_id)) -> all_messages; - all_messages = union() /* -> persist() -> (PATCH 2) */ -> unique::<'tick>() -> map(|t| t.0); + all_messages = union() /* -> persist::<'static>() -> (PATCH 2) */ -> unique::<'tick>() -> map(|t| t.0); broadcast_clients = cross_join::<'static 
/*'tick (PATCH 1) */, 'static /*'tick, HalfMultisetJoinState (PATCH 2) */>() -> multiset_delta() -> to_client; - // active_clients -> [0] broadcast_clients; (PATCH 1) - clients_connect -> [0] broadcast_clients; - all_messages -> [1] broadcast_clients; + // active_clients -> [0]broadcast_clients; (PATCH 1) + clients_connect -> [0]broadcast_clients; + all_messages -> [1]broadcast_clients; }; - hydroflow::util::cli::launch_flow(df).await; + hydroflow::util::deploy::launch_flow(df).await; } diff --git a/hydro_deploy/hydro_cli_examples/hydro_python_sender.hydro.py b/hydro_deploy/hydro_cli_examples/hydro_python_sender.hydro.py index 18551515a56c..a83e1b0def52 100644 --- a/hydro_deploy/hydro_cli_examples/hydro_python_sender.hydro.py +++ b/hydro_deploy/hydro_cli_examples/hydro_python_sender.hydro.py @@ -7,11 +7,11 @@ async def main(args): deployment = hydro.Deployment() localhost_machine = deployment.Localhost() - gcp_vpc = hydro.GCPNetwork( + gcp_vpc = hydro.GcpNetwork( project="autocompartmentalization", ) - machine2 = deployment.GCPComputeEngineHost( + machine2 = deployment.GcpComputeEngineHost( project="autocompartmentalization", machine_type="e2-micro", image="debian-cloud/debian-11", diff --git a/hydro_deploy/hydro_cli_examples/pn_counter.hydro.py b/hydro_deploy/hydro_cli_examples/pn_counter.hydro.py index f247ed1bed3a..7c51488e1bf6 100644 --- a/hydro_deploy/hydro_cli_examples/pn_counter.hydro.py +++ b/hydro_deploy/hydro_cli_examples/pn_counter.hydro.py @@ -17,13 +17,13 @@ async def main(args): on=localhost_machine, ) - gcp_vpc = hydro.GCPNetwork( + gcp_vpc = hydro.GcpNetwork( project="hydro-chrisdouglas", ) def create_machine(): if args[0] == "gcp": - return deployment.GCPComputeEngineHost( + return deployment.GcpComputeEngineHost( project="hydro-chrisdouglas", machine_type="e2-micro", image="debian-cloud/debian-11", diff --git a/hydro_deploy/hydro_cli_examples/simple_dedalus.hydro.py b/hydro_deploy/hydro_cli_examples/simple_dedalus.hydro.py index 
789d1074a109..1ed7e73c19b2 100644 --- a/hydro_deploy/hydro_cli_examples/simple_dedalus.hydro.py +++ b/hydro_deploy/hydro_cli_examples/simple_dedalus.hydro.py @@ -10,11 +10,11 @@ async def main(args): deployment = hydro.Deployment() localhost_machine = deployment.Localhost() - gcp_vpc = hydro.GCPNetwork( + gcp_vpc = hydro.GcpNetwork( project="autocompartmentalization", ) - machine1 = deployment.GCPComputeEngineHost( + machine1 = deployment.GcpComputeEngineHost( project="autocompartmentalization", machine_type="e2-micro", image="debian-cloud/debian-11", @@ -22,7 +22,7 @@ async def main(args): network=gcp_vpc ) if machine_1_gcp else localhost_machine - machine2 = deployment.GCPComputeEngineHost( + machine2 = deployment.GcpComputeEngineHost( project="autocompartmentalization", machine_type="e2-micro", image="debian-cloud/debian-11", diff --git a/hydro_deploy/hydro_cli_examples/tagged_python_sender.hydro.py b/hydro_deploy/hydro_cli_examples/tagged_python_sender.hydro.py index fa616398cb3e..dd6591e08793 100644 --- a/hydro_deploy/hydro_cli_examples/tagged_python_sender.hydro.py +++ b/hydro_deploy/hydro_cli_examples/tagged_python_sender.hydro.py @@ -7,11 +7,11 @@ async def main(args): deployment = hydro.Deployment() localhost_machine = deployment.Localhost() - gcp_vpc = hydro.GCPNetwork( + gcp_vpc = hydro.GcpNetwork( project="autocompartmentalization", ) - machine2 = deployment.GCPComputeEngineHost( + machine2 = deployment.GcpComputeEngineHost( project="autocompartmentalization", machine_type="e2-micro", image="debian-cloud/debian-11", diff --git a/hydro_deploy/hydro_cli_examples/vote.hydro.py b/hydro_deploy/hydro_cli_examples/vote.hydro.py index f26247aa320e..8cab8409ee66 100644 --- a/hydro_deploy/hydro_cli_examples/vote.hydro.py +++ b/hydro_deploy/hydro_cli_examples/vote.hydro.py @@ -9,14 +9,14 @@ async def main(args): deployment = hydro.Deployment() localhost_machine = deployment.Localhost() - machine1 = deployment.GCPComputeEngineHost( + machine1 = 
deployment.GcpComputeEngineHost( project="autocompartmentalization", machine_type="e2-micro", image="debian-cloud/debian-11", region="us-west1-a" ) if machine_1_gcp else localhost_machine - machine2 = deployment.GCPComputeEngineHost( + machine2 = deployment.GcpComputeEngineHost( project="autocompartmentalization", machine_type="e2-micro", image="debian-cloud/debian-11", diff --git a/hydro_deploy/hydro_cli_examples/ws_chat_server.py b/hydro_deploy/hydro_cli_examples/ws_chat_server.py index 453869319ffc..42d2e8485293 100644 --- a/hydro_deploy/hydro_cli_examples/ws_chat_server.py +++ b/hydro_deploy/hydro_cli_examples/ws_chat_server.py @@ -10,15 +10,15 @@ async def main(args): deployment = hydro.Deployment() localhost_machine = deployment.Localhost() - gcp_vpc = hydro.GCPNetwork( + gcp_vpc = hydro.GcpNetwork( project="autocompartmentalization", ) - machines: List[hydro.GCPComputeEngineHost] = [] + machines: List[hydro.GcpComputeEngineHost] = [] chat_servers: Dict[int, hydro.HydroflowCrate] = {} ports: List[int] = [] for i in range(num_replicas): - machine = deployment.GCPComputeEngineHost( + machine = deployment.GcpComputeEngineHost( project="autocompartmentalization", machine_type="e2-micro", image="debian-cloud/debian-11", diff --git a/hydro_deploy/hydroflow_cli_integration/Cargo.toml b/hydro_deploy/hydroflow_cli_integration/Cargo.toml deleted file mode 100644 index e26318fa2479..000000000000 --- a/hydro_deploy/hydroflow_cli_integration/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "hydroflow_cli_integration" -publish = true -version = "0.5.1" -edition = "2021" -license = "Apache-2.0" -documentation = "https://docs.rs/hydroflow_cli_integration/" -description = "`hydro_cli` integration for Hydroflow" - -[dependencies] -serde = { version = "1", features = [ "derive" ] } -tempfile = "3.3.0" -async-trait = "0.1" -bytes = "1.1.0" -futures = "0.3.26" -async-recursion = "1" -pin-project = "1" - -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] -tokio 
= { version = "1.16", features = [ "full" ] } -tokio-util = { version = "0.7.4", features = [ "net", "codec" ] } - -[target.'cfg(target_arch = "wasm32")'.dependencies] -tokio = { version = "1.16", features = [ "rt" , "sync", "macros", "io-util", "time" ] } -tokio-util = { version = "0.7.4", features = [ "codec" ] } diff --git a/hydro_deploy/hydroflow_cli_integration/CHANGELOG.md b/hydro_deploy/hydroflow_deploy_integration/CHANGELOG.md similarity index 84% rename from hydro_deploy/hydroflow_cli_integration/CHANGELOG.md rename to hydro_deploy/hydroflow_deploy_integration/CHANGELOG.md index 818068d757f3..187072076cc4 100644 --- a/hydro_deploy/hydroflow_cli_integration/CHANGELOG.md +++ b/hydro_deploy/hydroflow_deploy_integration/CHANGELOG.md @@ -5,8 +5,79 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 0.9.0 (2024-08-30) + +### Chore + + - manually set versions for crates renamed in #1413 + - lower min dependency versions where possible, update `Cargo.lock` + Moved from #1418 + + --------- + +### Other + + - update `RELEASING.md` notes, prep for release, wip + +### Refactor (BREAKING) + + - rename integration crates to drop CLI references + +### Commit Statistics + + + + - 4 commits contributed to the release. + - 4 commits were understood as [conventional](https://www.conventionalcommits.org). + - 2 unique issues were worked on: [#1413](https://github.com/hydro-project/hydroflow/issues/1413), [#1423](https://github.com/hydro-project/hydroflow/issues/1423) + +### Commit Details + + + +
view details + + * **[#1413](https://github.com/hydro-project/hydroflow/issues/1413)** + - Rename integration crates to drop CLI references ([`0a465e5`](https://github.com/hydro-project/hydroflow/commit/0a465e55dd39c76bc1aefb020460a639d792fe87)) + * **[#1423](https://github.com/hydro-project/hydroflow/issues/1423)** + - Lower min dependency versions where possible, update `Cargo.lock` ([`11af328`](https://github.com/hydro-project/hydroflow/commit/11af32828bab6e4a4264d2635ff71a12bb0bb778)) + * **Uncategorized** + - Manually set versions for crates renamed in #1413 ([`a2ec110`](https://github.com/hydro-project/hydroflow/commit/a2ec110ccadb97e293b19d83a155d98d94224bba)) + - Update `RELEASING.md` notes, prep for release, wip ([`c41787f`](https://github.com/hydro-project/hydroflow/commit/c41787f527859cb9d704736ecdea5ca7bc641460)) +
+ +## 0.5.2 (2024-04-05) + + + +### Refactor + + - use `TcpListenerStream` instead of spawning task, fix #659 + +### Pre-Move Commit Statistics + + + + - 1 commit contributed to the release over the course of 9 calendar days. + - 67 days passed between releases. + - 1 commit was understood as [conventional](https://www.conventionalcommits.org). + - 1 unique issue was worked on: [#1121](https://github.com/hydro-project/hydroflow/issues/1121) + +### Pre-Move Commit Details + + + +
view details + + * **[#1121](https://github.com/hydro-project/hydroflow/issues/1121)** + - Use `TcpListenerStream` instead of spawning task, fix #659 ([`ba2df44`](https://github.com/hydro-project/hydroflow/commit/ba2df44efd42b7c4d37ebefbf82e77c6f1d4cb94)) +
+ ## 0.5.1 (2024-01-29) + + + ### New Features - auto-configure Hydro Deploy based on Hydroflow+ plans @@ -45,16 +116,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Auto-configure Hydro Deploy based on Hydroflow+ plans ([`9e27582`](https://github.com/hydro-project/hydroflow/commit/9e275824c88b24d060a7de5822e1359959b36b03)) -### Commit Statistics +### Pre-Move Commit Statistics - - 3 commits contributed to the release over the course of 39 calendar days. + - 4 commits contributed to the release over the course of 39 calendar days. - 209 days passed between releases. - 3 commits were understood as [conventional](https://www.conventionalcommits.org). - 2 unique issues were worked on: [#1046](https://github.com/hydro-project/hydroflow/issues/1046), [#986](https://github.com/hydro-project/hydroflow/issues/986) -### Commit Details +### Pre-Move Commit Details @@ -65,6 +136,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * **[#986](https://github.com/hydro-project/hydroflow/issues/986)** - Split Rust core from Python bindings ([`c50ca12`](https://github.com/hydro-project/hydroflow/commit/c50ca121b6d5e30dc07843f82caa135b68626301)) * **Uncategorized** + - Release hydroflow_cli_integration v0.5.1, hydroflow_lang v0.5.1, hydroflow_datalog_core v0.5.1, hydroflow_datalog v0.5.1, hydroflow_macro v0.5.1, lattices v0.5.1, variadics v0.0.3, pusherator v0.0.4, hydroflow v0.5.1, stageleft_macro v0.1.0, stageleft v0.1.0, hydroflow_plus v0.5.1, hydro_deploy v0.5.1, hydro_cli v0.5.1 ([`478aebc`](https://github.com/hydro-project/hydroflow/commit/478aebc8fee2aa78eab86bd386322db1c70bde6a)) - Manually set lockstep-versioned crates (and `lattices`) to version `0.5.1` ([`1b555e5`](https://github.com/hydro-project/hydroflow/commit/1b555e57c8c812bed4d6495d2960cbf77fb0b3ef)) diff --git a/hydro_deploy/hydroflow_deploy_integration/Cargo.toml 
b/hydro_deploy/hydroflow_deploy_integration/Cargo.toml new file mode 100644 index 000000000000..87ae45c3260e --- /dev/null +++ b/hydro_deploy/hydroflow_deploy_integration/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "hydroflow_deploy_integration" +publish = true +version = "0.9.0" +edition = "2021" +license = "Apache-2.0" +documentation = "https://docs.rs/hydroflow_deploy_integration/" +description = "`hydro_deploy` integration for Hydroflow" + +[dependencies] +async-recursion = "1.0.0" +async-trait = "0.1.54" +bytes = "1.1.0" +futures = "0.3.0" +pin-project = "1.0.0" +serde = { version = "1.0.197", features = [ "derive" ] } +tempfile = "3.0.0" + +# [target.'cfg(not(target_arch = "wasm32"))'.dependencies] +tokio = { version = "1.29.0", features = [ "full" ] } +tokio-util = { version = "0.7.5", features = [ "net", "codec" ] } +tokio-stream = { version = "0.1.3", default-features = false, features = [ "net" ] } + +# [target.'cfg(target_arch = "wasm32")'.dependencies] +# tokio = { version = "1.29.0", features = [ "rt" , "sync", "macros", "io-util", "time" ] } +# tokio-util = { version = "0.7.5", features = [ "codec" ] } diff --git a/hydro_deploy/hydroflow_cli_integration/src/lib.rs b/hydro_deploy/hydroflow_deploy_integration/src/lib.rs similarity index 97% rename from hydro_deploy/hydroflow_cli_integration/src/lib.rs rename to hydro_deploy/hydroflow_deploy_integration/src/lib.rs index f1888e035f31..4cd1c75c3984 100644 --- a/hydro_deploy/hydroflow_cli_integration/src/lib.rs +++ b/hydro_deploy/hydroflow_deploy_integration/src/lib.rs @@ -17,8 +17,9 @@ use tokio::io; use tokio::net::{TcpListener, TcpStream}; #[cfg(unix)] use tokio::net::{UnixListener, UnixStream}; -use tokio::sync::mpsc::UnboundedReceiver; use tokio::task::JoinHandle; +use tokio_stream::wrappers::TcpListenerStream; +use tokio_stream::StreamExt; use tokio_util::codec::{Framed, LengthDelimitedCodec}; pub type InitConfig = (HashMap, Option); @@ -135,20 +136,7 @@ impl ServerBindConfig { 
ServerBindConfig::TcpPort(host) => { let listener = TcpListener::bind((host, 0)).await.unwrap(); let addr = listener.local_addr().unwrap(); - let (conn_send, conn_recv) = tokio::sync::mpsc::unbounded_channel(); - - tokio::spawn(async move { - loop { - if conn_send - .send(listener.accept().await.map(|r| r.0)) - .is_err() - { - break; - } - } - }); - - BoundConnection::TcpPort(conn_recv, addr) + BoundConnection::TcpPort(TcpListenerStream::new(listener), addr) } ServerBindConfig::Demux(bindings) => { let mut demux = HashMap::new(); @@ -191,7 +179,7 @@ impl ServerOrBound { pub async fn accept_tcp(&mut self) -> TcpStream { if let ServerOrBound::Bound(BoundConnection::TcpPort(handle, _)) = self { - handle.recv().await.unwrap().unwrap() + handle.next().await.unwrap().unwrap() } else { panic!("Not a TCP port") } @@ -234,7 +222,7 @@ pub trait ConnectedSource { #[derive(Debug)] pub enum BoundConnection { UnixSocket(JoinHandle>, tempfile::TempDir), - TcpPort(UnboundedReceiver>, SocketAddr), + TcpPort(TcpListenerStream, SocketAddr), Demux(HashMap), Merge(Vec), Tagged(Box, u32), @@ -306,7 +294,7 @@ async fn accept(bound: BoundConnection) -> ConnectedDirect { } } BoundConnection::TcpPort(mut listener, _) => { - let stream = listener.recv().await.unwrap().unwrap(); + let stream = listener.next().await.unwrap().unwrap(); ConnectedDirect { stream_sink: Some(Box::pin(tcp_bytes(stream))), source_only: None, diff --git a/hydro_deploy/hydroflow_plus_cli_integration/CHANGELOG.md b/hydro_deploy/hydroflow_plus_cli_integration/CHANGELOG.md deleted file mode 100644 index b0aa25cedfd2..000000000000 --- a/hydro_deploy/hydroflow_plus_cli_integration/CHANGELOG.md +++ /dev/null @@ -1,94 +0,0 @@ - - -## v0.6.0 (2024-03-02) - -### New Features - - - unify send/demux/tagged APIs - feat(hydroflow_plus): unify send/demux/tagged APIs - - use an IR before lowering to Hydroflow - Makes it possible to write custom optimization passes. 
- -### Commit Statistics - - - - - 2 commits contributed to the release over the course of 2 calendar days. - - 28 days passed between releases. - - 2 commits were understood as [conventional](https://www.conventionalcommits.org). - - 2 unique issues were worked on: [#1070](https://github.com/hydro-project/hydroflow/issues/1070), [#1080](https://github.com/hydro-project/hydroflow/issues/1080) - -### Commit Details - - - -
view details - - * **[#1070](https://github.com/hydro-project/hydroflow/issues/1070)** - - Use an IR before lowering to Hydroflow ([`eb34ccd`](https://github.com/hydro-project/hydroflow/commit/eb34ccd13f56e1d07cbae35ead79daeb3b9bad20)) - * **[#1080](https://github.com/hydro-project/hydroflow/issues/1080)** - - Unify send/demux/tagged APIs ([`c1d1b51`](https://github.com/hydro-project/hydroflow/commit/c1d1b51ee26cc9946af59ac02c040e0a33d15fde)) -
- -## v0.5.1 (2024-02-03) - - - - - -### Chore - - - prep for initial release - > it contains the logic linking hydroflow+ to deploy, it should be published that’s a bug - - manually set lockstep-versioned crates (and `lattices`) to version `0.5.1` - Setting manually since - https://github.com/frewsxcv/rust-crates-index/issues/159 is messing with - smart-release - -### Chore - - - fix/add releasing for `hydroflow_plus_cli_integration`, `stageleft[_macro/_tool]` - -### New Features - - - add APIs for declaring external ports on clusters - - improve API naming and polish docs - - pass subgraph ID through deploy metadata - - improve API naming and eliminate wire API for builders - - improve Rust API for defining services - - split Rust core from Python bindings - -### Commit Statistics - - - - - 11 commits contributed to the release over the course of 43 calendar days. - - 9 commits were understood as [conventional](https://www.conventionalcommits.org). - - 6 unique issues were worked on: [#1013](https://github.com/hydro-project/hydroflow/issues/1013), [#1056](https://github.com/hydro-project/hydroflow/issues/1056), [#986](https://github.com/hydro-project/hydroflow/issues/986), [#987](https://github.com/hydro-project/hydroflow/issues/987), [#995](https://github.com/hydro-project/hydroflow/issues/995), [#996](https://github.com/hydro-project/hydroflow/issues/996) - -### Commit Details - - - -
view details - - * **[#1013](https://github.com/hydro-project/hydroflow/issues/1013)** - - Improve API naming and polish docs ([`6eeb9be`](https://github.com/hydro-project/hydroflow/commit/6eeb9be9bc4136041a2855f650ae640c478b7fc9)) - * **[#1056](https://github.com/hydro-project/hydroflow/issues/1056)** - - Prep for initial release ([`e9c7ced`](https://github.com/hydro-project/hydroflow/commit/e9c7ced8760f88e3215a4b1b4e23f8b9db159a84)) - * **[#986](https://github.com/hydro-project/hydroflow/issues/986)** - - Split Rust core from Python bindings ([`c50ca12`](https://github.com/hydro-project/hydroflow/commit/c50ca121b6d5e30dc07843f82caa135b68626301)) - * **[#987](https://github.com/hydro-project/hydroflow/issues/987)** - - Improve Rust API for defining services ([`53d7aee`](https://github.com/hydro-project/hydroflow/commit/53d7aee8dcc574d47864ec89bfea30a82eab0ee7)) - * **[#995](https://github.com/hydro-project/hydroflow/issues/995)** - - Improve API naming and eliminate wire API for builders ([`b7aafd3`](https://github.com/hydro-project/hydroflow/commit/b7aafd3c97897db4bff62c4ab0b7480ef9a799e0)) - * **[#996](https://github.com/hydro-project/hydroflow/issues/996)** - - Pass subgraph ID through deploy metadata ([`46d87fa`](https://github.com/hydro-project/hydroflow/commit/46d87fa364d3fe01422cf3c404fbc8a1d5e9fb88)) - * **Uncategorized** - - Release hydroflow_plus_cli_integration v0.5.1 ([`8202feb`](https://github.com/hydro-project/hydroflow/commit/8202febcd8ed160c23d08c37d7c13773831c97da)) - - Fix/add releasing for `hydroflow_plus_cli_integration`, `stageleft[_macro/_tool]` ([`03ce73d`](https://github.com/hydro-project/hydroflow/commit/03ce73d2eb1377179f5ebd6f80e082c2889588a7)) - - Release hydroflow_lang v0.5.2, hydroflow_datalog_core v0.5.2, hydroflow_macro v0.5.2, lattices v0.5.2, hydroflow v0.5.2, hydro_cli v0.5.1, hydroflow_plus_cli_integration v0.5.1 
([`6ac8720`](https://github.com/hydro-project/hydroflow/commit/6ac872081753548ebb8ec95549b4d820dc050d3e)) - - Manually set lockstep-versioned crates (and `lattices`) to version `0.5.1` ([`1b555e5`](https://github.com/hydro-project/hydroflow/commit/1b555e57c8c812bed4d6495d2960cbf77fb0b3ef)) - - Add APIs for declaring external ports on clusters ([`7d930a2`](https://github.com/hydro-project/hydroflow/commit/7d930a2ccf656d3d6bc5db3e22eb63c5fd6d37d1)) -
- diff --git a/hydro_deploy/hydroflow_plus_cli_integration/Cargo.toml b/hydro_deploy/hydroflow_plus_cli_integration/Cargo.toml deleted file mode 100644 index 6351bf642cb9..000000000000 --- a/hydro_deploy/hydroflow_plus_cli_integration/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "hydroflow_plus_cli_integration" -publish = true -version = "0.6.0" -edition = "2021" -license = "Apache-2.0" -documentation = "https://docs.rs/hydroflow_plus_cli_integration/" -description = "Library for working with hydro_deploy and hydroflow_plus" - -[features] -default = [] -deploy = [ "hydro_deploy", "async-channel" ] - -[dependencies] -stageleft = { path = "../../stageleft", version = "^0.2.0" } -hydroflow_plus = { path = "../../hydroflow_plus", version = "^0.6.0", features = [ "cli_integration" ] } -syn = { version = "2.0.0", features = [ "parsing", "extra-traits" ] } -tokio = { version = "1.16", features = [ "full" ] } -serde = { version = "1", features = [ "derive" ] } - -hydro_deploy = { path = "../core", version = "^0.6.0", optional = true } -async-channel = { version = "1.8.0", optional = true } - -[build-dependencies] -stageleft_tool = { path = "../../stageleft_tool", version = "^0.1.0" } diff --git a/hydro_deploy/hydroflow_plus_cli_integration/src/deploy.rs b/hydro_deploy/hydroflow_plus_cli_integration/src/deploy.rs deleted file mode 100644 index 902f84d32643..000000000000 --- a/hydro_deploy/hydroflow_plus_cli_integration/src/deploy.rs +++ /dev/null @@ -1,445 +0,0 @@ -use std::cell::RefCell; -use std::collections::HashMap; -use std::rc::Rc; -use std::sync::Arc; - -use async_channel::Receiver; -use hydro_deploy::custom_service::CustomClientPort; -use hydro_deploy::hydroflow_crate::ports::{ - DemuxSink, HydroflowSink, HydroflowSource, TaggedSource, -}; -use hydro_deploy::hydroflow_crate::HydroflowCrateService; -use hydro_deploy::{Deployment, Host}; -use hydroflow_plus::ir::HfPlusLeaf; -use hydroflow_plus::location::{ - Cluster, ClusterSpec, Deploy, 
HfSendManyToMany, HfSendManyToOne, HfSendOneToMany, - HfSendOneToOne, Location, ProcessSpec, -}; -use hydroflow_plus::FlowBuilder; -use stageleft::internal::syn::parse_quote; -use stageleft::q; -use tokio::sync::RwLock; - -use super::HydroflowPlusMeta; - -pub struct HydroDeploy {} - -impl<'a> Deploy<'a> for HydroDeploy { - type ClusterId = u32; - type Process = DeployNode<'a>; - type Cluster = DeployCluster<'a>; - type Meta = HashMap>; - type GraphId = (); - type ProcessPort = DeployPort>; - type ClusterPort = DeployPort>; -} - -pub trait DeployCrateWrapper { - fn underlying(&self) -> Arc>; - - #[allow(async_fn_in_trait)] - async fn create_sender( - &self, - port: &str, - deployment: &mut Deployment, - on: &Arc>, - ) -> CustomClientPort { - let sender_service = deployment.CustomService(on.clone(), vec![]); - let mut sender_port = sender_service.read().await.declare_client(&sender_service); - let mut recipient = self - .underlying() - .read() - .await - .get_port(port.to_string(), &self.underlying()); - - sender_port.send_to(&mut recipient); - sender_port - } - - #[allow(async_fn_in_trait)] - async fn stdout(&self) -> Receiver { - self.underlying().read().await.stdout().await - } - - #[allow(async_fn_in_trait)] - async fn stderr(&self) -> Receiver { - self.underlying().read().await.stderr().await - } -} - -#[derive(Clone)] -pub struct DeployNode<'a> { - id: usize, - builder: &'a FlowBuilder<'a, HydroDeploy>, - cycle_counter: Rc>, - next_port: Rc>, - underlying: Arc>, -} - -impl<'a> DeployCrateWrapper for DeployNode<'a> { - fn underlying(&self) -> Arc> { - self.underlying.clone() - } -} - -pub struct DeployPort { - node: N, - port: String, -} - -impl<'a> DeployPort> { - pub async fn create_sender( - &self, - deployment: &mut Deployment, - on: &Arc>, - ) -> CustomClientPort { - self.node.create_sender(&self.port, deployment, on).await - } -} - -impl<'a> DeployPort> { - pub async fn create_senders( - &self, - deployment: &mut Deployment, - on: &Arc>, - ) -> Vec { - let 
mut out = vec![]; - for member in &self.node.members { - out.push(member.create_sender(&self.port, deployment, on).await); - } - - out - } -} - -impl<'a> Location<'a> for DeployNode<'a> { - type Port = DeployPort; - type Meta = HashMap>; - - fn id(&self) -> usize { - self.id - } - - fn ir_leaves(&self) -> &'a RefCell> { - self.builder.ir_leaves() - } - - fn cycle_counter(&self) -> &RefCell { - self.cycle_counter.as_ref() - } - - fn next_port(&self) -> DeployPort { - let next_port = *self.next_port.borrow(); - *self.next_port.borrow_mut() += 1; - - DeployPort { - node: self.clone(), - port: format!("port_{}", next_port), - } - } - - fn update_meta(&mut self, meta: &Self::Meta) { - let mut n = self.underlying.try_write().unwrap(); - n.update_meta(HydroflowPlusMeta { - clusters: meta.clone(), - subgraph_id: self.id, - }); - } -} - -#[derive(Clone)] -pub struct DeployClusterNode { - underlying: Arc>, -} - -impl DeployCrateWrapper for DeployClusterNode { - fn underlying(&self) -> Arc> { - self.underlying.clone() - } -} - -#[derive(Clone)] -pub struct DeployCluster<'a> { - id: usize, - builder: &'a FlowBuilder<'a, HydroDeploy>, - cycle_counter: Rc>, - next_port: Rc>, - pub members: Vec, -} - -impl<'a> Location<'a> for DeployCluster<'a> { - type Port = DeployPort; - type Meta = HashMap>; - - fn id(&self) -> usize { - self.id - } - - fn ir_leaves(&self) -> &'a RefCell> { - self.builder.ir_leaves() - } - - fn cycle_counter(&self) -> &RefCell { - self.cycle_counter.as_ref() - } - - fn next_port(&self) -> DeployPort { - let next_port = *self.next_port.borrow(); - *self.next_port.borrow_mut() += 1; - - DeployPort { - node: self.clone(), - port: format!("port_{}", next_port), - } - } - - fn update_meta(&mut self, meta: &Self::Meta) { - let meta = HydroflowPlusMeta { - clusters: meta.clone(), - subgraph_id: self.id, - }; - - self.members.iter().for_each(|n| { - let mut n = n.underlying.try_write().unwrap(); - n.update_meta(&meta); - }); - } -} - -impl<'a> Cluster<'a> for 
DeployCluster<'a> { - type Id = u32; - - fn ids(&self) -> impl stageleft::Quoted<'a, &'a Vec> + Copy + 'a { - q!(panic!()) - } -} - -impl<'a> HfSendOneToOne<'a, DeployNode<'a>> for DeployNode<'a> { - fn connect( - &self, - other: &DeployNode<'a>, - source_port: &DeployPort>, - recipient_port: &DeployPort>, - ) { - let mut source_port = self - .underlying - .try_read() - .unwrap() - .get_port(source_port.port.clone(), &self.underlying); - - let mut recipient_port = other - .underlying - .try_read() - .unwrap() - .get_port(recipient_port.port.clone(), &other.underlying); - - source_port.send_to(&mut recipient_port); - } - - fn gen_sink_statement(&self, _port: &Self::Port) -> syn::Expr { - parse_quote!(null) - } - - fn gen_source_statement(_other: &DeployNode<'a>, _port: &Self::Port) -> syn::Expr { - parse_quote!(null) - } -} - -impl<'a> HfSendManyToOne<'a, DeployNode<'a>, u32> for DeployCluster<'a> { - fn connect( - &self, - other: &DeployNode<'a>, - source_port: &DeployPort>, - recipient_port: &DeployPort>, - ) { - let mut recipient_port = other - .underlying - .try_read() - .unwrap() - .get_port(recipient_port.port.clone(), &other.underlying) - .merge(); - - for (i, node) in self.members.iter().enumerate() { - let source_port = node - .underlying - .try_read() - .unwrap() - .get_port(source_port.port.clone(), &node.underlying); - - TaggedSource { - source: Arc::new(RwLock::new(source_port)), - tag: i as u32, - } - .send_to(&mut recipient_port); - } - } - - fn gen_sink_statement(&self, _port: &Self::Port) -> syn::Expr { - parse_quote!(null) - } - - fn gen_source_statement( - _other: &DeployNode<'a>, - _port: &DeployPort>, - ) -> syn::Expr { - parse_quote!(null) - } -} - -impl<'a> HfSendOneToMany<'a, DeployCluster<'a>, u32> for DeployNode<'a> { - fn connect( - &self, - other: &DeployCluster<'a>, - source_port: &DeployPort>, - recipient_port: &DeployPort>, - ) { - let mut source_port = self - .underlying - .try_read() - .unwrap() - .get_port(source_port.port.clone(), 
&self.underlying); - - let mut recipient_port = DemuxSink { - demux: other - .members - .iter() - .enumerate() - .map(|(id, c)| { - let n = c.underlying.try_read().unwrap(); - ( - id as u32, - Arc::new(RwLock::new( - n.get_port(recipient_port.port.clone(), &c.underlying), - )) as Arc>, - ) - }) - .collect(), - }; - - source_port.send_to(&mut recipient_port); - } - - fn gen_sink_statement(&self, _port: &Self::Port) -> syn::Expr { - parse_quote!(null) - } - - fn gen_source_statement( - _other: &DeployCluster<'a>, - _port: &DeployPort>, - ) -> syn::Expr { - parse_quote!(null) - } -} - -impl<'a> HfSendManyToMany<'a, DeployCluster<'a>, u32> for DeployCluster<'a> { - fn connect( - &self, - other: &DeployCluster<'a>, - source_port: &DeployPort>, - recipient_port: &DeployPort>, - ) { - for (i, sender) in self.members.iter().enumerate() { - let source_port = sender - .underlying - .try_read() - .unwrap() - .get_port(source_port.port.clone(), &sender.underlying); - - let mut recipient_port = DemuxSink { - demux: other - .members - .iter() - .enumerate() - .map(|(id, c)| { - let n = c.underlying.try_read().unwrap(); - ( - id as u32, - Arc::new(RwLock::new( - n.get_port(recipient_port.port.clone(), &c.underlying) - .merge(), - )) - as Arc>, - ) - }) - .collect(), - }; - - TaggedSource { - source: Arc::new(RwLock::new(source_port)), - tag: i as u32, - } - .send_to(&mut recipient_port); - } - } - - fn gen_sink_statement(&self, _port: &Self::Port) -> syn::Expr { - parse_quote!(null) - } - - fn gen_source_statement( - _other: &DeployCluster<'a>, - _port: &DeployPort>, - ) -> syn::Expr { - parse_quote!(null) - } -} - -type CrateBuilder<'a> = dyn FnMut() -> Arc> + 'a; - -pub struct DeployProcessSpec<'a>(RefCell>>); - -impl<'a> DeployProcessSpec<'a> { - pub fn new Arc> + 'a>(f: F) -> Self { - Self(RefCell::new(Box::new(f))) - } -} - -impl<'a: 'b, 'b> ProcessSpec<'a, HydroDeploy> for DeployProcessSpec<'b> { - fn build( - &self, - id: usize, - builder: &'a FlowBuilder<'a, HydroDeploy>, 
- _meta: &mut HashMap>, - ) -> DeployNode<'a> { - DeployNode { - id, - builder, - cycle_counter: Rc::new(RefCell::new(0)), - next_port: Rc::new(RefCell::new(0)), - underlying: (self.0.borrow_mut())(), - } - } -} - -type ClusterSpecFn<'a> = dyn FnMut() -> Vec>> + 'a; - -pub struct DeployClusterSpec<'a>(RefCell>>); - -impl<'a> DeployClusterSpec<'a> { - pub fn new Vec>> + 'a>(f: F) -> Self { - Self(RefCell::new(Box::new(f))) - } -} - -impl<'a: 'b, 'b> ClusterSpec<'a, HydroDeploy> for DeployClusterSpec<'b> { - fn build( - &self, - id: usize, - builder: &'a FlowBuilder<'a, HydroDeploy>, - meta: &mut HashMap>, - ) -> DeployCluster<'a> { - let cluster_nodes = (self.0.borrow_mut())(); - meta.insert(id, (0..(cluster_nodes.len() as u32)).collect()); - - DeployCluster { - id, - builder, - cycle_counter: Rc::new(RefCell::new(0)), - next_port: Rc::new(RefCell::new(0)), - members: cluster_nodes - .into_iter() - .map(|u| DeployClusterNode { underlying: u }) - .collect(), - } - } -} diff --git a/hydro_deploy/hydroflow_plus_cli_integration/src/runtime.rs b/hydro_deploy/hydroflow_plus_cli_integration/src/runtime.rs deleted file mode 100644 index eb482e15f82d..000000000000 --- a/hydro_deploy/hydroflow_plus_cli_integration/src/runtime.rs +++ /dev/null @@ -1,257 +0,0 @@ -use std::cell::RefCell; -use std::rc::Rc; - -use hydroflow_plus::ir::HfPlusLeaf; -use hydroflow_plus::location::{ - Cluster, ClusterSpec, Deploy, HfSendManyToMany, HfSendManyToOne, HfSendOneToMany, - HfSendOneToOne, Location, ProcessSpec, -}; -use hydroflow_plus::util::cli::{ - ConnectedDemux, ConnectedDirect, ConnectedSink, ConnectedSource, ConnectedTagged, HydroCLI, -}; -use hydroflow_plus::FlowBuilder; -use stageleft::{q, Quoted, RuntimeData}; - -use super::HydroflowPlusMeta; - -pub struct CLIRuntime {} - -impl<'a> Deploy<'a> for CLIRuntime { - type ClusterId = u32; - type Process = CLIRuntimeNode<'a>; - type Cluster = CLIRuntimeCluster<'a>; - type Meta = (); - type GraphId = usize; - type ProcessPort = String; - 
type ClusterPort = String; -} - -#[derive(Clone)] -pub struct CLIRuntimeNode<'a> { - id: usize, - builder: &'a FlowBuilder<'a, CLIRuntime>, - cycle_counter: Rc>, - next_port: Rc>, - cli: RuntimeData<&'a HydroCLI>, -} - -impl<'a> Location<'a> for CLIRuntimeNode<'a> { - type Port = String; - type Meta = (); - - fn id(&self) -> usize { - self.id - } - - fn ir_leaves(&self) -> &'a RefCell> { - self.builder.ir_leaves() - } - - fn cycle_counter(&self) -> &RefCell { - self.cycle_counter.as_ref() - } - - fn next_port(&self) -> String { - let next_send_port = *self.next_port.borrow(); - *self.next_port.borrow_mut() += 1; - format!("port_{}", next_send_port) - } - - fn update_meta(&mut self, _meta: &Self::Meta) {} -} - -#[derive(Clone)] -pub struct CLIRuntimeCluster<'a> { - id: usize, - builder: &'a FlowBuilder<'a, CLIRuntime>, - cycle_counter: Rc>, - next_port: Rc>, - cli: RuntimeData<&'a HydroCLI>, -} - -impl<'a> Location<'a> for CLIRuntimeCluster<'a> { - type Port = String; - type Meta = (); - - fn id(&self) -> usize { - self.id - } - - fn ir_leaves(&self) -> &'a RefCell> { - self.builder.ir_leaves() - } - - fn cycle_counter(&self) -> &RefCell { - self.cycle_counter.as_ref() - } - - fn next_port(&self) -> String { - let next_send_port = *self.next_port.borrow(); - *self.next_port.borrow_mut() += 1; - format!("port_{}", next_send_port) - } - - fn update_meta(&mut self, _meta: &Self::Meta) {} -} - -impl<'a> Cluster<'a> for CLIRuntimeCluster<'a> { - type Id = u32; - - fn ids(&self) -> impl Quoted<'a, &'a Vec> + Copy + 'a { - let cli = self.cli; - let self_id = self.id; - q!(cli.meta.clusters.get(&self_id).unwrap()) - } -} - -impl<'a> HfSendOneToOne<'a, CLIRuntimeNode<'a>> for CLIRuntimeNode<'a> { - fn connect(&self, _other: &CLIRuntimeNode, _source_port: &String, _recipient_port: &String) {} - - fn gen_sink_statement(&self, port: &String) -> syn::Expr { - let self_cli = self.cli; - let port = port.as_str(); - q!({ - self_cli - .port(port) - .connect_local_blocking::() - 
.into_sink() - }) - .splice() - } - - fn gen_source_statement(other: &CLIRuntimeNode<'a>, port: &String) -> syn::Expr { - let self_cli = other.cli; - let port = port.as_str(); - q!({ - self_cli - .port(port) - .connect_local_blocking::() - .into_source() - }) - .splice() - } -} - -impl<'a> HfSendManyToOne<'a, CLIRuntimeNode<'a>, u32> for CLIRuntimeCluster<'a> { - fn connect(&self, _other: &CLIRuntimeNode, _source_port: &String, _recipient_port: &String) {} - - fn gen_sink_statement(&self, port: &String) -> syn::Expr { - let self_cli = self.cli; - let port = port.as_str(); - q!({ - self_cli - .port(port) - .connect_local_blocking::() - .into_sink() - }) - .splice() - } - - fn gen_source_statement(other: &CLIRuntimeNode<'a>, port: &String) -> syn::Expr { - let self_cli = other.cli; - let port = port.as_str(); - q!({ - self_cli - .port(port) - .connect_local_blocking::>() - .into_source() - }) - .splice() - } -} - -impl<'a> HfSendOneToMany<'a, CLIRuntimeCluster<'a>, u32> for CLIRuntimeNode<'a> { - fn connect(&self, _other: &CLIRuntimeCluster, _source_port: &String, _recipient_port: &String) { - } - - fn gen_sink_statement(&self, port: &String) -> syn::Expr { - let self_cli = self.cli; - let port = port.as_str(); - - q!({ - self_cli - .port(port) - .connect_local_blocking::>() - .into_sink() - }) - .splice() - } - - fn gen_source_statement(other: &CLIRuntimeCluster<'a>, port: &String) -> syn::Expr { - let self_cli = other.cli; - let port = port.as_str(); - - q!({ - self_cli - .port(port) - .connect_local_blocking::() - .into_source() - }) - .splice() - } -} - -impl<'a> HfSendManyToMany<'a, CLIRuntimeCluster<'a>, u32> for CLIRuntimeCluster<'a> { - fn connect(&self, _other: &CLIRuntimeCluster, _source_port: &String, _recipient_port: &String) { - } - - fn gen_sink_statement(&self, port: &String) -> syn::Expr { - let self_cli = self.cli; - let port = port.as_str(); - - q!({ - self_cli - .port(port) - .connect_local_blocking::>() - .into_sink() - }) - .splice() - } - - fn 
gen_source_statement(other: &CLIRuntimeCluster<'a>, port: &String) -> syn::Expr { - let self_cli = other.cli; - let port = port.as_str(); - - q!({ - self_cli - .port(port) - .connect_local_blocking::>() - .into_source() - }) - .splice() - } -} - -impl<'cli> ProcessSpec<'cli, CLIRuntime> for RuntimeData<&'cli HydroCLI> { - fn build( - &self, - id: usize, - builder: &'cli FlowBuilder<'cli, CLIRuntime>, - _meta: &mut (), - ) -> CLIRuntimeNode<'cli> { - CLIRuntimeNode { - id, - builder, - cycle_counter: Rc::new(RefCell::new(0)), - next_port: Rc::new(RefCell::new(0)), - cli: *self, - } - } -} - -impl<'cli> ClusterSpec<'cli, CLIRuntime> for RuntimeData<&'cli HydroCLI> { - fn build( - &self, - id: usize, - builder: &'cli FlowBuilder<'cli, CLIRuntime>, - _meta: &mut (), - ) -> CLIRuntimeCluster<'cli> { - CLIRuntimeCluster { - id, - builder, - cycle_counter: Rc::new(RefCell::new(0)), - next_port: Rc::new(RefCell::new(0)), - cli: *self, - } - } -} diff --git a/hydro_deploy/hydroflow_plus_deploy/CHANGELOG.md b/hydro_deploy/hydroflow_plus_deploy/CHANGELOG.md new file mode 100644 index 000000000000..615125a01968 --- /dev/null +++ b/hydro_deploy/hydroflow_plus_deploy/CHANGELOG.md @@ -0,0 +1,358 @@ + + +## 0.9.0 (2024-08-30) + + + +### New Features + + - use trybuild to compile subgraph binaries + - Add end-to-end flamegraph generation for macos and linux localhost, fix #1351 + +### Refactor (BREAKING) + + - disentangle instantiated nodes from locations + - simplify process/cluster specs + --- + [//]: # (BEGIN SAPLING FOOTER) + Stack created with [Sapling](https://sapling-scm.com). Best reviewed + with + [ReviewStack](https://reviewstack.dev/hydro-project/hydroflow/pull/1394). + * #1395 + * __->__ #1394 + - defer network instantiation until after finalizing IR + --- + [//]: # (BEGIN SAPLING FOOTER) + Stack created with [Sapling](https://sapling-scm.com). Best reviewed + with + [ReviewStack](https://reviewstack.dev/hydro-project/hydroflow/pull/1377). 
+ * #1395 + * #1394 + * __->__ #1377 + +### Style (BREAKING) + + - rename some `CLI`->`Deploy`, decapitalize acronym names + +### Refactor (BREAKING) + + - rename integration crates to drop CLI references + +### Style + + - use `name_of!` macro + +### Other + + - update `RELEASING.md` notes, prep for release, wip + +### Bug Fixes + + - remove `FlowProps` + +### Chore + + - manually set versions for crates renamed in #1413 + - lower min dependency versions where possible, update `Cargo.lock` + Moved from #1418 + + --------- + + simplify process/cluster specs + --- + [//]: # (BEGIN SAPLING FOOTER) + Stack created with [Sapling](https://sapling-scm.com). Best reviewed + with + [ReviewStack](https://reviewstack.dev/hydro-project/hydroflow/pull/1394). + * #1395 + * __->__ #1394 + - defer network instantiation until after finalizing IR + --- + [//]: # (BEGIN SAPLING FOOTER) + Stack created with [Sapling](https://sapling-scm.com). Best reviewed + with + [ReviewStack](https://reviewstack.dev/hydro-project/hydroflow/pull/1377). + * #1395 + * #1394 + * __->__ #1377 + + defer network instantiation until after finalizing IR + --- + [//]: # (BEGIN SAPLING FOOTER) + Stack created with [Sapling](https://sapling-scm.com). Best reviewed + with + [ReviewStack](https://reviewstack.dev/hydro-project/hydroflow/pull/1377). + * #1395 + * #1394 + * __->__ #1377 + +### Pre-Move Commit Statistics + + + + - 4 commits contributed to the release over the course of 7 calendar days. + - 30 days passed between releases. + - 4 commits were understood as [conventional](https://www.conventionalcommits.org). + - 4 unique issues were worked on: [#1377](https://github.com/hydro-project/hydroflow/issues/1377), [#1394](https://github.com/hydro-project/hydroflow/issues/1394), [#1395](https://github.com/hydro-project/hydroflow/issues/1395), [#1398](https://github.com/hydro-project/hydroflow/issues/1398) + +### Pre-Move Commit Details + + + +
view details + + * **[#1377](https://github.com/hydro-project/hydroflow/issues/1377)** + - Defer network instantiation until after finalizing IR ([`0eba702`](https://github.com/hydro-project/hydroflow/commit/0eba702f62e7a6816cf931b01a2ea5643bd7321d)) + * **[#1394](https://github.com/hydro-project/hydroflow/issues/1394)** + - Simplify process/cluster specs ([`128aaec`](https://github.com/hydro-project/hydroflow/commit/128aaecd40edce57dc254afdcd61ecd5b9948d71)) + * **[#1395](https://github.com/hydro-project/hydroflow/issues/1395)** + - Disentangle instantiated nodes from locations ([`5f2789a`](https://github.com/hydro-project/hydroflow/commit/5f2789a13d1602f170e678fe9bbc59caf69db4b5)) + * **[#1398](https://github.com/hydro-project/hydroflow/issues/1398)** + - Use trybuild to compile subgraph binaries ([`46a8a2c`](https://github.com/hydro-project/hydroflow/commit/46a8a2cb08732bb21096e824bc4542d208c68fb2)) +
+ +### Commit Statistics + + + + - 8 commits contributed to the release. + - 8 commits were understood as [conventional](https://www.conventionalcommits.org). + - 6 unique issues were worked on: [#1396](https://github.com/hydro-project/hydroflow/issues/1396), [#1399](https://github.com/hydro-project/hydroflow/issues/1399), [#1413](https://github.com/hydro-project/hydroflow/issues/1413), [#1420](https://github.com/hydro-project/hydroflow/issues/1420), [#1423](https://github.com/hydro-project/hydroflow/issues/1423), [#1429](https://github.com/hydro-project/hydroflow/issues/1429) + +### Commit Details + + + +
view details + + * **[#1396](https://github.com/hydro-project/hydroflow/issues/1396)** + - Add end-to-end flamegraph generation for macos and linux localhost, fix #1351 ([`6568263`](https://github.com/hydro-project/hydroflow/commit/6568263e03899d4e96837690e6e59284c194d7ff)) + * **[#1399](https://github.com/hydro-project/hydroflow/issues/1399)** + - Rename some `CLI`->`Deploy`, decapitalize acronym names ([`fa41720`](https://github.com/hydro-project/hydroflow/commit/fa417205569d8c49c85b0c2324118e0f9b1c8407)) + * **[#1413](https://github.com/hydro-project/hydroflow/issues/1413)** + - Rename integration crates to drop CLI references ([`0a465e5`](https://github.com/hydro-project/hydroflow/commit/0a465e55dd39c76bc1aefb020460a639d792fe87)) + * **[#1420](https://github.com/hydro-project/hydroflow/issues/1420)** + - Remove `FlowProps` ([`22c7218`](https://github.com/hydro-project/hydroflow/commit/22c72189bb76412955d29b03c5d99894c558a07c)) + * **[#1423](https://github.com/hydro-project/hydroflow/issues/1423)** + - Lower min dependency versions where possible, update `Cargo.lock` ([`11af328`](https://github.com/hydro-project/hydroflow/commit/11af32828bab6e4a4264d2635ff71a12bb0bb778)) + * **[#1429](https://github.com/hydro-project/hydroflow/issues/1429)** + - Use `name_of!` macro ([`3fde68d`](https://github.com/hydro-project/hydroflow/commit/3fde68d0db0414017cfb771a218b14b8f57d1686)) + * **Uncategorized** + - Manually set versions for crates renamed in #1413 ([`a2ec110`](https://github.com/hydro-project/hydroflow/commit/a2ec110ccadb97e293b19d83a155d98d94224bba)) + - Update `RELEASING.md` notes, prep for release, wip ([`c41787f`](https://github.com/hydro-project/hydroflow/commit/c41787f527859cb9d704736ecdea5ca7bc641460)) +
+ +## v0.8.0 (2024-07-23) + + + + + + +### Refactor (BREAKING) + + - make `Host` trait use `&self` interior mutability to remove `RwLock` wrappings #430 + Depends on #1346 + - make `HydroflowSource`, `HydroflowSink` traits use `&self` interior mutability to remove `RwLock` wrappings #430 + Depends on #1339 + - replace `async-channel` with `tokio::sync::mpsc::unbounded_channel` + Depends on #1339 + + We could make the publicly facing `stdout`, `stderr` APIs return `impl Stream` in the future, maybe + - replace some uses of `tokio::sync::RwLock` with `std::sync::Mutex` #430 (3/3) + +### Pre-Move Commit Statistics + + + + - 5 commits contributed to the release over the course of 4 calendar days. + - 59 days passed between releases. + - 4 commits were understood as [conventional](https://www.conventionalcommits.org). + - 4 unique issues were worked on: [#1339](https://github.com/hydro-project/hydroflow/issues/1339), [#1346](https://github.com/hydro-project/hydroflow/issues/1346), [#1347](https://github.com/hydro-project/hydroflow/issues/1347), [#1356](https://github.com/hydro-project/hydroflow/issues/1356) + +### Pre-Move Commit Details + + + +
view details + + * **[#1339](https://github.com/hydro-project/hydroflow/issues/1339)** + - Replace some uses of `tokio::sync::RwLock` with `std::sync::Mutex` #430 (3/3) ([`141eae1`](https://github.com/hydro-project/hydroflow/commit/141eae1c3a1869fa42756250618a21ea2a2c7e34)) + * **[#1346](https://github.com/hydro-project/hydroflow/issues/1346)** + - Make `HydroflowSource`, `HydroflowSink` traits use `&self` interior mutability to remove `RwLock` wrappings #430 ([`057a0a5`](https://github.com/hydro-project/hydroflow/commit/057a0a510568cf81932368c8c65e056f91af7202)) + * **[#1347](https://github.com/hydro-project/hydroflow/issues/1347)** + - Make `Host` trait use `&self` interior mutability to remove `RwLock` wrappings #430 ([`c5a8de2`](https://github.com/hydro-project/hydroflow/commit/c5a8de28e7844b3c29d58116d8340967f2e6bcc4)) + * **[#1356](https://github.com/hydro-project/hydroflow/issues/1356)** + - Replace `async-channel` with `tokio::sync::mpsc::unbounded_channel` ([`6039078`](https://github.com/hydro-project/hydroflow/commit/60390782dd7dcec18d193c800af716843a944dba)) + * **Uncategorized** + - Release hydroflow_lang v0.8.0, hydroflow_datalog_core v0.8.0, hydroflow_datalog v0.8.0, hydroflow_macro v0.8.0, lattices_macro v0.5.5, lattices v0.5.6, variadics v0.0.5, pusherator v0.0.7, hydroflow v0.8.0, hydroflow_plus v0.8.0, hydro_deploy v0.8.0, hydro_cli v0.8.0, hydroflow_plus_cli_integration v0.8.0, safety bump 7 crates ([`ca6c16b`](https://github.com/hydro-project/hydroflow/commit/ca6c16b4a7ce35e155fe7fc6c7d1676c37c9e4de)) +
+ +## v0.7.0 (2024-05-24) + + + +### Chore + + - use workaround for `cargo smart-release` not properly ordering `dev-`/`build-dependencies` + +### New Features + + - add API to get the cluster ID of the current node + feat(hydroflow_plus): add API to get the cluster ID of the current node + +### Pre-Move Commit Statistics + + + + - 3 commits contributed to the release over the course of 9 calendar days. + - 44 days passed between releases. + - 2 commits were understood as [conventional](https://www.conventionalcommits.org). + - 2 unique issues were worked on: [#1194](https://github.com/hydro-project/hydroflow/issues/1194), [#1238](https://github.com/hydro-project/hydroflow/issues/1238) + +### Pre-Move Commit Details + + + +
view details + + * **[#1194](https://github.com/hydro-project/hydroflow/issues/1194)** + - Add API to get the cluster ID of the current node ([`6e57172`](https://github.com/hydro-project/hydroflow/commit/6e571726ff40818fbe9bbe9923511877c20fb243)) + * **[#1238](https://github.com/hydro-project/hydroflow/issues/1238)** + - Use workaround for `cargo smart-release` not properly ordering `dev-`/`build-dependencies` ([`c9dfddc`](https://github.com/hydro-project/hydroflow/commit/c9dfddc680e0ce5415539d7b77bc5beb97ab59d9)) + * **Uncategorized** + - Release hydroflow_lang v0.7.0, hydroflow_datalog_core v0.7.0, hydroflow_datalog v0.7.0, hydroflow_macro v0.7.0, lattices v0.5.5, multiplatform_test v0.1.0, pusherator v0.0.6, hydroflow v0.7.0, stageleft_macro v0.2.0, stageleft v0.3.0, stageleft_tool v0.2.0, hydroflow_plus v0.7.0, hydro_deploy v0.7.0, hydro_cli v0.7.0, hydroflow_plus_cli_integration v0.7.0, safety bump 8 crates ([`2852147`](https://github.com/hydro-project/hydroflow/commit/285214740627685e911781793e05d234ab2ad2bd)) +
+ +## v0.6.1 (2024-04-09) + +### New Features + + - simplify Location trait to remove lifetimes + - simplify lifetime bounds for processes and clusters + feat(hydroflow_plus): simplify lifetime bounds for processes and + clusters + + This allows `extract` to move the flow builder, which is a prerequisite + for having developers run the optimizer during deployment as well in + case it changes the network topology. + +### Pre-Move Commit Statistics + + + + - 5 commits contributed to the release over the course of 18 calendar days. + - 38 days passed between releases. + - 2 commits were understood as [conventional](https://www.conventionalcommits.org). + - 2 unique issues were worked on: [#1100](https://github.com/hydro-project/hydroflow/issues/1100), [#1101](https://github.com/hydro-project/hydroflow/issues/1101) + +### Pre-Move Commit Details + + + +
view details + + * **[#1100](https://github.com/hydro-project/hydroflow/issues/1100)** + - Simplify lifetime bounds for processes and clusters ([`77f3e5a`](https://github.com/hydro-project/hydroflow/commit/77f3e5afb9e276d1d6c643574ebac75ed0003939)) + * **[#1101](https://github.com/hydro-project/hydroflow/issues/1101)** + - Simplify Location trait to remove lifetimes ([`7f68ebf`](https://github.com/hydro-project/hydroflow/commit/7f68ebf2a23e8e73719229a6f0408bffc7fbe7af)) + * **Uncategorized** + - Release hydroflow_plus v0.6.1, hydro_deploy v0.6.1, hydro_cli v0.6.1, hydroflow_plus_cli_integration v0.6.1 ([`c385c13`](https://github.com/hydro-project/hydroflow/commit/c385c132c9733d1bace82156aa14216b8e7fef9f)) + - Release hydroflow_lang v0.6.2, hydroflow v0.6.2, hydroflow_plus v0.6.1, hydro_deploy v0.6.1, hydro_cli v0.6.1, hydroflow_plus_cli_integration v0.6.1, stageleft_tool v0.1.1 ([`23cfe08`](https://github.com/hydro-project/hydroflow/commit/23cfe0839079aa17d042bbd3976f6d188689d290)) + - Release hydroflow_cli_integration v0.5.2, hydroflow_lang v0.6.1, hydroflow_datalog_core v0.6.1, lattices v0.5.4, hydroflow v0.6.1, stageleft_macro v0.1.1, stageleft v0.2.1, hydroflow_plus v0.6.1, hydro_deploy v0.6.1, hydro_cli v0.6.1, hydroflow_plus_cli_integration v0.6.1, stageleft_tool v0.1.1 ([`cd63f22`](https://github.com/hydro-project/hydroflow/commit/cd63f2258c961a40f0e5dbef20ac329a2d570ad0)) +
+ +## v0.6.0 (2024-03-02) + +### New Features + + - unify send/demux/tagged APIs + feat(hydroflow_plus): unify send/demux/tagged APIs + - use an IR before lowering to Hydroflow + Makes it possible to write custom optimization passes. + +### Pre-Move Commit Statistics + + + + - 3 commits contributed to the release over the course of 2 calendar days. + - 28 days passed between releases. + - 2 commits were understood as [conventional](https://www.conventionalcommits.org). + - 2 unique issues were worked on: [#1070](https://github.com/hydro-project/hydroflow/issues/1070), [#1080](https://github.com/hydro-project/hydroflow/issues/1080) + +### Pre-Move Commit Details + + + +
view details + + * **[#1070](https://github.com/hydro-project/hydroflow/issues/1070)** + - Use an IR before lowering to Hydroflow ([`eb34ccd`](https://github.com/hydro-project/hydroflow/commit/eb34ccd13f56e1d07cbae35ead79daeb3b9bad20)) + * **[#1080](https://github.com/hydro-project/hydroflow/issues/1080)** + - Unify send/demux/tagged APIs ([`c1d1b51`](https://github.com/hydro-project/hydroflow/commit/c1d1b51ee26cc9946af59ac02c040e0a33d15fde)) + * **Uncategorized** + - Release hydroflow_lang v0.6.0, hydroflow_datalog_core v0.6.0, hydroflow_datalog v0.6.0, hydroflow_macro v0.6.0, lattices v0.5.3, variadics v0.0.4, pusherator v0.0.5, hydroflow v0.6.0, stageleft v0.2.0, hydroflow_plus v0.6.0, hydro_deploy v0.6.0, hydro_cli v0.6.0, hydroflow_plus_cli_integration v0.6.0, safety bump 7 crates ([`09ea65f`](https://github.com/hydro-project/hydroflow/commit/09ea65fe9cd45c357c43bffca30e60243fa45cc8)) +
+ +## v0.5.1 (2024-02-03) + + + + + +### Chore + + - prep for initial release + > it contains the logic linking hydroflow+ to deploy, it should be published that’s a bug + - manually set lockstep-versioned crates (and `lattices`) to version `0.5.1` + Setting manually since + https://github.com/frewsxcv/rust-crates-index/issues/159 is messing with + smart-release + +### Chore + + - fix/add releasing for `hydroflow_plus_cli_integration`, `stageleft[_macro/_tool]` + +### New Features + + - add APIs for declaring external ports on clusters + - improve API naming and polish docs + - pass subgraph ID through deploy metadata + - improve API naming and eliminate wire API for builders + - improve Rust API for defining services + - split Rust core from Python bindings + +### Pre-Move Commit Statistics + + + + - 11 commits contributed to the release over the course of 43 calendar days. + - 9 commits were understood as [conventional](https://www.conventionalcommits.org). + - 6 unique issues were worked on: [#1013](https://github.com/hydro-project/hydroflow/issues/1013), [#1056](https://github.com/hydro-project/hydroflow/issues/1056), [#986](https://github.com/hydro-project/hydroflow/issues/986), [#987](https://github.com/hydro-project/hydroflow/issues/987), [#995](https://github.com/hydro-project/hydroflow/issues/995), [#996](https://github.com/hydro-project/hydroflow/issues/996) + +### Pre-Move Commit Details + + + +
view details + + * **[#1013](https://github.com/hydro-project/hydroflow/issues/1013)** + - Improve API naming and polish docs ([`6eeb9be`](https://github.com/hydro-project/hydroflow/commit/6eeb9be9bc4136041a2855f650ae640c478b7fc9)) + * **[#1056](https://github.com/hydro-project/hydroflow/issues/1056)** + - Prep for initial release ([`e9c7ced`](https://github.com/hydro-project/hydroflow/commit/e9c7ced8760f88e3215a4b1b4e23f8b9db159a84)) + * **[#986](https://github.com/hydro-project/hydroflow/issues/986)** + - Split Rust core from Python bindings ([`c50ca12`](https://github.com/hydro-project/hydroflow/commit/c50ca121b6d5e30dc07843f82caa135b68626301)) + * **[#987](https://github.com/hydro-project/hydroflow/issues/987)** + - Improve Rust API for defining services ([`53d7aee`](https://github.com/hydro-project/hydroflow/commit/53d7aee8dcc574d47864ec89bfea30a82eab0ee7)) + * **[#995](https://github.com/hydro-project/hydroflow/issues/995)** + - Improve API naming and eliminate wire API for builders ([`b7aafd3`](https://github.com/hydro-project/hydroflow/commit/b7aafd3c97897db4bff62c4ab0b7480ef9a799e0)) + * **[#996](https://github.com/hydro-project/hydroflow/issues/996)** + - Pass subgraph ID through deploy metadata ([`46d87fa`](https://github.com/hydro-project/hydroflow/commit/46d87fa364d3fe01422cf3c404fbc8a1d5e9fb88)) + * **Uncategorized** + - Release hydroflow_plus_cli_integration v0.5.1 ([`8202feb`](https://github.com/hydro-project/hydroflow/commit/8202febcd8ed160c23d08c37d7c13773831c97da)) + - Fix/add releasing for `hydroflow_plus_cli_integration`, `stageleft[_macro/_tool]` ([`03ce73d`](https://github.com/hydro-project/hydroflow/commit/03ce73d2eb1377179f5ebd6f80e082c2889588a7)) + - Release hydroflow_lang v0.5.2, hydroflow_datalog_core v0.5.2, hydroflow_macro v0.5.2, lattices v0.5.2, hydroflow v0.5.2, hydro_cli v0.5.1, hydroflow_plus_cli_integration v0.5.1 
([`6ac8720`](https://github.com/hydro-project/hydroflow/commit/6ac872081753548ebb8ec95549b4d820dc050d3e)) + - Manually set lockstep-versioned crates (and `lattices`) to version `0.5.1` ([`1b555e5`](https://github.com/hydro-project/hydroflow/commit/1b555e57c8c812bed4d6495d2960cbf77fb0b3ef)) + - Add APIs for declaring external ports on clusters ([`7d930a2`](https://github.com/hydro-project/hydroflow/commit/7d930a2ccf656d3d6bc5db3e22eb63c5fd6d37d1)) +
+ diff --git a/hydro_deploy/hydroflow_plus_deploy/Cargo.toml b/hydro_deploy/hydroflow_plus_deploy/Cargo.toml new file mode 100644 index 000000000000..6f35b034f484 --- /dev/null +++ b/hydro_deploy/hydroflow_plus_deploy/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "hydroflow_plus_deploy" +publish = true +version = "0.9.0" +edition = "2021" +license = "Apache-2.0" +documentation = "https://docs.rs/hydroflow_plus_deploy/" +description = "Library for working with hydro_deploy and hydroflow_plus" + +[features] +deploy = [ "hydro_deploy", "trybuild-internals-api", "toml", "prettyplease" ] +stageleft_devel = [] + +[dependencies] +hydroflow_plus = { path = "../../hydroflow_plus", version = "^0.9.0", features = [ "deploy_integration" ] } +nameof = "1.0.0" +serde = { version = "1.0.197", features = [ "derive" ] } +sha2 = "0.10.0" +stageleft = { path = "../../stageleft", version = "^0.4.0" } +syn = { version = "2.0.46", features = [ "parsing", "extra-traits" ] } +tokio = { version = "1.29.0", features = [ "full" ] } + +hydro_deploy = { path = "../core", version = "^0.9.0", optional = true } +prettyplease = { version = "0.2.0", features = [ "verbatim" ], optional = true } +toml = { version = "0.8.0", optional = true } +trybuild-internals-api = { version = "1.0.99", optional = true } + +# added to workaround `cargo smart-release` https://github.com/Byron/cargo-smart-release/issues/16 +stageleft_tool = { path = "../../stageleft_tool", version = "^0.3.0", optional = true } + +[build-dependencies] +stageleft_tool = { path = "../../stageleft_tool", version = "^0.3.0" } diff --git a/hydro_deploy/hydroflow_plus_cli_integration/build.rs b/hydro_deploy/hydroflow_plus_deploy/build.rs similarity index 100% rename from hydro_deploy/hydroflow_plus_cli_integration/build.rs rename to hydro_deploy/hydroflow_plus_deploy/build.rs diff --git a/hydro_deploy/hydroflow_plus_deploy/src/deploy.rs b/hydro_deploy/hydroflow_plus_deploy/src/deploy.rs new file mode 100644 index 
000000000000..cad8a79249a2 --- /dev/null +++ b/hydro_deploy/hydroflow_plus_deploy/src/deploy.rs @@ -0,0 +1,681 @@ +use std::cell::RefCell; +use std::collections::HashMap; +use std::rc::Rc; +use std::sync::Arc; + +use hydro_deploy::custom_service::CustomClientPort; +use hydro_deploy::hydroflow_crate::ports::{ + DemuxSink, HydroflowSink, HydroflowSource, TaggedSource, +}; +use hydro_deploy::hydroflow_crate::tracing_options::TracingOptions; +use hydro_deploy::hydroflow_crate::HydroflowCrateService; +use hydro_deploy::{Deployment, Host, HydroflowCrate}; +use hydroflow_plus::deploy::{ClusterSpec, Deploy, Node, ProcessSpec}; +use hydroflow_plus::lang::graph::HydroflowGraph; +use nameof::name_of; +use sha2::{Digest, Sha256}; +use stageleft::Quoted; +use tokio::sync::RwLock; + +use super::HydroflowPlusMeta; +use crate::deploy_runtime::*; +use crate::trybuild::{compile_graph_trybuild, create_trybuild}; + +pub struct HydroDeploy {} + +impl<'a> Deploy<'a> for HydroDeploy { + type InstantiateEnv = Deployment; + type CompileEnv = (); + type Process = DeployNode; + type Cluster = DeployCluster; + type Meta = HashMap>; + type GraphId = (); + type ProcessPort = DeployPort; + type ClusterPort = DeployPort; + + fn allocate_process_port(process: &Self::Process) -> Self::ProcessPort { + process.next_port() + } + + fn allocate_cluster_port(cluster: &Self::Cluster) -> Self::ClusterPort { + cluster.next_port() + } + + fn o2o_sink_source( + _env: &(), + _p1: &Self::Process, + p1_port: &Self::ProcessPort, + _p2: &Self::Process, + p2_port: &Self::ProcessPort, + ) -> (syn::Expr, syn::Expr) { + let p1_port = p1_port.port.as_str(); + let p2_port = p2_port.port.as_str(); + deploy_o2o(p1_port, p2_port) + } + + fn o2o_connect( + p1: &Self::Process, + p1_port: &Self::ProcessPort, + p2: &Self::Process, + p2_port: &Self::ProcessPort, + ) { + let self_underlying_borrow = p1.underlying.borrow(); + let self_underlying = self_underlying_borrow.as_ref().unwrap(); + let source_port = self_underlying + 
.try_read() + .unwrap() + .get_port(p1_port.port.clone(), self_underlying); + + let other_underlying_borrow = p2.underlying.borrow(); + let other_underlying = other_underlying_borrow.as_ref().unwrap(); + let recipient_port = other_underlying + .try_read() + .unwrap() + .get_port(p2_port.port.clone(), other_underlying); + + source_port.send_to(&recipient_port); + } + + fn o2m_sink_source( + _env: &(), + _p1: &Self::Process, + p1_port: &Self::ProcessPort, + _c2: &Self::Cluster, + c2_port: &Self::ClusterPort, + ) -> (syn::Expr, syn::Expr) { + let p1_port = p1_port.port.as_str(); + let c2_port = c2_port.port.as_str(); + deploy_o2m(p1_port, c2_port) + } + + fn o2m_connect( + p1: &Self::Process, + p1_port: &Self::ProcessPort, + c2: &Self::Cluster, + c2_port: &Self::ClusterPort, + ) { + let self_underlying_borrow = p1.underlying.borrow(); + let self_underlying = self_underlying_borrow.as_ref().unwrap(); + let source_port = self_underlying + .try_read() + .unwrap() + .get_port(p1_port.port.clone(), self_underlying); + + let recipient_port = DemuxSink { + demux: c2 + .members + .borrow() + .iter() + .enumerate() + .map(|(id, c)| { + let n = c.underlying.try_read().unwrap(); + ( + id as u32, + Arc::new(n.get_port(c2_port.port.clone(), &c.underlying)) + as Arc, + ) + }) + .collect(), + }; + + source_port.send_to(&recipient_port); + } + + fn m2o_sink_source( + _env: &(), + _c1: &Self::Cluster, + c1_port: &Self::ClusterPort, + _p2: &Self::Process, + p2_port: &Self::ProcessPort, + ) -> (syn::Expr, syn::Expr) { + let c1_port = c1_port.port.as_str(); + let p2_port = p2_port.port.as_str(); + deploy_m2o(c1_port, p2_port) + } + + fn m2o_connect( + c1: &Self::Cluster, + c1_port: &Self::ClusterPort, + p2: &Self::Process, + p2_port: &Self::ProcessPort, + ) { + let other_underlying_borrow = p2.underlying.borrow(); + let other_underlying = other_underlying_borrow.as_ref().unwrap(); + let recipient_port = other_underlying + .try_read() + .unwrap() + .get_port(p2_port.port.clone(), 
other_underlying) + .merge(); + + for (i, node) in c1.members.borrow().iter().enumerate() { + let source_port = node + .underlying + .try_read() + .unwrap() + .get_port(c1_port.port.clone(), &node.underlying); + + TaggedSource { + source: Arc::new(source_port), + tag: i as u32, + } + .send_to(&recipient_port); + } + } + + fn m2m_sink_source( + _env: &(), + _c1: &Self::Cluster, + c1_port: &Self::ClusterPort, + _c2: &Self::Cluster, + c2_port: &Self::ClusterPort, + ) -> (syn::Expr, syn::Expr) { + let c1_port = c1_port.port.as_str(); + let c2_port = c2_port.port.as_str(); + deploy_m2m(c1_port, c2_port) + } + + fn m2m_connect( + c1: &Self::Cluster, + c1_port: &Self::ClusterPort, + c2: &Self::Cluster, + c2_port: &Self::ClusterPort, + ) { + for (i, sender) in c1.members.borrow().iter().enumerate() { + let source_port = sender + .underlying + .try_read() + .unwrap() + .get_port(c1_port.port.clone(), &sender.underlying); + + let recipient_port = DemuxSink { + demux: c2 + .members + .borrow() + .iter() + .enumerate() + .map(|(id, c)| { + let n = c.underlying.try_read().unwrap(); + ( + id as u32, + Arc::new(n.get_port(c2_port.port.clone(), &c.underlying).merge()) + as Arc, + ) + }) + .collect(), + }; + + TaggedSource { + source: Arc::new(source_port), + tag: i as u32, + } + .send_to(&recipient_port); + } + } + + fn cluster_ids( + _env: &Self::CompileEnv, + of_cluster: usize, + ) -> impl Quoted<'a, &'a Vec> + Copy + 'a { + cluster_members(of_cluster) + } + + fn cluster_self_id(_env: &Self::CompileEnv) -> impl Quoted<'a, u32> + Copy + 'a { + cluster_self_id() + } +} + +pub trait DeployCrateWrapper { + fn underlying(&self) -> Arc>; + + #[allow(async_fn_in_trait)] + async fn create_sender( + &self, + port: &str, + deployment: &mut Deployment, + on: &Arc, + ) -> CustomClientPort { + let sender_service = deployment.CustomService(on.clone(), vec![]); + let sender_port = sender_service.read().await.declare_client(&sender_service); + let recipient = self + .underlying() + .read() + 
.await + .get_port(port.to_string(), &self.underlying()); + + sender_port.send_to(&recipient); + sender_port + } + + #[allow(async_fn_in_trait)] + async fn stdout(&self) -> tokio::sync::mpsc::UnboundedReceiver { + self.underlying().read().await.stdout() + } + + #[allow(async_fn_in_trait)] + async fn stderr(&self) -> tokio::sync::mpsc::UnboundedReceiver { + self.underlying().read().await.stderr() + } +} + +#[derive(Clone)] +pub struct TrybuildHost { + pub host: Arc, + pub display_name: Option, + pub rustflags: Option, + pub tracing: Option, + pub name_hint: Option, + pub cluster_idx: Option, +} + +impl TrybuildHost { + pub fn new(host: Arc) -> Self { + Self { + host, + display_name: None, + rustflags: None, + tracing: None, + name_hint: None, + cluster_idx: None, + } + } + + pub fn display_name(self, display_name: impl Into) -> Self { + if self.display_name.is_some() { + panic!("{} already set", name_of!(display_name in Self)); + } + + Self { + display_name: Some(display_name.into()), + ..self + } + } + + pub fn rustflags(self, rustflags: impl Into) -> Self { + if self.rustflags.is_some() { + panic!("{} already set", name_of!(rustflags in Self)); + } + + Self { + rustflags: Some(rustflags.into()), + ..self + } + } + + pub fn tracing(self, tracing: TracingOptions) -> Self { + if self.tracing.is_some() { + panic!("{} already set", name_of!(tracing in Self)); + } + + Self { + tracing: Some(tracing), + ..self + } + } +} + +impl From> for TrybuildHost { + fn from(h: Arc) -> Self { + Self { + host: h, + display_name: None, + rustflags: None, + tracing: None, + name_hint: None, + cluster_idx: None, + } + } +} + +pub enum CrateOrTrybuild { + Crate(HydroflowCrate), + Trybuild(TrybuildHost), +} + +#[derive(Clone)] +pub struct DeployNode { + id: usize, + next_port: Rc>, + service_spec: Rc>>, + underlying: Rc>>>>, +} + +impl DeployCrateWrapper for DeployNode { + fn underlying(&self) -> Arc> { + self.underlying.borrow().as_ref().unwrap().clone() + } +} + +pub struct DeployPort { 
+ node: N, + port: String, +} + +impl DeployPort { + pub async fn create_sender( + &self, + deployment: &mut Deployment, + on: &Arc, + ) -> CustomClientPort { + self.node.create_sender(&self.port, deployment, on).await + } +} + +impl DeployPort { + pub async fn create_senders( + &self, + deployment: &mut Deployment, + on: &Arc, + ) -> Vec { + let mut out = vec![]; + for member in self.node.members() { + out.push(member.create_sender(&self.port, deployment, on).await); + } + + out + } +} + +impl Node for DeployNode { + type Port = DeployPort; + type Meta = HashMap>; + type InstantiateEnv = Deployment; + + fn next_port(&self) -> DeployPort { + let next_port = *self.next_port.borrow(); + *self.next_port.borrow_mut() += 1; + + DeployPort { + node: self.clone(), + port: format!("port_{}", next_port), + } + } + + fn update_meta(&mut self, meta: &Self::Meta) { + let underlying_node = self.underlying.borrow(); + let mut n = underlying_node.as_ref().unwrap().try_write().unwrap(); + n.update_meta(HydroflowPlusMeta { + clusters: meta.clone(), + cluster_id: None, + subgraph_id: self.id, + }); + } + + fn instantiate( + &self, + env: &mut Self::InstantiateEnv, + _meta: &mut Self::Meta, + graph: HydroflowGraph, + extra_stmts: Vec, + ) { + let service = match self.service_spec.borrow_mut().take().unwrap() { + CrateOrTrybuild::Crate(c) => c, + CrateOrTrybuild::Trybuild(trybuild) => { + let (bin_name, (dir, target_dir, features)) = + create_graph_trybuild(graph, extra_stmts, &trybuild.name_hint); + create_trybuild_service(trybuild, &dir, &target_dir, &features, &bin_name) + } + }; + + *self.underlying.borrow_mut() = Some(env.add_service(service)); + } +} + +#[derive(Clone)] +pub struct DeployClusterNode { + underlying: Arc>, +} + +impl DeployCrateWrapper for DeployClusterNode { + fn underlying(&self) -> Arc> { + self.underlying.clone() + } +} + +#[derive(Clone)] +pub struct DeployCluster { + id: usize, + next_port: Rc>, + cluster_spec: Rc>>>, + members: Rc>>, + name_hint: Option, +} 
+ +impl DeployCluster { + pub fn members(&self) -> Vec { + self.members.borrow().clone() + } +} + +impl Node for DeployCluster { + type Port = DeployPort; + type Meta = HashMap>; + type InstantiateEnv = Deployment; + + fn next_port(&self) -> DeployPort { + let next_port = *self.next_port.borrow(); + *self.next_port.borrow_mut() += 1; + + DeployPort { + node: self.clone(), + port: format!("port_{}", next_port), + } + } + + fn instantiate( + &self, + env: &mut Self::InstantiateEnv, + meta: &mut Self::Meta, + graph: HydroflowGraph, + extra_stmts: Vec, + ) { + let has_trybuild = self + .cluster_spec + .borrow() + .as_ref() + .unwrap() + .iter() + .any(|spec| matches!(spec, CrateOrTrybuild::Trybuild { .. })); + + let maybe_trybuild = if has_trybuild { + Some(create_graph_trybuild(graph, extra_stmts, &self.name_hint)) + } else { + None + }; + + let cluster_nodes = self + .cluster_spec + .borrow_mut() + .take() + .unwrap() + .into_iter() + .map(|spec| { + let service = match spec { + CrateOrTrybuild::Crate(c) => c, + CrateOrTrybuild::Trybuild(trybuild) => { + let (bin_name, (dir, target_dir, features)) = + maybe_trybuild.as_ref().unwrap(); + create_trybuild_service(trybuild, dir, target_dir, features, bin_name) + } + }; + + env.add_service(service) + }) + .collect::>(); + meta.insert(self.id, (0..(cluster_nodes.len() as u32)).collect()); + *self.members.borrow_mut() = cluster_nodes + .into_iter() + .map(|n| DeployClusterNode { underlying: n }) + .collect(); + } + + fn update_meta(&mut self, meta: &Self::Meta) { + for (cluster_id, node) in self.members.borrow().iter().enumerate() { + let mut n = node.underlying.try_write().unwrap(); + n.update_meta(HydroflowPlusMeta { + clusters: meta.clone(), + cluster_id: Some(cluster_id as u32), + subgraph_id: self.id, + }); + } + } +} + +#[derive(Clone)] +pub struct DeployProcessSpec(HydroflowCrate); + +impl DeployProcessSpec { + pub fn new(t: HydroflowCrate) -> Self { + Self(t) + } +} + +impl<'a> ProcessSpec<'a, HydroDeploy> for 
DeployProcessSpec { + fn build(self, id: usize, _name_hint: &str) -> DeployNode { + DeployNode { + id, + next_port: Rc::new(RefCell::new(0)), + service_spec: Rc::new(RefCell::new(Some(CrateOrTrybuild::Crate(self.0)))), + underlying: Rc::new(RefCell::new(None)), + } + } +} + +impl<'a> ProcessSpec<'a, HydroDeploy> for TrybuildHost { + fn build(mut self, id: usize, name_hint: &str) -> DeployNode { + self.name_hint = Some(format!("{} (process {id})", name_hint)); + DeployNode { + id, + next_port: Rc::new(RefCell::new(0)), + service_spec: Rc::new(RefCell::new(Some(CrateOrTrybuild::Trybuild(self)))), + underlying: Rc::new(RefCell::new(None)), + } + } +} + +#[derive(Clone)] +pub struct DeployClusterSpec(Vec); + +impl DeployClusterSpec { + pub fn new(crates: Vec) -> Self { + Self(crates) + } +} + +impl<'a> ClusterSpec<'a, HydroDeploy> for DeployClusterSpec { + fn build(self, id: usize, _name_hint: &str) -> DeployCluster { + DeployCluster { + id, + next_port: Rc::new(RefCell::new(0)), + cluster_spec: Rc::new(RefCell::new(Some( + self.0.into_iter().map(CrateOrTrybuild::Crate).collect(), + ))), + members: Rc::new(RefCell::new(vec![])), + name_hint: None, + } + } +} + +impl<'a> ClusterSpec<'a, HydroDeploy> for Vec { + fn build(self, id: usize, name_hint: &str) -> DeployCluster { + let name_hint = format!("{} (cluster {id})", name_hint); + DeployCluster { + id, + next_port: Rc::new(RefCell::new(0)), + cluster_spec: Rc::new(RefCell::new(Some( + self.into_iter() + .enumerate() + .map(|(idx, mut b)| { + b.name_hint = Some(name_hint.clone()); + b.cluster_idx = Some(idx); + CrateOrTrybuild::Trybuild(b) + }) + .collect(), + ))), + members: Rc::new(RefCell::new(vec![])), + name_hint: Some(name_hint), + } + } +} + +fn clean_name_hint(name_hint: &str) -> String { + name_hint + .replace("::", "__") + .replace(" ", "_") + .replace(",", "_") + .replace("<", "_") + .replace(">", "") + .replace("(", "") + .replace(")", "") +} + +fn create_graph_trybuild( + graph: HydroflowGraph, + 
extra_stmts: Vec, + name_hint: &Option, +) -> ( + String, + (std::path::PathBuf, std::path::PathBuf, Option>), +) { + let source_ast = compile_graph_trybuild(graph, extra_stmts); + + let source_dir = trybuild_internals_api::cargo::manifest_dir().unwrap(); + let source_manifest = trybuild_internals_api::dependencies::get_manifest(&source_dir).unwrap(); + let crate_name = &source_manifest.package.name.to_string().replace("-", "_"); + let source = prettyplease::unparse(&source_ast) + .to_string() + .replace(crate_name, &format!("{crate_name}::__staged")) + .replace("crate::__staged", &format!("{crate_name}::__staged")); + + let mut hasher = Sha256::new(); + hasher.update(&source); + let hash = format!("{:X}", hasher.finalize()) + .chars() + .take(8) + .collect::(); + + let bin_name = if let Some(name_hint) = &name_hint { + format!("{}_{}", clean_name_hint(name_hint), &hash) + } else { + hash + }; + + let trybuild_created = create_trybuild(&source, &bin_name).unwrap(); + (bin_name, trybuild_created) +} + +fn create_trybuild_service( + trybuild: TrybuildHost, + dir: &std::path::PathBuf, + target_dir: &std::path::PathBuf, + features: &Option>, + bin_name: &str, +) -> HydroflowCrate { + let mut ret = HydroflowCrate::new(dir, trybuild.host) + .target_dir(target_dir) + .bin(bin_name) + .no_default_features(); + + if let Some(display_name) = trybuild.display_name { + ret = ret.display_name(display_name); + } else if let Some(name_hint) = trybuild.name_hint { + if let Some(cluster_idx) = trybuild.cluster_idx { + ret = ret.display_name(format!("{} / {}", name_hint, cluster_idx)); + } else { + ret = ret.display_name(name_hint); + } + } + + if let Some(rustflags) = trybuild.rustflags { + ret = ret.rustflags(rustflags); + } + + if let Some(tracing) = trybuild.tracing { + ret = ret.tracing(tracing); + } + + if let Some(features) = features { + ret = ret.features(features.clone()); + } + + ret +} diff --git a/hydro_deploy/hydroflow_plus_deploy/src/deploy_runtime.rs 
b/hydro_deploy/hydroflow_plus_deploy/src/deploy_runtime.rs new file mode 100644 index 000000000000..e98d3e30ca61 --- /dev/null +++ b/hydro_deploy/hydroflow_plus_deploy/src/deploy_runtime.rs @@ -0,0 +1,113 @@ +use hydroflow_plus::util::deploy::{ + ConnectedDemux, ConnectedDirect, ConnectedSink, ConnectedSource, ConnectedTagged, DeployPorts, +}; +use stageleft::{q, Quoted, RuntimeData}; + +use crate::HydroflowPlusMeta; + +pub fn cluster_members<'a>(of_cluster: usize) -> impl Quoted<'a, &'a Vec> + Copy + 'a { + let cli: RuntimeData<&DeployPorts> = + RuntimeData::new("__hydroflow_plus_trybuild_cli"); + q!(cli.meta.clusters.get(&of_cluster).unwrap()) +} + +pub fn cluster_self_id<'a>() -> impl Quoted<'a, u32> + Copy + 'a { + let cli: RuntimeData<&DeployPorts> = + RuntimeData::new("__hydroflow_plus_trybuild_cli"); + q!(cli + .meta + .cluster_id + .expect("Tried to read Cluster ID on a non-cluster node")) +} + +pub fn deploy_o2o(p1_port: &str, p2_port: &str) -> (syn::Expr, syn::Expr) { + let env: RuntimeData<&DeployPorts> = + RuntimeData::new("__hydroflow_plus_trybuild_cli"); + ( + { + q!({ + env.port(p1_port) + .connect_local_blocking::() + .into_sink() + }) + .splice() + }, + { + q!({ + env.port(p2_port) + .connect_local_blocking::() + .into_source() + }) + .splice() + }, + ) +} + +pub fn deploy_o2m(p1_port: &str, c2_port: &str) -> (syn::Expr, syn::Expr) { + let env: RuntimeData<&DeployPorts> = + RuntimeData::new("__hydroflow_plus_trybuild_cli"); + ( + { + q!({ + env.port(p1_port) + .connect_local_blocking::>() + .into_sink() + }) + .splice() + }, + { + q!({ + env.port(c2_port) + .connect_local_blocking::() + .into_source() + }) + .splice() + }, + ) +} + +pub fn deploy_m2o(c1_port: &str, p2_port: &str) -> (syn::Expr, syn::Expr) { + let env: RuntimeData<&DeployPorts> = + RuntimeData::new("__hydroflow_plus_trybuild_cli"); + ( + { + q!({ + env.port(c1_port) + .connect_local_blocking::() + .into_sink() + }) + .splice() + }, + { + q!({ + env.port(p2_port) + 
.connect_local_blocking::>() + .into_source() + }) + .splice() + }, + ) +} + +pub fn deploy_m2m(c1_port: &str, c2_port: &str) -> (syn::Expr, syn::Expr) { + let env: RuntimeData<&DeployPorts> = + RuntimeData::new("__hydroflow_plus_trybuild_cli"); + ( + { + q!({ + env.port(c1_port) + .connect_local_blocking::>() + .into_sink() + }) + .splice() + }, + { + q!({ + env.port(c2_port) + .connect_local_blocking::>() + .into_source() + }) + .splice() + }, + ) +} diff --git a/hydro_deploy/hydroflow_plus_cli_integration/src/lib.rs b/hydro_deploy/hydroflow_plus_deploy/src/lib.rs similarity index 73% rename from hydro_deploy/hydroflow_plus_cli_integration/src/lib.rs rename to hydro_deploy/hydroflow_plus_deploy/src/lib.rs index 37b9043fcc9b..0bc35fada434 100644 --- a/hydro_deploy/hydroflow_plus_cli_integration/src/lib.rs +++ b/hydro_deploy/hydroflow_plus_deploy/src/lib.rs @@ -3,8 +3,14 @@ stageleft::stageleft_no_entry_crate!(); mod runtime; use std::collections::HashMap; +#[cfg(feature = "deploy")] +pub(crate) mod trybuild; + pub use runtime::*; +#[allow(unused)] +pub(crate) mod deploy_runtime; + #[cfg(feature = "deploy")] mod deploy; @@ -15,5 +21,6 @@ use serde::{Deserialize, Serialize}; #[derive(Default, Serialize, Deserialize)] pub struct HydroflowPlusMeta { pub clusters: HashMap>, + pub cluster_id: Option, pub subgraph_id: usize, } diff --git a/hydro_deploy/hydroflow_plus_deploy/src/runtime.rs b/hydro_deploy/hydroflow_plus_deploy/src/runtime.rs new file mode 100644 index 000000000000..3c0fe4331037 --- /dev/null +++ b/hydro_deploy/hydroflow_plus_deploy/src/runtime.rs @@ -0,0 +1,302 @@ +use std::cell::RefCell; +use std::rc::Rc; + +use hydroflow_plus::deploy::{ClusterSpec, Deploy, Node, ProcessSpec}; +use hydroflow_plus::lang::graph::HydroflowGraph; +use hydroflow_plus::util::deploy::{ + ConnectedDemux, ConnectedDirect, ConnectedSink, ConnectedSource, ConnectedTagged, DeployPorts, +}; +use stageleft::{q, Quoted, RuntimeData}; + +use super::HydroflowPlusMeta; + +pub struct 
DeployRuntime {} + +impl<'a> Deploy<'a> for DeployRuntime { + type InstantiateEnv = (); + type CompileEnv = RuntimeData<&'a DeployPorts>; + type Process = DeployRuntimeNode; + type Cluster = DeployRuntimeCluster; + type Meta = (); + type GraphId = usize; + type ProcessPort = String; + type ClusterPort = String; + + fn has_trivial_node() -> bool { + true + } + + fn trivial_process(_id: usize) -> Self::Process { + DeployRuntimeNode { + next_port: Rc::new(RefCell::new(0)), + } + } + + fn trivail_cluster(_id: usize) -> Self::Cluster { + DeployRuntimeCluster { + next_port: Rc::new(RefCell::new(0)), + } + } + + fn allocate_process_port(process: &Self::Process) -> Self::ProcessPort { + process.next_port() + } + + fn allocate_cluster_port(cluster: &Self::Cluster) -> Self::ClusterPort { + cluster.next_port() + } + + fn o2o_sink_source( + env: &Self::CompileEnv, + _p1: &Self::Process, + p1_port: &Self::ProcessPort, + _p2: &Self::Process, + p2_port: &Self::ProcessPort, + ) -> (syn::Expr, syn::Expr) { + let env = *env; + ( + { + let port = p1_port.as_str(); + + q!({ + env.port(port) + .connect_local_blocking::() + .into_sink() + }) + .splice() + }, + { + let port = p2_port.as_str(); + + q!({ + env.port(port) + .connect_local_blocking::() + .into_source() + }) + .splice() + }, + ) + } + + fn o2o_connect( + _p1: &Self::Process, + _p1_port: &Self::ProcessPort, + _p2: &Self::Process, + _p2_port: &Self::ProcessPort, + ) { + panic!() + } + + fn o2m_sink_source( + env: &Self::CompileEnv, + _p1: &Self::Process, + p1_port: &Self::ProcessPort, + _c2: &Self::Cluster, + c2_port: &Self::ClusterPort, + ) -> (syn::Expr, syn::Expr) { + let env = *env; + ( + { + let port = p1_port.as_str(); + + q!({ + env.port(port) + .connect_local_blocking::>() + .into_sink() + }) + .splice() + }, + { + let port = c2_port.as_str(); + + q!({ + env.port(port) + .connect_local_blocking::() + .into_source() + }) + .splice() + }, + ) + } + + fn o2m_connect( + _p1: &Self::Process, + _p1_port: &Self::ProcessPort, + 
_c2: &Self::Cluster, + _c2_port: &Self::ClusterPort, + ) { + panic!() + } + + fn m2o_sink_source( + env: &Self::CompileEnv, + _c1: &Self::Cluster, + c1_port: &Self::ClusterPort, + _p2: &Self::Process, + p2_port: &Self::ProcessPort, + ) -> (syn::Expr, syn::Expr) { + let env = *env; + ( + { + let port = c1_port.as_str(); + + q!({ + env.port(port) + .connect_local_blocking::() + .into_sink() + }) + .splice() + }, + { + let port = p2_port.as_str(); + + q!({ + env.port(port) + .connect_local_blocking::>() + .into_source() + }) + .splice() + }, + ) + } + + fn m2o_connect( + _c1: &Self::Cluster, + _c1_port: &Self::ClusterPort, + _p2: &Self::Process, + _p2_port: &Self::ProcessPort, + ) { + panic!() + } + + fn m2m_sink_source( + env: &Self::CompileEnv, + _c1: &Self::Cluster, + c1_port: &Self::ClusterPort, + _c2: &Self::Cluster, + c2_port: &Self::ClusterPort, + ) -> (syn::Expr, syn::Expr) { + let env = *env; + ( + { + let port = c1_port.as_str(); + + q!({ + env.port(port) + .connect_local_blocking::>() + .into_sink() + }) + .splice() + }, + { + let port = c2_port.as_str(); + + q!({ + env.port(port) + .connect_local_blocking::>() + .into_source() + }) + .splice() + }, + ) + } + + fn m2m_connect( + _c1: &Self::Cluster, + _c1_port: &Self::ClusterPort, + _c2: &Self::Cluster, + _c2_port: &Self::ClusterPort, + ) { + panic!() + } + + fn cluster_ids( + env: &Self::CompileEnv, + of_cluster: usize, + ) -> impl Quoted<'a, &'a Vec> + Copy + 'a { + let cli = *env; + q!(cli.meta.clusters.get(&of_cluster).unwrap()) + } + + fn cluster_self_id(env: &Self::CompileEnv) -> impl Quoted<'a, u32> + Copy + 'a { + let cli = *env; + q!(cli + .meta + .cluster_id + .expect("Tried to read Cluster ID on a non-cluster node")) + } +} + +#[derive(Clone)] +pub struct DeployRuntimeNode { + next_port: Rc>, +} + +impl Node for DeployRuntimeNode { + type Port = String; + type Meta = (); + type InstantiateEnv = (); + + fn next_port(&self) -> String { + let next_send_port = *self.next_port.borrow(); + 
*self.next_port.borrow_mut() += 1; + format!("port_{}", next_send_port) + } + + fn update_meta(&mut self, _meta: &Self::Meta) {} + + fn instantiate( + &self, + _env: &mut Self::InstantiateEnv, + _meta: &mut Self::Meta, + _graph: HydroflowGraph, + _extra_stmts: Vec, + ) { + panic!(".deploy() cannot be called on a DeployRuntimeNode"); + } +} + +#[derive(Clone)] +pub struct DeployRuntimeCluster { + next_port: Rc>, +} + +impl Node for DeployRuntimeCluster { + type Port = String; + type Meta = (); + type InstantiateEnv = (); + + fn next_port(&self) -> String { + let next_send_port = *self.next_port.borrow(); + *self.next_port.borrow_mut() += 1; + format!("port_{}", next_send_port) + } + + fn update_meta(&mut self, _meta: &Self::Meta) {} + + fn instantiate( + &self, + _env: &mut Self::InstantiateEnv, + _meta: &mut Self::Meta, + _graph: HydroflowGraph, + _extra_stmts: Vec, + ) { + panic!(".deploy() cannot be called on a DeployRuntimeCluster"); + } +} + +impl<'a> ProcessSpec<'a, DeployRuntime> for () { + fn build(self, _id: usize, _name_hint: &str) -> DeployRuntimeNode { + DeployRuntimeNode { + next_port: Rc::new(RefCell::new(0)), + } + } +} + +impl<'cli> ClusterSpec<'cli, DeployRuntime> for () { + fn build(self, _id: usize, _name_hint: &str) -> DeployRuntimeCluster { + DeployRuntimeCluster { + next_port: Rc::new(RefCell::new(0)), + } + } +} diff --git a/hydro_deploy/hydroflow_plus_deploy/src/trybuild.rs b/hydro_deploy/hydroflow_plus_deploy/src/trybuild.rs new file mode 100644 index 000000000000..5ec673974df2 --- /dev/null +++ b/hydro_deploy/hydroflow_plus_deploy/src/trybuild.rs @@ -0,0 +1,145 @@ +use std::fs; +use std::path::PathBuf; + +use hydroflow_plus::lang::graph::{partition_graph, HydroflowGraph}; +use stageleft::internal::quote; +use trybuild_internals_api::cargo::{self, Metadata}; +use trybuild_internals_api::env::Update; +use trybuild_internals_api::run::{PathDependency, Project}; +use trybuild_internals_api::{dependencies, features, path, Runner}; + +pub fn 
compile_graph_trybuild(graph: HydroflowGraph, extra_stmts: Vec) -> syn::File { + let partitioned_graph = partition_graph(graph).expect("Failed to partition (cycle detected)."); + + let mut diagnostics = Vec::new(); + let tokens = + partitioned_graph.as_code("e! { hydroflow_plus }, true, quote!(), &mut diagnostics); + + let source_ast: syn::File = syn::parse_quote! { + #![allow(unused_crate_dependencies, missing_docs)] + + #[allow(unused)] + fn __hfplus_runtime<'a>(__hydroflow_plus_trybuild_cli: &'a hydroflow_plus::util::deploy::DeployPorts) -> hydroflow_plus::Hydroflow<'a> { + #(#extra_stmts)* + #tokens + } + + #[tokio::main] + async fn main() { + let ports = hydroflow_plus::util::deploy::init_no_ack_start().await; + let flow = __hfplus_runtime(&ports); + println!("ack start"); + hydroflow_plus::util::deploy::launch_flow(flow).await; + } + }; + source_ast +} + +pub fn create_trybuild( + source: &str, + bin: &str, +) -> Result<(PathBuf, PathBuf, Option>), trybuild_internals_api::error::Error> { + let Metadata { + target_directory: target_dir, + workspace_root: workspace, + packages, + } = cargo::metadata()?; + + let source_dir = cargo::manifest_dir()?; + let mut source_manifest = dependencies::get_manifest(&source_dir)?; + source_manifest.dev_dependencies.clear(); + + let mut features = features::find(); + + let path_dependencies = source_manifest + .dependencies + .iter() + .filter_map(|(name, dep)| { + let path = dep.path.as_ref()?; + if packages.iter().any(|p| &p.name == name) { + // Skip path dependencies coming from the workspace itself + None + } else { + Some(PathDependency { + name: name.clone(), + normalized_path: path.canonicalize().ok()?, + }) + } + }) + .collect(); + + let crate_name = source_manifest.package.name.clone(); + let project_dir = path!(target_dir / "hfplus_trybuild" / crate_name /); + fs::create_dir_all(&project_dir)?; + + let project_name = format!("{}-hfplus-trybuild", crate_name); + let manifest = Runner::make_manifest( + &workspace, + 
&project_name, + &source_dir, + &packages, + &[], + source_manifest, + )?; + + if let Some(enabled_features) = &mut features { + enabled_features.retain(|feature| { + manifest.features.contains_key(feature) + && feature != "default" + && feature != "stageleft_devel" + }); + } + + let project = Project { + dir: project_dir, + source_dir, + target_dir, + name: project_name, + update: Update::env()?, + has_pass: false, + has_compile_fail: false, + features, + workspace, + path_dependencies, + manifest, + keep_going: false, + }; + + let manifest_toml = toml::to_string(&project.manifest)?; + fs::write(path!(project.dir / "Cargo.toml"), manifest_toml)?; + + fs::create_dir_all(path!(project.dir / "src" / "bin"))?; + + let out_path = path!(project.dir / "src" / "bin" / format!("{bin}.rs")); + if !out_path.exists() || fs::read_to_string(&out_path)? != source { + fs::write( + path!(project.dir / "src" / "bin" / format!("{bin}.rs")), + source, + )?; + } + // TODO(shadaj): garbage collect this directory occasionally + + let workspace_cargo_lock = path!(project.workspace / "Cargo.lock"); + if workspace_cargo_lock.exists() { + let _ = fs::copy(workspace_cargo_lock, path!(project.dir / "Cargo.lock")); + } else { + let _ = cargo::cargo(&project).arg("generate-lockfile").status(); + } + + let workspace_dot_cargo_config_toml = path!(project.workspace / ".cargo" / "config.toml"); + if workspace_dot_cargo_config_toml.exists() { + let dot_cargo_folder = path!(project.dir / ".cargo"); + fs::create_dir_all(&dot_cargo_folder)?; + + let _ = fs::copy( + workspace_dot_cargo_config_toml, + path!(dot_cargo_folder / "config.toml"), + ); + } + + Ok(( + project.dir.as_ref().into(), + path!(project.target_dir / "hfplus_trybuild"), + project.features, + )) +} diff --git a/hydroflow/CHANGELOG.md b/hydroflow/CHANGELOG.md index 28807fac1103..1a5930a2bceb 100644 --- a/hydroflow/CHANGELOG.md +++ b/hydroflow/CHANGELOG.md @@ -5,8 +5,602 @@ All notable changes to this project will be documented in this 
file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 0.9.0 (2024-08-30) + +### Chore + + - manually set versions for crates renamed in #1413 + - lower min dependency versions where possible, update `Cargo.lock` + Moved from #1418 + + --------- + - drop unused dependencies + +### Documentation + + - cleanup doc comments for clippy latest + +### New Features + + - allow `demux_enum` to have any number of outputs, fix #1329 + - Added simulation framework. + Will move it out of the blind gossip PR next. + - add `cross_singleton` operator + This operator is necessary to eliminate performance bottlenecks in Paxos + where cross-products result in unnecessary cloning and lack of + short-circuit behavior results in values being cloned out of the + internal state of `reduce_keyed`. + +### Bug Fixes + + - remove `FlowProps` + - `fold<'tick>` should always emit a value + +### Refactor (BREAKING) + + - rename integration crates to drop CLI references + - `Deployment.stop()` for graceful shutdown including updated `perf` profile downloading + * `perf` profile downloading moved from the `drop()` impl to `async fn + stop()` + * download perf data via stdout + * update async-ssh2-lite to 0.5 to cleanup tokio compat issues + + WIP for #1365 + +### Style (BREAKING) + + - rename some `CLI`->`Deploy`, decapitalize acronym names + +### Commit Statistics + + + + - 12 commits contributed to the release. + - 12 commits were understood as [conventional](https://www.conventionalcommits.org). 
+ - 11 unique issues were worked on: [#1370](https://github.com/hydro-project/hydroflow/issues/1370), [#1373](https://github.com/hydro-project/hydroflow/issues/1373), [#1399](https://github.com/hydro-project/hydroflow/issues/1399), [#1407](https://github.com/hydro-project/hydroflow/issues/1407), [#1409](https://github.com/hydro-project/hydroflow/issues/1409), [#1413](https://github.com/hydro-project/hydroflow/issues/1413), [#1416](https://github.com/hydro-project/hydroflow/issues/1416), [#1417](https://github.com/hydro-project/hydroflow/issues/1417), [#1420](https://github.com/hydro-project/hydroflow/issues/1420), [#1423](https://github.com/hydro-project/hydroflow/issues/1423), [#1428](https://github.com/hydro-project/hydroflow/issues/1428) + +### Commit Details + + + +
view details + + * **[#1370](https://github.com/hydro-project/hydroflow/issues/1370)** + - `Deployment.stop()` for graceful shutdown including updated `perf` profile downloading ([`a214786`](https://github.com/hydro-project/hydroflow/commit/a2147864b24110c9ae2c1553e9e8b55bd5065f15)) + * **[#1373](https://github.com/hydro-project/hydroflow/issues/1373)** + - Add `cross_singleton` operator ([`bd793e2`](https://github.com/hydro-project/hydroflow/commit/bd793e2fc1db735a6e623973028e99675a7933e0)) + * **[#1399](https://github.com/hydro-project/hydroflow/issues/1399)** + - Rename some `CLI`->`Deploy`, decapitalize acronym names ([`fa41720`](https://github.com/hydro-project/hydroflow/commit/fa417205569d8c49c85b0c2324118e0f9b1c8407)) + * **[#1407](https://github.com/hydro-project/hydroflow/issues/1407)** + - `fold<'tick>` should always emit a value ([`75dd4fb`](https://github.com/hydro-project/hydroflow/commit/75dd4fbb772a3ae7e58989aacab2121a1a487557)) + * **[#1409](https://github.com/hydro-project/hydroflow/issues/1409)** + - Allow `demux_enum` to have any number of outputs, fix #1329 ([`9e5f58e`](https://github.com/hydro-project/hydroflow/commit/9e5f58ef773f0aee39a9705d9845361a2488649b)) + * **[#1413](https://github.com/hydro-project/hydroflow/issues/1413)** + - Rename integration crates to drop CLI references ([`0a465e5`](https://github.com/hydro-project/hydroflow/commit/0a465e55dd39c76bc1aefb020460a639d792fe87)) + * **[#1416](https://github.com/hydro-project/hydroflow/issues/1416)** + - Added simulation framework. 
([`3850b47`](https://github.com/hydro-project/hydroflow/commit/3850b4793b33ce7c4e88c80868335ba7506c4229)) + * **[#1417](https://github.com/hydro-project/hydroflow/issues/1417)** + - Drop unused dependencies ([`3c41e1e`](https://github.com/hydro-project/hydroflow/commit/3c41e1e9b505bb84474ec10df30fb65c6d6f90cb)) + * **[#1420](https://github.com/hydro-project/hydroflow/issues/1420)** + - Remove `FlowProps` ([`22c7218`](https://github.com/hydro-project/hydroflow/commit/22c72189bb76412955d29b03c5d99894c558a07c)) + * **[#1423](https://github.com/hydro-project/hydroflow/issues/1423)** + - Lower min dependency versions where possible, update `Cargo.lock` ([`11af328`](https://github.com/hydro-project/hydroflow/commit/11af32828bab6e4a4264d2635ff71a12bb0bb778)) + * **[#1428](https://github.com/hydro-project/hydroflow/issues/1428)** + - Cleanup doc comments for clippy latest ([`f5f1eb0`](https://github.com/hydro-project/hydroflow/commit/f5f1eb0c612f5c0c1752360d972ef6853c5e12f0)) + * **Uncategorized** + - Manually set versions for crates renamed in #1413 ([`a2ec110`](https://github.com/hydro-project/hydroflow/commit/a2ec110ccadb97e293b19d83a155d98d94224bba)) +
+ +## 0.8.0 (2024-07-23) + + + + + + + + +### Chore + + - update pinned rust version to 2024-06-17 + +### Documentation + + - test code snippets, generate output for `surface_flows`, fix #814 + +### Bug Fixes + + + + + + + - allow `ensure_singleton_referencers_succeed_persist` to ignore `identity`/`tee`/`union` operators, helps #1290 + Adds tests for `ensure_singleton_referencers_succeed_persist` persist + insertion behavior + + Fixes some cases of #1290, when just identity operators + (union/tee/identity) are between the singleton referencer and its + preceding flow state. + + TODO track per-operator instead: #1311 + - improve spanning of singleton substitution code, add compile-fail test, fix #1294 + Another small spanning improvement + - add `add_state_tick` to state API, reset at end of each tick, fix #1298 + Option 2 of #1298 + + * Main feature in the title is in `src/scheduled/{context,graph}.rs` +* Codegen for some stateful operators (those which can be used as singletons, and some others) is changed to use the new API. +* Add a test `test_cartesian_product_tick_state` for #1298 +* Rest of the diff is snapshot changes + +### Refactor + + - improve diagnostics for missing generic params + Improves diagnostic messages a bit for when no generic params are + supplied but some are expected. Previously this would span to the entire + macro invocation. + - improve diagnostics by re-spanning `#root` + Inspired by fixing the spans in `demux_enum` in #1271 + * re-span `#root` to `op_span` for better diagnostics + * use better span `func.inputs` in `demux` and `demux_enum` + * clippy fixups in `source_json`, `source_stdin` + * fix #1201 (for the most part) + +### Reverted + + - "feat(hydroflow): Added poll_futures and poll_futures_async operators.", fix #1183 + This reverts commit 997d90a76db9a4e05dbac35073a09548750ce342. 
+ + We have been trying to figure out the semantics a bit, and want to give + it more thought before we commit to maintaining it + + Can un-revert and adjust the semantics later when we use it + +### Test + + - fix `fold_keyed()` and add `reduce_keyed()` compile-fail tests, fix #1279 + +### Bug Fixes (BREAKING) + + - remove singleton referencer `persist::<'static>()` insertion + Also enables singletons for `persist()` and ensures that only the + `'static` lifetime is used + + Singletons are supposed to act like `cross_join()`. I.e. if we have this + code: + ```rust + stream -> filter(|item| ... #y ...) -> ... + ``` + It should behave equivalently to + ```rust + stream -> cj[0]; + y -> cj[1]; + cj = cross_join() -> filter(|(item, y)| ...) -> ... + ``` + + This has a very unintuitive replaying behavior, if `y` receives multiple + updates: + 1. `y` receives an item `10` +2. `stream` receives an item `20` +3. `(10, 20)` is emitted +4. `y` receives an item `11` +5. `(11, 20)` is emitted + In this case the item `20` gets emitted twice. + +### Refactor (BREAKING) + + - require lifetime on `persist*()` operators + +### Style (BREAKING) + + - enable clippy `upper-case-acronyms-aggressive` + * rename `GCP` -> `Gcp`, `NodeID` -> `NodeId` + * update CI `cargo-generate` template testing to use PR's branch instead + of whatever `main` happens to be + +### Commit Statistics + + + + - 17 commits contributed to the release. + - 16 commits were understood as [conventional](https://www.conventionalcommits.org). 
+ - 17 unique issues were worked on: [#1143](https://github.com/hydro-project/hydroflow/issues/1143), [#1216](https://github.com/hydro-project/hydroflow/issues/1216), [#1244](https://github.com/hydro-project/hydroflow/issues/1244), [#1260](https://github.com/hydro-project/hydroflow/issues/1260), [#1271](https://github.com/hydro-project/hydroflow/issues/1271), [#1273](https://github.com/hydro-project/hydroflow/issues/1273), [#1274](https://github.com/hydro-project/hydroflow/issues/1274), [#1280](https://github.com/hydro-project/hydroflow/issues/1280), [#1283](https://github.com/hydro-project/hydroflow/issues/1283), [#1295](https://github.com/hydro-project/hydroflow/issues/1295), [#1296](https://github.com/hydro-project/hydroflow/issues/1296), [#1297](https://github.com/hydro-project/hydroflow/issues/1297), [#1300](https://github.com/hydro-project/hydroflow/issues/1300), [#1309](https://github.com/hydro-project/hydroflow/issues/1309), [#1312](https://github.com/hydro-project/hydroflow/issues/1312), [#1332](https://github.com/hydro-project/hydroflow/issues/1332), [#1345](https://github.com/hydro-project/hydroflow/issues/1345) + +### Commit Details + + + +
view details + + * **[#1143](https://github.com/hydro-project/hydroflow/issues/1143)** + - "feat(hydroflow): Added poll_futures and poll_futures_async operators.", fix #1183 ([`256779a`](https://github.com/hydro-project/hydroflow/commit/256779abece03bee662b351430d27141d10bd5ef)) + * **[#1216](https://github.com/hydro-project/hydroflow/issues/1216)** + - "feat(hydroflow): Added poll_futures and poll_futures_async operators.", fix #1183 ([`256779a`](https://github.com/hydro-project/hydroflow/commit/256779abece03bee662b351430d27141d10bd5ef)) + * **[#1244](https://github.com/hydro-project/hydroflow/issues/1244)** + - Make inner for `WithTop` & `WithBot` private ([`1ad690b`](https://github.com/hydro-project/hydroflow/commit/1ad690b993f38ac6a03667fdce56e6603076b1d2)) + * **[#1260](https://github.com/hydro-project/hydroflow/issues/1260)** + - Test code snippets, generate output for `surface_flows`, fix #814 ([`2a4881d`](https://github.com/hydro-project/hydroflow/commit/2a4881d7c981bdf8f4deae9902e7d305f36c4203)) + * **[#1271](https://github.com/hydro-project/hydroflow/issues/1271)** + - Allow use of generics in `demux_enum::<...>()` op ([`b79f1a4`](https://github.com/hydro-project/hydroflow/commit/b79f1a4c8f30131c8ca2ab4900efed9ded819581)) + * **[#1273](https://github.com/hydro-project/hydroflow/issues/1273)** + - Make sure tasks are spawned!!!! 
([`39ad0d6`](https://github.com/hydro-project/hydroflow/commit/39ad0d68de5281c1ee4ecc97e05a1d14c602538b)) + * **[#1274](https://github.com/hydro-project/hydroflow/issues/1274)** + - Yield before `collect_ready_async` to ensure background async tasks can send to the stream ([`dfb5ec9`](https://github.com/hydro-project/hydroflow/commit/dfb5ec9022d2f854d65d68e55ae5c445ddbb8f5e)) + * **[#1280](https://github.com/hydro-project/hydroflow/issues/1280)** + - Improve diagnostics by re-spanning `#root` ([`70a3f9e`](https://github.com/hydro-project/hydroflow/commit/70a3f9e19f70fae0967eb96c454ab922c0f5290b)) + * **[#1283](https://github.com/hydro-project/hydroflow/issues/1283)** + - Fix `fold_keyed()` and add `reduce_keyed()` compile-fail tests, fix #1279 ([`b3c233d`](https://github.com/hydro-project/hydroflow/commit/b3c233dadd7ebc57eb7e8ee111e43e8dddb9c882)) + * **[#1295](https://github.com/hydro-project/hydroflow/issues/1295)** + - Require lifetime on `persist*()` operators ([`67c0e51`](https://github.com/hydro-project/hydroflow/commit/67c0e51fb25ea1a2e3aae197c1984920b46759fa)) + * **[#1296](https://github.com/hydro-project/hydroflow/issues/1296)** + - Improve diagnostics for missing generic params ([`f1442a0`](https://github.com/hydro-project/hydroflow/commit/f1442a0161fe7a1827a60cb96ff11646d711373d)) + * **[#1297](https://github.com/hydro-project/hydroflow/issues/1297)** + - Allow `ensure_singleton_referencers_succeed_persist` to ignore `identity`/`tee`/`union` operators, helps #1290 ([`f45b9dd`](https://github.com/hydro-project/hydroflow/commit/f45b9ddbfca84e11398f3dec774b713b5b071422)) + * **[#1300](https://github.com/hydro-project/hydroflow/issues/1300)** + - Add `add_state_tick` to state API, reset at end of each tick, fix #1298 ([`f91c300`](https://github.com/hydro-project/hydroflow/commit/f91c30045dfdf92cf3d383676875d9e749cb8d93)) + * **[#1309](https://github.com/hydro-project/hydroflow/issues/1309)** + - 
Update pinned rust version to 2024-06-17 ([`3098f77`](https://github.com/hydro-project/hydroflow/commit/3098f77fd99882aae23c4b31017aa4b761306197)) + * **[#1312](https://github.com/hydro-project/hydroflow/issues/1312)** + - Improve spanning of singleton substitution code, add compile-fail test, fix #1294 ([`404f0ac`](https://github.com/hydro-project/hydroflow/commit/404f0accf08e643a1c5e815f06bb31a65379e8c8)) + * **[#1332](https://github.com/hydro-project/hydroflow/issues/1332)** + - Remove singleton referencer `persist::<'static>()` insertion ([`755e8a6`](https://github.com/hydro-project/hydroflow/commit/755e8a6d2c2b30b5d28b60315bb099030d3f3964)) + * **[#1345](https://github.com/hydro-project/hydroflow/issues/1345)** + - Enable clippy `upper-case-acronyms-aggressive` ([`12b8ba5`](https://github.com/hydro-project/hydroflow/commit/12b8ba53f28eb9de1318b41cdf1e23282f6f0eb6)) + * **Uncategorized** + - Release hydroflow_lang v0.8.0, hydroflow_datalog_core v0.8.0, hydroflow_datalog v0.8.0, hydroflow_macro v0.8.0, lattices_macro v0.5.5, lattices v0.5.6, variadics v0.0.5, pusherator v0.0.7, hydroflow v0.8.0, hydroflow_plus v0.8.0, hydro_deploy v0.8.0, hydro_cli v0.8.0, hydroflow_plus_cli_integration v0.8.0, safety bump 7 crates ([`ca6c16b`](https://github.com/hydro-project/hydroflow/commit/ca6c16b4a7ce35e155fe7fc6c7d1676c37c9e4de)) +
+ + +Other 'tick state will need to be cleared, but existing implementation does that when the iterator runs, which is good enough. There is only a problem if a singleton can reference the state before the iterator runs, in that case. allow use of generics in demux_enum::<...>() opensures turbofish syntax for the match clauses, which is needed when there are generic parameters in the typeadds a test as well yield before collect_ready_async to ensure background async tasks can send to the stream make sure tasks are spawned!!!!fix bug introduced in #978 Make inner for WithTop & WithBot privateOption is not a lattice, so it is unsafe to expose as public.I also updated documentation to lead with intention beforeimplementation (minor cleanup).To emulate this unintuitive behavior, we currently ensure that apersist::<'static>() exists before operator that references thesingleton (filter, in this case). (Note that this is equivalent tocross_join::<'static>() and not cross_join::<'tick>())However singletons also have had a different mechanism that affectsthis- currently singleton references create a next-stratum constraint,that ensures a singleton referencer must be in a later stratum than thesingleton it is referencing.Note that this actually prevents the example situation above fromhappening– the updates to y will be received all at once at the startof the next stratum.This means that actually, currently singletons are equivalent tosomething like:ruststream -> cj[0]; +y -> next_stratum() -> last() -> cj[1]; +cj = cross_join() -> filter(|(item, y)| ...) -> ... +last() is a hypothetical operator that only keeps the most recent itemoutput by y. next_stratum() -> last() is equivalent to reduce(|acc, item| *acc = item) (since that comes with a stratum barrier). Sotechnically this is a slightly different behavior than just cross_join,but it is more intuitive.ruststream -> cj[0]; +y -> reduce(|acc, item| { *acc = item; }) -> cj[1]; +cj = cross_join() -> filter(|(item, y)| ...) 
-> ... +Also fixes #1293 + +## 0.7.0 (2024-05-24) + + + + + + + + + + + + + + + + +### Chore + + - use workaround for `cargo smart-release` not properly ordering `dev-`/`build-dependencies` + - expect custom config names to prevent warnings + See + https://doc.rust-lang.org/nightly/cargo/reference/build-scripts.html#rustc-check-cfg + - update pyo3, silence warnings in generated code + +### Documentation + + - Updating CONTRIBUTING.md with some info about feature branches + Also updating GitHub workflows to run on feature branches as well. + - improve docs/README.md, fix #856 + +### New Features + + - add `'static`/`'tick` support for `state()` operator + - insert `persist()` before singleton referencers to enable replay + - allow `defer_tick()`, `next_stratum()` to have a type hint arg + - error on unused ports, fix #1108 + - Added poll_futures and poll_futures_async operators. + +### Bug Fixes + + - fix #1050 for `reduce_keyed`, fix #1050 + - fix scheduler spinning on replay, fix #961 + fixes the added tests failing in the previous commit + - fix `TeeingHandoff` not scheduling outputs `tee()`d after send port is already used via `add_subgraph*`, fix #1163 + Add test for it as well + +### Refactor + + - simplify `demux_enum()`, somewhat improves error messages #1201 + - use `Duration` instead of int seconds + - reorder error message emission for conflicting varnames + - improve `TeeingHandoff` drop implementation + +### Style + + - fix warning unnecessary qualification + - appease latest nightly clippy + +### Test + + - join replay static test + - add tests for scheduler spinning #961 + failing + - update some compile-fail outputs caused by rustc regression + +### New Features (BREAKING) + + - Change current_tick_start to wall clock time + - Introduce newtypes for working with ticks + - add detupling syntax and allow interleaving with flattening + +### Refactor (BREAKING) + + - change `lattice_bimorphism` to take state via singleton arguments #969 #1058 + - replace 
`state()` with `state_ref()` implementation + +### Commit Statistics + + + + - 31 commits contributed to the release. + - 27 commits were understood as [conventional](https://www.conventionalcommits.org). + - 23 unique issues were worked on: [#1120](https://github.com/hydro-project/hydroflow/issues/1120), [#1143](https://github.com/hydro-project/hydroflow/issues/1143), [#1152](https://github.com/hydro-project/hydroflow/issues/1152), [#1159](https://github.com/hydro-project/hydroflow/issues/1159), [#1164](https://github.com/hydro-project/hydroflow/issues/1164), [#1166](https://github.com/hydro-project/hydroflow/issues/1166), [#1167](https://github.com/hydro-project/hydroflow/issues/1167), [#1171](https://github.com/hydro-project/hydroflow/issues/1171), [#1176](https://github.com/hydro-project/hydroflow/issues/1176), [#1178](https://github.com/hydro-project/hydroflow/issues/1178), [#1182](https://github.com/hydro-project/hydroflow/issues/1182), [#1190](https://github.com/hydro-project/hydroflow/issues/1190), [#1191](https://github.com/hydro-project/hydroflow/issues/1191), [#1192](https://github.com/hydro-project/hydroflow/issues/1192), [#1193](https://github.com/hydro-project/hydroflow/issues/1193), [#1196](https://github.com/hydro-project/hydroflow/issues/1196), [#1197](https://github.com/hydro-project/hydroflow/issues/1197), [#1198](https://github.com/hydro-project/hydroflow/issues/1198), [#1199](https://github.com/hydro-project/hydroflow/issues/1199), [#1204](https://github.com/hydro-project/hydroflow/issues/1204), [#1232](https://github.com/hydro-project/hydroflow/issues/1232), [#1236](https://github.com/hydro-project/hydroflow/issues/1236), [#1238](https://github.com/hydro-project/hydroflow/issues/1238) + +### Commit Details + + + +
view details + + * **[#1120](https://github.com/hydro-project/hydroflow/issues/1120)** + - Extend Chat Example w/ Gossip Based Backend ([`55051e1`](https://github.com/hydro-project/hydroflow/commit/55051e1c8db2ba8e36c6b6ed0e888996e9501f3f)) + * **[#1143](https://github.com/hydro-project/hydroflow/issues/1143)** + - Added poll_futures and poll_futures_async operators. ([`997d90a`](https://github.com/hydro-project/hydroflow/commit/997d90a76db9a4e05dbac35073a09548750ce342)) + * **[#1152](https://github.com/hydro-project/hydroflow/issues/1152)** + - Update some compile-fail outputs caused by rustc regression ([`d2427e2`](https://github.com/hydro-project/hydroflow/commit/d2427e2cc901c4174830d41b4a1dfc52fd4f19ce)) + - Update pyo3, silence warnings in generated code ([`1801502`](https://github.com/hydro-project/hydroflow/commit/18015029a725b068696ed9edefd1097583c858a6)) + - Appease latest nightly clippy ([`d9b2c02`](https://github.com/hydro-project/hydroflow/commit/d9b2c0263d508e6f6855f49504896d4ea670c355)) + * **[#1159](https://github.com/hydro-project/hydroflow/issues/1159)** + - Change `lattice_bimorphism` to take state via singleton arguments #969 #1058 ([`4386fac`](https://github.com/hydro-project/hydroflow/commit/4386fac824d64f63eae7629292675ac6bc8df9f7)) + - Replace `state()` with `state_ref()` implementation ([`d7e579c`](https://github.com/hydro-project/hydroflow/commit/d7e579c39b370a0ea0b0385d1029e9f8a7351d68)) + * **[#1164](https://github.com/hydro-project/hydroflow/issues/1164)** + - Improve `TeeingHandoff` drop implementation ([`b072ee0`](https://github.com/hydro-project/hydroflow/commit/b072ee026f97f8537165e1fb247101e0ab2fb320)) + - Fix `TeeingHandoff` not scheduling outputs `tee()`d after send port is already used via `add_subgraph*`, fix #1163 ([`69abccd`](https://github.com/hydro-project/hydroflow/commit/69abccde27644d7d0ed2fdf7d75f491fdbc41de8)) + * 
**[#1166](https://github.com/hydro-project/hydroflow/issues/1166)** + - Add detupling syntax and allow interleaving with flattening ([`c9dc66d`](https://github.com/hydro-project/hydroflow/commit/c9dc66db2bde34aa1385c8847d0ba7c16472f75d)) + * **[#1167](https://github.com/hydro-project/hydroflow/issues/1167)** + - Error on unused ports, fix #1108 ([`9df9c62`](https://github.com/hydro-project/hydroflow/commit/9df9c6251526903dbe7288e2fd9a532c63a9412c)) + - Reorder error message emission for conflicting varnames ([`20471f1`](https://github.com/hydro-project/hydroflow/commit/20471f11901e3fb15a2efea61752d836d4facba5)) + * **[#1171](https://github.com/hydro-project/hydroflow/issues/1171)** + - Fix scheduler spinning on replay, fix #961 ([`d773f9a`](https://github.com/hydro-project/hydroflow/commit/d773f9a6938fe6d1521516f7a7c441c6c281a9fa)) + - Add tests for scheduler spinning #961 ([`8f4cd7b`](https://github.com/hydro-project/hydroflow/commit/8f4cd7bfc13bcbd89ff4a0e48c7e95cde5039f1b)) + * **[#1176](https://github.com/hydro-project/hydroflow/issues/1176)** + - Fix #1050 for `reduce_keyed`, fix #1050 ([`e2fa6b0`](https://github.com/hydro-project/hydroflow/commit/e2fa6b0729cb92b29e1d293c2788458845ff306a)) + * **[#1178](https://github.com/hydro-project/hydroflow/issues/1178)** + - Fix warning unnecessary qualification ([`a4cd5fe`](https://github.com/hydro-project/hydroflow/commit/a4cd5fe57130e3b517872d1c27a274f9c9f601c2)) + * **[#1182](https://github.com/hydro-project/hydroflow/issues/1182)** + - Allow `defer_tick()`, `next_stratum()` to have a type hint arg ([`4ca8ce4`](https://github.com/hydro-project/hydroflow/commit/4ca8ce43c0998296e2d86bd74800585ebb24123a)) + * **[#1190](https://github.com/hydro-project/hydroflow/issues/1190)** + - `source_interval` no longer emits Instant instances on output. 
([`b292f11`](https://github.com/hydro-project/hydroflow/commit/b292f111715fd9c397ffb35cf991bd0bfb01c1e1)) + * **[#1191](https://github.com/hydro-project/hydroflow/issues/1191)** + - Fixup! test(hydroflow): join replay static test ([`c09a23e`](https://github.com/hydro-project/hydroflow/commit/c09a23e854b10202dd7eec0ad0076de2bb196f61)) + - Join replay static test ([`f46e0ac`](https://github.com/hydro-project/hydroflow/commit/f46e0ac84e13ad6d713836c948b02e7b199f5faa)) + * **[#1192](https://github.com/hydro-project/hydroflow/issues/1192)** + - Expect custom config names to prevent warnings ([`b86f11a`](https://github.com/hydro-project/hydroflow/commit/b86f11aad344fef6ad9cdd1db0b45bb738c48bd6)) + * **[#1193](https://github.com/hydro-project/hydroflow/issues/1193)** + - Introduce newtypes for working with ticks ([`c2f6c95`](https://github.com/hydro-project/hydroflow/commit/c2f6c9578127a71c879752d52e115df75659e2b0)) + * **[#1196](https://github.com/hydro-project/hydroflow/issues/1196)** + - Change current_tick_start to wall clock time ([`218175c`](https://github.com/hydro-project/hydroflow/commit/218175cd935c4d94efb2f01f6e73300ace978283)) + * **[#1197](https://github.com/hydro-project/hydroflow/issues/1197)** + - Fixup! 
test(hydroflow): join replay static test ([`c09a23e`](https://github.com/hydro-project/hydroflow/commit/c09a23e854b10202dd7eec0ad0076de2bb196f61)) + * **[#1198](https://github.com/hydro-project/hydroflow/issues/1198)** + - Insert `persist()` before singleton referencers to enable replay ([`f184ea1`](https://github.com/hydro-project/hydroflow/commit/f184ea145f0c7c3072d7d0f94d42fcda717ac8d9)) + * **[#1199](https://github.com/hydro-project/hydroflow/issues/1199)** + - Use `Duration` instead of int seconds ([`b649d47`](https://github.com/hydro-project/hydroflow/commit/b649d470cc28a2bbb7e31426adcd050b455bf803)) + - Improve docs/README.md, fix #856 ([`9bcc526`](https://github.com/hydro-project/hydroflow/commit/9bcc5265044b467e9fe4ada0a89c7e84fd66dd43)) + * **[#1204](https://github.com/hydro-project/hydroflow/issues/1204)** + - Simplify `demux_enum()`, somewhat improves error messages #1201 ([`826dbd9`](https://github.com/hydro-project/hydroflow/commit/826dbd9a709de2f883992bdcefa8f2d566d74ecb)) + * **[#1232](https://github.com/hydro-project/hydroflow/issues/1232)** + - Add `'static`/`'tick` support for `state()` operator ([`b0692b0`](https://github.com/hydro-project/hydroflow/commit/b0692b0d697980eaf9893c07a443a257e04786c5)) + * **[#1236](https://github.com/hydro-project/hydroflow/issues/1236)** + - Updating CONTRIBUTING.md with some info about feature branches ([`0d2f14b`](https://github.com/hydro-project/hydroflow/commit/0d2f14b9237c0eaa8131d1d1118768357ac8133b)) + * **[#1238](https://github.com/hydro-project/hydroflow/issues/1238)** + - Use workaround for `cargo smart-release` not properly ordering `dev-`/`build-dependencies` ([`c9dfddc`](https://github.com/hydro-project/hydroflow/commit/c9dfddc680e0ce5415539d7b77bc5beb97ab59d9)) + * **Uncategorized** + - Release hydroflow_lang v0.7.0, hydroflow_datalog_core v0.7.0, hydroflow_datalog v0.7.0, hydroflow_macro v0.7.0, lattices v0.5.5, multiplatform_test v0.1.0, 
pusherator v0.0.6, hydroflow v0.7.0, stageleft_macro v0.2.0, stageleft v0.3.0, stageleft_tool v0.2.0, hydroflow_plus v0.7.0, hydro_deploy v0.7.0, hydro_cli v0.7.0, hydroflow_plus_cli_integration v0.7.0, safety bump 8 crates ([`2852147`](https://github.com/hydro-project/hydroflow/commit/285214740627685e911781793e05d234ab2ad2bd)) +
+ +## 0.6.2 (2024-04-09) + + + +### New Features + + + + + - allow `reduce()` to be referenceable as a singleton, fix docs and bugs + * fixed bug: accumulator closures could have return values, which would be ignored +* updated docs + +### Bug Fixes + + - fix singleton reference edges being backwards, fix #1147 + +### Test + + - add tests for #1147 (failing) + +### Commit Statistics + + + + - 6 commits contributed to the release. + - 5 commits were understood as [conventional](https://www.conventionalcommits.org). + - 3 unique issues were worked on: [#1134](https://github.com/hydro-project/hydroflow/issues/1134), [#1148](https://github.com/hydro-project/hydroflow/issues/1148), [#1150](https://github.com/hydro-project/hydroflow/issues/1150) + +### Commit Details + + + +
view details + + * **[#1134](https://github.com/hydro-project/hydroflow/issues/1134)** + - Make fold output optional, usable as just a singleton reference ([`0f16d1f`](https://github.com/hydro-project/hydroflow/commit/0f16d1f50cd64d9ca52ec811acc4a643a86f14fe)) + - Allow `fold()` to be referenceable as a singleton ([`5c5b652`](https://github.com/hydro-project/hydroflow/commit/5c5b6523d96a22c97382a7c61ee0e36ad77c0a0f)) + * **[#1148](https://github.com/hydro-project/hydroflow/issues/1148)** + - Fix singleton reference edges being backwards, fix #1147 ([`82b3030`](https://github.com/hydro-project/hydroflow/commit/82b3030eefb759a97053057f717efb95491802b3)) + - Add tests for #1147 (failing) ([`d9f72a6`](https://github.com/hydro-project/hydroflow/commit/d9f72a62c0099d69986fe28f484bb2bf574caaac)) + * **[#1150](https://github.com/hydro-project/hydroflow/issues/1150)** + - Allow `reduce()` to be referenceable as a singleton, fix docs and bugs ([`5679bfb`](https://github.com/hydro-project/hydroflow/commit/5679bfb7d3b96089cc020308c7d88021a254e63c)) + * **Uncategorized** + - Release hydroflow_lang v0.6.2, hydroflow v0.6.2, hydroflow_plus v0.6.1, hydro_deploy v0.6.1, hydro_cli v0.6.1, hydroflow_plus_cli_integration v0.6.1, stageleft_tool v0.1.1 ([`23cfe08`](https://github.com/hydro-project/hydroflow/commit/23cfe0839079aa17d042bbd3976f6d188689d290)) +
+ +## 0.6.1 (2024-04-05) + + + + + + + + + + + +### Chore + + - appease latest nightly clippy + Also updates `surface_keyed_fold.rs` `test_fold_keyed_infer_basic` test. + +### Documentation + + - more improvements to `persist_mut[_keyed]` docs + +### New Features + + + + + + - Render singleton references in graphvis + - enable inspect to have no outputs + - track which ops have singleton state + Add a nice error message for referencing a non-singleton op + - improve singleton error messages/handling + - enable singleton reference usage in all operators + also cleans up local imports + - switch singletons references to being per-op instead of per-edge + Only testing in `filter` op for now + - initial proof-of-concept for singletons + - include subgraph ID in pivot_run codegen + - switch launch to be a macro to enable borrowing locals + - add syntax for "splatting" columns + feat(hydroflow_datalog): add syntax for "splatting" columns + + The * operator can be used on the left-hand side of a rule to indicate + that each row should be duplicated into many rows with that column being + replaced by every value in iterable corresponding to the original + expression. 
+ - allow dropping of tee handoff output to prevent memory leak + - implement teeing handoff for scheduled layer + - Cleanup/update scheduled layer `TeeingHandoff` tests + +### Bug Fixes + + - `HandoffMeta::is_bottom` impl for `TeeingHandoff` [ci-bench] + - #1050 for `fold_keyed` + +### Refactor + + - consider singleton references as `DelayType::Stratum` barriers + - Cleanup user-facing teeing APIs + - Teeing overhead moved to graph assembly time, instead of scheduler + - use smallvec `[_; 1]` for handoff pred(s)/succ(s) [ci-bench] + +### Style + + - `Never` -> `std::convert::Infallible` + - qualified path cleanups for clippy + +### Test + + - fix some `surface_codegen.rs` tests, actually test output, fix #1096 + - add hydroflow scheduling spin test for #1050 #800 + +### Commit Statistics + + + + - 28 commits contributed to the release. + - 27 commits were understood as [conventional](https://www.conventionalcommits.org). + - 18 unique issues were worked on: [#1068](https://github.com/hydro-project/hydroflow/issues/1068), [#1086](https://github.com/hydro-project/hydroflow/issues/1086), [#1087](https://github.com/hydro-project/hydroflow/issues/1087), [#1089](https://github.com/hydro-project/hydroflow/issues/1089), [#1090](https://github.com/hydro-project/hydroflow/issues/1090), [#1091](https://github.com/hydro-project/hydroflow/issues/1091), [#1093](https://github.com/hydro-project/hydroflow/issues/1093), [#1094](https://github.com/hydro-project/hydroflow/issues/1094), [#1102](https://github.com/hydro-project/hydroflow/issues/1102), [#1113](https://github.com/hydro-project/hydroflow/issues/1113), [#1125](https://github.com/hydro-project/hydroflow/issues/1125), [#1128](https://github.com/hydro-project/hydroflow/issues/1128), [#1132](https://github.com/hydro-project/hydroflow/issues/1132), [#1133](https://github.com/hydro-project/hydroflow/issues/1133), 
[#1137](https://github.com/hydro-project/hydroflow/issues/1137), [#1140](https://github.com/hydro-project/hydroflow/issues/1140), [#1145](https://github.com/hydro-project/hydroflow/issues/1145), [#1146](https://github.com/hydro-project/hydroflow/issues/1146) + +### Commit Details + + + +
view details + + * **[#1068](https://github.com/hydro-project/hydroflow/issues/1068)** + - `HandoffMeta::is_bottom` impl for `TeeingHandoff` [ci-bench] ([`f29c710`](https://github.com/hydro-project/hydroflow/commit/f29c710e62d45a819b0c44125f94be207ffd5d83)) + - Cleanup user-facing teeing APIs ([`8a22b3a`](https://github.com/hydro-project/hydroflow/commit/8a22b3a27282b5076db1c89bc3a8572dc16a5e7c)) + - Teeing overhead moved to graph assembly time, instead of scheduler ([`0ac2d46`](https://github.com/hydro-project/hydroflow/commit/0ac2d46dcfc9756219806663042eca18e4eeff3f)) + - Allow dropping of tee handoff output to prevent memory leak ([`31a543c`](https://github.com/hydro-project/hydroflow/commit/31a543c236b874a60b7a152f11377013bcd15221)) + - Implement teeing handoff for scheduled layer ([`5f21e4c`](https://github.com/hydro-project/hydroflow/commit/5f21e4c8c565f1c87694d692fcec33ce1cb9d77f)) + * **[#1086](https://github.com/hydro-project/hydroflow/issues/1086)** + - #1050 for `fold_keyed` ([`f8311db`](https://github.com/hydro-project/hydroflow/commit/f8311db2cd9628607887fc04f2ea5933c8b7c11e)) + - Scheduler `log` subgraph name (which includes surface syntax sg id) ([`99657d5`](https://github.com/hydro-project/hydroflow/commit/99657d528c36e4e25b7eaa536c480e7b3d859443)) + - Add hydroflow scheduling spin test for #1050 #800 ([`118a563`](https://github.com/hydro-project/hydroflow/commit/118a563dbe627e730797128098698739db5588e5)) + * **[#1087](https://github.com/hydro-project/hydroflow/issues/1087)** + - Add `kvs_mut` example, fix #785 ([`fd90d41`](https://github.com/hydro-project/hydroflow/commit/fd90d4130e86b16139f6d3a386fbbfb49fae5a9b)) + * **[#1089](https://github.com/hydro-project/hydroflow/issues/1089)** + - More improvements to `persist_mut[_keyed]` docs ([`550b17c`](https://github.com/hydro-project/hydroflow/commit/550b17c0e0edd233b0cceab55fed0de309051503)) + * 
**[#1090](https://github.com/hydro-project/hydroflow/issues/1090)** + - Qualified path cleanups for clippy ([`7958fb0`](https://github.com/hydro-project/hydroflow/commit/7958fb0d900be8fe7359326abfa11dcb8fb35e8a)) + * **[#1091](https://github.com/hydro-project/hydroflow/issues/1091)** + - Switch singletons references to being per-op instead of per-edge ([`f9b26b6`](https://github.com/hydro-project/hydroflow/commit/f9b26b6df7ff7d2a3530527579c60f41d93e9d45)) + - Initial proof-of-concept for singletons ([`0a5e785`](https://github.com/hydro-project/hydroflow/commit/0a5e785d4bf0729ba0fe95466c822e8304f96e24)) + * **[#1093](https://github.com/hydro-project/hydroflow/issues/1093)** + - `Never` -> `std::convert::Infallible` ([`76e501a`](https://github.com/hydro-project/hydroflow/commit/76e501a35ba47803b69ea62920aae570fb60e2f0)) + * **[#1094](https://github.com/hydro-project/hydroflow/issues/1094)** + - Add aggregation for collecting values into a vector ([`61cb55a`](https://github.com/hydro-project/hydroflow/commit/61cb55a04e518560beff1ea5d446927b6d7096b3)) + * **[#1102](https://github.com/hydro-project/hydroflow/issues/1102)** + - Switch launch to be a macro to enable borrowing locals ([`68d8ffc`](https://github.com/hydro-project/hydroflow/commit/68d8ffcfb8f378e603d99bd6cac4c0ce69dc25ba)) + * **[#1113](https://github.com/hydro-project/hydroflow/issues/1113)** + - Use smallvec `[_; 1]` for handoff pred(s)/succ(s) [ci-bench] ([`be8d767`](https://github.com/hydro-project/hydroflow/commit/be8d767aa6347f6df57908d1e9beae7224ea2e53)) + * **[#1125](https://github.com/hydro-project/hydroflow/issues/1125)** + - Fix some `surface_codegen.rs` tests, actually test output, fix #1096 ([`99bbe50`](https://github.com/hydro-project/hydroflow/commit/99bbe50a6cd214a974aec72d95bf10b9b6da33f6)) + * **[#1128](https://github.com/hydro-project/hydroflow/issues/1128)** + - Enable inspect to have no outputs 
([`54c6874`](https://github.com/hydro-project/hydroflow/commit/54c6874931ebd7ba13140dbcf3e75ad9b2852331)) + * **[#1132](https://github.com/hydro-project/hydroflow/issues/1132)** + - Add syntax for "splatting" columns ([`916a366`](https://github.com/hydro-project/hydroflow/commit/916a366cba94c739a6e170d281663f19f706d6be)) + * **[#1133](https://github.com/hydro-project/hydroflow/issues/1133)** + - Track which ops have singleton state ([`fa34dd3`](https://github.com/hydro-project/hydroflow/commit/fa34dd317ab0cb59753bd884acae2605a7630b10)) + - Improve singleton error messages/handling ([`601c484`](https://github.com/hydro-project/hydroflow/commit/601c484bf9afe9aada61c1a40478c0e093140a56)) + - Enable singleton reference usage in all operators ([`8cb29fa`](https://github.com/hydro-project/hydroflow/commit/8cb29fa809e255195b65fd23b589bf7c970c5599)) + * **[#1137](https://github.com/hydro-project/hydroflow/issues/1137)** + - Include subgraph ID in pivot_run codegen ([`602e21f`](https://github.com/hydro-project/hydroflow/commit/602e21fc04da5a0d65ac7128688760c92ffba3c1)) + * **[#1140](https://github.com/hydro-project/hydroflow/issues/1140)** + - Appease latest nightly clippy ([`fc447ff`](https://github.com/hydro-project/hydroflow/commit/fc447ffdf8fd1b2189545a991f08588238182f00)) + * **[#1145](https://github.com/hydro-project/hydroflow/issues/1145)** + - Consider singleton references as `DelayType::Stratum` barriers ([`1b19361`](https://github.com/hydro-project/hydroflow/commit/1b19361a87bafb7d7c12be04cc2ce3370d71439a)) + * **[#1146](https://github.com/hydro-project/hydroflow/issues/1146)** + - Render singleton references in graphvis ([`1bde8a2`](https://github.com/hydro-project/hydroflow/commit/1bde8a2443523fa8d3aafd7459c9a484de7724b7)) + * **Uncategorized** + - Release hydroflow_cli_integration v0.5.2, hydroflow_lang v0.6.1, hydroflow_datalog_core v0.6.1, lattices v0.5.4, hydroflow v0.6.1, stageleft_macro v0.1.1, 
stageleft v0.2.1, hydroflow_plus v0.6.1, hydro_deploy v0.6.1, hydro_cli v0.6.1, hydroflow_plus_cli_integration v0.6.1, stageleft_tool v0.1.1 ([`cd63f22`](https://github.com/hydro-project/hydroflow/commit/cd63f2258c961a40f0e5dbef20ac329a2d570ad0)) +
+ ## 0.6.0 (2024-03-02) + + + + + + + + ### Chore - prep for 0.0.4 release @@ -35,8 +629,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 9 commits contributed to the release over the course of 25 calendar days. - - 28 days passed between releases. + - 10 commits contributed to the release. - 9 commits were understood as [conventional](https://www.conventionalcommits.org). - 6 unique issues were worked on: [#1015](https://github.com/hydro-project/hydroflow/issues/1015), [#1057](https://github.com/hydro-project/hydroflow/issues/1057), [#1060](https://github.com/hydro-project/hydroflow/issues/1060), [#1061](https://github.com/hydro-project/hydroflow/issues/1061), [#1084](https://github.com/hydro-project/hydroflow/issues/1084), [#1085](https://github.com/hydro-project/hydroflow/issues/1085) @@ -59,6 +652,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * **[#1085](https://github.com/hydro-project/hydroflow/issues/1085)** - Cleanup timing in some `surface_async` tests, #1078 ([`18ee2ad`](https://github.com/hydro-project/hydroflow/commit/18ee2ad245dad997494267050a9568f20cbe215f)) * **Uncategorized** + - Release hydroflow_lang v0.6.0, hydroflow_datalog_core v0.6.0, hydroflow_datalog v0.6.0, hydroflow_macro v0.6.0, lattices v0.5.3, variadics v0.0.4, pusherator v0.0.5, hydroflow v0.6.0, stageleft v0.2.0, hydroflow_plus v0.6.0, hydro_deploy v0.6.0, hydro_cli v0.6.0, hydroflow_plus_cli_integration v0.6.0, safety bump 7 crates ([`09ea65f`](https://github.com/hydro-project/hydroflow/commit/09ea65fe9cd45c357c43bffca30e60243fa45cc8)) - Prep for 0.0.4 release ([`5a451ac`](https://github.com/hydro-project/hydroflow/commit/5a451ac4ae75024153a06416fc81d834d1fdae6f)) - Fix imports for clippy ([`71353f0`](https://github.com/hydro-project/hydroflow/commit/71353f0d4dfd9766dfdc715c4a91a028081f910f)) - Fix imports 
([`b391447`](https://github.com/hydro-project/hydroflow/commit/b391447ec13f1f79c99142f296dc2fa8640034f4)) @@ -84,8 +678,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 5 commits contributed to the release over the course of 2 calendar days. - - 3 days passed between releases. + - 5 commits contributed to the release. - 4 commits were understood as [conventional](https://www.conventionalcommits.org). - 4 unique issues were worked on: [#1041](https://github.com/hydro-project/hydroflow/issues/1041), [#1051](https://github.com/hydro-project/hydroflow/issues/1051), [#1054](https://github.com/hydro-project/hydroflow/issues/1054), [#1055](https://github.com/hydro-project/hydroflow/issues/1055) @@ -147,6 +740,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 -- - new implementation and Hydro Deploy setup -- + - new implementation and Hydro Deploy setup + -- + - new implementation and Hydro Deploy setup + -- + - new implementation and Hydro Deploy setup + -- + - new implementation and Hydro Deploy setup + -- + - new implementation and Hydro Deploy setup + -- ### Bug Fixes @@ -184,8 +787,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 27 commits contributed to the release over the course of 109 calendar days. - - 110 days passed between releases. + - 27 commits contributed to the release. - 26 commits were understood as [conventional](https://www.conventionalcommits.org). 
- 23 unique issues were worked on: [#1003](https://github.com/hydro-project/hydroflow/issues/1003), [#1005](https://github.com/hydro-project/hydroflow/issues/1005), [#1024](https://github.com/hydro-project/hydroflow/issues/1024), [#1025](https://github.com/hydro-project/hydroflow/issues/1025), [#1026](https://github.com/hydro-project/hydroflow/issues/1026), [#1032](https://github.com/hydro-project/hydroflow/issues/1032), [#1036](https://github.com/hydro-project/hydroflow/issues/1036), [#899](https://github.com/hydro-project/hydroflow/issues/899), [#909](https://github.com/hydro-project/hydroflow/issues/909), [#942](https://github.com/hydro-project/hydroflow/issues/942), [#945](https://github.com/hydro-project/hydroflow/issues/945), [#948](https://github.com/hydro-project/hydroflow/issues/948), [#950](https://github.com/hydro-project/hydroflow/issues/950), [#959](https://github.com/hydro-project/hydroflow/issues/959), [#960](https://github.com/hydro-project/hydroflow/issues/960), [#967](https://github.com/hydro-project/hydroflow/issues/967), [#971](https://github.com/hydro-project/hydroflow/issues/971), [#974](https://github.com/hydro-project/hydroflow/issues/974), [#978](https://github.com/hydro-project/hydroflow/issues/978), [#979](https://github.com/hydro-project/hydroflow/issues/979), [#984](https://github.com/hydro-project/hydroflow/issues/984), [#986](https://github.com/hydro-project/hydroflow/issues/986), [#996](https://github.com/hydro-project/hydroflow/issues/996) @@ -340,8 +942,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 46 commits contributed to the release over the course of 49 calendar days. - - 56 days passed between releases. + - 46 commits contributed to the release. - 43 commits were understood as [conventional](https://www.conventionalcommits.org). 
- 19 unique issues were worked on: [#882](https://github.com/hydro-project/hydroflow/issues/882), [#884](https://github.com/hydro-project/hydroflow/issues/884), [#885](https://github.com/hydro-project/hydroflow/issues/885), [#886](https://github.com/hydro-project/hydroflow/issues/886), [#887](https://github.com/hydro-project/hydroflow/issues/887), [#892](https://github.com/hydro-project/hydroflow/issues/892), [#893](https://github.com/hydro-project/hydroflow/issues/893), [#896](https://github.com/hydro-project/hydroflow/issues/896), [#897](https://github.com/hydro-project/hydroflow/issues/897), [#898](https://github.com/hydro-project/hydroflow/issues/898), [#902](https://github.com/hydro-project/hydroflow/issues/902), [#906](https://github.com/hydro-project/hydroflow/issues/906), [#918](https://github.com/hydro-project/hydroflow/issues/918), [#919](https://github.com/hydro-project/hydroflow/issues/919), [#923](https://github.com/hydro-project/hydroflow/issues/923), [#924](https://github.com/hydro-project/hydroflow/issues/924), [#926](https://github.com/hydro-project/hydroflow/issues/926), [#932](https://github.com/hydro-project/hydroflow/issues/932), [#935](https://github.com/hydro-project/hydroflow/issues/935) @@ -496,8 +1097,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 25 commits contributed to the release over the course of 39 calendar days. - - 42 days passed between releases. + - 25 commits contributed to the release. - 22 commits were understood as [conventional](https://www.conventionalcommits.org). 
- 23 unique issues were worked on: [#820](https://github.com/hydro-project/hydroflow/issues/820), [#821](https://github.com/hydro-project/hydroflow/issues/821), [#822](https://github.com/hydro-project/hydroflow/issues/822), [#823](https://github.com/hydro-project/hydroflow/issues/823), [#833](https://github.com/hydro-project/hydroflow/issues/833), [#835](https://github.com/hydro-project/hydroflow/issues/835), [#837](https://github.com/hydro-project/hydroflow/issues/837), [#840](https://github.com/hydro-project/hydroflow/issues/840), [#842](https://github.com/hydro-project/hydroflow/issues/842), [#843](https://github.com/hydro-project/hydroflow/issues/843), [#844](https://github.com/hydro-project/hydroflow/issues/844), [#845](https://github.com/hydro-project/hydroflow/issues/845), [#846](https://github.com/hydro-project/hydroflow/issues/846), [#848](https://github.com/hydro-project/hydroflow/issues/848), [#851](https://github.com/hydro-project/hydroflow/issues/851), [#853](https://github.com/hydro-project/hydroflow/issues/853), [#857](https://github.com/hydro-project/hydroflow/issues/857), [#861](https://github.com/hydro-project/hydroflow/issues/861), [#870](https://github.com/hydro-project/hydroflow/issues/870), [#872](https://github.com/hydro-project/hydroflow/issues/872), [#874](https://github.com/hydro-project/hydroflow/issues/874), [#878](https://github.com/hydro-project/hydroflow/issues/878), [#880](https://github.com/hydro-project/hydroflow/issues/880) @@ -656,8 +1256,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 34 commits contributed to the release over the course of 31 calendar days. - - 33 days passed between releases. + - 34 commits contributed to the release. - 31 commits were understood as [conventional](https://www.conventionalcommits.org). 
- 25 unique issues were worked on: [#739](https://github.com/hydro-project/hydroflow/issues/739), [#743](https://github.com/hydro-project/hydroflow/issues/743), [#745](https://github.com/hydro-project/hydroflow/issues/745), [#748](https://github.com/hydro-project/hydroflow/issues/748), [#749](https://github.com/hydro-project/hydroflow/issues/749), [#755](https://github.com/hydro-project/hydroflow/issues/755), [#761](https://github.com/hydro-project/hydroflow/issues/761), [#763](https://github.com/hydro-project/hydroflow/issues/763), [#765](https://github.com/hydro-project/hydroflow/issues/765), [#772](https://github.com/hydro-project/hydroflow/issues/772), [#773](https://github.com/hydro-project/hydroflow/issues/773), [#774](https://github.com/hydro-project/hydroflow/issues/774), [#775](https://github.com/hydro-project/hydroflow/issues/775), [#778](https://github.com/hydro-project/hydroflow/issues/778), [#780](https://github.com/hydro-project/hydroflow/issues/780), [#784](https://github.com/hydro-project/hydroflow/issues/784), [#788](https://github.com/hydro-project/hydroflow/issues/788), [#789](https://github.com/hydro-project/hydroflow/issues/789), [#791](https://github.com/hydro-project/hydroflow/issues/791), [#792](https://github.com/hydro-project/hydroflow/issues/792), [#799](https://github.com/hydro-project/hydroflow/issues/799), [#801](https://github.com/hydro-project/hydroflow/issues/801), [#803](https://github.com/hydro-project/hydroflow/issues/803), [#804](https://github.com/hydro-project/hydroflow/issues/804), [#809](https://github.com/hydro-project/hydroflow/issues/809) @@ -751,7 +1350,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - 4 commits contributed to the release. - - 1 day passed between releases. - 3 commits were understood as [conventional](https://www.conventionalcommits.org). 
- 0 issues like '(#ID)' were seen in commit messages @@ -828,7 +1426,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - 18 commits contributed to the release. - - 6 days passed between releases. - 17 commits were understood as [conventional](https://www.conventionalcommits.org). - 12 unique issues were worked on: [#686](https://github.com/hydro-project/hydroflow/issues/686), [#690](https://github.com/hydro-project/hydroflow/issues/690), [#692](https://github.com/hydro-project/hydroflow/issues/692), [#696](https://github.com/hydro-project/hydroflow/issues/696), [#697](https://github.com/hydro-project/hydroflow/issues/697), [#702](https://github.com/hydro-project/hydroflow/issues/702), [#706](https://github.com/hydro-project/hydroflow/issues/706), [#708](https://github.com/hydro-project/hydroflow/issues/708), [#714](https://github.com/hydro-project/hydroflow/issues/714), [#716](https://github.com/hydro-project/hydroflow/issues/716), [#719](https://github.com/hydro-project/hydroflow/issues/719), [#721](https://github.com/hydro-project/hydroflow/issues/721) @@ -897,7 +1494,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - 8 commits contributed to the release. - - 2 days passed between releases. - 5 commits were understood as [conventional](https://www.conventionalcommits.org). - 4 unique issues were worked on: [#661](https://github.com/hydro-project/hydroflow/issues/661), [#671](https://github.com/hydro-project/hydroflow/issues/671), [#677](https://github.com/hydro-project/hydroflow/issues/677), [#684](https://github.com/hydro-project/hydroflow/issues/684) @@ -945,8 +1541,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 19 commits contributed to the release over the course of 17 calendar days. - - 18 days passed between releases. + - 19 commits contributed to the release. 
- 5 commits were understood as [conventional](https://www.conventionalcommits.org). - 14 unique issues were worked on: [#625](https://github.com/hydro-project/hydroflow/issues/625), [#638](https://github.com/hydro-project/hydroflow/issues/638), [#640](https://github.com/hydro-project/hydroflow/issues/640), [#641](https://github.com/hydro-project/hydroflow/issues/641), [#642](https://github.com/hydro-project/hydroflow/issues/642), [#644](https://github.com/hydro-project/hydroflow/issues/644), [#649](https://github.com/hydro-project/hydroflow/issues/649), [#650](https://github.com/hydro-project/hydroflow/issues/650), [#651](https://github.com/hydro-project/hydroflow/issues/651), [#654](https://github.com/hydro-project/hydroflow/issues/654), [#656](https://github.com/hydro-project/hydroflow/issues/656), [#657](https://github.com/hydro-project/hydroflow/issues/657), [#660](https://github.com/hydro-project/hydroflow/issues/660), [#667](https://github.com/hydro-project/hydroflow/issues/667) @@ -1011,8 +1606,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 7 commits contributed to the release over the course of 5 calendar days. - - 6 days passed between releases. + - 7 commits contributed to the release. - 2 commits were understood as [conventional](https://www.conventionalcommits.org). 
- 6 unique issues were worked on: [#622](https://github.com/hydro-project/hydroflow/issues/622), [#629](https://github.com/hydro-project/hydroflow/issues/629), [#632](https://github.com/hydro-project/hydroflow/issues/632), [#633](https://github.com/hydro-project/hydroflow/issues/633), [#634](https://github.com/hydro-project/hydroflow/issues/634), [#635](https://github.com/hydro-project/hydroflow/issues/635) @@ -1038,7 +1632,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Release hydroflow v0.0.1 ([`0f91773`](https://github.com/hydro-project/hydroflow/commit/0f917734bd2c840e47acf788322dd1a4a4100488)) -## 0.0.0 (2023-04-26) +## 0.0.0 (2023-04-25) @@ -1064,7 +1658,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - - 489 commits contributed to the release over the course of 552 calendar days. + - 489 commits contributed to the release. - 6 commits were understood as [conventional](https://www.conventionalcommits.org). 
- 191 unique issues were worked on: [#100](https://github.com/hydro-project/hydroflow/issues/100), [#101](https://github.com/hydro-project/hydroflow/issues/101), [#102](https://github.com/hydro-project/hydroflow/issues/102), [#111](https://github.com/hydro-project/hydroflow/issues/111), [#113](https://github.com/hydro-project/hydroflow/issues/113), [#116](https://github.com/hydro-project/hydroflow/issues/116), [#12](https://github.com/hydro-project/hydroflow/issues/12), [#120](https://github.com/hydro-project/hydroflow/issues/120), [#121](https://github.com/hydro-project/hydroflow/issues/121), [#122](https://github.com/hydro-project/hydroflow/issues/122), [#127](https://github.com/hydro-project/hydroflow/issues/127), [#137](https://github.com/hydro-project/hydroflow/issues/137), [#146](https://github.com/hydro-project/hydroflow/issues/146), [#147](https://github.com/hydro-project/hydroflow/issues/147), [#15](https://github.com/hydro-project/hydroflow/issues/15), [#155](https://github.com/hydro-project/hydroflow/issues/155), [#160](https://github.com/hydro-project/hydroflow/issues/160), [#162](https://github.com/hydro-project/hydroflow/issues/162), [#163](https://github.com/hydro-project/hydroflow/issues/163), [#164](https://github.com/hydro-project/hydroflow/issues/164), [#165](https://github.com/hydro-project/hydroflow/issues/165), [#18](https://github.com/hydro-project/hydroflow/issues/18), [#184](https://github.com/hydro-project/hydroflow/issues/184), [#187](https://github.com/hydro-project/hydroflow/issues/187), [#190](https://github.com/hydro-project/hydroflow/issues/190), [#207](https://github.com/hydro-project/hydroflow/issues/207), [#209](https://github.com/hydro-project/hydroflow/issues/209), [#211](https://github.com/hydro-project/hydroflow/issues/211), [#213](https://github.com/hydro-project/hydroflow/issues/213), 
[#222](https://github.com/hydro-project/hydroflow/issues/222), [#230](https://github.com/hydro-project/hydroflow/issues/230), [#231](https://github.com/hydro-project/hydroflow/issues/231), [#234](https://github.com/hydro-project/hydroflow/issues/234), [#235](https://github.com/hydro-project/hydroflow/issues/235), [#236](https://github.com/hydro-project/hydroflow/issues/236), [#237](https://github.com/hydro-project/hydroflow/issues/237), [#238](https://github.com/hydro-project/hydroflow/issues/238), [#239](https://github.com/hydro-project/hydroflow/issues/239), [#248](https://github.com/hydro-project/hydroflow/issues/248), [#249](https://github.com/hydro-project/hydroflow/issues/249), [#250](https://github.com/hydro-project/hydroflow/issues/250), [#254](https://github.com/hydro-project/hydroflow/issues/254), [#259](https://github.com/hydro-project/hydroflow/issues/259), [#261](https://github.com/hydro-project/hydroflow/issues/261), [#262](https://github.com/hydro-project/hydroflow/issues/262), [#268](https://github.com/hydro-project/hydroflow/issues/268), [#277](https://github.com/hydro-project/hydroflow/issues/277), [#278](https://github.com/hydro-project/hydroflow/issues/278), [#279](https://github.com/hydro-project/hydroflow/issues/279), [#28](https://github.com/hydro-project/hydroflow/issues/28), [#282](https://github.com/hydro-project/hydroflow/issues/282), [#284](https://github.com/hydro-project/hydroflow/issues/284), [#285](https://github.com/hydro-project/hydroflow/issues/285), [#288](https://github.com/hydro-project/hydroflow/issues/288), [#294](https://github.com/hydro-project/hydroflow/issues/294), [#295](https://github.com/hydro-project/hydroflow/issues/295), [#296](https://github.com/hydro-project/hydroflow/issues/296), [#298](https://github.com/hydro-project/hydroflow/issues/298), 
[#3](https://github.com/hydro-project/hydroflow/issues/3), [#30](https://github.com/hydro-project/hydroflow/issues/30), [#300](https://github.com/hydro-project/hydroflow/issues/300), [#301](https://github.com/hydro-project/hydroflow/issues/301), [#307](https://github.com/hydro-project/hydroflow/issues/307), [#309](https://github.com/hydro-project/hydroflow/issues/309), [#32](https://github.com/hydro-project/hydroflow/issues/32), [#321](https://github.com/hydro-project/hydroflow/issues/321), [#329](https://github.com/hydro-project/hydroflow/issues/329), [#33](https://github.com/hydro-project/hydroflow/issues/33), [#333](https://github.com/hydro-project/hydroflow/issues/333), [#34](https://github.com/hydro-project/hydroflow/issues/34), [#344](https://github.com/hydro-project/hydroflow/issues/344), [#350](https://github.com/hydro-project/hydroflow/issues/350), [#358](https://github.com/hydro-project/hydroflow/issues/358), [#363](https://github.com/hydro-project/hydroflow/issues/363), [#369](https://github.com/hydro-project/hydroflow/issues/369), [#37](https://github.com/hydro-project/hydroflow/issues/37), [#372](https://github.com/hydro-project/hydroflow/issues/372), [#374](https://github.com/hydro-project/hydroflow/issues/374), [#376](https://github.com/hydro-project/hydroflow/issues/376), [#38](https://github.com/hydro-project/hydroflow/issues/38), [#381](https://github.com/hydro-project/hydroflow/issues/381), [#382](https://github.com/hydro-project/hydroflow/issues/382), [#383](https://github.com/hydro-project/hydroflow/issues/383), [#388](https://github.com/hydro-project/hydroflow/issues/388), [#39](https://github.com/hydro-project/hydroflow/issues/39), [#397](https://github.com/hydro-project/hydroflow/issues/397), [#40](https://github.com/hydro-project/hydroflow/issues/40), [#403](https://github.com/hydro-project/hydroflow/issues/403), 
[#409](https://github.com/hydro-project/hydroflow/issues/409), [#411](https://github.com/hydro-project/hydroflow/issues/411), [#412](https://github.com/hydro-project/hydroflow/issues/412), [#413](https://github.com/hydro-project/hydroflow/issues/413), [#417](https://github.com/hydro-project/hydroflow/issues/417), [#420](https://github.com/hydro-project/hydroflow/issues/420), [#43](https://github.com/hydro-project/hydroflow/issues/43), [#431](https://github.com/hydro-project/hydroflow/issues/431), [#435](https://github.com/hydro-project/hydroflow/issues/435), [#437](https://github.com/hydro-project/hydroflow/issues/437), [#442](https://github.com/hydro-project/hydroflow/issues/442), [#443](https://github.com/hydro-project/hydroflow/issues/443), [#444](https://github.com/hydro-project/hydroflow/issues/444), [#445](https://github.com/hydro-project/hydroflow/issues/445), [#448 1/2](https://github.com/hydro-project/hydroflow/issues/448 1/2), [#448 2/2](https://github.com/hydro-project/hydroflow/issues/448 2/2), [#452](https://github.com/hydro-project/hydroflow/issues/452), [#459](https://github.com/hydro-project/hydroflow/issues/459), [#46](https://github.com/hydro-project/hydroflow/issues/46), [#460](https://github.com/hydro-project/hydroflow/issues/460), [#461](https://github.com/hydro-project/hydroflow/issues/461), [#465](https://github.com/hydro-project/hydroflow/issues/465), [#466](https://github.com/hydro-project/hydroflow/issues/466), [#468](https://github.com/hydro-project/hydroflow/issues/468), [#469](https://github.com/hydro-project/hydroflow/issues/469), [#470](https://github.com/hydro-project/hydroflow/issues/470), [#471](https://github.com/hydro-project/hydroflow/issues/471), [#472](https://github.com/hydro-project/hydroflow/issues/472), [#475](https://github.com/hydro-project/hydroflow/issues/475), 
[#477](https://github.com/hydro-project/hydroflow/issues/477), [#479](https://github.com/hydro-project/hydroflow/issues/479), [#48](https://github.com/hydro-project/hydroflow/issues/48), [#484](https://github.com/hydro-project/hydroflow/issues/484), [#487](https://github.com/hydro-project/hydroflow/issues/487), [#492](https://github.com/hydro-project/hydroflow/issues/492), [#493](https://github.com/hydro-project/hydroflow/issues/493), [#495](https://github.com/hydro-project/hydroflow/issues/495), [#497](https://github.com/hydro-project/hydroflow/issues/497), [#499](https://github.com/hydro-project/hydroflow/issues/499), [#500](https://github.com/hydro-project/hydroflow/issues/500), [#501](https://github.com/hydro-project/hydroflow/issues/501), [#502](https://github.com/hydro-project/hydroflow/issues/502), [#505](https://github.com/hydro-project/hydroflow/issues/505), [#509](https://github.com/hydro-project/hydroflow/issues/509), [#511](https://github.com/hydro-project/hydroflow/issues/511), [#512](https://github.com/hydro-project/hydroflow/issues/512), [#516](https://github.com/hydro-project/hydroflow/issues/516), [#518](https://github.com/hydro-project/hydroflow/issues/518), [#520](https://github.com/hydro-project/hydroflow/issues/520), [#521](https://github.com/hydro-project/hydroflow/issues/521), [#522](https://github.com/hydro-project/hydroflow/issues/522), [#523](https://github.com/hydro-project/hydroflow/issues/523), [#524](https://github.com/hydro-project/hydroflow/issues/524), [#526](https://github.com/hydro-project/hydroflow/issues/526), [#529](https://github.com/hydro-project/hydroflow/issues/529), [#530](https://github.com/hydro-project/hydroflow/issues/530), [#536](https://github.com/hydro-project/hydroflow/issues/536), [#538](https://github.com/hydro-project/hydroflow/issues/538), 
[#540](https://github.com/hydro-project/hydroflow/issues/540), [#541](https://github.com/hydro-project/hydroflow/issues/541), [#543](https://github.com/hydro-project/hydroflow/issues/543), [#547](https://github.com/hydro-project/hydroflow/issues/547), [#548 #550](https://github.com/hydro-project/hydroflow/issues/548 #550), [#549](https://github.com/hydro-project/hydroflow/issues/549), [#551](https://github.com/hydro-project/hydroflow/issues/551), [#555](https://github.com/hydro-project/hydroflow/issues/555), [#556](https://github.com/hydro-project/hydroflow/issues/556), [#558](https://github.com/hydro-project/hydroflow/issues/558), [#559](https://github.com/hydro-project/hydroflow/issues/559), [#56](https://github.com/hydro-project/hydroflow/issues/56), [#566](https://github.com/hydro-project/hydroflow/issues/566), [#567](https://github.com/hydro-project/hydroflow/issues/567), [#568](https://github.com/hydro-project/hydroflow/issues/568), [#571](https://github.com/hydro-project/hydroflow/issues/571), [#576](https://github.com/hydro-project/hydroflow/issues/576), [#578](https://github.com/hydro-project/hydroflow/issues/578), [#584](https://github.com/hydro-project/hydroflow/issues/584), [#586](https://github.com/hydro-project/hydroflow/issues/586), [#589](https://github.com/hydro-project/hydroflow/issues/589), [#590](https://github.com/hydro-project/hydroflow/issues/590), [#591](https://github.com/hydro-project/hydroflow/issues/591), [#593](https://github.com/hydro-project/hydroflow/issues/593), [#597](https://github.com/hydro-project/hydroflow/issues/597), [#598](https://github.com/hydro-project/hydroflow/issues/598), [#6](https://github.com/hydro-project/hydroflow/issues/6), [#60](https://github.com/hydro-project/hydroflow/issues/60), [#602](https://github.com/hydro-project/hydroflow/issues/602), 
[#605](https://github.com/hydro-project/hydroflow/issues/605), [#607](https://github.com/hydro-project/hydroflow/issues/607), [#608](https://github.com/hydro-project/hydroflow/issues/608), [#61](https://github.com/hydro-project/hydroflow/issues/61), [#617](https://github.com/hydro-project/hydroflow/issues/617), [#618](https://github.com/hydro-project/hydroflow/issues/618), [#63](https://github.com/hydro-project/hydroflow/issues/63), [#64](https://github.com/hydro-project/hydroflow/issues/64), [#8](https://github.com/hydro-project/hydroflow/issues/8), [#80](https://github.com/hydro-project/hydroflow/issues/80), [#84](https://github.com/hydro-project/hydroflow/issues/84), [#86](https://github.com/hydro-project/hydroflow/issues/86), [#89](https://github.com/hydro-project/hydroflow/issues/89), [#91](https://github.com/hydro-project/hydroflow/issues/91), [#95](https://github.com/hydro-project/hydroflow/issues/95), [#98](https://github.com/hydro-project/hydroflow/issues/98) diff --git a/hydroflow/Cargo.toml b/hydroflow/Cargo.toml index f30e1bfbfa05..236d5ba6baca 100644 --- a/hydroflow/Cargo.toml +++ b/hydroflow/Cargo.toml @@ -1,20 +1,20 @@ [package] name = "hydroflow" publish = true -version = "0.6.0" +version = "0.9.0" edition = "2021" license = "Apache-2.0" documentation = "https://docs.rs/hydroflow/" description = "Hydro's low-level dataflow runtime and IR" [features] -default = [ "macros" , "nightly", "debugging" ] +default = [ "macros", "nightly", "debugging" ] nightly = [ "hydroflow_macro", "hydroflow_macro/diagnostics" ] macros = [ "hydroflow_macro", "hydroflow_datalog" ] hydroflow_macro = [ "dep:hydroflow_macro" ] hydroflow_datalog = [ "dep:hydroflow_datalog" ] -cli_integration = [ "dep:hydroflow_cli_integration" ] +deploy_integration = [ "dep:hydroflow_deploy_integration" ] python = [ "dep:pyo3" ] debugging = [ "hydroflow_lang/debugging" ] @@ -26,39 +26,50 @@ required-features = [ "nightly" ] name = 
"python_udf" required-features = [ "python" ] +[[example]] +name = "modules_outer_join" +required-features = [ "debugging" ] + +[[example]] +name = "modules_triple_cross_join" +required-features = [ "debugging" ] + [dependencies] -bincode = "1.3" -byteorder = "1.4.3" +bincode = "1.3.1" +byteorder = "1.3.2" bytes = "1.1.0" -futures = "0.3" -hydroflow_cli_integration = { optional = true, path = "../hydro_deploy/hydroflow_cli_integration", version = "^0.5.1" } -hydroflow_datalog = { optional = true, path = "../hydroflow_datalog", version = "^0.6.0" } -hydroflow_lang = { path = "../hydroflow_lang", version = "^0.6.0" } -hydroflow_macro = { optional = true, path = "../hydroflow_macro", version = "^0.6.0" } -itertools = "0.10" -lattices = { path = "../lattices", version = "^0.5.3", features = [ "serde" ] } -pusherator = { path = "../pusherator", version = "^0.0.5" } -pyo3 = { optional = true, version = "0.18" } -ref-cast = "1.0" -regex = "1.8.4" +futures = "0.3.0" +hydroflow_deploy_integration = { optional = true, path = "../hydro_deploy/hydroflow_deploy_integration", version = "^0.9.0" } +hydroflow_datalog = { optional = true, path = "../hydroflow_datalog", version = "^0.9.0" } +hydroflow_lang = { path = "../hydroflow_lang", version = "^0.9.0", features = [ "clap-derive" ] } +hydroflow_macro = { optional = true, path = "../hydroflow_macro", version = "^0.9.0" } +itertools = "0.10.0" +lattices = { path = "../lattices", version = "^0.5.7", features = [ "serde" ] } +pusherator = { path = "../pusherator", version = "^0.0.8" } +pyo3 = { optional = true, version = "0.20" } +ref-cast = "1.0.0" +regex = "1.10.4" rustc-hash = "1.1.0" -sealed = "0.5" -serde = { version = "1", features = [ "derive" ] } -serde_json = "1" -slotmap = "1.0" -smallvec = "1.10.0" -tokio-stream = { version = "0.1.10", features = [ "io-util", "sync" ] } -tracing = "0.1" -variadics = { path = "../variadics", version = "^0.0.4" } -instant = { version = "0.1.12", features = ["wasm-bindgen"] } # 
Instant::now() is not supported on wasm, use this shim instead. +sealed = "0.5.0" +serde = { version = "1.0.197", features = [ "derive" ] } +serde_json = "1.0.115" +slotmap = "1.0.0" +smallvec = "1.6.1" +tokio-stream = { version = "0.1.3", default-features = false, features = [ "time", "io-util", "sync" ] } +tracing = "0.1.37" +variadics = { path = "../variadics", version = "^0.0.6" } +web-time = "1.0.0" + +# added to workaround `cargo smart-release` https://github.com/Byron/cargo-smart-release/issues/16 +multiplatform_test = { path = "../multiplatform_test", version = "^0.2.0", optional = true } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -tokio = { version = "1.16", features = [ "full" ] } -tokio-util = { version = "0.7.4", features = [ "net", "codec" ] } +tokio = { version = "1.29.0", features = [ "full" ] } +tokio-util = { version = "0.7.5", features = [ "net", "codec" ] } [target.'cfg(target_arch = "wasm32")'.dependencies] -tokio = { version = "1.16", features = [ "rt" , "sync", "macros", "io-util", "time" ] } -tokio-util = { version = "0.7.4", features = [ "codec" ] } +tokio = { version = "1.29.0", features = [ "rt" , "sync", "macros", "io-util", "time" ] } +tokio-util = { version = "0.7.5", features = [ "codec" ] } # We depend on getrandom transitively through rand. To compile getrandom to # WASM, we need to enable its "js" feature. However, rand does not expose a # passthrough to enable "js" on getrandom. 
As a workaround, we enable the @@ -68,25 +79,20 @@ getrandom = { version = "0.2.6", features = [ "js" ] } [dev-dependencies] chrono = { version = "0.4.20", features = [ "serde", "clock" ], default-features = false } -clap = { version = "4.1.8", features = [ "derive" ] } +clap = { version = "4.5.4", features = [ "derive" ] } colored = "2.0" -core_affinity = "0.5.10" -ctor = "0.2" -futures = { version = "0.3" } -hdrhistogram = "7" -insta = "1.7.1" -multiplatform_test = { path = "../multiplatform_test", version = "0.0.0" } -wasm-bindgen-test = "0.3.34" -rand = {version = "0.8.4", features = ["small_rng"]} +insta = "1.39" +multiplatform_test = { path = "../multiplatform_test", version = "^0.2.0" } +wasm-bindgen-test = "0.3.33" +rand = { version = "0.8.0", features = [ "small_rng" ] } rand_distr = "0.4.3" -regex = "1" -static_assertions = "1.1.0" -textnonce = "1.0.0" +regex = "1.10.4" +static_assertions = "1.0.0" time = "0.3" tracing-subscriber = { version = "0.3", features = [ "env-filter" ] } -trybuild = "1.0.80" -zipf = "7.0.0" +trybuild = "1" +zipf = "7" [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] # Rayon (rust data-parallelism library) does not compile on WASM. -criterion = { version = "0.5", features = [ "async_tokio", "html_reports" ] } +criterion = { version = "0.5.0", features = [ "async_tokio", "html_reports" ] } diff --git a/hydroflow/examples/chat/README.md b/hydroflow/examples/chat/README.md index a4021ceef608..24145a23cc57 100644 --- a/hydroflow/examples/chat/README.md +++ b/hydroflow/examples/chat/README.md @@ -1,22 +1,95 @@ -Simple chat example, with a single central server broadcasting to clients. +## Chat Examples +There are two flavors of the chat example. The first is a broadcast-based example in which multiple clients connect to a +single server. Whenever a client wishes to send a message, it sends it to the server and the server broadcasts it to +the other clients. The second example is a gossip-based, multi-server example. 
The example is compatible with the same +client used in the broadcast-based example. Clients can connect to any one of the running servers. As before, when a +client wishes to send a message, it sends it to the server. The server gossips the message to other servers using a +gossip algorithm. Whenever a server learns of a new message, it is broadcast to all the clients connected to it. + +### Broadcast Example To run the example, open 3 terminals. +#### Running the Server In one terminal run the server like so: +```shell +cargo run -p hydroflow --example chat -- --name "_" --role server +``` + +#### Running the Clients +In another terminal run the first client: +```shell +cargo run -p hydroflow --example chat -- --name "alice" --role client +``` + +In the third terminal run the second client: +```shell +cargo run -p hydroflow --example chat -- --name "bob" --role client +``` + +If you type in the client terminals the messages should appear everywhere. + +### Gossip Example +#### Running the Servers +The gossip-based servers rely on static membership for discovery. The servers run a gossip protocol (parallel to the +client-server protocol). The roles `gossiping-server1`, ..., `gossiping-server5` determine which pre-configured port +will be used by the server to send/receive gossip protocol messages. + +In (up to) five separate tabs, run the following servers. 
+ +##### First +```shell +cargo run -p hydroflow --example chat -- --name "_" --address 127.0.0.1:12345 --role gossiping-server1 +``` +##### Second +```shell +cargo run -p hydroflow --example chat -- --name "_" --address 127.0.0.1:12346 --role gossiping-server2 ``` -cargo run -p hydroflow --example chat -- --name "_" --role server --addr 127.0.0.1:12347 +##### Third +```shell +cargo run -p hydroflow --example chat -- --name "_" --address 127.0.0.1:12347 --role gossiping-server3 ``` +##### Fourth +```shell +cargo run -p hydroflow --example chat -- --name "_" --address 127.0.0.1:12348 --role gossiping-server4 +``` + +##### Fifth +```shell +cargo run -p hydroflow --example chat -- --name "_" --address 127.0.0.1:12349 --role gossiping-server5 +``` +#### Running the Clients In another terminal run the first client: +```shell +cargo run -p hydroflow --example chat -- --name "alice" --address 127.0.0.1:12345 --role client ``` -cargo run -p hydroflow --example chat -- --name "alice" --role client --server-addr 127.0.0.1:12347 + +In another terminal run the second client: +```shell +cargo run -p hydroflow --example chat -- --name "bob" --address 127.0.0.1:12349 --role client ``` -In the third terminal run the second client: +If you type in the client terminals the messages should appear everywhere. Give it a few seconds though - unlike the +broadcast example, the message delivery isn't instantaneous. The gossip protocol runs in cycles and it could take a few +cycles for the message to be delivered everywhere. + +### Dump Graphs of the Flows +#### Client +```shell +cargo run -p hydroflow --example chat -- --name "alice" --role client --graph mermaid ``` -cargo run -p hydroflow --example chat -- --name "bob" --role client --server-addr 127.0.0.1:12347 +#### Broadcast Server +```shell +cargo run -p hydroflow --example chat -- --name "_" --role server --graph mermaid ``` -If you type in the client terminals the messages should appear everywhere. 
+#### Gossip Server +```shell +cargo run -p hydroflow --example chat -- --name "_" --role gossiping-server1 --graph mermaid +``` -Adding the `--graph ` flag to the end of the command lines above will print out a node-and-edge diagram of the program. Supported values for `` include [mermaid](https://mermaid-js.github.io/) and [dot](https://graphviz.org/doc/info/lang.html). +### Display Help +```shell +cargo run -p hydroflow --example chat -- --help +``` \ No newline at end of file diff --git a/hydroflow/examples/chat/client.rs b/hydroflow/examples/chat/client.rs index a9e6592d4641..c926054459b4 100644 --- a/hydroflow/examples/chat/client.rs +++ b/hydroflow/examples/chat/client.rs @@ -1,10 +1,10 @@ use chrono::prelude::*; use colored::Colorize; use hydroflow::hydroflow_syntax; -use hydroflow::util::{UdpSink, UdpStream}; +use hydroflow::util::{bind_udp_bytes, ipv4_resolve}; use crate::protocol::Message; -use crate::Opts; +use crate::{default_server_address, Opts}; fn pretty_print_msg(nickname: String, message: String, ts: DateTime) { println!( @@ -19,10 +19,20 @@ fn pretty_print_msg(nickname: String, message: String, ts: DateTime) { ); } -pub(crate) async fn run_client(outbound: UdpSink, inbound: UdpStream, opts: Opts) { - // server_addr is required for client - let server_addr = opts.server_addr.expect("Client requires a server address"); - println!("Client live!"); +pub(crate) async fn run_client(opts: Opts) { + // Client listens on a port picked by the OS. + let client_addr = ipv4_resolve("localhost:0").unwrap(); + + // Use the server address that was provided in the command-line arguments, or use the default + // if one was not provided. + let server_addr = opts.address.unwrap_or_else(default_server_address); + + let (outbound, inbound, allocated_client_addr) = bind_udp_bytes(client_addr).await; + + println!( + "Client is live! Listening on {:?} and talking to server on {:?}", + allocated_client_addr, server_addr + ); let mut hf = hydroflow_syntax! 
{ // set up channels @@ -44,7 +54,7 @@ pub(crate) async fn run_client(outbound: UdpSink, inbound: UdpStream, opts: Opts message: l.unwrap(), ts: Utc::now()}) -> [input]msg_send; - inbound_chan[ConnectResponse] -> persist() -> [signal]msg_send; + inbound_chan[ConnectResponse] -> persist::<'static>() -> [signal]msg_send; msg_send = defer_signal() -> map(|msg| (msg, server_addr)) -> [1]outbound_chan; // receive and print messages @@ -52,6 +62,7 @@ pub(crate) async fn run_client(outbound: UdpSink, inbound: UdpStream, opts: Opts }; // optionally print the dataflow graph + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = hf .meta_graph() diff --git a/hydroflow/examples/chat/main.rs b/hydroflow/examples/chat/main.rs index 3e8fd1c4b403..afc61ce57360 100644 --- a/hydroflow/examples/chat/main.rs +++ b/hydroflow/examples/chat/main.rs @@ -2,18 +2,32 @@ use std::net::SocketAddr; use clap::{Parser, ValueEnum}; use client::run_client; -use hydroflow::util::{bind_udp_bytes, ipv4_resolve}; +use hydroflow::util::ipv4_resolve; use hydroflow_lang::graph::{WriteConfig, WriteGraphType}; use server::run_server; +use crate::randomized_gossiping_server::run_gossiping_server; + mod client; mod protocol; +mod randomized_gossiping_server; mod server; -#[derive(Clone, ValueEnum, Debug)] +#[derive(Clone, Copy, ValueEnum, Debug, Eq, PartialEq)] enum Role { Client, Server, + + // These roles are only used by the randomized-gossip variant of the chat example. 
+ GossipingServer1, + GossipingServer2, + GossipingServer3, + GossipingServer4, + GossipingServer5, +} + +pub fn default_server_address() -> SocketAddr { + ipv4_resolve("localhost:54321").unwrap() } #[derive(Parser, Debug)] @@ -23,9 +37,7 @@ struct Opts { #[clap(value_enum, long)] role: Role, #[clap(long, value_parser = ipv4_resolve)] - addr: Option, - #[clap(long, value_parser = ipv4_resolve)] - server_addr: Option, + address: Option, #[clap(long)] graph: Option, #[clap(flatten)] @@ -35,22 +47,19 @@ struct Opts { #[hydroflow::main] async fn main() { let opts = Opts::parse(); - // if no addr was provided, we ask the OS to assign a local port by passing in "localhost:0" - let addr = opts - .addr - .unwrap_or_else(|| ipv4_resolve("localhost:0").unwrap()); - - // allocate `outbound` sink and `inbound` stream - let (outbound, inbound, addr) = bind_udp_bytes(addr).await; - println!("Listening on {:?}", addr); match opts.role { Role::Client => { - run_client(outbound, inbound, opts).await; + run_client(opts).await; } Role::Server => { - run_server(outbound, inbound, opts).await; + run_server(opts).await; } + Role::GossipingServer1 + | Role::GossipingServer2 + | Role::GossipingServer3 + | Role::GossipingServer4 + | Role::GossipingServer5 => run_gossiping_server(opts).await, } } @@ -60,8 +69,10 @@ fn test() { use hydroflow::util::{run_cargo_example, wait_for_process_output}; - let (_server, _, mut server_output) = - run_cargo_example("chat", "--role server --name server --addr 127.0.0.1:11247"); + let (_server, _, mut server_output) = run_cargo_example( + "chat", + "--role server --name server --address 127.0.0.1:11247", + ); let mut server_output_so_far = String::new(); wait_for_process_output( @@ -72,12 +83,12 @@ fn test() { let (_client1, mut client1_input, mut client1_output) = run_cargo_example( "chat", - "--role client --name client1 --server-addr 127.0.0.1:11247", + "--role client --name client1 --address 127.0.0.1:11247", ); let (_client2, _, mut client2_output) = 
run_cargo_example( "chat", - "--role client --name client2 --server-addr 127.0.0.1:11247", + "--role client --name client2 --address 127.0.0.1:11247", ); let mut client1_output_so_far = String::new(); @@ -86,12 +97,12 @@ fn test() { wait_for_process_output( &mut client1_output_so_far, &mut client1_output, - "Client live!", + "Client is live!.", ); wait_for_process_output( &mut client2_output_so_far, &mut client2_output, - "Client live!", + "Client is live!", ); // wait 100ms so we don't drop a packet @@ -106,3 +117,74 @@ fn test() { ".*, .* client1: Hello", ); } + +#[test] +fn test_gossip() { + use std::io::Write; + + use hydroflow::util::{run_cargo_example, wait_for_process_output}; + + let (_server1, _, mut server1_output) = run_cargo_example( + "chat", + "--role gossiping-server1 --name server --address 127.0.0.1:11248", + ); + + let mut server1_output_so_far = String::new(); + wait_for_process_output( + &mut server1_output_so_far, + &mut server1_output, + "Server is live!", + ); + + let (_server2, _, mut server2_output) = run_cargo_example( + "chat", + "--role gossiping-server2 --name server --address 127.0.0.1:11249", + ); + + let mut server2_output_so_far = String::new(); + wait_for_process_output( + &mut server2_output_so_far, + &mut server2_output, + "Server is live!", + ); + + let (_client1, mut client1_input, mut client1_output) = run_cargo_example( + "chat", + "--role client --name client1 --address 127.0.0.1:11248", + ); + + let (_client2, _, mut client2_output) = run_cargo_example( + "chat", + "--role client --name client2 --address 127.0.0.1:11249", + ); + + let mut client1_output_so_far = String::new(); + let mut client2_output_so_far = String::new(); + + wait_for_process_output( + &mut client1_output_so_far, + &mut client1_output, + "Client is live!.", + ); + wait_for_process_output( + &mut client2_output_so_far, + &mut client2_output, + "Client is live!", + ); + + // wait 100ms so we don't drop a packet + let hundo_millis = 
std::time::Duration::from_millis(100); + std::thread::sleep(hundo_millis); + + // Since gossiping has a small probability of a message not being received (maybe more so with + // 2 servers), we define success as any one of these messages reaching. + for _ in 1..=50 { + client1_input.write_all(b"Hello\n").unwrap(); + } + + wait_for_process_output( + &mut client2_output_so_far, + &mut client2_output, + ".*, .* client1: Hello", + ); +} diff --git a/hydroflow/examples/chat/randomized_gossiping_server.rs b/hydroflow/examples/chat/randomized_gossiping_server.rs new file mode 100644 index 000000000000..8b7d21736b74 --- /dev/null +++ b/hydroflow/examples/chat/randomized_gossiping_server.rs @@ -0,0 +1,218 @@ +use std::collections::HashSet; +use std::net::SocketAddr; +use std::time::Duration; + +use chrono::{DateTime, Utc}; +use hydroflow::scheduled::graph::Hydroflow; +use hydroflow::util::{bind_udp_bytes, ipv4_resolve}; +use hydroflow_macro::hydroflow_syntax; +use rand::seq::SliceRandom; +use rand::thread_rng; +use serde::{Deserialize, Serialize}; + +use crate::protocol::{Message, MessageWithAddr}; +use crate::Role::{ + Client, GossipingServer1, GossipingServer2, GossipingServer3, GossipingServer4, + GossipingServer5, Server, +}; +use crate::{default_server_address, Opts, Role}; + +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug, Hash)] +pub struct ChatMessage { + nickname: String, + message: String, + ts: DateTime, +} + +/// Used to model add and remove operations on a set of infecting messages. +enum InfectionOperation { + /// Add an infecting message to the current set + InfectWithMessage { msg: ChatMessage }, + + /// Remove an infecting message from the current set + RemoveForMessage { msg: ChatMessage }, +} + +pub const REMOVAL_PROBABILITY: f32 = 1.0 / 4.0; + +/// Runs an instance of a server that gossips new chat messages with other instances of the server. 
+/// +/// The servers are protocol-compatible with the broadcast-based server run by +/// [crate::server::run_server], so can be used with the existing client +/// ([crate::client::run_client]). +/// +/// The implementation is based on "Epidemic algorithms for replicated database maintenance" +/// (https://dl.acm.org/doi/epdf/10.1145/41840.41841). Specifically, it implements push-based +/// "rumor-mongering" with a blind-coin removal process described below. +/// +/// At every "cycle" a server chooses, randomly, one peer from a group of five servers. It then +/// "pushes" all the "rumors" (messages) that it has heard so far to that peer. This is how new +/// messages propagate through the system. Without a removal process, the messages would bounce +/// around forever. +/// +/// A "blind-coin" removal process is used. After a server gossips the known rumors with randomly +/// selected peers, each message is dropped with a 1/K probability. K can be configured by changing +/// [REMOVAL_PROBABILITY]. The removal is "blind" because the server doesn't check if the receiving +/// peer already knew the message, i.e. it doesn't rely on a feedback mechanism from the peer to +/// drive the process. The removal is "coin" based because it relies on pure chance (instead of +/// keeping track using a counter). +/// +/// To keep things simple, the peer-group of servers is based on static membership - it contains +/// 5 members that communicate with each other on fixed ports. +pub(crate) async fn run_gossiping_server(opts: Opts) { + // If a server address & port are provided as command-line inputs, use those, else use the + // default. 
+ let server_address = opts.address.unwrap_or_else(default_server_address); + + let all_members = [ + GossipingServer1, + GossipingServer2, + GossipingServer3, + GossipingServer4, + GossipingServer5, + ]; + + let other_members: Vec = all_members + .into_iter() + .filter(|role| *role != opts.role) + .collect(); + + let gossip_listening_addr = gossip_address(&opts.role); + + println!("Starting server on {:?}", server_address); + + // Separate sinks and streams for client-server protocol & gossip protocol. + let (client_outbound, client_inbound, actual_server_addr) = + bind_udp_bytes(server_address).await; + let (gossip_outbound, gossip_inbound, _) = bind_udp_bytes(gossip_listening_addr).await; + + println!( + "Server is live! Listening on {:?}. Gossiping On: {:?}", + actual_server_addr, gossip_listening_addr + ); + let mut hf: Hydroflow = hydroflow_syntax! { + // Define shared inbound and outbound channels + client_out = union() -> dest_sink_serde(client_outbound); + client_in = source_stream_serde(client_inbound) + -> map(Result::unwrap) + -> map(|(msg, addr)| MessageWithAddr::from_message(msg, addr)) + -> demux_enum::(); + clients = client_in[ConnectRequest] -> map(|(addr,)| addr) -> tee(); + client_in[ConnectResponse] -> for_each(|(addr,)| println!("Received unexpected `ConnectResponse` as server from addr {}.", addr)); + + // Pipeline 1: Acknowledge client connections + clients[0] -> map(|addr| (Message::ConnectResponse, addr)) -> [0]client_out; + + // Pipeline 2: When a message arrives from a client, it is the first time the message is + // seen. Still, send it to the "maybe_new_messages" flow for simplicity. 
+ messages_from_connected_client = client_in[ChatMsg] + -> map(|(_addr, nickname, message, ts)| ChatMessage { nickname, message, ts }) + -> maybe_new_messages; + + // Pipeline 3: When you want to send a message to all the connected clients, send it to + // "broadcast" + clients[1] -> [1]broadcast; + broadcast = cross_join::<'tick, 'static>() -> [1]client_out; + + // Pipeline 3: Gossip-based broadcast to other servers. + gossip_out = dest_sink_serde(gossip_outbound); + gossip_in = source_stream_serde(gossip_inbound) + -> map(Result::unwrap) + -> map(|(message, _)| message) + -> maybe_new_messages; + + // If you think there may be a new message, send it here. + maybe_new_messages = union(); + + // actually_new_messages are a stream of messages that the server is definitely seeing + // for the first time. + actually_new_messages = difference() -> tee(); + maybe_new_messages -> [pos]actually_new_messages; + all_messages -> [neg]actually_new_messages; + + // When we have a new message, we should do 3 things + // 1. Add it to the set of known messages. + // 2. Broadcast it to the clients connected locally. + // 3. Add it to the set of messages currently infecting this server. + actually_new_messages -> defer_tick() -> all_messages; // Add to known messages + actually_new_messages + -> map(|chat_msg: ChatMessage| Message::ChatMsg { + nickname: chat_msg.nickname, + message: chat_msg.message, + ts: chat_msg.ts}) + -> [0]broadcast; // Broadcast to locally connected clients. + actually_new_messages + -> map(|msg: ChatMessage| InfectionOperation::InfectWithMessage { msg }) + -> infecting_messages; + + // Holds all the known messages. 
+ all_messages = fold::<'static>(HashSet::::new, |accum, message| { + accum.insert(message); + }) -> flatten(); + + // Holds a set of messages that are currently infecting this server + infecting_messages = union() -> fold::<'static>(HashSet::::new, |accum, op| { + match op { + InfectionOperation::InfectWithMessage{ msg } => {accum.insert(msg);}, + InfectionOperation::RemoveForMessage{ msg } => { accum.remove(&msg);} + } + }); + + // Infection process. + // Every 1 second, the infecting messages are dispatched to a randomly selected peer. They + // are blindly removed with a 1/K probability after this. + source_interval(Duration::from_secs(1)) -> [0]triggered_messages; // The time trigger to perform a round of gossip + triggered_messages = cross_join() + -> map(|(_, message)| { + // Choose a random peer + let random_peer = other_members.choose(&mut thread_rng()).unwrap(); + (message, gossip_address(random_peer)) + }) + -> tee(); + + infecting_messages -> flatten() -> [1]triggered_messages; + + triggered_messages + -> inspect(|(msg, addr)| println!("Gossiping {:?} to {:?}", msg, addr)) + -> gossip_out; + + triggered_messages + -> filter_map(|(msg, _addr)| { + if rand::random::() < REMOVAL_PROBABILITY{ + println!("Dropping Message {:?}", msg); + Some(InfectionOperation::RemoveForMessage{ msg }) + } else { + None + } + }) + -> defer_tick() + -> infecting_messages; + + }; + + #[cfg(feature = "debugging")] + if let Some(graph) = opts.graph { + let serde_graph = hf + .meta_graph() + .expect("No graph found, maybe failed to parse."); + serde_graph.open_graph(graph, opts.write_config).unwrap(); + } + + hf.run_async().await.unwrap(); +} + +/// The address on which the gossip protocol runs. Servers communicate with each other using these +/// addresses. This is different from the ports on which clients connect to servers. 
+fn gossip_address(role: &Role) -> SocketAddr { + match role { + Client | Server => { + panic!("Incorrect role {:?} for gossip server.", role) + } + GossipingServer1 => ipv4_resolve("localhost:54322"), + GossipingServer2 => ipv4_resolve("localhost:54323"), + GossipingServer3 => ipv4_resolve("localhost:54324"), + GossipingServer4 => ipv4_resolve("localhost:54325"), + GossipingServer5 => ipv4_resolve("localhost:54326"), + } + .unwrap() +} diff --git a/hydroflow/examples/chat/server.rs b/hydroflow/examples/chat/server.rs index 19ec87c0cbe1..f4cc07fea06f 100644 --- a/hydroflow/examples/chat/server.rs +++ b/hydroflow/examples/chat/server.rs @@ -1,13 +1,23 @@ use hydroflow::hydroflow_syntax; use hydroflow::scheduled::graph::Hydroflow; -use hydroflow::util::{UdpSink, UdpStream}; +use hydroflow::util::bind_udp_bytes; use crate::protocol::{Message, MessageWithAddr}; -use crate::Opts; +use crate::{default_server_address, Opts}; -pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts) { +pub(crate) async fn run_server(opts: Opts) { println!("Server live!"); + // If a server address & port are provided as command-line inputs, use those, else use the + // default. + let server_address = opts.address.unwrap_or_else(default_server_address); + + println!("Starting server on {:?}", server_address); + + let (outbound, inbound, actual_server_addr) = bind_udp_bytes(server_address).await; + + println!("Server is live! Listening on {:?}", actual_server_addr); + let mut hf: Hydroflow = hydroflow_syntax! 
{ // Define shared inbound and outbound channels outbound_chan = union() -> dest_sink_serde(outbound); @@ -27,6 +37,7 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts broadcast = cross_join::<'tick, 'static>() -> [1]outbound_chan; }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = hf .meta_graph() diff --git a/hydroflow/examples/deadlock_detector/peer.rs b/hydroflow/examples/deadlock_detector/peer.rs index 6de3bbd2fb3e..1402ebce45de 100644 --- a/hydroflow/examples/deadlock_detector/peer.rs +++ b/hydroflow/examples/deadlock_detector/peer.rs @@ -47,8 +47,8 @@ pub(crate) async fn run_detector(opts: Opts, peer_list: Vec) { // setup gossip channel to all peers. gen_bool chooses True with the odds passed in. gossip_join = cross_join::<'tick>() -> filter(|_| gen_bool(0.8)) -> outbound_chan; - gossip = map(identity) -> persist() -> [0]gossip_join; - peers[1] -> persist() -> [1]gossip_join; + gossip = map(identity) -> persist::<'static>() -> [0]gossip_join; + peers[1] -> persist::<'static>() -> [1]gossip_join; peers[2] -> for_each(|s| println!("Peer: {:?}", s)); // prompt for input @@ -102,6 +102,7 @@ pub(crate) async fn run_detector(opts: Opts, peer_list: Vec) { }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = hf .meta_graph() diff --git a/hydroflow/examples/example_surface_flows_1_basic.rs b/hydroflow/examples/example_surface_flows_1_basic.rs new file mode 100644 index 000000000000..af13b5816f2b --- /dev/null +++ b/hydroflow/examples/example_surface_flows_1_basic.rs @@ -0,0 +1,9 @@ +use hydroflow::hydroflow_syntax; + +pub fn main() { + let mut flow = hydroflow_syntax! 
{ + source_iter(vec!["Hello", "world"]) + -> map(|x| x.to_uppercase()) -> for_each(|x| println!("{}", x)); + }; + flow.run_available(); +} diff --git a/hydroflow/examples/example_surface_flows_2_varname.rs b/hydroflow/examples/example_surface_flows_2_varname.rs new file mode 100644 index 000000000000..c8871d4e853f --- /dev/null +++ b/hydroflow/examples/example_surface_flows_2_varname.rs @@ -0,0 +1,9 @@ +use hydroflow::hydroflow_syntax; + +pub fn main() { + let mut flow = hydroflow_syntax! { + source_iter(vec!["Hello", "world"]) -> upper_print; + upper_print = map(|x| x.to_uppercase()) -> for_each(|x| println!("{}", x)); + }; + flow.run_available(); +} diff --git a/hydroflow/examples/example_surface_flows_3_ports.rs b/hydroflow/examples/example_surface_flows_3_ports.rs new file mode 100644 index 000000000000..3cdd887a22e2 --- /dev/null +++ b/hydroflow/examples/example_surface_flows_3_ports.rs @@ -0,0 +1,17 @@ +use hydroflow::hydroflow_syntax; + +pub fn main() { + let mut flow = hydroflow_syntax! { + my_tee = source_iter(vec!["Hello", "world"]) -> tee(); + my_tee -> map(|x| x.to_uppercase()) -> [low_road]my_union; + my_tee -> map(|x| x.to_lowercase()) -> [high_road]my_union; + my_union = union() -> for_each(|x| println!("{}", x)); + }; + println!( + "{}", + flow.meta_graph() + .expect("No graph found, maybe failed to parse.") + .to_mermaid(&Default::default()) + ); + flow.run_available(); +} diff --git a/hydroflow/examples/example_surface_flows_4_context.rs b/hydroflow/examples/example_surface_flows_4_context.rs new file mode 100644 index 000000000000..6650a3564d0b --- /dev/null +++ b/hydroflow/examples/example_surface_flows_4_context.rs @@ -0,0 +1,9 @@ +use hydroflow::hydroflow_syntax; + +pub fn main() { + let mut flow = hydroflow_syntax! 
{ + source_iter([()]) + -> for_each(|()| println!("Current tick: {}, stratum: {}", context.current_tick(), context.current_stratum())); + }; + flow.run_available(); +} diff --git a/hydroflow/examples/kvs/client.rs b/hydroflow/examples/kvs/client.rs index e5126f5e06c3..c090c753b3df 100644 --- a/hydroflow/examples/kvs/client.rs +++ b/hydroflow/examples/kvs/client.rs @@ -30,12 +30,14 @@ pub(crate) async fn run_client( inbound_chan -> for_each(|(response, _addr): (KvsResponse, _)| println!("Got a Response: {:?}", response)); }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = hf .meta_graph() .expect("No graph found, maybe failed to parse."); serde_graph.open_graph(graph, opts.write_config).unwrap(); } + let _ = opts; hf.run_async().await.unwrap(); } diff --git a/hydroflow/examples/kvs/server.rs b/hydroflow/examples/kvs/server.rs index 6121aa138b39..845e85b36265 100644 --- a/hydroflow/examples/kvs/server.rs +++ b/hydroflow/examples/kvs/server.rs @@ -12,7 +12,6 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts // Setup network channels. 
network_send = dest_sink_serde(outbound); network_recv = source_stream_serde(inbound) - -> _upcast(Some(Delta)) -> map(Result::unwrap) -> inspect(|(msg, addr)| println!("Message received {:?} from {:?}", msg, addr)) -> map(|(msg, addr)| KvsMessageWithAddr::from_message(msg, addr)) @@ -33,12 +32,14 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts -> network_send; }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = hf .meta_graph() .expect("No graph found, maybe failed to parse."); serde_graph.open_graph(graph, opts.write_config).unwrap(); } + let _ = opts; hf.run_async().await.unwrap(); } diff --git a/hydroflow/examples/kvs_bench/README.md b/hydroflow/examples/kvs_bench/README.md index a1d702ccdca8..946f1b404169 100644 --- a/hydroflow/examples/kvs_bench/README.md +++ b/hydroflow/examples/kvs_bench/README.md @@ -1,5 +1,11 @@ # `kvs_bench` +For run information: ``` -cargo run -p hydroflow --example kvs_bench -- bench +cargo run -p hydroflow --example kvs_bench -- help +``` + +Example: +``` +cargo run -p hydroflow --example kvs_bench -- bench --threads 8 ``` diff --git a/hydroflow/examples/kvs_bench/main.rs b/hydroflow/examples/kvs_bench/main.rs index 1f63696e979f..635fdfb86c08 100644 --- a/hydroflow/examples/kvs_bench/main.rs +++ b/hydroflow/examples/kvs_bench/main.rs @@ -3,6 +3,7 @@ mod protocol; mod server; use std::collections::HashMap; +use std::num::ParseFloatError; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -26,18 +27,23 @@ struct Cli { #[derive(Debug, Subcommand)] enum Commands { Bench { + /// Number of threads to run on . #[clap(long, default_value_t = 1)] threads: usize, + /// `s` value for the zipf sampling distribution for keys. #[clap(long, default_value_t = 4.0)] dist: f64, - #[clap(long, default_value_t = 2)] - warmup: u64, + /// How long to warm up for, in seconds. 
+ #[clap(long, default_value = "2", value_parser = clap_duration_from_secs)] + warmup: Duration, - #[clap(long, default_value_t = 10)] - duration: u64, + /// How long to run for, in seconds. + #[clap(long, default_value = "10", value_parser = clap_duration_from_secs)] + duration: Duration, + /// Write the puts/s every second while running. #[clap(long, default_value_t = false)] report: bool, @@ -48,6 +54,11 @@ enum Commands { }, } +/// Parse duration from float string for clap args. +fn clap_duration_from_secs(arg: &str) -> Result { + arg.parse().map(Duration::from_secs_f32) +} + pub struct Topology where RX: Stream, @@ -137,14 +148,14 @@ fn main() { let mut total_writes_so_far = 0; - std::thread::sleep(Duration::from_secs(warmup)); + std::thread::sleep(warmup); get_reset_throughputs(); let start_time = Instant::now(); let mut time_last_interval = start_time; loop { - if start_time.elapsed().as_secs_f64() >= duration as f64 { + if start_time.elapsed() >= duration { break; } diff --git a/hydroflow/examples/kvs_bench/protocol/mod.rs b/hydroflow/examples/kvs_bench/protocol/mod.rs index 950735c34a45..6bbd083fb271 100644 --- a/hydroflow/examples/kvs_bench/protocol/mod.rs +++ b/hydroflow/examples/kvs_bench/protocol/mod.rs @@ -38,6 +38,7 @@ pub enum KvsResponse { _PutResponse { key: u64, }, + #[allow(dead_code)] GetResponse { key: u64, reg: MyLastWriteWins, diff --git a/hydroflow/examples/kvs_bench/protocol/serialization/lattices/with_bot.rs b/hydroflow/examples/kvs_bench/protocol/serialization/lattices/with_bot.rs index d3ce22087fa1..f5204743b698 100644 --- a/hydroflow/examples/kvs_bench/protocol/serialization/lattices/with_bot.rs +++ b/hydroflow/examples/kvs_bench/protocol/serialization/lattices/with_bot.rs @@ -19,7 +19,7 @@ impl<'a, const SIZE: usize> Serialize for WithBotWrapper<'a, SIZE> { where S: Serializer, { - if let Some(inner) = &self.0 .0 { + if let Some(inner) = self.0.as_reveal_ref() { serializer.serialize_some(&PointWrapper(inner)) } else { 
serializer.serialize_none() diff --git a/hydroflow/examples/kvs_bench/protocol/serialization/mod.rs b/hydroflow/examples/kvs_bench/protocol/serialization/mod.rs index d352bfdbaf0e..db7686f995a2 100644 --- a/hydroflow/examples/kvs_bench/protocol/serialization/mod.rs +++ b/hydroflow/examples/kvs_bench/protocol/serialization/mod.rs @@ -115,8 +115,8 @@ enum KvsRequestField { Gossip, Delete, } -struct KVSRequestFieldVisitor; -impl<'de> Visitor<'de> for KVSRequestFieldVisitor { +struct KvsRequestFieldVisitor; +impl<'de> Visitor<'de> for KvsRequestFieldVisitor { type Value = KvsRequestField; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { @@ -154,6 +154,6 @@ impl<'de> Deserialize<'de> for KvsRequestField { where D: Deserializer<'de>, { - Deserializer::deserialize_identifier(deserializer, KVSRequestFieldVisitor) + Deserializer::deserialize_identifier(deserializer, KvsRequestFieldVisitor) } } diff --git a/hydroflow/examples/kvs_bench/server.rs b/hydroflow/examples/kvs_bench/server.rs index 62a2d1fcaad9..30687f20692c 100644 --- a/hydroflow/examples/kvs_bench/server.rs +++ b/hydroflow/examples/kvs_bench/server.rs @@ -8,6 +8,7 @@ use bytes::{BufMut, Bytes, BytesMut}; use futures::Stream; use hydroflow::compiled::pull::HalfMultisetJoinState; use hydroflow::hydroflow_syntax; +use hydroflow::scheduled::ticks::TickInstant; use hydroflow_lang::graph::{WriteConfig, WriteGraphType}; use lattices::map_union::{MapUnionHashMap, MapUnionSingletonMap}; use lattices::set_union::SetUnionSingletonSet; @@ -122,15 +123,15 @@ pub fn run_server( let mut pre_gen_index = 0; let pre_gen_random_numbers: Vec = (0..(128*1024)).map(|_| rng.sample(dist) as u64).collect(); - let create_unique_id = move |server_id: u128, tick: usize, e: u128| -> u128 { - assert!(tick < 1_000_000_000); + let create_unique_id = move |server_id: u128, tick: TickInstant, e: u128| -> u128 { + assert!(tick < TickInstant(1_000_000_000)); assert!(e < 1_000_000_000); 
(relatively_recent_timestamp.load(Ordering::Relaxed) as u128) .checked_mul(100).unwrap() .checked_add(server_id).unwrap() .checked_mul(1_000_000_000).unwrap() - .checked_add(tick as u128).unwrap() + .checked_add(tick.0 as u128).unwrap() .checked_mul(1_000_000_000).unwrap() .checked_add(e).unwrap() }; diff --git a/hydroflow/examples/kvs_mut/client.rs b/hydroflow/examples/kvs_mut/client.rs index e5126f5e06c3..c090c753b3df 100644 --- a/hydroflow/examples/kvs_mut/client.rs +++ b/hydroflow/examples/kvs_mut/client.rs @@ -30,12 +30,14 @@ pub(crate) async fn run_client( inbound_chan -> for_each(|(response, _addr): (KvsResponse, _)| println!("Got a Response: {:?}", response)); }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = hf .meta_graph() .expect("No graph found, maybe failed to parse."); serde_graph.open_graph(graph, opts.write_config).unwrap(); } + let _ = opts; hf.run_async().await.unwrap(); } diff --git a/hydroflow/examples/kvs_mut/server.rs b/hydroflow/examples/kvs_mut/server.rs index 254315ab2e4a..61f0d6e1181d 100644 --- a/hydroflow/examples/kvs_mut/server.rs +++ b/hydroflow/examples/kvs_mut/server.rs @@ -12,7 +12,6 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts // Setup network channels. network_send = dest_sink_serde(outbound); network_recv = source_stream_serde(inbound) - -> _upcast(Some(Delta)) -> map(Result::unwrap) -> inspect(|(msg, addr)| println!("Message received {:?} from {:?}", msg, addr)) -> map(|(msg, addr)| KvsMessageWithAddr::from_message(msg, addr)) @@ -35,7 +34,7 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts ], } }) - -> persist_mut_keyed() + -> persist_mut_keyed::<'static>() -> [0]lookup; gets -> [1]lookup; // Join PUTs and GETs by key, persisting the PUTs. 
@@ -48,12 +47,14 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts -> network_send; }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = hf .meta_graph() .expect("No graph found, maybe failed to parse."); serde_graph.open_graph(graph, opts.write_config).unwrap(); } + let _ = opts; hf.run_async().await.unwrap(); } diff --git a/hydroflow/examples/kvs_replicated/client.rs b/hydroflow/examples/kvs_replicated/client.rs index c14c01a45157..eb4f2c882d5e 100644 --- a/hydroflow/examples/kvs_replicated/client.rs +++ b/hydroflow/examples/kvs_replicated/client.rs @@ -24,6 +24,7 @@ pub(crate) async fn run_client(outbound: UdpSink, inbound: UdpStream, opts: Opts inbound_chan -> for_each(|(response, _addr): (KvsMessage, _)| println!("Got a Response: {:?}", response)); }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = hf .meta_graph() diff --git a/hydroflow/examples/kvs_replicated/server.rs b/hydroflow/examples/kvs_replicated/server.rs index 49cb004844d3..82a0d98257dd 100644 --- a/hydroflow/examples/kvs_replicated/server.rs +++ b/hydroflow/examples/kvs_replicated/server.rs @@ -14,7 +14,6 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts // Setup network channels. 
network_send = union() -> dest_sink_serde(outbound); network_recv = source_stream_serde(inbound) - -> _upcast(Some(Delta)) -> map(Result::unwrap) -> inspect(|(msg, addr)| println!("Message received {:?} from {:?}", msg, addr)) -> map(|(msg, addr)| KvsMessageWithAddr::from_message(msg, addr)) @@ -28,7 +27,7 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts // Join PUTs and GETs by key writes -> map(|(key, value, _addr)| (key, value)) -> writes_store; - writes_store = persist() -> tee(); + writes_store = persist::<'static>() -> tee(); writes_store -> [0]lookup; gets -> [1]lookup; lookup = join(); @@ -40,7 +39,7 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts -> network_send; // Join as a peer if peer_server is set. - source_iter_delta(peer_server) -> map(|peer_addr| (KvsMessage::PeerJoin, peer_addr)) -> network_send; + source_iter(peer_server) -> map(|peer_addr| (KvsMessage::PeerJoin, peer_addr)) -> network_send; // Peers: When a new peer joins, send them all data. writes_store -> [0]peer_join; @@ -51,8 +50,8 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts // Outbound gossip. Send updates to peers. 
peers -> peer_store; - source_iter_delta(peer_server) -> peer_store; - peer_store = union() -> persist(); + source_iter(peer_server) -> peer_store; + peer_store = union() -> persist::<'static>(); writes -> [0]outbound_gossip; peer_store -> [1]outbound_gossip; outbound_gossip = cross_join() @@ -62,6 +61,7 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts -> network_send; }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = hf .meta_graph() diff --git a/hydroflow/examples/lamport_clock/client.rs b/hydroflow/examples/lamport_clock/client.rs index 40caff5c3bad..680ca15b5b12 100644 --- a/hydroflow/examples/lamport_clock/client.rs +++ b/hydroflow/examples/lamport_clock/client.rs @@ -49,6 +49,7 @@ pub(crate) async fn run_client(outbound: UdpSink, inbound: UdpStream, opts: Opts stamped_output[send] -> outbound_chan; }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = flow .meta_graph() diff --git a/hydroflow/examples/lamport_clock/server.rs b/hydroflow/examples/lamport_clock/server.rs index b36d4dbff967..76ae73dbb572 100644 --- a/hydroflow/examples/lamport_clock/server.rs +++ b/hydroflow/examples/lamport_clock/server.rs @@ -41,12 +41,14 @@ pub(crate) async fn run_server(outbound: UdpSink, inbound: UdpStream, opts: Opts -> dest_sink_serde(outbound); }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = flow .meta_graph() .expect("No graph found, maybe failed to parse."); serde_graph.open_graph(graph, opts.write_config).unwrap(); } + let _ = opts; // run the server flow.run_async().await; diff --git a/hydroflow/examples/rga/datalog_agg.rs b/hydroflow/examples/rga/datalog_agg.rs index d516c3983fbe..3455351f7054 100644 --- a/hydroflow/examples/rga/datalog_agg.rs +++ b/hydroflow/examples/rga/datalog_agg.rs @@ -24,7 +24,7 @@ pub(crate) fn rga_datalog_agg( firstLastChild = insertAfter[firstLastChild] -> map(|(c, p)| (p, c)) -> 
fold_keyed::<'static, Timestamp, (Timestamp, Timestamp)>( - || (Timestamp{node_ts: 0, node_id: 0}, Timestamp{node_ts: std::usize::MAX, node_id: std::usize::MAX}), + || (Timestamp{node_ts: 0, node_id: 0}, Timestamp{node_ts: usize::MAX, node_id: usize::MAX}), |(first, last): &mut (Timestamp, Timestamp), s2: Timestamp| { if s2 > *first { *first = s2 }; if s2 < *last { *last = s2 }; diff --git a/hydroflow/examples/rga/main.rs b/hydroflow/examples/rga/main.rs index af9ca546548c..52f96660d0a0 100644 --- a/hydroflow/examples/rga/main.rs +++ b/hydroflow/examples/rga/main.rs @@ -60,6 +60,7 @@ pub async fn main() { None => rga_adjacency(input_recv, rga_send, list_send), }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = hf .meta_graph() diff --git a/hydroflow/examples/shopping/driver.rs b/hydroflow/examples/shopping/driver.rs index 2ee5056ceabe..f7bfdd7c56de 100644 --- a/hydroflow/examples/shopping/driver.rs +++ b/hydroflow/examples/shopping/driver.rs @@ -230,6 +230,7 @@ pub(crate) async fn run_driver(opts: Opts) { }; // optionally print the dataflow graph + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = hf .meta_graph() diff --git a/hydroflow/examples/shopping/lattices.rs b/hydroflow/examples/shopping/lattices.rs index 13dc3c5364b8..b3c21b661f15 100644 --- a/hydroflow/examples/shopping/lattices.rs +++ b/hydroflow/examples/shopping/lattices.rs @@ -253,9 +253,9 @@ mod test { } } - hydroflow::lattices::test::check_lattice_ord(&test_vec); - hydroflow::lattices::test::check_partial_ord_properties(&test_vec); - hydroflow::lattices::test::check_lattice_properties(&test_vec); + lattices::test::check_lattice_ord(&test_vec); + lattices::test::check_partial_ord_properties(&test_vec); + lattices::test::check_lattice_properties(&test_vec); } #[test] @@ -268,8 +268,8 @@ mod test { } } - hydroflow::lattices::test::check_lattice_ord(&test_vec); - hydroflow::lattices::test::check_partial_ord_properties(&test_vec); - 
hydroflow::lattices::test::check_lattice_properties(&test_vec); + lattices::test::check_lattice_ord(&test_vec); + lattices::test::check_partial_ord_properties(&test_vec); + lattices::test::check_lattice_properties(&test_vec); } } diff --git a/hydroflow/examples/three_clique/main.rs b/hydroflow/examples/three_clique/main.rs index 48acb316a2c3..7c38d89616f9 100644 --- a/hydroflow/examples/three_clique/main.rs +++ b/hydroflow/examples/three_clique/main.rs @@ -42,12 +42,14 @@ pub fn main() { }) -> for_each(|e| println!("three_clique found: {:?}", e)); }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = df .meta_graph() .expect("No graph found, maybe failed to parse."); serde_graph.open_graph(graph, opts.write_config).unwrap(); } + let _ = opts; df.run_available(); diff --git a/hydroflow/examples/two_pc/coordinator.rs b/hydroflow/examples/two_pc/coordinator.rs index 72bd6ad9b33e..c71b68d0f516 100644 --- a/hydroflow/examples/two_pc/coordinator.rs +++ b/hydroflow/examples/two_pc/coordinator.rs @@ -25,7 +25,7 @@ pub(crate) async fn run_coordinator(outbound: UdpSink, inbound: UdpStream, opts: // 2. coordinator send final decision, subordinates ACK // 3. 
coordinate sends END, subordinates respond with ENDED // After phase 3 we delete the xid from the phase_map - phase_map = union() -> persist_mut_keyed(); + phase_map = union() -> persist_mut_keyed::<'static>(); // set up channels outbound_chan = tee(); @@ -127,6 +127,7 @@ pub(crate) async fn run_coordinator(outbound: UdpSink, inbound: UdpStream, opts: // Handler for ended acknowledgments not necessary; we just print them }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = df .meta_graph() diff --git a/hydroflow/examples/two_pc/subordinate.rs b/hydroflow/examples/two_pc/subordinate.rs index 60dcc41e3e7c..52a1e703fa1b 100644 --- a/hydroflow/examples/two_pc/subordinate.rs +++ b/hydroflow/examples/two_pc/subordinate.rs @@ -61,6 +61,7 @@ pub(crate) async fn run_subordinate(outbound: UdpSink, inbound: UdpStream, opts: }) -> [2]outbound_chan; }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = df .meta_graph() diff --git a/hydroflow/examples/vector_clock/client.rs b/hydroflow/examples/vector_clock/client.rs index 70dd3452f338..7a172a870a74 100644 --- a/hydroflow/examples/vector_clock/client.rs +++ b/hydroflow/examples/vector_clock/client.rs @@ -7,7 +7,7 @@ use lattices::map_union::MapUnionSingletonMap; use lattices::{Max, Merge}; use crate::protocol::{EchoMsg, VecClock}; -use crate::{GraphType, Opts}; +use crate::Opts; pub(crate) async fn run_client( outbound: UdpSink, @@ -53,18 +53,19 @@ pub(crate) async fn run_client( stamped_output[send] -> outbound_chan; }; + #[cfg(feature = "debugging")] if let Some(graph) = opts.graph { let serde_graph = flow .meta_graph() .expect("No graph found, maybe failed to parse."); match graph { - GraphType::Mermaid => { + crate::GraphType::Mermaid => { serde_graph.open_mermaid(&Default::default()).unwrap(); } - GraphType::Dot => { + crate::GraphType::Dot => { serde_graph.open_dot(&Default::default()).unwrap(); } - GraphType::Json => { + crate::GraphType::Json => { 
unimplemented!(); } } diff --git a/hydroflow/src/declarative_macro.rs b/hydroflow/src/declarative_macro.rs index c5c048f4293b..54d790c9c7bd 100644 --- a/hydroflow/src/declarative_macro.rs +++ b/hydroflow/src/declarative_macro.rs @@ -62,9 +62,8 @@ macro_rules! hydroflow_expect_warnings { let actuals = __hf.diagnostics().expect("Expected `diagnostics()` to be set."); let actuals_len = actuals.len(); let actuals = std::collections::BTreeSet::from_iter(actuals.iter().cloned().map(|mut actual| { - println!("X {}", actual.to_string()); actual.span.line = actual.span.line.saturating_sub(__line); - std::borrow::Cow::Owned(actual.to_string().replace(__file, "$FILE")) + std::borrow::Cow::<'static, str>::Owned(actual.to_string().replace(__file, "$FILE")) })); let expecteds = [ diff --git a/hydroflow/src/lib.rs b/hydroflow/src/lib.rs index 9d1c541769e4..2dc75426a9ef 100644 --- a/hydroflow/src/lib.rs +++ b/hydroflow/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(nightly, feature(never_type))] +#![cfg_attr(feature = "nightly", feature(never_type))] #![allow(type_alias_bounds)] #![allow(clippy::let_and_return)] #![allow(clippy::iter_with_drain)] @@ -28,12 +28,10 @@ pub mod util; #[cfg(feature = "python")] pub use pyo3; -#[cfg(feature = "tracing")] -pub use tracing; pub use variadics::{self, var_args, var_expr, var_type}; pub use { - bincode, bytes, futures, hydroflow_lang as lang, instant, itertools, lattices, pusherator, - rustc_hash, serde, serde_json, tokio, tokio_stream, tokio_util, + bincode, bytes, futures, hydroflow_lang as lang, itertools, lattices, pusherator, rustc_hash, + serde, serde_json, tokio, tokio_stream, tokio_util, tracing, web_time, }; /// `#[macro_use]` automagically brings the declarative macro export to the crate-level. 
@@ -46,10 +44,11 @@ pub use hydroflow_macro::{ hydroflow_test as test, monotonic_fn, morphism, DemuxEnum, }; -#[cfg(not(nightly))] /// Stand-in for the [nightly "never" type `!`](https://doc.rust-lang.org/std/primitive.never.html) +#[cfg(not(feature = "nightly"))] pub type Never = std::convert::Infallible; -#[cfg(nightly)] +/// The [nightly "never" type `!`](https://doc.rust-lang.org/std/primitive.never.html) +#[cfg(feature = "nightly")] pub type Never = !; #[cfg(doctest)] diff --git a/hydroflow/src/scheduled/context.rs b/hydroflow/src/scheduled/context.rs index 833af16cdd56..029f43736ba4 100644 --- a/hydroflow/src/scheduled/context.rs +++ b/hydroflow/src/scheduled/context.rs @@ -3,15 +3,16 @@ use std::any::Any; use std::future::Future; use std::marker::PhantomData; +use std::ops::DerefMut; use std::pin::Pin; -use instant::Instant; use tokio::sync::mpsc::UnboundedSender; use tokio::task::JoinHandle; +use web_time::SystemTime; -use super::graph::StateData; use super::state::StateHandle; use super::{StateId, SubgraphId}; +use crate::scheduled::ticks::TickInstant; /// The main state of the Hydroflow instance, which is provided as a reference /// to each operator as it is run. @@ -21,36 +22,37 @@ use super::{StateId, SubgraphId}; /// Before the `Context` is provided to a running operator, the `subgraph_id` /// field must be updated. pub struct Context { - pub(crate) states: Vec, + states: Vec, // TODO(mingwei): as long as this is here, it's impossible to know when all work is done. // Second field (bool) is for if the event is an external "important" event (true). pub(crate) event_queue_send: UnboundedSender<(SubgraphId, bool)>, - pub(crate) current_tick: usize, + pub(crate) current_tick: TickInstant, pub(crate) current_stratum: usize, - pub(crate) current_tick_start: Instant, - pub(crate) subgraph_last_tick_run_in: Option, + pub(crate) current_tick_start: SystemTime, + pub(crate) subgraph_last_tick_run_in: Option, /// The SubgraphId of the currently running operator. 
When this context is /// not being forwarded to a running operator, this field is (mostly) /// meaningless. pub(crate) subgraph_id: SubgraphId, - pub(crate) tasks_to_spawn: Vec + 'static>>>, + tasks_to_spawn: Vec + 'static>>>, /// Join handles for spawned tasks. - pub(crate) task_join_handles: Vec>, + task_join_handles: Vec>, } +/// Public APIs. impl Context { /// Gets the current tick (local time) count. - pub fn current_tick(&self) -> usize { + pub fn current_tick(&self) -> TickInstant { self.current_tick } /// Gets the timestamp of the beginning of the current tick. - pub fn current_tick_start(&self) -> Instant { + pub fn current_tick_start(&self) -> SystemTime { self.current_tick_start } @@ -137,6 +139,7 @@ impl Context { let state_data = StateData { state: Box::new(state), + tick_reset: None, }; self.states.push(state_data); @@ -146,6 +149,22 @@ impl Context { } } + /// Sets a hook to modify the state at the end of each tick, using the supplied closure. + pub fn set_state_tick_hook( + &mut self, + handle: StateHandle, + mut tick_hook_fn: impl 'static + FnMut(&mut T), + ) where + T: Any, + { + self.states + .get_mut(handle.state_id.0) + .expect("Failed to find state with given handle.") + .tick_reset = Some(Box::new(move |state| { + (tick_hook_fn)(state.downcast_mut::().unwrap()); + })); + } + /// Removes state from the context returns it as an owned heap value. pub fn remove_state(&mut self, handle: StateHandle) -> Box where @@ -187,3 +206,40 @@ impl Context { futures::future::join_all(self.task_join_handles.drain(..)).await; } } +/// Internal APIs. +impl Context { + /// Create a new context for the Hydroflow graph instance, used internally. 
+ pub(crate) fn new(event_queue_send: UnboundedSender<(SubgraphId, bool)>) -> Self { + Context { + states: Vec::new(), + + event_queue_send, + + current_stratum: 0, + current_tick: TickInstant::default(), + + current_tick_start: SystemTime::now(), + subgraph_last_tick_run_in: None, + + subgraph_id: SubgraphId(0), + + tasks_to_spawn: Vec::new(), + task_join_handles: Vec::new(), + } + } + + pub(crate) fn reset_state_at_end_of_tick(&mut self) { + for StateData { state, tick_reset } in self.states.iter_mut() { + if let Some(tick_reset) = tick_reset { + (tick_reset)(Box::deref_mut(state)); + } + } + } +} + +/// Internal struct containing a pointer to [`Hydroflow`]-owned state. +struct StateData { + state: Box, + tick_reset: Option, +} +type TickResetFn = Box; diff --git a/hydroflow/src/scheduled/graph.rs b/hydroflow/src/scheduled/graph.rs index 9ad2f9edfafb..10ab6a3375a4 100644 --- a/hydroflow/src/scheduled/graph.rs +++ b/hydroflow/src/scheduled/graph.rs @@ -9,24 +9,27 @@ use std::marker::PhantomData; use hydroflow_lang::diagnostic::{Diagnostic, SerdeSpan}; use hydroflow_lang::graph::HydroflowGraph; -use instant::Instant; use ref_cast::RefCast; +use smallvec::SmallVec; use tokio::sync::mpsc::{self, UnboundedReceiver}; +use web_time::SystemTime; use super::context::Context; use super::handoff::handoff_list::PortList; -use super::handoff::{Handoff, HandoffMeta}; +use super::handoff::{Handoff, HandoffMeta, TeeingHandoff}; use super::port::{RecvCtx, RecvPort, SendCtx, SendPort, RECV, SEND}; use super::reactor::Reactor; use super::state::StateHandle; use super::subgraph::Subgraph; use super::{HandoffId, SubgraphId}; +use crate::scheduled::ticks::{TickDuration, TickInstant}; use crate::Never; /// A Hydroflow graph. Owns, schedules, and runs the compiled subgraphs. pub struct Hydroflow<'a> { pub(super) subgraphs: Vec>, pub(super) context: Context, + handoffs: Vec, /// TODO(mingwei): separate scheduler into its own struct/trait? 
@@ -48,22 +51,7 @@ impl<'a> Default for Hydroflow<'a> { fn default() -> Self { let stratum_queues = vec![Default::default()]; // Always initialize stratum #0. let (event_queue_send, event_queue_recv) = mpsc::unbounded_channel(); - let context = Context { - states: Vec::new(), - - event_queue_send, - - current_stratum: 0, - current_tick: 0, - - current_tick_start: Instant::now(), - subgraph_last_tick_run_in: None, - - subgraph_id: SubgraphId(0), - - tasks_to_spawn: Vec::new(), - task_join_handles: Vec::new(), - }; + let context = Context::new(event_queue_send); Self { subgraphs: Vec::new(), context, @@ -79,6 +67,93 @@ impl<'a> Default for Hydroflow<'a> { } } } + +/// Methods for [`TeeingHandoff`] teeing and dropping. +impl<'a> Hydroflow<'a> { + /// Tees a [`TeeingHandoff`]. + pub fn teeing_handoff_tee( + &mut self, + tee_parent_port: &RecvPort>, + ) -> RecvPort> + where + T: Clone, + { + // Handoff ID of new tee output. + let new_hoff_id = HandoffId(self.handoffs.len()); + + // If we're teeing from a child make sure to find root. + let tee_root = self.handoffs[tee_parent_port.handoff_id.0].pred_handoffs[0]; + + // Set up teeing metadata. + // Go to `tee_root`'s successors and insert self (the new tee output). + let tee_root_data = &mut self.handoffs[tee_root.0]; + tee_root_data.succ_handoffs.push(new_hoff_id); + + // Add our new handoff id into the subgraph data if the send `tee_root` has already been + // used to add a subgraph. + assert!( + tee_root_data.preds.len() <= 1, + "Tee send side should only have one sender (or none set yet)." + ); + if let Some(&pred_sg_id) = tee_root_data.preds.first() { + self.subgraphs[pred_sg_id.0].succs.push(new_hoff_id); + } + + // Insert new handoff output. 
+ let teeing_handoff = tee_root_data + .handoff + .any_ref() + .downcast_ref::>() + .unwrap(); + let new_handoff = teeing_handoff.tee(); + let new_name = Cow::Owned(format!("{} tee {:?}", tee_root_data.name, new_hoff_id)); + let mut new_handoff_data = HandoffData::new(new_name, new_handoff, new_hoff_id); + // Set self's predecessor as `tee_root`. + new_handoff_data.pred_handoffs = vec![tee_root]; + self.handoffs.push(new_handoff_data); + + let output_port = RecvPort { + handoff_id: new_hoff_id, + _marker: PhantomData, + }; + output_port + } + + /// Marks an output of a [`TeeingHandoff`] as dropped so that no more data will be sent to it. + /// + /// It is recommended to not not use this method and instead simply avoid teeing a + /// [`TeeingHandoff`] when it is not needed. + pub fn teeing_handoff_drop(&mut self, tee_port: RecvPort>) + where + T: Clone, + { + let data = &self.handoffs[tee_port.handoff_id.0]; + let teeing_handoff = data + .handoff + .any_ref() + .downcast_ref::>() + .unwrap(); + teeing_handoff.drop(); + + let tee_root = data.pred_handoffs[0]; + let tee_root_data = &mut self.handoffs[tee_root.0]; + // Remove this output from the send succ handoff list. + tee_root_data + .succ_handoffs + .retain(|&succ_hoff| succ_hoff != tee_port.handoff_id); + // Remove from subgraph successors if send port was already connected. + assert!( + tee_root_data.preds.len() <= 1, + "Tee send side should only have one sender (or none set yet)." + ); + if let Some(&pred_sg_id) = tee_root_data.preds.first() { + self.subgraphs[pred_sg_id.0] + .succs + .retain(|&succ_hoff| succ_hoff != tee_port.handoff_id); + } + } +} + impl<'a> Hydroflow<'a> { /// Create a new empty Hydroflow graph. 
pub fn new() -> Self { @@ -93,7 +168,11 @@ impl<'a> Hydroflow<'a> { let mut op_inst_diagnostics = Vec::new(); meta_graph.insert_node_op_insts_all(&mut op_inst_diagnostics); - assert!(op_inst_diagnostics.is_empty()); + assert!( + op_inst_diagnostics.is_empty(), + "Expected no diagnostics, got: {:#?}", + op_inst_diagnostics + ); assert!(self.meta_graph.replace(meta_graph).is_none()); } @@ -128,7 +207,7 @@ impl<'a> Hydroflow<'a> { } /// Gets the current tick (local time) count. - pub fn current_tick(&self) -> usize { + pub fn current_tick(&self) -> TickInstant { self.context.current_tick } @@ -190,8 +269,12 @@ impl<'a> Hydroflow<'a> { /// Runs the current stratum of the dataflow until no more local work is available (does not receive events). /// Returns true if any work was done. - #[tracing::instrument(level = "trace", skip(self), fields(tick = self.context.current_tick, stratum = self.context.current_stratum), ret)] + #[tracing::instrument(level = "trace", skip(self), fields(tick = u64::from(self.context.current_tick), stratum = self.context.current_stratum), ret)] pub fn run_stratum(&mut self) -> bool { + // Make sure to spawn tasks once hydroflow is running! + // This drains the task buffer, so becomes a no-op after first call. + self.context.spawn_tasks(); + let current_tick = self.context.current_tick; let mut work_done = false; @@ -215,7 +298,6 @@ impl<'a> Hydroflow<'a> { } let sg_data = &self.subgraphs[sg_id.0]; - for &handoff_id in sg_data.succs.iter() { let handoff = &self.handoffs[handoff_id.0]; if !handoff.handoff.is_bottom() { @@ -259,7 +341,7 @@ impl<'a> Hydroflow<'a> { // Starting the tick, reset this to `false`. tracing::trace!("Starting tick, setting `can_start_tick = false`."); self.can_start_tick = false; - self.context.current_tick_start = Instant::now(); + self.context.current_tick_start = SystemTime::now(); // Ensure external events are received before running the tick. 
if !self.events_received_tick { @@ -273,7 +355,7 @@ impl<'a> Hydroflow<'a> { loop { tracing::trace!( - tick = self.context.current_tick, + tick = u64::from(self.context.current_tick), stratum = self.context.current_stratum, "Looking for work on stratum." ); @@ -281,7 +363,7 @@ impl<'a> Hydroflow<'a> { // If current stratum has work, return true. if !self.stratum_queues[self.context.current_stratum].is_empty() { tracing::trace!( - tick = self.context.current_tick, + tick = u64::from(self.context.current_tick), stratum = self.context.current_stratum, "Work found on stratum." ); @@ -295,11 +377,12 @@ impl<'a> Hydroflow<'a> { can_start_tick = self.can_start_tick, "End of tick {}, starting tick {}.", self.context.current_tick, - self.context.current_tick + 1, + self.context.current_tick + TickDuration::SINGLE_TICK, ); + self.context.reset_state_at_end_of_tick(); self.context.current_stratum = 0; - self.context.current_tick += 1; + self.context.current_tick += TickDuration::SINGLE_TICK; self.events_received_tick = false; if current_tick_only { @@ -311,7 +394,7 @@ impl<'a> Hydroflow<'a> { self.try_recv_events(); if std::mem::replace(&mut self.can_start_tick, false) { tracing::trace!( - tick = self.context.current_tick, + tick = u64::from(self.context.current_tick), "`can_start_tick` is `true`, continuing." ); // Do a full loop more to find where events have been added. @@ -356,7 +439,6 @@ impl<'a> Hydroflow<'a> { /// TODO(mingwei): Currently blocks forever, no notion of "completion." #[tracing::instrument(level = "trace", skip(self), ret)] pub async fn run_async(&mut self) -> Option { - self.context.spawn_tasks(); loop { // Run any work which is immediately available. self.run_available_async().await; @@ -483,6 +565,18 @@ impl<'a> Hydroflow<'a> { Some(count + extra_count) } + /// Schedules a subgraph to be run. See also: [`Context::schedule_subgraph`]. 
+ pub fn schedule_subgraph(&mut self, sg_id: SubgraphId) -> bool { + let sg_data = &self.subgraphs[sg_id.0]; + let already_scheduled = sg_data.is_scheduled.replace(true); + if !already_scheduled { + self.stratum_queues[sg_data.stratum].push_back(sg_id); + true + } else { + false + } + } + /// Adds a new compiled subgraph with the specified inputs and outputs in stratum 0. pub fn add_subgraph( &mut self, @@ -521,8 +615,8 @@ impl<'a> Hydroflow<'a> { let sg_id = SubgraphId(self.subgraphs.len()); let (mut subgraph_preds, mut subgraph_succs) = Default::default(); - recv_ports.set_graph_meta(&mut *self.handoffs, None, Some(sg_id), &mut subgraph_preds); - send_ports.set_graph_meta(&mut *self.handoffs, Some(sg_id), None, &mut subgraph_succs); + recv_ports.set_graph_meta(&mut *self.handoffs, &mut subgraph_preds, sg_id, true); + send_ports.set_graph_meta(&mut *self.handoffs, &mut subgraph_succs, sg_id, false); let subgraph = move |context: &mut Context, handoffs: &mut Vec| { let recv = recv_ports.make_ctx(&*handoffs); @@ -654,7 +748,8 @@ impl<'a> Hydroflow<'a> { // Create and insert handoff. let handoff = H::default(); - self.handoffs.push(HandoffData::new(name.into(), handoff)); + self.handoffs + .push(HandoffData::new(name.into(), handoff, handoff_id)); // Make ports. let input_port = SendPort { @@ -679,6 +774,19 @@ impl<'a> Hydroflow<'a> { self.context.add_state(state) } + /// Sets a hook to modify the state at the end of each tick, using the supplied closure. + /// + /// This is part of the "state API". + pub fn set_state_tick_hook( + &mut self, + handle: StateHandle, + tick_hook_fn: impl 'static + FnMut(&mut T), + ) where + T: Any, + { + self.context.set_state_tick_hook(handle, tick_hook_fn) + } + /// Gets a exclusive (mut) ref to the internal context, setting the subgraph ID. 
pub fn context_mut(&mut self, sg_id: SubgraphId) -> &mut Context { self.context.subgraph_id = sg_id; @@ -724,8 +832,23 @@ pub struct HandoffData { pub(super) name: Cow<'static, str>, /// Crate-visible to crate for `handoff_list` internals. pub(super) handoff: Box, - pub(super) preds: Vec, - pub(super) succs: Vec, + /// Preceeding subgraphs (including the send side of a teeing handoff). + pub(super) preds: SmallVec<[SubgraphId; 1]>, + /// Successor subgraphs (including recv sides of teeing handoffs). + pub(super) succs: SmallVec<[SubgraphId; 1]>, + + /// Predecessor handoffs, used by teeing handoffs. + /// Should be `self` on any teeing send sides (input). + /// Should be the send `HandoffId` if this is teeing recv side (output). + /// Should be just `self`'s `HandoffId` on other handoffs. + /// This field is only used in initialization. + pub(super) pred_handoffs: Vec, + /// Successor handoffs, used by teeing handoffs. + /// Should be a list of outputs on the teeing send side (input). + /// Should be `self` on any teeing recv sides (outputs). + /// Should be just `self`'s `HandoffId` on other handoffs. + /// This field is only used in initialization. + pub(super) succ_handoffs: Vec, } impl std::fmt::Debug for HandoffData { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { @@ -736,13 +859,20 @@ impl std::fmt::Debug for HandoffData { } } impl HandoffData { - pub fn new(name: Cow<'static, str>, handoff: impl 'static + HandoffMeta) -> Self { + /// New with `pred_handoffs` and `succ_handoffs` set to its own [`HandoffId`]: `vec![hoff_id]`. 
+ pub fn new( + name: Cow<'static, str>, + handoff: impl 'static + HandoffMeta, + hoff_id: HandoffId, + ) -> Self { let (preds, succs) = Default::default(); Self { name, handoff: Box::new(handoff), preds, succs, + pred_handoffs: vec![hoff_id], + succ_handoffs: vec![hoff_id], } } } @@ -770,7 +900,7 @@ pub(super) struct SubgraphData<'a> { is_scheduled: Cell, /// Keep track of the last tick that this subgraph was run in - last_tick_run_in: Option, + last_tick_run_in: Option, /// If this subgraph is marked as lazy, then sending data back to a lower stratum does not trigger a new tick to be run. is_lazy: bool, @@ -797,8 +927,3 @@ impl<'a> SubgraphData<'a> { } } } - -/// Internal struct containing a pointer to [`Hydroflow`]-owned state. -pub(crate) struct StateData { - pub state: Box, -} diff --git a/hydroflow/src/scheduled/handoff/handoff_list.rs b/hydroflow/src/scheduled/handoff/handoff_list.rs index 60729f995057..87d4e4f3776f 100644 --- a/hydroflow/src/scheduled/handoff/handoff_list.rs +++ b/hydroflow/src/scheduled/handoff/handoff_list.rs @@ -18,17 +18,29 @@ where S: Polarity, { /// Iteratively/recursively set the graph metadata for each port in this list. + /// + /// Specifically sets: + /// - `HandoffData::preds` and `HandoffData::succs` in the `handoffs` slice for the + /// handoffs in this [`PortList`] (using `pred` and/or `succ`). + /// - `out_handoff_ids` will be extended with all the handoff IDs in this [`PortList`]. + /// + /// `handoffs_are_preds`: + /// - `true`: Handoffs are predecessors (inputs) to subgraph `sg_id`. + /// - `false`: Handoffs are successors (outputs) from subgraph `sg_id`. fn set_graph_meta( &self, handoffs: &mut [HandoffData], - pred: Option, - succ: Option, out_handoff_ids: &mut Vec, + sg_id: SubgraphId, + handoffs_are_preds: bool, ); /// The [`Variadic`] return type of [`Self::make_ctx`]. type Ctx<'a>: Variadic; /// Iteratively/recursively construct a `Ctx` variadic list. 
+ /// + /// (Note that unlike [`Self::set_graph_meta`], this does not mess with pred/succ handoffs for + /// teeing). fn make_ctx<'a>(&self, handoffs: &'a [HandoffData]) -> Self::Ctx<'a>; } #[sealed] @@ -41,22 +53,33 @@ where fn set_graph_meta( &self, handoffs: &mut [HandoffData], - pred: Option, - succ: Option, out_handoff_ids: &mut Vec, + sg_id: SubgraphId, + handoffs_are_preds: bool, ) { let (this, rest) = self; + let this_handoff = &mut handoffs[this.handoff_id.0]; - out_handoff_ids.push(this.handoff_id); + // Set subgraph's info (`out_handoff_ids`) about neighbor handoffs. + // Use the "representative" handoff (pred or succ) for teeing handoffs, for the subgraph metadata. + // For regular Vec handoffs, `pred_handoffs` and `succ_handoffs` will just be the handoff itself. + out_handoff_ids.extend(if handoffs_are_preds { + this_handoff.pred_handoffs.iter().copied() + } else { + this_handoff.succ_handoffs.iter().copied() + }); - let handoff = handoffs.get_mut(this.handoff_id.0).unwrap(); - if let Some(pred) = pred { - handoff.preds.push(pred); - } - if let Some(succ) = succ { - handoff.succs.push(succ); + // Set handoff's info (`preds`/`succs`) about neighbor subgraph (`sg_id`). 
+ if handoffs_are_preds { + for succ_hoff in this_handoff.succ_handoffs.clone() { + handoffs[succ_hoff.0].succs.push(sg_id); + } + } else { + for pred_hoff in this_handoff.pred_handoffs.clone() { + handoffs[pred_hoff.0].preds.push(sg_id); + } } - rest.set_graph_meta(handoffs, pred, succ, out_handoff_ids); + rest.set_graph_meta(handoffs, out_handoff_ids, sg_id, handoffs_are_preds); } type Ctx<'a> = (&'a PortCtx, Rest::Ctx<'a>); @@ -83,9 +106,9 @@ where fn set_graph_meta( &self, _handoffs: &mut [HandoffData], - _pred: Option, - _succ: Option, _out_handoff_ids: &mut Vec, + _sg_id: SubgraphId, + _handoffs_are_preds: bool, ) { } diff --git a/hydroflow/src/scheduled/handoff/tee.rs b/hydroflow/src/scheduled/handoff/tee.rs index cb252f471b1d..c05db5fee20f 100644 --- a/hydroflow/src/scheduled/handoff/tee.rs +++ b/hydroflow/src/scheduled/handoff/tee.rs @@ -21,11 +21,12 @@ impl Default for ReaderHandoff { } struct TeeingHandoffInternal { - readers: Vec>, + /// (is alive, reader) + readers: Vec<(bool, ReaderHandoff)>, } -// A [Handoff] which is part of a "family" of handoffs. Writing to this handoff -// will write to every reader. New readers can be created by calling `tee`. +/// A [Handoff] which is part of a "family" of handoffs. Writing to this handoff +/// will write to every reader. New readers can be created by calling `tee`. #[derive(Clone)] pub struct TeeingHandoff where @@ -40,7 +41,7 @@ impl Default for TeeingHandoff { TeeingHandoff { read_from: 0, internal: Rc::new(RefCell::new(TeeingHandoffInternal { - readers: vec![Default::default()], + readers: vec![(true, ReaderHandoff::::default())], })), } } @@ -50,18 +51,24 @@ impl TeeingHandoff where T: Clone, { + /// Tee the internal shared datastructure to create a new tee output. 
#[must_use] - pub fn tee(&self) -> Self { + pub(crate) fn tee(&self) -> Self { let id = (*self.internal).borrow().readers.len(); (*self.internal) .borrow_mut() .readers - .push(ReaderHandoff::default()); + .push((true, ReaderHandoff::default())); Self { read_from: id, internal: self.internal.clone(), } } + + /// Mark this particular teeing handoff output as dead, so no more data will be written to it. + pub(crate) fn drop(&self) { + self.internal.borrow_mut().readers[self.read_from].0 = false; + } } impl HandoffMeta for TeeingHandoff { @@ -69,8 +76,13 @@ impl HandoffMeta for TeeingHandoff { self } + /// If this output's buffer is empty, return true. fn is_bottom(&self) -> bool { - true + self.internal.borrow().readers[self.read_from] + .1 + .contents + .iter() + .all(Vec::is_empty) } } @@ -78,7 +90,11 @@ impl Handoff for TeeingHandoff { type Inner = VecDeque>; fn take_inner(&self) -> Self::Inner { - std::mem::take(&mut (*self.internal).borrow_mut().readers[self.read_from].contents) + std::mem::take( + &mut (*self.internal).borrow_mut().readers[self.read_from] + .1 + .contents, + ) } fn borrow_mut_swap(&self) -> std::cell::RefMut { @@ -92,11 +108,16 @@ where { fn give(&self, vec: Vec) -> Vec { let readers = &mut (*self.internal).borrow_mut().readers; - for i in 0..(readers.len() - 1) { - readers[i].contents.push_back(vec.clone()); + if let Some((last, rest)) = readers.split_last_mut() { + for reader in rest { + if reader.0 { + reader.1.contents.push_back(vec.clone()); + } + } + if last.0 { + last.1.contents.push_back(vec); + } } - let last = readers.len() - 1; - readers[last].contents.push_back(vec); Vec::new() } } diff --git a/hydroflow/src/scheduled/mod.rs b/hydroflow/src/scheduled/mod.rs index e414fdfab324..0b385ee01a83 100644 --- a/hydroflow/src/scheduled/mod.rs +++ b/hydroflow/src/scheduled/mod.rs @@ -19,6 +19,8 @@ pub mod reactor; pub mod state; pub(crate) mod subgraph; +pub mod ticks; + /// A subgraph's ID. 
Invalid if used in a different [`graph::Hydroflow`] /// instance than the original that created it. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize)] @@ -32,7 +34,7 @@ impl Display for SubgraphId { /// A handoff's ID. Invalid if used in a different [`graph::Hydroflow`] /// instance than the original that created it. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)] #[repr(transparent)] pub struct HandoffId(pub(crate) usize); impl Display for HandoffId { diff --git a/hydroflow/src/scheduled/port.rs b/hydroflow/src/scheduled/port.rs index fc7a7870b9b8..0af3a316b19a 100644 --- a/hydroflow/src/scheduled/port.rs +++ b/hydroflow/src/scheduled/port.rs @@ -6,7 +6,8 @@ use ref_cast::RefCast; use sealed::sealed; use super::HandoffId; -use crate::scheduled::handoff::{CanReceive, Handoff, TryCanReceive}; +use crate::scheduled::graph::Hydroflow; +use crate::scheduled::handoff::{CanReceive, Handoff, TeeingHandoff, TryCanReceive}; /// An empty trait used to denote [`Polarity`]: either **send** or **receive**. /// @@ -19,10 +20,12 @@ pub trait Polarity: 'static {} /// An uninstantiable type used to tag port [`Polarity`] as **send**. /// /// See also: [`RECV`]. +#[allow(clippy::upper_case_acronyms)] pub enum SEND {} /// An uninstantiable type used to tag port [`Polarity`] as **receive**. /// /// See also: [`SEND`]. +#[allow(clippy::upper_case_acronyms)] pub enum RECV {} #[sealed] impl Polarity for SEND {} @@ -30,7 +33,7 @@ impl Polarity for SEND {} impl Polarity for RECV {} /// Lightweight ID struct representing an input or output port for a [`Handoff`] added to a -/// [`Hydroflow`](super::graph::Hydroflow) instance.. +/// [`Hydroflow`] instance.. #[must_use] pub struct Port where @@ -45,6 +48,23 @@ pub type SendPort = Port; /// Recv-specific variant of [`Port`]. An input port. pub type RecvPort = Port; +/// Methods for [`TeeingHandoff`] teeing and dropping. 
+impl RecvPort> { + /// Tees this [`TeeingHandoff`], given the [`Hydroflow`] instance it belongs to. + pub fn tee(&self, hf: &mut Hydroflow) -> RecvPort> { + hf.teeing_handoff_tee(self) + } + + /// Marks this output of a [`TeeingHandoff`] as dropped so that no more data will be sent to + /// it, given the [`Hydroflow`] instance it belongs to. + /// + /// It is recommended to not use this method and instead simply avoid teeing a + /// [`TeeingHandoff`] when it is not needed. + pub fn drop(self, hf: &mut Hydroflow) { + hf.teeing_handoff_drop(self) + } +} + /// Wrapper around a handoff to differentiate between output and input. #[derive(RefCast)] #[repr(transparent)] diff --git a/hydroflow/src/scheduled/reactor.rs b/hydroflow/src/scheduled/reactor.rs index 956861ed0c31..5add64ad9ff4 100644 --- a/hydroflow/src/scheduled/reactor.rs +++ b/hydroflow/src/scheduled/reactor.rs @@ -23,7 +23,6 @@ impl Reactor { self.event_queue_send.send((sg_id, true)) } - #[cfg(feature = "async")] /// Convert this `Reactor` into a [`std::task::Waker`] for use with async runtimes. pub fn into_waker(self, sg_id: SubgraphId) -> std::task::Waker { use std::sync::Arc; diff --git a/hydroflow/src/scheduled/ticks.rs b/hydroflow/src/scheduled/ticks.rs new file mode 100644 index 000000000000..22b981a99c00 --- /dev/null +++ b/hydroflow/src/scheduled/ticks.rs @@ -0,0 +1,247 @@ +//! This module contains types to work with ticks. +//! +//! Each iteration of a Hydroflow transducer loop is called a tick. Associated with the transducer +//! is a clock value, which tells you how many ticks were executed by this transducer prior to the +//! current tick. Each transducer produces totally ordered, sequentially increasing clock values, +//! which you can think of as the "local logical time" at the transducer. + +use std::fmt::{Display, Formatter}; +use std::ops::{Add, AddAssign, Neg, Sub, SubAssign}; + +use serde::{Deserialize, Serialize}; + +/// A point in time during execution on a transducer. 
+/// +/// `TickInstant` instances can be subtracted to calculate the `TickDuration` between them. +/// +/// ``` +/// # use hydroflow::scheduled::ticks::{TickDuration, TickInstant}; +/// +/// assert_eq!(TickInstant(1) - TickInstant(0), TickDuration::SINGLE_TICK); +/// assert_eq!(TickInstant(0) - TickInstant(1), -TickDuration::SINGLE_TICK); +/// ``` +#[derive( + Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Hash, Default, Debug, Serialize, Deserialize, +)] +pub struct TickInstant(pub u64); + +/// The duration between two ticks. +/// +/// `TickDuration` instances can be negative to allow for calculation of `TickInstant` instances in the past. +/// +/// ``` +/// # use hydroflow::scheduled::ticks::{TickDuration, TickInstant}; +/// assert_eq!(TickInstant(1) + TickDuration::new(-1), TickInstant(0)) +/// ``` +/// `TickDuration` instances can be added/subtracted to/from other `TickDuration` instances +/// +/// ``` +/// # use hydroflow::scheduled::ticks::TickDuration; +/// assert_eq!(TickDuration::ZERO + TickDuration::ZERO, TickDuration::ZERO); +/// assert_eq!( +/// TickDuration::ZERO + TickDuration::SINGLE_TICK, +/// TickDuration::SINGLE_TICK +/// ); +/// assert_eq!( +/// TickDuration::SINGLE_TICK - TickDuration::ZERO, +/// TickDuration::SINGLE_TICK +/// ); +/// assert_eq!( +/// TickDuration::SINGLE_TICK - TickDuration::SINGLE_TICK, +/// TickDuration::ZERO +/// ); +/// assert_eq!( +/// TickDuration::ZERO - TickDuration::SINGLE_TICK, +/// -TickDuration::SINGLE_TICK +/// ); +/// ``` +#[derive( + Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Hash, Default, Debug, Serialize, Deserialize, +)] +pub struct TickDuration { + /// The length of the duration, measured in ticks. + pub ticks: i64, +} + +impl TickInstant { + /// Create a new TickInstant + /// + /// The specified parameter indicates the number of ticks that have elapsed on the transducer, + /// prior to this one. 
+ pub fn new(ticks: u64) -> Self { + TickInstant(ticks) + } +} + +impl TickDuration { + /// A zero duration + /// + /// It is the identity element for addition for both `TickDuration` and + /// `TickInstant` (i.e. adding zero duration to a `TickInstant` or `TickDuration` results in + /// the same `TickInstant` or `TickDuration`). + /// + /// ``` + /// # use hydroflow::scheduled::ticks::{TickDuration, TickInstant}; + /// # use hydroflow_lang::graph::ops::DelayType::Tick; + /// let ticks = TickInstant::new(100); + /// assert_eq!(ticks + TickDuration::ZERO, ticks); + /// assert_eq!(ticks - TickDuration::ZERO, ticks); + /// + /// let duration = TickDuration::new(100); + /// assert_eq!(duration + TickDuration::ZERO, duration); + /// assert_eq!(duration - TickDuration::ZERO, duration); + /// ``` + pub const ZERO: Self = TickDuration { ticks: 0 }; + + /// A single tick duration. + /// + /// It is the duration between two consecutive `TickInstant` instances. + /// + /// ``` + /// # use hydroflow::scheduled::ticks::{TickDuration, TickInstant}; + /// assert_eq!(TickInstant(0) + TickDuration::SINGLE_TICK, TickInstant(1)) + /// ``` + pub const SINGLE_TICK: Self = TickDuration { ticks: 1 }; + + /// Create a new `TickDuration` for the specified tick interval. + /// + /// A negative duration allows for calculating `TickInstants` in the past and represents a + /// backward movement in time. 
+ pub fn new(ticks: i64) -> TickDuration { + TickDuration { ticks } + } +} + +impl Add for TickInstant { + type Output = TickInstant; + + fn add(self, rhs: TickDuration) -> Self::Output { + let mut result = self; + result += rhs; + result + } +} + +impl AddAssign for TickInstant { + fn add_assign(&mut self, rhs: TickDuration) { + self.0 = self + .0 + .checked_add_signed(rhs.ticks) + .expect("overflow while adding tick duration to tick instant."); + } +} + +impl Sub for TickInstant { + type Output = TickInstant; + + fn sub(self, rhs: TickDuration) -> Self::Output { + let mut result = self; + result -= rhs; + result + } +} + +impl SubAssign for TickInstant { + fn sub_assign(&mut self, rhs: TickDuration) { + if rhs.ticks.is_positive() { + self.0 = self + .0 + .checked_sub(rhs.ticks.unsigned_abs()) + .expect("overflow while subtracting duration from instant."); + } else if rhs.ticks.is_negative() { + self.0 = self + .0 + .checked_add(rhs.ticks.unsigned_abs()) + .expect("overflow while subtracting duration from instant.") + } + } +} + +impl Sub for TickInstant { + type Output = TickDuration; + + fn sub(self, rhs: TickInstant) -> Self::Output { + let minuend = (self.0 as i64).wrapping_add(i64::MIN); + let subtrahend = (rhs.0 as i64).wrapping_add(i64::MIN); + let (difference, overflowed) = minuend.overflowing_sub(subtrahend); + if overflowed { + panic!("overflow while subtracting two TickInstants.") + } + TickDuration { ticks: difference } + } +} + +impl Add for TickDuration { + type Output = TickDuration; + + fn add(self, rhs: Self) -> Self::Output { + let mut result = self; + result += rhs; + result + } +} + +impl AddAssign for TickDuration { + fn add_assign(&mut self, rhs: Self) { + self.ticks = self + .ticks + .checked_add(rhs.ticks) + .expect("Overflow occurred while adding TickDuration instances.") + } +} + +impl Sub for TickDuration { + type Output = TickDuration; + + fn sub(self, rhs: Self) -> Self::Output { + let mut result = self; + result -= rhs; + result + } 
+} + +impl SubAssign for TickDuration { + fn sub_assign(&mut self, rhs: Self) { + self.ticks = self + .ticks + .checked_sub(rhs.ticks) + .expect("Overflow occurred while subtracting TickDuration instances."); + } +} + +impl Neg for TickDuration { + type Output = TickDuration; + + fn neg(self) -> Self::Output { + TickDuration { + ticks: self + .ticks + .checked_neg() + .expect("Overflow while negating duration."), + } + } +} + +impl Display for TickInstant { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "[{}]", self.0) + } +} + +impl Display for TickDuration { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "<{}>", self.ticks) + } +} + +impl From for u64 { + fn from(value: TickInstant) -> Self { + value.0 + } +} + +impl From for i64 { + fn from(value: TickDuration) -> Self { + value.ticks + } +} diff --git a/hydroflow/src/util/demux_enum.rs b/hydroflow/src/util/demux_enum.rs index 256e03d2907e..d9ef820dcd63 100644 --- a/hydroflow/src/util/demux_enum.rs +++ b/hydroflow/src/util/demux_enum.rs @@ -1,43 +1,34 @@ -//! Traits for the `demux_enum` derive and operator. +//! Trait for the `demux_enum` derive and operator. pub use hydroflow_macro::DemuxEnum; -use pusherator::demux::PusheratorList; -use pusherator::Pusherator; -use variadics::Variadic; /// Trait for use with the `demux_enum` operator. /// -/// This trait is meant to be derived: `#[derive(DemuEnum)]`. -pub trait DemuxEnum: DemuxEnumItems -where - Nexts: PusheratorListForItems, -{ - /// Pushes self into the corresponding output pusherator. - fn demux_enum(self, outputs: &mut Nexts); -} - -/// Fixed output item list for [`DemuxEnum`]. +/// This trait is meant to be derived: `#[derive(DemuxEnum)]`. /// -/// This trait is meant to be derived: `#[derive(DemuEnum)]`. -pub trait DemuxEnumItems { - /// A `var_type!(...)` list of items corresponding to each variant's output type. 
- type Items: Variadic; +/// The derive will implement this such that `Outputs` can be any tuple where each item is a +/// `Pusherator` that corresponds to each of the variants of the tuple, in alphabetic order. +#[diagnostic::on_unimplemented( + note = "ensure there is exactly one output for each enum variant.", + note = "ensure that the type for each output is a tuple of the field for the variant: `()`, `(a,)`, or `(a, b, ...)`." +)] +pub trait DemuxEnum: DemuxEnumBase { + /// Pushes self into the corresponding output pusherator in `outputs`. + fn demux_enum(self, outputs: &mut Outputs); } -/// Helper trait to bound a [`PusheratorList`] variadic to some coresponding item list variadic. -/// -/// A pusherator list `var_type!(PushA, PushB, PushC)` implements `PusheratorListForItems`, -/// where `PushA: Pusherator`, etc. -pub trait PusheratorListForItems: PusheratorList -where - Items: Variadic, -{ +/// Special case of [`DemuxEnum`] for when there is only one variant. +#[diagnostic::on_unimplemented( + note = "requires that the enum have only one variant.", + note = "ensure there are no missing outputs; there must be exactly one output for each enum variant." +)] +pub trait SingleVariant: DemuxEnumBase { + /// Output tuple type. + type Output; + /// Convert self into its single variant tuple Output. + fn single_variant(self) -> Self::Output; } -impl PusheratorListForItems<(Head, Rest)> for (HeadPush, RestPush) -where - HeadPush: Pusherator, - RestPush: PusheratorListForItems, - Rest: Variadic, -{ -} -impl PusheratorListForItems<()> for () {} + +/// Base implementation to constrain that [`DemuxEnum`] is implemented. 
+#[diagnostic::on_unimplemented(note = "use `#[derive(hydroflow::DemuxEnum)]`")] +pub trait DemuxEnumBase {} diff --git a/hydroflow/src/util/cli.rs b/hydroflow/src/util/deploy.rs similarity index 71% rename from hydroflow/src/util/cli.rs rename to hydroflow/src/util/deploy.rs index b83de573c95c..522550ec768d 100644 --- a/hydroflow/src/util/cli.rs +++ b/hydroflow/src/util/deploy.rs @@ -3,35 +3,41 @@ use std::cell::RefCell; use std::collections::HashMap; -pub use hydroflow_cli_integration::*; +pub use hydroflow_deploy_integration::*; use serde::de::DeserializeOwned; use crate::scheduled::graph::Hydroflow; -pub async fn launch( - flow: impl FnOnce(&HydroCLI) -> Hydroflow<'_>, -) { - let ports = init_no_ack_start::().await; - let flow = flow(&ports); +#[macro_export] +macro_rules! launch { + ($f:expr) => { + async { + let ports = $crate::util::deploy::init_no_ack_start().await; + let flow = $f(&ports); - println!("ack start"); + println!("ack start"); - launch_flow(flow).await; + $crate::util::deploy::launch_flow(flow).await + } + }; } +pub use crate::launch; + pub async fn launch_flow(mut flow: Hydroflow<'_>) { let stop = tokio::sync::oneshot::channel(); tokio::task::spawn_blocking(|| { let mut line = String::new(); std::io::stdin().read_line(&mut line).unwrap(); - assert!(line.starts_with("stop")); - stop.0.send(()).unwrap(); + if line.starts_with("stop") { + stop.0.send(()).unwrap(); + } else { + eprintln!("Unexpected stdin input: {:?}", line); + } }); let local_set = tokio::task::LocalSet::new(); - let flow = local_set.run_until(async move { - flow.run_async().await; - }); + let flow = local_set.run_until(flow.run_async()); tokio::select! { _ = stop.1 => {}, @@ -39,12 +45,14 @@ pub async fn launch_flow(mut flow: Hydroflow<'_>) { } } -pub struct HydroCLI> { +/// Contains runtime information passed by Hydro Deploy to a program, +/// describing how to connect to other services and metadata about them. 
+pub struct DeployPorts> { ports: RefCell>, pub meta: T, } -impl HydroCLI { +impl DeployPorts { pub fn port(&self, name: &str) -> ServerOrBound { self.ports .try_borrow_mut() @@ -54,7 +62,7 @@ impl HydroCLI { } } -async fn init_no_ack_start() -> HydroCLI { +pub async fn init_no_ack_start() -> DeployPorts { let mut input = String::new(); std::io::stdin().read_line(&mut input).unwrap(); let trimmed = input.trim(); @@ -93,7 +101,7 @@ async fn init_no_ack_start() -> HydroCLI { all_connected.insert(name, ServerOrBound::Bound(defn)); } - HydroCLI { + DeployPorts { ports: RefCell::new(all_connected), meta: bind_config .1 @@ -102,7 +110,7 @@ async fn init_no_ack_start() -> HydroCLI { } } -pub async fn init() -> HydroCLI { +pub async fn init() -> DeployPorts { let ret = init_no_ack_start::().await; println!("ack start"); diff --git a/hydroflow/src/util/mod.rs b/hydroflow/src/util/mod.rs index 150f24bed878..4dd2332d4f36 100644 --- a/hydroflow/src/util/mod.rs +++ b/hydroflow/src/util/mod.rs @@ -9,6 +9,8 @@ pub mod multiset; pub mod sparse_vec; pub mod unsync; +pub mod simulation; + mod monotonic; pub use monotonic::*; @@ -25,8 +27,8 @@ mod socket; #[cfg(unix)] pub use socket::*; -#[cfg(feature = "cli_integration")] -pub mod cli; +#[cfg(feature = "deploy_integration")] +pub mod deploy; use std::io::Read; use std::net::SocketAddr; @@ -110,10 +112,16 @@ where C: Default + Extend, S: Stream, { - let any = std::cell::Cell::new(true); - let mut unfused_iter = ready_iter(stream).inspect(|_| any.set(true)); + use std::sync::atomic::Ordering; + + // Yield to let any background async tasks send to the stream. 
+ tokio::task::yield_now().await; + + let got_any_items = std::sync::atomic::AtomicBool::new(true); + let mut unfused_iter = + ready_iter(stream).inspect(|_| got_any_items.store(true, Ordering::Relaxed)); let mut out = C::default(); - while any.replace(false) { + while got_any_items.swap(false, Ordering::Relaxed) { out.extend(unfused_iter.by_ref()); // Tokio unbounded channel returns items in lenght-128 chunks, so we have to be careful // that everything gets returned. That is why we yield here and loop. @@ -169,6 +177,7 @@ pub async fn bind_udp_lines(addr: SocketAddr) -> (UdpLinesSink, UdpLinesStream, } /// Returns a newline-delimited bytes `Sender`, `Receiver`, and `SocketAddr` bound to the given address. +/// /// The input `addr` may have a port of `0`, the returned `SocketAddr` will be the address of the newly bound endpoint. /// The inbound connections can be used in full duplex mode. When a `(T, SocketAddr)` pair is fed to the `Sender` /// returned by this function, the `SocketAddr` will be looked up against the currently existing connections. @@ -200,7 +209,9 @@ pub async fn bind_tcp_lines( .unwrap() } -/// This is inverse of bind_tcp_bytes. `(Bytes, SocketAddr)` pairs fed to the returned `Sender` will initiate new tcp connections to the specified `SocketAddr`. +/// The inverse of [`bind_tcp_bytes`]. +/// +/// `(Bytes, SocketAddr)` pairs fed to the returned `Sender` will initiate new tcp connections to the specified `SocketAddr`. /// These connections will be cached and reused, so that there will only be one connection per destination endpoint. When the endpoint sends data back it will be available via the returned `Receiver` #[cfg(not(target_arch = "wasm32"))] pub fn connect_tcp_bytes() -> ( @@ -231,9 +242,13 @@ where slice.sort_unstable_by(|a, b| f(a).cmp(f(b))) } -/// When a child process is spawned often you want to wait until the child process is ready before moving on. 
-/// One way to do that synchronization is by waiting for the child process to output something and match regex against that output. -/// For example, you could wait until the child process outputs "Client live!" which would indicate that it is ready to receive input now on stdin. +/// Waits for a specific process output before returning. +/// +/// When a child process is spawned often you want to wait until the child process is ready before +/// moving on. One way to do that synchronization is by waiting for the child process to output +/// something and match regex against that output. For example, you could wait until the child +/// process outputs "Client live!" which would indicate that it is ready to receive input now on +/// stdin. pub fn wait_for_process_output( output_so_far: &mut String, output: &mut ChildStdout, @@ -256,8 +271,10 @@ pub fn wait_for_process_output( } } -/// When a `Child` is dropped normally nothing happens but in unit tests you usually want to terminate -/// the child and wait for it to terminate. `DroppableChild` does that for us. +/// Terminates the inner [`Child`] process when dropped. +/// +/// When a `Child` is dropped normally nothing happens but in unit tests you usually want to +/// terminate the child and wait for it to terminate. `DroppableChild` does that for us. pub struct DroppableChild(Child); impl Drop for DroppableChild { @@ -271,9 +288,12 @@ impl Drop for DroppableChild { } } -/// rust examples are meant to be run by people and have a natural interface for that. This makes unit testing them cumbersome. -/// This function wraps calling cargo run and piping the stdin/stdout of the example to easy to handle returned objects. -/// The function also returns a `DroppableChild` which will ensure that the child processes will be cleaned up appropriately. +/// Run a rust example as a test. +/// +/// Rust examples are meant to be run by people and have a natural interface for that. This makes +/// unit testing them cumbersome. 
This function wraps calling cargo run and piping the stdin/stdout +/// of the example to easy to handle returned objects. The function also returns a `DroppableChild` +/// which will ensure that the child processes will be cleaned up appropriately. pub fn run_cargo_example(test_name: &str, args: &str) -> (DroppableChild, ChildStdin, ChildStdout) { let mut server = if args.is_empty() { std::process::Command::new("cargo") @@ -301,19 +321,21 @@ pub fn run_cargo_example(test_name: &str, args: &str) -> (DroppableChild, ChildS (DroppableChild(server), stdin, stdout) } -/// Returns an [`Stream`] that emits `n` items at a time from `iter` at a time, yielding in-between. +/// Converts an iterator into a stream that emits `n` items at a time, yielding between each batch. +/// /// This is useful for breaking up a large iterator across several ticks: `source_iter(...)` always /// releases all items in the first tick. However using `iter_batches_stream` with `source_stream(...)` /// will cause `n` items to be released each tick. (Although more than that may be emitted if there /// are loops in the stratum). pub fn iter_batches_stream( - mut iter: I, + iter: I, n: usize, ) -> futures::stream::PollFn) -> Poll>> where - I: Iterator + Unpin, + I: IntoIterator + Unpin, { let mut count = 0; + let mut iter = iter.into_iter(); futures::stream::poll_fn(move |ctx| { count += 1; if n < count { diff --git a/hydroflow/src/util/monotonic_map.rs b/hydroflow/src/util/monotonic_map.rs index 4d29c22c22d9..3a3617cdc583 100644 --- a/hydroflow/src/util/monotonic_map.rs +++ b/hydroflow/src/util/monotonic_map.rs @@ -2,10 +2,12 @@ use super::clear::Clear; -/// A map-like interface which in reality only stores one value at a time. The keys must be -/// monotonically increasing (i.e. timestamps). For Hydroflow, this allows state to be stored which -/// resets each tick by using the tick counter as the key. 
In the generic `Map` case it can be -/// swapped out for a true map to allow processing of multiple ticks of data at once. +/// A map-like interface which in reality only stores one value at a time. +/// +/// The keys must be monotonically increasing (i.e. timestamps). For Hydroflow, this allows state +/// to be stored which resets each tick by using the tick counter as the key. In the generic `Map` +/// case it can be swapped out for a true map to allow processing of multiple ticks of data at +/// once. #[derive(Clone, Debug)] pub struct MonotonicMap where diff --git a/hydroflow/src/util/simulation.rs b/hydroflow/src/util/simulation.rs new file mode 100644 index 000000000000..f1611be636e7 --- /dev/null +++ b/hydroflow/src/util/simulation.rs @@ -0,0 +1,440 @@ +//! # Hydroflow Deterministic Simulation Testing Framework +//! +//! This module provides a deterministic simulation testing framework for testing Hydroflow +//! transducers. +//! +//! It can be used to test complex interactions between multiple Hydroflow transducers in a +//! deterministic manner by running them in a single-threaded environment. The framework also +//! provides a "virtual network" implementation that allows production transducers to exchange +//! messages within the simulation. More importantly, the network is fully under control of the +//! unit test and the test can introduce faults such as message delays, message drops and +//! network partitions. +//! +//! ## Overview +//! +//! Conceptually, the simulation contains a "Fleet", which is a collection of "Hosts". These +//! aren't real hosts, but rather a collection of individual Hydroflow transducers (one per host) +//! that can communicate with each other over a virtual network. Every host has a "hostname" +//! which uniquely identifies it within the fleet. +//! +//! ```text +//! ┌───────────────────────────────────────────────────────────────────────────────────────────┐ +//! │SIMULATION │ +//! 
│ ┌───────────────────────────────────────────────────────────────────────────────────────┐ │ +//! │ │FLEET │ │ +//! │ │ ┌───────────────────────────────┐ ┌───────────────────────────────┐ │ │ +//! │ │ │HOST │ │HOST │ │ │ +//! │ │ │ ┌──────┐ ┌──────┐ ┌──────┐ │ │ ┌──────┐ ┌──────┐ ┌──────┐ │ │ │ +//! │ │ │ │INBOX │ │INBOX │ │INBOX │ │ ┌-┼-►INBOX │ │INBOX │ │INBOX │ │ │ │ +//! │ │ │ └──┬───┘ └──┬───┘ └──┬───┘ │ │ │ └──┬───┘ └──┬───┘ └──┬───┘ │ │ │ +//! │ │ │ ┌──▼──────────▼─────────▼───┐ │ │ │ ┌──▼──────────▼─────────▼───┐ │ │ │ +//! │ │ │ │ │ │ │ │ │ │ │ │ │ +//! │ │ │ │ TRANSDUCER │ │ │ │ │ TRANSDUCER │ │ │ │ +//! │ │ │ │ │ │ │ │ │ │ │ │ │ +//! │ │ │ └───┬─────────┬──────────┬──┘ │ │ │ └───┬─────────┬─────────┬───┘ │ │ │ +//! │ │ │ ┌──▼───┐ ┌──▼───┐ ┌───▼──┐ │ │ │ ┌──▼───┐ ┌──▼───┐ ┌──▼───┐ │ │ │ +//! │ │ │ │OUTBOX│ │OUTBOX│ │OUTBOX┼-┼--┐ │ │ │OUTBOX│ │OUTBOX│ │OUTBOX│ │ │ │ +//! │ │ │ └──────┘ └──────┘ └──────┘ │ │ │ │ └──────┘ └──────┘ └──────┘ │ │ │ +//! │ │ └───────────────────────────────┘ │ │ └───────────────────────────────┘ │ │ +//! │ └────────────────────────────────────┼──────────────┼───────────────────────────────────┘ │ +//! │ ┌─┼──────────────┼─┐ │ +//! │ │ └--------------┘ │ │ +//! │ │ NETWORK MESSAGE │ │ +//! │ │ PROCESSING │ │ +//! │ └──────────────────┘ │ +//! └───────────────────────────────────────────────────────────────────────────────────────────┘ +//! ``` +//! ## Network Processing +//! +//! ### Outboxes & Inboxes +//! When a transducer wishes to send a message to another transducer, it sends the message to an +//! "outbox" on its host. The unit test invokes the simulation's network message processing logic +//! at some desired cadence to pick up all messages from all outboxes and deliver them to the +//! corresponding inboxes on the destination hosts. The network message processing logic is the +//! point at which failures can be injected to change the behavior of the network. +//! +//! ### Interface Names +//! 
Every inbox and outbox is associated with an "interface name". This is a string that uniquely +//! identifies the interface on the host. When a transducer sends a message, it specifies the +//! destination hostname and the interface name on that host to which the message should be +//! delivered. +//! +//! ## Progress of Time in the Simulation +//! The single-threaded unit test can drive time forward on every host by invoking the `run_tick` +//! method on the host. This ultimately runs a single tick on the transducer. The unit test is +//! also responsible for invoking the network message processing at the time of its choosing and +//! can interleave the progress of time on various hosts and network processing as it sees fit. +//! +//! ## Examples +//! Check the tests module for examples on how to use the simulation framework. +use std::any::Any; +use std::collections::HashMap; +use std::convert::Infallible; +use std::fmt::Debug; +use std::future::ready; +use std::pin::Pin; + +use futures::{sink, Sink, SinkExt, StreamExt}; +use serde::{Deserialize, Serialize}; +use tokio::sync::mpsc::UnboundedSender; +use tokio_stream::wrappers::UnboundedReceiverStream; +use tokio_stream::Stream; +use tracing::trace; + +use crate::scheduled::graph::Hydroflow; +use crate::util::{collect_ready_async, unbounded_channel}; + +/// A hostname is a unique identifier for a host in the simulation. It is used to address messages +/// to a specific host (and thus a specific Hydroflow transducer). +pub type Hostname = String; + +/// An interface name is a unique identifier for an inbox or an outbox on a host. +type InterfaceName = String; + +/// An address is a combination of a hostname and an interface name. +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct Address { + host: Hostname, + interface: InterfaceName, +} + +impl Address { + /// Create a new address with the given hostname and interface name. 
+ pub fn new(host: Hostname, interface: InterfaceName) -> Self { + Address { host, interface } + } +} + +/// A message sender is used to send messages to an inbox on a host. +pub trait MessageSender { + /// Send a message to the inbox on the host. + fn send(&self, message: MessageWithAddress); +} + +impl MessageSender for UnboundedSender<(T, Address)> { + fn send(&self, message: (Box, Address)) { + match message.0.downcast::() { + Ok(msg) => { + self.send((*msg, message.1)).unwrap(); + } + Err(e) => { + panic!("Failed to downcast message to expected type: {:?}", e); + } + } + } +} + +/// A message with a delivery address. +pub type MessageWithAddress = (Box, Address); + +/// An inbox is used by a host to receive messages for the transducer. +pub struct Inbox { + sender: Box, +} + +/// Transducers can send messages to other transducers by putting those messages in an outbox +/// on their host. +pub struct Outbox { + receiver: Pin>>, +} + +/// A host is a single Hydroflow transducer running in the simulation. It has a unique hostname +/// and can communicate with other hosts over the virtual network. It has a collection of inboxes +/// and outboxes. +pub struct Host { + name: Hostname, + transducer: Hydroflow<'static>, + inputs: HashMap, + output: HashMap, +} + +impl Host { + /// Run a single tick on the host's transducer. Returns true if any work was done by the + /// transducer. This effectively "advances" time on the transducer. + pub fn run_tick(&mut self) -> bool { + self.transducer.run_tick() + } +} + +/// A builder for constructing a host in the simulation. +pub struct HostBuilder { + name: Hostname, + transducer: Option>, + inboxes: HashMap, + outboxes: HashMap, +} + +/// Used in conjunction with the `HostBuilder` to construct a host in the simulation. 
+pub struct TransducerBuilderContext<'context> { + inboxes: &'context mut HashMap, + outboxes: &'context mut HashMap, +} + +fn sink_from_fn(mut f: impl FnMut(T)) -> impl Sink { + sink::drain().with(move |item| { + (f)(item); + ready(Result::<(), Infallible>::Ok(())) + }) +} + +impl<'context> TransducerBuilderContext<'context> { + /// Create a new inbox on the host with the given interface name. Returns a stream that can + /// be read by the transducer using the source_stream hydroflow operator. + pub fn new_inbox( + &mut self, + interface: InterfaceName, + ) -> UnboundedReceiverStream<(T, Address)> { + let (sender, receiver) = unbounded_channel::<(T, Address)>(); + self.inboxes.insert( + interface, + Inbox { + sender: Box::new(sender), + }, + ); + receiver + } + + /// Creates a new outbox on the host with the given interface name. Returns a sink that can + /// be written to by the transducer using the dest_sink hydroflow operator. + pub fn new_outbox( + &mut self, + interface: InterfaceName, + ) -> impl Sink<(T, Address), Error = Infallible> { + let (sender, receiver) = unbounded_channel::<(T, Address)>(); + + let receiver = receiver.map(|(msg, addr)| (Box::new(msg) as Box, addr)); + + self.outboxes.insert( + interface, + Outbox { + receiver: Box::pin(receiver), + }, + ); + + sink_from_fn(move |message: (T, Address)| sender.send((message.0, message.1)).unwrap()) + } +} + +impl HostBuilder { + /// Creates a new instance of HostBuilder for a given hostname. + pub fn new(name: Hostname) -> Self { + HostBuilder { + name, + transducer: None, + inboxes: Default::default(), + outboxes: Default::default(), + } + } + + /// Supplies the (mandatory) transducer that runs on this host. 
+ pub fn with_transducer(mut self, builder: F) -> Self + where + F: FnOnce(&mut TransducerBuilderContext) -> Hydroflow<'static>, + { + let mut context = TransducerBuilderContext { + inboxes: &mut self.inboxes, + outboxes: &mut self.outboxes, + }; + let transducer = builder(&mut context); + self.transducer = Some(transducer); + self + } + + /// Builds the host with the supplied configuration. + pub fn build(self) -> Host { + if self.transducer.is_none() { + panic!("Transducer is required to build a host"); + } + + Host { + name: self.name, + transducer: self.transducer.unwrap(), + inputs: self.inboxes, + output: self.outboxes, + } + } +} + +/// A fleet is a collection of hosts in the simulation. It is responsible for running the +/// simulation and processing network messages. +pub struct Fleet { + hosts: HashMap, +} + +impl Fleet { + /// Creates a new instance of Fleet. + pub fn new() -> Self { + Fleet { + hosts: HashMap::new(), + } + } + + /// Adds a new host to the fleet with the given name and transducer. + pub fn add_host(&mut self, name: String, transducer_builder: F) -> &Host + where + F: FnOnce(&mut TransducerBuilderContext) -> Hydroflow<'static>, + { + let host = HostBuilder::new(name.clone()) + .with_transducer(transducer_builder) + .build(); + assert!( + self.hosts.insert(host.name.clone(), host).is_none(), + "Host with name {} already exists", + name + ); + self.get_host(&name).unwrap() + } + + /// Get a host by name. + pub fn get_host(&self, name: &str) -> Option<&Host> { + self.hosts.get(name) + } + + /// Get a host by name. + pub fn get_host_mut(&mut self, name: &str) -> Option<&mut Host> { + self.hosts.get_mut(name) + } + + /// Advance time on all hosts by a single tick. Returns true if any work was done by any of the + /// hosts. After ticking once on all the hosts, the method also processes network messages. + /// + /// The order in which the ticks are processed is not guaranteed. 
+ pub async fn run_single_tick_all_hosts(&mut self) -> bool { + let mut work_done: bool = false; + + for (name, host) in self.hosts.iter_mut() { + trace!("Running tick for host: {}", name); + work_done |= host.run_tick(); + } + + self.process_network().await; + + work_done + } + + /// Process all network messages in the simulation. This method picks up all messages from all + /// outboxes on all hosts and delivers them to the corresponding inboxes on the destination. + /// + /// The order in which the messages are processed is not guaranteed. + pub async fn process_network(&mut self) { + let mut all_messages: Vec<(Address, MessageWithAddress)> = Vec::new(); + + // Collect all messages from all outboxes on all hosts. + for (name, host) in self.hosts.iter_mut() { + for (interface, output) in host.output.iter_mut() { + let src_address = Address::new(name.clone(), interface.clone()); + let all_messages_on_interface: Vec<_> = + collect_ready_async(&mut output.receiver).await; + for message_on_interface in all_messages_on_interface { + all_messages.push((src_address.clone(), message_on_interface)); + } + } + } + + // Deliver all messages to the corresponding inboxes on the destination hosts. + for (src_address, (msg, addr)) in all_messages { + if let Some(destination_host) = self.hosts.get(&addr.host) { + if let Some(input) = destination_host.inputs.get(&addr.interface) { + input.sender.send((msg, src_address.clone())); + } else { + trace!( + "No interface named {:?} found on host {:?}. Dropping message {:?}.", + addr.interface, + addr.host, + msg + ); + } + } else { + trace!( + "No host named {:?} found. Dropping message {:?}.", + addr.host, + msg + ); + } + } + } + + /// Tick all hosts until all hosts are quiescent (i.e. no new work is done by any host). Ticking + /// is done in "rounds". At each round, all hosts are ticked once and then network messages are + /// processed. The process continues until no work is done by any host in a round. 
+ pub async fn run_until_quiescent(&mut self) { + while self.run_single_tick_all_hosts().await {} + } +} + +impl Default for Fleet { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use futures::StreamExt; + use hydroflow_macro::{hydroflow_syntax, hydroflow_test}; + + use crate::util::simulation::{Address, Fleet, Hostname}; + use crate::util::unbounded_channel; + + /// A simple test to demonstrate use of the simulation framework. Implements an echo server + /// and client. + #[hydroflow_test] + async fn test_echo() { + let mut fleet = Fleet::new(); + + // Hostnames for the server and client + let server: Hostname = "server".to_string(); + let client: Hostname = "client".to_string(); + + // Interface name for the echo "protocol" + let interface: String = "echo".to_string(); + + let server_address = Address::new(server.clone(), interface.clone()); + + // Create the echo server + fleet.add_host(server.clone(), |ctx| { + let network_input = ctx.new_inbox::(interface.clone()); + let network_output = ctx.new_outbox::(interface.clone()); + hydroflow_syntax! { + out = dest_sink(network_output); + + source_stream(network_input) + -> inspect(|(msg, addr)| println!("Received {:?} from {:?}", msg, addr)) + -> out; + } + }); + + // The client trigger channel is used to trigger the client into sending a message to the + // server. This allows the unit test to control when the client sends a message. + let (client_trigger_tx, client_trigger_rx) = unbounded_channel::(); + let (client_response_tx, mut client_response_rx) = unbounded_channel::(); + + fleet.add_host(client.clone(), |ctx| { + let network_out = ctx.new_outbox::(interface.clone()); + let network_in = ctx.new_inbox::(interface.clone()); + + hydroflow_syntax! 
{ + out = dest_sink(network_out); + + source_stream(client_trigger_rx) + -> map(|msg| (msg, server_address.clone())) + -> out; + + source_stream(network_in) + -> inspect(|(msg, addr)| println!("Received {:?} from {:?}", msg, addr)) + -> for_each(|(msg, _addr)| client_response_tx.send(msg).unwrap()); + + } + }); + + // Trigger the client to send a message. + client_trigger_tx.send("Hello, world!".to_string()).unwrap(); + + // Run the simulation until no new work is done by any host. + fleet.run_until_quiescent().await; + + // Check that the message was received. + let response = client_response_rx.next().await.unwrap(); + assert_eq!(response, "Hello, world!"); + } +} diff --git a/hydroflow/src/util/tcp.rs b/hydroflow/src/util/tcp.rs index 7c893d4da1ea..e1c766aa01c1 100644 --- a/hydroflow/src/util/tcp.rs +++ b/hydroflow/src/util/tcp.rs @@ -138,8 +138,11 @@ pub async fn bind_tcp> Ok((tx_egress, rx_ingress, bound_endpoint)) } -/// This is the inverse of bind_tcp, when messages enqueued into the returned sender, tcp sockets will be created and connected as necessary to send out the requests. -/// As the responses come back, they will be forwarded to the returned receiver. +/// The inverse of [`bind_tcp`]. +/// +/// When messages enqueued into the returned sender, tcp sockets will be created and connected as +/// necessary to send out the requests. As the responses come back, they will be forwarded to the +/// returned receiver. pub fn connect_tcp>( codec: Codec, ) -> (TcpFramedSink, TcpFramedStream) { diff --git a/hydroflow/src/util/udp.rs b/hydroflow/src/util/udp.rs index 1fa2f6dc591c..42da8468e1e4 100644 --- a/hydroflow/src/util/udp.rs +++ b/hydroflow/src/util/udp.rs @@ -13,9 +13,8 @@ use tokio_util::udp::UdpFramed; pub type UdpFramedSink = SplitSink, (Item, SocketAddr)>; /// A framed UDP `Stream` (receiving). 
pub type UdpFramedStream = SplitStream>; -/// Helper creates a UDP `Stream` and `Sink` from the given socket, using the given `Codec` to -/// handle delineation between inputs/outputs. Also returns the bound UdpSocket, which will be -/// different than the input UdpSocket if the input socket was set to port 0. +/// Returns a UDP `Stream`, `Sink`, and address for the given socket, using the given `Codec` to +/// handle delineation between inputs/outputs. pub fn udp_framed( socket: UdpSocket, codec: Codec, diff --git a/hydroflow/tests/compile-fail/datalog_join_badtypes.stderr b/hydroflow/tests/compile-fail/datalog_join_badtypes.stderr index 7f1153258496..af9b790af516 100644 --- a/hydroflow/tests/compile-fail/datalog_join_badtypes.stderr +++ b/hydroflow/tests/compile-fail/datalog_join_badtypes.stderr @@ -14,3 +14,66 @@ note: required by a bound in `check_inputs` | 8 | out(a) :- in1(a, b), in2(b) | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `check_inputs` + +error[E0271]: expected `impl Iterator` to be an iterator that yields `(({integer},), _)`, but it yields `((&str,), ())` + --> tests/compile-fail/datalog_join_badtypes.rs:8:9 + | +8 | out(a) :- in1(a, b), in2(b) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `(({integer},), _)`, found `((&str,), ())` + | + = note: expected tuple `(({integer},), _)` + found tuple `((&str,), ())` +note: required by a bound in `check_inputs` + --> tests/compile-fail/datalog_join_badtypes.rs:8:9 + | +8 | out(a) :- in1(a, b), in2(b) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `check_inputs` + +error[E0271]: expected `impl Iterator` to be an iterator that yields `(({integer},), ())`, but it yields `((&str,), ())` + --> tests/compile-fail/datalog_join_badtypes.rs:4:18 + | +4 | let mut df = datalog!(r#" + | __________________^ +5 | | .input in1 `source_iter(0..10) -> map(|x| (x, x))` +6 | | .input in2 `source_iter(0..10) -> map(|_| ("string",))` +7 | | .output out `null::<(u32,)>()` +8 | | out(a) :- in1(a, b), in2(b) +9 | 
| "#); + | |_______^ expected `(({integer},), ())`, found `((&str,), ())` + | + = note: expected tuple `(({integer},), ())` + found tuple `((&str,), ())` +note: required by a bound in `check_inputs` + --> tests/compile-fail/datalog_join_badtypes.rs:8:9 + | +8 | out(a) :- in1(a, b), in2(b) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `check_inputs` + = note: this error originates in the macro `datalog` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0271]: expected `impl Iterator` to be an iterator that yields `(({integer},), ())`, but it yields `((&str,), ())` + --> tests/compile-fail/datalog_join_badtypes.rs:7:17 + | +7 | .output out `null::<(u32,)>()` + | ^^^ expected `(({integer},), ())`, found `((&str,), ())` + | + = note: expected tuple `(({integer},), ())` + found tuple `((&str,), ())` +note: required by a bound in `check_inputs` + --> tests/compile-fail/datalog_join_badtypes.rs:8:9 + | +8 | out(a) :- in1(a, b), in2(b) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `check_inputs` + +error[E0271]: expected `impl Iterator` to be an iterator that yields `(({integer},), ())`, but it yields `((&str,), ())` + --> tests/compile-fail/datalog_join_badtypes.rs:7:17 + | +7 | .output out `null::<(u32,)>()` + | ^^^^^^^^^^^^^^^^^^^^^ expected `(({integer},), ())`, found `((&str,), ())` + | + = note: expected tuple `(({integer},), ())` + found tuple `((&str,), ())` +note: required by a bound in `check_inputs` + --> tests/compile-fail/datalog_join_badtypes.rs:8:9 + | +8 | out(a) :- in1(a, b), in2(b) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `check_inputs` diff --git a/hydroflow/tests/compile-fail/surface_anti_join_badtypes.stderr b/hydroflow/tests/compile-fail/surface_anti_join_badtypes.stderr index 849dee82b004..c8aa5b266d47 100644 --- a/hydroflow/tests/compile-fail/surface_anti_join_badtypes.stderr +++ b/hydroflow/tests/compile-fail/surface_anti_join_badtypes.stderr @@ -10,11 +10,11 @@ error[E0271]: type 
mismatch resolving ` map(|_| "string") -> [neg]j; | |_______________________________________________- required by a bound introduced by this call | -note: required by a bound in `main::{closure#1}::check_pivot_run` +note: required by a bound in `pivot_run_sg_2v1` --> tests/compile-fail/surface_anti_join_badtypes.rs:5:13 | 5 | j = anti_join() -> for_each(std::mem::drop); | _____________^ 6 | | source_iter(0..10) -> map(|x| (x, x)) -> [pos]j; 7 | | source_iter(0..10) -> map(|_| "string") -> [neg]j; - | |_______________________________________________^ required by this bound in `check_pivot_run` + | |_______________________________________________^ required by this bound in `pivot_run_sg_2v1` diff --git a/hydroflow/tests/compile-fail/surface_badgeneric_type.rs b/hydroflow/tests/compile-fail/surface_badgeneric_type.rs index f7e1b1fe3a90..ef2c290c63bc 100644 --- a/hydroflow/tests/compile-fail/surface_badgeneric_type.rs +++ b/hydroflow/tests/compile-fail/surface_badgeneric_type.rs @@ -2,8 +2,8 @@ use hydroflow::hydroflow_syntax; fn main() { let mut df = hydroflow_syntax! { - // no generic arguments for `defer`. - source_iter(0..10) -> defer_tick::() -> for_each(std::mem::drop); + // no generic arguments for `inspect`. + source_iter(0..10) -> inspect::(std::mem::drop) -> for_each(std::mem::drop); }; df.run_available(); } diff --git a/hydroflow/tests/compile-fail/surface_badgeneric_type.stderr b/hydroflow/tests/compile-fail/surface_badgeneric_type.stderr index 6a8ab9bffcec..3f49e6ac58ea 100644 --- a/hydroflow/tests/compile-fail/surface_badgeneric_type.stderr +++ b/hydroflow/tests/compile-fail/surface_badgeneric_type.stderr @@ -1,5 +1,5 @@ -error: `defer_tick` should have exactly 0 generic type arguments, actually has 1. - --> tests/compile-fail/surface_badgeneric_type.rs:6:44 +error: `inspect` should have exactly 0 generic type arguments, actually has 1. 
+ --> tests/compile-fail/surface_badgeneric_type.rs:6:41 | -6 | source_iter(0..10) -> defer_tick::() -> for_each(std::mem::drop); - | ^^^^^ +6 | source_iter(0..10) -> inspect::(std::mem::drop) -> for_each(std::mem::drop); + | ^^^^^ diff --git a/hydroflow/tests/compile-fail/surface_conflicting_name.stderr b/hydroflow/tests/compile-fail/surface_conflicting_name.stderr index 5aef3f1b0fa2..589f496cd895 100644 --- a/hydroflow/tests/compile-fail/surface_conflicting_name.stderr +++ b/hydroflow/tests/compile-fail/surface_conflicting_name.stderr @@ -1,11 +1,11 @@ -error: Name assignment to `a` conflicts with existing assignment: $DIR/tests/compile-fail/surface_conflicting_name.rs:5:9 (1/2) - --> tests/compile-fail/surface_conflicting_name.rs:6:9 - | -6 | a = null() -> null(); - | ^^^^^^^^^^^^^^^^^^^^^ - -error: Existing assignment to `a` conflicts with later assignment: $DIR/tests/compile-fail/surface_conflicting_name.rs:6:9 (2/2) +error: Existing assignment to `a` conflicts with later assignment: $DIR/tests/compile-fail/surface_conflicting_name.rs:6:9 (1/2) --> tests/compile-fail/surface_conflicting_name.rs:5:9 | 5 | a = null() -> null(); | ^ + +error: Name assignment to `a` conflicts with existing assignment: $DIR/tests/compile-fail/surface_conflicting_name.rs:5:9 (2/2) + --> tests/compile-fail/surface_conflicting_name.rs:6:9 + | +6 | a = null() -> null(); + | ^^^^^^^^^^^^^^^^^^^^^ diff --git a/hydroflow/tests/compile-fail/surface_demux_badclosure.stderr b/hydroflow/tests/compile-fail/surface_demux_badclosure.stderr index ce21cba479a3..0384823a88bb 100644 --- a/hydroflow/tests/compile-fail/surface_demux_badclosure.stderr +++ b/hydroflow/tests/compile-fail/surface_demux_badclosure.stderr @@ -1,15 +1,8 @@ error: Closure provided to `demux(..)` must have two arguments: the first argument is the item, and the second argument lists ports. E.g. the second argument could be `var_args!(port_a, port_b, ..)`. 
- --> tests/compile-fail/surface_demux_badclosure.rs:5:48 - | -5 | my_demux = source_iter(0..10) -> demux(|var_args!(a, b, c)| { - | ________________________________________________^ -6 | | match item % 3 { -7 | | 0 => a.give(item), -8 | | 1 => b.give(item), -9 | | 2 => c.give(item), -10 | | } -11 | | }); - | |_________^ + --> tests/compile-fail/surface_demux_badclosure.rs:5:49 + | +5 | my_demux = source_iter(0..10) -> demux(|var_args!(a, b, c)| { + | ^^^^^^^^^^^^^^^^^^ warning: unused import: `var_args` --> tests/compile-fail/surface_demux_badclosure.rs:1:35 diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_missingtypeparam.rs b/hydroflow/tests/compile-fail/surface_demuxenum_missingtypeparam.rs new file mode 100644 index 000000000000..a8540f94d3ba --- /dev/null +++ b/hydroflow/tests/compile-fail/surface_demuxenum_missingtypeparam.rs @@ -0,0 +1,23 @@ +use hydroflow::util::demux_enum::DemuxEnum; +use hydroflow::hydroflow_syntax; + +fn main() { + #[derive(DemuxEnum)] + enum Shape { + Square(f64), + Rectangle { w: f64, h: f64 }, + Circle { r: f64 }, + } + + let mut df = hydroflow_syntax! { + my_demux = source_iter([ + Shape::Rectangle { w: 10.0, h: 8.0 }, + Shape::Square(9.0), + Shape::Circle { r: 5.0 }, + ]) -> demux_enum(); + my_demux[Rectangle] -> for_each(std::mem::drop); + my_demux[Circle] -> for_each(std::mem::drop); + my_demux[Square] -> for_each(std::mem::drop); + }; + df.run_available(); +} diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_missingtypeparam.stderr b/hydroflow/tests/compile-fail/surface_demuxenum_missingtypeparam.stderr new file mode 100644 index 000000000000..fd76dc15f52c --- /dev/null +++ b/hydroflow/tests/compile-fail/surface_demuxenum_missingtypeparam.stderr @@ -0,0 +1,5 @@ +error: `demux_enum` should have exactly 1 generic type arguments, actually has 0. 
+ --> tests/compile-fail/surface_demuxenum_missingtypeparam.rs:17:15 + | +17 | ]) -> demux_enum(); + | ^^^^^^^^^^ diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_notenum.stderr b/hydroflow/tests/compile-fail/surface_demuxenum_notenum.stderr index 2f124fa87951..be5512428e64 100644 --- a/hydroflow/tests/compile-fail/surface_demuxenum_notenum.stderr +++ b/hydroflow/tests/compile-fail/surface_demuxenum_notenum.stderr @@ -1,22 +1,15 @@ -error[E0277]: the trait bound `Shape: DemuxEnumItems` is not satisfied +error[E0277]: the trait bound `Shape: DemuxEnumBase` is not satisfied --> tests/compile-fail/surface_demuxenum_notenum.rs:12:28 | 12 | ]) -> demux_enum::(); - | ^^^^^ the trait `DemuxEnumItems` is not implemented for `Shape` + | ^^^^^ the trait `DemuxEnumBase` is not implemented for `Shape` | + = note: use `#[derive(hydroflow::DemuxEnum)]` note: required by a bound in `check_impl_demux_enum` - --> tests/compile-fail/surface_demuxenum_notenum.rs:8:18 - | -8 | let mut df = hydroflow_syntax! { - | __________________^ -9 | | my_demux = source_iter([ -10 | | Shape { area: 10.0 }, -11 | | Shape { area: 9.0 }, -... 
| -16 | | my_demux[Ellipse] -> for_each(std::mem::drop); -17 | | }; - | |_____^ required by this bound in `check_impl_demux_enum` - = note: this error originates in the macro `hydroflow_syntax` (in Nightly builds, run with -Z macro-backtrace for more info) + --> tests/compile-fail/surface_demuxenum_notenum.rs:12:28 + | +12 | ]) -> demux_enum::(); + | ^^^^^ required by this bound in `check_impl_demux_enum` error[E0223]: ambiguous associated type --> tests/compile-fail/surface_demuxenum_notenum.rs:14:18 @@ -62,28 +55,11 @@ help: if there were a trait named `Example` with associated type `Square` implem 15 | my_demux[::Square] -> for_each(std::mem::drop); | ~~~~~~~~~~~~~~~~~~~~~~~~~~ -error[E0277]: the trait bound `Shape: DemuxEnumItems` is not satisfied +error[E0277]: the trait bound `Shape: DemuxEnum<_>` is not satisfied --> tests/compile-fail/surface_demuxenum_notenum.rs:12:15 | 12 | ]) -> demux_enum::(); - | ^^^^^^^^^^^^^^^^^^^^^ the trait `DemuxEnumItems` is not implemented for `Shape` - -error[E0277]: the trait bound `Shape: DemuxEnum<_>` is not satisfied - --> tests/compile-fail/surface_demuxenum_notenum.rs:12:28 + | ^^^^^^^^^^^^^^^^^^^^^ the trait `DemuxEnum<_>` is not implemented for `Shape` | -12 | ]) -> demux_enum::(); - | ^^^^^ the trait `DemuxEnum<_>` is not implemented for `Shape` - | -note: required by a bound in `__typeguard_demux_enum_fn` - --> tests/compile-fail/surface_demuxenum_notenum.rs:8:18 - | -8 | let mut df = hydroflow_syntax! { - | __________________^ -9 | | my_demux = source_iter([ -10 | | Shape { area: 10.0 }, -11 | | Shape { area: 9.0 }, -... | -16 | | my_demux[Ellipse] -> for_each(std::mem::drop); -17 | | }; - | |_____^ required by this bound in `__typeguard_demux_enum_fn` - = note: this error originates in the macro `hydroflow_syntax` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: ensure there is exactly one output for each enum variant. 
+ = note: ensure that the type for each output is a tuple of the field for the variant: `()`, `(a,)`, or `(a, b, ...)`. diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_port_duplicate_one.rs b/hydroflow/tests/compile-fail/surface_demuxenum_port_duplicate_one.rs new file mode 100644 index 000000000000..0b193eaba6e8 --- /dev/null +++ b/hydroflow/tests/compile-fail/surface_demuxenum_port_duplicate_one.rs @@ -0,0 +1,18 @@ +use hydroflow::hydroflow_syntax; +use hydroflow::util::demux_enum::DemuxEnum; + +fn main() { + #[derive(DemuxEnum)] + enum Shape { + Square(f64), + } + + let mut df = hydroflow_syntax! { + my_demux = source_iter([ + Shape::Square(9.0), + ]) -> demux_enum::(); + my_demux[Square] -> for_each(std::mem::drop); + my_demux[Square] -> for_each(std::mem::drop); + }; + df.run_available(); +} diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_port_duplicate_one.stderr b/hydroflow/tests/compile-fail/surface_demuxenum_port_duplicate_one.stderr new file mode 100644 index 000000000000..b641266fa04f --- /dev/null +++ b/hydroflow/tests/compile-fail/surface_demuxenum_port_duplicate_one.stderr @@ -0,0 +1,11 @@ +error: Output connection conflicts with below ($DIR/tests/compile-fail/surface_demuxenum_port_duplicate_one.rs:15:18) (1/2) + --> tests/compile-fail/surface_demuxenum_port_duplicate_one.rs:14:18 + | +14 | my_demux[Square] -> for_each(std::mem::drop); + | ^^^^^^ + +error: Output connection conflicts with above ($DIR/tests/compile-fail/surface_demuxenum_port_duplicate_one.rs:14:18) (2/2) + --> tests/compile-fail/surface_demuxenum_port_duplicate_one.rs:15:18 + | +15 | my_demux[Square] -> for_each(std::mem::drop); + | ^^^^^^ diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_port_extra.stderr b/hydroflow/tests/compile-fail/surface_demuxenum_port_extra.stderr index 5732d5cf44ea..f0f85ec90a72 100644 --- a/hydroflow/tests/compile-fail/surface_demuxenum_port_extra.stderr +++ b/hydroflow/tests/compile-fail/surface_demuxenum_port_extra.stderr 
@@ -8,20 +8,26 @@ error[E0599]: no variant named `Ellipse` found for enum `Shape` | ^^^^^^^ variant not found in `Shape` error[E0308]: mismatched types - --> tests/compile-fail/surface_demuxenum_port_extra.rs:12:18 + --> tests/compile-fail/surface_demuxenum_port_extra.rs:17:15 | -12 | let mut df = hydroflow_syntax! { - | __________________^ -13 | | my_demux = source_iter([ -14 | | Shape::Rectangle { w: 10.0, h: 8.0 }, -15 | | Shape::Square(9.0), -... | -20 | | my_demux[Square] -> for_each(std::mem::drop); - | | ------------------------ the found opaque type -21 | | my_demux[Ellipse] -> for_each(std::mem::drop); -22 | | }; - | |_____^ expected `()`, found `(impl Pusherator, ())` +17 | ]) -> demux_enum::(); + | ^^^^^^^^^^^^^^^^^^^^^ + | | + | expected a tuple with 3 elements, found one with 4 elements + | arguments to this function are incorrect +18 | my_demux[Rectangle] -> for_each(std::mem::drop); + | ------------------------ one of the found opaque types +19 | my_demux[Circle] -> for_each(std::mem::drop); + | ------------------------ one of the found opaque types +20 | my_demux[Square] -> for_each(std::mem::drop); + | ------------------------ one of the found opaque types +21 | my_demux[Ellipse] -> for_each(std::mem::drop); + | ------------------------ one of the found opaque types + | + = note: expected mutable reference `&mut (_, _, _)` + found mutable reference `&mut (impl Pusherator, impl Pusherator, impl Pusherator, impl Pusherator)` +note: method defined here + --> src/util/demux_enum.rs | - = note: expected unit type `()` - found tuple `(impl Pusherator, ())` - = note: this error originates in the macro `$crate::var_expr` which comes from the expansion of the macro `hydroflow_syntax` (in Nightly builds, run with -Z macro-backtrace for more info) + | fn demux_enum(self, outputs: &mut Outputs); + | ^^^^^^^^^^ diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_port_extra_zero.rs b/hydroflow/tests/compile-fail/surface_demuxenum_port_extra_zero.rs new 
file mode 100644 index 000000000000..8317420f9cb1 --- /dev/null +++ b/hydroflow/tests/compile-fail/surface_demuxenum_port_extra_zero.rs @@ -0,0 +1,14 @@ +use hydroflow::util::demux_enum::DemuxEnum; +use hydroflow::hydroflow_syntax; + +fn main() { + #[derive(DemuxEnum)] + enum Shape { + } + + let mut df = hydroflow_syntax! { + my_demux = source_iter([]) -> demux_enum::(); + my_demux[Square] -> for_each(std::mem::drop); + }; + df.run_available(); +} diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_port_extra_zero.stderr b/hydroflow/tests/compile-fail/surface_demuxenum_port_extra_zero.stderr new file mode 100644 index 000000000000..ae41a60d588c --- /dev/null +++ b/hydroflow/tests/compile-fail/surface_demuxenum_port_extra_zero.stderr @@ -0,0 +1,37 @@ +error[E0599]: no variant named `Square` found for enum `Shape` + --> tests/compile-fail/surface_demuxenum_port_extra_zero.rs:11:18 + | +6 | enum Shape { + | ---------- variant `Square` not found here +... +11 | my_demux[Square] -> for_each(std::mem::drop); + | ^^^^^^ variant not found in `Shape` + +error[E0277]: the trait bound `Shape: SingleVariant` is not satisfied + --> tests/compile-fail/surface_demuxenum_port_extra_zero.rs:10:52 + | +10 | my_demux = source_iter([]) -> demux_enum::(); + | ^^^^^ the trait `SingleVariant` is not implemented for `Shape` + | + = note: requires that the enum have only one variant. + = note: ensure there are no missing outputs; there must be exactly one output for each enum variant. + +error[E0277]: the trait bound `Shape: SingleVariant` is not satisfied + --> tests/compile-fail/surface_demuxenum_port_extra_zero.rs:10:39 + | +10 | my_demux = source_iter([]) -> demux_enum::(); + | ^^^^^^^^^^^^^^^^^^^^^ the trait `SingleVariant` is not implemented for `Shape` + | + = note: requires that the enum have only one variant. + = note: ensure there are no missing outputs; there must be exactly one output for each enum variant. 
+ +error[E0277]: the trait bound `Shape: SingleVariant` is not satisfied + --> tests/compile-fail/surface_demuxenum_port_extra_zero.rs:10:39 + | +10 | my_demux = source_iter([]) -> demux_enum::(); + | _______________________________________^ +11 | | my_demux[Square] -> for_each(std::mem::drop); + | |____________________________________________________^ the trait `SingleVariant` is not implemented for `Shape` + | + = note: requires that the enum have only one variant. + = note: ensure there are no missing outputs; there must be exactly one output for each enum variant. diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_port_missing.stderr b/hydroflow/tests/compile-fail/surface_demuxenum_port_missing.stderr index 70e3f9585ec2..79452dc99682 100644 --- a/hydroflow/tests/compile-fail/surface_demuxenum_port_missing.stderr +++ b/hydroflow/tests/compile-fail/surface_demuxenum_port_missing.stderr @@ -1,16 +1,20 @@ error[E0308]: mismatched types - --> tests/compile-fail/surface_demuxenum_port_missing.rs:12:18 + --> tests/compile-fail/surface_demuxenum_port_missing.rs:17:15 | -12 | let mut df = hydroflow_syntax! { - | __________________^ -13 | | my_demux = source_iter([ -14 | | Shape::Rectangle { w: 10.0, h: 8.0 }, -15 | | Shape::Square(9.0), -... 
| -19 | | my_demux[Circle] -> for_each(std::mem::drop); -20 | | }; - | |_____^ expected `(_, ())`, found `()` +17 | ]) -> demux_enum::(); + | ^^^^^^^^^^^^^^^^^^^^^ + | | + | expected a tuple with 3 elements, found one with 2 elements + | arguments to this function are incorrect +18 | my_demux[Rectangle] -> for_each(std::mem::drop); + | ------------------------ one of the found opaque types +19 | my_demux[Circle] -> for_each(std::mem::drop); + | ------------------------ one of the found opaque types | - = note: expected tuple `(_, ())` - found unit type `()` - = note: this error originates in the macro `$crate::var_expr` which comes from the expansion of the macro `hydroflow_syntax` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: expected mutable reference `&mut (_, _, _)` + found mutable reference `&mut (impl Pusherator, impl Pusherator)` +note: method defined here + --> src/util/demux_enum.rs + | + | fn demux_enum(self, outputs: &mut Outputs); + | ^^^^^^^^^^ diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_port_missing_one.rs b/hydroflow/tests/compile-fail/surface_demuxenum_port_missing_one.rs new file mode 100644 index 000000000000..7cf98339ada3 --- /dev/null +++ b/hydroflow/tests/compile-fail/surface_demuxenum_port_missing_one.rs @@ -0,0 +1,19 @@ +use hydroflow::util::demux_enum::DemuxEnum; +use hydroflow::hydroflow_syntax; + +fn main() { + #[derive(DemuxEnum)] + enum Shape { + Square(f64), + Rectangle { w: f64, h: f64 }, + } + + let mut df = hydroflow_syntax! 
{ + my_demux = source_iter([ + Shape::Rectangle { w: 10.0, h: 8.0 }, + Shape::Square(9.0), + ]) -> demux_enum::(); + my_demux[Rectangle] -> for_each(std::mem::drop); + }; + df.run_available(); +} diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_port_missing_one.stderr b/hydroflow/tests/compile-fail/surface_demuxenum_port_missing_one.stderr new file mode 100644 index 000000000000..1290cb4aa251 --- /dev/null +++ b/hydroflow/tests/compile-fail/surface_demuxenum_port_missing_one.stderr @@ -0,0 +1,28 @@ +error[E0277]: the trait bound `Shape: SingleVariant` is not satisfied + --> tests/compile-fail/surface_demuxenum_port_missing_one.rs:15:28 + | +15 | ]) -> demux_enum::(); + | ^^^^^ the trait `SingleVariant` is not implemented for `Shape` + | + = note: requires that the enum have only one variant. + = note: ensure there are no missing outputs; there must be exactly one output for each enum variant. + +error[E0277]: the trait bound `Shape: SingleVariant` is not satisfied + --> tests/compile-fail/surface_demuxenum_port_missing_one.rs:15:15 + | +15 | ]) -> demux_enum::(); + | ^^^^^^^^^^^^^^^^^^^^^ the trait `SingleVariant` is not implemented for `Shape` + | + = note: requires that the enum have only one variant. + = note: ensure there are no missing outputs; there must be exactly one output for each enum variant. + +error[E0277]: the trait bound `Shape: SingleVariant` is not satisfied + --> tests/compile-fail/surface_demuxenum_port_missing_one.rs:15:15 + | +15 | ]) -> demux_enum::(); + | _______________^ +16 | | my_demux[Rectangle] -> for_each(std::mem::drop); + | |_______________________________________________________^ the trait `SingleVariant` is not implemented for `Shape` + | + = note: requires that the enum have only one variant. + = note: ensure there are no missing outputs; there must be exactly one output for each enum variant. 
diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_port_wrong_one.rs b/hydroflow/tests/compile-fail/surface_demuxenum_port_wrong_one.rs new file mode 100644 index 000000000000..293a4b66a748 --- /dev/null +++ b/hydroflow/tests/compile-fail/surface_demuxenum_port_wrong_one.rs @@ -0,0 +1,17 @@ +use hydroflow::util::demux_enum::DemuxEnum; +use hydroflow::hydroflow_syntax; + +fn main() { + #[derive(DemuxEnum)] + enum Shape { + Square(f64), + } + + let mut df = hydroflow_syntax! { + my_demux = source_iter([ + Shape::Square(9.0), + ]) -> demux_enum::(); + my_demux[Circle] -> for_each(std::mem::drop); + }; + df.run_available(); +} diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_port_wrong_one.stderr b/hydroflow/tests/compile-fail/surface_demuxenum_port_wrong_one.stderr new file mode 100644 index 000000000000..129d7ed698b5 --- /dev/null +++ b/hydroflow/tests/compile-fail/surface_demuxenum_port_wrong_one.stderr @@ -0,0 +1,8 @@ +error[E0599]: no variant named `Circle` found for enum `Shape` + --> tests/compile-fail/surface_demuxenum_port_wrong_one.rs:14:18 + | +6 | enum Shape { + | ---------- variant `Circle` not found here +... +14 | my_demux[Circle] -> for_each(std::mem::drop); + | ^^^^^^ variant not found in `Shape` diff --git a/hydroflow/tests/compile-fail/surface_demuxenum_wrongenum.rs b/hydroflow/tests/compile-fail/surface_demuxenum_wrongenum.rs index b5e6acffc827..71ef0b72edc5 100644 --- a/hydroflow/tests/compile-fail/surface_demuxenum_wrongenum.rs +++ b/hydroflow/tests/compile-fail/surface_demuxenum_wrongenum.rs @@ -14,11 +14,10 @@ fn main() { Shape::Rectangle { w: 10.0, h: 8.0 }, Shape::Square(9.0), Shape::Circle { r: 5.0 }, - ]) -> demux_enum::