Skip to content

Commit

Permalink
wasi-nn: adapt to new test infrastructure (#7679)
Browse files Browse the repository at this point in the history
* wasi-nn: add test programs

This change adds new test programs for wasi-nn in a way that fits in with
the existing WASI test infrastructure. The code is not new, though: this
reuses the wasi-nn `examples`, which are currently used by the
`run-wasi-nn-example.sh` CI script. Eventually the examples will be
removed in favor of these tests.

Because wasi-nn's component model support is still in flight, this
change also skips the generation of components for `nn_`-prefixed tests.

* wasi-nn: add `testing` module

This testing-only module has code (i.e., `check_test!`) to check whether
OpenVINO and some test artifacts are available. The test artifacts are
downloaded and cached if not present, expecting `curl` to be available on
the command line (as discussed in the previous version of this change, #6895).

* wasi-nn: run `nn_*` test programs as integration tests

Following the pattern of other WASI crates, this change adds the
necessary infrastructure to run the `nn_*` files in
`crates/test-programs` (built by `test-program-artifacts`). These tests
are only run when two sets of conditions are true:
- statically: we only run these tests where we expect OpenVINO to be
  easy to install and run (e.g., the `cfg_attr` parts)
- dynamically: we also only run these tests when the OpenVINO libraries
  can be located and the model artifacts can be downloaded

* ci: install OpenVINO for running wasi-nn tests

prtest:full

* vet: certify the `wasi-nn` crate

* ci: remove wasi-nn test script
  • Loading branch information
abrown authored Dec 15, 2023
1 parent 81e383f commit 54d3727
Show file tree
Hide file tree
Showing 13 changed files with 345 additions and 98 deletions.
31 changes: 4 additions & 27 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -422,6 +422,10 @@ jobs:
- run: echo CARGO_BUILD_TARGET=${{ matrix.target }} >> $GITHUB_ENV
if: matrix.target != ''

# Install OpenVINO for testing wasmtime-wasi-nn.
- uses: abrown/install-openvino-action@v8
if: runner.arch == 'X64'

# Fix an ICE for now in gcc when compiling zstd with debuginfo (??)
- run: echo CFLAGS=-g0 >> $GITHUB_ENV
if: matrix.target == 'x86_64-pc-windows-gnu'
Expand Down Expand Up @@ -522,32 +526,6 @@ jobs:
# Windows fails GitHub Actions will confusingly mark the failed Windows job
# as cancelled instead of failed.

# Build and test the wasi-nn module.
test_wasi_nn:
needs: determine
if: needs.determine.outputs.run-full
name: Test wasi-nn module
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/actions/install-rust
- run: rustup target add wasm32-wasi
- uses: abrown/install-openvino-action@v7
with:
version: 2022.3.0
apt: true
- run: ./ci/run-wasi-nn-example.sh
env:
RUST_BACKTRACE: 1

# common logic to cancel the entire run if this job fails
- run: gh run cancel ${{ github.run_id }}
if: failure() && github.event_name != 'pull_request'
env:
GH_TOKEN: ${{ github.token }}

build-preview1-component-adapter:
name: Build wasi-preview1-component-adapter
needs: determine
Expand Down Expand Up @@ -775,7 +753,6 @@ jobs:
- checks
- checks_winarm64
- fuzz_targets
- test_wasi_nn
- bench
- meta_deterministic_check
- verify-publish
Expand Down
13 changes: 13 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

57 changes: 0 additions & 57 deletions ci/run-wasi-nn-example.sh

This file was deleted.

1 change: 1 addition & 0 deletions crates/test-programs/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ workspace = true
[dependencies]
anyhow = { workspace = true }
wasi = "0.11.0"
wasi-nn = "0.6.0"
wit-bindgen = { workspace = true, features = ['default'] }
libc = { workspace = true }
getrandom = "0.2.9"
Expand Down
21 changes: 13 additions & 8 deletions crates/test-programs/artifacts/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -65,14 +65,6 @@ fn build_and_generate_tests() {

generated_code += &format!("pub const {camel}: &'static str = {wasm:?};\n");

let adapter = match target.as_str() {
"reactor" => &reactor_adapter,
s if s.starts_with("api_proxy") => &proxy_adapter,
_ => &command_adapter,
};
let path = compile_component(&wasm, adapter);
generated_code += &format!("pub const {camel}_COMPONENT: &'static str = {path:?};\n");

// Bucket, based on the name of the test, into a "kind" which generates
// a `foreach_*` macro below.
let kind = match target.as_str() {
Expand All @@ -81,6 +73,7 @@ fn build_and_generate_tests() {
s if s.starts_with("preview2_") => "preview2",
s if s.starts_with("cli_") => "cli",
s if s.starts_with("api_") => "api",
s if s.starts_with("nn_") => "nn",
// If you're reading this because you hit this panic, either add it
// to a test suite above or add a new "suite". The purpose of the
// categorization above is to have a static assertion that tests
Expand All @@ -93,6 +86,18 @@ fn build_and_generate_tests() {
if !kind.is_empty() {
kinds.entry(kind).or_insert(Vec::new()).push(target);
}

// Generate a component from each test.
if kind == "nn" {
continue;
}
let adapter = match target.as_str() {
"reactor" => &reactor_adapter,
s if s.starts_with("api_proxy") => &proxy_adapter,
_ => &command_adapter,
};
let path = compile_component(&wasm, adapter);
generated_code += &format!("pub const {camel}_COMPONENT: &'static str = {path:?};\n");
}

for (kind, targets) in kinds {
Expand Down
59 changes: 59 additions & 0 deletions crates/test-programs/src/bin/nn_image_classification.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
use anyhow::Result;
use std::fs;
use wasi_nn::*;

pub fn main() -> Result<()> {
let xml = fs::read_to_string("fixture/model.xml").unwrap();
println!("Read graph XML, first 50 characters: {}", &xml[..50]);

let weights = fs::read("fixture/model.bin").unwrap();
println!("Read graph weights, size in bytes: {}", weights.len());

let graph = GraphBuilder::new(GraphEncoding::Openvino, ExecutionTarget::CPU)
.build_from_bytes([&xml.into_bytes(), &weights])?;
println!("Loaded graph into wasi-nn with ID: {}", graph);

let mut context = graph.init_execution_context()?;
println!("Created wasi-nn execution context with ID: {}", context);

// Load a tensor that precisely matches the graph input tensor (see
// `fixture/frozen_inference_graph.xml`).
let data = fs::read("fixture/tensor.bgr").unwrap();
println!("Read input tensor, size in bytes: {}", data.len());
context.set_input(0, wasi_nn::TensorType::F32, &[1, 3, 224, 224], &data)?;

// Execute the inference.
context.compute()?;
println!("Executed graph inference");

// Retrieve the output.
let mut output_buffer = vec![0f32; 1001];
context.get_output(0, &mut output_buffer[..])?;
println!(
"Found results, sorted top 5: {:?}",
&sort_results(&output_buffer)[..5]
);

Ok(())
}

// Sort the buffer of probabilities. The graph places the match probability for
// each class at the index for that class (e.g. the probability of class 42 is
// placed at buffer[42]). Here we convert to a wrapping InferenceResult and sort
// the results descending. It is unclear why the MobileNet output indices are
// "off by one" but the `.skip(1)` below seems necessary to get results that
// make sense (e.g. 763 = "revolver" vs 762 = "restaurant").
fn sort_results(buffer: &[f32]) -> Vec<InferenceResult> {
    let mut results: Vec<InferenceResult> = buffer
        .iter()
        .skip(1)
        .enumerate()
        .map(|(class, probability)| InferenceResult(class, *probability))
        .collect();
    // `total_cmp` is a total order over f32 (NaN included), so this sort
    // cannot panic the way `partial_cmp(..).unwrap()` would on a NaN score.
    results.sort_by(|a, b| b.1.total_cmp(&a.1));
    results
}

// A wrapper for class ID and match probabilities.
#[derive(Debug, PartialEq)]
struct InferenceResult(usize, f32);
53 changes: 53 additions & 0 deletions crates/test-programs/src/bin/nn_image_classification_named.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
use anyhow::Result;
use std::fs;
use wasi_nn::*;

/// Run an image-classification inference through wasi-nn using a graph the
/// host pre-registered under the name "mobilenet", rather than building one
/// from raw XML/weights bytes.
pub fn main() -> Result<()> {
    let builder = GraphBuilder::new(GraphEncoding::Openvino, ExecutionTarget::CPU);
    let graph = builder.build_from_cache("mobilenet")?;
    println!("Loaded a graph: {:?}", graph);

    let mut ctx = graph.init_execution_context()?;
    println!("Created an execution context: {:?}", ctx);

    // Read an input tensor shaped to match the graph's expected input
    // (1x3x224x224 f32 — NOTE(review): the fixture name in the original
    // comment, `frozen_inference_graph.xml`, may be stale).
    let input = fs::read("fixture/tensor.bgr")?;
    println!("Read input tensor, size in bytes: {}", input.len());
    ctx.set_input(0, TensorType::F32, &[1, 3, 224, 224], &input)?;

    // Run the inference.
    ctx.compute()?;
    println!("Executed graph inference");

    // Copy out the raw class probabilities and report the top matches.
    let mut scores = vec![0f32; 1001];
    ctx.get_output(0, &mut scores[..])?;

    println!(
        "Found results, sorted top 5: {:?}",
        &sort_results(&scores)[..5]
    );
    Ok(())
}

// Sort the probability buffer in descending order. The graph writes the match
// probability for class N at buffer[N]. It is unclear why the MobileNet output
// indices are "off by one", but the `.skip(1)` below seems necessary to get
// results that make sense (e.g. 763 = "revolver" vs 762 = "restaurant").
fn sort_results(buffer: &[f32]) -> Vec<InferenceResult> {
    let mut results: Vec<InferenceResult> = buffer
        .iter()
        .skip(1)
        .enumerate()
        .map(|(class, prob)| InferenceResult(class, *prob))
        .collect();
    // Use `total_cmp` so a NaN probability cannot panic the sort, which
    // `partial_cmp(..).unwrap()` would do.
    results.sort_by(|a, b| b.1.total_cmp(&a.1));
    results
}

// A wrapper for class ID and match probabilities.
#[derive(Debug, PartialEq)]
struct InferenceResult(usize, f32);
6 changes: 6 additions & 0 deletions crates/wasi-nn/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -29,3 +29,9 @@ thiserror = { workspace = true }

[build-dependencies]
walkdir = { workspace = true }

[dev-dependencies]
cap-std = { workspace = true }
test-programs-artifacts = { workspace = true }
wasmtime-wasi = { workspace = true, features = ["sync"] }
wasmtime = { workspace = true, features = ["cranelift"] }
6 changes: 0 additions & 6 deletions crates/wasi-nn/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,3 @@ An end-to-end example demonstrating ML classification is included in [examples]:
`examples/classification-example` contains a standalone Rust project that uses
the [wasi-nn] APIs and is compiled to the `wasm32-wasi` target using the
high-level `wasi-nn` [bindings].

Run the example from the Wasmtime project directory:

```sh
$ ci/run-wasi-nn-example.sh
```
1 change: 1 addition & 0 deletions crates/wasi-nn/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ mod registry;
pub mod backend;
pub use ctx::{preload, WasiNnCtx};
pub use registry::{GraphRegistry, InMemoryRegistry};
pub mod testing;
pub mod wit;
pub mod witx;

Expand Down
Loading

0 comments on commit 54d3727

Please sign in to comment.