bevy_burn: async compute nodes for Bevy and Burn. Write compute shaders in Burn, with wgpu input and output buffers shared with Bevy's render pipeline.
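For context, running Burn on the wgpu backend outside of Bevy looks roughly like the sketch below. This is a minimal standalone example assuming a recent Burn release with the wgpu feature enabled; it is not part of this crate's API. The Bevy-side integration follows in the full example after it.

use burn::backend::Wgpu;
use burn::tensor::Tensor;

fn main() {
    // The wgpu device Burn dispatches its compute kernels to.
    let device = Default::default();

    // Build a small tensor on the GPU and run an element-wise op on it.
    let input = Tensor::<Wgpu, 1>::from_floats([1.0, 2.0, 3.0], &device);
    let output = input.mul_scalar(2.0);

    println!("{output}");
}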
use bevy::prelude::*;
use bevy_burn::{
    BurnInference,
    BurnModel,
    BurnOutput,
    BurnPlugin,
};
fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .add_plugins(BurnPlugin)
        .add_systems(Startup, setup)
        .add_systems(Update, burn_inference)
        .run();
}
fn setup(
    mut commands: Commands,
    asset_server: Res<AssetServer>,
) {
    // Kick off an async load of the model, then attach the handle and the
    // input component to a fresh entity.
    let model: Handle<BurnModel> = asset_server.load("model.onnx");
    commands.spawn((SomeInput::default(), model));
}
fn burn_inference(
    mut commands: Commands,
    burn_inference: Res<BurnInference>,
    burn_models: Res<Assets<BurnModel>>,
    input_data: Query<
        (Entity, &SomeInput, &Handle<BurnModel>),
        Without<BurnOutput>,
    >,
) {
    // Run inference for every entity that has an input and a loaded model but
    // no output yet, then attach the result so it is not processed again.
    for (entity, input, model_handle) in input_data.iter() {
        if let Some(model) = burn_models.get(model_handle) {
            let output = model.inference(input).unwrap();
            commands.entity(entity).insert(output);
        }
    }
}
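The example leaves SomeInput undefined and assumes the value returned by BurnModel::inference is a BurnOutput component, as the Without<BurnOutput> filter implies. A minimal sketch of the application-side pieces, with hypothetical names, might look like:

use bevy::prelude::*;
use bevy_burn::BurnOutput;

// Hypothetical input component; in practice it would carry whatever data the
// loaded model expects as input.
#[derive(Component, Default)]
struct SomeInput;

// Consume results once an output component has been attached. Added<BurnOutput>
// matches each entity only on the frame its output first appears.
fn read_results(finished: Query<Entity, Added<BurnOutput>>) {
    for entity in finished.iter() {
        info!("inference finished for {entity:?}");
    }
}

Registering read_results with .add_systems(Update, read_results) alongside burn_inference lets the app react to each result exactly once.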