1 unstable release
0.1.0 | Dec 28, 2023 |
---|
#14 in #burn
6KB
bevy_burn
Bevy Burn async compute nodes. Write compute shaders in Burn, with wgpu input and output buffers shared with Bevy's render pipeline.
Usage
use bevy::prelude::*;
use bevy_burn::{
BurnInference,
BurnModel,
BurnPlugin,
};
/// Application entry point: installs the default Bevy plugins plus the
/// Burn plugin, and registers the inference system to run each frame.
fn main() {
    // FIX: `App::build()` was removed in Bevy 0.6; `App::new()` is the
    // constructor that matches the `add_plugin`/`add_system` API used below.
    App::new()
        .add_plugins(DefaultPlugins)
        .add_plugin(BurnPlugin)
        .add_system(burn_inference)
        .run();
}
/// Bevy system: for every entity that has a `SomeInput` component but no
/// `BurnOutput` yet, runs model inference on the input and inserts the
/// resulting output component on the entity.
fn burn_inference(
    mut commands: Commands,
    burn_inference: Res<BurnInference>,
    // FIX: `Without<BurnOutput>` is a query *filter*, so it belongs in the
    // second type parameter of `Query`, not inside the fetched tuple.
    // (The original three-element tuple also contradicted the two-element
    // destructure in the loop below.)
    input_data: Query<(Entity, &SomeInput), Without<BurnOutput>>,
    mut model: Local<BurnModel>,
) {
    // Lazily load the ONNX model the first time the system runs.
    // NOTE(review): assumes `BurnModel` is `Option`-like (exposes `is_none`
    // and is assignable from `BurnInference::model(..)`) — confirm against
    // bevy_burn's actual API.
    if model.is_none() {
        *model = burn_inference.model("model.onnx").unwrap();
    }
    // Process every not-yet-inferred input; inserting `BurnOutput` removes
    // the entity from this query on subsequent frames.
    for (entity, input) in input_data.iter() {
        let output = model.inference(input).unwrap();
        commands.entity(entity).insert(output);
    }
}
Dependencies
~48–83MB
~1.5M SLoC