1 unstable release

0.1.0 Dec 28, 2023

#282 in Rendering

MIT license

6KB

bevy_burn

test GitHub License GitHub Last Commit GitHub Releases GitHub Issues Average time to resolve an issue crates.io

Bevy + Burn async compute nodes: write compute shaders in Burn, with wgpu input and output buffers shared with Bevy's render pipeline.

usage

use bevy::prelude::*;
use bevy_burn::{
    BurnInference,
    BurnModel,
    BurnOutput,
    BurnPlugin,
};


fn main() {
    App::build()
        .add_plugins(DefaultPlugins)
        .add_plugin(BurnPlugin)
        .add_system(burn_inference)
        .run();
}

/// Runs Burn inference for every entity that has a `SomeInput` but no
/// `BurnOutput` yet, attaching the resulting output component.
///
/// The model is loaded lazily on the first invocation and cached in a
/// `Local`, which persists across frames for this system.
fn burn_inference(
    mut commands: Commands,
    burn_inference: Res<BurnInference>,
    // `Without<...>` is a query *filter* and belongs in the second type
    // parameter of `Query`, not inside the component tuple — the original
    // form does not compile and could not be destructured as `(entity, input)`.
    input_data: Query<(Entity, &SomeInput), Without<BurnOutput>>,
    // `Local<Option<...>>` so `is_none()` / lazy initialization is well-typed;
    // `Local<T>` alone would require `BurnModel: Default` and has no `is_none`.
    mut model: Local<Option<BurnModel>>,
) {
    // Load the ONNX model once; subsequent frames reuse the cached instance.
    let model = model.get_or_insert_with(|| {
        burn_inference
            .model("model.onnx")
            .expect("failed to load model.onnx")
    });

    for (entity, input) in input_data.iter() {
        // NOTE(review): assumes `inference` is infallible in practice here;
        // a real app should handle the error instead of unwrapping.
        let output = model.inference(input).unwrap();

        // Inserting the output component removes the entity from this
        // query on the next run (it no longer matches `Without<BurnOutput>`).
        commands.entity(entity).insert(output);
    }
}

Dependencies

~51–87MB
~1.5M SLoC