Try out `juice` crate; ignore `__pycache__`

Daniil Fajnberg 2023-08-02 19:58:57 +02:00
parent fad8640bbe
commit a5dec86fff
Signed by: daniil-berg
GPG Key ID: BE187C50903BEE97
5 changed files with 169 additions and 0 deletions

2
.gitignore vendored

@@ -1,5 +1,7 @@
# Python virtual environment:
.venv/
# Python cache:
__pycache__/
# IDE project settings:
.vscode/
.idea/

16
rs_juice/Cargo.toml Normal file

@@ -0,0 +1,16 @@
[package]
name = "feed_forward_juice"
version = "0.1.0"
edition = "2021"

[[bin]]
name = "feed_forward"
path = "src/feed_forward.rs"

[dependencies]
juice = { version = "0.3", features = ["cuda"] }
coaster = { version = "0.2.0" }
coaster-blas = { version = "0.4.0" }
coaster-nn = { version = "0.5.0" }
csv = "*"
glob = "*"

69
rs_juice/src/drafts.rs Normal file

@@ -0,0 +1,69 @@
// use std::iter::repeat;
use coaster::backend::Backend;
use coaster::IFramework;
use coaster::frameworks::cuda::{Cuda, get_cuda_backend};
use coaster::frameworks::native::{Cpu, Native};
use coaster::frameworks::native::flatbox::FlatBox;
use coaster::tensor::SharedTensor;
use coaster_nn::Sigmoid;
use coaster_blas::plugin::{Asum, Dot};
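
// Copies a slice of host-side data element by element into a coaster `FlatBox`,
// the flat memory buffer backing a tensor on the native CPU device.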
pub fn write_to_memory<T: Copy>(mem: &mut FlatBox, data: &[T]) {
    let mem_buffer: &mut [T] = mem.as_mut_slice::<T>();
    for (index, datum) in data.iter().enumerate() {
        mem_buffer[index] = *datum;
    }
}

pub fn test_layer() {
    let backend: Backend<Cuda> = get_cuda_backend();
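
    // Column vector x (3x1), weight matrix w (2x3), bias b (2x1), plus two (2x1)
    // result tensors for the affine transform and the (commented-out) activation.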
    let mut x: SharedTensor<f32> = SharedTensor::<f32>::new(&(3, 1));
    let mut w: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 3));
    let mut b: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
    let mut result1: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
    let mut result2: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));

    // let x_values: &Vec<f32> = &repeat(0f32).take(x.capacity()).collect::<Vec<f32>>();
    let x_values: &Vec<f32> = &vec![1f32, -2.0, 0.2];
    let w_values: &Vec<f32> = &vec![1f32, -0.5, 0.0,
                                    3.0, 2.0, -5.0];
    let b_values: &Vec<f32> = &vec![-1f32,
                                    2.0];

    println!("x = {:?}", x_values);
    println!("w = [{:?},\n {:?}]", &w_values[..3], &w_values[3..]);
    println!("b = {:?}", b_values);
println!("w*x+b = [1.0, 0.0]");
let native: Native = Native::new();
let cpu: Cpu = native.new_device(native.hardwares()).unwrap();
write_to_memory(x.write_only(&cpu).unwrap(), x_values);
write_to_memory(w.write_only(&cpu).unwrap(), w_values);
write_to_memory(b.write_only(&cpu).unwrap(), b_values);
println!("Computing layer...");
    backend.dot(&w, &x, &mut result1).unwrap();
    println!("x = {:?}", x.read(&cpu).unwrap().as_slice::<f32>());
    println!("w = {:?}", w.read(&cpu).unwrap().as_slice::<f32>());
    println!("w*x = {:?}", result1.read(&cpu).unwrap().as_slice::<f32>());

    // backend.sigmoid(&result1, &mut result2).unwrap();
    // println!("y = {:?}", result2.read(&cpu).unwrap().as_slice::<f32>());
}

pub fn test_example() {
    let backend: Backend<Cuda> = get_cuda_backend();
    // Initialize two SharedTensors.
    let mut x = SharedTensor::<f32>::new(&(1, 1, 3));
    let mut result = SharedTensor::<f32>::new(&(1, 1, 3));
    // Fill `x` with some data.
    let payload: &[f32] = &::std::iter::repeat(1f32).take(x.capacity()).collect::<Vec<f32>>();
    let native = Native::new();
    let cpu = native.new_device(native.hardwares()).unwrap();
    write_to_memory(x.write_only(&cpu).unwrap(), payload); // Write to native host memory.
    // Run the sigmoid operation, provided by the NN Plugin, on your CUDA enabled GPU.
    backend.sigmoid(&mut x, &mut result).unwrap();
    // See the result.
    println!("{:?}", result.read(&cpu).unwrap().as_slice::<f32>());
}

39
rs_juice/src/feed_forward.rs Normal file

@@ -0,0 +1,39 @@
#![allow(dead_code)]
// use std::rc::Rc;
// use std::sync::{Arc, RwLock};
//
// use coaster::backend::Backend;
// use coaster::IFramework;
// use coaster::frameworks::cuda::{Cuda, get_cuda_backend};
// use coaster::frameworks::native::{Cpu, Native};
// use coaster::tensor::SharedTensor;
// use juice::layer::{LayerConfig, Layer, ComputeOutput, ILayer};
// use juice::layers::{LinearConfig, SequentialConfig, Sequential};
// use juice::util::ArcLock;
// mod drafts;
// use drafts::write_to_memory;
mod simple;

fn main() {
    simple::test_simple();

    // let backend: Rc<Backend<Cuda>> = Rc::new(get_cuda_backend());
    // let native: Native = Native::new();
    // let cpu: Cpu = native.new_device(native.hardwares()).unwrap();
    //
    // let mut net_cfg: SequentialConfig = SequentialConfig::default();
    // net_cfg.add_input("data", &vec![1, 3, 1]);
    // net_cfg.add_layer(
    //     LayerConfig::new("linear1", LinearConfig { output_size: 2 })
    // );
    // let mut net = Sequential::from_config(backend.clone(), &net_cfg);
    //
    // let mut x: SharedTensor<f32> = SharedTensor::<f32>::new(&(3, 1));
    // let x_values: &Vec<f32> = &vec![1f32, -2.0, 0.2];
    // write_to_memory(x.write_only(&cpu).unwrap(), x_values);
    // let x_lock: ArcLock<SharedTensor<f32>> = Arc::new(RwLock::new(x));
    //
    // net.forward(&backend, &[x_lock],)
}

43
rs_juice/src/simple.rs Normal file

@@ -0,0 +1,43 @@
use coaster::backend::Backend;
use coaster::IFramework;
use coaster::frameworks::cuda::{Cuda, get_cuda_backend};
use coaster::frameworks::native::{Cpu, Native};
use coaster::frameworks::native::flatbox::FlatBox;
use coaster::tensor::SharedTensor;
use coaster_blas::plugin::Dot;

pub fn write_to_memory<T: Copy>(mem: &mut FlatBox, data: &[T]) {
    let mem_buffer: &mut [T] = mem.as_mut_slice::<T>();
    for (index, datum) in data.iter().enumerate() {
        mem_buffer[index] = *datum;
    }
}

pub fn test_simple() {
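    // CUDA backend for the computation; the native CPU device is needed to write
    // input data into (and read results back out of) host memory.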
    let backend: Backend<Cuda> = get_cuda_backend();
    let native: Native = Native::new();
    let cpu: Cpu = native.new_device(native.hardwares()).unwrap();

    let mut x: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
    let mut w: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 2));
    let mut wx: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));

    let x_values: &Vec<f32> = &vec![1f32, 2.0];
    let w_values: &Vec<f32> = &vec![1f32, 1.0,
                                    3.0, -1.0];

    println!("Data:");
    println!("x = {:?}", x_values);
    println!("w = [{:?},\n {:?}]", &w_values[..2], &w_values[2..]);

    println!("Expected result:");
println!("w*x = [3.0, 1.0]");
write_to_memory(x.write_only(&cpu).unwrap(), x_values);
write_to_memory(w.write_only(&cpu).unwrap(), w_values);
backend.dot(&w, &x, &mut wx).unwrap();
println!("Actual result:");
println!("w*x = {:?}", wx.read(&cpu).unwrap().as_slice::<f32>());
}