diff --git a/.gitignore b/.gitignore
index 6626a9a..2d487b3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,7 @@
 # Python virtual environment:
 .venv/
+# Python cache:
+__pycache__/
 # IDE project settings:
 .vscode/
 .idea/
diff --git a/rs_juice/Cargo.toml b/rs_juice/Cargo.toml
new file mode 100644
index 0000000..f11a407
--- /dev/null
+++ b/rs_juice/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "feed_forward_juice"
+version = "0.1.0"
+edition = "2021"
+
+[[bin]]
+name = "feed_forward"
+path = "src/feed_forward.rs"
+
+[dependencies]
+juice = { version = "0.3", features = ["cuda"] }
+coaster = { version = "0.2.0" }
+coaster-blas = { version = "0.4.0" }
+coaster-nn = { version = "0.5.0" }
+csv = "*"
+glob = "*"
diff --git a/rs_juice/src/drafts.rs b/rs_juice/src/drafts.rs
new file mode 100644
index 0000000..1022a57
--- /dev/null
+++ b/rs_juice/src/drafts.rs
@@ -0,0 +1,69 @@
+// use std::iter::repeat;
+
+use coaster::backend::Backend;
+use coaster::IFramework;
+use coaster::frameworks::cuda::{Cuda, get_cuda_backend};
+use coaster::frameworks::native::{Cpu, Native};
+use coaster::frameworks::native::flatbox::FlatBox;
+use coaster::tensor::SharedTensor;
+use coaster_nn::Sigmoid;
+use coaster_blas::plugin::{Asum, Dot};
+
+
+pub fn write_to_memory<T: Copy>(mem: &mut FlatBox, data: &[T]) {
+    let mem_buffer: &mut [T] = mem.as_mut_slice::<T>();
+    for (index, datum) in data.iter().enumerate() {
+        mem_buffer[index] = *datum;
+    }
+}
+
+
+pub fn test_layer() {
+    let backend: Backend<Cuda> = get_cuda_backend();
+    let mut x: SharedTensor<f32> = SharedTensor::<f32>::new(&(3, 1));
+    let mut w: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 3));
+    let mut b: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
+    let mut result1: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
+    let mut result2: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
+
+    // let x_values: &Vec<f32> = &repeat(0f32).take(x.capacity()).collect::<Vec<f32>>();
+    let x_values: &Vec<f32> = &vec![1f32, -2.0, 0.2];
+    let w_values: &Vec<f32> = &vec![1f32, -0.5, 0.0,
+                                    3.0, 2.0, -5.0];
+    let b_values: &Vec<f32> = &vec![-1f32,
+                                    2.0];
+    println!("x = {:?}", x_values);
+    println!("w = [{:?},\n {:?}]", &w_values[..3], &w_values[3..]);
+    println!("b = {:?}", b_values);
+    println!("w*x+b = [1.0, 0.0]");
+
+    let native: Native = Native::new();
+    let cpu: Cpu = native.new_device(native.hardwares()).unwrap();
+    write_to_memory(x.write_only(&cpu).unwrap(), x_values);
+    write_to_memory(w.write_only(&cpu).unwrap(), w_values);
+    write_to_memory(b.write_only(&cpu).unwrap(), b_values);
+
+    println!("Computing layer...");
+    backend.dot(&w, &x, &mut result1).unwrap();
+    println!("x = {:?}", x.read(&cpu).unwrap().as_slice::<f32>());
+    println!("w = {:?}", w.read(&cpu).unwrap().as_slice::<f32>());
+    println!("w*x = {:?}", result1.read(&cpu).unwrap().as_slice::<f32>());
+    // backend.sigmoid(&result1, &mut result2).unwrap();
+    // println!("y = {:?}", result2.read(&cpu).unwrap().as_slice::<f32>());
+}
+
+pub fn test_example() {
+    let backend: Backend<Cuda> = get_cuda_backend();
+    // Initialize two SharedTensors.
+    let mut x = SharedTensor::<f32>::new(&(1, 1, 3));
+    let mut result = SharedTensor::<f32>::new(&(1, 1, 3));
+    // Fill `x` with some data.
+    let payload: &[f32] = &::std::iter::repeat(1f32).take(x.capacity()).collect::<Vec<f32>>();
+    let native = Native::new();
+    let cpu = native.new_device(native.hardwares()).unwrap();
+    write_to_memory(x.write_only(&cpu).unwrap(), payload); // Write to native host memory.
+    // Run the sigmoid operation, provided by the NN Plugin, on your CUDA enabled GPU.
+    backend.sigmoid(&mut x, &mut result).unwrap();
+    // See the result.
+    println!("{:?}", result.read(&cpu).unwrap().as_slice::<f32>());
+}
diff --git a/rs_juice/src/feed_forward.rs b/rs_juice/src/feed_forward.rs
new file mode 100644
index 0000000..ae8f445
--- /dev/null
+++ b/rs_juice/src/feed_forward.rs
@@ -0,0 +1,39 @@
+#![allow(dead_code)]
+
+// use std::rc::Rc;
+// use std::sync::{Arc, RwLock};
+//
+// use coaster::backend::Backend;
+// use coaster::IFramework;
+// use coaster::frameworks::cuda::{Cuda, get_cuda_backend};
+// use coaster::frameworks::native::{Cpu, Native};
+// use coaster::tensor::SharedTensor;
+// use juice::layer::{LayerConfig, Layer, ComputeOutput, ILayer};
+// use juice::layers::{LinearConfig, SequentialConfig, Sequential};
+// use juice::util::ArcLock;
+
+// mod drafts;
+// use drafts::write_to_memory;
+mod simple;
+
+
+fn main() {
+    simple::test_simple();
+    // let backend: Rc<Backend<Cuda>> = Rc::new(get_cuda_backend());
+    // let native: Native = Native::new();
+    // let cpu: Cpu = native.new_device(native.hardwares()).unwrap();
+    //
+    // let mut net_cfg: SequentialConfig = SequentialConfig::default();
+    // net_cfg.add_input("data", &vec![1, 3, 1]);
+    // net_cfg.add_layer(
+    //     LayerConfig::new("linear1", LinearConfig { output_size: 2 })
+    // );
+    // let mut net = Sequential::from_config(backend.clone(), &net_cfg);
+    //
+    // let mut x: SharedTensor<f32> = SharedTensor::<f32>::new(&(3, 1));
+    // let x_values: &Vec<f32> = &vec![1f32, -2.0, 0.2];
+    // write_to_memory(x.write_only(&cpu).unwrap(), x_values);
+    // let x_lock: ArcLock<SharedTensor<f32>> = Arc::new(RwLock::new(x));
+    //
+    // net.forward(&backend, &[x_lock],)
+}
diff --git a/rs_juice/src/simple.rs b/rs_juice/src/simple.rs
new file mode 100644
index 0000000..a59e0b1
--- /dev/null
+++ b/rs_juice/src/simple.rs
@@ -0,0 +1,43 @@
+use coaster::backend::Backend;
+use coaster::IFramework;
+use coaster::frameworks::cuda::{Cuda, get_cuda_backend};
+use coaster::frameworks::native::{Cpu, Native};
+use coaster::frameworks::native::flatbox::FlatBox;
+use coaster::tensor::SharedTensor;
+use coaster_blas::plugin::Dot;
+
+
+pub fn write_to_memory<T: Copy>(mem: &mut FlatBox, data: &[T]) {
+    let mem_buffer: &mut [T] = mem.as_mut_slice::<T>();
+    for (index, datum) in data.iter().enumerate() {
+        mem_buffer[index] = *datum;
+    }
+}
+
+
+pub fn test_simple() {
+    let backend: Backend<Cuda> = get_cuda_backend();
+    let native: Native = Native::new();
+    let cpu: Cpu = native.new_device(native.hardwares()).unwrap();
+
+    let mut x: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
+    let mut w: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 2));
+    let mut wx: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
+
+    let x_values: &Vec<f32> = &vec![1f32, 2.0];
+    let w_values: &Vec<f32> = &vec![1f32, 1.0,
+                                    3.0, -1.0];
+
+    println!("Data:");
+    println!("x = {:?}", x_values);
+    println!("w = [{:?},\n {:?}]", &w_values[..2], &w_values[2..]);
+    println!("Expected result:");
+    println!("w*x = [3.0, 1.0]");
+
+    write_to_memory(x.write_only(&cpu).unwrap(), x_values);
+    write_to_memory(w.write_only(&cpu).unwrap(), w_values);
+
+    backend.dot(&w, &x, &mut wx).unwrap();
+    println!("Actual result:");
+    println!("w*x = {:?}", wx.read(&cpu).unwrap().as_slice::<f32>());
+}
\ No newline at end of file