Try out juice crate; ignore __pycache__
rs_juice/src/drafts.rs | 69 lines | Normal file
@@ -0,0 +1,69 @@
// use std::iter::repeat;

use coaster::backend::Backend;
use coaster::IFramework;
use coaster::frameworks::cuda::{Cuda, get_cuda_backend};
use coaster::frameworks::native::{Cpu, Native};
use coaster::frameworks::native::flatbox::FlatBox;
use coaster::tensor::SharedTensor;
use coaster_nn::Sigmoid;
use coaster_blas::plugin::{Asum, Dot};

/// Copy a host slice element-by-element into a native FlatBox buffer.
pub fn write_to_memory<T: Copy>(mem: &mut FlatBox, data: &[T]) {
    let mem_buffer: &mut [T] = mem.as_mut_slice::<T>();
    for (index, datum) in data.iter().enumerate() {
        mem_buffer[index] = *datum;
    }
}
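A side note on the helper: the indexed loop does the job, but the standard library's slice `copy_from_slice` performs the same copy in one call, with the same panic-on-overrun behavior. An equivalent sketch, assuming the imports above (the name `write_to_memory_copy` is made up for illustration):

pub fn write_to_memory_copy<T: Copy>(mem: &mut FlatBox, data: &[T]) {
    // Panics if the FlatBox holds fewer than `data.len()` elements,
    // just like the indexed loop above would.
    mem.as_mut_slice::<T>()[..data.len()].copy_from_slice(data);
}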

pub fn test_layer() {
    let backend: Backend<Cuda> = get_cuda_backend();
    // Shapes: x is a 3x1 input, w a 2x3 weight matrix, b a 2x1 bias;
    // result1/result2 hold the 2x1 outputs.
    let mut x: SharedTensor<f32> = SharedTensor::<f32>::new(&(3, 1));
    let mut w: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 3));
    let mut b: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
    let mut result1: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
    let mut result2: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));

    // let x_values: &Vec<f32> = &repeat(0f32).take(x.capacity()).collect::<Vec<f32>>();
    let x_values: &Vec<f32> = &vec![1f32, -2.0, 0.2];
    let w_values: &Vec<f32> = &vec![1f32, -0.5, 0.0,
                                    3.0, 2.0, -5.0];
    let b_values: &Vec<f32> = &vec![-1f32,
                                    2.0];
    println!("x = {:?}", x_values);
    println!("w = [{:?},\n {:?}]", &w_values[..3], &w_values[3..]);
    println!("b = {:?}", b_values);
    println!("w*x+b = [1.0, 0.0]"); // expected result, worked out by hand

    // Stage the values in native (host) memory.
    let native: Native = Native::new();
    let cpu: Cpu = native.new_device(native.hardwares()).unwrap();
    write_to_memory(x.write_only(&cpu).unwrap(), x_values);
    write_to_memory(w.write_only(&cpu).unwrap(), w_values);
    write_to_memory(b.write_only(&cpu).unwrap(), b_values);

    println!("Computing layer...");
    // NOTE: coaster-blas `Dot` is the BLAS level-1 vector dot product, so
    // `w` and `x` are treated as flat buffers here, not as a matrix and vector.
    backend.dot(&w, &x, &mut result1).unwrap();
    println!("x = {:?}", x.read(&cpu).unwrap().as_slice::<f32>());
    println!("w = {:?}", w.read(&cpu).unwrap().as_slice::<f32>());
    println!("w*x = {:?}", result1.read(&cpu).unwrap().as_slice::<f32>());
    // backend.sigmoid(&result1, &mut result2).unwrap();
    // println!("y = {:?}", result2.read(&cpu).unwrap().as_slice::<f32>());
}
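The hardcoded expectation above checks out by hand: w*x = [1*1 + (-0.5)*(-2) + 0.0*0.2, 3*1 + 2*(-2) + (-5.0)*0.2] = [2, -2], and adding b = [-1, 2] gives [1, 0]. The same arithmetic in stand-alone Rust, with no coaster or GPU involved (this `main` is illustrative only, not part of the commit):

fn main() {
    let x = [1.0f32, -2.0, 0.2];
    let w = [[1.0f32, -0.5, 0.0], [3.0, 2.0, -5.0]];
    let b = [-1.0f32, 2.0];
    // y[i] = sum_j w[i][j] * x[j] + b[i]
    let y: Vec<f32> = (0..2)
        .map(|i| w[i].iter().zip(&x).map(|(wi, xi)| wi * xi).sum::<f32>() + b[i])
        .collect();
    println!("w*x+b = {:?}", y); // prints [1.0, 0.0]
}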

pub fn test_example() {
    let backend: Backend<Cuda> = get_cuda_backend();
    // Initialize two SharedTensors.
    let mut x = SharedTensor::<f32>::new(&(1, 1, 3));
    let mut result = SharedTensor::<f32>::new(&(1, 1, 3));
    // Fill `x` with some data.
    let payload: &[f32] = &::std::iter::repeat(1f32).take(x.capacity()).collect::<Vec<f32>>();
    let native = Native::new();
    let cpu = native.new_device(native.hardwares()).unwrap();
    write_to_memory(x.write_only(&cpu).unwrap(), payload); // Write to native host memory.
    // Run the sigmoid operation, provided by the NN Plugin, on your CUDA enabled GPU.
    backend.sigmoid(&mut x, &mut result).unwrap();
    // See the result.
    println!("{:?}", result.read(&cpu).unwrap().as_slice::<f32>());
}
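For completeness, these drafts could be exercised from a binary crate along the following lines (a sketch assuming drafts.rs lives alongside main.rs in rs_juice and a CUDA device is available; none of this is part of the commit):

// main.rs (hypothetical)
mod drafts;

fn main() {
    // README-style smoke test: sigmoid over an all-ones tensor.
    drafts::test_example();
    // Layer draft: prints x, w, and the dot result.
    drafts::test_layer();
}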
|