Compare commits
2 Commits: fa37c40f0b ... a5dec86fff

Author | SHA1 | Date
---|---|---
 | a5dec86fff |
 | fad8640bbe |
.gitignore (vendored, 2 changed lines)

@@ -1,5 +1,7 @@
 # Python virtual environment:
 .venv/
+# Python cache:
+__pycache__/
 # IDE project settings:
 .vscode/
 .idea/
@@ -11,6 +11,8 @@ from tensorflow.python.ops.gen_math_ops import mat_mul, sigmoid
 from tensorflow.python.ops.gen_nn_ops import bias_add
 from tensorflow.python.ops.resource_variable_ops import ResourceVariable
 from keras.engine.keras_tensor import KerasTensor
+from keras.api._v2.keras.callbacks import Callback
+from keras.api._v2.keras.constraints import Constraint
 from keras.api._v2.keras.layers import Dense, Input
 from keras.api._v2.keras.models import Model
 
@@ -41,7 +43,16 @@ def simple_layer_test() -> None:
     b_data = np.array([-1.0, 2.0], dtype=F32)
     w_init = partial(init_params, w_data.T)
     b_init = partial(init_params, b_data.T)
-    layer = Dense(units=2, activation='sigmoid', kernel_initializer=w_init, bias_initializer=b_init)(inputs)
+
+    class Const(Constraint):
+        def __init__(self, zero_mask: np.ndarray) -> None:
+            self.mask = zero_mask
+
+        def __call__(self, weights: ResourceVariable) -> ResourceVariable:
+            weights.assign(weights - self.mask * weights)
+            return weights
+
+    layer = Dense(units=2, activation='sigmoid', kernel_initializer=w_init, bias_initializer=b_init, kernel_constraint=Const(w_data.T == 0))(inputs)
     assert isinstance(layer, KerasTensor)
     model = Model(inputs=inputs, outputs=layer)
     w_tensor = model.trainable_variables[0]
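Reviewer note: after every weight update Keras applies the kernel_constraint, so the new Const re-zeroes each kernel entry whose mask bit is set; Const(w_data.T == 0) masks exactly the positions that start out at 0. A minimal numpy sketch of that masking step (the kernel values below are assumed, purely for illustration):

import numpy as np

w = np.array([[1.0, -0.5, 0.0], [3.0, 2.0, -5.0]]).T  # assumed kernel, shaped like w_data.T
mask = (w == 0)                                        # same construction as Const(w_data.T == 0)
w_updated = w + 0.01                                   # pretend an optimizer step nudged every entry
w_constrained = w_updated - mask * w_updated           # what Const.__call__ assigns back
print(w_constrained)                                   # entries that started at 0.0 are 0.0 again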
@@ -50,13 +61,21 @@ def simple_layer_test() -> None:
     assert isinstance(b_tensor, ResourceVariable)
     assert np.equal(w_tensor.numpy().T, w_data).all()
     assert np.equal(b_tensor.numpy().T, b_data).all()
-    model.compile()
+    model.compile(optimizer='adam', loss='categorical_crossentropy')
     x = np.array([[1.0, -2.0, 0.2]], dtype=F32)
     print("input", x[0])
     y = model(x)
     assert isinstance(y, tf.Tensor)
     print("output", np.array(y)[0])
     assert y[0][1] == 0.5
+    samples = np.array([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]], dtype=F32)
+    labels = np.array([[0., 1.], [0., 2.], [3., 0.]], dtype=F32)
+
+    class CB(Callback):
+        def on_train_batch_begin(self, batch, logs=None):
+            print(f"...start of batch {batch}; model weights:")
+            print(self.model.trainable_variables[0].numpy())
+    model.fit(samples, labels, batch_size=1, callbacks=[CB()], verbose=0)
 
 
 def build_model(input_shape: Sequence[int], *layers: tuple[np.ndarray, np.ndarray]) -> Model:
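Reviewer note: the assert y[0][1] == 0.5 follows from the hard-coded test data. Assuming w_data and b_data hold the same numbers that rs_juice/src/drafts.rs hard-codes (w = [[1, -0.5, 0], [3, 2, -5]], b = [-1, 2]), a quick numpy check for the input x = [1, -2, 0.2]:

import numpy as np

x = np.array([1.0, -2.0, 0.2])
w = np.array([[1.0, -0.5, 0.0],
              [3.0, 2.0, -5.0]])  # assumed to match w_data
b = np.array([-1.0, 2.0])         # b_data from the diff above
z = w @ x + b                     # [2.0 - 1.0, -2.0 + 2.0] = [1.0, 0.0]
y = 1.0 / (1.0 + np.exp(-z))      # sigmoid -> [0.731..., 0.5]
print(y)                          # y[1] == 0.5, the asserted value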
@@ -127,5 +146,5 @@ def main(n: int) -> None:
 
 
 if __name__ == '__main__':
-    # simple_layer_test()
-    main(int(sys.argv[1]))
+    simple_layer_test()
+    # main(int(sys.argv[1]))
@@ -1,5 +1,5 @@
 [package]
-name = "feed_forward"
+name = "feed_forward_ndarray"
 version = "0.1.0"
 edition = "2021"
 
rs_juice/Cargo.toml (new file, 16 lines)

@@ -0,0 +1,16 @@
+[package]
+name = "feed_forward_juice"
+version = "0.1.0"
+edition = "2021"
+
+[[bin]]
+name = "feed_forward"
+path = "src/feed_forward.rs"
+
+[dependencies]
+juice = { version = "0.3", features = ["cuda"] }
+coaster = { version = "0.2.0" }
+coaster-blas = { version = "0.4.0" }
+coaster-nn = { version = "0.5.0" }
+csv = "*"
+glob = "*"
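Reviewer note: the [[bin]] table points Cargo at src/feed_forward.rs, so cargo run inside rs_juice builds and runs a binary named feed_forward even though the package itself is named feed_forward_juice.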
rs_juice/src/drafts.rs (new file, 69 lines)

@@ -0,0 +1,69 @@
+// use std::iter::repeat;
+
+use coaster::backend::Backend;
+use coaster::IFramework;
+use coaster::frameworks::cuda::{Cuda, get_cuda_backend};
+use coaster::frameworks::native::{Cpu, Native};
+use coaster::frameworks::native::flatbox::FlatBox;
+use coaster::tensor::SharedTensor;
+use coaster_nn::Sigmoid;
+use coaster_blas::plugin::{Asum, Dot};
+
+
+pub fn write_to_memory<T: Copy>(mem: &mut FlatBox, data: &[T]) {
+    let mem_buffer: &mut [T] = mem.as_mut_slice::<T>();
+    for (index, datum) in data.iter().enumerate() {
+        mem_buffer[index] = *datum;
+    }
+}
+
+
+pub fn test_layer() {
+    let backend: Backend<Cuda> = get_cuda_backend();
+    let mut x: SharedTensor<f32> = SharedTensor::<f32>::new(&(3, 1));
+    let mut w: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 3));
+    let mut b: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
+    let mut result1: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
+    let mut result2: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
+
+    // let x_values: &Vec<f32> = &repeat(0f32).take(x.capacity()).collect::<Vec<f32>>();
+    let x_values: &Vec<f32> = &vec![1f32, -2.0, 0.2];
+    let w_values: &Vec<f32> = &vec![1f32, -0.5, 0.0,
+                                    3.0, 2.0, -5.0];
+    let b_values: &Vec<f32> = &vec![-1f32,
+                                    2.0];
+    println!("x = {:?}", x_values);
+    println!("w = [{:?},\n {:?}]", &w_values[..3], &w_values[3..]);
+    println!("b = {:?}", b_values);
+    println!("w*x+b = [1.0, 0.0]");
+
+    let native: Native = Native::new();
+    let cpu: Cpu = native.new_device(native.hardwares()).unwrap();
+    write_to_memory(x.write_only(&cpu).unwrap(), x_values);
+    write_to_memory(w.write_only(&cpu).unwrap(), w_values);
+    write_to_memory(b.write_only(&cpu).unwrap(), b_values);
+
+    println!("Computing layer...");
+    backend.dot(&w, &x, &mut result1).unwrap();
+    println!("x = {:?}", x.read(&cpu).unwrap().as_slice::<f32>());
+    println!("w = {:?}", w.read(&cpu).unwrap().as_slice::<f32>());
+    println!("w*x = {:?}", result1.read(&cpu).unwrap().as_slice::<f32>());
+    // backend.sigmoid(&result1, &mut result2).unwrap();
+    // println!("y = {:?}", result2.read(&cpu).unwrap().as_slice::<f32>());
+}
+
+pub fn test_example() {
+    let backend: Backend<Cuda> = get_cuda_backend();
+    // Initialize two SharedTensors.
+    let mut x = SharedTensor::<f32>::new(&(1, 1, 3));
+    let mut result = SharedTensor::<f32>::new(&(1, 1, 3));
+    // Fill `x` with some data.
+    let payload: &[f32] = &::std::iter::repeat(1f32).take(x.capacity()).collect::<Vec<f32>>();
+    let native = Native::new();
+    let cpu = native.new_device(native.hardwares()).unwrap();
+    write_to_memory(x.write_only(&cpu).unwrap(), payload); // Write to native host memory.
+    // Run the sigmoid operation, provided by the NN Plugin, on your CUDA enabled GPU.
+    backend.sigmoid(&mut x, &mut result).unwrap();
+    // See the result.
+    println!("{:?}", result.read(&cpu).unwrap().as_slice::<f32>());
+}
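Reviewer note: the printed expectation can be checked by hand from the hard-coded values: w*x = [1*1 + (-0.5)*(-2) + 0*0.2, 3*1 + 2*(-2) + (-5)*0.2] = [2.0, -2.0], so w*x + b = [2.0 - 1.0, -2.0 + 2.0] = [1.0, 0.0], which is what the "w*x+b = [1.0, 0.0]" line advertises; the sigmoid step that would follow is still commented out.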
rs_juice/src/feed_forward.rs (new file, 39 lines)

@@ -0,0 +1,39 @@
+#![allow(dead_code)]
+
+// use std::rc::Rc;
+// use std::sync::{Arc, RwLock};
+//
+// use coaster::backend::Backend;
+// use coaster::IFramework;
+// use coaster::frameworks::cuda::{Cuda, get_cuda_backend};
+// use coaster::frameworks::native::{Cpu, Native};
+// use coaster::tensor::SharedTensor;
+// use juice::layer::{LayerConfig, Layer, ComputeOutput, ILayer};
+// use juice::layers::{LinearConfig, SequentialConfig, Sequential};
+// use juice::util::ArcLock;
+
+// mod drafts;
+// use drafts::write_to_memory;
+mod simple;
+
+
+fn main() {
+    simple::test_simple();
+    // let backend: Rc<Backend<Cuda>> = Rc::new(get_cuda_backend());
+    // let native: Native = Native::new();
+    // let cpu: Cpu = native.new_device(native.hardwares()).unwrap();
+    //
+    // let mut net_cfg: SequentialConfig = SequentialConfig::default();
+    // net_cfg.add_input("data", &vec![1, 3, 1]);
+    // net_cfg.add_layer(
+    //     LayerConfig::new("linear1", LinearConfig { output_size: 2 })
+    // );
+    // let mut net = Sequential::from_config(backend.clone(), &net_cfg);
+    //
+    // let mut x: SharedTensor<f32> = SharedTensor::<f32>::new(&(3, 1));
+    // let x_values: &Vec<f32> = &vec![1f32, -2.0, 0.2];
+    // write_to_memory(x.write_only(&cpu).unwrap(), x_values);
+    // let x_lock: ArcLock<SharedTensor<f32>> = Arc::new(RwLock::new(x));
+    //
+    // net.forward(&backend, &[x_lock],)
+}
rs_juice/src/simple.rs (new file, 43 lines)

@@ -0,0 +1,43 @@
+use coaster::backend::Backend;
+use coaster::IFramework;
+use coaster::frameworks::cuda::{Cuda, get_cuda_backend};
+use coaster::frameworks::native::{Cpu, Native};
+use coaster::frameworks::native::flatbox::FlatBox;
+use coaster::tensor::SharedTensor;
+use coaster_blas::plugin::Dot;
+
+
+pub fn write_to_memory<T: Copy>(mem: &mut FlatBox, data: &[T]) {
+    let mem_buffer: &mut [T] = mem.as_mut_slice::<T>();
+    for (index, datum) in data.iter().enumerate() {
+        mem_buffer[index] = *datum;
+    }
+}
+
+
+pub fn test_simple() {
+    let backend: Backend<Cuda> = get_cuda_backend();
+    let native: Native = Native::new();
+    let cpu: Cpu = native.new_device(native.hardwares()).unwrap();
+
+    let mut x: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
+    let mut w: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 2));
+    let mut wx: SharedTensor<f32> = SharedTensor::<f32>::new(&(2, 1));
+
+    let x_values: &Vec<f32> = &vec![1f32, 2.0];
+    let w_values: &Vec<f32> = &vec![1f32, 1.0,
+                                    3.0, -1.0];
+
+    println!("Data:");
+    println!("x = {:?}", x_values);
+    println!("w = [{:?},\n {:?}]", &w_values[..2], &w_values[2..]);
+    println!("Expected result:");
+    println!("w*x = [3.0, 1.0]");
+
+    write_to_memory(x.write_only(&cpu).unwrap(), x_values);
+    write_to_memory(w.write_only(&cpu).unwrap(), w_values);
+
+    backend.dot(&w, &x, &mut wx).unwrap();
+    println!("Actual result:");
+    println!("w*x = {:?}", wx.read(&cpu).unwrap().as_slice::<f32>());
+}
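Reviewer note: the expected output can be checked by hand: with w = [[1, 1], [3, -1]] and x = [1, 2], w*x = [1*1 + 1*2, 3*1 + (-1)*2] = [3.0, 1.0].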