pub struct NeuralNet<T, A>
where
    T: Criterion,
    A: OptimAlgorithm<BaseNeuralNet<T>>,
{ /* private fields */ }
Neural Network Model
The Neural Network struct specifies a Criterion and a gradient descent algorithm.
Implementations
impl NeuralNet<BCECriterion, StochasticGD>
pub fn default(layer_sizes: &[usize]) -> NeuralNet<BCECriterion, StochasticGD>
Creates a neural network with the specified layer sizes.
The layer sizes slice should include the input, hidden layers, and output layer sizes.
Uses the default settings (stochastic gradient descent and sigmoid activation function).
Examples
use rusty_machine::learning::nnet::NeuralNet;
// Create a neural net with 4 layers, 3 neurons in each.
let layers = &[3; 4];
let mut net = NeuralNet::default(layers);
impl<T, A> NeuralNet<T, A>
where
    T: Criterion,
    A: OptimAlgorithm<BaseNeuralNet<T>>,
pub fn new(criterion: T, alg: A) -> NeuralNet<T, A>
Creates a new neural network with no layers.
Examples
use rusty_machine::learning::nnet::BCECriterion;
use rusty_machine::learning::nnet::NeuralNet;
use rusty_machine::learning::optim::grad_desc::StochasticGD;
// Create an empty neural net
let mut net = NeuralNet::new(BCECriterion::default(), StochasticGD::default());
pub fn mlp<U>(
    layer_sizes: &[usize],
    criterion: T,
    alg: A,
    activ_fn: U
) -> NeuralNet<T, A>
where
    U: ActivationFunc + 'static,
Create a multilayer perceptron with the specified layer sizes.
The layer sizes slice should include the input, hidden layers, and output layer sizes. The type of activation function must be specified.
Currently defaults to simple batch Gradient Descent for optimization.
Examples
use rusty_machine::learning::nnet::BCECriterion;
use rusty_machine::learning::nnet::NeuralNet;
use rusty_machine::learning::toolkit::activ_fn::Sigmoid;
use rusty_machine::learning::optim::grad_desc::StochasticGD;
// Create a neural net with 4 layers, 3 neurons in each.
let layers = &[3; 4];
let mut net = NeuralNet::mlp(layers, BCECriterion::default(), StochasticGD::default(), Sigmoid);
pub fn add<'a>(&'a mut self, layer: Box<dyn NetLayer>) -> &'a mut NeuralNet<T, A>
Adds the specified layer to the end of the network.
Examples
use rusty_machine::learning::nnet::BCECriterion;
use rusty_machine::learning::nnet::NeuralNet;
use rusty_machine::learning::nnet::net_layer::Linear;
use rusty_machine::learning::optim::grad_desc::StochasticGD;
// Create a new neural net
let mut net = NeuralNet::new(BCECriterion::default(), StochasticGD::default());
// Give net an input layer of size 3, hidden layer of size 4, and output layer of size 5
// This net will not apply any activation function to the Linear layer outputs
net.add(Box::new(Linear::new(3, 4)))
.add(Box::new(Linear::new(4, 5)));
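Because activation functions such as Sigmoid also implement NetLayer (as the add_layers example below shows), non-linearities can be interleaved with Linear layers in the same chained style. A minimal sketch of that pattern, using the same constructors as the example above:
use rusty_machine::learning::nnet::BCECriterion;
use rusty_machine::learning::nnet::NeuralNet;
use rusty_machine::learning::nnet::net_layer::Linear;
use rusty_machine::learning::toolkit::activ_fn::Sigmoid;
use rusty_machine::learning::optim::grad_desc::StochasticGD;
let mut net = NeuralNet::new(BCECriterion::default(), StochasticGD::default());
// Interleave Sigmoid activation layers between the Linear layers
net.add(Box::new(Linear::new(3, 4)))
   .add(Box::new(Sigmoid))
   .add(Box::new(Linear::new(4, 5)))
   .add(Box::new(Sigmoid));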
pub fn add_layers<'a, U>(&'a mut self, layers: U) -> &'a mut NeuralNet<T, A>
where
    U: IntoIterator<Item = Box<dyn NetLayer>>,
Adds multiple layers to the end of the network.
Examples
use rusty_machine::learning::nnet::BCECriterion;
use rusty_machine::learning::nnet::NeuralNet;
use rusty_machine::learning::nnet::net_layer::{NetLayer, Linear};
use rusty_machine::learning::toolkit::activ_fn::Sigmoid;
use rusty_machine::learning::optim::grad_desc::StochasticGD;
// Create a new neural net
let mut net = NeuralNet::new(BCECriterion::default(), StochasticGD::default());
let linear_sig: Vec<Box<dyn NetLayer>> = vec![Box::new(Linear::new(5, 5)), Box::new(Sigmoid)];
// Give net a layer of size 5, followed by a Sigmoid activation function
net.add_layers(linear_sig);
pub fn get_net_weights(&self, idx: usize) -> MatrixSlice<'_, f64>
Gets the matrix of weights between the specified layer and the layer that follows it.
Examples
use rusty_machine::linalg::BaseMatrix;
use rusty_machine::learning::nnet::NeuralNet;
// Create a neural net with 4 layers, 3 neurons in each.
let layers = &[3; 4];
let mut net = NeuralNet::default(layers);
let w = &net.get_net_weights(2);
// A bias term is added, so the weights between two layers of size 3 form a 4x3 matrix
assert_eq!(w.rows(), 4);
assert_eq!(w.cols(), 3);
Trait Implementations
impl<T: Debug, A: Debug> Debug for NeuralNet<T, A>
where
    T: Criterion,
    A: OptimAlgorithm<BaseNeuralNet<T>>,
impl<T, A> SupModel<Matrix<f64>, Matrix<f64>> for NeuralNet<T, A>
where
    T: Criterion,
    A: OptimAlgorithm<BaseNeuralNet<T>>,
Supervised learning for the Neural Network.
The model is trained using back propagation.
fn predict(&self, inputs: &Matrix<f64>) -> LearningResult<Matrix<f64>>
Predict neural network output using forward propagation.
fn train(
    &mut self,
    inputs: &Matrix<f64>,
    targets: &Matrix<f64>
) -> LearningResult<()>
Train the model using gradient optimization and back propagation.
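Since train and predict carry no example of their own here, a minimal end-to-end sketch may help; the layer sizes and data values below are illustrative only, and the SupModel trait must be in scope for the method calls to resolve:
use rusty_machine::learning::nnet::NeuralNet;
use rusty_machine::learning::SupModel;
use rusty_machine::linalg::Matrix;
// Three inputs, two outputs, one hidden layer of size 5 (illustrative sizes)
let mut net = NeuralNet::default(&[3, 5, 2]);
// Four training samples (rows), with illustrative values
let inputs = Matrix::new(4, 3, vec![0.0, 0.0, 1.0,
                                    0.0, 1.0, 0.0,
                                    1.0, 0.0, 0.0,
                                    1.0, 1.0, 1.0]);
let targets = Matrix::new(4, 2, vec![1.0, 0.0,
                                     1.0, 0.0,
                                     0.0, 1.0,
                                     0.0, 1.0]);
// Train with back propagation, then predict via forward propagation
net.train(&inputs, &targets).unwrap();
let outputs = net.predict(&inputs).unwrap();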