76 printf("Error setting layer vptr for type: %d\n", l->type);
90 const double orig = l->eta;
148 for (int i = old_n_weights; i < l->n_weights; ++i) {
158 for (int i = old_n_outputs; i < l->n_outputs; ++i) {
177 const double mu_disable)
210 for (int j = 0; j < l->n_inputs; ++j) {
224 for (int i = 0; i < l->n_inputs; ++i) {
257 const double orig = l->weights[i];
265 for (int i = 0; i < l->n_biases; ++i) {
266 const double orig = l->biases[i];
269 if (l->biases[i] != orig) {
312 printf("%s\n", json_str);
326 cJSON *json = cJSON_CreateObject();
327 cJSON_AddNumberToObject(json, "n_weights", l->n_weights);
328 if (return_weights) {
330 cJSON_AddItemToObject(json, "weights", weights);
332 cJSON_AddNumberToObject(json, "n_biases", l->n_biases);
333 if (return_weights) {
335 cJSON_AddItemToObject(json, "biases", biases);
337 cJSON_AddNumberToObject(json, "n_active", l->n_active);
338 char *string = cJSON_Print(json);
355 for (int i = 0; i < l->n_biases; ++i) {
374 for (int i = 0; i < l->n_biases; ++i) {
510 printf("layer_type_as_string(): invalid type: %d\n", type);
572 printf("layer_type_as_int(): invalid type: %s\n", type);
584 printf("layer_guard_biases() invalid size\n");
598 printf("layer_guard_outputs() invalid size\n");
612 printf("layer_guard_weights() invalid size\n");
Neural network activation functions.
#define NUM_ACTIVATIONS
Number of activations available.
void layer_guard_biases(const struct Layer *l)
Check number of biases is within bounds.
bool layer_mutate_connectivity(struct Layer *l, const double mu_enable, const double mu_disable)
Mutates a layer's connectivity by zeroing weights.
void layer_weight_print(const struct Layer *l, const bool print_weights)
Prints a layer's weights and biases.
const char * layer_type_as_string(const int type)
Returns a string representation of a layer type from an integer.
void layer_defaults(struct Layer *l)
Initialises a layer to default values.
int layer_mutate_neurons(const struct Layer *l, const double mu)
Returns the number of neurons to add or remove from a layer.
bool layer_mutate_functions(struct Layer *l, const double mu)
Mutates a layer's activation function by random selection.
int layer_type_as_int(const char *type)
Returns the integer representation of a layer type given a name.
char * layer_weight_json(const struct Layer *l, const bool return_weights)
Returns a json formatted string representation of a layer's weights.
void layer_set_vptr(struct Layer *l)
Sets a neural network layer's functions to the implementations.
void layer_weight_clamp(const struct Layer *l)
Clamps a layer's weights and biases in range [WEIGHT_MIN, WEIGHT_MAX].
void layer_guard_outputs(const struct Layer *l)
Check number of outputs is within bounds.
void layer_weight_rand(struct Layer *l)
Randomises a layer's weights and biases.
void layer_add_neurons(struct Layer *l, const int N)
Adds N neurons to a layer. Negative N removes neurons.
void layer_ensure_input_represention(struct Layer *l)
Ensures that each neuron is connected to at least one input and each input is connected to at least one neuron.
void layer_calc_n_active(struct Layer *l)
Recalculates the number of active connections within a layer.
bool layer_receives_images(const int type)
Returns whether a layer type expects images as input.
void layer_init_eta(struct Layer *l)
Initialises a layer's gradient descent rate.
bool layer_mutate_eta(struct Layer *l, const double mu)
Mutates the gradient descent rate of a neural layer.
bool layer_mutate_weights(struct Layer *l, const double mu)
Mutates a layer's weights and biases by adding random numbers from a Gaussian normal distribution with zero mean and standard deviation equal to the mutation rate.
void layer_guard_weights(const struct Layer *l)
Check number of weights is within bounds.
#define WEIGHT_SD_RAND
Std dev of Gaussian for weight randomising.
#define STRING_CONVOLUTIONAL
Convolutional.
#define NOISE
Layer type noise.
#define STRING_MAXPOOL
Maxpool.
#define STRING_DROPOUT
Dropout.
#define LAYER_EVOLVE_ETA
Layer may evolve rate of gradient descent.
#define STRING_NOISE
Noise.
#define STRING_RECURRENT
Recurrent.
#define UPSAMPLE
Layer type upsample.
#define STRING_AVGPOOL
Avgpool.
#define LSTM
Layer type LSTM.
#define WEIGHT_MAX
Maximum value of a weight or bias.
#define SOFTMAX
Layer type softmax.
#define N_OUTPUTS_MAX
Maximum number of outputs per layer.
#define LAYER_EVOLVE_CONNECT
Layer may evolve connectivity.
#define WEIGHT_SD
Std dev of Gaussian for weight resizing.
#define WEIGHT_MIN
Minimum value of a weight or bias.
#define RECURRENT
Layer type recurrent.
#define AVGPOOL
Layer type average pooling.
#define STRING_CONNECTED
Connected.
#define DROPOUT
Layer type dropout.
#define STRING_UPSAMPLE
Upsample.
#define CONVOLUTIONAL
Layer type convolutional.
#define MAXPOOL
Layer type maxpooling.
static void layer_print(const struct Layer *l, const bool print_weights)
Prints the layer.
#define CONNECTED
Layer type connected.
#define STRING_SOFTMAX
Softmax.
#define N_WEIGHTS_MAX
Maximum number of weights per layer.
An implementation of an average pooling layer.
static struct LayerVtbl const layer_avgpool_vtbl
Neural average pooling layer implemented functions.
An implementation of a fully-connected layer of perceptrons.
static struct LayerVtbl const layer_connected_vtbl
Neural connected layer implemented functions.
An implementation of a 2D convolutional layer.
static struct LayerVtbl const layer_convolutional_vtbl
Neural convolutional layer implemented functions.
An implementation of a dropout layer.
static struct LayerVtbl const layer_dropout_vtbl
Neural dropout layer implemented functions.
An implementation of a long short-term memory layer.
static struct LayerVtbl const layer_lstm_vtbl
Neural long short-term memory layer implemented functions.
An implementation of a 2D maxpooling layer.
static struct LayerVtbl const layer_maxpool_vtbl
Neural maxpooling layer implemented functions.
An implementation of a Gaussian noise adding layer.
static struct LayerVtbl const layer_noise_vtbl
Neural Gaussian noise layer implemented functions.
An implementation of a recurrent layer of perceptrons.
static struct LayerVtbl const layer_recurrent_vtbl
Neural recurrent layer implemented functions.
An implementation of a softmax layer.
static struct LayerVtbl const layer_softmax_vtbl
Neural softmax layer implemented functions.
An implementation of a 2D upsampling layer.
static struct LayerVtbl const layer_upsample_vtbl
Neural upsampling layer implemented functions.
Neural network layer data structure.
double * output
Current neuron outputs (after activation function)
double decay
Weight decay for gradient descent.
struct Layer * input_layer
Recursive layer input.
double * state
Current neuron states (before activation function)
int recurrent_function
LSTM.
int max_neuron_grow
Maximum number neurons to add per mutation event.
int stride
Pool, Conv, and Upsample.
int n_inputs
Number of layer inputs.
int n_biases
Number of layer biases.
bool * weight_active
Whether each connection is present in the layer.
double * weights
Weights for calculating neuron states.
double * weight_updates
Updates to weights.
double * mu
Mutation rates.
int channels
Pool, Conv, and Upsample.
double scale
Usage depends on layer implementation.
int function
Layer activation function.
int height
Pool, Conv, and Upsample.
struct LayerVtbl const * layer_vptr
Functions acting on layers.
int max_outputs
Maximum number of neurons in the layer.
int width
Pool, Conv, and Upsample.
int n_weights
Number of layer weights.
double probability
Usage depends on layer implementation.
double * bias_updates
Updates to biases.
double eta_max
Maximum gradient descent rate.
struct Layer * output_layer
Recursive layer output.
int n_outputs
Number of layer outputs.
double * biases
Biases for calculating neuron states.
struct Layer * self_layer
Recursive layer self.
int n_active
Number of active weights / connections.
double * prev_state
Previous state for recursive layers.
int out_w
Pool, Conv, and Upsample.
int type
Layer type: CONNECTED, DROPOUT, etc.
int out_c
Pool, Conv, and Upsample.
double * delta
Delta for updating weights.
uint32_t options
Bitwise layer options permitting evolution, SGD, etc.
int out_h
Pool, Conv, and Upsample.
double eta_min
Minimum gradient descent rate.
double eta
Gradient descent rate.
double momentum
Momentum for gradient descent.
int rand_uniform_int(const int min, const int max)
Returns a uniform random integer in the half-open range [min,max): inclusive of min, exclusive of max.
double rand_normal(const double mu, const double sigma)
Returns a random Gaussian with specified mean and standard deviation.
double rand_uniform(const double min, const double max)
Returns a uniform random double in the range [min,max].
Utility functions for random number handling, etc.
static double clamp(const double a, const double min, const double max)
Returns a double clamped within the specified range.