
Extended XOR program - 3 input, 3 hidden

The XOR program works, so now it is time to expand the network. I did this by altering the code of my first working program, adding one input unit and one hidden unit. Note that the training data below still encodes input1 XOR input2; the third input never affects the desired output. The result is the following program:

/*
 * The XOR backpropagation network example
 * by R.M.Morrien, Copyright 2001
 */
#include <iostream>
#include <cmath>

using namespace std;

#define ITER      100000
#define LEARNRATE 0.5

// Function declarations
void  Train(float a, float b, float c, float result);
void  Run(float a, float b, float c);
float Sigmoid(float num);

// Variable declaration
float input1; // The input
float input2; // The input
float input3; // The input
float target; // The desired output

// The neural network variables (the neurons and the weights)
float hidden1      =  0.2; // These values should be random... between -1.0 and 1.0
float hidden2      = -0.3;
float hidden3      = -0.3;
float output1      =  0.4;

float weight_i1_h1 =  0.1;
float weight_i1_h2 = -0.2;
float weight_i1_h3 = -0.2;

float weight_i2_h1 =  0.3;
float weight_i2_h2 =  0.4;
float weight_i2_h3 =  0.4;

float weight_i3_h1 =  0.3;
float weight_i3_h2 =  0.4;
float weight_i3_h3 =  0.4;

float weight_h1_o1 =  0.5;
float weight_h2_o1 = -0.4;
float weight_h3_o1 =  0.4;



/*
 * The main function
 */
int main() {
    // Train
    for(int i=0; i<ITER; i++){
        Train(1.0,1.0,1.0,0.0);
        Train(1.0,0.0,1.0,1.0);
        Train(0.0,0.0,1.0,0.0);
        Train(0.0,1.0,1.0,1.0);
        Train(1.0,1.0,0.0,0.0);
        Train(1.0,0.0,0.0,1.0);
        Train(0.0,0.0,0.0,0.0);
        Train(0.0,1.0,0.0,1.0);
    }
    
    // Show results
    Run(1.0,1.0,1.0);
    Run(1.0,0.0,1.0);
    Run(0.0,0.0,1.0);
    Run(0.0,1.0,1.0);
    Run(1.0,1.0,0.0);
    Run(1.0,0.0,0.0);
    Run(0.0,0.0,0.0);
    Run(0.0,1.0,0.0);
    return 0;
}


/*
 * Name:   Train
 * Input:  Three inputs and the desired result
 * Pre:    The inputs and the result are floats in the range 0.0 - 1.0
 * Post:   The weights of the neural network are adjusted (trained)
 */
void Train(float a, float b, float c, float result){
    input1 = a;	
    input2 = b;	
    input3 = c;	
    target = result;		
    
    float output_hidden1;
    float output_hidden2;
    float output_hidden3;
    float output_output1;
	
    // Calculate the outputs
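    // Forward pass: each hidden unit sums its weighted inputs plus its own
    // bias (hidden1..hidden3 double as the bias terms) and squashes the sum
    // with the sigmoid; the output unit does the same over the hidden outputs.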
    output_hidden1 = input1         * weight_i1_h1 + input2         * weight_i2_h1 + hidden1 + input3 * weight_i3_h1;
    output_hidden1 = Sigmoid(output_hidden1);
    
    output_hidden2 = input1         * weight_i1_h2 + input2         * weight_i2_h2 + hidden2 + input3 * weight_i3_h2;
    output_hidden2 = Sigmoid(output_hidden2);

    output_hidden3 = input1         * weight_i1_h3 + input2         * weight_i2_h3 + hidden3 + input3 * weight_i3_h3;
    output_hidden3 = Sigmoid(output_hidden3);

    output_output1 = output_hidden1 * weight_h1_o1 + output_hidden2 * weight_h2_o1 + output_hidden3 * weight_h3_o1 + output1;
    output_output1 = Sigmoid(output_output1);
    
    // Calculate the error
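    // For a sigmoid unit with output y, the derivative is y*(1-y), so the
    // output delta is y*(1-y)*(target-y) and each hidden delta propagates
    // the output delta back through its hidden-to-output weight.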
    float output1_adjustment = output_output1 * (1-output_output1) * (target - output_output1);
    float hidden3_adjustment = output_hidden3 * (1-output_hidden3) * output1_adjustment * weight_h3_o1;
    float hidden2_adjustment = output_hidden2 * (1-output_hidden2) * output1_adjustment * weight_h2_o1;
    float hidden1_adjustment = output_hidden1 * (1-output_hidden1) * output1_adjustment * weight_h1_o1;
    
    // Adjust the weights
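    // Delta rule: every weight moves by LEARNRATE * (delta of the receiving
    // unit) * (activation of the sending unit); the bias updates below use
    // an implicit input of 1.0.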
    weight_i1_h1 = weight_i1_h1 + LEARNRATE * hidden1_adjustment * input1;
    weight_i1_h2 = weight_i1_h2 + LEARNRATE * hidden2_adjustment * input1;
    weight_i1_h3 = weight_i1_h3 + LEARNRATE * hidden3_adjustment * input1;
    
    weight_i2_h1 = weight_i2_h1 + LEARNRATE * hidden1_adjustment * input2;
    weight_i2_h2 = weight_i2_h2 + LEARNRATE * hidden2_adjustment * input2;
    weight_i2_h3 = weight_i2_h3 + LEARNRATE * hidden3_adjustment * input2;
    
    weight_i3_h1 = weight_i3_h1 + LEARNRATE * hidden1_adjustment * input3;
    weight_i3_h2 = weight_i3_h2 + LEARNRATE * hidden2_adjustment * input3;
    weight_i3_h3 = weight_i3_h3 + LEARNRATE * hidden3_adjustment * input3;
    
    weight_h1_o1 = weight_h1_o1 + LEARNRATE * output1_adjustment * output_hidden1; 
    weight_h2_o1 = weight_h2_o1 + LEARNRATE * output1_adjustment * output_hidden2;     
    weight_h3_o1 = weight_h3_o1 + LEARNRATE * output1_adjustment * output_hidden3;
    
    hidden1      = hidden1      + LEARNRATE * hidden1_adjustment; 
    hidden2      = hidden2      + LEARNRATE * hidden2_adjustment; 
    hidden3      = hidden3      + LEARNRATE * hidden3_adjustment;     
    output1      = output1      + LEARNRATE * output1_adjustment; 
}


/*
 * Name:   Run
 * Input:  Three inputs
 * Pre:    The inputs are floats in the range 0.0 - 1.0
 * Post:   Shows the output of the neural network 
 */
void Run(float a, float b, float c){
    input1 = a;	
    input2 = b;	
    input3 = c;	
    
    float output_hidden1;
    float output_hidden2;
    float output_hidden3;
    float output_output1;
	
    // Calculate the outputs
    output_hidden1 = input1         * weight_i1_h1 + input2         * weight_i2_h1 + hidden1 + input3 * weight_i3_h1;
    output_hidden1 = Sigmoid(output_hidden1);
    
    output_hidden2 = input1         * weight_i1_h2 + input2         * weight_i2_h2 + hidden2 + input3 * weight_i3_h2;
    output_hidden2 = Sigmoid(output_hidden2);

    output_hidden3 = input1         * weight_i1_h3 + input2         * weight_i2_h3 + hidden3 + input3 * weight_i3_h3;
    output_hidden3 = Sigmoid(output_hidden3);

    output_output1 = output_hidden1 * weight_h1_o1 + output_hidden2 * weight_h2_o1 + output_hidden3 * weight_h3_o1 + output1;
    output_output1 = Sigmoid(output_output1);
    
    // Show the result
    cout << output_output1;
    if(output_output1 >= 0.5) cout << "\t (1)";
    else                      cout << "\t (0)";
    cout << endl;
}


/*
 * Name:   Sigmoid
 * Input:  One input
 * Pre:    The input is a float (positive or negative)
 * Post:   Returns the result of the sigmoid function (also known as the logistic function)
 */
float Sigmoid(float num) {
    return (float)(1/(1+exp(-num)));
}
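As the comment at the top of the listing notes, the initial weights and biases should really be random values between -1.0 and 1.0 rather than hand-picked constants. A minimal sketch of how that could look (the helper RandomWeight is my own invention, not part of the program above):

#include <cstdlib>
#include <ctime>

// Returns a pseudo-random float in the range [-1.0, 1.0].
float RandomWeight() {
    return 2.0f * rand() / (float)RAND_MAX - 1.0f;
}

// Possible use at the start of main(), before the training loop:
//     srand(time(0));
//     hidden1      = RandomWeight();
//     weight_i1_h1 = RandomWeight();
//     // ...and so on for the remaining biases and weights.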

Download source here.
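To compile and run it yourself (assuming the listing is saved as xor3.cpp; that file name is my choice, the original is not given):

g++ xor3.cpp
time ./a.out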

The results (100,000 iterations on a Pentium 166):

[rmorrien@escay backpropagation]$ time ./a.out
0.00405455       (0)
0.99688          (1)
0.00285217       (0)
0.996876         (1)
0.00358493       (0)
0.996643         (1)
0.00328084       (0)
0.996642         (1)

real    0m8.298s
user    0m8.110s
sys     0m0.080s
[rmorrien@escay backpropagation]$



Copyright © 2001, R.M. Morriën