
Help me with my back prop. neural net

Started by +AA_970+ April 11, 2001 04:49 PM
0 comments, last by +AA_970+ 23 years, 7 months ago
I've worked on a very simple back prop. neural net, but it doesn't solve the XOR problem. I tried it with other conditions such as AND and OR and it works perfectly. Here's the code (I suspect there might be a problem in the training); please help me if you can.

#include <cmath>	// exp()
#include <iostream>	// cout, flush
#include <windows.h>	// Sleep()

using namespace std;

float bias = 1.0f;

// Weights, in order: hidden neuron 1, hidden neuron 2, output
float bias_weights[3] = {0,0,0};
float h1_weights[2] = {0,0};
float h2_weights[2] = {0,0};
float out_weights[2] = {0,0};

// Standard sigmoid activation: squashes any weighted sum into (0, 1)
float ActivationFunction(float weighted_sum)
{
	return (float)(1 / (1 + exp(-weighted_sum)));
}

float OutputNeuron(float i1, float i2)
{
	float wsum = (bias*bias_weights[2])+(i1*out_weights[0])+(i2*out_weights[1]);
	return ActivationFunction(wsum);
}

float HiddenNeuron1(float i1, float i2)
{
	float wsum = (bias*bias_weights[0])+(i1*h1_weights[0])+(i2*h1_weights[1]);
	return ActivationFunction(wsum);
}

float HiddenNeuron2(float i1, float i2)
{
	float wsum = (bias*bias_weights[1])+(i1*h2_weights[0])+(i2*h2_weights[1]);
	return ActivationFunction(wsum);
}


float CalcOutput(float i1, float i2)
{
	return OutputNeuron(HiddenNeuron1(i1,i2),HiddenNeuron2(i1,i2));
}

void Train(float i1, float i2, float desired_output)
{
	float h1_ret = HiddenNeuron1(i1,i2);
	float h2_ret = HiddenNeuron2(i1,i2);
	float ActualOutput = CalcOutput(i1,i2);
	float learning_rate = 0.5f;
	
	// Deltas for the output layer, then the hidden layer.
	// For a sigmoid unit, f'(x) = f(x) * (1 - f(x)), hence the
	// activation * (1 - activation) factor in each delta.

	float delta[3];
	delta[2] = ActualOutput*(1 - ActualOutput)*(desired_output - ActualOutput);
	delta[0] = h1_ret * (1 - h1_ret) * out_weights[0] * delta[2];
	delta[1] = h2_ret * (1 - h2_ret) * out_weights[1] * delta[2];


	// Update weights for the hidden layer

	bias_weights[0] += learning_rate * bias * delta[0];
	h1_weights[0] += learning_rate * i1 * delta[0];
	h1_weights[1] += learning_rate * i2 * delta[0];
	
	bias_weights[1] += learning_rate * bias * delta[1];
	h2_weights[0] += learning_rate * i1 * delta[1];
	h2_weights[1] += learning_rate * i2 * delta[1];

	// Update weights for the output layer

	bias_weights[2] += learning_rate * bias * delta[2];
	out_weights[0] += learning_rate * h1_ret * delta[2];
	out_weights[1] += learning_rate * h2_ret * delta[2];

}


int main()
{

	// Present all four XOR patterns 10000 times
	for(int i=0; i<10000; i++)
	{
		Train(0,0,0);
		Train(0,1,1);
		Train(1,0,1);
		Train(1,1,0);
	}


	cout << "(0,0) = " << CalcOutput(0,0) << "\n";
	cout << "(0,1) = " << CalcOutput(0,1) << "\n";
	cout << "(1,0) = " << CalcOutput(1,0) << "\n";
	cout << "(1,1) = " << CalcOutput(1,1) << "\n";
	cout << flush;

	Sleep(5000);	// keep the console window open long enough to read
	return 0;
}
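
For reference, the deltas in Train() are meant to follow the standard backprop rule for sigmoid units, with y the actual output, t the desired output, h_i a hidden activation, w_i the output weight attached to it, and \eta the learning rate (0.5 above):

    \delta_{out} = y\,(1 - y)\,(t - y)
    \delta_{h_i} = h_i\,(1 - h_i)\,w_i\,\delta_{out}
    w \leftarrow w + \eta \cdot x \cdot \delta

where x is whatever feeds the weight being updated (an input for the hidden layer, a hidden activation for the output layer, the bias for the bias weights). As far as I can tell my code matches these, which is why I'm stuck.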

Digital Radiation

Edited by +AA_970+ on April 11, 2001 6:44:15 PM
Stupid me, I didn't realize the weights can't be initialized with 0 and have to be assigned random numbers between -1 and 1. With every weight starting at 0, both hidden neurons get identical updates on every pass, so they stay identical forever and the net can never separate XOR.
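
In case it helps anyone else, here's a minimal sketch of the fix (RandomWeight and InitWeights are just names I made up; call InitWeights() at the top of main(), before the training loop):

#include <cstdlib>	// rand(), srand(), RAND_MAX
#include <ctime>	// time()

// Map rand() from [0, RAND_MAX] onto [-1, 1]
float RandomWeight()
{
	return 2.0f * ((float)rand() / (float)RAND_MAX) - 1.0f;
}

// Seed every weight with a small random value so the two
// hidden neurons start out different and can diverge
void InitWeights()
{
	srand((unsigned)time(0));
	for(int i=0; i<3; i++)
		bias_weights[i] = RandomWeight();
	for(int i=0; i<2; i++)
	{
		h1_weights[i] = RandomWeight();
		h2_weights[i] = RandomWeight();
		out_weights[i] = RandomWeight();
	}
}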

Digital Radiation

