Dr. Mark Humphrys

School of Computing. Dublin City University.

Home      Blog      Teaching      Research      Contact

Search:

CA216      CA249      CA318

CA400      CA651      CA668


Using the neural network as a function approximator

// input x
// output y = the network's estimate of f(x)
// adjust the weights to reduce the difference between y and f(x)



Define the function f:


// Target function the network will learn to approximate.
// Swap in one of the alternative bodies below to experiment with
// other targets.
double f ( double x )
{
 double value = sin(x);
 // alternatives to try:
 //   value = sqrt(x);
 //   value = sin(x)+sin(2*x)+sin(5*x)+cos(x);
 return value;
}



// I = x = double lox to hix
// Input range: training inputs x are drawn uniformly from [lox, hix].
const double lox = 0;
const double hix = 9;

// want it to store f(x) = double lof to hif
// Output range of f over [lox, hix] — approximate, hand-chosen bounds.
// Used by normalise()/expand() to map f(x) into [0,1] and back.
const double lof = -2.5;		// approximate bounds
const double hif = 3.2;




// O = f(x) normalised to range 0 to 1

// Map a raw function value t from [lof, hif] linearly onto [0, 1].
double normalise ( double t )
{
 double range = hif - lof;
 double offset = t - lof;
 return offset / range;
}

// Inverse of normalise(): map t from [0, 1] back onto [lof, hif].
double expand ( double t )		// goes the other way
{
 double range = hif - lof;
 return lof + t * range;
}




Define the kind of Neural Network we will need to represent f:



// Network topology: 1 input (x), 30 hidden units, 1 output (normalised f(x)).
const int NOINPUT  = 1;
const int NOHIDDEN = 30;
const int NOOUTPUT = 1;


const double RATE = 0.3;			// learning rate for weight updates

const double C = 0.1;				// start w's in range -C, C



// #include the basic Neural Network code at this point




// Generate one fresh training exemplar: pick a random x in [lox, hix],
// load it into the input layer I, and load the desired output
// normalise(f(x)) into the target layer O.
// NOTE(review): for_i / for_k are presumably project macros looping over
// the NOINPUT / NOOUTPUT units — confirm in the #included network code.
NeuralNetwork :: newIO()
{
 double x = float_randomAtoB ( lox, hix );	

// there is only one, just don't want to remember number:
 for_i
  I[i] = x;	

// there is only one, just don't want to remember number:
 for_k			
  O[k] = normalise(f(x));	
}



// Note it never even sees the same exemplar twice!



// Print the current exemplar to `stream`: the input x, the network's
// output y expanded back from [0,1] to the [lof, hif] range, and the
// true value f(x) for comparison.
// NOTE(review): buf is presumably a global char buffer declared in the
// #included network code — confirm its size is adequate for these lines.
NeuralNetwork :: reportIO ( ostream& stream )
{
 double x,_y;
 // copy out the single input (loop body runs once; NOINPUT == 1)
 for_i
  x = I[i];
 // expand the single output back to the f(x) range (NOOUTPUT == 1)
 for_k
  _y = expand(y[k]);

 sprintf ( buf, "x    %.2f",   x  ); stream << buf << "\n";
 sprintf ( buf, "y    %.2f",  _y  ); stream << buf << "\n";
 sprintf ( buf, "f(x) %.2f", f(x) ); stream << buf << "\n";
}



Finally the main function:


main ( int argc, char **argv )
{
 int CEILING = atoi ( argv[1] );

 net.init();

 net.print(cout); 
 net.learn ( CEILING );
 net.print(cout); 

 net.exploit();
}


Feeds      w2mind.org

On Internet since 1987.