I tried to translate it to FreeBASIC, but I do not get any output. What did I do wrong?

Code:
'' http://computing.dcu.ie/~humphrys/Notes/Neural/Code/index.html
'' input I[i] = any real numbers ("doubles" in C++)
'' y[j]
'' network output y[k] = sigmoid continuous 0 to 1
'' correct output O[k] = continuous 0 to 1
'' assumes throughout that all i are linked to all j, and that all j are linked to all k
'' if want some NOT to be connected, will need to introduce:
'' Boolean connected [ TOTAL ] [ TOTAL ];
'' initialise it, and then keep checking:
'' if (connected[i][j])
'' don't really need to do this,
'' since we can LEARN a weight of 0 on this link
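'' a sketch of what that optional mask could look like in FreeBASIC
'' (not in the original, not used anywhere below - shown only for the idea):
'' dim as boolean connected( TOTAL - 1 , TOTAL - 1 )
'' ...
'' if connected( i , j ) then x = x + in( i ) * w( i , j )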
function sigmoid( x as double ) as double
return 1.0 / (1 + exp( -x ) )
end function
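'' the learning code below relies on the sigmoid derivative
'' s'(x) = s(x) * ( 1 - s(x) ), which appears there as y * ( 1 - y );
'' a tiny helper (not in the original, not called below) makes that explicit:
function dsigmoid( s as double ) as double '' s is an already-computed sigmoid value
return s * ( 1 - s )
end function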
''test data
const as integer NOINPUT = 1
const as integer NOHIDDEN = 30
const as integer NOOUTPUT = 1
const as double RATE = 0.3
const as double C = 0.1 '' start w's in range -C, C
const as integer TOTAL = NOINPUT + NOHIDDEN + NOOUTPUT
'' units all unique ids - so no ambiguity about which we refer to:
const as integer loi = 0
const as integer hii = NOINPUT - 1
const as integer loj = NOINPUT
const as integer hij = NOINPUT + NOHIDDEN - 1
const as integer lok = NOINPUT + NOHIDDEN
const as integer hik = NOINPUT + NOHIDDEN + NOOUTPUT - 1
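'' worked out for NOINPUT = 1, NOHIDDEN = 30, NOOUTPUT = 1 this gives:
'' input unit: i = 0 (loi..hii = 0..0)
'' hidden units: j = 1 .. 30 (loj..hij)
'' output unit: k = 31 (lok..hik = 31..31), with TOTAL = 32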
type NeuralNetwork
dim as integer i , j , k
dim as double in( TOTAL )
dim as double y( TOTAL )
dim as double O( TOTAL )
dim as double w( TOTAL , TOTAL ) '' w[i][j]
dim as double wt( TOTAL ) '' bias weights wt[i]
dim as double dx( TOTAL ) '' dE/dx[i]
dim as double dy( TOTAL ) '' dE/dy[i]
declare sub backpropagate()
declare sub exploit()
declare sub forwardpass()
declare sub init()
declare sub learn( CEILING as integer )
declare sub newIO()
declare sub reportIO()
end type
''How Input is passed forward through the network:
sub NeuralNetwork.forwardpass()
dim as double x '' temporary variable - x[i]
dim as integer i , j , k
''----- forwardpass I[i] -> y[j] ------------------------------------------------
for j = loj to hij
x = 0
for i = loi to hii
x = x + in( i ) * w( i , j )
next i
y( j ) = sigmoid( x - wt( j ) )
next j
''----- forwardpass y[j] -> y[k] ------------------------------------------------
for k = lok to hik
x = 0
for j = loj to hij
x = x + ( y( j ) * w( j , k ) )
next j
'' apply the activation once per k, after the full sum over j (it was inside the j loop)
y( k ) = sigmoid( x - wt( k ) )
next k
end sub
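'' in equation form, one forward pass computes
'' y(j) = sigmoid( sum over i of in(i) * w(i,j) - wt(j) )
'' y(k) = sigmoid( sum over j of y(j) * w(j,k) - wt(k) )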
''Initialisation:
'' going to do w++ and w--
'' so might think should start with all w=0
'' (all y[k]=0.5 - halfway between possibles 0 and 1)
'' in fact, if all w same they tend to march in step together
'' need *asymmetry* if want them to specialise (form a representation scheme)
'' best to start with diverse w
''
'' also, large positive or negative w -> slow learning
'' so start with small absolute w -> fast learning
function range( l as double , h as double ) as double
return rnd * ( h - l ) + l
end function
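'' note: rnd returns a double in [0, 1), so range( -C , C ) with C = 0.1
'' yields a uniform value in [-0.1, 0.1) - small and diverse, as wanted above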
sub NeuralNetwork.init()
dim as integer i , j , k
for i = loi to hii
for j = loj to hij
w( i , j ) = range( -c , c )
next j
next i
for j = loj to hij
for k = lok to hik
w( j , k ) = range( -c , c )
next k
next j
for j = loj to hij
wt( j ) = range( -c , c )
next j
for k = lok to hik
wt( k ) = range( -c , c )
next k
end sub
''How Error is back-propagated through the network:
sub NeuralNetwork.backpropagate()
dim as double dw '' temporary variable - dE/dw[i][j]
dim as integer i , j , k
''----- backpropagate O[k] -> dy[k] -> dx[k] -> w[j][k],wt[k] ---------------------------------
for k = lok to hik
dy( k ) = y( k ) - O( k )
dx( k ) = dy( k ) * y( k ) * ( 1 - y( k ) )
next k
''----- backpropagate dx[k],w[j][k] -> dy[j] -> dx[j] -> w[i][j],wt[j] ------------------------
''----- use OLD w values here (that's what the equations refer to) .. -------------------------
dim as double t
for j = loj to hij
t = 0
for k = lok to hik
t = t + ( dx( k ) * w( j , k ) )
next k
dy( j ) = t
dx( j ) = dy( j ) * y( j ) * ( 1 - y( j ) )
next j
''----- .. do all w changes together at end ---------------------------------------------------
for j = loj to hij
for k = lok to hik
dw = dx( k ) * y( j )
w( j , k ) = w( j , k ) - ( RATE * dw )
next k
next j
for i = loi to hii
for j = loj to hij
dw = dx( j ) * in( i )
w( i , j ) = w( i , j ) - ( RATE * dw )
next j
next i
for k = lok to hik
dw = -dx( k )
wt( k ) = wt( k ) - ( RATE * dw )
next k
for j = loj to hij
dw = -dx( j )
wt( j ) = wt( j ) - ( RATE * dw )
next j
end sub
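'' in equation form, this is gradient descent on E = 1/2 * sum over k of ( y(k) - O(k) )^2 :
'' dx(k) = ( y(k) - O(k) ) * y(k) * ( 1 - y(k) )
'' dx(j) = ( sum over k of dx(k) * w(j,k) ) * y(j) * ( 1 - y(j) )
'' w(j,k) -= RATE * dx(k) * y(j) and w(i,j) -= RATE * dx(j) * in(i)
'' the biases get -dx because the unit input is x = (weighted sum) - wt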
''Ways of using the Network:
sub NeuralNetwork.learn( CEILING as integer )
dim as integer m
for m = 1 to CEILING
newIO
'' new I/O pair
'' put I into I[i]
'' put O into O[k]
forwardpass
backpropagate
next m
end sub
sub NeuralNetwork.exploit()
dim as integer m
for m = 1 to 30
newIO
forwardpass
reportIO '' this call was missing: without it the program never prints anything
next m
end sub
dim as NeuralNetwork net
'' input x
'' output y
'' adjust difference between y and f(x)
function f( x as double ) as double
'' return sqr( x )
return sin( x )
'' return sin( x ) + sin( 2 * x ) + sin( 5 * x ) + cos( x )
end function
'' I = x = double lox to hix
const as double lox = 0
const as double hix = 9
'' want it to store f(x) = double lofc to hifc
const as double lofc = -2.5 '' approximate bounds
const as double hifc = 3.2
'' O = f(x) normalised to range 0 to 1
function normalise( t as double ) as double
return ( t - lofc ) / ( hifc - lofc )
end function
function expand( t as double ) as double '' goes the other way
return lofc + t * ( hifc - lofc )
end function
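'' worked example: normalise( 0 ) = ( 0 - (-2.5) ) / ( 3.2 - (-2.5) )
'' = 2.5 / 5.7 ~ 0.439, and expand( 0.439 ) ~ 0 again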
sub NeuralNetwork.newIO()
dim as double x = range( lox , hix )
dim as integer i , j , k
'' there is only one input, just don't want to remember its number:
for i = loi to hii
in( i ) = x
next i
'' there is only one output, just don't want to remember its number:
for k = lok to hik
O( k ) = normalise( f( x ) )
next k
end sub
'' Note it never even sees the same exemplar twice!
sub NeuralNetwork.reportIO()
dim as double xx , yy
dim as integer i , j , k
for i = loi to hii
xx = in( i )
next i
for k = lok to hik
yy = expand( y( k ) )
next k
print "x = ", xx
print "y = ", yy
print "f(x) = ", f( xx )
end sub
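'' seed the generator: without randomize, rnd produces the same sequence
'' every run, so the network would start from identical weights each time
randomize timer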
net.init
net.learn 1000
net.exploit
sleep