neural net try

General FreeBASIC programming questions.
bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

neural net try

Post by bluatigro »

I found some C++ code about this
and tried to translate it.

I do not get any output.
What did I do wrong?

Code:

'' http://computing.dcu.ie/~humphrys/Notes/Neural/Code/index.html
'' input I[i] = any real numbers ("doubles" in C++)
'' y[j]
'' network output y[k] = sigmoid continuous 0 to 1
'' correct output O[k] = continuous 0 to 1

'' assumes throughout that all i are linked to all j, and that all j are linked to all k
'' if want some NOT to be connected, will need to introduce:
''   Boolean connected [ TOTAL ] [ TOTAL ];
'' initialise it, and then keep checking:
''   if (connected[i][j])
'' don't really need to do this,
'' since we can LEARN a weight of 0 on this link
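'' a minimal FreeBASIC sketch of that mask ( illustrative only , not used below ) :
''   dim shared as boolean connected( TOTAL , TOTAL )
''   if connected( i , j ) then x = x + in( i ) * w( i , j )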

function sigmoid( x as double ) as double 
  return 1.0 / (1 + exp( -x ) )
end function

''test data
const as integer NOINPUT  = 1 
const as integer NOHIDDEN = 30 
const as integer NOOUTPUT = 1 

const as double RATE = 0.3
const as double C = 0.1 '' start w's in range -C, C

const as integer TOTAL = NOINPUT + NOHIDDEN + NOOUTPUT 

'' units all unique ids - so no ambiguity about which we refer to:

const as integer loi = 0
const as integer hii = NOINPUT - 1
const as integer loj = NOINPUT
const as integer hij = NOINPUT + NOHIDDEN - 1
const as integer lok = NOINPUT + NOHIDDEN
const as integer hik = NOINPUT + NOHIDDEN + NOOUTPUT - 1

type NeuralNetwork
  dim as integer i , j , k 
  dim as double in( TOTAL )            
  dim as double y( TOTAL )            
  dim as double O( TOTAL )    
  dim as double w( TOTAL , TOTAL )  '' w[i][j] 
  dim as double wt( TOTAL )         '' bias weights wt[i]
  dim as double dx( TOTAL )         '' dE/dx[i] 
  dim as double dy( TOTAL )         '' dE/dy[i] 
  declare sub backpropagate()
  declare sub exploit()
  declare sub forwardpass()
  declare sub init()
  declare sub learn( m as integer )
  declare sub newIO()
  declare sub reportIO()
end type

''How Input is passed forward through the network:

sub NeuralNetwork.forwardpass()
  dim as double x  '' temporary variable - x[i]
  dim as integer i , j , k
''----- forwardpass I[i] -> y[j] ------------------------------------------------
  for j = loj to hij
    x = 0
    for i = loi to hii       
      x = x + in( i ) * w( i , j )
    next i
    y( j ) = sigmoid( x - wt( j ) )
  next j
''----- forwardpass y[j] -> y[k] ------------------------------------------------
  for k = lok to hik
    x = 0
    for j = loj to hij
      x = x + ( y( j ) * w( j , k ) )
    next j
    y( k ) = sigmoid( x - wt( k ) )
  next k
end sub

''Initialisation:

'' going to do w++ and w--
'' so might think should start with all w=0
'' (all y[k]=0.5 - halfway between possibles 0 and 1)
'' in fact, if all w same they tend to march in step together
'' need *asymmetry* if want them to specialise (form a representation scheme)
'' best to start with diverse w
''
'' also, large positive or negative w -> slow learning
'' so start with small absolute w -> fast learning

function range( l as double , h as double ) as double
  return rnd * ( h - l ) + l
end function

sub NeuralNetwork.init()
  dim as integer i , j , k
  for i = loi to hii
    for j = loj to hij
      w( i , j ) = range( -c , c )
    next j
  next i
  for j = loj to hij
    for k = lok to hik
      w( j , k ) = range( -c , c )
    next k
  next j
  for j = loj to hij
    wt( j ) = range( -c , c )
  next j
  for k = lok to hik
    wt( k ) = range( -c , c )
  next k
end sub

''How Error is back-propagated through the network:


sub NeuralNetwork.backpropagate()
  dim as double dw '' temporary variable - dE/dw[i][j]
  dim as integer i , j , k
''----- backpropagate O[k] -> dy[k] -> dx[k] -> w[j][k],wt[k] ---------------------------------
  for k = lok to hik
    dy( k ) = y( k ) - O( k )
    dx( k ) = dy( k ) * y( k ) * ( 1 - y( k ) )
  next k
''----- backpropagate dx[k],w[j][k] -> dy[j] -> dx[j] -> w[i][j],wt[j] ------------------------
''----- use OLD w values here (that's what the equations refer to) .. -------------------------
  dim as double t
  for j = loj to hij
     t = 0
    for k = lok to hik
      t = t + ( dx( k ) * w( j , k ) )
    next k
    dy( j ) = t
    dx( j ) = dy( j ) * y( j ) * ( 1 - y( j ) )
  next j
''----- .. do all w changes together at end ---------------------------------------------------
  for j = loj to hij
    for k = lok to hik 
      dw = dx( k ) * y( j )            
      w( j , k ) = w( j , k ) - ( RATE * dw )
    next k
  next j
  for i = loi to hii
    for j = loj to hij
      dw = dx( j ) * in( i )            
      w( i , j ) = w( i , j ) - ( RATE * dw )
    next j
  next i
  for k = lok to hik
    dw = -dx( k )          
    wt( k ) = wt( k ) - ( RATE * dw )
  next k
  for j = loj to hij
    dw = -dx( j ) 
    wt( j ) = wt( j ) - ( RATE * dw )
  next j
end sub

''Ways of using the Network:

sub NeuralNetwork.learn( CEILING as integer )
  dim as integer m
  for m = 1 to CEILING
    newIO
    '' new I/O pair
    '' put I into I[i]
    '' put O into O[k]
    forwardpass
    backpropagate
  next m
end sub

sub NeuralNetwork.exploit()
  dim as integer m
  for m = 1 to 30
    newIO
    forwardpass
    reportIO '' without this call the program never prints anything
  next m
end sub

randomize timer '' seed rnd so the initial weights differ from run to run

dim as NeuralNetwork net

'' input x
'' output y
'' adjust difference between y and f(x)

function f( x as double ) as double
'' return sqr( x )
  return sin( x )
'' return sin( x ) + sin( 2 * x ) + sin( 5 * x ) + cos( x )
end function

'' I = x = double lox to hix
const as double lox = 0
const as double hix = 9

'' want it to store f(x) = double lof to hif
const as double lofc = -2.5 '' approximate bounds
const as double hifc = 3.2 

'' O = f(x) normalised to range 0 to 1 

function normalise( t as double ) as double
  return ( t - lofc ) / ( hifc - lofc )
end function

function expand( t as double ) as double '' goes the other way
  return lofc + t * ( hifc - lofc )
end function

sub NeuralNetwork.newIO()
  dim as double x = range( lox , hix ) 
  dim as integer i , j , k
''there is only one, just don't want to remember number:
  for i = loi to hii
    in( i ) = x
  next i
'' there is only one, just don't want to remember number:
  for k = lok to hik                  
    O( k ) = normalise( f( x ) )     
  next k
end sub

'' Note it never even sees the same exemplar twice!

sub NeuralNetwork.reportIO()
  dim as double xx , yy
  dim as integer i , j , k
  for i = loi to hii
    xx = in( i )
  next i
  for k = lok to hik
    yy = expand( y( k ) ) 
  next k
  print "x    = ",    xx  
  print "y    = ",    yy 
  print "f(x) = ", f( xx ) 
end sub

net.init
net.learn 1000
net.exploit

sleep
bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

Re: neural net try

Post by bluatigro »

update:
tried it with a genetic algorithm

error:
no results yet

REM:
the NN is trying to guess whether it is a square or a circle

Code:

'' bluatigro 10 aug 2018

const as long factor = 2 ^ 30
screen 20 , 32
type NNet
public :
  '' ( any ) bounds make these arrays dynamic , so init() can REDIM them
  dim as double in( any )
  dim as integer inmax 
  dim as double wih( any , any )
  dim as double h( any , any )
  dim as integer hidcelmax 
  dim as integer hidlayermax
  dim as double whh( any , any , any )
  dim as double whu( any , any )
  dim as double uit( any )
  dim as integer uitmax
  declare sub init( i as integer , hc as integer , hl as integer , u as integer )
  declare sub calculate()
  declare sub mutate( r as double )
end type
sub NNet.init( i as integer , hc as integer , hl as integer , u as integer )
  inmax = i
  hidcelmax = hc
  hidlayermax = hl
  uitmax = u
  redim in( inmax )
  redim h( hidlayermax , hidcelmax )
  redim uit( uitmax )
  redim wih( inmax , hidcelmax )
  for i = 0 to inmax
    for hc = 0 to hidcelmax
      wih( i , hc ) = rnd
    next hc
  next i
  redim whh( hidlayermax , hidcelmax , hidcelmax )
  for i = 0 to hidlayermax
    for hl = 0 to hidcelmax
      for hc = 0 to hidcelmax 
        whh( i , hl , hc ) = rnd
      next hc
    next hl
  next i
  redim whu( hidcelmax , uitmax )
  for i = 0 to uitmax
    for hc = 0 to hidcelmax
      whu( hc , i ) = rnd
    next hc
  next i
end sub 
function signoid( x as double ) as double
  return 1 / ( 1 + exp( -x ) )
end function
sub NNet.calculate()
  dim as integer i , hid , cel , u
  dim as double sum
  for cel = 0 to hidcelmax
    sum = 0.0
    for i = 0 to inmax
      sum += in( i ) * wih( i , cel )
    next i
    h( 0 , cel ) = signoid( sum / inmax )
  next cel 
  for i = 1 to hidlayermax
    for cel = 0 to hidcelmax
      sum = 0.0
      for hid = 0 to hidcelmax
        sum += h( i - 1 , hid ) * whh( i - 1 , hid , cel )
      next hid
      h( i , cel ) = signoid( sum / hidcelmax )
    next cel
  next i
  for u = 0 to uitmax
    sum = 0.0
    for i = 0 to hidcelmax
      sum += h( hidlayermax , i ) * whu( i , u )
    next i
    uit( u ) = signoid( sum / hidcelmax ) '' this assignment was missing , so uit() was never written
  next u
end sub 
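'' verander ( Dutch for "change" ) does bit-flip mutation on a fixed-point
'' encoding : scale the weight to a long , xor one random bit of the low 30 ,
'' and scale back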
function verander( x as double ) as double
  dim as long i
  i = clng( x * factor )
  i = i xor 2 ^ int( rnd * 30 )
  return cdbl( i ) / factor
end function
sub NNet.mutate( radiation as double )
  dim as integer i , hc , hl , u
  for i = 0 to inmax
    for hc = 0 to hidcelmax
      wih( i , hc ) = iif( rnd < radiation _
      , verander( wih( i , hc ) ) , wih( i , hc ) )
    next hc
  next i
  for i = 0 to hidlayermax
    for hl = 0 to hidcelmax
      for hc = 0 to hidcelmax 
        whh( i , hl , hc ) = iif( rnd < radiation _
        , verander( whh( i , hl , hc ) ) , whh( i , hl , hc ) )
      next hc
    next hl
  next i
  for i = 0 to uitmax
    for hc = 0 to hidcelmax
      whu( hc , i ) = iif( rnd < radiation _
      , verander( whu( hc , i ) ) , whu( hc , i ) )
    next hc
  next i
end sub 
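'' mix() performs uniform crossover : every weight of the child is copied
'' from parent a or parent b with probability 1/2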
function mix( a as NNet , b as NNet ) as NNet
  dim as NNet uit
  uit.init a.inmax , a.hidcelmax , a.hidlayermax , a.uitmax
  dim as integer i , hc , hl , u
  for i = 0 to uit.inmax
    for hc = 0 to uit.hidcelmax
      uit.wih( i , hc ) = iif( rnd < .5 _
      , a.wih( i , hc ) , b.wih( i , hc ) )
    next hc
  next i
  for i = 0 to uit.hidlayermax
    for hl = 0 to uit.hidcelmax
      for hc = 0 to uit.hidcelmax 
        uit.whh( i , hl , hc ) = iif( rnd < .5 _
        , a.whh( i , hl , hc ) , b.whh( i , hl , hc ) )
      next hc
    next hl
  next i
  for i = 0 to uit.uitmax
    for hc = 0 to uit.hidcelmax
      uit.whu( hc , i ) = iif( rnd < .5 _
      , a.whu( hc , i ) , b.whu( hc , i ) )
    next hc
  next i
  return uit
end function 
function pixel( x as integer , y as integer ) as double
  return iif( point( x , y ) <> -16777216 , 1.0 , 0.0 )
end function
function range( l as integer , h as integer ) as integer 
  return int( rnd * ( h - l + 1 ) + l )
end function
dim as NNet ann( 200 )
dim as integer i
for i = 0 to 200
  ann( i ).init 100 * 100 , 100 , 1 , 1
next i

function rect() as double
  cls
  dim as integer x , y , d
  d = range( 10 , 30 )
  x = range( d , 100 - d )
  y = range( d , 100 - d )
  if rnd < .5 then  
    circle ( x , y ) , d , &hffffff ,,,, f
    return 0.0
  end if
  line ( x - d , y - d ) - ( x + d , y + d ) , &hffffff , bf
  return 1.0
end function

dim as integer n , g , x , y , h , l , ry( 200 )
dim as double fout( 200 ) , try

for i = 0 to 200
  ry( i ) = i
next i

for g = 0 to 100
  for n = 0 to 200
    fout( n ) = 0.0
    for i = 0 to 100
      try = rect()
      '' image coordinates run 0..99 : x + 100 * y must stay below inmax
      for x = 0 to 99
        for y = 0 to 99
          ann( n ).in( x + 100 * y ) = pixel( x , y )
        next y
      next x
      ann( n ).calculate()
      fout( n ) += abs( try - ann( n ).uit( 0 ) )
    next i
  next n
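  '' rank the nets : sort the index table ry() so that ry( 0 ) points
  '' at the net with the smallest accumulated error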
  for h = 1 to 200
    for l = 0 to h - 1
      if fout( ry( h ) ) < fout( ry( l ) ) then
        swap ry( h ) , ry( l )
      end if
    next l
  next h
  '' keep the 20 best ( via the rank table ry() ) , breed replacements for the rest
  for i = 20 to 200
    x = range( 0 , 19 )
    y = range( 0 , 19 )
    ann( ry( i ) ) = mix( ann( ry( x ) ) , ann( ry( y ) ) )
    ann( ry( i ) ).mutate 1e-5
  next i
  print g , fout( ry( 0 ) )
next g

sleep 
bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

Re: neural net try

Post by bluatigro »

update:
a translation attempt of Python NN code

error:
duplicate definition

redim does not always work as expected

Code:

'' bluatigro 27 sept 2018
'' ann try 4
'' based on :
''http://code.activestate.com/recipes/578148-simple-back-propagation-neural-network-in-python-s/
function tanh( x as double ) as double
  return ( 1 - exp( -2 * x ) ) / ( 1 + exp( -2 * x ) )
end function
function signoid( x as double ) as double
  return tanh( x )
end function
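'' note : dsignoid() receives the *output* of the activation , not its input ;
'' for y = tanh( x ) the derivative is dy/dx = 1 - y ^ 2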
function dsignoid( x as double ) as double 
  return 1 - x ^ 2
end function
function range( l as double , h as double ) as double
  return rnd * ( h - l ) + l
end function

dim shared as double wish( 0 )
type ann 
public :
  dim as integer ni , nh , no
  dim as double ai( 0 ) , ah( 0 )
  dim as double ao( 0 ) , wi( 0 , 0 ) , wo( 0 , 0 )
  dim as double co( 0 , 0 ) , ci( 0 , 0 )
  dim as double od( 0 ) , hd( 0 )
  declare constructor ( a as integer _
  , b as integer , c as integer )
  declare sub calc()
  declare function backprop( a as double , b as double ) as double
end type
constructor ann( a as integer _
  , b as integer , c as integer )
  ni = a
  nh = b
  no = c
  redim as double ai( ni ) , ah( nh - 1 ) , ao( no - 1 ) 
  redim as double wi( ni , nh - 1 ) , wo( nh - 1 , no - 1 )
  redim as double ci( ni , nh - 1 ) , co( nh - 1 , no - 1 )
  redim as double od( nh ) , hd( nh )
''init neural net
  dim as integer i , h , o
  for i = 0 to ni
    ai( i ) = 1
  next i
  for i = 0 to nh - 1
    ah( i ) = 1
  next i
  for i = 0 to no - 1
    ao( i ) = 1
  next i
  for i = 0 to ni
    for h = 0 to nh - 1
      wi( i , h ) = range( -1 , 1 )
    next h
  next i
  for h = 0 to nh - 1
    for o = 0 to no - 1
      wo( h , o ) = range( -1 , 1 )
    next o
  next h
end constructor
sub ann.calc()
'' forward pass of the neural net
  dim as integer i , h , o
  dim as double sum
  for i = 0 to ni
    ai( i ) = in( i )
  next i
  for h = 0 to nh - 1
    sum = 0
    for i = 0 to ni
      sum = sum + ai( i ) * wi( i , h )
    next i
    ah( h ) = signoid( sum / ni )
  next h
  for o = 0 to no - 1
    sum = 0
    for h = 0 to nh - 1
      sum = sum + ah( h ) * wo( h , o )
    next h
    ao( o ) = signoid( sum / nh )
  next o
end sub
function ann.backprop( n as double , m as double ) as double
'' http://www.youtube.com/watch?v=aVId8KMsdUU&feature=BFa&list=LLldMCkmXl4j9_v0HeKdNcRA
'' calc output deltas
'' we want to find the instantaneous rate of change of the error with respect to the weight from node j to node k
'' output_delta is defined as an attribute of each output node. It is not the final rate we need.
'' To get the final rate we must multiply the delta by the activation of the hidden layer node in question.
'' This multiplication is done according to the chain rule as we are taking the derivative of the activation function
'' of the output node.
'' dE/dw[j][k] = (t[k] - ao[k]) * s'( SUM( w[j][k]*ah[j] ) ) * ah[j]
  dim as integer i , j , k
  dim as double fout , c
  for k = 0 to no - 1
    fout = wish( k ) - ao( k )
    od( k ) = fout * dsignoid( ao( k ) )
  next k
'' update output weights
  for j = 0 to nh - 1
    for k = 0 to no - 1
'' output_deltas[k] * self.ah[j]
'' is the full derivative of
'' dError/dweight[j][k]
      c = od( k ) * ah( j )
      wo( j , k ) += n * c + m * co( j , k )
      co( j , k ) = c
    next k
  next j
'' calc hidden deltas
  for j = 0 to nh - 1
    fout = 0
    for k = 0 to no - 1
      fout += od( k ) * wo( j , k )
    next k
    hd( j ) = fout * dsignoid( ah( j ) )
  next j
'' update input weights
  for i = 0 to ni
    for j = 0 to nh - 1
      c = hd( j ) * ai( i )
      wi( i , j ) += n * c + m * ci( i , j )
      ci( i , j ) = c
    next j
  next i
  fout = 0
  for k = 0 to no - 1
    fout += ( wish( k ) - ao( k ) ) ^ 2
  next k
  return fout / 2
end function

const as integer paterns = 4
dim as ann nn = ann( 2 , 2 , 1 )
dim as double p( nn.ni - 1 , paterns - 1 )
dim as double uit( nn.no - 1 , paterns - 1 )
redim shared as double wish( nn.no )

'' init input and output patterns
dim as integer a , b , e , pp , i , o
for pp = 0 to paterns - 1
  read a , b
  p( 0 , pp ) = a
  p( 1 , pp ) = b
  uit( 0 , pp ) = a xor b
next pp
data 0,0 , 0,1 , 1,0 , 1,1
dim as double fout
''let NN live and learn
for e = 0 to 1000
  '' for each pattern
  fout = 0
  for pp = 0 to paterns - 1
    '' fill input cells
    for i = 0 to nn.ni - 1
      nn.ai( i ) = p( i , pp )
    next i
    '' fill target
    for o = 0 to nn.no - 1
      wish( o ) = uit( o , pp )
    next o
    nn.calc
    fout += nn.backprop( .5 , .5 )
  next pp
  print e , fout
next e
end





paul doe
Moderator
Posts: 1730
Joined: Jul 25, 2017 17:22
Location: Argentina

Re: neural net try

Post by paul doe »

bluatigro wrote:redim does not always work as expected
Redim works fine, but not on static arrays:

Code:

'' bluatigro 27 sept 2018
'' ann try 4
'' based on :
''http://code.activestate.com/recipes/578148-simple-back-propagation-neural-network-in-python-s/
function tanh( x as double ) as double
  return ( 1 - exp( -2 * x ) ) / ( 1 + exp( -2 * x ) )
end function
function signoid( x as double ) as double
  return tanh( x )
end function
function dsignoid( x as double ) as double
  return 1 - x ^ 2
end function
function range( l as double , h as double ) as double
  return rnd * ( h - l ) + l
end function

dim shared as double wish( any )

type ann
public :
  dim as integer ni , nh , no
  dim as double ai( any ) , ah( any )
  dim as double ao( any ) , wi( any, any ) , wo( any, any )
  dim as double co( any, any ) , ci( any, any )
  dim as double od( any ) , hd( any )
  declare constructor ( a as integer _
  , b as integer , c as integer )
  declare sub calc()
  declare function backprop( a as double , b as double ) as double
end type
constructor ann( a as integer _
  , b as integer , c as integer )
  ni = a
  nh = b
  no = c
  redim as double ai( ni ) , ah( nh - 1 ) , ao( no - 1 )
  redim as double wi( ni , nh - 1 ) , wo( nh - 1 , no - 1 )
  redim as double ci( ni , nh - 1 ) , co( nh - 1 , no - 1 )
  redim as double od( nh ) , hd( nh )
''init neural net
  dim as integer i , h , o
  for i = 0 to ni
    ai( i ) = 1
  next i
  for i = 0 to nh - 1
    ah( i ) = 1
  next i
  for i = 0 to no - 1
    ao( i ) = 1
  next i
  for i = 0 to ni
    for h = 0 to nh - 1
      wi( i , h ) = range( -1 , 1 )
    next h
  next i
  for h = 0 to nh - 1
    for o = 0 to no - 1
      wo( h , o ) = range( -1 , 1 )
    next o
  next h
end constructor
sub ann.calc()
'' forward pass of the neural net
  dim as integer i , h , o
  dim as double sum
  for i = 0 to ni
    ai( i ) = in( i )
  next i
  for h = 0 to nh - 1
    sum = 0
    for i = 0 to ni
      sum = sum + ai( i ) * wi( i , h )
    next i
    ah( h ) = signoid( sum / ni )
  next h
  for o = 0 to no - 1
    sum = 0
    for h = 0 to nh - 1
      sum = sum + ah( h ) * wo( h , o )
    next h
    ao( o ) = signoid( sum / nh )
  next o
end sub
function ann.backprop( n as double , m as double ) as double
'' http://www.youtube.com/watch?v=aVId8KMsdUU&feature=BFa&list=LLldMCkmXl4j9_v0HeKdNcRA
'' calc output deltas
'' we want to find the instantaneous rate of change of the error with respect to the weight from node j to node k
'' output_delta is defined as an attribute of each output node. It is not the final rate we need.
'' To get the final rate we must multiply the delta by the activation of the hidden layer node in question.
'' This multiplication is done according to the chain rule as we are taking the derivative of the activation function
'' of the output node.
'' dE/dw[j][k] = (t[k] - ao[k]) * s'( SUM( w[j][k]*ah[j] ) ) * ah[j]
  dim as integer i , j , k
  dim as double fout , c
  for k = 0 to no - 1
    fout = wish( k ) - ao( k )
    od( k ) = fout * dsignoid( ao( k ) )
  next k
'' update output weights
  for j = 0 to nh - 1
    for k = 0 to no - 1
'' output_deltas[k] * self.ah[j]
'' is the full derivative of
'' dError/dweight[j][k]
      c = od( k ) * ah( j )
      wo( j , k ) += n * c + m * co( j , k )
      co( j , k ) = c
    next k
  next j
'' calc hidden deltas
  for j = 0 to nh - 1
    fout = 0
    for k = 0 to no - 1
      fout += od( k ) * wo( j , k )
    next k
    hd( j ) = fout * dsignoid( ah( j ) )
  next j
'' update input weights
  for i = 0 to ni
    for j = 0 to nh - 1
      c = hd( j ) * ai( i )
      wi( i , j ) += n * c + m * ci( i , j )
      ci( i , j ) = c
    next j
  next i
  fout = 0
  for k = 0 to no - 1
    fout += ( wish( k ) - ao( k ) ) ^ 2
  next k
  return fout / 2
end function

const as integer paterns = 4
dim as ann nn = ann( 2 , 2 , 1 )
dim as double p( nn.ni - 1 , paterns - 1 )
dim as double uit( nn.no - 1 , paterns - 1 )
redim wish( nn.no )

'' init input and output patterns
dim as integer a , b , e , pp , i , o
for pp = 0 to paterns - 1
  read a , b
  p( 0 , pp ) = a
  p( 1 , pp ) = b
  uit( 0 , pp ) = a xor b
next pp
data 0,0 , 0,1 , 1,0 , 1,1
dim as double fout
''let NN live and learn
for e = 0 to 1000
  '' for each pattern
  fout = 0
  for pp = 0 to paterns - 1
    '' fill input cells
    for i = 0 to nn.ni - 1
      nn.ai( i ) = p( i , pp )
    next i
    '' fill target
    for o = 0 to nn.no - 1
      wish( o ) = uit( o , pp )
    next o
    nn.calc
    fout += nn.backprop( .5 , .5 )
  next pp
  print e , fout
next e
end
Now it shows:

Code:

Build error(s)
C:\Programming\Freebasic\FreeBASIC-1.05.0-win64\fbc -s console -gen gcc -Wc -Ofast "FbTemp.bas"
FbTemp.bas(68) error 41: Variable not declared, in in 'ai( i ) = in( i )'

Build error(s)
But that's of course your problem.
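For reference, the distinction in a minimal sketch (the array names are illustrative):

Code:

'' an array DIMmed with fixed bounds is static : REDIM on it won't compile
dim as double fixedarr( 9 )
'' an array first declared with REDIM ( or with empty bounds ) is dynamic
redim as double growarr( 9 )
redim growarr( 19 )           '' ok : resized , old contents discarded
redim preserve growarr( 29 )  '' ok : resized , existing elements kept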
bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

Re: neural net try

Post by bluatigro »

@ paul doe
dim as double wish( any ) , in( any )
gives an error here, so I tried something else

error:
the output of ann.backprop() is not getting smaller

Code:

'' bluatigro 28 sept 2018
'' ann try 6
'' based on :
''http://code.activestate.com/recipes/578148-simple-back-propagation-neural-network-in-python-s/
function tanh( x as double ) as double
  return ( 1 - exp( -2 * x ) ) / ( 1 + exp( -2 * x ) )
end function
function signoid( x as double ) as double
  return tanh( x )
end function
function dsignoid( x as double ) as double
  return 1 - x ^ 2
end function
function range( l as double , h as double ) as double
  return rnd * ( h - l ) + l
end function

randomize timer

const as integer ni = 2
const as integer nh = 2
const as integer no = 1

dim shared as double wish( no ) , in( ni )

type ann
public :
  dim as double ai( ni ) , ah( nh )
  dim as double ao( no ) , wi( ni , nh ) , wo( nh , no )
  dim as double co( nh , no ) , ci( ni , nh )
  dim as double od( nh ) , hd( nh )
  declare constructor ()
  declare sub calc()
  declare function backprop( a as double , b as double ) as double
end type
constructor ann()
''init neural net
  dim as integer i , h , o
  for i = 0 to ni
    ai( i ) = 1
  next i
  for i = 0 to nh - 1
    ah( i ) = 1
  next i
  for i = 0 to no - 1
    ao( i ) = 1
  next i
  for i = 0 to ni
    for h = 0 to nh - 1
      wi( i , h ) = range( -1 , 1 )
    next h
  next i
  for h = 0 to nh - 1
    for o = 0 to no - 1
      wo( h , o ) = range( -1 , 1 )
    next o
  next h
end constructor
sub ann.calc()
'' forward pass of the neural net
  dim as integer i , h , o
  dim as double sum
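  '' note : in() is declared SHARED at module level but never written to ,
  '' while the main loop fills nn.ai() directly ; the loop below then
  '' overwrites ai() with zeros , so every pattern looks the same to the
  '' net and backprop()'s error cannot shrink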
  for i = 0 to ni
    ai( i ) = in( i )
  next i
  for h = 0 to nh - 1
    sum = 0
    for i = 0 to ni
      sum = sum + ai( i ) * wi( i , h )
    next i
    ah( h ) = signoid( sum / ni )
  next h
  for o = 0 to no - 1
    sum = 0
    for h = 0 to nh - 1
      sum = sum + ah( h ) * wo( h , o )
    next h
    ao( o ) = signoid( sum / nh )
  next o
end sub
function ann.backprop( n as double , m as double ) as double
'' http://www.youtube.com/watch?v=aVId8KMsdUU&feature=BFa&list=LLldMCkmXl4j9_v0HeKdNcRA
'' calc output deltas
'' we want to find the instantaneous rate of change of the error with respect to the weight from node j to node k
'' output_delta is defined as an attribute of each output node. It is not the final rate we need.
'' To get the final rate we must multiply the delta by the activation of the hidden layer node in question.
'' This multiplication is done according to the chain rule as we are taking the derivative of the activation function
'' of the output node.
'' dE/dw[j][k] = (t[k] - ao[k]) * s'( SUM( w[j][k]*ah[j] ) ) * ah[j]
  dim as integer i , j , k
  dim as double fout , c
  for k = 0 to no - 1
    fout = wish( k ) - ao( k )
    od( k ) = fout * dsignoid( ao( k ) )
  next k
'' update output weights
  for j = 0 to nh - 1
    for k = 0 to no - 1
'' output_deltas[k] * self.ah[j]
'' is the full derivative of
'' dError/dweight[j][k]
      c = od( k ) * ah( j )
      wo( j , k ) += n * c + m * co( j , k )
      co( j , k ) = c
    next k
  next j
'' calc hidden deltas
  for j = 0 to nh - 1
    fout = 0
    for k = 0 to no - 1
      fout += od( k ) * wo( j , k )
    next k
    hd( j ) = fout * dsignoid( ah( j ) )
  next j
'' update input weights
  for i = 0 to ni
    for j = 0 to nh - 1
      c = hd( j ) * ai( i )
      wi( i , j ) += n * c + m * ci( i , j )
      ci( i , j ) = c
    next j
  next i
  fout = 0
  for k = 0 to no - 1
    fout += ( wish( k ) - ao( k ) ) ^ 2
  next k
  return fout / 2
end function

const as integer paterns = 4
dim as ann nn 
dim as double p( ni - 1 , paterns - 1 )
dim as double uit( no - 1 , paterns - 1 )

'' init input and output patterns
dim as integer a , b , e , pp , i , o
for pp = 0 to paterns - 1
  read a , b
  p( 0 , pp ) = a
  p( 1 , pp ) = b
  uit( 0 , pp ) = a xor b
next pp
data 0,0 , 0,1 , 1,0 , 1,1
dim as double fout
''let NN live and learn
for e = 0 to 1000
  '' for each pattern
  fout = 0
  for pp = 0 to paterns - 1
    '' fill input cells
    for i = 0 to ni - 1
      nn.ai( i ) = p( i , pp )
    next i
    '' fill target
    for o = 0 to no - 1
      wish( o ) = uit( o , pp )
    next o
    nn.calc
    fout += nn.backprop( .5 , .5 )
  next pp
  print e , fout
next e
print "[ push return ]"
sleep 

bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

Re: neural net try

Post by bluatigro »

update:
EUREKA !!!
IT IS WORKING NOW !!!

what about an arbitrary number of hidden layers? (see the sketch after the code below)

Code:

'' bluatigro 28 sept 2018
'' ann try 7
'' based on :
''http://code.activestate.com/recipes/578148-simple-back-propagation-neural-network-in-python-s/
const as integer ni = 2
const as integer nh = 2
const as integer no = 1
dim shared as double ai( ni ) , ah( nh - 1 )
dim shared as double ao( no - 1 ) , wish( no - 1 )
dim shared as double wi( ni , nh - 1 )
dim shared as double wo( nh - 1 , no - 1 )
dim shared as double ci( ni , nh - 1 )
dim shared as double co( nh - 1 , no - 1 )
dim shared as double od( nh ) , hd( nh )
const as integer paterns = 4
dim shared as double p( ni , paterns )
dim shared as double uit( no , paterns )
function range( l as double , h as double ) as double
  return rnd * ( h - l ) + l
end function
function tanh( x as double ) as double
  return ( 1 - exp( -2 * x ) ) _
  / ( 1 + exp( -2 * x ) )
end function
function signoid( x as double ) as double
  return tanh( x )
end function
function dsignoid( x as double ) as double
  return 1 - x ^ 2
end function
sub init
''init neural net
  dim as integer i , h , o
  for i = 0 to ni
    ai( i ) = 1
  next i
  for i = 0 to nh - 1
    ah( i ) = 1
  next i
  for i = 0 to no - 1
    ao( i ) = 1
  next i
  for i = 0 to ni
    for h = 0 to nh - 1
      wi( i , h ) = range( -1 , 1 )
    next h
  next i
  for h = 0 to nh - 1
    for o = 0 to no - 1
      wo( h , o ) = range( -1 , 1 )
    next o
  next h
end sub
sub calc( z as integer )
'' forward pass of the neural net
  dim as integer i , h , o
  dim as double sum
  for i = 0 to ni - 1
    ai( i ) = p( i , z )
  next i
  for h = 0 to nh - 1
    sum = 0
    for i = 0 to ni
      sum += ai( i ) * wi( i , h )
    next i
    ah( h ) = signoid( sum / ni )
  next h
  for o = 0 to no - 1
    sum = 0
    for h = 0 to nh - 1
      sum += ah( h ) * wo( h , o )
    next h
    ao( o ) = signoid( sum / nh )
  next o
end sub
function backprop( n as double _
  , m as double ) as double
'' http://www.youtube.com/watch?v=aVId8KMsdUU&feature=BFa&list=LLldMCkmXl4j9_v0HeKdNcRA
'' calc output deltas
'' we want to find the instantaneous rate of change of the error with respect to the weight from node j to node k
'' output_delta is defined as an attribute of each output node. It is not the final rate we need.
'' To get the final rate we must multiply the delta by the activation of the hidden layer node in question.
'' This multiplication is done according to the chain rule as we are taking the derivative of the activation function
'' of the output node.
'' dE/dw[j][k] = (t[k] - ao[k]) * s'( SUM( w[j][k]*ah[j] ) ) * ah[j]
  dim as integer i , j , k
  dim as double fout , c
  for k = 0 to no - 1
    fout = wish( k ) - ao( k )
    od( k ) = fout * dsignoid( ao( k ) )
  next k
'' update output weights
  for j = 0 to nh - 1
    for k = 0 to no - 1
'' output_deltas[k] * self.ah[j]
'' is the full derivative of
'' dError/dweight[j][k]
      c = od( k ) * ah( j )
      wo( j , k ) += n * c + m * co( j , k )
      co( j , k ) = c
    next k
  next j
'' calc hidden deltas
  for j = 0 to nh - 1
    fout = 0
    for k = 0 to no - 1
      fout += od( k ) * wo( j , k )
    next k
    hd( j ) = fout * dsignoid( ah( j ) )
  next j
'' update input weights
  for i = 0 to ni
    for j = 0 to nh - 1
      c = hd( j ) * ai( i )
      wi( i , j ) += n * c + m * ci( i , j )
      ci( i , j ) = c
    next j
  next i
  fout = 0
  for k = 0 to no - 1
    fout += ( wish( k ) - ao( k ) ) ^ 2
  next k
  return fout / 2
end function
'' init input and output patterns
dim as integer pp , e
dim as double fout , a , b

for pp = 0 to paterns - 1
  read a , b
  p( 0 , pp ) = a
  p( 1 , pp ) = b
  uit( 0 , pp ) = a xor b
next pp
data 0,0 , 0,1 , 1,0 , 1,1
dim as integer i , h , o
''let NN live and learn
init
for e = 0 to 1000000
  '' for each pattern
  fout = 0
  for pp = 0 to paterns - 1
    '' fill input cells
    for i = 0 to ni - 1
      ai( i ) = p( i , pp )
    next i
    '' fill target
    for o = 0 to no - 1
      wish( o ) = uit( o , pp )
    next o
    calc pp
    fout = fout + backprop( .5 , .5 )
  next pp
  if e mod 1000 = 0 then print e , fout
next e
restore
print "a | b | a xor b" , "| nn"
for i = 0 to paterns - 1
  read a , b
  p( 0 , i ) = a
  p( 1 , i ) = b
  calc i
  print str(a)+" | "+str(b)+" | "+str(a xor b) _
  ,, "|" + str( ao( 0 ) )
next i
print "[ game over ]"
sleep
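On the question of an arbitrary number of hidden layers: one possible shape, sketched only, assuming nh and signoid() from the program above (the names nlay, hh() and whh() are made up here, and backprop would need a matching per-layer delta loop):

Code:

const as integer nlay = 3                               '' hypothetical layer count
dim shared as double hh( nlay - 1 , nh - 1 )            '' hh( layer , cell )
dim shared as double whh( nlay - 2 , nh - 1 , nh - 1 )  '' whh( layer , from , to )

sub calchidden()
  '' propagate layer l - 1 into layer l for every hidden layer after the first
  dim as integer l , f , t
  dim as double sum
  for l = 1 to nlay - 1
    for t = 0 to nh - 1
      sum = 0
      for f = 0 to nh - 1
        sum += hh( l - 1 , f ) * whh( l - 1 , f , t )
      next f
      hh( l , t ) = signoid( sum / nh )
    next t
  next l
end sub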
now let's try image recognition:
separating squares and circles
error:
not yet learning every time

Code:

'' bluatigro 29 sept 2018
'' image recognition with NN try
screen 18 , 32
randomize timer
sub square( x as integer , y as integer _
  , r as integer , t as  integer )
  if t then
    line ( x - r , y - r ) - ( x + r , y + r ) , &hffffffff , bf
  else
    circle( x , y ) , r , &hffffffff ,,,, f
  end if
end sub
function getpixel( x as integer , y as integer ) as integer
  '' -16777216 = &hFF000000 , the opaque black background colour
  return point( x , y ) <> -16777216
end function
const as integer ni = 100 * 100
const as integer nh = 100
const as integer no = 1
dim shared as double ai( ni ) , ah( nh - 1 )
dim  shared as double ao( no - 1 ) , wish( no - 1 )
dim  shared as double wi( ni , nh - 1 )
dim  shared as double wo( nh - 1 , no - 1 )
dim  shared as double ci( ni , nh - 1 )
dim  shared as double co( nh - 1 , no - 1 )
dim  shared as double od( nh ) , hd( nh )
function in( x as integer , y as integer ) as integer
  return x * 100 + y
end function
function range( l as double , h as double ) as double
  return rnd * ( h - l ) + l
end function
function tanh( x as double ) as double
  return ( 1 - exp( -2 * x ) ) _
  / ( 1 + exp( -2 * x ) )
end function
function signoid( x as double ) as double
  return tanh( x )
end function
function dsignoid( x as double ) as double
  return 1 - x ^ 2
end function
sub init
''init neural net
  dim as integer i , h , o
  for i = 0 to ni
    ai( i ) = 1
  next i
  for i = 0 to nh - 1
    ah( i ) = 1
  next i
  for i = 0 to no - 1
    ao( i ) = 1
  next i
  for i = 0 to ni
    for h = 0 to nh - 1
      wi( i , h ) = range( -1 , 1 )
    next h
  next i
  for h = 0 to nh - 1
    for o = 0 to no - 1
      wo( h , o ) = range( -1 , 1 )
    next o
  next h
end sub
sub calc
'' forward pass of the neural net
  dim as integer i , h , o
  dim as double sum
  for h = 0 to nh - 1
    sum = 0
    for i = 0 to ni
      sum += ai( i ) * wi( i , h )
    next i
    ah( h ) = signoid( sum / ni )
  next h
  for o = 0 to no - 1
    sum = 0
    for h = 0 to nh - 1
      sum += ah( h ) * wo( h , o )
    next h
    ao( o ) = signoid( sum / nh )
  next o
end sub
function backprop( n as double _
  , m as double ) as double
'' http://www.youtube.com/watch?v=aVId8KMsdUU&feature=BFa&list=LLldMCkmXl4j9_v0HeKdNcRA
'' calc output deltas
'' we want to find the instantaneous rate of change of the error with respect to the weight from node j to node k
'' output_delta is defined as an attribute of each output node. It is not the final rate we need.
'' To get the final rate we must multiply the delta by the activation of the hidden layer node in question.
'' This multiplication is done according to the chain rule as we are taking the derivative of the activation function
'' of the output node.
'' dE/dw[j][k] = (t[k] - ao[k]) * s'( SUM( w[j][k]*ah[j] ) ) * ah[j]
  dim as integer i , j , k
  dim as double fout , c
  for k = 0 to no - 1
    fout = wish( k ) - ao( k )
    od( k ) = fout * dsignoid( ao( k ) )
  next k
'' update output weights
  for j = 0 to nh - 1
    for k = 0 to no - 1
'' output_deltas[k] * self.ah[j]
'' is the full derivative of
'' dError/dweight[j][k]
      c = od( k ) * ah( j )
      wo( j , k ) += n * c + m * co( j , k )
      co( j , k ) = c
    next k
  next j
'' calc hidden deltas
  for j = 0 to nh - 1
    fout = 0
    for k = 0 to no - 1
      fout += od( k ) * wo( j , k )
    next k
    hd( j ) = fout * dsignoid( ah( j ) )
  next j
'' update input weights
  for i = 0 to ni
    for j = 0 to nh - 1
      c = hd( j ) * ai( i )
      wi( i , j ) += n * c + m * ci( i , j )
      ci( i , j ) = c
    next j
  next i
  fout = 0
  for k = 0 to no - 1
    fout += ( wish( k ) - ao( k ) ) ^ 2
  next k
  return fout / 2
end function
dim as integer i , h , o , x , y , r , dice , e
dim as double fout
''let NN live and learn
init
for e = 0 to 10000
  cls
  locate 20 , 20
  print e
  dice = int( rnd * 2 )
  r = range( 10 , 30 )
  x = range( r + 5 , 99 - r - 5 )
  y = range( r + 5 , 99 - r - 5 )
  square x , y , r , dice
  '' fill input cells
  for x = 0 to 99
    for y = 0 to 99
      ai( in( x , y ) ) = cdbl( getpixel( x , y ) )
    next y
  next x
  '' fill target
  for o = 0 to no - 1
    wish( o ) = cdbl( dice )
  next o
  calc
  fout = fout + backprop( .5 , .5 )
next e
fout = 0
for i = 0 to 99
    cls
    dice = int( rnd * 2 )
    r = range( 10 , 30 )
    x = range( r + 5 , 99 - r - 5 )
    y = range( r + 5 , 99 - r - 5 )
    square x , y , r , dice

  '' fill input cells
  for x = 0 to 99
    for y = 0 to 99
      ai( in( x , y ) ) = cdbl( getpixel( x , y ) )
    next y
  next x
  calc
  if dice = 0 and ao( 0 ) < .5 then fout += 1
  if dice = 1 and ao( 0 ) > .5 then fout += 1
next i
print "error : " + str( fout )
print "[ game over ]"
sleep