Code: Select all
'' Associative memory: a single-layer network over random projections.
'' Each of the `density` layers applies a pseudorandom sign flip followed by
'' a Walsh-Hadamard transform (see signflip/wht), binarizes the projected
'' vector, and accumulates a signed sum of weights.  train() stores values
'' by spreading the prediction error evenly over all weights.
type associativememory
veclen as ulong 'length of input/work vectors (assumed a power of 2 for wht -- TODO confirm)
density as ulong 'number of random-projection layers (weight rows)
hash as ulong '32 bit unsigned integer seed for the pseudorandom sign flips
weights as single ptr 'pointer to 32 bit floats: veclen*density trained weights
binarize as boolean ptr 'per-weight sign pattern of the last recall (written by recall, read by train)
work as single ptr 'scratch buffer of veclen floats used by recall
declare sub init(veclen as ulong,density as ulong,hash as ulong)
declare sub free()
declare sub train(target as single,inVec as single ptr)
declare function recall(inVec as single ptr) as single
declare sub signflip(vec as single ptr,h as long)
declare sub wht(vec as single ptr)
end type
'' Allocate zero-initialised storage for the memory.
'' veclen  - input vector length (assumed a power of 2 for wht -- TODO confirm)
'' density - number of random-projection layers
'' hash    - seed for the pseudorandom sign flips
sub associativememory.init(veclen as ulong,density as ulong, hash as ulong)
	dim as ulong total=veclen*density 'one weight (and one sign flag) per layer element
	this.hash=hash
	this.density=density
	this.veclen=veclen
	weights=callocate(total,sizeof(single))   'zeroed weight layers
	binarize=callocate(total,sizeof(boolean)) 'sign record filled in by recall
	work=callocate(veclen,sizeof(single))     'scratch vector for projections
end sub
'' Release all buffers allocated by init().
'' The pointers are cleared afterwards so an accidental second call to
'' free() cannot double-free, and a stale pointer is easy to spot.
sub associativememory.free()
	deallocate(weights)
	deallocate(binarize)
	deallocate(work)
	weights=0
	binarize=0
	work=0
end sub
'' Predict the scalar value stored for inVec.
'' Side effect: records the binarization pattern of every layer in
'' this.binarize; train() relies on that pattern to apply its correction
'' with the right signs.  inVec itself is left untouched (it is copied
'' into the work buffer before the projections).
function associativememory.recall(inVec as single ptr) as single
	dim as single total=0f
	dim as single ptr w=weights
	dim as boolean ptr b=binarize
	'copy the input so the in-place projections do not disturb the caller's data
	for k as ulong=0 to veclen-1
		work[k]=inVec[k]
	next
	for layer as ulong=0 to density-1
		'sign flip then WHT = one pseudorandom projection, seeded per layer
		signflip(work,hash+layer)
		wht(work)
		for k as ulong=0 to veclen-1
			dim as boolean positive=work[k]>0f
			b[k]=positive
			if positive then
				total+=w[k]
			else
				total-=w[k]
			end if
		next
		w+=veclen 'advance to the next layer's weights
		b+=veclen
	next
	return total
end function
'' Store target for the pattern inVec by error correction.
'' recall() is invoked first: it yields the current prediction AND
'' refreshes this.binarize for this input.  The error, divided evenly
'' over all veclen*density weights, is then added with the recorded
'' signs, so an immediate recall of the same input returns target exactly.
sub associativememory.train(target as single,inVec as single ptr)
	dim as single e=target-recall(inVec) 'prediction error (also sets binarize)
	e/=veclen*density 'per-weight share of the full correction
	dim as single ptr w=weights
	dim as boolean ptr b=binarize
	for layer as ulong=0 to density-1
		for k as ulong=0 to veclen-1
			if b[k] then w[k]+=e else w[k]-=e
		next
		w+=veclen
		b+=veclen
	next
end sub
'' Pseudorandomly flip the sign of each element of vec in place.
'' h seeds a 32-bit linear congruential generator (constants
'' 1664525 / 1013904223); the generator's sign bit decides each flip,
'' so the same seed always reproduces the same flip pattern.
'' h is a 32 bit signed integer.
sub associativememory.signflip(vec as single ptr,h as long)
	for k as ulong=0 to veclen-1
		'32-bit signed arithmetic wraps around, which is what the LCG relies on
		h=h*1664525+1013904223
		if h<0 then vec[k]=-vec[k]
	next
end sub
'' In-place fast Walsh-Hadamard transform with orthonormal scaling
'' (every element is multiplied by 1/sqr(veclen) at the end, so the
'' vector length/norm is preserved).
'' Assumes veclen is a power of 2 -- TODO confirm at init time.
sub associativememory.wht(vec as single ptr)
	dim as single scale=1.0/sqr(veclen)
	dim as ulong hs=1
	while hs<veclen
		'process the vector in blocks of 2*hs: butterfly each pair (k, k+hs)
		dim as ulong blockstart=0
		while blockstart<veclen
			for k as ulong=blockstart to blockstart+hs-1
				dim as single a=vec[k]
				dim as single b=vec[k+hs]
				vec[k]=a+b
				vec[k+hs]=a-b
			next
			blockstart+=2*hs
		wend
		hs+=hs
	wend
	for k as ulong=0 to veclen-1
		vec[k]*=scale
	next
end sub
'' Demo: one associative memory stores two different curves, keyed by two
'' different sparse binary inputs (a one-hot pattern maps to a sine value,
'' the same position plus its neighbour maps to a cosine value).
screenres 300,300,32
print "Please wait!"

dim as associativememory net
dim as single ptr vec=callocate(256,sizeof(single)) 'zeroed 256-float input pattern
net.init(256,3,1234567)

'interleave training of both curves for 100 passes
for pass as ulong=0 to 99
	for x as ulong=0 to 254
		vec[x]=1f                      'one-hot pattern -> sine target
		net.train(sin(x*.06)*100,vec)
		vec[x+1]=1f                    'two-hot pattern -> cosine target
		net.train(cos(x*.2)*100,vec)
		vec[x]=0f                      'clear the pattern for the next position
		vec[x+1]=0f
	next
next

cls
'plot the true sine curve (green) against the recalled one (yellow)
for x as ulong=0 to 254
	vec[x]=1f
	pset (x,150-sin(x*.06)*100),rgb(0,255,0)
	pset (x,150-net.recall(vec)),rgb(255,255,0)
	vec[x]=0f
next
'plot the true cosine curve (green) against the recalled one (magenta)
for x as ulong=0 to 254
	vec[x]=1f
	vec[x+1]=1f
	pset (x,150-cos(x*.2)*100),rgb(0,255,0)
	pset (x,150-net.recall(vec)),rgb(255,0,255)
	vec[x]=0f
	vec[x+1]=0f
next

deallocate(vec)
net.free()
getkey
"There has been a lack of discussion about binarization in neural networks. Multiplying those +1/-1 values by weights and summing allows you to store values with a high degree of independence. For a given binary input and target value you get an error. You divide the error by the number of binary values, and then you simply correct each of the weights by the reduced error, taking account of the binary sign. That gives a full correction, so you get the correct target output. In higher-dimensional space most vectors are orthogonal. For a different binary input the adjustments you made to the weights will not align at all; in fact they will sum to Gaussian noise, by the central limit theorem. The value you previously stored for a second binary input will now be contaminated by a slight amount of Gaussian noise, which you can correct for. This will in turn introduce an even smaller amount of Gaussian noise on the value for the first binary input. Iterating back and forth will get rid of the noise entirely for both binary inputs.
This has high use in random projection, reservoir, and extreme learning machine computing. In fact it turns a simple locality-sensitive hash, formed by random projection followed by binarization, into a useful single-layer neural network. "
https://software.intel.com/en-us/forums ... pic/734095
If you use excess memory (to get an over-determined solution) it will provide a manner of error correction.