Perceptron: Difference between revisions

Reinstated Forth/Go/Julia/Phix/REXX/Smalltalk
(Reinstated Forth/Go/Julia/Phix/REXX/Smalltalk)
Line 18:
* [https://youtu.be/dXuNAkHsos4?t=16m44s Machine Learning - Perceptrons (youtube)]
<br><br>
 
=={{header|Forth}}==
{{works with|GNU Forth}}
Where it says <code>[email protected]</code> it should say <code>f&#64;</code>.
<lang Forth>require random.fs
here seed !        \ seed the PRNG from the current dictionary pointer

warnings off       \ EVALUATE is deliberately redefined below

( THE PERCEPTRON )

\ Random float in [-1,1) with 0.001 resolution.
: randomWeight 2000 random 1000 - s>f 1000e f/ ;
\ CREATE a named perceptron: one cell holding the arity n (taken from
\ the data stack) followed by n randomly initialised float weights.
: createPerceptron create dup , 0 ?DO randomWeight f, LOOP ;

variable arity     \ input count of the currently selected perceptron
variable ^weights  \ address of its first weight
variable ^inputs   \ address of the current input vector

\ Select a perceptron: load its arity cell and weight-array address.
: perceptron! dup @ arity ! cell+ ^weights ! ;
\ Select the float array to read inputs from.
: inputs! ^inputs ! ;

0.0001e fconstant learningConstant
\ Step activation: +1e for a positive sum, -1e otherwise.
: activate 0e f> IF 1e ELSE -1e THEN ;

\ Weighted sum of inputs and weights, squashed through ACTIVATE.
\ Walks both float arrays in parallel, accumulating on the FP stack.
: feedForward
^weights @ ^inputs @ 0e
dup f@ float + swap    \ fetch next input, advance pointer
arity @ 0 ?DO
dup f@ float + swap    \ fetch next weight, advance pointer
f* f+                  \ accumulate input*weight
LOOP 2drop activate ;

\ One training step ( F: desired -- ): nudge every weight by
\ learningConstant * (desired - guess) * input.
: train
feedForward f- learningConstant f*
^weights @ ^inputs @
arity @ 0 ?DO
fdup dup f@ f* float + swap    \ delta * input[i], advance input ptr
dup f@ f+ dup f! float + swap  \ add into weight[i], advance weight ptr
LOOP 2drop fdrop ;

( THE TRAINER )

create point 0e f, 0e f, 1e f, \ x y bias

: x point ;
: y point float + ;
: randomX 640 random s>f ;
: randomY 360 random s>f ;

\ y = Ax + B
2e fconstant A
1e fconstant B

\ Move POINT to a random canvas position and push the desired
\ classification: -1e when the point lies below the line, else +1e.
: randomizePoint
randomY fdup y f!
randomX fdup x f!
A f* B f+ f< IF -1e ELSE 1e THEN ;

3 createPerceptron myPerceptron
variable trainings             \ cumulative training iterations
10000 constant #rounds         \ evaluation sample size

: setup 0 ; \ success counter
\ Convert the success count into a percentage of #rounds.
: calculate s>f #rounds s>f f/ 100e f* ;
: report ." After " trainings @ . ." trainings: "
calculate f. ." % accurate" cr ;
\ Count a hit when guess and desired answer agree (F~ within tolerance).
: check learningConstant f~ IF 1+ THEN ;
: evaluate randomizePoint feedForward check ;
\ Redefinition (hence WARNINGS OFF): run #rounds single evaluations
\ and report the accuracy.
: evaluate setup #rounds 0 ?DO evaluate LOOP report ;

: tally 1 trainings +! ;
: timesTrain 0 ?DO randomizePoint train tally LOOP ;

\ Wire myPerceptron to POINT and reset the training counter.
: initialize
myPerceptron perceptron!
point inputs!
0 trainings ! ;
\ Evaluate untrained, then after 1,2,3,4,5,10,20,50,100,200,500
\ and 1000 cumulative trainings.
: go
initialize evaluate
1 timesTrain evaluate
1 timesTrain evaluate
1 timesTrain evaluate
1 timesTrain evaluate
1 timesTrain evaluate
5 timesTrain evaluate
10 timesTrain evaluate
30 timesTrain evaluate
50 timesTrain evaluate
100 timesTrain evaluate
300 timesTrain evaluate
500 timesTrain evaluate ;

go bye</lang>
Example output:
<pre>After 0 trainings: 10.16 % accurate
After 1 trainings: 7.43 % accurate
After 2 trainings: 7.71 % accurate
After 3 trainings: 4.93 % accurate
After 4 trainings: 3.11 % accurate
After 5 trainings: 0.6 % accurate
After 10 trainings: 48.72 % accurate
After 20 trainings: 85.55 % accurate
After 50 trainings: 86.36 % accurate
After 100 trainings: 98.59 % accurate
After 200 trainings: 98.84 % accurate
After 500 trainings: 95.86 % accurate
After 1000 trainings: 99.8 % accurate</pre>
 
=={{header|Go}}==
{{libheader|Go Graphics}}
<br>
This is based on the Java entry but just outputs the final image (as a .png file) rather than displaying its gradual build up. It also uses a different color scheme - blue and red circles with a black dividing line.
<lang go>package main
 
import (
	"log"
	"math/rand"
	"time"

	"github.com/fogleman/gg"
)
 
const c = 0.00001
 
// linear gives the y-coordinate of the dividing line at x — the
// boundary the perceptron is trained to reproduce.
func linear(x float64) float64 {
	const slope, intercept = 0.7, 40
	return x*slope + intercept
}
 
// trainer is one labelled training sample: an input vector and its
// expected classification.
type trainer struct {
inputs []float64 // {x, y, 1} — the trailing 1 is the bias input
answer int // desired output: +1 or -1
}
 
// newTrainer packs a sample point (x, y) plus the constant bias input 1
// together with its expected classification a.
func newTrainer(x, y float64, a int) *trainer {
	t := trainer{answer: a}
	t.inputs = []float64{x, y, 1}
	return &t
}
 
// perceptron is a single neuron together with the training set it is
// taught from.
type perceptron struct {
weights []float64 // one weight per input (bias included)
training []*trainer // labelled sample points
}
 
func newPerceptron(n, w, h int) *perceptron {
weights := make([]float64, n)
for i := 0; i < n; i++ {
weights[i] = rand.Float64()*2 - 1
}
 
training := make([]*trainer, 2000)
for i := 0; i < 2000; i++ {
x := rand.Float64() * float64(w)
y := rand.Float64() * float64(h)
answer := 1
if y < linear(x) {
answer = -1
}
training[i] = newTrainer(x, y, answer)
}
return &perceptron{weights, training}
}
 
// feedForward classifies an input vector: it returns +1 when the
// weighted sum of the inputs is positive and -1 otherwise.
// It panics if the input and weight vectors differ in length.
func (p *perceptron) feedForward(inputs []float64) int {
	if len(p.weights) != len(inputs) {
		panic("weights and input length mismatch, program terminated")
	}
	var sum float64
	for i := range inputs {
		sum += p.weights[i] * inputs[i]
	}
	if sum <= 0 {
		return -1
	}
	return 1
}
 
// train performs one perceptron learning step: each weight is nudged by
// c * (desired - guess) * input, so a correct guess (error 0) leaves
// the weights untouched.
func (p *perceptron) train(inputs []float64, desired int) {
	delta := float64(desired - p.feedForward(inputs))
	for i, in := range inputs {
		p.weights[i] += c * delta * in
	}
}
 
// draw runs the given number of training iterations, cycling through the
// training set, then renders the result: the true dividing line in black
// and every training point as a circle coloured by the perceptron's
// current guess (blue = classified above, red = classified below).
func (p *perceptron) draw(dc *gg.Context, iterations int) {
le := len(p.training)
// count wraps around the training set while i counts total iterations
for i, count := 0, 0; i < iterations; i, count = i+1, (count+1)%le {
p.train(p.training[count].inputs, p.training[count].answer)
}
x := float64(dc.Width())
y := linear(x)
dc.SetLineWidth(2)
dc.SetRGB255(0, 0, 0) // black line
dc.DrawLine(0, linear(0), x, y)
dc.Stroke()
dc.SetLineWidth(1)
for i := 0; i < le; i++ {
guess := p.feedForward(p.training[i].inputs)
// NOTE(review): the -4 offset looks like a centring tweak for the
// radius-8 circles — confirm against gg.DrawCircle semantics.
x := p.training[i].inputs[0] - 4
y := p.training[i].inputs[1] - 4
if guess > 0 {
dc.SetRGB(0, 0, 1) // blue circle
} else {
dc.SetRGB(1, 0, 0) // red circle
}
dc.DrawCircle(x, y, 8)
dc.Stroke()
}
}
 
// main trains a 3-input perceptron on 2000 random points of a 640x360
// area and writes the classification result to perceptron.png.
func main() {
rand.Seed(time.Now().UnixNano())
w, h := 640, 360
perc := newPerceptron(3, w, h)
dc := gg.NewContext(w, h)
dc.SetRGB(1, 1, 1) // white background
dc.Clear()
perc.draw(dc, 2000)
// SavePNG can fail (e.g. unwritable directory); don't drop the error.
if err := dc.SavePNG("perceptron.png"); err != nil {
log.Fatal(err)
}
}</lang>
 
=={{header|Java}}==
Line 278 ⟶ 495:
 
Well, it seems I cannot upload an image :(
 
=={{header|Julia}}==
<lang julia># file module.jl
 
module SimplePerceptrons

# default activation function
step(x) = x > 0 ? 1 : -1

# A single neuron: weights[1] is the bias, weights[2:end] the input
# weights; lr is the learning rate, activate the threshold function.
mutable struct Perceptron{T, F}
weights::Vector{T}
lr::T
activate::F
end

# Build a perceptron for n inputs (n + 1 weights including the bias),
# initialised uniformly in [-1, 1).
Perceptron{T}(n::Integer, lr = 0.01, f::Function = step) where T =
Perceptron{T, typeof(f)}(2 .* rand(n + 1) .- 1, lr, f)
Perceptron(args...) = Perceptron{Float64}(args...)

# Classify one sample vector / each row of a sample matrix.
@views predict(p::Perceptron, x::AbstractVector) = p.activate(p.weights[1] + x' * p.weights[2:end])
@views predict(p::Perceptron, X::AbstractMatrix) = p.activate.(p.weights[1] .+ X * p.weights[2:end])

# Perceptron learning rule: per epoch, predict the whole batch once,
# then fold lr * error * input into the weights sample by sample.
# NOTE(review): yhat is not recomputed between per-sample updates, and
# the bias update omits the lr factor applied to the other weights —
# confirm both are intended.
function train!(p::Perceptron, X::AbstractMatrix, y::AbstractVector; epochs::Integer = 100)
for _ in Base.OneTo(epochs)
yhat = predict(p, X)
err = y .- yhat
ΔX = p.lr .* err .* X
for ind in axes(ΔX, 1)
p.weights[1] += err[ind]
p.weights[2:end] .+= ΔX[ind, :]
end
end
return p
end

# Fraction of samples classified correctly.
accuracy(p, X::AbstractMatrix, y::AbstractVector) = count(y .== predict(p, X)) / length(y)

end # module SimplePerceptrons
</lang>
 
<lang julia># file _.jl
 
const SP = include("module.jl")

p = SP.Perceptron(2, 0.1)

# Training data: 1000 random points labelled by the line x2 = a + b*x1.
a, b = 0.5, 1
X = rand(1000, 2)
y = map(x -> x[2] > a + b * x[1] ? 1 : -1, eachrow(X))

# Accuracy before training (random weights, so roughly chance level)
@show SP.accuracy(p, X, y)

# Train
SP.train!(p, X, y, epochs = 1000)

# The decision boundary w1 + w2*x1 + w3*x2 = 0 rearranges to
# x2 = -w1/w3 - (w2/w3)*x1, i.e. intercept ahat, slope bhat —
# matching abline!(slope, intercept) as used for the real line below.
ahat, bhat = -p.weights[1] / p.weights[3], -p.weights[2] / p.weights[3]

using Plots

scatter(X[:, 1], X[:, 2], markercolor = map(x -> x == 1 ? :red : :blue, y))
Plots.abline!(b, a, label = "real line", linecolor = :red, linewidth = 2)

SP.train!(p, X, y, epochs = 1000)
ahat, bhat = -p.weights[1] / p.weights[3], -p.weights[2] / p.weights[3]
Plots.abline!(bhat, ahat, label = "predicted line")
</lang>
 
=={{header|Kotlin}}==
Line 378 ⟶ 662:
 
=={{header|Lua}}==
Simple implementation allowing for any number of inputs (in this case, just 1), testing of the Perceptron, and training.
<lang lua>local Perceptron = {}
Perceptron.__index = Perceptron
 
Line 442 ⟶ 725:
end
node:train(trainingData, 100, .1) --trains on the set for 100 epochs with a step size of 0.1
print("Trained\nTrained results:")
for i = -2, 2, 1 do
print(i..":", node:test({i}))
Line 454 ⟶ 737:
1: 1.6849672607805
2: 2.4325140849699
 
Trained results:
-2: -3
Line 668 ⟶ 952:
#####OOOOOOOOOOOOOOO
####OOOOOOOOOOOOOOOO</pre>
 
=={{header|Phix}}==
{{libheader|pGUI}}
Interactive GUI version. Select one of five lines, set the number of points, learning constant,
learning rate, and max iterations. Plots accuracy vs. iterations and displays the training data
in blue/black=above/incorrect and green/red=below/incorrect [all blue/green = 100% accurate].
<lang Phix>-- demo\rosetta\Perceptron.exw
--
-- The learning curve turned out more haphazard than I imagined, and adding a
-- non-linear line to f() (case 5) was perhaps not such a great idea given how
-- much it sometimes struggles with some of the other straight lines anyway.
--
include pGUI.e
--#withtype Ihandle
--#withtype Ihandles
--#withtype cdCanvas
 
constant help_txt = """
A perceptron is the simplest possible neural network, consisting of just one neuron
that we train to recognise whether a point is above or below a given straight line.
NB: It would probably be unwise to overly assume that this could easily be adapted
to anything more complex, or actually useful. It is just a basic introduction, but
you have to start somewhere. What is interesting is that ultimately the neuron is
just three numbers, plus a bucket-load of training gumpf.
 
The left hand panel allows settings to be changed, in the middle we plot the rate of
learning, and on the right we show the training data colour coded as above/below and
correct/incorrect (blue/black=above/incorrect, green/red=below/incorrect). What you
want to see is all blue/green, with no black/red.
 
You can change the line algorithm (four straight and one curved that it is not meant
to be able to cope with), the number of points (size of training data), the learning
constant, learning rate (iterations/second) and the maximum number of iterations.
Note that training automatically stops once 100% accuracy is reached (since the error
is then always zero, no further changes would ever occur). Also note that a restart
is triggered when any setting is changed, not just when the restart button is pressed.
 
The learning curve was expected to start at 50% (random chance of being right) and
gradually improve towards 100%, except when the non-linear line was selected. It
turned out far more haphazard than I thought it would. Originally it allowed up to
10,000,000 iterations, but it rarely improved much beyond 1,000,000."""
 
function help_cb(Ihandln /*help*/)
IupMessage("Perceptron",help_txt)
return IUP_DEFAULT
end function
 
Ihandle dlg, plot, canvas, timer,
iteration, accuracy, w1, w2, w3
cdCanvas cddbuffer, cdcanvas
 
integer line_alg = 1
integer points = 2000,
learning_rate = 10000,
max_iterations = 1_000_000,
total_iterations = 0
atom learning_constant = 0.00001
 
enum WEIGHTS, -- The actual neuron (just 3 numbers)
TRAINING -- training data/results, variable length
enum INPUTS, ANSWER -- contents of [TRAINING]
-- note that length(inputs[i]) must = length(weights)
 
sequence perceptron = {},
last_wh -- (recreate "" on resize)
 
-- Threshold activation: +1 when t is positive, otherwise -1.
function activate(atom t)
return iff(t>0?+1:-1)
end function
 
-- The line the perceptron must learn, selected by line_alg:
-- cases 1..4 are straight lines; case 5 is deliberately non-linear
-- and therefore unlearnable by a single neuron.
function f(atom x)
switch line_alg
case 1: return x*0.7+40
case 2: return 300-0.3*x
case 3: return x*0.75
case 4: return 2*x+1
case 5: return x/2+sin(x/100)*100+100 -- (fail)
end switch
end function
 
-- (Re)build the global perceptron: n random weights in [-1,1) plus
-- `points` random training samples spread over the canvas (last_wh),
-- each labelled by activate(y-f(x)).
procedure new_perceptron(integer n)
sequence weights := repeat(0, n)
for i=1 to n do
weights[i] = rnd()*2 - 1
end for
sequence training := repeat(0,points)
integer {w,h} = last_wh
for i=1 to points do
integer x := rand(w),
y := rand(h),
answer := activate(y-f(x))
sequence inputs = {x, y, 1}
-- aside: inputs is {x,y,1}, rather than {x,y} because an
-- input of {0,0} could only ever yield 0, whereas
-- {0,0,1} can yield a non-zero guess: weights[3].
training[i] = {inputs, answer} -- {INPUTS, ANSWER}
end for
perceptron = {weights, training} -- {WEIGHTS, TRAINING}
end procedure
-- Classify an input vector: activate(dot(inputs, weights)).
-- Throws if the vector lengths disagree.
function feed_forward(sequence inputs)
if length(inputs)!=length(perceptron[WEIGHTS]) then
throw("weights and input length mismatch, program terminated")
end if
atom total := 0.0
for i=1 to length(inputs) do
total += inputs[i] * perceptron[WEIGHTS][i]
end for
return activate(total)
end function
-- One learning step: weights += learning_constant*(desired-guess)*inputs.
-- A correct guess (error=0) leaves the weights unchanged.
procedure train(sequence inputs, integer desired)
integer guess := feed_forward(inputs),
error := desired - guess
for i=1 to length(perceptron[WEIGHTS]) do
perceptron[WEIGHTS][i] += learning_constant * error * inputs[i]
end for
end procedure
--DEV add to pGUI/doc
procedure cdCanvasCircle(cdCanvas cddbuffer, atom x, y, r)
cdCanvasArc(cddbuffer,x,y,r,r,0,360)
end procedure
 
function draw(bool bDraw=true)
-- (if bDraw is false, we just want the "correct" count)
integer correct = 0
atom x, y
for i=1 to points do
{sequence inputs, integer answer} = perceptron[TRAINING][i]
integer guess := feed_forward(inputs)
correct += (guess=answer)
if bDraw then
{x,y} = inputs
-- blue/black=above/incorrect, green/red=below/incorrect
integer clr = iff(guess=answer?iff(guess>0?CD_BLUE:CD_GREEN)
:iff(guess>0?CD_BLACK:CD_RED))
cdCanvasSetForeground(cddbuffer, clr)
cdCanvasCircle(cddbuffer, x, y, 8)
end if
end for
if bDraw then
cdCanvasSetForeground(cddbuffer, CD_BLACK)
x := last_wh[1]
y := f(x)
if line_alg=5 then
-- non-linear so (crudely) draw in little segments
for i=0 to x by 20 do
cdCanvasLine(cddbuffer,i,f(i),i+20,f(i+20))
end for
else
cdCanvasLine(cddbuffer,0,f(0),x,y)
end if
end if
return correct
end function
bool re_plot = true
atom plot0
sequence plotx = repeat(0,19),
ploty = repeat(0,19)
integer imod = 1, -- keep every 1, then 10, then 100, ...
pidx = 1
 
function restart_cb(Ihandln /*restart*/)
last_wh = IupGetIntInt(canvas, "DRAWSIZE")
new_perceptron(3)
imod = 1
pidx = 1
total_iterations = 0
plot0 = (draw(false)/points)*100
re_plot = true
IupSetInt(timer,"RUN",1)
return IUP_DEFAULT
end function
 
function redraw_cb(Ihandle /*ih*/, integer /*posx*/, integer /*posy*/)
if perceptron={}
or last_wh!=IupGetIntInt(canvas, "DRAWSIZE") then
{} = restart_cb(NULL)
end if
cdCanvasActivate(cddbuffer)
cdCanvasClear(cddbuffer)
integer correct = draw()
cdCanvasFlush(cddbuffer)
 
if re_plot then
re_plot = false
IupSetAttribute(plot, "CLEAR", NULL)
IupPlotBegin(plot)
IupPlotAdd(plot, 0, plot0)
for i=1 to pidx-1 do
IupPlotAdd(plot, plotx[i], ploty[i])
end for
{} = IupPlotEnd(plot)
IupSetAttribute(plot, "REDRAW", NULL)
end if
IupSetStrAttribute(iteration,"TITLE","iteration: %d",{total_iterations})
IupSetStrAttribute(w1,"TITLE","%+f",{perceptron[WEIGHTS][1]})
IupSetStrAttribute(w2,"TITLE","%+f",{perceptron[WEIGHTS][2]})
IupSetStrAttribute(w3,"TITLE","%+f",{perceptron[WEIGHTS][3]})
IupSetStrAttribute(accuracy,"TITLE","accuracy: %.4g%%",{(correct/points)*100})
IupRefresh({iteration,w1,w2,w3,accuracy}) -- (force label resize)
if correct=points then
IupSetInt(timer,"RUN",0) -- stop at 100%
end if
return IUP_DEFAULT
end function
 
function map_cb(Ihandle ih)
cdcanvas = cdCreateCanvas(CD_IUP, ih)
cddbuffer = cdCreateCanvas(CD_DBUFFER, cdcanvas)
cdCanvasSetBackground(cddbuffer, CD_PARCHMENT)
return IUP_DEFAULT
end function
 
function valuechanged_cb(Ihandle ih)
string name = IupGetAttribute(ih, "NAME")
integer v = IupGetInt(ih, "VALUE")
switch name
case "line": line_alg = v
case "points": points = power(10,v)
case "learn": learning_constant = power(10,-v)
case "rate": learning_rate = power(10,v-1)
case "max": max_iterations = power(10,v)
end switch
{} = restart_cb(NULL)
return IUP_DEFAULT
end function
 
function timer_cb(Ihandle /*timer*/)
for i=1 to min(learning_rate,max_iterations) do
total_iterations += 1
integer c = mod(total_iterations,points)+1
train(perceptron[TRAINING][c][INPUTS], perceptron[TRAINING][c][ANSWER])
if mod(total_iterations,imod)=0 then
-- save 1,2..10, then 20,30,..100, then 200,300,..1000, etc
re_plot = true
plotx[pidx] = total_iterations
ploty[pidx] = (draw(false)/points)*100
if pidx=10 or pidx=19 then
if pidx=19 then
-- drop (eg) 1,2,..9, replace with 10,20,..90,
-- next time replace 10,20..90 with 100,200..900, etc
plotx[1..10] = plotx[10..19]
ploty[1..10] = ploty[10..19]
end if
imod *= 10
pidx = 11
else
pidx += 1
end if
end if
end for
if total_iterations>=max_iterations then
IupSetInt(timer,"RUN",0)
end if
IupUpdate(canvas)
return IUP_IGNORE
end function
 
function esc_close(Ihandle /*ih*/, atom c)
if c=K_ESC then return IUP_CLOSE end if
if c=K_F1 then return help_cb(NULL) end if
if c=K_F5 then return restart_cb(NULL) end if
return IUP_CONTINUE
end function
 
function settings(string lname, name, sequence opts, integer v=1)
Ihandle lbl = IupLabel(lname,"PADDING=0x4"),
list = IupList("NAME=%s, DROPDOWN=YES",{name}),
hbox = IupHbox({lbl,IupFill(),list})
for i=1 to length(opts) do
IupSetAttributeId(list,"",i,opts[i])
end for
IupSetInt(list,"VISIBLEITEMS",length(opts)+1)
IupSetInt(list,"VALUE",v)
IupSetCallback(list, "VALUECHANGED_CB", Icallback("valuechanged_cb"));
return hbox
end function
 
function sep()
return IupLabel("","SEPARATOR=HORIZONTAL")
end function
 
procedure main()
IupOpen()
IupControlsOpen()
 
Ihandle settings_lbl = IupHbox({IupFill(),IupLabel("Settings"),IupFill()}),
line = settings("line","line",{"x*0.7 + 40","300 - 0.3*x","x*0.75","2*x + 1","x/2+sin(x/100)*100+100"}),
points = settings("number of points","points",{"10","100","1000","10000"},3),
learn = settings("learning constant","learn",{"0.1","0.01","0.001","0.0001","0.00001"},5),
rate = settings("learning rate","rate",{"1/s","10/s","100/s","1000/s","10000/s"},5),
maxiter = settings("max iterations","max",{"10","100","1000","10,000","100,000","1,000,000"},6),
restart = IupButton("Restart (F5)", "ACTION", Icallback("restart_cb")),
helpbtn = IupButton("Help (F1)", "ACTION", Icallback("help_cb")),
buttons = IupHbox({restart,IupFill(),helpbtn})
 
iteration = IupLabel("iteration: 1")
w1 = IupLabel("1")
w2 = IupLabel("2")
w3 = IupLabel("3")
Ihandle weights = IupHbox({IupLabel("weights: ","PADDING=0x4"),IupVbox({w1,w2,w3})})
accuracy = IupLabel("accuracy: 12.34%")
 
Ihandle vbox = IupVbox({settings_lbl, sep(),
line, sep(), points, sep(), learn, sep(),
rate, sep(), maxiter, sep(), buttons, sep(),
IupHbox({iteration}), weights, IupHbox({accuracy})})
IupSetAttribute(vbox, "GAP", "4");
 
plot = IupPlot("MENUITEMPROPERTIES=Yes")
IupSetAttribute(plot, "TITLE", "Learning Curve");
IupSetAttribute(plot, "TITLEFONTSIZE", "10");
IupSetAttribute(plot, "TITLEFONTSTYLE", "ITALIC");
IupSetAttribute(plot, "GRIDLINESTYLE", "DOTTED");
IupSetAttribute(plot, "GRID", "YES");
IupSetAttribute(plot, "AXS_XLABEL", "iterations");
IupSetAttribute(plot, "AXS_YLABEL", "% correct");
IupSetAttribute(plot, "AXS_XFONTSTYLE", "ITALIC");
IupSetAttribute(plot, "AXS_YFONTSTYLE", "ITALIC");
IupSetAttribute(plot, "AXS_XTICKNUMBER", "No");
IupSetAttribute(plot, "AXS_YAUTOMIN", "No");
IupSetAttribute(plot, "AXS_YAUTOMAX", "No");
IupSetInt(plot, "AXS_YMIN", 0)
IupSetInt(plot, "AXS_YMAX", 100)
 
canvas = IupCanvas(NULL)
IupSetAttribute(canvas, "RASTERSIZE", "640x360") -- initial size
IupSetCallback(canvas, "MAP_CB", Icallback("map_cb"))
IupSetCallback(canvas, "ACTION", Icallback("redraw_cb"))
 
Ihandle hbox = IupHbox({vbox, plot, canvas},"MARGIN=4x4, GAP=10")
dlg = IupDialog(hbox);
IupSetCallback(dlg, "K_ANY", Icallback("esc_close"))
IupSetAttribute(dlg, "TITLE", "Perceptron")
IupMap(dlg)
IupSetAttribute(canvas, "RASTERSIZE", NULL) -- release limitation
IupShowXY(dlg,IUP_CENTER,IUP_CENTER)
timer = IupTimer(Icallback("timer_cb"), 100) -- (was 1 sec, now 0.1s)
IupMainLoop()
IupClose()
end procedure
main()</lang>
 
=={{header|Racket}}==
Line 750 ⟶ 1,380:
 
Run it and see the image for yourself, I can't get it onto RC!
 
=={{header|REXX}}==
{{trans|Java}}
<lang rexx>/* REXX ********************************************************
* Perceptron demo (translated from the Java entry): train a single
* neuron to tell whether a point lies above or below y=0.7*x+40.
*****************************************************************/
Call init
Call time 'R'
try=0
Call show 0                        /* accuracy of the untrained net */
/* Build the training set: each dot gets inputs (x,y,1) and the     */
/* desired answer sign(y-f(x)); r records above/below for display.  */
Do d=1 To dots
x=x.d
y=y.d
Parse Value x y 1 with inputs.0 inputs.1 inputs.2
answer.d=sign(y-f(x))
Select
When f(x)<y Then r='<'
When f(x)>y Then r='>'
Otherwise r='='
End
training.d=x y 1 answer.d
End
/* Run `tries` passes over the whole training set.                  */
Do try=1 To tries
Call time 'R'
zz=0
Do d=1 To dots
Parse Var training.d inputs.0 inputs.1 inputs.2 answer.d
Call train d
/* NOTE(review): this inner loop discards every guess it computes  */
/* and clobbers answer.d with values parsed from training.ii --    */
/* it looks like leftover debugging code; confirm before relying.  */
Do ii=1 To d
Parse Var training.ii inputs.0 inputs.1 inputs.2 answer.d
guess = feedForward(d)
End
End
Call show try
End
Exit
 
show:
Parse Arg run
show=wordpos(run,'0 1' tries)>0
If run>0 Then Say ' '
If show Then Say 'Point x f(x) r y ff ok zz'
zz=0
Do d=1 To dots
x=x.d
y=y.d
Parse Value x.d y.d 1 with inputs.0 inputs.1 inputs.2
ff=format(feedForward(),2)
Select
When f(x)<y Then r='<'
When f(x)>y Then r='>'
Otherwise r='='
End
If r='<' & ff=1 |,
r='>' & ff=-1 Then Do; tag='ok'; zz=zz+1; End
Else tag='--'
If show Then
Say format(d,5) format(x,4,0) format(f(x),4,0) r format(y,4,0) right(ff,2),
tag format(zz,4)
End
If show Then Say copies('-',33)
weights=format(weights.0,2,5) format(weights.1,2,5) format(weights.2,2,5)
Select
When run=0 Then txt='Initial pattern'
When run=1 Then txt='After one loop '
Otherwise txt='After' run 'loops'
End
Say left(txt,15) format(zz,4) 'points fire. weights='weights
Return
 
/* One learning step: weights += 0.00001 * (desired-guess) * inputs.  */
/* Note: the argument the caller passes is unused; the desired answer */
/* is recomputed here from the exposed inputs. stem.                  */
train: Procedure Expose inputs. weights.
desired=sign(inputs.1-f(inputs.0))
guess = feedForward()
error = desired-guess
Do i=0 To 2
weights.i=weights.i+0.00001*error*inputs.i
End
Return
 
f: Return arg(1)*0.7+40
 
nextDouble: /* random number between 0 and 1 */
Return random(100000)/100000
 
feedforward: Procedure Expose inputs. weights.
sum=0
Do i=0 To 2
sum=sum+inputs.i*weights.i
End
Return activate(sum)
 
activate:
If arg(1)>0 Then Return 1
Else Return -1
 
init:
Call random 10000,10000,333 /* seed the random function */
dots=30
width=640
height=360
tries=10
Do i=0 To 2
weights.i=nextDouble()
End
Do i=1 To dots
x.i=nextDouble()*width
y.i=nextDouble()*height
End
Return</lang>
{{out}}
<pre>Point x f(x) r y ff ok zz
1 100 110 < 204 1 ok 1
2 613 469 > 117 1 -- 1
3 528 409 > 125 1 -- 1
4 141 139 > 119 1 -- 1
5 32 62 < 245 1 ok 2
6 11 48 < 336 1 ok 3
7 435 344 > 270 1 -- 3
8 572 440 > 280 1 -- 3
9 442 350 > 141 1 -- 3
10 410 327 > 209 1 -- 3
11 290 243 < 355 1 ok 4
12 257 220 < 260 1 ok 5
13 235 205 > 51 1 -- 5
14 600 460 > 66 1 -- 5
15 21 55 < 182 1 ok 6
16 197 178 > 42 1 -- 6
17 444 351 > 150 1 -- 6
18 393 315 > 87 1 -- 6
19 622 475 > 280 1 -- 6
20 436 345 > 292 1 -- 6
21 553 427 > 261 1 -- 6
22 478 374 > 264 1 -- 6
23 373 301 > 120 1 -- 6
24 527 409 > 94 1 -- 6
25 558 431 > 49 1 -- 6
26 616 471 > 358 1 -- 6
27 241 209 > 68 1 -- 6
28 365 295 > 164 1 -- 6
29 371 299 > 155 1 -- 6
30 102 112 < 220 1 ok 7
---------------------------------
Initial pattern 7 points fire. weights= 0.28732 0.50931 0.45298
 
Point x f(x) r y ff ok zz
1 100 110 < 204 1 ok 1
2 613 469 > 117 1 -- 1
3 528 409 > 125 1 -- 1
4 141 139 > 119 1 -- 1
5 32 62 < 245 1 ok 2
6 11 48 < 336 1 ok 3
7 435 344 > 270 1 -- 3
8 572 440 > 280 1 -- 3
9 442 350 > 141 1 -- 3
10 410 327 > 209 1 -- 3
11 290 243 < 355 1 ok 4
12 257 220 < 260 1 ok 5
13 235 205 > 51 1 -- 5
14 600 460 > 66 1 -- 5
15 21 55 < 182 1 ok 6
16 197 178 > 42 1 -- 6
17 444 351 > 150 1 -- 6
18 393 315 > 87 1 -- 6
19 622 475 > 280 1 -- 6
20 436 345 > 292 1 -- 6
21 553 427 > 261 1 -- 6
22 478 374 > 264 1 -- 6
23 373 301 > 120 1 -- 6
24 527 409 > 94 1 -- 6
25 558 431 > 49 1 -- 6
26 616 471 > 358 1 -- 6
27 241 209 > 68 1 -- 6
28 365 295 > 164 1 -- 6
29 371 299 > 155 1 -- 6
30 102 112 < 220 1 ok 7
---------------------------------
After one loop 7 points fire. weights= 0.08433 0.43412 0.45252
 
After 2 loops 16 points fire. weights=-0.10749 0.35991 0.45208
 
After 3 loops 26 points fire. weights=-0.18168 0.31845 0.45192
 
After 4 loops 28 points fire. weights=-0.20192 0.30482 0.45186
 
After 5 loops 29 points fire. weights=-0.20473 0.30245 0.45184
 
After 6 loops 29 points fire. weights=-0.20755 0.30007 0.45182
 
After 7 loops 29 points fire. weights=-0.21037 0.29769 0.45180
 
After 8 loops 29 points fire. weights=-0.21319 0.29532 0.45178
 
After 9 loops 29 points fire. weights=-0.21601 0.29294 0.45176
 
Point x f(x) r y ff ok zz
1 100 110 < 204 1 ok 1
2 613 469 > 117 -1 ok 2
3 528 409 > 125 -1 ok 3
4 141 139 > 119 1 -- 3
5 32 62 < 245 1 ok 4
6 11 48 < 336 1 ok 5
7 435 344 > 270 -1 ok 6
8 572 440 > 280 -1 ok 7
9 442 350 > 141 -1 ok 8
10 410 327 > 209 -1 ok 9
11 290 243 < 355 1 ok 10
12 257 220 < 260 1 ok 11
13 235 205 > 51 -1 ok 12
14 600 460 > 66 -1 ok 13
15 21 55 < 182 1 ok 14
16 197 178 > 42 -1 ok 15
17 444 351 > 150 -1 ok 16
18 393 315 > 87 -1 ok 17
19 622 475 > 280 -1 ok 18
20 436 345 > 292 -1 ok 19
21 553 427 > 261 -1 ok 20
22 478 374 > 264 -1 ok 21
23 373 301 > 120 -1 ok 22
24 527 409 > 94 -1 ok 23
25 558 431 > 49 -1 ok 24
26 616 471 > 358 -1 ok 25
27 241 209 > 68 -1 ok 26
28 365 295 > 164 -1 ok 27
29 371 299 > 155 -1 ok 28
30 102 112 < 220 1 ok 29
---------------------------------
After 10 loops 29 points fire. weights=-0.21883 0.29057 0.45174</pre>
 
=={{header|Scala}}==
Line 958 ⟶ 1,813:
Trained on 19000, percent correct is 99.2
Trained on 20000, percent correct is 100.0</pre>
 
=={{header|Smalltalk}}==
{{works with|GNU Smalltalk}}
<lang Smalltalk>"Threshold activation on numbers: answer +1 when positive, else -1."
Number extend [

activate
[^self > 0 ifTrue: [1] ifFalse: [-1]]
]
 
Object subclass: Perceptron [

| weights |    "one weight per input element"

"Classify an input vector: the sign of the weighted input sum."
feedForward: inputArray
[^(self sumOfWeighted: inputArray) activate]

"One learning step: move each weight by
 0.0001 * (expected - actual) * input."
train: inputArray desire: expected
[| actual error |
actual := self feedForward: inputArray.
error := 0.0001 * (expected - actual).
weights := weights
with: inputArray
collect: [:weight :input | weight + (error * input)]]

"Dot product of the weight and input vectors."
sumOfWeighted: inputArray
[^(self weighted: inputArray)
inject: 0
into: [:each :sum | each + sum]]

"Pairwise products weight * input."
weighted: inputArray
[^weights
with: inputArray
collect: [:weight :input | weight * input]]

"Create a perceptron with `arity` random weights."
Perceptron class >> new: arity
[^self basicNew
initialize: arity;
yourself]

initialize: arity
[weights := 1
to: arity
collect: [:x | self randomWeight]]

"Answer a random weight in [-1, 1] with 0.001 resolution."
randomWeight
[^(Random between: -1000 and: 1000) / 1000]
]
 
Perceptron class extend [
 
| perceptron trainings input expected actual |
 
evaluationSamples := 100000.
 
initializeTest
[perceptron := self new: 3.
input := Array new: 3.
trainings := 0.
input at: 1 put: 1. "Bias"]
 
randomizeSample
[| x y |
x := Random between: 0 and: 640-1.
y := Random between: 0 and: 360-1.
expected := (y >= (2*x+1)) ifTrue: [1] ifFalse: [-1].
input at: 2 put: x.
input at: 3 put: y]
 
test
[self
initializeTest; evaluate;
train: 1; evaluate;
train: 1; evaluate;
train: 1; evaluate;
train: 1; evaluate;
train: 1; evaluate;
train: 5; evaluate;
train: 10; evaluate;
train: 30; evaluate;
train: 50; evaluate;
train: 100; evaluate;
train: 300; evaluate;
train: 500; evaluate]
 
evaluate
[| hits |
hits := 0.
evaluationSamples timesRepeat:
[self randomizeSample.
expected = (perceptron feedForward: input)
ifTrue: [hits := hits + 1]].
Transcript
display: 'After ';
display: trainings;
display: ' trainings: ';
display: (hits / evaluationSamples * 100) asFloat;
display: ' % accuracy';
nl]
 
train: anInteger
[anInteger timesRepeat:
[self randomizeSample.
perceptron
train: input
desire: expected.
trainings := trainings + 1]]
]
 
Perceptron test.</lang>
Example output:
<pre>After 0 trainings: 14.158 % accuracy
After 1 trainings: 14.018 % accuracy
After 2 trainings: 14.19 % accuracy
After 3 trainings: 14.049 % accuracy
After 4 trainings: 14.029 % accuracy
After 5 trainings: 14.105 % accuracy
After 10 trainings: 20.39 % accuracy
After 20 trainings: 57.08 % accuracy
After 50 trainings: 92.998 % accuracy
After 100 trainings: 98.988 % accuracy
After 200 trainings: 98.055 % accuracy
After 500 trainings: 99.777 % accuracy
After 1000 trainings: 98.523 % accuracy</pre>
 
=={{header|XLISP}}==
7,795

edits