Gradient descent: Difference between revisions

m
(Added Perl example)
Line 209:
 
# Calculate initial norm.
my $b = $alpha / (my $delG = (map {@fi[$_]²}, ^N).sum).sqrt;
for ^N { $delG += @fi[$_]² }
my $b = $alpha / $delG.sqrt;
 
while ( $delG > $tolerance ) { # Iterate until value is <= tolerance.
Line 221 ⟶ 219:
 
# Calculate next norm.
$b = $alpha / ($delG = (map {@fi[$_]²}, ^N).sum).sqrt;
for ^N { $delG += @fi[$_]² }
$b = $alpha / $delG.sqrt;
 
my $g1 = g(@x); # Calculate next value.
Line 232 ⟶ 228:
 
sub gradG(@x, $h) { # Provides a rough calculation of gradient g(x).
    # Forward finite-difference approximation: perturb each coordinate
    # by $h and divide the change in g by $h.
    my \N = +@x ;        # number of dimensions
    my @y = @x ;         # working copy of the input point
    my $g0 = g(@x) ;     # baseline function value at @x
    # NOTE(review): @y[$_] is not restored after each perturbation, so
    # later components are computed at an already-perturbed point —
    # this matches the original revision's behaviour; confirm intended.
    return map { @y[$_] += $h ; (g(@y) - $g0) / $h }, ^N
}
 
350

edits