(*
 * lablai - An ML Artificial Intelligence library
* Copyright (C) 2006 Till Crueger
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*)
(* File $RCSfile$ *)
(* last edited by $Author: till_crueger $ *)
(* $Date: 2007-12-17 23:10:58 +0100 (Mon, 17 Dec 2007) $, $Revision: 30 $ *)
(* Dot product of two float vectors. Uses unsafe accesses for speed,
   so v2 must be at least as long as v1. *)
let ( *| ) v1 v2 =
  let tmp = ref 0.0 in
  for i = 0 to Array.length v1 - 1 do
    tmp := !tmp +. Array.unsafe_get v1 i *. Array.unsafe_get v2 i
  done;
  !tmp
;;
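(* e.g. [| 1.0; 2.0 |] *| [| 3.0; 4.0 |] = 1.0 *. 3.0 +. 2.0 *. 4.0 = 11.0 *)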
(* Matrix-vector product: one dot product per row of mat. *)
let ( |*| ) mat vec =
  let res = Array.make (Array.length mat) 0.0 in
  for i = 0 to Array.length mat - 1 do
    Array.unsafe_set res i (Array.unsafe_get mat i *| vec)
  done;
  res
;;
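(* e.g. [| [|1.0; 2.0|]; [|3.0; 4.0|] |] |*| [|5.0; 6.0|] = [|17.0; 39.0|] *)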
(* The logistic sigmoid, used as the transfer function of every unit. *)
let sigmoid x = 1.0 /. (1.0 +. exp (-. x));;
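(* Its derivative can be expressed through its own value,
   sigmoid' x = sigmoid x *. (1.0 -. sigmoid x),
   which is what the training code below computes as
   output.(i) *. (1.0 -. output.(i)). *)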
(* A multi-layer perceptron. [size] lists the number of units per
   layer, from the input layer to the output layer. *)
class mlperceptron (size : int list) =
  (* Weights between a layer of n units and a layer of m units:
     m rows of n+1 entries each, the extra entry being the bias
     weight; all are initialised uniformly in [-0.1, 0.1). *)
  let create_links n m =
    Array.init m (fun _ -> Array.init (n + 1) (fun _ -> Random.float 0.2 -. 0.1))
  in
  (* One weight matrix per pair of consecutive layers. *)
  let rec create_rep size =
    match size with
      n :: m :: size -> create_links n m :: create_rep (m :: size)
    | _ -> []
  in
  object (self)
    val mutable rep : float array array list = create_rep size
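    (* Example: for size = [2; 3; 1], rep is a list of two matrices: a
       3x3 matrix (3 hidden units, 2 inputs plus bias) and a
       1x4 matrix (1 output unit, 3 hidden units plus bias). *)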
    (* Feed [input] forward through the network and return the
       activation of the output layer. *)
    method eval input =
      (* helper to do the processing of all layers *)
      let rec loop rep activation =
        (* get the current layer *)
        match rep with
          layer :: rest_rep ->
            begin
              if Array.length layer.(0) <> Array.length activation + 1 then
                failwith "Input Size Error";
              (* add the bias *)
              let activation = Array.append [|1.0|] activation in
              (* calculate the weighted sums *)
              let sums = layer |*| activation in
              (* apply transfer functions *)
              let output = Array.map sigmoid sums in
              (* descend to next layer *)
              loop rest_rep output
            end
        | [] -> activation (* we have reached the output layer -> return *)
      in
      (* apply the input to all layers *)
      loop rep input
    (* One step of backpropagation: feed [input] forward, compare the
       result against the target vector [example], and update every
       weight with learning rate [alpha]. *)
    method train input example (alpha : float) =
      let loop rep activation =
        (* this loop handles layers 2 to n *)
        let rec loop2 rep activation =
          match rep with
            layer :: rest_rep ->
              begin
                if Array.length layer.(0) <> Array.length activation + 1 then
                  failwith "Input Size Error";
                (* add the bias *)
                let activation = Array.append [|1.0|] activation in
                (* calculate the weighted sums *)
                let sums = layer |*| activation in
                (* apply transfer functions *)
                let output = Array.map sigmoid sums in
                (* descend to next layer and get the weighted sums of the
                   deltas from there *)
                let delta_sums = loop2 rest_rep output in
                (* turn those sums into deltas; output.(i) *. (1.0 -. output.(i))
                   is the derivative of the sigmoid at unit i *)
                let deltas = Array.mapi (fun i x -> output.(i) *. (1.0 -. output.(i)) *. x) delta_sums in
                (* array for the delta sums passed down to the previous layer *)
                let new_deltas = Array.make (Array.length activation) 0.0 in
                (* update the weights *)
                let n = Array.length layer in
                let m = Array.length (Array.unsafe_get layer 0) in
                for i = 0 to n - 1 do
                  let weightsi = Array.unsafe_get layer i in
                  let deltai = Array.unsafe_get deltas i in
                  for j = 0 to m - 1 do
                    let weightij = Array.unsafe_get weightsi j in
                    let change = alpha *. deltai *. Array.unsafe_get activation j in
                    (* accumulate this weight's share of the corresponding new
                       delta sum, using the weight from before the update *)
                    Array.unsafe_set new_deltas j (Array.unsafe_get new_deltas j +. deltai *. weightij);
                    Array.unsafe_set weightsi j (weightij +. change)
                  done
                done;
                (* pass the delta sums to the caller, but throw away the one
                   belonging to the bias *)
                Array.sub new_deltas 1 (Array.length activation - 1)
              end
          | [] ->
              (* output layer reached: the initial error is the difference
                 between the target and the actual output *)
              Array.mapi (fun i x -> example.(i) -. x) activation
        in
        (* Special faster case for the first (and most often largest) layer:
           identical to loop2, except that no delta sums are accumulated
           because there is no previous layer to pass them to. *)
        match rep with
          layer :: rest_rep ->
            begin
              if Array.length layer.(0) <> Array.length activation + 1 then
                failwith "Input Size Error";
              (* add the bias *)
              let activation = Array.append [|1.0|] activation in
              (* calculate the weighted sums *)
              let sums = layer |*| activation in
              (* apply transfer functions *)
              let output = Array.map sigmoid sums in
              (* descend to next layer and get the weighted sums of the
                 deltas from there *)
              let delta_sums = loop2 rest_rep output in
              (* turn those sums into deltas *)
              let deltas = Array.mapi (fun i x -> output.(i) *. (1.0 -. output.(i)) *. x) delta_sums in
              (* update the weights *)
              let n = Array.length layer in
              let m = Array.length (Array.unsafe_get layer 0) in
              for i = 0 to n - 1 do
                let weightsi = Array.unsafe_get layer i in
                let deltai = Array.unsafe_get deltas i in
                for j = 0 to m - 1 do
                  let weightij = Array.unsafe_get weightsi j in
                  let change = alpha *. deltai *. Array.unsafe_get activation j in
                  Array.unsafe_set weightsi j (weightij +. change)
                done
              done
              (* nothing has to be returned from the first layer *)
            end
        | [] -> ()
      in
      loop rep input
    (* Serialise the weights to [channel], which must be in binary mode. *)
    method dump channel () =
      Marshal.to_channel channel rep []
    (* Restore previously dumped weights from [channel]. *)
    method restore channel () =
      rep <- Marshal.from_channel channel
  end;;
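(* A minimal usage sketch (illustrative only, not part of the library):
   train a 2-2-1 network on XOR and print its outputs. The layer sizes,
   learning rate, iteration count and file name are arbitrary choices;
   XOR training from random initial weights can occasionally get stuck,
   so a restart may be needed. The code is commented out so that loading
   this file has no side effects. *)
(*
let () =
  Random.self_init ();
  let net = new mlperceptron [2; 2; 1] in
  let samples =
    [ [|0.0; 0.0|], [|0.0|];
      [|0.0; 1.0|], [|1.0|];
      [|1.0; 0.0|], [|1.0|];
      [|1.0; 1.0|], [|0.0|] ]
  in
  (* train for a fixed number of passes over the samples *)
  for _i = 1 to 10000 do
    List.iter (fun (input, target) -> net#train input target 0.5) samples
  done;
  (* print the learned mapping *)
  List.iter
    (fun (input, _) ->
       let out = net#eval input in
       Printf.printf "%.0f XOR %.0f -> %.3f\n" input.(0) input.(1) out.(0))
    samples;
  (* save the weights for later use with #restore *)
  let oc = open_out_bin "xor_weights.bin" in
  net#dump oc ();
  close_out oc
*)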
(*
* $Log$
* Revision 1.3 2007/12/17 22:10:54 till_crueger
* - Added module for non-object-oriented Multi-Layer-Perceptrons
*
* Revision 1.2 2007/12/15 18:52:57 till_crueger
*
*
* - Updated documentation
* - Moved Log-Tags to a better position in the sources
*
*)