//**************************************************************
// File: zcs2-animat.sce
// Authors: Gerd Doeben-Henisch
// Version Start: May-18, 2010
//---------------------------------
// Update May-19-2010-1459
// Update May-19-10-2343
// Update May-27-10-1825
// Update May-31-10-2005
// Update June-1-10-1743
// Update June-1-10-2119
// Update June-6-10-1552
//**********************************************************
// diary('H:\FH\SCILAB\HISTORIES\Date.txt')
//diary('/home/gerd/public_html/uffmm/science-technology/single/themes/computer-science/personal-sites/doeben-henisch/KNOW/GBBLT/SCILAB/HISTORIES/Date.txt')
//******************************************************************
// CONTENT: Necessary code for an ANIMAT2-agent, the successor of the
// ANIMAT0- and ANIMAT1-agents.
//***********************************
// BEHAVIOR FUNCTION WITH CLASSIFIERS
//
// A CLASSIFIER has the structure
// [SE, SI, A, R]
//
// SE := External sensory pattern, 8 x 2 bits: one 2-bit group for each of the
//       8 surrounding cells and the properties perceived there ('#' = don't care)
// SI := Internal sensory pattern (1 bit)
// A  := Action (0 = non-move, 1..8 = move towards one of the 8 surrounding cells)
// R  := Collected reward
//
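//---------------------------------------------------------------
// Illustration only, not part of the original ANIMAT code: a minimal
// sketch of how a single classifier row can be read. Assumption: the
// 16-character SE string holds one 2-character group per surrounding
// cell (cells 1..8), and '#' acts as a don't-care symbol.
exampleClassifier = ['####11##########' '0' '3' '000'];
exSE = exampleClassifier(1);   // external sensory pattern
exSI = exampleClassifier(2);   // internal sensory pattern
exA  = exampleClassifier(3);   // proposed action
exR  = exampleClassifier(4);   // collected reward (stored as a string)
for excell = 1:8
    exgroup = part(exSE, 2*excell-1:2*excell);   // 2-bit group of cell no. excell
    mprintf('cell %d : %s\n', excell, exgroup);
end
//---------------------------------------------------------------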
//CLASSIF0: classifier set including a non-move action (A = '0')
CLASSIF0 = [
'################' '0' '0' '000';
'11##############' '0' '1' '000';
'##11############' '0' '2' '000';
'####11##########' '0' '3' '000' ;
'######11########' '0' '4' '000' ;
'########11######' '0' '5' '000' ;
'##########11####' '0' '6' '000' ;
'############11##' '0' '7' '000' ;
'##############11' '0' '8' '000' ;
'00##############' '0' '1' '000' ;
'##00############' '0' '2' '000' ;
'####00##########' '0' '3' '000' ;
'######00########' '0' '4' '000' ;
'########00######' '0' '5' '000' ;
'##########00####' '0' '6' '000' ;
'############00##' '0' '7' '000' ;
'##############00' '0' '8' '000';
'################' '1' '0' '000';
'11##############' '1' '1' '000';
'##11############' '1' '2' '000';
'####11##########' '1' '3' '000' ;
'######11########' '1' '4' '000' ;
'########11######' '1' '5' '000' ;
'##########11####' '1' '6' '000' ;
'############11##' '1' '7' '000' ;
'##############11' '1' '8' '000' ;
'00##############' '1' '1' '000' ;
'##00############' '1' '2' '000' ;
'####00##########' '1' '3' '000' ;
'######00########' '1' '4' '000' ;
'########00######' '1' '5' '000' ;
'##########00####' '1' '6' '000' ;
'############00##' '1' '7' '000' ;
'##############00' '1' '8' '000'
]
//CLASSIF: classifier set without a non-move action
CLASSIF = [
'11##############' '0' '1' '000';
'##11############' '0' '2' '000';
'####11##########' '0' '3' '000' ;
'######11########' '0' '4' '000' ;
'########11######' '0' '5' '000' ;
'##########11####' '0' '6' '000' ;
'############11##' '0' '7' '000' ;
'##############11' '0' '8' '000' ;
'00##############' '0' '1' '000' ;
'##00############' '0' '2' '000' ;
'####00##########' '0' '3' '000' ;
'######00########' '0' '4' '000' ;
'########00######' '0' '5' '000' ;
'##########00####' '0' '6' '000' ;
'############00##' '0' '7' '000' ;
'##############00' '0' '8' '000';
'11##############' '1' '1' '000';
'##11############' '1' '2' '000';
'####11##########' '1' '3' '000' ;
'######11########' '1' '4' '000' ;
'########11######' '1' '5' '000' ;
'##########11####' '1' '6' '000' ;
'############11##' '1' '7' '000' ;
'##############11' '1' '8' '000' ;
'00##############' '1' '1' '000' ;
'##00############' '1' '2' '000' ;
'####00##########' '1' '3' '000' ;
'######00########' '1' '4' '000' ;
'########00######' '1' '5' '000' ;
'##########00####' '1' '6' '000' ;
'############00##' '1' '7' '000' ;
'##############00' '1' '8' '000'
]
//**************************************************************
// MATCHSET
//
// Set of Classifiers which have been selected for a possible action
//
MATCHSET=[] // (13)
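//---------------------------------------------------------------
// Illustration only: a minimal sketch (hypothetical helper functions,
// not the original matching routine) of how a MATCHSET could be built
// from CLASSIF. Assumption: a classifier matches when every non-'#'
// character of its SE pattern equals the corresponding character of
// the perceived external string and its SI entry equals the internal
// state.
function m = zcs2_matches(pattern, percept)
    // %T if PATTERN (with '#' as don't-care) covers PERCEPT
    m = %T;
    for k = 1:length(pattern)
        p = part(pattern, k);
        if (p <> '#') & (p <> part(percept, k)) then
            m = %F;
            return;
        end
    end
endfunction

function mset = zcs2_buildMatchset(classif, perceptSE, perceptSI)
    // collect all rows of CLASSIF whose SE and SI fit the percept
    mset = [];
    for i = 1:size(classif, 1)
        if zcs2_matches(classif(i,1), perceptSE) & (classif(i,2) == perceptSI) then
            mset = [mset; classif(i,:)];
        end
    end
endfunction

// Hypothetical example call: property '11' perceived in cell 3,
// '00' everywhere else, internal state '0':
// MATCHSET = zcs2_buildMatchset(CLASSIF, '0000110000000000', '0')
//---------------------------------------------------------------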
//**************************************************************
// ACTIONSET
//
// Set of Classifiers which have been EXECUTED BEFORE
// No.1 = The LAST action
// No.2 = The PRECEDING action
// ....
//
ACTIONSET=[] // (14 )
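//---------------------------------------------------------------
// Illustration only: a minimal sketch (hypothetical helpers, not the
// original ANIMAT code) of how the ACTIONSET could be maintained.
// The classifier that has just been executed is put in front (No.1),
// older entries move back, and at most DEPTH entries are kept.
function newset = zcs2_pushAction(aset, classifier, depth)
    newset = [classifier; aset];          // newest action first
    if size(newset, 1) > depth then
        newset = newset(1:depth, :);      // forget everything beyond DEPTH
    end
endfunction

// Assumption: a reward received now is accumulated into the R field
// (column 4) of every classifier remembered in the ACTIONSET.
function newset = zcs2_addReward(aset, reward)
    newset = aset;
    for i = 1:size(newset, 1)
        r = evstr(newset(i,4)) + reward;      // R is stored as a string like '000'
        newset(i,4) = msprintf('%03d', r);
    end
endfunction
//---------------------------------------------------------------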
//*************************************************
// ANIMAT STRUCTURE
// The ANIMAT has a POSITION = (Yanimat, Xanimat)
// and an ENERGY state (ENERGYTotal and ENERGYActual).
// The set of CLASSIFIERS is the PREWIRED KNOWLEDGE
// connecting perceived PROPERTIES and VITAL parameters
// with a proposed ACTION and accumulated REWARDS.
// The ENERGY level is controlled
// by TIME and by ACTIVITY:
// FOOD adds the amount given in FOODIDX,
// NONFOOD adds a negative amount (NONFOODIDX).
// The threshold VTHRESHOLD controls the VITAL state:
// if the energy is ABOVE the threshold then VITAL = 1, otherwise VITAL = 0.
// ACTDEPTH controls how many PRECEDING actions are remembered
// for accumulating REWARDS to actions.
// (A sketch of this energy/VITAL update follows the ANIMAT definition below.)
FOODIDX = 100 //(10)
NONFOODIDX = -1 // (11)
VTHRESHOLD = FOODIDX/2 // (12)
MATCHSET =[] //(13)
ACTIONSET =[] //(14)
Xanimat = 3 //(1)
Yanimat = 5 //(2)
ENERGYTotal = FOODIDX //(3)
ENERGYActual = 0 //(4)
ENERGYInput = FOODIDX //(5) Start value, could be different; part of the VITAL dimension of the agent
VITAL = 1 //(6)
ACTOLD = [] //(7) Classifier with last action.
//CLASSIF (8)
ACTDEPTH = 2 //(9) How many preceding actions can be memorized
ANIMAT = list(Xanimat, Yanimat, ENERGYTotal, ENERGYActual, ENERGYInput, VITAL, ACTOLD, CLASSIF, ACTDEPTH, FOODIDX, NONFOODIDX, VTHRESHOLD, MATCHSET, ACTIONSET )
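//*************************************************
// Illustration only: a minimal sketch (hypothetical helper, not the
// original ANIMAT update rule) of the energy bookkeeping described
// above: FOOD adds FOODIDX, any other step adds the negative
// NONFOODIDX, and VITAL is 1 only while the actual energy lies above
// VTHRESHOLD. Timing effects and the ENERGYTotal bookkeeping are left
// out of this sketch.
function [eOut, vital] = zcs2_updateEnergy(eIn, foundFood, foodIdx, nonFoodIdx, vThreshold)
    if foundFood then
        eOut = eIn + foodIdx;       // FOOD found in the current cell
    else
        eOut = eIn + nonFoodIdx;    // NONFOOD: small negative amount
    end
    if eOut > vThreshold then
        vital = 1;                  // energy above threshold
    else
        vital = 0;                  // energy at or below threshold
    end
endfunction

// Hypothetical example call: one step without food
// [ENERGYActual, VITAL] = zcs2_updateEnergy(ENERGYActual, %F, FOODIDX, NONFOODIDX, VTHRESHOLD)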