Wednesday, 26 July 2017

LSTM with no learning - recall and count ahead.

Without any training, my LSTM remembers for a specified delay and counts ahead. It also has a go at predicting the future for some rudimentary time series.

But it's not been trained, because the weight updates don't affect this output - it does all this off the cuff!

What's going on? I hope I can fix this.

Oh, and the memory behaviour is affected by the training, which specifies when to recall. I am uploading this doozy for anyone to play with and maybe get it to do the right thing!

This upload remembers stuff: you can specify when to recall by setting ahead=1,2,3 steps ahead.

This upload counts ahead.

All with no learning - what's going on!

Here is roughly what I've implemented:


 Forward Pass

Act_FGate = ft = sigmoid_(Wgt[0]*(Hin+In)+Bias[0],1,0);        //Forget gate
Act_IGate = It = sigmoid_(Wgt[1]*(Hin+In)+Bias[1],1,0);        //Include gate
Ct_       = tanh_(Wgt[2]*(Hin+In)+Bias[2],1,0);                //Candidate state
Act_CGate = Ct = ft*Ctin + It*Ct_;                             //Cell state
Act_OGate = Ot = sigmoid_(Wgt[3]*(Hin+In)+Bias[3],1,0);        //Out gate
Hout = Ot * tanh(Ct);                                          //Output
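
If it helps to see that as something compilable, here's a minimal scalar sketch of the same forward step (helper names are mine, and I use a plain sigmoid rather than the clipped one in the full listing further down):

#include <cmath>

// One scalar LSTM cell step - a sketch of the forward pass above.
// w[0..3]/b[0..3] are the forget, include, candidate and out gate
// weights/biases, matching the Wgt[]/Bias[] indexing above.
static double sigmoid(double x){ return 1.0/(1.0+exp(-x)); }

void lstm_step(const double w[4], const double b[4],
               double Hin, double In, double Ctin,
               double &Ct, double &Hout){
    double z   = Hin + In;                    // recurrent and fresh input are summed
    double ft  = sigmoid(w[0]*z + b[0]);      // forget gate
    double It  = sigmoid(w[1]*z + b[1]);      // include gate
    double Ct_ = tanh(w[2]*z + b[2]);         // candidate state
    Ct   = ft*Ctin + It*Ct_;                  // new cell state
    double Ot  = sigmoid(w[3]*z + b[3]);      // out gate
    Hout = Ot*tanh(Ct);                       // recurrent output
}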

Backward Pass

***Backprop error:

Hout_Err = Out - Hout

Ctin_Err  = Inv_Tanh(Wgt_O * Hout_Err)

Err_FGate = Inv_Sigmoid(Wgt_F * Hout_Err)

Err_IGate = Inv_Sigmoid(Wgt_I * Hout_Err)

Err_CGate = Inv_Tanh(Wgt_C * Hout_Err)

Hin_Err = Err_CGate + Err_IGate + Err_FGate

Next layer down Hout_Err = Hin_Err

***Update Wgts (for each of Wgt_F, Wgt_I, Wgt_C, Wgt_O):

WgtDelta = (Hin+In)*Err_Gate*Act_Gate*Lrt + Momentum*PreDelta - Decay*PreWgt

PreWgt   = Wgt
PreDelta = WgtDelta
Wgt     += WgtDelta
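
Condensed to one scalar cell, the backprop I've implemented (the recipe I suspect is broken) comes out roughly as below. Names are mine; inv_sigmoid/inv_tanh are the activation derivatives written in terms of the activation:

// Backprop as implemented above, scalar form - a sketch, not a derivation.
static double inv_sigmoid(double a){ return a*(1.0-a); }
static double inv_tanh(double a){ return 1.0-a*a; }

void lstm_backstep(double Hin, double In, double Hout_Err,
                   const double Act_Gate[4],                  // Act_F, Act_I, Act_C, Act_O
                   double Lrt, double Momentum, double Decay,
                   double Wgt[4], double PreWgt[4], double PreDelta[4]){
    // Gate errors: push Hout_Err back through each gate's weight and "inverse" squashing
    double Err_Gate[4];
    Err_Gate[0] = inv_sigmoid(Wgt[0]*Hout_Err);   // Err_FGate
    Err_Gate[1] = inv_sigmoid(Wgt[1]*Hout_Err);   // Err_IGate
    Err_Gate[2] = inv_tanh(Wgt[2]*Hout_Err);      // Err_CGate
    Err_Gate[3] = inv_tanh(Wgt[3]*Hout_Err);      // Ctin_Err, via the out gate

    // Error handed down as the next layer's Hout_Err
    double Hin_Err = Err_Gate[0]+Err_Gate[1]+Err_Gate[2];
    (void)Hin_Err;

    // Delta rule with momentum and weight decay, one weight per gate
    for(int g=0; g<4; g++){
        double delta = (Hin+In)*Err_Gate[g]*Act_Gate[g]*Lrt
                     + Momentum*PreDelta[g] - Decay*PreWgt[g];
        PreWgt[g]   = Wgt[g];
        PreDelta[g] = delta;
        Wgt[g]     += delta;
    }
}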

Here is the correct pseudocode, borrowed / interpreted from a translation online:

Forward

PreAct_FGate = U_FGate*(Hin+In) + W_FGate*Hout(t-1) + V_FGate*C(t-1)
PreAct_IGate = U_IGate*(Hin+In) + W_IGate*Hout(t-1) + V_IGate*C(t-1)

PreAct_CGate = U_CGate*(Hin+In) + W_CGate*Hout(t-1)

Act_FGate = Sigmoid(PreAct_FGate)
Act_IGate = Sigmoid(PreAct_IGate)
Ct_       = LSigmoid(PreAct_CGate)

Ct = Act_FGate * C(t-1) + Act_IGate * Ct_

PreAct_OGate = U_OGate*(Hin+In) + W_OGate*Hout(t-1) + V_OGate*C(t)

Act_OGate = Sigmoid(PreAct_OGate)

Hout = Act_OGate * tanh(Ct)
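
A scalar sketch of that forward step (names are mine, and I've used tanh for the candidate squashing where the pseudocode says LSigmoid - either squashing works, the structure is the point). U acts on the input, W on the previous output, and V is the peephole onto the cell state:

#include <cmath>

// Peephole LSTM forward step, scalar form - a sketch of the pseudocode above.
struct GateWgts { double U, W, V; };

static double sigm(double x){ return 1.0/(1.0+exp(-x)); }

void peephole_step(const GateWgts &F, const GateWgts &I,
                   const GateWgts &G, const GateWgts &O,
                   double In, double Hout_prev, double &C, double &Hout){
    double f = sigm(F.U*In + F.W*Hout_prev + F.V*C);   // forget gate peeps at C(t-1)
    double i = sigm(I.U*In + I.W*Hout_prev + I.V*C);   // include gate peeps at C(t-1)
    double g = tanh(G.U*In + G.W*Hout_prev);           // candidate - no peephole
    C = f*C + i*g;                                     // new cell state C(t)
    double o = sigm(O.U*In + O.W*Hout_prev + O.V*C);   // out gate peeps at C(t)
    Hout = o*tanh(C);
}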



Backpass:


Hin_Err = Sum U_Gate*Err_Gate(t) + Sum W_Gate*Err_Gate(t+1)   <---- For Layer Above

Err_OGate(t) = Inv_Sig(PreAct_OGate(t)) * tanh(C(t)) * Hout_Err(t)

Ct_Err(t) = Act_OGate(t) * Inv_Tanh(C(t)) * Hout_Err(t)
          + Act_FGate(t+1) * Ct_Err(t+1)
          + V_IGate * Err_IGate(t+1)
          + V_FGate * Err_FGate(t+1)
          + V_OGate * Err_OGate(t)

Err_CGate(t) = Inv_LSig(PreAct_CGate(t)) * Act_IGate(t) * Ct_Err(t)

Err_FGate(t) = Inv_Sig(PreAct_FGate(t)) * C(t-1) * Ct_Err(t)

Err_IGate(t) = Inv_Sig(PreAct_IGate(t)) * LSig(PreAct_CGate(t)) * Ct_Err(t)


*(Hout_Err for the layer below = Hin_Err;
  at the output layer, Hout_Err = Out - Hout)

*(Three activation functions:
  Logistic Sigmoid, Tanh, Sigmoid)

*(Three weight sets - U, W, V)
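
And a scalar sketch of those deltas for one timestep (names are mine; the t+1 quantities are the stored deltas from the step after this one, and I again assume tanh for the candidate where the pseudocode says LSig):

#include <cmath>

// Gate deltas for one timestep of the peephole backpass above - a sketch.
static double inv_sig(double a){ return a*(1.0-a); }     // sigmoid' via the activation

void peephole_backstep(double f, double i, double g, double o,   // gate activations at t
                       double C, double C_prev,                  // cell states C(t), C(t-1)
                       double Hout_Err,                          // error arriving at Hout(t)
                       double f_next, double Ct_Err_next,        // Act_FGate(t+1), Ct_Err(t+1)
                       double di_next, double df_next,           // Err_IGate(t+1), Err_FGate(t+1)
                       double V_I, double V_F, double V_O,       // peephole weights
                       double &d_o, double &Ct_Err,
                       double &d_c, double &d_f, double &d_i){
    double tC = tanh(C);
    d_o    = inv_sig(o) * tC * Hout_Err;                         // Err_OGate(t)
    Ct_Err = o * (1.0 - tC*tC) * Hout_Err                        // Ct_Err(t)
           + f_next*Ct_Err_next
           + V_I*di_next + V_F*df_next + V_O*d_o;
    d_c = (1.0 - g*g) * i * Ct_Err;                              // Err_CGate(t)
    d_f = inv_sig(f) * C_prev * Ct_Err;                          // Err_FGate(t)
    d_i = inv_sig(i) * g * Ct_Err;                               // Err_IGate(t)
}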




Monday, 17 July 2017

Cannot get this LSTM to work yet!

#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <vector>
#include <algorithm>    // std::transform
#include <functional>   // std::plus

using namespace std;

double function_x(double sum,int type, double y);
double function_y(double sum,int type, double y);
double sig_vec(double sum);
double tanh_vec(double sum);
double invtanh_vec(double sum);
double invsig_vec(double sum);

class ltsm_module{
public:
double *Wgt;    //4 x Weights for each gate
double Out;    //Final Output
double Hout;    //Recurrent Output (Hin for next Neuron)
double Hin;
double In;
double Ctin;
double *Err;    //2 x Error from Hout and Out 2 x Error at Hin/X and Ctin
double *Bias;   //Each of the bias for each gate
double Lrt,Momentum,Decay;
//Vector Inputs/Output
int Input_Size;
int Wgt_Size;
int wgt_scalar;
vector<double> Out_Vec;
vector<double> Hout_Vec;
vector<double> Hin_Vec;
vector<double> In_Vec;        //Input Vector
vector<double> Ctin_Vec;
vector<double> Ct_Vec;      //Cell Activation records Ct-1   
//Vector Error
vector<double> Ctin_Err;
vector<double> Hin_Err;
vector<double> Hout_Err;
vector<double> Out_Err;
//Act
vector<double> Act_F;
vector<double> Act_I;
vector<double> Act_C;
vector<double> Act_O;
//Vector Wgts
vector<double> Wgt_F;
vector<double> Wgt_I;
vector<double> Wgt_C;
vector<double> Wgt_O;
//Vector PreDelta
vector<double> PreDlta_F;
vector<double> PreDlta_I;
vector<double> PreDlta_C;
vector<double> PreDlta_O;
//Vector PreWgts
vector<double> PreWgt_F;
vector<double> PreWgt_I;
vector<double> PreWgt_C;
vector<double> PreWgt_O;
//Error Vect
vector<double> Err_FGate;
vector<double> Err_IGate;
vector<double> Err_CGate;

        //Update weights as though they are 3 weights entering
        //a neuron, using previous weight values / error and the
        //sub-activation of the previous unit:
        //Output Wgt uses Hout error over InvSigmoid
        //Input Wgt  uses Out error over ""
        //Forget Wgt uses Out error over ""
       

double (*m_pointertofunction)(double,int,double);  //Pointer to Func

void init_module(){

Lrt=0.095;
Decay=0.000005;
Momentum=0.005;
wgt_scalar=0;
Wgt_Size = 4;
Wgt = new double[4];
Bias = new double[4];


for(int i=0;i<4;i++){
Wgt[i] = double(rand()/(RAND_MAX + 1.0));
Bias[i] = double(rand()/(RAND_MAX + 1.0));
            }

/*Memory to Module Vector*/
Hin_Vec.resize(Input_Size);
In_Vec.resize(Input_Size);
Ctin_Vec.resize(Input_Size);
Ct_Vec.resize(Input_Size);

Ctin_Err.resize(Input_Size);
Hin_Err.resize(Input_Size);
Hout_Err.resize(Input_Size);
Out_Err.resize(Input_Size);
Wgt_F.resize(Wgt_Size);
Wgt_I.resize(Wgt_Size);
Wgt_C.resize(Wgt_Size);
Wgt_O.resize(Wgt_Size);
Act_F.resize(Wgt_Size);
Act_I.resize(Wgt_Size);
Act_C.resize(Wgt_Size);
Act_O.resize(Wgt_Size);
PreWgt_F.resize(Wgt_Size);
PreWgt_I.resize(Wgt_Size);
PreWgt_C.resize(Wgt_Size);
PreWgt_O.resize(Wgt_Size);
PreDlta_F.resize(Wgt_Size);
PreDlta_I.resize(Wgt_Size);
PreDlta_C.resize(Wgt_Size);
PreDlta_O.resize(Wgt_Size);
Err_IGate.resize(Input_Size);
Err_CGate.resize(Input_Size);
Err_FGate.resize(Input_Size);

Init_Wgts(Wgt_F);
Init_Wgts(Wgt_I);
Init_Wgts(Wgt_C);
Init_Wgts(Wgt_O);                       
}

void update_module(double Hin_,double In_,double Ctin_){
double ft,Ct,It,Ct_,Ot;
double (*sigmoid_)(double,int,double);
double (*tanh_)(double,int,double);
tanh_ = function_y;
sigmoid_ = function_x;
Hin = Hin_;
In = In_;
Ctin= Ctin_;
ft = sigmoid_(Wgt[0]*(Hin+In)+Bias[0],1,0);        //forget gate
It = sigmoid_(Wgt[1]*(Hin+In)+Bias[1],1,0);        //Include Gate
Ct_= tanh_(Wgt[2]*(Hin+In)+Bias[2],1,0);
Ct = ft*Ctin+It*Ct_;
                        //Out gate
Ot = sigmoid_(Wgt[3]*(Hin+In)+Bias[3],1,0);
Hout = Ot * tanh(Ct);                //Outputs
Out = Ct;
}

void update_module_vec(vector<double> Hin_,vector<double> In_,vector<double> Ctin_){
vector<double> ft,Ct,It,Ct_,Ot;

Hin_Vec = Hin_;
In_Vec = In_;
Ctin_Vec= Ctin_;

//Cycle through each gate: weight the summed inputs, add bias, squash
vector<double> Sum_Vec;
vector<double> Sum_Vec_;
Sum_Vec.resize(In_.size());
Sum_Vec_.resize(In_.size());

//Forget Gate
//Add Hin to In Vector Add
transform(Hin_.begin(),Hin_.end(),In_.begin(),Sum_Vec.begin(),plus<double>());
//Multiply by Wgt

if(wgt_scalar==1){
transform(Sum_Vec.begin(),Sum_Vec.end(),Sum_Vec_.begin(),bind1st(multiplies<double>(),Wgt[0]));
          }else{
Sum_Vec_ = Apply_Conv(Wgt_F,Sum_Vec);
             }
//Add Bias
transform(Sum_Vec_.begin(),Sum_Vec_.end(),Sum_Vec_.begin(),bind1st(plus<double>(),Bias[0]));
//Apply Sigmoid
transform(Sum_Vec_.begin(),Sum_Vec_.end(),Sum_Vec_.begin(),sig_vec);
ft = Sum_Vec_;
Act_F = Sum_Vec_;

//Include Gate
//Add Hin to In Vector Add
transform(Hin_.begin(),Hin_.end(),In_.begin(),Sum_Vec.begin(),plus<double>());
//Multiply by Wgt
if(wgt_scalar==1){
transform(Sum_Vec.begin(),Sum_Vec.end(),Sum_Vec_.begin(),bind1st(multiplies<double>(),Wgt[1]));
        }else{
Sum_Vec_ = Apply_Conv(Wgt_I,Sum_Vec);       
        }

//Add Bias
transform(Sum_Vec_.begin(),Sum_Vec_.end(),Sum_Vec_.begin(),bind1st(plus<double>(),Bias[1]));
//Apply Sigmoid
transform(Sum_Vec_.begin(),Sum_Vec_.end(),Sum_Vec_.begin(),sig_vec);
It = Sum_Vec_;
Act_I = Sum_Vec_;

//Out gate
//Add Hin to In Vector Add
transform(Hin_.begin(),Hin_.end(),In_.begin(),Sum_Vec.begin(),plus<double>());
//Multiply by the out gate's weights
if(wgt_scalar==1){
transform(Sum_Vec.begin(),Sum_Vec.end(),Sum_Vec_.begin(),bind1st(multiplies<double>(),Wgt[3]));
          }else{
Sum_Vec_ = Apply_Conv(Wgt_O,Sum_Vec);
          }
//Add Bias
transform(Sum_Vec_.begin(),Sum_Vec_.end(),Sum_Vec_.begin(),bind1st(plus<double>(),Bias[3]));
//Apply Sigmoid
transform(Sum_Vec_.begin(),Sum_Vec_.end(),Sum_Vec_.begin(),sig_vec);
Ot = Sum_Vec_;
Act_O = Sum_Vec_;


//Ct Gate (candidate state)
//Add Hin to In Vector Add
transform(Hin_.begin(),Hin_.end(),In_.begin(),Sum_Vec.begin(),plus<double>());
//Multiply by the candidate's weights
if(wgt_scalar==1){
transform(Sum_Vec.begin(),Sum_Vec.end(),Sum_Vec_.begin(),bind1st(multiplies<double>(),Wgt[2]));
        }else{
Sum_Vec_ = Apply_Conv(Wgt_C,Sum_Vec);
        }
//Add Bias
transform(Sum_Vec_.begin(),Sum_Vec_.end(),Sum_Vec_.begin(),bind1st(plus<double>(),Bias[2]));
//Apply Tanh
transform(Sum_Vec_.begin(),Sum_Vec_.end(),Sum_Vec_.begin(),tanh_vec);
Ct_ = Sum_Vec_;   //The new candidate state (At in the online derivation)
Act_C = Sum_Vec_;

//Multiply ft * Ct-1 //Forget the previous state Ct-1 (Ct_Vec)
transform(Ctin_Vec.begin(),Ctin_Vec.end(),ft.begin(),Sum_Vec.begin(),multiplies<double>());
//Multiply It * Ct_
transform(Ct_.begin(),Ct_.end(),It.begin(),Sum_Vec_.begin(),multiplies<double>());
//Calc Ct
transform(Sum_Vec.begin(),Sum_Vec.end(),Sum_Vec_.begin(),Sum_Vec.begin(),plus<double>());
Ct=Sum_Vec;
Ct_Vec=Sum_Vec;

//Apply Tanh for Hout
transform(Ct.begin(),Ct.end(),Sum_Vec.begin(),tanh_vec);
//Calc Hout
transform(Sum_Vec.begin(),Sum_Vec.end(),Ot.begin(),Sum_Vec.begin(),multiplies<double>());


Hout_Vec = Sum_Vec;
Out_Vec  = Ct;
}

//Backprop helpers
//Compute deltas for each Wgt and the delta for Ct
//Uses Ct-1 (previous cell state) and Ct (current cell state)

void error_module_vec(){  //Calculate Error at Hin/X and Ctin using Error at Out and Hout
//Error at Hout = Err_Ht + Err_Hout (Up and Across)
//Err_Ht = Err_Hin/X (Up)
vector<double> Sum_Vec;
vector<double> Sum_Vec_;

Sum_Vec.resize(Input_Size);
Sum_Vec_.resize(Input_Size);


//Multiply Wgt_Out * Hout_Err
if(wgt_scalar==1){
transform(Hout_Err.begin(),Hout_Err.end(),Sum_Vec.begin(),bind1st(multiplies<double>(),Wgt[3]));
        }else{
Sum_Vec = Apply_Conv(Wgt_O,Hout_Err);       
        }
//Inverse Tanh
transform(Sum_Vec.begin(),Sum_Vec.end(),Ctin_Err.begin(),invtanh_vec);

//Multiply Wgt_F * Hout_Err
if(wgt_scalar==1){
transform(Hout_Err.begin(),Hout_Err.end(),Sum_Vec.begin(),bind1st(multiplies<double>(),Wgt[0]));
          }else{
Sum_Vec = Apply_Conv(Wgt_F,Hout_Err);         
          }
//Inverse Sigmoid
transform(Sum_Vec.begin(),Sum_Vec.end(),Err_FGate.begin(),invsig_vec);

//Multiply Wgt_I * Hout_Err
if(wgt_scalar==1){
transform(Hout_Err.begin(),Hout_Err.end(),Sum_Vec.begin(),bind1st(multiplies<double>(),Wgt[1]));
        }else{
Sum_Vec = Apply_Conv(Wgt_I,Hout_Err);           
        }

//Inverse Sigmoid
transform(Sum_Vec.begin(),Sum_Vec.end(),Err_IGate.begin(),invsig_vec);

//Multiply Wgt_C * Hout_Err
if(wgt_scalar==1){
transform(Hout_Err.begin(),Hout_Err.end(),Sum_Vec.begin(),bind1st(multiplies<double>(),Wgt[2]));
        }else{
Sum_Vec = Apply_Conv(Wgt_C,Hout_Err);       
        }
//Inverse Tanh
transform(Sum_Vec.begin(),Sum_Vec.end(),Err_CGate.begin(),invtanh_vec);

//Add All 3 Errors
transform(Err_CGate.begin(),Err_CGate.end(),Err_IGate.begin(),Sum_Vec.begin(),plus<double>());
transform(Err_FGate.begin(),Err_FGate.end(),Sum_Vec.begin(),Sum_Vec_.begin(),plus<double>());
Hin_Err = Sum_Vec_;


}

void Update_Wgts(vector<double> &Wgt,vector<double> &PreDlta,vector<double> &PreWgt, vector<double> &Err,vector<double> &Act){
vector<double> delta(Input_Size);
vector<double> Sum_Vec;
vector<double> Sum_Vec_;
Sum_Vec.resize(Input_Size);
Sum_Vec_.resize(Input_Size);

//Add Hin to In (vector add)
transform(Hin_Vec.begin(),Hin_Vec.end(),In_Vec.begin(),Sum_Vec.begin(),plus<double>());
//Multiply Act_in * Error
transform(Err.begin(),Err.end(),Act.begin(),Sum_Vec_.begin(),multiplies<double>());
//Lrate
transform(Sum_Vec_.begin(),Sum_Vec_.end(),Sum_Vec_.begin(),bind1st(multiplies<double>(),Lrt));


//Momentum - convolve PreDlta onto an all-1's vector, then scale by Momentum
fill(Sum_Vec.begin(),Sum_Vec.end(),1);
Sum_Vec = Apply_Conv(PreDlta,Sum_Vec);
transform(Sum_Vec.begin(),Sum_Vec.end(),Sum_Vec.begin(),bind1st(multiplies<double>(),Momentum));


//Add Momentum and Lrate
transform(Sum_Vec.begin(),Sum_Vec.end(),Sum_Vec_.begin(),Sum_Vec.begin(),plus<double>());

//Decay - convolve PreWgt onto an all-1's vector, then scale by Decay
fill(Sum_Vec_.begin(),Sum_Vec_.end(),1);
Sum_Vec_ = Apply_Conv(PreWgt,Sum_Vec_);
transform(Sum_Vec_.begin(),Sum_Vec_.end(),Sum_Vec_.begin(),bind1st(multiplies<double>(),Decay));

//WgtDelta = Lrt*Err*ActIn + Momentum*PreDlta - Decay*PreWgt
transform(Sum_Vec.begin(),Sum_Vec.end(),Sum_Vec_.begin(),Sum_Vec.begin(),minus<double>());
PreWgt = Wgt;
PreDlta = Sum_Vec;
//Update Wgt matrix
Apply_DeConv(Wgt,Sum_Vec);
}
void Backprop(){
error_module_vec();

Update_Wgts(Wgt_F,PreDlta_F,PreWgt_F,Err_FGate,Act_F);
Update_Wgts(Wgt_I,PreDlta_I,PreWgt_I,Err_IGate,Act_I);
Update_Wgts(Wgt_C,PreDlta_C,PreWgt_C,Err_CGate,Act_C);
Update_Wgts(Wgt_O,PreDlta_O,PreWgt_O,Ctin_Err,Act_O);

}


void Init_Wgts(vector<double> &Wgt){

for(int i=0;i<Wgt_Size;i++){

Wgt[i] = double(rand()/(RAND_MAX+1.0));
}

}

vector<double> Apply_Conv(vector<double> &Wgt_Conv, vector<double> &In_Vec){
//Apply the Wgt convolution vector to an input (or error) vector
vector<double> Out_Vec(Input_Size,0);

for(int i=0;i<(int)In_Vec.size();i++){
for(int j=0;j<(int)Wgt_Conv.size();j++){
Out_Vec[i] += Wgt_Conv[j]*In_Vec[i];    //accumulate over the weight taps
}
}
return Out_Vec;                         //return once the whole vector is done
}
void Apply_DeConv(vector<double> &Wgt_Conv,vector<double> &Err_Vec){
//Spread Err_Vec back across the Wgt convolution - used for the Wgt update
int i=0;

while(i<(int)Err_Vec.size()){
for(int j=0;j<(int)Wgt_Conv.size()&&i<(int)Err_Vec.size();j++){
Wgt_Conv[j] += Err_Vec[i]/10;
i++;            //a stochastic element could be added here by skipping updates at random
}
}
}

void print_module_Err(){
cout<<"Print Error\n";
cout<<"Hout:\n";

for(vector<double>::iterator it=Hout_Vec.begin(); it!=Hout_Vec.end(); ++it){
cout<<" "<<*it<<"  ";
}

cout<<"Size of FERR = "<<Err_FGate.size();

cout<<"***********HOut_Err Vec***********\n";
for(vector<double>::iterator it=Hout_Err.begin(); it!=Hout_Err.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}
cout<<"***********FERR************\n";
for(vector<double>::iterator it=Err_FGate.begin(); it!=Err_FGate.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}cout<<"***********IERR********\n";
for(vector<double>::iterator it=Err_IGate.begin(); it!=Err_IGate.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}cout<<"***********CERR************\n";
for(vector<double>::iterator it=Err_CGate.begin(); it!=Err_CGate.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}

}

void print_module_wgts(){

cout<<"Print Wgts\n";
cout<<"***********FWgts************\n";
for(vector<double>::iterator it=Wgt_F.begin(); it!=Wgt_F.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}cout<<"***********IWgts************\n";
for(vector<double>::iterator it=Wgt_I.begin(); it!=Wgt_I.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}cout<<"***********CWgts************\n";
for(vector<double>::iterator it=Wgt_C.begin(); it!=Wgt_C.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}cout<<"***********OWgts************\n";
for(vector<double>::iterator it=Wgt_O.begin(); it!=Wgt_O.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}

}




};


int main(){
cout<<"Hello!\n";

vector<double> Hin_={0,0,0,0};
vector<double> Ctin_={0,0,0,0};
vector<double> In_ = {.1,.004,.6555,6.55};

vector<vector<double>> In_Vec(20,vector<double>(20));

In_Vec[0] = {10};//{.1,.004,.6555,6.55};
In_Vec[1] = {11};//{.7774,.3956,1.76,.006};
In_Vec[2] = {12};//{9.111,.12,.0102,2.96};
In_Vec[3] = {13};//{5.99,.204,6.0001,3.094};
In_Vec[4] = {14};//{2.4965,.694,0.5,22.003};

In_Vec[5] = {15};//{.1,.004,.6555,6.55};
In_Vec[6] = {16};//{.7774,.3956,1.76,.006};
In_Vec[7] = {17};//{9.111,.12,.0102,2.96};
In_Vec[8] = {18};//{5.99,.204,6.0001,3.094};
In_Vec[9] = {19};//{2.4965,.694,0.5,22.003};

//vector<double> Hin_={0};
//vector<double> In_ = {9.55};

ltsm_module** mymod_;        //Pool of 28 single-input modules: 0-8 form the first layer, 9-17 the second, the rest pad the layer boundaries
mymod_ = new ltsm_module*[28];


for(int i=0;i<28;i++){
mymod_[i] = new ltsm_module;
mymod_[i]->Input_Size=1;
mymod_[i]->init_module();

        }
int kint=0;   
int vint=0;   
int vint_=0;
int ahead=0;


for(int i=0;i<10;i++){
cout<<"***********Old Wgts************\n";
for(vector<double>::iterator it=mymod_[i]->Wgt_F.begin(); it!=mymod_[i]->Wgt_F.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}
cout<<"*******\n";            }





//Training loop: slide the input window along one step per iteration
for(int iter=0;iter<5000;iter++){
kint++;
if(kint==9){kint=0;}

mymod_[0]->Hin_Vec = {0};
mymod_[0]->Ctin_Vec= {0};
mymod_[10]->Hin_Vec = {0};
mymod_[10]->Ctin_Vec= {0};



//Feedforward one row of inputs
for(int i=0;i<9;i++){
vint=kint+i;
if(vint>=9){vint=0+vint-9;}

mymod_[i]->update_module_vec(mymod_[i]->Hin_Vec,In_Vec[vint],mymod_[i]->Ctin_Vec);

//mymod_[i+1]->Ctin_Vec = mymod_[i]->Out_Vec;
mymod_[i+1]->Hin_Vec = mymod_[i]->Hout_Vec;

            }
           
//Add another layer
for(int i=9;i<18;i++){

mymod_[i]->update_module_vec(mymod_[i]->Hin_Vec,mymod_[i-9]->Hout_Vec,mymod_[i]->Ctin_Vec);

//mymod_[i+1]->Ctin_Vec = mymod_[i]->Out_Vec;
mymod_[i+1]->Hin_Vec = mymod_[i]->Hout_Vec;


            }
           
           
           
           
//Feedback Create Error from identity for test

for(int i=18;i>9;--i){
vint=kint+i-9;
if(vint>=9){vint=0+vint-9;}
vint_=vint+ahead;
if(vint_>=9){vint_=0+vint_-9;}

if(i>9){     //Output Neurons Err
transform(mymod_[i]->Hout_Vec.begin(),mymod_[i]->Hout_Vec.end(),In_Vec[vint_].begin(),mymod_[i]->Hout_Err.begin(),minus<double>());
    }
//mymod_[i]->print_module_Err();

mymod_[i]->Backprop();        //Create Hin_Err and change weights
            }
           

for(int i=9;i>-1;--i){
vint=kint+i;
if(vint>=9){vint=0+vint-9;}
vint_=vint+5;
if(vint_>=9){vint_=0+vint_-9;}

if(i>-1){     //Output Neurons Err
mymod_[i]->Hout_Err = mymod_[i+10]->Hin_Err;

}
//mymod_[i]->print_module_Err();

mymod_[i]->Backprop();        //Create Hin_Err and change weights
            }

}               


//Identity

ltsm_module* mymod;
mymod = new ltsm_module;

mymod->Input_Size=4;
mymod->init_module();

mymod->Hin_Vec={0,0,0,0};
mymod->Ctin_Vec={0,0,0,0};

mymod->update_module_vec(mymod->Hin_Vec,In_,mymod->Ctin_Vec);

//Create Error from identity for test
transform(mymod->Out_Vec.begin(),mymod->Out_Vec.end(),In_.begin(),mymod->Out_Err.begin(),minus<double>());
transform(mymod->Hout_Vec.begin(),mymod->Hout_Vec.end(),In_.begin(),mymod->Hout_Err.begin(),minus<double>());

//mymod->Hout_Err = {.99,.837,.455,1.22};
//mymod->Out_Err  = {.99,.837,.455,1.22};

for(vector<double>::iterator it=mymod->Wgt_F.begin(); it!=mymod->Wgt_F.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}

cout<<"*********************************************************\n";

cout<<"**********Out Vec************\n";
for(vector<double>::iterator it=mymod->Out_Vec.begin(); it!=mymod->Out_Vec.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}

cout<<"***********HOut Vec***********\n";
for(vector<double>::iterator it=mymod->Hout_Vec.begin(); it!=mymod->Hout_Vec.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}
cout<<"***********New Wgts************\n";
for(vector<double>::iterator it=mymod->Wgt_F.begin(); it!=mymod->Wgt_F.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}
cout<<"Training*************************\n";


for(int i=0;i<100;i++){

//Create Error from identity for test
transform(mymod->Out_Vec.begin(),mymod->Out_Vec.end(),In_.begin(),mymod->Out_Err.begin(),minus<double>());
transform(mymod->Hout_Vec.begin(),mymod->Hout_Vec.end(),In_.begin(),mymod->Hout_Err.begin(),minus<double>());

//mymod->print_module_Err();

mymod->Ctin_Vec = mymod->Out_Vec;    //t+1
mymod->Hin_Vec  = mymod->Hout_Vec;

mymod->update_module_vec(mymod->Hin_Vec,In_,mymod->Ctin_Vec);

mymod->Backprop();

            }
           
           
           
           
           
cout<<"**********Out Vec************\n";
for(vector<double>::iterator it=mymod->Out_Vec.begin(); it!=mymod->Out_Vec.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}

cout<<"***********HOut Vec***********\n";
for(vector<double>::iterator it=mymod->Hout_Vec.begin(); it!=mymod->Hout_Vec.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}
cout<<"***********New Wgts************\n";
for(vector<double>::iterator it=mymod->Wgt_F.begin(); it!=mymod->Wgt_F.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}


cout<<"***********HOut Vec***********\n";
mymod_[0]->Hin_Vec = {0};
mymod_[0]->Ctin_Vec= {0};
mymod_[10]->Hin_Vec = {0};
mymod_[10]->Ctin_Vec= {0};


//Feedforward input layer
for(int i=0;i<9;i++){
vint=i;

mymod_[i]->update_module_vec(mymod_[i]->Hin_Vec,In_Vec[vint],mymod_[i]->Ctin_Vec);

mymod_[i+1]->Hin_Vec = mymod_[i]->Hout_Vec;

}


//Feedforward Output layer
for(int i=9;i<18;i++){
vint=i-9+ahead;
if(vint>=9){vint=0+vint-9;}

mymod_[i]->update_module_vec(mymod_[i]->Hin_Vec,mymod_[i-9]->Hout_Vec,mymod_[i]->Ctin_Vec);

transform(mymod_[i]->Hout_Vec.begin(),mymod_[i]->Hout_Vec.end(),In_Vec[vint].begin(),mymod_[i]->Hout_Err.begin(),minus<double>());

//mymod_[i+1]->Ctin_Vec = mymod_[i]->Out_Vec;
mymod_[i+1]->Hin_Vec = mymod_[i]->Hout_Vec;

for(vector<double>::iterator it=mymod_[i]->Hout_Vec.begin(); it!=mymod_[i]->Hout_Vec.end(); ++it){
cout<<" "<<*it<<"  ";
}

for(vector<double>::iterator it=In_Vec[vint].begin(); it!=In_Vec[vint].end(); ++it){
cout<<" "<<*it<<"  ";
}

for(vector<double>::iterator it=mymod_[i]->Out_Vec.begin(); it!=mymod_[i]->Out_Vec.end(); ++it){
cout<<" "<<*it<<"  ";
}
for(vector<double>::iterator it=mymod_[i]->Hout_Err.begin(); it!=mymod_[i]->Hout_Err.end(); ++it){
cout<<" "<<*it<<"  ";
}

cout<<"******\n";            }

for(int i=0;i<10;i++){
cout<<"***********New Wgts************\n";
for(vector<double>::iterator it=mymod_[i]->Wgt_F.begin(); it!=mymod_[i]->Wgt_F.end(); ++it){
cout<<" "<<*it;
cout<<"\n";
}
cout<<"*******\n";            }


}









double invtanh_vec(double sum){

return function_y(sum,-1,0);

}

double invsig_vec(double sum){

return function_x(sum,-1,0);
}

double tanh_vec(double sum){

//double sum = Vec1+Vec2;

return function_y(sum,1,0);

}

double sig_vec(double sum){

//double sum = Vec1+Vec2;

return function_x(sum,1,0);

}

double function_y(double sum,int type, double y){  //input function
double rand_=0;
//sum=sum/1000;

if(type==1){
rand_ = ((double)(rand()%1000))/10000;

sum = tanh(sum)+rand_;
//sum = (2/(1+exp(-2*sum)))-1;        //Kick Ass


}
if(type==-1){

//sum = (1-pow(tanh(sum),2))*sum;
sum = 1-(sum*sum)/2;        //rough stand-in for the tanh derivative

}

return sum;

}

double function_x(double sum,int type,double y){  //activation function for all hidden layer neurons

double sigmoid=0,temp;
double rand_=0;

if(type==1){
//rand_ = ((double)(rand()%1000))/1000000;

if(sum>0.5){                    //this is actually hardlim not sigmoid
 sigmoid =  0.5;}else{
 if(sum<-0.5){
 sigmoid = 0;}
 else{
 sigmoid = 1/(1+exp((double) -sum));
     }
}
 sigmoid = sigmoid+rand_;

        }
if(type==-1){


temp = 1/(1+exp((double) -y));

//sigmoid = (0.25-(temp*temp) )* sum;
//sigmoid =  y*(1-y) * sum;        //derivative sigmoid
sigmoid = sum*(1-sum);

//sigmoid = (1-pow(tanh(y),2))*sum;


        }
return sigmoid;


}
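
For anyone grabbing this to play with: it's a single translation unit, so (assuming you save it as lstm.cpp - the brace-initialised vectors need C++11) something like this should build it:

g++ -std=c++11 lstm.cpp -o lstm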