// Class: ReadMLP
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method         : MLP::MLP
TMVA Release   : 4.2.0         [262656]
ROOT Release   : 5.34/14       [336398]
Creator        : hepg
Date           : Sun Aug 16 07:58:38 2015
Host           : Linux whale1 2.6.32-358.el6.x86_64 #1 SMP Fri Feb 22 03:57:22 CET 2013 x86_64 x86_64 x86_64 GNU/Linux
Dir            : /misc/users/hepg/cowan/tmva/train
Training events: 20000
Analysis type  : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
HiddenLayers: "3" [Specification of hidden layer architecture]
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
H: "True" [Print method-specific help message]
# Default:
NCycles: "500" [Number of training cycles]
NeuronType: "sigmoid" [Neuron activation function type]
RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
EstimatorType: "MSE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood]
NeuronInputType: "sum" [Neuron input function type]
VerbosityLevel: "Default" [Verbosity level]
VarTransform: "None" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
LearningRate: "2.000000e-02" [ANN learning rate parameter]
DecayRate: "1.000000e-02" [Decay rate for learning parameter]
TestRate: "10" [Test for overtraining performed at each #th epochs]
EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs, afterwards, all events are taken for training]
SamplingImportance: "1.000000e+00" [ The sampling weights of events in epochs which successful (worse estimator than before) are multiplied with SamplingImportance, else they are divided.]
SamplingTraining: "True" [The training sample is sampled]
SamplingTesting: "False" [The testing sample is sampled]
ResetStep: "50" [How often BFGS should reset history]
Tau: "3.000000e+00" [LineSearch "size step"]
BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
UseRegulator: "False" [Use regulator to avoid over-training]
UpdateLimit: "10000" [Maximum times of regulator update]
CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 3
x    x    x    x    'F'    [-2.71237683296,3.86315917969]
y    y    y    y    'F'    [-1.74197530746,3.65737724304]
z    z    z    z    'F'    [0.000103908125311,0.999955058098]
NSpec 0


============================================================================ */

#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif

class ReadMLP : public IClassifierReader {

 public:

   // constructor
   ReadMLP( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadMLP" ),
        fNvars( 3 ),
        fIsNormalised( false )
   {
      // the training input variables
      const char* inputVars[] = { "x", "y", "z" };

      // sanity checks
      if (theInputVars.size() <= 0) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str()
                      << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = 0;
      fVmax[0] = 0;
      fVmin[1] = 0;
      fVmax[1] = 0;
      fVmin[2] = 0;
      fVmax[2] = 0;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';

      // initialize constants
      Initialize();
   }

   // destructor
   virtual ~ReadMLP() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const;

 private:

   // method-specific destructor
   void Clear();

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar()           const { return fNvars; }
   char   GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   const bool fIsNormalised;
   bool IsNormalised() const { return fIsNormalised; }

   double fVmin[3];
   double fVmax[3];

   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }

   // type of input variable: 'F' or 'I'
   char   fType[3];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)
   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   int fLayers;
   int fLayerSize[3];
   double fWeightMatrix0to1[4][4];   // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[1][4];   // weight matrix from layer 1 to 2

   double * fWeights[3];
};

inline void ReadMLP::Initialize()
{
   // build network structure
   fLayers = 3;
   fLayerSize[0] = 4; fWeights[0] = new double[4];
   fLayerSize[1] = 4; fWeights[1] = new double[4];
   fLayerSize[2] = 1; fWeights[2] = new double[1];
   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = 1.06392518265394;
   fWeightMatrix0to1[1][0] = 1.37643504656176;
   fWeightMatrix0to1[2][0] = 2.34030360839801;
   fWeightMatrix0to1[0][1] = 3.87729972339743;
   fWeightMatrix0to1[1][1] = -1.87080656595418;
   fWeightMatrix0to1[2][1] = -2.19317519450234;
   fWeightMatrix0to1[0][2] = 2.7098697806131;
   fWeightMatrix0to1[1][2] = 0.944867197233981;
   fWeightMatrix0to1[2][2] = -1.42651195526084;
   fWeightMatrix0to1[0][3] = -5.181580900468;
   fWeightMatrix0to1[1][3] = -3.50697717673637;
   fWeightMatrix0to1[2][3] = -3.39062345122844;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -1.05073267797386;
   fWeightMatrix1to2[0][1] = 0.091066020714422;
   fWeightMatrix1to2[0][2] = -1.00538254117898;
   fWeightMatrix1to2[0][3] = 1.0517186446555;
}

inline double ReadMLP::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != (unsigned int)fLayerSize[0]-1) {
      std::cout << "Input vector needs to be of size " << fLayerSize[0]-1 << std::endl;
      return 0;
   }

   // feed-forward evaluation: zero the node values, set the bias nodes to 1,
   // load the input values, then propagate through the weight matrices
   for (int l=0; l<fLayers; l++)
      for (int i=0; i<fLayerSize[l]; i++) fWeights[l][i]=0;

   for (int l=0; l<fLayers-1; l++)
      fWeights[l][fLayerSize[l]-1]=1;

   for (int i=0; i<fLayerSize[0]-1; i++)
      fWeights[0][i]=inputValues[i];

   // layer 0 to 1
   for (int o=0; o<fLayerSize[1]-1; o++) {
      for (int i=0; i<fLayerSize[0]; i++) {
         double inputVal = fWeightMatrix0to1[o][i] * fWeights[0][i];
         fWeights[1][o] += inputVal;
      }
      fWeights[1][o] = ActivationFnc(fWeights[1][o]);
   }

   // layer 1 to 2
   for (int o=0; o<fLayerSize[2]; o++) {
      for (int i=0; i<fLayerSize[1]; i++) {
         double inputVal = fWeightMatrix1to2[o][i] * fWeights[1][i];
         fWeights[2][o] += inputVal;
      }
      fWeights[2][o] = OutputActivationFnc(fWeights[2][o]);
   }

   return fWeights[2][0];
}

double ReadMLP::ActivationFnc(double x) const
{
   // hidden-neuron activation: sigmoid (NeuronType "sigmoid" above)
   return 1.0/(1.0+exp(-x));
}

double ReadMLP::OutputActivationFnc(double x) const
{
   // output-neuron activation: identity (assumed here to match the
   // MSE estimator configured above)
   return x;
}

// clean up
inline void ReadMLP::Clear()
{
   // free the node arrays allocated in Initialize()
   for (int l = 0; l < fLayers; l++) delete[] fWeights[l];
}

inline double ReadMLP::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
      retval = 0;
   }
   else {
      if (IsNormalised()) {
         // normalise variables
         std::vector<double> iV;
         iV.reserve(inputValues.size());
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
         }
         retval = GetMvaValue__( iV );
      }
      else {
         retval = GetMvaValue__( inputValues );
      }
   }

   return retval;
}
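
// Example usage (illustrative sketch, not part of the generated TMVA output):
// the guarded block below shows one way to evaluate the classifier from a
// standalone program. The macro name READMLP_EXAMPLE_MAIN, the file name, and
// the sample input values are assumptions chosen for the example; only the
// ReadMLP interface itself comes from the class above. If this file were
// saved as, say, ReadMLP.cpp, it could be built with
//   g++ -DREADMLP_EXAMPLE_MAIN ReadMLP.cpp -o readmlp
#ifdef READMLP_EXAMPLE_MAIN
int main()
{
   // variable names must match, and be ordered as, the training variables
   std::vector<std::string> inputVars;
   inputVars.push_back("x");
   inputVars.push_back("y");
   inputVars.push_back("z");

   ReadMLP mlp( inputVars );

   // one event with arbitrary example values inside the training ranges
   std::vector<double> event;
   event.push_back(0.5);   // x
   event.push_back(1.2);   // y
   event.push_back(0.3);   // z

   std::cout << "MVA response = " << mlp.GetMvaValue( event ) << std::endl;
   return 0;
}
#endif // READMLP_EXAMPLE_MAIN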