// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss, Eckhard v. Toerne, Jan Therhaag

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : MethodBDT (BDT = Boosted Decision Trees)                              *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Analysis of Boosted Decision Trees                                        *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Andreas Hoecker    - CERN, Switzerland                                    *
 *      Helge Voss         - MPI-K Heidelberg, Germany                            *
 *      Kai Voss           - U. of Victoria, Canada                               *
 *      Doug Schouten      - Simon Fraser U., Canada                              *
 *      Jan Therhaag       - U. of Bonn, Germany                                  *
 *      Eckhard v. Toerne  - U of Bonn, Germany                                   *
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      U. of Victoria, Canada                                                    *
 *      MPI-K Heidelberg, Germany                                                 *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without             *
 * modification, are permitted according to the terms listed in LICENSE           *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 **********************************************************************************/

//_______________________________________________________________________
//
// Analysis of Boosted Decision Trees
//
// Boosted decision trees have been successfully used in High Energy
// Physics analysis, for example by the MiniBooNE experiment
// (Yang-Roe-Zhu, physics/0508045). In Boosted Decision Trees, the
// selection is done by a majority vote on the results of several decision
// trees, which are all derived from the same training sample by
// supplying different event weights during the training.
//
// Decision trees:
//
// Successive decision nodes are used to categorize the
// events of the sample as either signal or background. Each node
// uses only a single discriminating variable to decide if the event is
// signal-like ("goes right") or background-like ("goes left"). This
// forms a tree-like structure with "baskets" at the end (leaf nodes),
// and an event is classified as either signal or background according to
// whether the basket where it ends up has been classified as signal or
// background during the training. Training of a decision tree is the
// process of defining the "cut criteria" for each node. The training
// starts with the root node. Here one takes the full training event
// sample and selects the variable and corresponding cut value that gives
// the best separation between signal and background at this stage. Using
// this cut criterion, the sample is then divided into two subsamples, a
// signal-like (right) and a background-like (left) sample. Two new nodes
// are then created for each of the two sub-samples and they are
// constructed using the same mechanism as described for the root
// node. The division is stopped once a certain node has reached either a
// minimum number of events, or a minimum or maximum signal purity. These
// leaf nodes are then called "signal" or "background" depending on
// whether they contain more signal or background events from the
// training sample.
//
// Boosting:
//
// The idea behind adaptive boosting (AdaBoost) is that signal events
// from the training sample that end up in a background node
// (and vice versa) are given a larger weight than events that are in
// the correct leaf node. This results in a re-weighted training event
// sample, with which a new decision tree can then be developed.
// The boosting can be applied several times (typically 100-500 times)
// and one ends up with a set of decision trees (a forest).
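//
// As an illustration only (a minimal sketch: the job name, the output file and
// the variable names "var1"/"var2" are assumptions, and the data-loading calls
// are omitted), a BDT with AdaBoost could be booked through the usual TMVA
// Factory interface, using the options declared in DeclareOptions():
//
//    TMVA::Factory factory( "TMVAClassification", outputFile, "AnalysisType=Classification" );
//    factory.AddVariable( "var1", 'F' );
//    factory.AddVariable( "var2", 'F' );
//    factory.BookMethod( TMVA::Types::kBDT, "BDT",
//                        "NTrees=400:MaxDepth=3:MinNodeSize=5%:nCuts=20:"
//                        "BoostType=AdaBoost:AdaBoostBeta=0.5:SeparationType=GiniIndex" );
//    factory.TrainAllMethods();
//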
// Gradient boosting works more like a function expansion approach, where
// each tree corresponds to a summand. The parameters for each summand (tree)
// are determined by the minimization of an error function (binomial log-
// likelihood for classification and Huber loss for regression).
// A greedy algorithm is used, which means that only one tree is modified
// at a time, while the other trees stay fixed.
//
// Bagging:
//
// In this particular variant of the Boosted Decision Trees the boosting
// is not done on the basis of previous training results, but by a simple
// stochastic re-sampling of the initial training event sample.
//
// Random Trees:
// Similar to the "Random Forests" of Leo Breiman and Adele Cutler, this
// variant uses the bagging algorithm and, in addition, determines the best
// node split during the training from a random subset of the variables only,
// chosen individually for each split.
//
// Analysis:
//
// Applying an individual decision tree to a test event results in a
// classification of the event as either signal or background. For the
// boosted decision tree selection, an event is successively subjected to
// the whole set of decision trees, and depending on how often it is
// classified as signal, a "likelihood" estimator is constructed for the
// event being signal or background. The value of this estimator is
// then used to select the events from an event sample, and the cut value
// on this estimator defines the efficiency and purity of the selection.
//
//_______________________________________________________________________

#include #include #include
#include "Riostream.h"
#include "TRandom3.h"
#include "TMath.h"
#include "TObjString.h"
#include "TGraph.h"

#include "TMVA/ClassifierFactory.h"
#include "TMVA/MethodBDT.h"
#include "TMVA/Tools.h"
#include "TMVA/Timer.h"
#include "TMVA/Ranking.h"
#include "TMVA/SdivSqrtSplusB.h"
#include "TMVA/BinarySearchTree.h"
#include "TMVA/SeparationBase.h"
#include "TMVA/GiniIndex.h"
#include "TMVA/GiniIndexWithLaplace.h"
#include "TMVA/CrossEntropy.h"
#include "TMVA/MisClassificationError.h"
#include "TMVA/Results.h"
#include "TMVA/ResultsMulticlass.h"
#include "TMVA/Interval.h"
#include "TMVA/LogInterval.h"
#include "TMVA/PDF.h"
#include "TMVA/BDTEventWrapper.h"

#include "TMatrixTSym.h"

using std::vector;
using std::make_pair;

REGISTER_METHOD(BDT)

ClassImp(TMVA::MethodBDT)

const Int_t TMVA::MethodBDT::fgDebugLevel = 0;

//_______________________________________________________________________
TMVA::MethodBDT::MethodBDT( const TString& jobName,
                            const TString& methodTitle,
                            DataSetInfo& theData,
                            const TString& theOption,
                            TDirectory* theTargetDir ) :
   TMVA::MethodBase( jobName, Types::kBDT, methodTitle, theData, theOption, theTargetDir )
   , fTrainSample(0)
   , fNTrees(0)
   , fSigToBkgFraction(0)
   , fAdaBoostBeta(0)
   , fTransitionPoint(0)
   , fShrinkage(0)
   , fBaggedBoost(kFALSE)
   , fBaggedGradBoost(kFALSE)
   , fSumOfWeights(0)
   , fMinNodeEvents(0)
   , fMinNodeSize(5)
   , fMinNodeSizeS("5%")
   , fNCuts(0)
   , fUseFisherCuts(0)        // don't use this initialisation, only here to make Coverity happy. Is set in DeclareOptions()
   , fMinLinCorrForFisher(.8) // don't use this initialisation, only here to make Coverity happy. Is set in DeclareOptions()
   , fUseExclusiveVars(0)     // don't use this initialisation, only here to make Coverity happy.
Is set in DeclarOptions() , fUseYesNoLeaf(kFALSE) , fNodePurityLimit(0) , fNNodesMax(0) , fMaxDepth(0) , fPruneMethod(DecisionTree::kNoPruning) , fPruneStrength(0) , fFValidationEvents(0) , fAutomatic(kFALSE) , fRandomisedTrees(kFALSE) , fUseNvars(0) , fUsePoissonNvars(0) // don't use this initialisation, only here to make Coverity happy. Is set in Init() , fUseNTrainEvents(0) , fBaggedSampleFraction(0) , fNoNegWeightsInTraining(kFALSE) , fInverseBoostNegWeights(kFALSE) , fPairNegWeightsGlobal(kFALSE) , fTrainWithNegWeights(kFALSE) , fDoBoostMonitor(kFALSE) , fITree(0) , fBoostWeight(0) , fErrorFraction(0) , fCss(0) , fCts_sb(0) , fCtb_ss(0) , fCbb(0) , fDoPreselection(kFALSE) { // the standard constructor for the "boosted decision trees" fMonitorNtuple = NULL; fSepType = NULL; } //_______________________________________________________________________ TMVA::MethodBDT::MethodBDT( DataSetInfo& theData, const TString& theWeightFile, TDirectory* theTargetDir ) : TMVA::MethodBase( Types::kBDT, theData, theWeightFile, theTargetDir ) , fTrainSample(0) , fNTrees(0) , fSigToBkgFraction(0) , fAdaBoostBeta(0) , fTransitionPoint(0) , fShrinkage(0) , fBaggedBoost(kFALSE) , fBaggedGradBoost(kFALSE) , fSumOfWeights(0) , fMinNodeEvents(0) , fMinNodeSize(5) , fMinNodeSizeS("5%") , fNCuts(0) , fUseFisherCuts(0) // don't use this initialisation, only here to make Coverity happy. Is set in DeclarOptions() , fMinLinCorrForFisher(.8) // don't use this initialisation, only here to make Coverity happy. Is set in DeclarOptions() , fUseExclusiveVars(0) // don't use this initialisation, only here to make Coverity happy. Is set in DeclarOptions() , fUseYesNoLeaf(kFALSE) , fNodePurityLimit(0) , fNNodesMax(0) , fMaxDepth(0) , fPruneMethod(DecisionTree::kNoPruning) , fPruneStrength(0) , fFValidationEvents(0) , fAutomatic(kFALSE) , fRandomisedTrees(kFALSE) , fUseNvars(0) , fUsePoissonNvars(0) // don't use this initialisation, only here to make Coverity happy. Is set in Init() , fUseNTrainEvents(0) , fBaggedSampleFraction(0) , fNoNegWeightsInTraining(kFALSE) , fInverseBoostNegWeights(kFALSE) , fPairNegWeightsGlobal(kFALSE) , fTrainWithNegWeights(kFALSE) , fDoBoostMonitor(kFALSE) , fITree(0) , fBoostWeight(0) , fErrorFraction(0) , fCss(0) , fCts_sb(0) , fCtb_ss(0) , fCbb(0) , fDoPreselection(kFALSE) { fMonitorNtuple = NULL; fSepType = NULL; // constructor for calculating BDT-MVA using previously generated decision trees // the result of the previous training (the decision trees) are read in via the // weight file. Make sure the the variables correspond to the ones used in // creating the "weight"-file } //_______________________________________________________________________ Bool_t TMVA::MethodBDT::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets ) { // BDT can handle classification with multiple classes and regression with one regression-target if (type == Types::kClassification && numberClasses == 2) return kTRUE; if (type == Types::kMulticlass ) return kTRUE; if( type == Types::kRegression && numberTargets == 1 ) return kTRUE; return kFALSE; } //_______________________________________________________________________ void TMVA::MethodBDT::DeclareOptions() { // define the options (their key words) that can be set in the option string // know options: // nTrees number of trees in the forest to be created // BoostType the boosting type for the trees in the forest (AdaBoost e.t.c..) 
// known: AdaBoost // AdaBoostR2 (Adaboost for regression) // Bagging // GradBoost // AdaBoostBeta the boosting parameter, beta, for AdaBoost // UseRandomisedTrees choose at each node splitting a random set of variables // UseNvars use UseNvars variables in randomised trees // UsePoission Nvars use UseNvars not as fixed number but as mean of a possion distribution // SeparationType the separation criterion applied in the node splitting // known: GiniIndex // MisClassificationError // CrossEntropy // SDivSqrtSPlusB // MinNodeSize: minimum percentage of training events in a leaf node (leaf criteria, stop splitting) // nCuts: the number of steps in the optimisation of the cut for a node (if < 0, then // step size is determined by the events) // UseFisherCuts: use multivariate splits using the Fisher criterion // UseYesNoLeaf decide if the classification is done simply by the node type, or the S/B // (from the training) in the leaf node // NodePurityLimit the minimum purity to classify a node as a signal node (used in pruning and boosting to determine // misclassification error rate) // PruneMethod The Pruning method: // known: NoPruning // switch off pruning completely // ExpectedError // CostComplexity // PruneStrength a parameter to adjust the amount of pruning. Should be large enough such that overtraining is avoided. // PruningValFraction number of events to use for optimizing pruning (only if PruneStrength < 0, i.e. automatic pruning) // NegWeightTreatment IgnoreNegWeightsInTraining Ignore negative weight events in the training. // DecreaseBoostWeight Boost ev. with neg. weight with 1/boostweight instead of boostweight // PairNegWeightsGlobal Pair ev. with neg. and pos. weights in traning sample and "annihilate" them // MaxDepth maximum depth of the decision tree allowed before further splitting is stopped DeclareOptionRef(fNTrees, "NTrees", "Number of trees in the forest"); if (DoRegression()) { DeclareOptionRef(fMaxDepth=50,"MaxDepth","Max depth of the decision tree allowed"); }else{ DeclareOptionRef(fMaxDepth=3,"MaxDepth","Max depth of the decision tree allowed"); } TString tmp="5%"; if (DoRegression()) tmp="0.2%"; DeclareOptionRef(fMinNodeSizeS=tmp, "MinNodeSize", "Minimum percentage of training events required in a leaf node (default: Classification: 5%, Regression: 0.2%)"); // MinNodeSize: minimum percentage of training events in a leaf node (leaf criteria, stop splitting) DeclareOptionRef(fNCuts, "nCuts", "Number of grid points in variable range used in finding optimal cut in node splitting"); DeclareOptionRef(fBoostType, "BoostType", "Boosting type for the trees in the forest (note: AdaCost is still experimental)"); AddPreDefVal(TString("AdaBoost")); AddPreDefVal(TString("RealAdaBoost")); AddPreDefVal(TString("AdaCost")); AddPreDefVal(TString("Bagging")); // AddPreDefVal(TString("RegBoost")); AddPreDefVal(TString("AdaBoostR2")); AddPreDefVal(TString("Grad")); if (DoRegression()) { fBoostType = "AdaBoostR2"; }else{ fBoostType = "AdaBoost"; } DeclareOptionRef(fAdaBoostR2Loss="Quadratic", "AdaBoostR2Loss", "Type of Loss function in AdaBoostR2"); AddPreDefVal(TString("Linear")); AddPreDefVal(TString("Quadratic")); AddPreDefVal(TString("Exponential")); DeclareOptionRef(fBaggedBoost=kFALSE, "UseBaggedBoost","Use only a random subsample of all events for growing the trees in each boost iteration."); DeclareOptionRef(fShrinkage=1.0, "Shrinkage", "Learning rate for GradBoost algorithm"); DeclareOptionRef(fAdaBoostBeta=.5, "AdaBoostBeta", "Learning rate for AdaBoost algorithm"); 
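   // Illustrative note (a sketch only; see GradBoost()/GradBoostRegression() below for the
   // exact implementation): with BoostType=Grad the forest is built as an additive expansion
   //    F_m(x) = F_{m-1}(x) + Shrinkage * tree_m(x)
   // so a small Shrinkage (e.g. 0.1) slows down the learning and typically requires more trees.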
DeclareOptionRef(fRandomisedTrees,"UseRandomisedTrees","Determine at each node splitting the cut variable only as the best out of a random subset of variables (like in RandomForests)"); DeclareOptionRef(fUseNvars,"UseNvars","Size of the subset of variables used with RandomisedTree option"); DeclareOptionRef(fUsePoissonNvars,"UsePoissonNvars", "Interpret \"UseNvars\" not as fixed number but as mean of a Possion distribution in each split with RandomisedTree option"); DeclareOptionRef(fBaggedSampleFraction=.6,"BaggedSampleFraction","Relative size of bagged event sample to original size of the data sample (used whenever bagging is used (i.e. UseBaggedBoost, Bagging,)" ); DeclareOptionRef(fUseYesNoLeaf=kTRUE, "UseYesNoLeaf", "Use Sig or Bkg categories, or the purity=S/(S+B) as classification of the leaf node -> Real-AdaBoost"); if (DoRegression()) { fUseYesNoLeaf = kFALSE; } DeclareOptionRef(fNegWeightTreatment="InverseBoostNegWeights","NegWeightTreatment","How to treat events with negative weights in the BDT training (particular the boosting) : IgnoreInTraining; Boost With inverse boostweight; Pair events with negative and positive weights in traning sample and *annihilate* them (experimental!)"); AddPreDefVal(TString("InverseBoostNegWeights")); AddPreDefVal(TString("IgnoreNegWeightsInTraining")); AddPreDefVal(TString("NoNegWeightsInTraining")); // well, let's be nice to users and keep at least this old name anyway .. AddPreDefVal(TString("PairNegWeightsGlobal")); AddPreDefVal(TString("Pray")); DeclareOptionRef(fCss=1., "Css", "AdaCost: cost of true signal selected signal"); DeclareOptionRef(fCts_sb=1.,"Cts_sb","AdaCost: cost of true signal selected bkg"); DeclareOptionRef(fCtb_ss=1.,"Ctb_ss","AdaCost: cost of true bkg selected signal"); DeclareOptionRef(fCbb=1., "Cbb", "AdaCost: cost of true bkg selected bkg "); DeclareOptionRef(fNodePurityLimit=0.5, "NodePurityLimit", "In boosting/pruning, nodes with purity > NodePurityLimit are signal; background otherwise."); DeclareOptionRef(fSepTypeS, "SeparationType", "Separation criterion for node splitting"); AddPreDefVal(TString("CrossEntropy")); AddPreDefVal(TString("GiniIndex")); AddPreDefVal(TString("GiniIndexWithLaplace")); AddPreDefVal(TString("MisClassificationError")); AddPreDefVal(TString("SDivSqrtSPlusB")); AddPreDefVal(TString("RegressionVariance")); if (DoRegression()) { fSepTypeS = "RegressionVariance"; }else{ fSepTypeS = "GiniIndex"; } DeclareOptionRef(fDoBoostMonitor=kFALSE,"DoBoostMonitor","Create control plot with ROC integral vs tree number"); DeclareOptionRef(fUseFisherCuts=kFALSE, "UseFisherCuts", "Use multivariate splits using the Fisher criterion"); DeclareOptionRef(fMinLinCorrForFisher=.8,"MinLinCorrForFisher", "The minimum linear correlation between two variables demanded for use in Fisher criterion in node splitting"); DeclareOptionRef(fUseExclusiveVars=kFALSE,"UseExclusiveVars","Variables already used in fisher criterion are not anymore analysed individually for node splitting"); DeclareOptionRef(fDoPreselection=kFALSE,"DoPreselection","and and apply automatic pre-selection for 100% efficient signal (bkg) cuts prior to training"); DeclareOptionRef(fSigToBkgFraction=1,"SigToBkgFraction","Sig to Bkg ratio used in Training (similar to NodePurityLimit, which cannot be used in real adaboost"); DeclareOptionRef(fPruneMethodS, "PruneMethod", "Note: for BDTs use small trees (e.g.MaxDepth=3) and NoPruning: Pruning: Method used for pruning (removal) of statistically insignificant branches "); AddPreDefVal(TString("NoPruning")); 
AddPreDefVal(TString("ExpectedError")); AddPreDefVal(TString("CostComplexity")); DeclareOptionRef(fPruneStrength, "PruneStrength", "Pruning strength"); DeclareOptionRef(fFValidationEvents=0.5, "PruningValFraction", "Fraction of events to use for optimizing automatic pruning."); // deprecated options, still kept for the moment: DeclareOptionRef(fMinNodeEvents=0, "nEventsMin", "deprecated: Use MinNodeSize (in % of training events) instead"); DeclareOptionRef(fBaggedGradBoost=kFALSE, "UseBaggedGrad","deprecated: Use *UseBaggedBoost* instead: Use only a random subsample of all events for growing the trees in each iteration."); DeclareOptionRef(fBaggedSampleFraction, "GradBaggingFraction","deprecated: Use *BaggedSampleFraction* instead: Defines the fraction of events to be used in each iteration, e.g. when UseBaggedGrad=kTRUE. "); DeclareOptionRef(fUseNTrainEvents,"UseNTrainEvents","deprecated: Use *BaggedSampleFraction* instead: Number of randomly picked training events used in randomised (and bagged) trees"); DeclareOptionRef(fNNodesMax,"NNodesMax","deprecated: Use MaxDepth instead to limit the tree size" ); } //_______________________________________________________________________ void TMVA::MethodBDT::DeclareCompatibilityOptions() { // options that are used ONLY for the READER to ensure backward compatibility MethodBase::DeclareCompatibilityOptions(); DeclareOptionRef(fHistoricBool=kTRUE, "UseWeightedTrees", "Use weighted trees or simple average in classification from the forest"); DeclareOptionRef(fHistoricBool=kFALSE, "PruneBeforeBoost", "Flag to prune the tree before applying boosting algorithm"); DeclareOptionRef(fHistoricBool=kFALSE,"RenormByClass","Individually re-normalize each event class to the original size after boosting"); AddPreDefVal(TString("NegWeightTreatment"),TString("IgnoreNegWeights")); } //_______________________________________________________________________ void TMVA::MethodBDT::ProcessOptions() { // the option string is decoded, for available options see "DeclareOptions" fSepTypeS.ToLower(); if (fSepTypeS == "misclassificationerror") fSepType = new MisClassificationError(); else if (fSepTypeS == "giniindex") fSepType = new GiniIndex(); else if (fSepTypeS == "giniindexwithlaplace") fSepType = new GiniIndexWithLaplace(); else if (fSepTypeS == "crossentropy") fSepType = new CrossEntropy(); else if (fSepTypeS == "sdivsqrtsplusb") fSepType = new SdivSqrtSplusB(); else if (fSepTypeS == "regressionvariance") fSepType = NULL; else { Log() << kINFO << GetOptions() << Endl; Log() << kFATAL << " unknown Separation Index option " << fSepTypeS << " called" << Endl; } fPruneMethodS.ToLower(); if (fPruneMethodS == "expectederror") fPruneMethod = DecisionTree::kExpectedErrorPruning; else if (fPruneMethodS == "costcomplexity") fPruneMethod = DecisionTree::kCostComplexityPruning; else if (fPruneMethodS == "nopruning") fPruneMethod = DecisionTree::kNoPruning; else { Log() << kINFO << GetOptions() << Endl; Log() << kFATAL << " unknown PruneMethod " << fPruneMethodS << " option called" << Endl; } if (fPruneStrength < 0 && (fPruneMethod != DecisionTree::kNoPruning) && fBoostType!="Grad") fAutomatic = kTRUE; else fAutomatic = kFALSE; if (fAutomatic && fPruneMethod==DecisionTree::kExpectedErrorPruning){ Log() << kFATAL << "Sorry autmoatic pruning strength determination is not implemented yet for ExpectedErrorPruning" << Endl; } if (fMinNodeEvents > 0){ fMinNodeSize = Double_t(fMinNodeEvents*100.) 
/ Data()->GetNTrainingEvents(); Log() << kWARNING << "You have explicitly set ** nEventsMin = " << fMinNodeEvents<<" ** the min ablsolut number \n" << "of events in a leaf node. This is DEPRECATED, please use the option \n" << "*MinNodeSize* giving the relative number as percentage of training \n" << "events instead. \n" << "nEventsMin="< MinNodeSize="< change to *IgnoreNegWeightsInTraining*" << Endl; fNegWeightTreatment="IgnoreNegWeightsInTraining"; fNoNegWeightsInTraining=kTRUE; } } else if (fBoostType=="RealAdaBoost"){ fBoostType = "AdaBoost"; fUseYesNoLeaf = kFALSE; } else if (fBoostType=="AdaCost"){ fUseYesNoLeaf = kFALSE; } if (fFValidationEvents < 0.0) fFValidationEvents = 0.0; if (fAutomatic && fFValidationEvents > 0.5) { Log() << kWARNING << "You have chosen to use more than half of your training sample " << "to optimize the automatic pruning algorithm. This is probably wasteful " << "and your overall results will be degraded. Are you sure you want this?" << Endl; } if (this->Data()->HasNegativeEventWeights()){ Log() << kINFO << " You are using a Monte Carlo that has also negative weights. " << "That should in principle be fine as long as on average you end up with " << "something positive. For this you have to make sure that the minimal number " << "of (un-weighted) events demanded for a tree node (currently you use: MinNodeSize=" << fMinNodeSizeS << " ("<< fMinNodeSize << "%)" <<", (or the deprecated equivalent nEventsMin) you can set this via the " <<"BDT option string when booking the " << "classifier) is large enough to allow for reasonable averaging!!! " << " If this does not help.. maybe you want to try the option: IgnoreNegWeightsInTraining " << "which ignores events with negative weight in the training. " << Endl << Endl << "Note: You'll get a WARNING message during the training if that should ever happen" << Endl; } if (DoRegression()) { if (fUseYesNoLeaf && !IsConstructedFromWeightFile()){ Log() << kWARNING << "Regression Trees do not work with fUseYesNoLeaf=TRUE --> I will set it to FALSE" << Endl; fUseYesNoLeaf = kFALSE; } if (fSepType != NULL){ Log() << kWARNING << "Regression Trees do not work with Separation type other than --> I will use it instead" << Endl; fSepType = NULL; } } if (fRandomisedTrees){ Log() << kINFO << " Randomised trees use no pruning" << Endl; fPruneMethod = DecisionTree::kNoPruning; // fBoostType = "Bagging"; } if (fNTrees==0){ Log() << kERROR << " Zero Decision Trees demanded... that does not work !! " << " I set it to 1 .. just so that the program does not crash" << Endl; fNTrees = 1; } fNegWeightTreatment.ToLower(); if (fNegWeightTreatment == "ignorenegweightsintraining") fNoNegWeightsInTraining = kTRUE; else if (fNegWeightTreatment == "nonegweightsintraining") fNoNegWeightsInTraining = kTRUE; else if (fNegWeightTreatment == "inverseboostnegweights") fInverseBoostNegWeights = kTRUE; else if (fNegWeightTreatment == "pairnegweightsglobal") fPairNegWeightsGlobal = kTRUE; else if (fNegWeightTreatment == "pray") Log() << kWARNING << "Yes, good luck with praying " << Endl; else { Log() << kINFO << GetOptions() << Endl; Log() << kFATAL << " unknown option for treating negative event weights during training " << fNegWeightTreatment << " requested" << Endl; } if (fNegWeightTreatment == "pairnegweightsglobal") Log() << kWARNING << " you specified the option NegWeightTreatment=PairNegWeightsGlobal : This option is still considered EXPERIMENTAL !! " << Endl; // dealing with deprecated options ! 
if (fNNodesMax>0) { UInt_t tmp=1; // depth=0 == 1 node fMaxDepth=0; while (tmp < fNNodesMax){ tmp+=2*tmp; fMaxDepth++; } Log() << kWARNING << "You have specified a deprecated option *NNodesMax="< please use *UseBaggedBoost* instead" << Endl; } } //_______________________________________________________________________ void TMVA::MethodBDT::SetMinNodeSize(Double_t sizeInPercent){ if (sizeInPercent > 0 && sizeInPercent < 50){ fMinNodeSize=sizeInPercent; } else { Log() << kFATAL << "you have demanded a minimal node size of " << sizeInPercent << "% of the training events.. \n" << " that somehow does not make sense "<12) ? UInt_t(GetNvar()/8) : TMath::Max(UInt_t(2),UInt_t(GetNvar()/3)); fUseNvars = UInt_t(TMath::Sqrt(GetNvar())+0.6); fUsePoissonNvars = kTRUE; fShrinkage = 1.0; fSumOfWeights = 0.0; // reference cut value to distinguish signal-like from background-like events SetSignalReferenceCut( 0 ); } //_______________________________________________________________________ void TMVA::MethodBDT::Reset( void ) { // reset the method, as if it had just been instantiated (forget all training etc.) // I keep the BDT EventSample and its Validation sample (eventuall they should all // disappear and just use the DataSet samples .. // remove all the trees for (UInt_t i=0; iDelete(); fMonitorNtuple=NULL; fVariableImportance.clear(); fResiduals.clear(); // now done in "InitEventSample" which is called in "Train" // reset all previously stored/accumulated BOOST weights in the event sample //for (UInt_t iev=0; ievSetBoostWeight(1.); if (Data()) Data()->DeleteResults(GetMethodName(), Types::kTraining, GetAnalysisType()); Log() << kDEBUG << " successfully(?) reset the method " << Endl; } //_______________________________________________________________________ TMVA::MethodBDT::~MethodBDT( void ) { //destructor // Note: fEventSample and ValidationSample are already deleted at the end of TRAIN // When they are not used anymore // for (UInt_t i=0; i Data().TrainingTree() is zero pointer" << Endl; if (fEventSample.size() > 0) { // do not re-initialise the event sample, just set all boostweights to 1. as if it were untouched // reset all previously stored/accumulated BOOST weights in the event sample for (UInt_t iev=0; ievSetBoostWeight(1.); } else { Data()->SetCurrentType(Types::kTraining); UInt_t nevents = Data()->GetNTrainingEvents(); std::vector tmpEventSample; for (Long64_t ievt=0; ievt 0.05) continue; } if (event->GetWeight() < 0 && (IgnoreEventsWithNegWeightsInTraining() || fNoNegWeightsInTraining)){ if (firstNegWeight) { Log() << kWARNING << " Note, you have events with negative event weight in the sample, but you've chosen to ignore them" << Endl; firstNegWeight=kFALSE; } delete event; }else if (event->GetWeight()==0){ if (firstZeroWeight) { firstZeroWeight = kFALSE; Log() << "Events with weight == 0 are going to be simply ignored " << Endl; } }else{ if (event->GetWeight() < 0) { fTrainWithNegWeights=kTRUE; if (firstNegWeight){ firstNegWeight = kFALSE; if (fPairNegWeightsGlobal){ Log() << kWARNING << "Events with negative event weights are found and " << " will be removed prior to the actual BDT training by global " << " paring (and subsequent annihilation) with positiv weight events" << Endl; }else{ Log() << kWARNING << "Events with negative event weights are USED during " << "the BDT training. This might cause problems with small node sizes " << "or with the boosting. 
Please remove negative events from training " << "using the option *IgnoreEventsWithNegWeightsInTraining* in case you " << "observe problems with the boosting" << Endl; } } } // if fAutomatic == true you need a validation sample to optimize pruning if (fAutomatic) { Double_t modulo = 1.0/(fFValidationEvents); Int_t imodulo = static_cast( fmod(modulo,1.0) > 0.5 ? ceil(modulo) : floor(modulo) ); if (ievt % imodulo == 0) fValidationSample.push_back( event ); else fEventSample.push_back( event ); } else { fEventSample.push_back(event); } } } if (fAutomatic) { Log() << kINFO << " Internally I use " << fEventSample.size() << " for Training and " << fValidationSample.size() << " for Pruning Validation (" << ((Float_t)fValidationSample.size())/((Float_t)fEventSample.size()+fValidationSample.size())*100.0 << "% of training used for validation)" << Endl; } // some pre-processing for events with negative weights if (fPairNegWeightsGlobal) PreProcessNegativeEventWeights(); } // it does not make sense in decision trees to start with unequal number of signal/background // events (weights) .. hence normalize them now (happens atherwise in first 'boosting step' // anyway.. // Also make sure, that the sum_of_weights == sample.size() .. as this is assumed in // the DecisionTree to derive a sensible number for "fMinSize" (min.#events in node) // that currently is an OR between "weighted" and "unweighted number" // I want: // nS + nB = n // a*SW + b*BW = n // (a*SW)/(b*BW) = fSigToBkgFraction // // ==> b = n/((1+f)BW) and a = (nf/(1+f))/SW Double_t nevents = fEventSample.size(); Double_t sumSigW=0, sumBkgW=0; Int_t sumSig=0, sumBkg=0; for (UInt_t ievt=0; ievtGetWeight(); sumSig++; } else { sumBkgW += fEventSample[ievt]->GetWeight(); sumBkg++; } } Double_t normSig = nevents/((1+fSigToBkgFraction)*sumSigW)*fSigToBkgFraction; Double_t normBkg = nevents/((1+fSigToBkgFraction)*sumBkgW); ; Log() << kINFO << "re-normlise events such that Sig and Bkg have respective sum of weights = " << fSigToBkgFraction << Endl; Log() << kINFO << " sig->sig*"<bkg*"<SetBoostWeight(normSig); else fEventSample[ievt]->SetBoostWeight(normBkg); } fTrainSample = &fEventSample; if (fBaggedBoost){ GetBaggedSubSample(fEventSample); fTrainSample = &fSubSample; } //just for debug purposes.. /* sumSigW=0; sumBkgW=0; for (UInt_t ievt=0; ievtGetWeight(); else sumBkgW += fEventSample[ievt]->GetWeight(); } Log() << kWARNING << "sigSumW="< negEvents; for (UInt_t iev = 0; iev < fEventSample.size(); iev++){ if (fEventSample[iev]->GetWeight() < 0) { totalNegWeights += fEventSample[iev]->GetWeight(); negEvents.push_back(fEventSample[iev]); } else { totalPosWeights += fEventSample[iev]->GetWeight(); } totalWeights += fEventSample[iev]->GetWeight(); } if (totalNegWeights == 0 ) { Log() << kINFO << "no negative event weights found .. 
no preprocessing necessary" << Endl; return; } else { Log() << kINFO << "found a total of " << totalNegWeights << " of negative event weights which I am going to try to pair with positive events to annihilate them" << Endl; Log() << kINFO << "found a total of " << totalPosWeights << " of events with positive weights" << Endl; Log() << kINFO << "--> total sum of weights = " << totalWeights << " = " << totalNegWeights+totalPosWeights << Endl; } std::vector* cov = gTools().CalcCovarianceMatrices( fEventSample, 2); TMatrixDSym *invCov; for (Int_t i=0; i<2; i++){ invCov = ((*cov)[i]); if ( TMath::Abs(invCov->Determinant()) < 10E-24 ) { std::cout << " matrix is almost singular with deterninant=" << TMath::Abs(invCov->Determinant()) << " did you use the variables that are linear combinations or highly correlated?" << std::endl; } if ( TMath::Abs(invCov->Determinant()) < 10E-120 ) { std::cout << " matrix is singular with determinant=" << TMath::Abs(invCov->Determinant()) << " did you use the variables that are linear combinations?" << std::endl; } invCov->Invert(); } Log() << kINFO << "Found a total of " << totalNegWeights << " in negative weights out of " << fEventSample.size() << " training events " << Endl; Timer timer(negEvents.size(),"Negative Event paired"); for (UInt_t nev = 0; nev < negEvents.size(); nev++){ timer.DrawProgressBar( nev ); Double_t weight = negEvents[nev]->GetWeight(); UInt_t iClassID = negEvents[nev]->GetClass(); invCov = ((*cov)[iClassID]); while (weight < 0){ // find closest event with positive event weight and "pair" it with the negative event // (add their weight) until there is no negative weight anymore Int_t iMin=-1; Double_t dist, minDist=10E270; for (UInt_t iev = 0; iev < fEventSample.size(); iev++){ if (iClassID==fEventSample[iev]->GetClass() && fEventSample[iev]->GetWeight() > 0){ dist=0; for (UInt_t ivar=0; ivar < GetNvar(); ivar++){ for (UInt_t jvar=0; jvarGetValue(ivar)-fEventSample[iev]->GetValue(ivar))* (*invCov)[ivar][jvar]* (negEvents[nev]->GetValue(jvar)-fEventSample[iev]->GetValue(jvar)); } } if (dist < minDist) { iMin=iev; minDist=dist;} } } if (iMin > -1) { // std::cout << "Happily pairing .. weight before : " << negEvents[nev]->GetWeight() << " and " << fEventSample[iMin]->GetWeight(); Double_t newWeight = (negEvents[nev]->GetWeight() + fEventSample[iMin]->GetWeight()); if (newWeight > 0){ negEvents[nev]->SetBoostWeight( 0 ); fEventSample[iMin]->SetBoostWeight( newWeight/fEventSample[iMin]->GetOriginalWeight() ); // note the weight*boostweight should be "newWeight" } else { negEvents[nev]->SetBoostWeight( newWeight/negEvents[nev]->GetOriginalWeight() ); // note the weight*boostweight should be "newWeight" fEventSample[iMin]->SetBoostWeight( 0 ); } // std::cout << " and afterwards " << negEvents[nev]->GetWeight() << " and the paired " << fEventSample[iMin]->GetWeight() << " dist="<GetWeight(); } } Log() << kINFO << " took: " << timer.GetElapsedTime() << " " << Endl; // just check.. 
now there should be no negative event weight left anymore totalNegWeights = 0; totalPosWeights = 0; totalWeights = 0; Double_t sigWeight=0; Double_t bkgWeight=0; Int_t nSig=0; Int_t nBkg=0; std::vector newEventSample; for (UInt_t iev = 0; iev < fEventSample.size(); iev++){ if (fEventSample[iev]->GetWeight() < 0) { totalNegWeights += fEventSample[iev]->GetWeight(); totalWeights += fEventSample[iev]->GetWeight(); } else { totalPosWeights += fEventSample[iev]->GetWeight(); totalWeights += fEventSample[iev]->GetWeight(); } if (fEventSample[iev]->GetWeight() > 0) { newEventSample.push_back(new Event(*fEventSample[iev])); if (fEventSample[iev]->GetClass() == fSignalClass){ sigWeight += fEventSample[iev]->GetWeight(); nSig+=1; }else{ bkgWeight += fEventSample[iev]->GetWeight(); nBkg+=1; } } } if (totalNegWeights < 0) Log() << kFATAL << " compenstion of negative event weights with positive ones did not work " << totalNegWeights << Endl; for (UInt_t i=0; i("NTrees", new Interval(10,1000,5))); // stepsize 50 tuneParameters.insert(std::pair("MaxDepth", new Interval(2,4,3))); // stepsize 1 tuneParameters.insert(std::pair("MinNodeSize", new LogInterval(1,30,30))); // //tuneParameters.insert(std::pair("NodePurityLimit",new Interval(.4,.6,3))); // stepsize .1 //tuneParameters.insert(std::pair("BaggedSampleFraction",new Interval(.4,.9,6))); // stepsize .1 // method-specific parameters if (fBoostType=="AdaBoost"){ tuneParameters.insert(std::pair("AdaBoostBeta", new Interval(.2,1.,5))); }else if (fBoostType=="Grad"){ tuneParameters.insert(std::pair("Shrinkage", new Interval(0.05,0.50,5))); }else if (fBoostType=="Bagging" && fRandomisedTrees){ Int_t min_var = TMath::FloorNint( GetNvar() * .25 ); Int_t max_var = TMath::CeilNint( GetNvar() * .75 ); tuneParameters.insert(std::pair("UseNvars", new Interval(min_var,max_var,4))); } Log()<::iterator it; for(it=tuneParameters.begin(); it!= tuneParameters.end(); it++){ Log() << kWARNING << it->first << Endl; (it->second)->Print(Log()); Log()< tuneParameters) { // set the tuning parameters accoding to the argument std::map::iterator it; for(it=tuneParameters.begin(); it!= tuneParameters.end(); it++){ Log() << kWARNING << it->first << " = " << it->second << Endl; if (it->first == "MaxDepth" ) SetMaxDepth ((Int_t)it->second); else if (it->first == "MinNodeSize" ) SetMinNodeSize (it->second); else if (it->first == "NTrees" ) SetNTrees ((Int_t)it->second); else if (it->first == "NodePurityLimit") SetNodePurityLimit (it->second); else if (it->first == "AdaBoostBeta" ) SetAdaBoostBeta (it->second); else if (it->first == "Shrinkage" ) SetShrinkage (it->second); else if (it->first == "UseNvars" ) SetUseNvars ((Int_t)it->second); else if (it->first == "BaggedSampleFraction" ) SetBaggedSampleFraction (it->second); else Log() << kFATAL << " SetParameter for " << it->first << " not yet implemented " <GetResults(GetMethodName(), Types::kTraining, GetAnalysisType()); h->SetXTitle("boost weight"); results->Store(h, "BoostWeights"); // Monitor the performance (on TEST sample) versus number of trees if (fDoBoostMonitor){ TH2* boostMonitor = new TH2F("BoostMonitor","ROC Integral Vs iTree",2,0,fNTrees,2,0,1.05); boostMonitor->SetXTitle("#tree"); boostMonitor->SetYTitle("ROC Integral"); results->Store(boostMonitor, "BoostMonitor"); TGraph *boostMonitorGraph = new TGraph(); boostMonitorGraph->SetName("BoostMonitorGraph"); boostMonitorGraph->SetTitle("ROCIntegralVsNTrees"); results->Store(boostMonitorGraph, "BoostMonitorGraph"); } // weights applied in boosting vs tree number h = new 
TH1F("BoostWeightVsTree","Boost weights vs tree",fNTrees,0,fNTrees); h->SetXTitle("#tree"); h->SetYTitle("boost weight"); results->Store(h, "BoostWeightsVsTree"); // error fraction vs tree number h = new TH1F("ErrFractHist","error fraction vs tree number",fNTrees,0,fNTrees); h->SetXTitle("#tree"); h->SetYTitle("error fraction"); results->Store(h, "ErrorFrac"); // nNodesBeforePruning vs tree number nodesBeforePruningVsTree->SetXTitle("#tree"); nodesBeforePruningVsTree->SetYTitle("#tree nodes"); results->Store(nodesBeforePruningVsTree); // nNodesAfterPruning vs tree number nodesAfterPruningVsTree->SetXTitle("#tree"); nodesAfterPruningVsTree->SetYTitle("#tree nodes"); results->Store(nodesAfterPruningVsTree); } fMonitorNtuple= new TTree("MonitorNtuple","BDT variables"); fMonitorNtuple->Branch("iTree",&fITree,"iTree/I"); fMonitorNtuple->Branch("boostWeight",&fBoostWeight,"boostWeight/D"); fMonitorNtuple->Branch("errorFraction",&fErrorFraction,"errorFraction/D"); Timer timer( fNTrees, GetName() ); Int_t nNodesBeforePruningCount = 0; Int_t nNodesAfterPruningCount = 0; Int_t nNodesBeforePruning = 0; Int_t nNodesAfterPruning = 0; if(fBoostType=="Grad"){ InitGradBoost(fEventSample); } Int_t itree=0; Bool_t continueBoost=kTRUE; //for (int itree=0; itreeSetNVars(GetNvar()); if (fUseFisherCuts) { fForest.back()->SetUseFisherCuts(); fForest.back()->SetMinLinCorrForFisher(fMinLinCorrForFisher); fForest.back()->SetUseExclusiveVars(fUseExclusiveVars); } // the minimum linear correlation between two variables demanded for use in fisher criterion in node splitting nNodesBeforePruning = fForest.back()->BuildTree(*fTrainSample); Double_t bw = this->Boost(fEventSample, fForest.back(),i); if (bw > 0) { fBoostWeights.push_back(bw); }else{ fBoostWeights.push_back(0); Log() << kWARNING << "stopped boosting at itree="<SetNVars(GetNvar()); if (fUseFisherCuts) { fForest.back()->SetUseFisherCuts(); fForest.back()->SetMinLinCorrForFisher(fMinLinCorrForFisher); fForest.back()->SetUseExclusiveVars(fUseExclusiveVars); } nNodesBeforePruning = fForest.back()->BuildTree(*fTrainSample); if (fUseYesNoLeaf && !DoRegression() && fBoostType!="Grad") { // remove leaf nodes where both daughter nodes are of same type nNodesBeforePruning = fForest.back()->CleanTree(); } nNodesBeforePruningCount += nNodesBeforePruning; nodesBeforePruningVsTree->SetBinContent(itree+1,nNodesBeforePruning); fForest.back()->SetPruneMethod(fPruneMethod); // set the pruning method for the tree fForest.back()->SetPruneStrength(fPruneStrength); // set the strength parameter std::vector * validationSample = NULL; if(fAutomatic) validationSample = &fValidationSample; Double_t bw = this->Boost(fEventSample, fForest.back()); if (bw > 0) { fBoostWeights.push_back(bw); }else{ fBoostWeights.push_back(0); Log() << kWARNING << "stopped boosting at itree="<PruneTree(validationSample); if (fUseYesNoLeaf && !DoRegression() && fBoostType!="Grad"){ // remove leaf nodes where both daughter nodes are of same type fForest.back()->CleanTree(); } nNodesAfterPruning = fForest.back()->GetNNodes(); nNodesAfterPruningCount += nNodesAfterPruning; nodesAfterPruningVsTree->SetBinContent(itree+1,nNodesAfterPruning); fITree = itree; fMonitorNtuple->Fill(); if (fDoBoostMonitor){ if (! 
DoRegression() ){ if ( itree==fNTrees-1 || (!(itree%500)) || (!(itree%250) && itree <1000)|| (!(itree%100) && itree < 500)|| (!(itree%50) && itree < 250)|| (!(itree%25) && itree < 150)|| (!(itree%10) && itree < 50)|| (!(itree%5) && itree < 20) ) BoostMonitor(itree); } } } itree++; } // get elapsed time Log() << kINFO << " elapsed time: " << timer.GetElapsedTime() << " " << Endl; if (fPruneMethod == DecisionTree::kNoPruning) { Log() << kINFO << " average number of nodes (w/o pruning) : " << nNodesBeforePruningCount/GetNTrees() << Endl; } else { Log() << kINFO << " average number of nodes before/after pruning : " << nNodesBeforePruningCount/GetNTrees() << " / " << nNodesAfterPruningCount/GetNTrees() << Endl; } TMVA::DecisionTreeNode::fgIsTraining=false; // reset all previously stored/accumulated BOOST weights in the event sample // for (UInt_t iev=0; ievSetBoostWeight(1.); Log() << kDEBUG << "Now I delete the privat data sample"<< Endl; for (UInt_t i=0; iCheckEvent(e,kFALSE); } return 2.0/(1.0+exp(-2.0*sum))-1; //MVA output between -1 and 1 } //_______________________________________________________________________ void TMVA::MethodBDT::UpdateTargets(std::vector& eventSample, UInt_t cls) { //Calculate residua for all events; if(DoMulticlass()){ UInt_t nClasses = DataInfo().GetNClasses(); for (std::vector::iterator e=eventSample.begin(); e!=eventSample.end();e++) { fResiduals[*e].at(cls)+=fForest.back()->CheckEvent(*e,kFALSE); if(cls == nClasses-1){ for(UInt_t i=0;iGetClass()==i)?(1.0-p_cls):(-p_cls); const_cast(*e)->SetTarget(i,res); } } } } else{ for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { fResiduals[*e].at(0)+=fForest.back()->CheckEvent(*e,kFALSE); Double_t p_sig=1.0/(1.0+exp(-2.0*fResiduals[*e].at(0))); Double_t res = (DataInfo().IsSignal(*e)?1:0)-p_sig; const_cast(*e)->SetTarget(0,res); } } } //_______________________________________________________________________ void TMVA::MethodBDT::UpdateTargetsRegression(std::vector& eventSample, Bool_t first) { //Calculate current residuals for all events and update targets for next iteration for (std::vector::const_iterator e=fEventSample.begin(); e!=fEventSample.end();e++) { if(!first){ fWeightedResiduals[*e].first -= fForest.back()->CheckEvent(*e,kFALSE); } } fSumOfWeights = 0; vector< std::pair > temp; for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++){ temp.push_back(make_pair(fabs(fWeightedResiduals[*e].first),fWeightedResiduals[*e].second)); fSumOfWeights += (*e)->GetWeight(); } fTransitionPoint = GetWeightedQuantile(temp,0.7,fSumOfWeights); Int_t i=0; for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { if(temp[i].first<=fTransitionPoint) const_cast(*e)->SetTarget(0,fWeightedResiduals[*e].first); else const_cast(*e)->SetTarget(0,fTransitionPoint*(fWeightedResiduals[*e].first<0?-1.0:1.0)); i++; } } //_______________________________________________________________________ Double_t TMVA::MethodBDT::GetWeightedQuantile(vector< std::pair > vec, const Double_t quantile, const Double_t norm){ //calculates the quantile of the distribution of the first pair entries weighted with the values in the second pair entries Double_t temp = 0.0; std::sort(vec.begin(), vec.end()); UInt_t i = 0; while(i= vec.size()) return 0.; // prevent uncontrolled memory access in return value calculation return vec[i].first; } //_______________________________________________________________________ Double_t TMVA::MethodBDT::GradBoost(std::vector& eventSample, 
DecisionTree *dt, UInt_t cls) { //Calculate the desired response value for each region std::map > leaves; for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { Double_t weight = (*e)->GetWeight(); TMVA::DecisionTreeNode* node = dt->GetEventNode(*(*e)); if ((leaves[node]).empty()){ (leaves[node]).push_back((*e)->GetTarget(cls)* weight); (leaves[node]).push_back(fabs((*e)->GetTarget(cls))*(1.0-fabs((*e)->GetTarget(cls))) * weight* weight); } else { (leaves[node])[0]+=((*e)->GetTarget(cls)* weight); (leaves[node])[1]+=fabs((*e)->GetTarget(cls))*(1.0-fabs((*e)->GetTarget(cls))) * weight* weight; } } for (std::map >::iterator iLeave=leaves.begin(); iLeave!=leaves.end();++iLeave){ if ((iLeave->second)[1]<1e-30) (iLeave->second)[1]=1e-30; (iLeave->first)->SetResponse(fShrinkage/DataInfo().GetNClasses()*(iLeave->second)[0]/((iLeave->second)[1])); } //call UpdateTargets before next tree is grown DoMulticlass() ? UpdateTargets(fEventSample, cls) : UpdateTargets(fEventSample); return 1; //trees all have the same weight } //_______________________________________________________________________ Double_t TMVA::MethodBDT::GradBoostRegression(std::vector& eventSample, DecisionTree *dt ) { // Implementation of M_TreeBoost using a Huber loss function as desribed by Friedman 1999 std::map leaveWeights; std::map > > leaves; UInt_t i =0; for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { TMVA::DecisionTreeNode* node = dt->GetEventNode(*(*e)); (leaves[node]).push_back(make_pair(fWeightedResiduals[*e].first,(*e)->GetWeight())); (leaveWeights[node]) += (*e)->GetWeight(); i++; } for (std::map > >::iterator iLeave=leaves.begin(); iLeave!=leaves.end();++iLeave){ Double_t shift=0,diff= 0; Double_t ResidualMedian = GetWeightedQuantile(iLeave->second,0.5,leaveWeights[iLeave->first]); for(UInt_t j=0;j<((iLeave->second).size());j++){ diff = (iLeave->second)[j].first-ResidualMedian; shift+=1.0/((iLeave->second).size())*((diff<0)?-1.0:1.0)*TMath::Min(fTransitionPoint,fabs(diff)); } (iLeave->first)->SetResponse(fShrinkage*(ResidualMedian+shift)); } UpdateTargetsRegression(*fTrainSample); return 1; } //_______________________________________________________________________ void TMVA::MethodBDT::InitGradBoost( std::vector& eventSample) { // initialize targets for first tree fSumOfWeights = 0; fSepType=NULL; //set fSepType to NULL (regression trees are used for both classification an regression) std::vector > temp; if(DoRegression()){ for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { fWeightedResiduals[*e]= make_pair((*e)->GetTarget(0), (*e)->GetWeight()); fSumOfWeights+=(*e)->GetWeight(); temp.push_back(make_pair(fWeightedResiduals[*e].first,fWeightedResiduals[*e].second)); } Double_t weightedMedian = GetWeightedQuantile(temp,0.5, fSumOfWeights); //Store the weighted median as a first boosweight for later use fBoostWeights.push_back(weightedMedian); std::map >::iterator res = fWeightedResiduals.begin(); for (; res!=fWeightedResiduals.end(); ++res ) { //substract the gloabl median from all residuals (*res).second.first -= weightedMedian; } UpdateTargetsRegression(*fTrainSample,kTRUE); return; } else if(DoMulticlass()){ UInt_t nClasses = DataInfo().GetNClasses(); for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { for (UInt_t i=0;iGetClass()==i?(1-1.0/nClasses):(-1.0/nClasses); const_cast(*e)->SetTarget(i,r); fResiduals[*e].push_back(0); } } } else{ for (std::vector::const_iterator 
e=eventSample.begin(); e!=eventSample.end();e++) { Double_t r = (DataInfo().IsSignal(*e)?1:0)-0.5; //Calculate initial residua const_cast(*e)->SetTarget(0,r); fResiduals[*e].push_back(0); } } } //_______________________________________________________________________ Double_t TMVA::MethodBDT::TestTreeQuality( DecisionTree *dt ) { // test the tree quality.. in terms of Miscalssification Double_t ncorrect=0, nfalse=0; for (UInt_t ievt=0; ievtCheckEvent(fValidationSample[ievt]) > fNodePurityLimit ) ? 1 : 0; if (isSignalType == (DataInfo().IsSignal(fValidationSample[ievt])) ) { ncorrect += fValidationSample[ievt]->GetWeight(); } else{ nfalse += fValidationSample[ievt]->GetWeight(); } } return ncorrect / (ncorrect + nfalse); } //_______________________________________________________________________ Double_t TMVA::MethodBDT::Boost( std::vector& eventSample, DecisionTree *dt, UInt_t cls ) { // apply the boosting alogrithim (the algorithm is selecte via the the "option" given // in the constructor. The return value is the boosting weight Double_t returnVal=-1; if (fBoostType=="AdaBoost") returnVal = this->AdaBoost (eventSample, dt); else if (fBoostType=="AdaCost") returnVal = this->AdaCost (eventSample, dt); else if (fBoostType=="Bagging") returnVal = this->Bagging ( ); else if (fBoostType=="RegBoost") returnVal = this->RegBoost (eventSample, dt); else if (fBoostType=="AdaBoostR2") returnVal = this->AdaBoostR2(eventSample, dt); else if (fBoostType=="Grad"){ if(DoRegression()) returnVal = this->GradBoostRegression(eventSample, dt); else if(DoMulticlass()) returnVal = this->GradBoost (eventSample, dt, cls); else returnVal = this->GradBoost (eventSample, dt); } else { Log() << kINFO << GetOptions() << Endl; Log() << kFATAL << " unknown boost option " << fBoostType<< " called" << Endl; } if (fBaggedBoost){ GetBaggedSubSample(fEventSample); } return returnVal; } //_______________________________________________________________________ void TMVA::MethodBDT::BoostMonitor(Int_t iTree) { // fills the ROCIntegral vs Itree from the testSample for the monitoring plots // during the training .. 
but using the testing events Results* results = Data()->GetResults(GetMethodName(),Types::kTraining, Types::kMaxAnalysisType); TH1F *tmpS = new TH1F( "tmpS", "", 100 , -1., 1.00001 ); TH1F *tmpB = new TH1F( "tmpB", "", 100 , -1., 1.00001 ); TH1F *tmp; UInt_t signalClassNr = DataInfo().GetClassInfo("Signal")->GetNumber(); // const std::vector events=Data()->GetEventCollection(Types::kTesting); // // fMethod->GetTransformationHandler().CalcTransformations(fMethod->Data()->GetEventCollection(Types::kTesting)); // for (UInt_t iev=0; iev < events.size() ; iev++){ // if (events[iev]->GetClass() == signalClassNr) tmp=tmpS; // else tmp=tmpB; // tmp->Fill(PrivateGetMvaValue(*(events[iev])),events[iev]->GetWeight()); // } UInt_t nevents = Data()->GetNTestEvents(); for (UInt_t iev=0; iev < nevents; iev++){ const Event* event = GetTestingEvent(iev); if (event->GetClass() == signalClassNr) {tmp=tmpS;} else {tmp=tmpB;} tmp->Fill(PrivateGetMvaValue(event),event->GetWeight()); } Double_t max=1; std::vector hS; std::vector hB; for (UInt_t ivar=0; ivarStore(hS.back(),hS.back()->GetTitle()); results->Store(hB.back(),hB.back()->GetTitle()); } for (UInt_t iev=0; iev < fEventSample.size(); iev++){ if (fEventSample[iev]->GetBoostWeight() > max) max = 1.01*fEventSample[iev]->GetBoostWeight(); } TH1F *tmpBoostWeightsS = new TH1F(Form("BoostWeightsInTreeS%d",iTree),Form("BoostWeightsInTreeS%d",iTree),100,0.,max); TH1F *tmpBoostWeightsB = new TH1F(Form("BoostWeightsInTreeB%d",iTree),Form("BoostWeightsInTreeB%d",iTree),100,0.,max); results->Store(tmpBoostWeightsS,tmpBoostWeightsS->GetTitle()); results->Store(tmpBoostWeightsB,tmpBoostWeightsB->GetTitle()); TH1F *tmpBoostWeights; std::vector *h; for (UInt_t iev=0; iev < fEventSample.size(); iev++){ if (fEventSample[iev]->GetClass() == signalClassNr) { tmpBoostWeights=tmpBoostWeightsS; h=&hS; }else{ tmpBoostWeights=tmpBoostWeightsB; h=&hB; } tmpBoostWeights->Fill(fEventSample[iev]->GetBoostWeight()); for (UInt_t ivar=0; ivarFill(fEventSample[iev]->GetValue(ivar),fEventSample[iev]->GetWeight()); } } TMVA::PDF *sig = new TMVA::PDF( " PDF Sig", tmpS, TMVA::PDF::kSpline3 ); TMVA::PDF *bkg = new TMVA::PDF( " PDF Bkg", tmpB, TMVA::PDF::kSpline3 ); TGraph* gr=results->GetGraph("BoostMonitorGraph"); Int_t nPoints = gr->GetN(); gr->Set(nPoints+1); gr->SetPoint(nPoints,(Double_t)iTree+1,GetROCIntegral(sig,bkg)); tmpS->Delete(); tmpB->Delete(); delete sig; delete bkg; return; } //_______________________________________________________________________ Double_t TMVA::MethodBDT::AdaBoost( std::vector& eventSample, DecisionTree *dt ) { // the AdaBoost implementation. // a new training sample is generated by weighting // events that are misclassified by the decision tree. The weight // applied is w = (1-err)/err or more general: // w = ((1-err)/err)^beta // where err is the fraction of misclassified events in the tree ( <0.5 assuming // demanding the that previous selection was better than random guessing) // and "beta" being a free parameter (standard: beta = 1) that modifies the // boosting. 
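   // In formulas, a sketch of what is implemented below (for the default yes/no leaf
   // classification; the regression case uses the deviation-based error instead):
   //    err   = sum_{misclassified i} w_i / sum_i w_i
   //    alpha = AdaBoostBeta * ln( (1-err)/err )
   //    w_i  -> w_i * exp(alpha)   for misclassified events,
   // after which all weights are renormalised so that their sum stays equal to the
   // number of training events.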
Double_t err=0, sumGlobalw=0, sumGlobalwfalse=0, sumGlobalwfalse2=0; std::vector sumw(DataInfo().GetNClasses(),0); //for individually re-scaling each class std::map sigEventsInNode; // how many signal events of the training tree Double_t maxDev=0; for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { Double_t w = (*e)->GetWeight(); sumGlobalw += w; UInt_t iclass=(*e)->GetClass(); sumw[iclass] += w; if ( DoRegression() ) { Double_t tmpDev = TMath::Abs(dt->CheckEvent(*e,kFALSE) - (*e)->GetTarget(0) ); sumGlobalwfalse += w * tmpDev; sumGlobalwfalse2 += w * tmpDev*tmpDev; if (tmpDev > maxDev) maxDev = tmpDev; }else{ if (fUseYesNoLeaf){ Bool_t isSignalType = (dt->CheckEvent(*e,fUseYesNoLeaf) > fNodePurityLimit ); if (!(isSignalType == DataInfo().IsSignal(*e))) { sumGlobalwfalse+= w; } }else{ Double_t dtoutput = (dt->CheckEvent(*e,fUseYesNoLeaf) - 0.5)*2.; Int_t trueType; if (DataInfo().IsSignal(*e)) trueType = 1; else trueType = -1; sumGlobalwfalse+= w*trueType*dtoutput; } } } err = sumGlobalwfalse/sumGlobalw ; if ( DoRegression() ) { //if quadratic loss: if (fAdaBoostR2Loss=="linear"){ err = sumGlobalwfalse/maxDev/sumGlobalw ; } else if (fAdaBoostR2Loss=="quadratic"){ err = sumGlobalwfalse2/maxDev/maxDev/sumGlobalw ; } else if (fAdaBoostR2Loss=="exponential"){ err = 0; for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { Double_t w = (*e)->GetWeight(); Double_t tmpDev = TMath::Abs(dt->CheckEvent(*e,kFALSE) - (*e)->GetTarget(0) ); err += w * (1 - exp (-tmpDev/maxDev)) / sumGlobalw; } } else { Log() << kFATAL << " you've chosen a Loss type for Adaboost other than linear, quadratic or exponential " << " namely " << fAdaBoostR2Loss << "\n" << "and this is not implemented... a typo in the options ??" < newSumw(sumw.size(),0); Double_t boostWeight=1.; if (err >= 0.5 && fUseYesNoLeaf) { // sanity check ... should never happen as otherwise there is apparently // something odd with the assignement of the leaf nodes (rem: you use the training // events for this determination of the error rate) if (dt->GetNNodes() == 1){ Log() << kERROR << " YOUR tree has only 1 Node... kind of a funny *tree*. I cannot " << "boost such a thing... if after 1 step the error rate is == 0.5" << Endl << "please check why this happens, maybe too many events per node requested ?" << Endl; }else{ Log() << kERROR << " The error rate in the BDT boosting is > 0.5. ("<< err << ") That should not happen, please check your code (i.e... the BDT code), I " << " stop boosting here" << Endl; return -1; } err = 0.5; } else if (err < 0) { Log() << kERROR << " The error rate in the BDT boosting is < 0. That can happen" << " due to improper treatment of negative weights in a Monte Carlo.. (if you have" << " an idea on how to do it in a better way, please let me know (Helge.Voss@cern.ch)" << " for the time being I set it to its absolute value.. just to continue.." 
<< Endl; err = TMath::Abs(err); } if (fUseYesNoLeaf) boostWeight = TMath::Log((1.-err)/err)*fAdaBoostBeta; else boostWeight = TMath::Log((1.+err)/(1-err))*fAdaBoostBeta; Log() << kDEBUG << "BDT AdaBoos wrong/all: " << sumGlobalwfalse << "/" << sumGlobalw << " 1-err/err="<GetResults(GetMethodName(),Types::kTraining, Types::kMaxAnalysisType); for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { if (fUseYesNoLeaf||DoRegression()){ if ((!( (dt->CheckEvent(*e,fUseYesNoLeaf) > fNodePurityLimit ) == DataInfo().IsSignal(*e))) || DoRegression()) { Double_t boostfactor = TMath::Exp(boostWeight); if (DoRegression()) boostfactor = TMath::Power(1/boostWeight,(1.-TMath::Abs(dt->CheckEvent(*e,kFALSE) - (*e)->GetTarget(0) )/maxDev ) ); if ( (*e)->GetWeight() > 0 ){ (*e)->SetBoostWeight( (*e)->GetBoostWeight() * boostfactor); // Helge change back (*e)->ScaleBoostWeight(boostfactor); if (DoRegression()) results->GetHist("BoostWeights")->Fill(boostfactor); } else { if ( fInverseBoostNegWeights )(*e)->ScaleBoostWeight( 1. / boostfactor); // if the original event weight is negative, and you want to "increase" the events "positive" influence, you'd reather make the event weight "smaller" in terms of it's absolute value while still keeping it something "negative" else (*e)->SetBoostWeight( (*e)->GetBoostWeight() * boostfactor); } } }else{ Double_t dtoutput = (dt->CheckEvent(*e,fUseYesNoLeaf) - 0.5)*2.; Int_t trueType; if (DataInfo().IsSignal(*e)) trueType = 1; else trueType = -1; Double_t boostfactor = TMath::Exp(-1*boostWeight*trueType*dtoutput); if ( (*e)->GetWeight() > 0 ){ (*e)->SetBoostWeight( (*e)->GetBoostWeight() * boostfactor); // Helge change back (*e)->ScaleBoostWeight(boostfactor); if (DoRegression()) results->GetHist("BoostWeights")->Fill(boostfactor); } else { if ( fInverseBoostNegWeights )(*e)->ScaleBoostWeight( 1. / boostfactor); // if the original event weight is negative, and you want to "increase" the events "positive" influence, you'd reather make the event weight "smaller" in terms of it's absolute value while still keeping it something "negative" else (*e)->SetBoostWeight( (*e)->GetBoostWeight() * boostfactor); } } newSumGlobalw+=(*e)->GetWeight(); newSumw[(*e)->GetClass()] += (*e)->GetWeight(); } // Double_t globalNormWeight=sumGlobalw/newSumGlobalw; Double_t globalNormWeight=( (Double_t) eventSample.size())/newSumGlobalw; Log() << kDEBUG << "new Nsig="<Fill(boostWeight); results->GetHist("BoostWeightsVsTree")->SetBinContent(fForest.size(),boostWeight); results->GetHist("ErrorFrac")->SetBinContent(fForest.size(),err); fBoostWeight = boostWeight; fErrorFraction = err; return boostWeight; } //_______________________________________________________________________ Double_t TMVA::MethodBDT::AdaCost( vector& eventSample, DecisionTree *dt ) { // the AdaCost boosting algorithm takes a simple cost Matrix (currently fixed for // all events... later could be modified to use individual cost matrices for each // events as in the original paper... // // true_signal true_bkg // ---------------------------------- // sel_signal | Css Ctb_ss Cxx.. in the range [0,1] // sel_bkg | Cts_sb Cbb // // and takes this into account when calculating the misclass. 
cost (former: error fraction): // // err = sum_events ( weight* y_true*y_sel * beta(event) // Double_t Css = fCss; Double_t Cbb = fCbb; Double_t Cts_sb = fCts_sb; Double_t Ctb_ss = fCtb_ss; Double_t err=0, sumGlobalWeights=0, sumGlobalCost=0; std::vector sumw(DataInfo().GetNClasses(),0); //for individually re-scaling each class std::map sigEventsInNode; // how many signal events of the training tree for (vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { Double_t w = (*e)->GetWeight(); sumGlobalWeights += w; UInt_t iclass=(*e)->GetClass(); sumw[iclass] += w; if ( DoRegression() ) { Log() << kFATAL << " AdaCost not implemented for regression"<CheckEvent(*e,false) - 0.5)*2.; Int_t trueType; Bool_t isTrueSignal = DataInfo().IsSignal(*e); Bool_t isSelectedSignal = (dtoutput>0); if (isTrueSignal) trueType = 1; else trueType = -1; Double_t cost=0; if (isTrueSignal && isSelectedSignal) cost=Css; else if (isTrueSignal && !isSelectedSignal) cost=Cts_sb; else if (!isTrueSignal && isSelectedSignal) cost=Ctb_ss; else if (!isTrueSignal && !isSelectedSignal) cost=Cbb; else Log() << kERROR << "something went wrong in AdaCost" << Endl; sumGlobalCost+= w*trueType*dtoutput*cost; } } if ( DoRegression() ) { Log() << kFATAL << " AdaCost not implemented for regression"< newSumClassWeights(sumw.size(),0); Double_t boostWeight = TMath::Log((1+sumGlobalCost)/(1-sumGlobalCost)) * fAdaBoostBeta; Results* results = Data()->GetResults(GetMethodName(),Types::kTraining, Types::kMaxAnalysisType); for (vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { Double_t dtoutput = (dt->CheckEvent(*e,false) - 0.5)*2.; Int_t trueType; Bool_t isTrueSignal = DataInfo().IsSignal(*e); Bool_t isSelectedSignal = (dtoutput>0); if (isTrueSignal) trueType = 1; else trueType = -1; Double_t cost=0; if (isTrueSignal && isSelectedSignal) cost=Css; else if (isTrueSignal && !isSelectedSignal) cost=Cts_sb; else if (!isTrueSignal && isSelectedSignal) cost=Ctb_ss; else if (!isTrueSignal && !isSelectedSignal) cost=Cbb; else Log() << kERROR << "something went wrong in AdaCost" << Endl; Double_t boostfactor = TMath::Exp(-1*boostWeight*trueType*dtoutput*cost); if (DoRegression())Log() << kFATAL << " AdaCost not implemented for regression"<GetWeight() > 0 ){ (*e)->SetBoostWeight( (*e)->GetBoostWeight() * boostfactor); // Helge change back (*e)->ScaleBoostWeight(boostfactor); if (DoRegression())Log() << kFATAL << " AdaCost not implemented for regression"<ScaleBoostWeight( 1. / boostfactor); // if the original event weight is negative, and you want to "increase" the events "positive" influence, you'd reather make the event weight "smaller" in terms of it's absolute value while still keeping it something "negative" } newSumGlobalWeights+=(*e)->GetWeight(); newSumClassWeights[(*e)->GetClass()] += (*e)->GetWeight(); } // Double_t globalNormWeight=sumGlobalWeights/newSumGlobalWeights; Double_t globalNormWeight=Double_t(eventSample.size())/newSumGlobalWeights; Log() << kDEBUG << "new Nsig="<Fill(boostWeight); results->GetHist("BoostWeightsVsTree")->SetBinContent(fForest.size(),boostWeight); results->GetHist("ErrorFrac")->SetBinContent(fForest.size(),err); fBoostWeight = boostWeight; fErrorFraction = err; return boostWeight; } //_______________________________________________________________________ Double_t TMVA::MethodBDT::Bagging( ) { // call it boot-strapping, re-sampling or whatever you like, in the end it is nothing // else but applying "random" poisson weights to each event. 
// this is now done in "MethodBDT::Boost as it might be used by other boost methods, too // GetBaggedSample(eventSample); return 1.; //here as there are random weights for each event, just return a constant==1; } //_______________________________________________________________________ void TMVA::MethodBDT::GetBaggedSubSample(std::vector& eventSample) { // fills fEventSample with fBaggedSampleFraction*NEvents random training events Double_t n; TRandom3 *trandom = new TRandom3(100*fForest.size()+1234); if (!fSubSample.empty()) fSubSample.clear(); for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { n = trandom->PoissonD(fBaggedSampleFraction); for (Int_t i=0;iRndm()& /* eventSample */, DecisionTree* /* dt */ ) { // a special boosting only for Regression ... // maybe I'll implement it later... return 1; } //_______________________________________________________________________ Double_t TMVA::MethodBDT::AdaBoostR2( std::vector& eventSample, DecisionTree *dt ) { // adaption of the AdaBoost to regression problems (see H.Drucker 1997) if ( !DoRegression() ) Log() << kFATAL << "Somehow you chose a regression boost method for a classification job" << Endl; Double_t err=0, sumw=0, sumwfalse=0, sumwfalse2=0; Double_t maxDev=0; for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { Double_t w = (*e)->GetWeight(); sumw += w; Double_t tmpDev = TMath::Abs(dt->CheckEvent(*e,kFALSE) - (*e)->GetTarget(0) ); sumwfalse += w * tmpDev; sumwfalse2 += w * tmpDev*tmpDev; if (tmpDev > maxDev) maxDev = tmpDev; } //if quadratic loss: if (fAdaBoostR2Loss=="linear"){ err = sumwfalse/maxDev/sumw ; } else if (fAdaBoostR2Loss=="quadratic"){ err = sumwfalse2/maxDev/maxDev/sumw ; } else if (fAdaBoostR2Loss=="exponential"){ err = 0; for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { Double_t w = (*e)->GetWeight(); Double_t tmpDev = TMath::Abs(dt->CheckEvent(*e,kFALSE) - (*e)->GetTarget(0) ); err += w * (1 - exp (-tmpDev/maxDev)) / sumw; } } else { Log() << kFATAL << " you've chosen a Loss type for Adaboost other than linear, quadratic or exponential " << " namely " << fAdaBoostR2Loss << "\n" << "and this is not implemented... a typo in the options ??" <= 0.5) { // sanity check ... should never happen as otherwise there is apparently // something odd with the assignement of the leaf nodes (rem: you use the training // events for this determination of the error rate) if (dt->GetNNodes() == 1){ Log() << kERROR << " YOUR tree has only 1 Node... kind of a funny *tree*. I cannot " << "boost such a thing... if after 1 step the error rate is == 0.5" << Endl << "please check why this happens, maybe too many events per node requested ?" << Endl; }else{ Log() << kERROR << " The error rate in the BDT boosting is > 0.5. ("<< err << ") That should not happen, but is possible for regression trees, and" << " should trigger a stop for the boosting. please check your code (i.e... the BDT code), I " << " stop boosting " << Endl; return -1; } err = 0.5; } else if (err < 0) { Log() << kERROR << " The error rate in the BDT boosting is < 0. That can happen" << " due to improper treatment of negative weights in a Monte Carlo.. (if you have" << " an idea on how to do it in a better way, please let me know (Helge.Voss@cern.ch)" << " for the time being I set it to its absolute value.. just to continue.." 
<< Endl; err = TMath::Abs(err); } Double_t boostWeight = err / (1.-err); Double_t newSumw=0; Results* results = Data()->GetResults(GetMethodName(), Types::kTraining, Types::kMaxAnalysisType); for (std::vector::const_iterator e=eventSample.begin(); e!=eventSample.end();e++) { Double_t boostfactor = TMath::Power(boostWeight,(1.-TMath::Abs(dt->CheckEvent(*e,kFALSE) - (*e)->GetTarget(0) )/maxDev ) ); results->GetHist("BoostWeights")->Fill(boostfactor); // std::cout << "R2 " << boostfactor << " " << boostWeight << " " << (1.-TMath::Abs(dt->CheckEvent(*e,kFALSE) - (*e)->GetTarget(0) )/maxDev) << std::endl; if ( (*e)->GetWeight() > 0 ){ Float_t newBoostWeight = (*e)->GetBoostWeight() * boostfactor; Float_t newWeight = (*e)->GetWeight() * (*e)->GetBoostWeight() * boostfactor; if (newWeight == 0) { Log() << kINFO << "Weight= " << (*e)->GetWeight() << Endl; Log() << kINFO << "BoostWeight= " << (*e)->GetBoostWeight() << Endl; Log() << kINFO << "boostweight="<SetBinContent(fForest.size(),1./boostWeight); results->GetHist("ErrorFrac")->SetBinContent(fForest.size(),err); fBoostWeight = boostWeight; fErrorFraction = err; return TMath::Log(1./boostWeight); } //_______________________________________________________________________ void TMVA::MethodBDT::AddWeightsXMLTo( void* parent ) const { // write weights to XML void* wght = gTools().AddChild(parent, "Weights"); if (fDoPreselection){ for (UInt_t ivar=0; ivarGetAnalysisType() ); for (UInt_t i=0; i< fForest.size(); i++) { void* trxml = fForest[i]->AddXMLTo(wght); gTools().AddAttr( trxml, "boostWeight", fBoostWeights[i] ); gTools().AddAttr( trxml, "itree", i ); } } //_______________________________________________________________________ void TMVA::MethodBDT::ReadWeightsFromXML(void* parent) { // reads the BDT from the xml file UInt_t i; for (i=0; i( DecisionTree::CreateFromXML(ch, GetTrainingTMVAVersionCode()) ) ); fForest.back()->SetAnalysisType(Types::EAnalysisType(analysisType)); fForest.back()->SetTreeID(i++); gTools().ReadAttr(ch,"boostWeight",boostWeight); fBoostWeights.push_back(boostWeight); ch = gTools().GetNextChild(ch); } } //_______________________________________________________________________ void TMVA::MethodBDT::ReadWeightsFromStream( std::istream& istr ) { // read the weights (BDT coefficients) TString dummy; // Types::EAnalysisType analysisType; Int_t analysisType(0); // coverity[tainted_data_argument] istr >> dummy >> fNTrees; Log() << kINFO << "Read " << fNTrees << " Decision trees" << Endl; for (UInt_t i=0;i> dummy >> iTree >> dummy >> boostWeight; if (iTree != i) { fForest.back()->Print( std::cout ); Log() << kFATAL << "Error while reading weight file; mismatch iTree=" << iTree << " i=" << i << " dummy " << dummy << " boostweight " << boostWeight << Endl; } fForest.push_back( new DecisionTree() ); fForest.back()->SetAnalysisType(Types::EAnalysisType(analysisType)); fForest.back()->SetTreeID(i); fForest.back()->Read(istr, GetTrainingTMVAVersionCode()); fBoostWeights.push_back(boostWeight); } } //_______________________________________________________________________ Double_t TMVA::MethodBDT::GetMvaValue( Double_t* err, Double_t* errUpper ){ return this->GetMvaValue( err, errUpper, 0 ); } //_______________________________________________________________________ Double_t TMVA::MethodBDT::GetMvaValue( Double_t* err, Double_t* errUpper, UInt_t useNTrees ) { // Return the MVA value (range [-1;1]) that classifies the // event according to the majority vote from the total number of // decision trees. 
const Event* ev = GetEvent(); if (fDoPreselection) { Double_t val = ApplyPreselectionCuts(ev); if (TMath::Abs(val)>0.05) return val; } return PrivateGetMvaValue(ev, err, errUpper, useNTrees); } //_______________________________________________________________________ Double_t TMVA::MethodBDT::PrivateGetMvaValue(const TMVA::Event* ev, Double_t* err, Double_t* errUpper, UInt_t useNTrees ) { // Return the MVA value (range [-1;1]) that classifies the // event according to the majority vote from the total number of // decision trees. // cannot determine error NoErrorCalc(err, errUpper); // allow for the possibility to use less trees in the actual MVA calculation // than have been originally trained. UInt_t nTrees = fForest.size(); if (useNTrees > 0 ) nTrees = useNTrees; if (fBoostType=="Grad") return GetGradBoostMVA(ev,nTrees); Double_t myMVA = 0; Double_t norm = 0; for (UInt_t itree=0; itreeCheckEvent(ev,fUseYesNoLeaf); norm += fBoostWeights[itree]; } return ( norm > std::numeric_limits::epsilon() ) ? myMVA /= norm : 0 ; } //_______________________________________________________________________ const std::vector& TMVA::MethodBDT::GetMulticlassValues() { // get the multiclass MVA response for the BDT classifier const TMVA::Event *e = GetEvent(); if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector(); fMulticlassReturnVal->clear(); std::vector temp; UInt_t nClasses = DataInfo().GetNClasses(); for(UInt_t iClass=0; iClassCheckEvent(e,kFALSE); } } for(UInt_t iClass=0; iClass & TMVA::MethodBDT::GetRegressionValues() { // get the regression value generated by the BDTs if (fRegressionReturnVal == NULL) fRegressionReturnVal = new std::vector(); fRegressionReturnVal->clear(); const Event * ev = GetEvent(); Event * evT = new Event(*ev); Double_t myMVA = 0; Double_t norm = 0; if (fBoostType=="AdaBoostR2") { // rather than using the weighted average of the tree respones in the forest // H.Decker(1997) proposed to use the "weighted median" // sort all individual tree responses according to the prediction value // (keep the association to their tree weight) // the sum up all the associated weights (starting from the one whose tree // yielded the smalles response) up to the tree "t" at which you've // added enough tree weights to have more than half of the sum of all tree weights. // choose as response of the forest that one which belongs to this "t" vector< Double_t > response(fForest.size()); vector< Double_t > weight(fForest.size()); Double_t totalSumOfWeights = 0; for (UInt_t itree=0; itreeCheckEvent(ev,kFALSE); weight[itree] = fBoostWeights[itree]; totalSumOfWeights += fBoostWeights[itree]; } std::vector< std::vector > vtemp; vtemp.push_back( response ); // this is the vector that will get sorted vtemp.push_back( weight ); gTools().UsefulSortAscending( vtemp ); Int_t t=0; Double_t sumOfWeights = 0; while (sumOfWeights <= totalSumOfWeights/2.) 
{ sumOfWeights += vtemp[1][t]; t++; } Double_t rVal=0; Int_t count=0; for (UInt_t i= TMath::Max(UInt_t(0),UInt_t(t-(fForest.size()/6)-0.5)); i< TMath::Min(UInt_t(fForest.size()),UInt_t(t+(fForest.size()/6)+0.5)); i++) { count++; rVal+=vtemp[0][i]; } // fRegressionReturnVal->push_back( rVal/Double_t(count)); evT->SetTarget(0, rVal/Double_t(count) ); } else if(fBoostType=="Grad"){ for (UInt_t itree=0; itreeCheckEvent(ev,kFALSE); } // fRegressionReturnVal->push_back( myMVA+fBoostWeights[0]); evT->SetTarget(0, myMVA+fBoostWeights[0] ); } else{ for (UInt_t itree=0; itreeCheckEvent(ev,kFALSE); norm += fBoostWeights[itree]; } // fRegressionReturnVal->push_back( ( norm > std::numeric_limits::epsilon() ) ? myMVA /= norm : 0 ); evT->SetTarget(0, ( norm > std::numeric_limits::epsilon() ) ? myMVA /= norm : 0 ); } const Event* evT2 = GetTransformationHandler().InverseTransform( evT ); fRegressionReturnVal->push_back( evT2->GetTarget(0) ); delete evT; return *fRegressionReturnVal; } //_______________________________________________________________________ void TMVA::MethodBDT::WriteMonitoringHistosToFile( void ) const { // Here we could write some histograms created during the processing // to the output file. Log() << kINFO << "Write monitoring histograms to file: " << BaseDir()->GetPath() << Endl; //Results* results = Data()->GetResults(GetMethodName(), Types::kTraining, Types::kMaxAnalysisType); //results->GetStorage()->Write(); fMonitorNtuple->Write(); } //_______________________________________________________________________ vector< Double_t > TMVA::MethodBDT::GetVariableImportance() { // Return the relative variable importance, normalized to all // variables together having the importance 1. The importance in // evaluated as the total separation-gain that this variable had in // the decision trees (weighted by the number of events) fVariableImportance.resize(GetNvar()); for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) { fVariableImportance[ivar]=0; } Double_t sum=0; for (UInt_t itree = 0; itree < GetNTrees(); itree++) { std::vector relativeImportance(fForest[itree]->GetVariableImportance()); for (UInt_t i=0; i< relativeImportance.size(); i++) { fVariableImportance[i] += fBoostWeights[itree] * relativeImportance[i]; } } for (UInt_t ivar=0; ivar< fVariableImportance.size(); ivar++){ fVariableImportance[ivar] = TMath::Sqrt(fVariableImportance[ivar]); sum += fVariableImportance[ivar]; } for (UInt_t ivar=0; ivar< fVariableImportance.size(); ivar++) fVariableImportance[ivar] /= sum; return fVariableImportance; } //_______________________________________________________________________ Double_t TMVA::MethodBDT::GetVariableImportance( UInt_t ivar ) { // Returns the measure for the variable importance of variable "ivar" // which is later used in GetVariableImportance() to calculate the // relative variable importances. 
std::vector relativeImportance = this->GetVariableImportance(); if (ivar < (UInt_t)relativeImportance.size()) return relativeImportance[ivar]; else Log() << kFATAL << " ivar = " << ivar << " is out of range " << Endl; return -1; } //_______________________________________________________________________ const TMVA::Ranking* TMVA::MethodBDT::CreateRanking() { // Compute ranking of input variables // create the ranking object fRanking = new Ranking( GetName(), "Variable Importance" ); vector< Double_t> importance(this->GetVariableImportance()); for (UInt_t ivar=0; ivarAddRank( Rank( GetInputLabel(ivar), importance[ivar] ) ); } return fRanking; } //_______________________________________________________________________ void TMVA::MethodBDT::GetHelpMessage() const { // Get help message text // // typical length of text line: // "|--------------------------------------------------------------|" Log() << Endl; Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl; Log() << Endl; Log() << "Boosted Decision Trees are a collection of individual decision" << Endl; Log() << "trees which form a multivariate classifier by (weighted) majority " << Endl; Log() << "vote of the individual trees. Consecutive decision trees are " << Endl; Log() << "trained using the original training data set with re-weighted " << Endl; Log() << "events. By default, the AdaBoost method is employed, which gives " << Endl; Log() << "events that were misclassified in the previous tree a larger " << Endl; Log() << "weight in the training of the following tree." << Endl; Log() << Endl; Log() << "Decision trees are a sequence of binary splits of the data sample" << Endl; Log() << "using a single descriminant variable at a time. A test event " << Endl; Log() << "ending up after the sequence of left-right splits in a final " << Endl; Log() << "(\"leaf\") node is classified as either signal or background" << Endl; Log() << "depending on the majority type of training events in that node." << Endl; Log() << Endl; Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl; Log() << Endl; Log() << "By the nature of the binary splits performed on the individual" << Endl; Log() << "variables, decision trees do not deal well with linear correlations" << Endl; Log() << "between variables (they need to approximate the linear split in" << Endl; Log() << "the two dimensional space by a sequence of splits on the two " << Endl; Log() << "variables individually). Hence decorrelation could be useful " << Endl; Log() << "to optimise the BDT performance." << Endl; Log() << Endl; Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl; Log() << Endl; Log() << "The two most important parameters in the configuration are the " << Endl; Log() << "minimal number of events requested by a leaf node as percentage of the " < fForest; // i.e. 
root nodes of decision trees" << std::endl; fout << " std::vector fBoostWeights; // the weights applied in the individual boosts" << std::endl; fout << "};" << std::endl << std::endl; fout << "double " << className << "::GetMvaValue__( const std::vector& inputValues ) const" << std::endl; fout << "{" << std::endl; fout << " double myMVA = 0;" << std::endl; if (fDoPreselection){ for (UInt_t ivar = 0; ivar< fIsLowBkgCut.size(); ivar++){ if (fIsLowBkgCut[ivar]){ fout << " if (inputValues["< "< "<GetNodeType() == 0) { //intermediate node" << std::endl; fout << " if (current->GoesRight(inputValues)) current=("<GetRight();" << std::endl; fout << " else current=("<GetLeft();" << std::endl; fout << " }" << std::endl; if (fBoostType=="Grad"){ fout << " myMVA += current->GetResponse();" << std::endl; }else{ if (fUseYesNoLeaf) fout << " myMVA += fBoostWeights[itree] * current->GetNodeType();" << std::endl; else fout << " myMVA += fBoostWeights[itree] * current->GetPurity();" << std::endl; fout << " norm += fBoostWeights[itree];" << std::endl; } fout << " }" << std::endl; if (fBoostType=="Grad"){ fout << " return 2.0/(1.0+exp(-2.0*myMVA))-1.0;" << std::endl; } else fout << " return myMVA /= norm;" << std::endl; fout << "};" << std::endl << std::endl; fout << "void " << className << "::Initialize()" << std::endl; fout << "{" << std::endl; //Now for each decision tree, write directly the constructors of the nodes in the tree structure for (UInt_t itree=0; itreeMakeClassInstantiateNode((DecisionTreeNode*)fForest[itree]->GetRoot(), fout, className); fout <<" );" << std::endl; } fout << " return;" << std::endl; fout << "};" << std::endl; fout << " " << std::endl; fout << "// Clean up" << std::endl; fout << "inline void " << className << "::Clear() " << std::endl; fout << "{" << std::endl; fout << " for (unsigned int itree=0; itree& inputValues ) const;" << std::endl; fout << " "<& inputValues ) const;" << std::endl; fout << " "< fFisherCoeff; // the fisher coeff (offset at the last element)" << std::endl; } fout << " int fSelector; // index of variable used in node selection (decision tree) " << std::endl; fout << " double fCutValue; // cut value appplied on this node to discriminate bkg against sig" << std::endl; fout << " bool fCutType; // true: if event variable > cutValue ==> signal , false otherwise" << std::endl; fout << " int fNodeType; // Type of node: -1 == Bkg-leaf, 1 == Signal-leaf, 0 = internal " << std::endl; fout << " double fPurity; // Purity of node from training"<< std::endl; fout << " double fResponse; // Regression response value of node" << std::endl; fout << "}; " << std::endl; fout << " " << std::endl; fout << "//_______________________________________________________________________" << std::endl; fout << " "<& inputValues ) const" << std::endl; fout << "{" << std::endl; fout << " // test event if it decends the tree at this node to the right" << std::endl; fout << " bool result;" << std::endl; if (fUseFisherCuts){ fout << " if (fNFisherCoeff == 0){" << std::endl; fout << " result = (inputValues[fSelector] > fCutValue );" << std::endl; fout << " }else{" << std::endl; fout << " double fisher = fFisherCoeff.at(fFisherCoeff.size()-1);" << std::endl; fout << " for (unsigned int ivar=0; ivar fCutValue;" << std::endl; fout << " }" << std::endl; }else{ fout << " result = (inputValues[fSelector] > fCutValue );" << std::endl; } fout << " if (fCutType == true) return result; //the cuts are selecting Signal ;" << std::endl; fout << " else return !result;" << std::endl; fout << "}" << std::endl; 
fout << " " << std::endl; fout << "//_______________________________________________________________________" << std::endl; fout << "bool "<& inputValues ) const" << std::endl; fout << "{" << std::endl; fout << " // test event if it decends the tree at this node to the left" << std::endl; fout << " if (!this->GoesRight(inputValues)) return true;" << std::endl; fout << " else return false;" << std::endl; fout << "}" << std::endl; fout << " " << std::endl; fout << "#endif" << std::endl; fout << " " << std::endl; } //_______________________________________________________________________ void TMVA::MethodBDT::MakeClassInstantiateNode( DecisionTreeNode *n, std::ostream& fout, const TString& className ) const { // recursively descends a tree and writes the node instance to the output streem if (n == NULL) { Log() << kFATAL << "MakeClassInstantiateNode: started with undefined node" <GetLeft() != NULL){ this->MakeClassInstantiateNode( (DecisionTreeNode*)n->GetLeft() , fout, className); } else { fout << "0"; } fout << ", " <GetRight() != NULL){ this->MakeClassInstantiateNode( (DecisionTreeNode*)n->GetRight(), fout, className ); } else { fout << "0"; } fout << ", " << std::endl << std::setprecision(6); if (fUseFisherCuts){ fout << n->GetNFisherCoeff() << ", "; for (UInt_t i=0; i< GetNVariables()+1; i++) { if (n->GetNFisherCoeff() == 0 ){ fout << "0, "; }else{ fout << n->GetFisherCoeff(i) << ", "; } } } fout << n->GetSelector() << ", " << n->GetCutValue() << ", " << n->GetCutType() << ", " << n->GetNodeType() << ", " << n->GetPurity() << "," << n->GetResponse() << ") "; } //_______________________________________________________________________ void TMVA::MethodBDT::DeterminePreselectionCuts(const std::vector& eventSample) { // find useful preselection cuts that will be applied before // and Decision Tree training.. (and of course also applied // in the GetMVA .. 
--> -1 for background +1 for Signal // /* Double_t nTotS = 0.0, nTotB = 0.0; Int_t nTotS_unWeighted = 0, nTotB_unWeighted = 0; std::vector bdtEventSample; fIsLowSigCut.assign(GetNvar(),kFALSE); fIsLowBkgCut.assign(GetNvar(),kFALSE); fIsHighSigCut.assign(GetNvar(),kFALSE); fIsHighBkgCut.assign(GetNvar(),kFALSE); fLowSigCut.assign(GetNvar(),0.); // ---------------| --> in var is signal (accept all above lower cut) fLowBkgCut.assign(GetNvar(),0.); // ---------------| --> in var is bkg (accept all above lower cut) fHighSigCut.assign(GetNvar(),0.); // <-- | -------------- in var is signal (accept all blow cut) fHighBkgCut.assign(GetNvar(),0.); // <-- | -------------- in var is blg (accept all blow cut) // Initialize (un)weighted counters for signal & background // Construct a list of event wrappers that point to the original data for( std::vector::const_iterator it = eventSample.begin(); it != eventSample.end(); ++it ) { if (DataInfo().IsSignal(*it)){ nTotS += (*it)->GetWeight(); ++nTotS_unWeighted; } else { nTotB += (*it)->GetWeight(); ++nTotB_unWeighted; } bdtEventSample.push_back(TMVA::BDTEventWrapper(*it)); } for( UInt_t ivar = 0; ivar < GetNvar(); ivar++ ) { // loop over all discriminating variables TMVA::BDTEventWrapper::SetVarIndex(ivar); // select the variable to sort by std::sort( bdtEventSample.begin(),bdtEventSample.end() ); // sort the event data Double_t bkgWeightCtr = 0.0, sigWeightCtr = 0.0; std::vector::iterator it = bdtEventSample.begin(), it_end = bdtEventSample.end(); for( ; it != it_end; ++it ) { if (DataInfo().IsSignal(**it)) sigWeightCtr += (**it)->GetWeight(); else bkgWeightCtr += (**it)->GetWeight(); // Store the accumulated signal (background) weights it->SetCumulativeWeight(false,bkgWeightCtr); it->SetCumulativeWeight(true,sigWeightCtr); } //variable that determines how "exact" you cut on the preslection found in the training data. Here I chose //1% of the variable range... Double_t dVal = (DataInfo().GetVariableInfo(ivar).GetMax() - DataInfo().GetVariableInfo(ivar).GetMin())/100. ; Double_t nSelS, nSelB, effS=0.05, effB=0.05, rejS=0.05, rejB=0.05; Double_t tmpEffS, tmpEffB, tmpRejS, tmpRejB; // Locate the optimal cut for this (ivar-th) variable for(UInt_t iev = 1; iev < bdtEventSample.size(); iev++) { //dVal = bdtEventSample[iev].GetVal() - bdtEventSample[iev-1].GetVal(); nSelS = bdtEventSample[iev].GetCumulativeWeight(true); nSelB = bdtEventSample[iev].GetCumulativeWeight(false); // you look for some 100% efficient pre-selection cut to remove background.. i.e. nSelS=0 && nSelB>5%nTotB or ( nSelB=0 nSelS>5%nTotS) tmpEffS=nSelS/nTotS; tmpEffB=nSelB/nTotB; tmpRejS=1-tmpEffS; tmpRejB=1-tmpEffB; if (nSelS==0 && tmpEffB>effB) {effB=tmpEffB; fLowBkgCut[ivar] = bdtEventSample[iev].GetVal() - dVal; fIsLowBkgCut[ivar]=kTRUE;} else if (nSelB==0 && tmpEffS>effS) {effS=tmpEffS; fLowSigCut[ivar] = bdtEventSample[iev].GetVal() - dVal; fIsLowSigCut[ivar]=kTRUE;} else if (nSelB==nTotB && tmpRejS>rejS) {rejS=tmpRejS; fHighSigCut[ivar] = bdtEventSample[iev].GetVal() + dVal; fIsHighSigCut[ivar]=kTRUE;} else if (nSelS==nTotS && tmpRejB>rejB) {rejB=tmpRejB; fHighBkgCut[ivar] = bdtEventSample[iev].GetVal() + dVal; fIsHighBkgCut[ivar]=kTRUE;} } } Log() << kINFO << " found and suggest the following possible pre-selection cuts " << Endl; if (fDoPreselection) Log() << kINFO << "the training will be done after these cuts... 
and GetMVA value returns +1, (-1) for a signal (bkg) event that passes these cuts" << Endl; else Log() << kINFO << "as option DoPreselection was not used, these cuts however will not be performed, but the training will see the full sample"< " << fHighBkgCut[ivar] << Endl; } if (fIsHighSigCut[ivar]){ Log() << kINFO << " found cut: Sig if var " << ivar << " > " << fHighSigCut[ivar] << Endl; } } return; } //_______________________________________________________________________ Double_t TMVA::MethodBDT::ApplyPreselectionCuts(const Event* ev) { // aply the preselection cuts before even bothing about any // Decision Trees in the GetMVA .. --> -1 for background +1 for Signal Double_t result=0; for (UInt_t ivar=0; ivar < GetNvar(); ivar++ ) { // loop over all discriminating variables if (fIsLowBkgCut[ivar]){ if (ev->GetValue(ivar) < fLowBkgCut[ivar]) result = -1; // is background } if (fIsLowSigCut[ivar]){ if (ev->GetValue(ivar) < fLowSigCut[ivar]) result = 1; // is signal } if (fIsHighBkgCut[ivar]){ if (ev->GetValue(ivar) > fHighBkgCut[ivar]) result = -1; // is background } if (fIsHighSigCut[ivar]){ if (ev->GetValue(ivar) > fHighSigCut[ivar]) result = 1; // is signal } } return result; }