
source: git/modules/VertexFinderDA4D.cc @ 2264876

Last change on this file since 2264876 was d97b2af, checked in by Michele Selvaggi <michele.selvaggi@…>, 8 years ago: removed couts

/** \class VertexFinderDA4D
 *
 *  Cluster vertices from tracks using deterministic annealing and timing information
 *
 *  \authors M. Selvaggi, L. Gray
 *
 */


#include "modules/VertexFinderDA4D.h"
#include "classes/DelphesClasses.h"
#include "classes/DelphesFactory.h"
#include "classes/DelphesFormula.h"
#include "classes/DelphesPileUpReader.h"

#include "ExRootAnalysis/ExRootResult.h"
#include "ExRootAnalysis/ExRootFilter.h"
#include "ExRootAnalysis/ExRootClassifier.h"

#include "TMath.h"
#include "TString.h"
#include "TFormula.h"
#include "TRandom3.h"
#include "TObjArray.h"
#include "TDatabasePDG.h"
#include "TLorentzVector.h"
#include "TMatrixT.h"
#include "TVector3.h"

static const Double_t mm = 1.;
static const Double_t m = 1000.*mm;
static const Double_t ns = 1.;
static const Double_t s = 1.e+9 *ns;
static const Double_t c_light = 2.99792458e+8 * m/s;
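
// Note on units: with the definitions above c_light is expressed in mm/ns
// (~299.79). Candidate positions and times are stored in mm (times enter the
// four-vectors multiplied by c_light), while the clustering below works
// internally in cm for z (DZ/10) and in ns for t (T()/c_light).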


namespace {
  bool recTrackLessZ1(const VertexFinderDA4D::track_t & tk1,
                      const VertexFinderDA4D::track_t & tk2)
  {
    return tk1.z < tk2.z;
  }
}

using namespace std;

//------------------------------------------------------------------------------

VertexFinderDA4D::VertexFinderDA4D() :
  fVerbose(0), fMinPT(0), fVertexSpaceSize(0), fVertexTimeSize(0),
  fUseTc(0), fBetaMax(0), fBetaStop(0), fCoolingFactor(0),
  fMaxIterations(0), fDzCutOff(0), fD0CutOff(0), fDtCutOff(0)
{
}

//------------------------------------------------------------------------------

VertexFinderDA4D::~VertexFinderDA4D()
{
}

//------------------------------------------------------------------------------

void VertexFinderDA4D::Init()
{

  fVerbose = GetBool("Verbose", 1);
  fMinPT = GetDouble("MinPT", 0.1);
  fVertexSpaceSize = GetDouble("VertexSpaceSize", 0.5); // in mm
  fVertexTimeSize = GetDouble("VertexTimeSize", 10E-12); // in s
  fUseTc = GetBool("UseTc", 1);
  fBetaMax = GetDouble("BetaMax", 0.1);
  fBetaStop = GetDouble("BetaStop", 1.0);
  fCoolingFactor = GetDouble("CoolingFactor", 0.8);
  fMaxIterations = GetInt("MaxIterations", 100);
  fDzCutOff = GetDouble("DzCutOff", 40); // Adaptive Fitter uses 30 mm but that appears to be a bit tight here sometimes
  fD0CutOff = GetDouble("D0CutOff", 30);
  fDtCutOff = GetDouble("DtCutOff", 100E-12); // dummy

  // convert stuff in cm, ns
  fVertexSpaceSize /= 10.0;
  fVertexTimeSize *= 1E9;
  fDzCutOff /= 10.0; // Adaptive Fitter uses 3.0 but that appears to be a bit tight here sometimes
  fD0CutOff /= 10.0;

  fInputArray = ImportArray(GetString("InputArray", "TrackSmearing/tracks"));
  fItInputArray = fInputArray->MakeIterator();

  fOutputArray = ExportArray(GetString("OutputArray", "tracks"));
  fVertexOutputArray = ExportArray(GetString("VertexOutputArray", "vertices"));
}
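
// Example of how this module is typically steered from a Delphes card
// (illustrative sketch only; the instance name and the array names below are
// simply the defaults read in Init() and may differ in a real configuration):
//
//   module VertexFinderDA4D VertexFinderDA4D {
//     set InputArray TrackSmearing/tracks
//     set OutputArray tracks
//     set VertexOutputArray vertices
//     set MinPT 0.1
//     set VertexSpaceSize 0.5
//     set VertexTimeSize 10E-12
//     set BetaMax 0.1
//     set BetaStop 1.0
//     set CoolingFactor 0.8
//     set MaxIterations 100
//     set DzCutOff 40
//     set D0CutOff 30
//   }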

//------------------------------------------------------------------------------

void VertexFinderDA4D::Finish()
{
  if(fItInputArray) delete fItInputArray;
}

//------------------------------------------------------------------------------

void VertexFinderDA4D::Process()
{
  Candidate *candidate, *track;
  TObjArray *ClusterArray;
  ClusterArray = new TObjArray;
  TIterator *ItClusterArray;
  Int_t ivtx = 0;

  fInputArray->Sort();

  TLorentzVector pos, mom;
  if (fVerbose)
  {
    cout<<" start processing vertices ..."<<endl;
    cout<<" Found "<<fInputArray->GetEntriesFast()<<" input tracks"<<endl;
    //loop over input tracks
    fItInputArray->Reset();
    while((candidate = static_cast<Candidate*>(fItInputArray->Next())))
    {
      pos = candidate->InitialPosition;
      mom = candidate->Momentum;

      cout<<"pt: "<<mom.Pt()<<", eta: "<<mom.Eta()<<", phi: "<<mom.Phi()<<", z: "<<candidate->DZ/10<<endl;
    }
  }

  // clusterize tracks in Z
  clusterize(*fInputArray, *ClusterArray);

  if (fVerbose){std::cout << " clustering returned "<< ClusterArray->GetEntriesFast() << " clusters from " << fInputArray->GetEntriesFast() << " selected tracks" <<std::endl;}

  //loop over vertex candidates
  ItClusterArray = ClusterArray->MakeIterator();
  ItClusterArray->Reset();
  while((candidate = static_cast<Candidate*>(ItClusterArray->Next())))
  {

    double meantime = 0.;
    double expv_x2 = 0.;
    double normw = 0.;
    double errtime = 0;

    double meanpos = 0.;
    double meanerr2 = 0.;
    double normpos = 0.;
    double errpos = 0.;

    double sumpt2 = 0.;

    int itr = 0;

    if(fVerbose)cout<<"this vertex has: "<<candidate->GetCandidates()->GetEntriesFast()<<" tracks"<<endl;

    // loop over tracks belonging to this vertex
    TIter it1(candidate->GetCandidates());
    it1.Reset();

    while((track = static_cast<Candidate*>(it1.Next())))
    {

      itr++;
      // TBC: the time is in ns for now TBC
      double t = track->InitialPosition.T()/c_light;
      double dt = track->ErrorT/c_light;
      const double time = t;
      const double inverr = 1.0/dt;
      meantime += time*inverr;
      expv_x2 += time*time*inverr;
      normw += inverr;

      // compute error position TBC
      const double pt = track->Momentum.Pt();
      const double z = track->DZ/10.0;
      const double err_pt = track->ErrorPT;
      const double err_z = track->ErrorDZ;

      const double wi = (pt/(err_pt*err_z))*(pt/(err_pt*err_z));
      meanpos += z*wi;

      meanerr2 += err_z*err_z*wi;
      normpos += wi;
      sumpt2 += pt*pt;

      // while we are here store cluster index in tracks
      track->ClusterIndex = ivtx;
    }
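
    // The vertex time below is the mean of the track times weighted by
    // 1/sigma_t, with uncertainty sqrt(weighted variance/itr); the vertex z is
    // the mean of the track z weighted by w_i = (pT_i/(sigma_pT,i*sigma_z,i))^2,
    // with uncertainty sqrt(<sigma_z^2>_w/itr).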

    meantime = meantime/normw;
    expv_x2 = expv_x2/normw;
    errtime = TMath::Sqrt((expv_x2 - meantime*meantime)/itr);

    meanpos = meanpos/normpos;
    meanerr2 = meanerr2/normpos;
    errpos = TMath::Sqrt(meanerr2/itr);

    candidate->Position.SetXYZT(0.0, 0.0, meanpos*10.0 , meantime*c_light);
    candidate->PositionError.SetXYZT(0.0, 0.0, errpos*10.0 , errtime*c_light);
    candidate->SumPT2 = sumpt2;
    candidate->ClusterNDF = itr;
    candidate->ClusterIndex = ivtx;

    fVertexOutputArray->Add(candidate);

    ivtx++;

    if (fVerbose){
      std::cout << "x,y,z";
      std::cout << ",t";
      std::cout << "=" << candidate->Position.X()/10.0 <<" " << candidate->Position.Y()/10.0 << " " << candidate->Position.Z()/10.0;
      std::cout << " " << candidate->Position.T()/c_light;

      std::cout << std::endl;
      std::cout << "sumpt2 " << candidate->SumPT2<<endl;
    }
  }// end of cluster loop


  if(fVerbose){
    std::cout << "PrimaryVertexProducerAlgorithm::vertices candidates =" << ClusterArray->GetEntriesFast() << std::endl;
  }

  //TBC maybe this can be done later
  // sort vertices by pt**2 vertex (aka signal vertex tagging)
  /*if(pvs.size()>1){
    sort(pvs.begin(), pvs.end(), VertexHigherPtSquared());
  }
  */

  delete ClusterArray;

}

//------------------------------------------------------------------------------


void VertexFinderDA4D::clusterize(const TObjArray & tracks, TObjArray & clusters)
{
  if(fVerbose) {
    cout << "###################################################" << endl;
    cout << "# VertexFinderDA4D::clusterize nt="<<tracks.GetEntriesFast() << endl;
    cout << "###################################################" << endl;
  }

  vector< Candidate* > pv = vertices();

  if(fVerbose){ cout << "# VertexFinderDA4D::clusterize pv.size="<<pv.size() << endl; }
  if (pv.size()==0){ return; }

  // convert into vector of candidates
  //TObjArray *ClusterArray = pv.begin()->GetCandidates();
  //Candidate *aCluster = static_cast<Candidate*>(&(pv.at(0)));
  Candidate *aCluster = pv.at(0);

  // fill into clusters and merge


  if( fVerbose ) {
    std::cout << '\t' << 0;
    std::cout << ' ' << (*pv.begin())->Position.Z()/10.0 << ' ' << (*pv.begin())->Position.T()/c_light << std::endl;
  }

  for(vector<Candidate*>::iterator k=pv.begin()+1; k!=pv.end(); k++){
    if( fVerbose ) {
      std::cout << '\t' << std::distance(pv.begin(),k);
      std::cout << ' ' << (*k)->Position.Z() << ' ' << (*k)->Position.T() << std::endl;
    }


    // TBC - check units here
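    // A new cluster is started whenever two consecutive prototypes returned by
    // vertices() are separated by more than 2*fVertexSpaceSize in z or by more
    // than 20 ps (2*0.010 ns) in t.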
    if ( std::abs((*k)->Position.Z() - (*(k-1))->Position.Z())/10.0 > (2*fVertexSpaceSize) ||
         std::abs((*k)->Position.T() - (*(k-1))->Position.T())/c_light > 2*0.010 ) {
      // close a cluster
      clusters.Add(aCluster);
      //aCluster.clear();
    }
    //for(unsigned int i=0; i<k->GetCandidates().GetEntriesFast(); i++){
    aCluster = *k;
    //}

  }
  clusters.Add(aCluster);

  if(fVerbose) { std::cout << "# VertexFinderDA4D::clusterize clusters.size="<<clusters.GetEntriesFast() << std::endl; }

}


//------------------------------------------------------------------------------

vector< Candidate* > VertexFinderDA4D::vertices()
{
  Candidate *candidate;
  UInt_t clusterIndex = 0;
  vector< Candidate* > clusters;

  vector<track_t> tks = fill();

  //print out input tracks

  if(fVerbose)
  {
    std::cout<<" start processing vertices ..."<<std::endl;
    std::cout<<" Found "<<tks.size()<<" input tracks"<<std::endl;
    //loop over input tracks


    for(std::vector<track_t>::const_iterator it=tks.begin(); it!=tks.end(); it++){
      double z = it->z;
      double pt=it->pt;
      double eta=it->eta;
      double phi=it->phi;
      double t = it->t;

      std::cout<<"pt: "<<pt<<", eta: "<<eta<<", phi: "<<phi<<", z: "<<z<<", t: "<<t<<std::endl;
    }
  }

  unsigned int nt=tks.size();
  double rho0=0.0; // start with no outlier rejection

  if (tks.empty()) return clusters;

  vector<vertex_t> y; // the vertex prototypes

  // initialize: single vertex at infinite temperature
  vertex_t vstart;
  vstart.z=0.;
  vstart.t=0.;
  vstart.pk=1.;
  y.push_back(vstart);
  int niter=0; // number of iterations

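  // Outline of the annealing schedule implemented below: start just above the
  // first critical temperature (beta0), then repeatedly cool by fCoolingFactor,
  // re-equilibrating with update() at each step and splitting prototypes
  // (split() for critical ones when UseTc is set, otherwise splitAll()) until
  // beta reaches fBetaMax; then switch on outlier rejection (rho0>0), merge
  // collapsed prototypes, and keep cooling to fBetaStop while purging
  // insignificant vertices.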
  // estimate first critical temperature
  double beta=beta0(fBetaMax, tks, y);
  niter=0; while((update(beta, tks,y)>1.e-6) && (niter++ < fMaxIterations)){ }

  // annealing loop, stop when T<Tmin (i.e. beta>1/Tmin)
  while(beta<fBetaMax){

    if(fUseTc){
      update(beta, tks,y);
      while(merge(y,beta)){update(beta, tks,y);}
      split(beta, tks,y);
      beta=beta/fCoolingFactor;
    }else{
      beta=beta/fCoolingFactor;
      splitAll(y);
    }

    // make sure we are not too far from equilibrium before cooling further
    niter=0; while((update(beta, tks,y)>1.e-6) && (niter++ < fMaxIterations)){ }

  }

  if(fUseTc){
    // last round of splitting, make sure no critical clusters are left
    update(beta, tks,y);
    while(merge(y,beta)){update(beta, tks,y);}
    unsigned int ntry=0;
    while( split(beta, tks,y) && (ntry++<10) ){
      niter=0;
      while((update(beta, tks,y)>1.e-6) && (niter++ < fMaxIterations)){}
      merge(y,beta);
      update(beta, tks,y);
    }
  }else{
    // merge collapsed clusters
    while(merge(y,beta)){update(beta, tks,y);}
    if(fVerbose ){ cout << "dump after 1st merging " << endl; dump(beta,y,tks);}
  }

  // switch on outlier rejection
  rho0=1./nt; for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){ k->pk =1.; } // democratic
  niter=0; while((update(beta, tks,y,rho0) > 1.e-8) && (niter++ < fMaxIterations)){ }
  if(fVerbose ){ cout << "rho0=" << rho0 << " niter=" << niter << endl; dump(beta,y,tks);}


  // merge again (some clusters split by outliers collapse here)
  while(merge(y)){}
  if(fVerbose ){ cout << "dump after 2nd merging " << endl; dump(beta,y,tks);}


  // continue from freeze-out to Tstop (=1) without splitting, eliminate insignificant vertices
  while(beta<=fBetaStop){
    while(purge(y,tks,rho0, beta)){
      niter=0; while((update(beta, tks, y, rho0) > 1.e-6) && (niter++ < fMaxIterations)){ }
    }
    beta/=fCoolingFactor;
    niter=0; while((update(beta, tks, y, rho0) > 1.e-6) && (niter++ < fMaxIterations)){ }
  }


//  // new, one last round of cleaning at T=Tstop
//  while(purge(y,tks,rho0, beta)){
//    niter=0; while((update(beta, tks,y,rho0) > 1.e-6) && (niter++ < fMaxIterations)){ }
//  }


  if(fVerbose){
    cout << "Final result, rho0=" << rho0 << endl;
    dump(beta,y,tks);
  }


  // select significant tracks and use a TransientVertex as a container
  //GlobalError dummyError;

  // ensure correct normalization of probabilities, should make double assignment reasonably impossible
  for(unsigned int i=0; i<nt; i++){
    tks[i].Z=rho0*exp(-beta*( fDzCutOff*fDzCutOff));
    for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
      tks[i].Z += k->pk * exp(-beta*Eik(tks[i],*k));
    }
  }
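
  // In the candidate-building loop below a track i is attached to vertex k if
  // its assignment probability p_ik = p_k*exp(-beta*Eik)/Z_i exceeds 0.5,
  // where Z_i (computed above) includes the outlier term; setting Z=0 after
  // the first assignment guarantees each track is used only once.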

  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){

    DelphesFactory *factory = GetFactory();
    candidate = factory->NewCandidate();

    //cout<<"new vertex"<<endl;
    //GlobalPoint pos(0, 0, k->z);
    double time = k->t;
    double z = k->z;
    //vector< reco::TransientTrack > vertexTracks;
    //double max_track_time_err2 = 0;
    double mean = 0.;
    double expv_x2 = 0.;
    double normw = 0.;
    for(unsigned int i=0; i<nt; i++){
      const double invdt = 1.0/std::sqrt(tks[i].dt2);
      if(tks[i].Z>0){
        double p = k->pk * exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
        if( (tks[i].pi>0) && ( p > 0.5 ) ){
          //std::cout << "pushing back " << i << ' ' << tks[i].tt << std::endl;
          //vertexTracks.push_back(*(tks[i].tt)); tks[i].Z=0;

          candidate->AddCandidate(tks[i].tt); tks[i].Z=0;

          mean += tks[i].t*invdt*p;
          expv_x2 += tks[i].t*tks[i].t*invdt*p;
          normw += invdt*p;
        } // setting Z=0 excludes double assignment
      }
    }

    mean = mean/normw;
    expv_x2 = expv_x2/normw;
    const double time_var = expv_x2 - mean*mean;
    const double crappy_error_guess = std::sqrt(time_var);
    /*GlobalError dummyErrorWithTime(0,
                                     0,0,
                                     0,0,0,
                                     0,0,0,crappy_error_guess);*/
    //TransientVertex v(pos, time, dummyErrorWithTime, vertexTracks, 5);


    candidate->ClusterIndex = clusterIndex++;
    candidate->Position.SetXYZT(0.0, 0.0, z*10.0 , time*c_light);

    // TBC - fill error later ...
    candidate->PositionError.SetXYZT(0.0, 0.0, 0.0 , crappy_error_guess*c_light);

    clusters.push_back(candidate);
  }


  return clusters;

}

//------------------------------------------------------------------------------

vector<VertexFinderDA4D::track_t>
VertexFinderDA4D::fill()const{

  Candidate *candidate;
  track_t tr;
  Double_t z, dz, t, l, dt, d0, d0error;

  // prepare track data for clustering
  vector< track_t > tks;

  // loop over input tracks
  fItInputArray->Reset();
  while((candidate = static_cast<Candidate*>(fItInputArray->Next())))
  {
    //TBC everything in cm
    z = candidate->DZ/10;
    tr.z = z;
    dz = candidate->ErrorDZ/10;
    tr.dz2 = dz*dz // track error
      // TBC: beamspot size induced error, take 0 for now.
      // + (std::pow(beamspot.BeamWidthX()*cos(phi),2.)+std::pow(beamspot.BeamWidthY()*sin(phi),2.))/std::pow(tantheta,2.) // beam-width induced
      + fVertexSpaceSize*fVertexSpaceSize; // intrinsic vertex size, safer for outliers and short lived decays

    // TBC: the time is in ns for now TBC
    //t = candidate->Position.T()/c_light;
    t = candidate->InitialPosition.T()/c_light;
    l = candidate->L/c_light;

    double pt = candidate->Momentum.Pt();
    double eta = candidate->Momentum.Eta();
    double phi = candidate->Momentum.Phi();

    tr.pt = pt;
    tr.eta = eta;
    tr.phi = phi;
    tr.t = t; //
    tr.dtz = 0.;
    dt = candidate->ErrorT/c_light;
    tr.dt2 = dt*dt + fVertexTimeSize*fVertexTimeSize; // the ~injected~ timing error plus a small minimum vertex size in time
    if (fD0CutOff>0){

      d0 = TMath::Abs(candidate->D0)/10.0;
      d0error = candidate->ErrorD0/10.0;

      tr.pi=1./(1.+exp((d0*d0)/(d0error*d0error) - fD0CutOff*fD0CutOff)); // reduce weight for high ip tracks

    }
    else{
      tr.pi=1.;
    }
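    // The weight above is a logistic suppression of large impact parameter
    // tracks: pi drops to 0.5 when the d0 significance d0/sigma_d0 equals
    // fD0CutOff and falls towards 0 well beyond it.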
    tr.tt=&(*candidate);
    tr.Z=1.;

    // TBC now putting track selection here (> fMinPT)
    if( tr.pi > 1e-3 && tr.pt > fMinPT) {
      tks.push_back(tr);
    }

  }

  return tks;
}




//------------------------------------------------------------------------------

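// Eik is the error-normalized squared distance between track i and prototype k
// in (z,t):  Eik = (z_i - z_k)^2/dz2_i + (t_i - t_k)^2/dt2_i.
// It acts as the assignment energy of the annealing, entering the Boltzmann
// factors exp(-beta*Eik) used throughout this file.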
double VertexFinderDA4D::Eik(const track_t & t, const vertex_t &k ) const {
  return std::pow(t.z-k.z,2.)/t.dz2 + std::pow(t.t - k.t,2.)/t.dt2;
}

//--------------------------------------------------------------------------------

void VertexFinderDA4D::dump(const double beta, const vector<vertex_t> & y, const vector<track_t> & tks0)const{

  // copy and sort for nicer printout
  vector<track_t> tks;
  for(vector<track_t>::const_iterator t=tks0.begin(); t!=tks0.end(); t++){tks.push_back(*t); }
  std::stable_sort(tks.begin(), tks.end(), recTrackLessZ1);

  cout << "-----DAClusterizerInZT::dump ----" << endl;
  cout << " beta=" << beta << " betamax= " << fBetaMax << endl;
  cout << " z= ";
  cout.precision(4);
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    //cout << setw(8) << fixed << k->z;
  }
  cout << endl << " t= ";
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    //cout << setw(8) << fixed << k->t;
  }
  //cout << endl << "T=" << setw(15) << 1./beta <<" Tc= ";
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    //cout << setw(8) << fixed << k->Tc ;
  }

  cout << endl << " pk=";
  double sumpk=0;
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    //cout << setw(8) << setprecision(3) << fixed << k->pk;
    sumpk+=k->pk;
  }
  cout << endl;

  if(fVerbose){
    double E=0, F=0;
    cout << endl;
    cout << "---- z +/- dz t +/- dt ip +/-dip pt phi eta weights ----" << endl;
    cout.precision(4);
    for(unsigned int i=0; i<tks.size(); i++){
      if (tks[i].Z>0){ F-=log(tks[i].Z)/beta;}
      double tz= tks[i].z;
      double tt= tks[i].t;
      //cout << setw (3)<< i << ")" << setw (8) << fixed << setprecision(4)<< tz << " +/-" << setw (6)<< sqrt(tks[i].dz2)
      //     << setw(8) << fixed << setprecision(4) << tt << " +/-" << setw(6) << std::sqrt(tks[i].dt2) ;

      double sump=0.;
      for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
        if((tks[i].pi>0)&&(tks[i].Z>0)){
          //double p=pik(beta,tks[i],*k);
          double p=k->pk * std::exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
          if( p > 0.0001){
            //cout << setw (8) << setprecision(3) << p;
          }else{
            cout << " . ";
          }
          E+=p*Eik(tks[i],*k);
          sump+=p;
        }else{
          cout << " ";
        }
      }
      cout << endl;
    }
    cout << endl << "T=" << 1/beta << " E=" << E << " n="<< y.size() << " F= " << F << endl << "----------" << endl;
  }
}




//------------------------------------------------------------------------------


double VertexFinderDA4D::update( double beta,
                                 vector<track_t> & tks,
                                 vector<vertex_t> & y ) const {
  //update weights and vertex positions
  // mass constrained annealing without noise
  // returns the squared sum of changes of vertex positions
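  //
  // Assignment probabilities take the standard deterministic-annealing form
  //   p_ik = p_k * exp(-beta*Eik) / Z_i,  with  Z_i = sum_k p_k * exp(-beta*Eik).
  // Each prototype is moved to the mean of the track (z,t) weighted by
  // pi_i * p_ik / (dz2_i * dt2_i), its weight is updated as
  //   p_k <- p_k * sum_i pi_i*exp(-beta*Eik)/Z_i / sum_i pi_i,
  // and Tc = 2*swE/sw estimates its critical (splitting) temperature.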

  unsigned int nt=tks.size();

  //initialize sums
  double sumpi=0;
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); ++k){
    k->sw=0.; k->swz=0.; k->swt = 0.; k->se=0.;
    k->swE=0.; k->Tc=0.;
  }


  // loop over tracks
  for(unsigned int i=0; i<nt; i++){

    // update pik and Zi
    double Zi = 0.;
    for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); ++k){
      k->ei = std::exp(-beta*Eik(tks[i],*k)); // cache exponential for one track at a time
      Zi += k->pk * k->ei;
    }
    tks[i].Z=Zi;

    // normalization for pk
    if (tks[i].Z>0){
      sumpi += tks[i].pi;
      // accumulate weighted z and weights for vertex update
      for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); ++k){
        k->se += tks[i].pi* k->ei / Zi;
        const double w = k->pk * tks[i].pi* k->ei / ( Zi * ( tks[i].dz2 * tks[i].dt2 ) );
        k->sw += w;
        k->swz += w * tks[i].z;
        k->swt += w * tks[i].t;
        k->swE += w * Eik(tks[i],*k);
      }
    }else{
      sumpi += tks[i].pi;
    }


  } // end of track loop


  // now update z and pk
  double delta=0;
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    if ( k->sw > 0){
      const double znew = k->swz/k->sw;
      const double tnew = k->swt/k->sw;
      delta += std::pow(k->z-znew,2.) + std::pow(k->t-tnew,2.);
      k->z = znew;
      k->t = tnew;
      k->Tc = 2.*k->swE/k->sw;
    }else{
      if(fVerbose){cout << " a cluster melted away ? pk=" << k->pk << " sumw=" << k->sw << endl;}
      k->Tc=-1;
    }

    k->pk = k->pk * k->se / sumpi;
  }

  // return how much the prototypes moved
  return delta;
}

//------------------------------------------------------------------------------

double VertexFinderDA4D::update( double beta,
                                 vector<track_t> & tks,
                                 vector<vertex_t> & y,
                                 double & rho0 ) const {
  // MVF style, no more vertex weights, update track weights and vertex positions, with noise
  // returns the squared sum of changes of vertex positions

  unsigned int nt=tks.size();

  //initialize sums
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    k->sw = 0.; k->swz = 0.; k->swt = 0.; k->se = 0.;
    k->swE = 0.; k->Tc=0.;
  }


  // loop over tracks
  for(unsigned int i=0; i<nt; i++){

    // update pik and Zi and Ti
    double Zi = rho0*std::exp(-beta*(fDzCutOff*fDzCutOff)); // cut-off (eventually add finite size in time)
    //double Ti = 0.; // dt0*std::exp(-beta*fDtCutOff);
    for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
      k->ei = std::exp(-beta*Eik(tks[i],*k)); // cache exponential for one track at a time
      Zi += k->pk * k->ei;
    }
    tks[i].Z=Zi;

    // normalization
    if (tks[i].Z>0){
      // accumulate weighted z and weights for vertex update
      for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
        k->se += tks[i].pi* k->ei / Zi;
        double w = k->pk * tks[i].pi * k->ei /( Zi * ( tks[i].dz2 * tks[i].dt2 ) );
        k->sw += w;
        k->swz += w * tks[i].z;
        k->swt += w * tks[i].t;
        k->swE += w * Eik(tks[i],*k);
      }
    }

  } // end of track loop

  // now update z
  double delta=0;
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    if ( k->sw > 0){
      const double znew=k->swz/k->sw;
      const double tnew=k->swt/k->sw;
      delta += std::pow(k->z-znew,2.) + std::pow(k->t-tnew,2.);
      k->z = znew;
      k->t = tnew;
      k->Tc = 2*k->swE/k->sw;
    }else{
      if(fVerbose){cout << " a cluster melted away ? pk=" << k->pk << " sumw=" << k->sw << endl;}
      k->Tc = 0;
    }

  }

  // return how much the prototypes moved
  return delta;
}

//------------------------------------------------------------------------------

bool VertexFinderDA4D::merge(vector<vertex_t> & y)const{
  // merge clusters that collapsed or never separated, return true if vertices were merged, false otherwise

  if(y.size()<2) return false;

  for(vector<vertex_t>::iterator k=y.begin(); (k+1)!=y.end(); k++){
    if( std::abs( (k+1)->z - k->z ) < 1.e-3 &&
        std::abs( (k+1)->t - k->t ) < 1.e-3 ){ // with fabs if only called after freeze-out (splitAll() at higher T)
      double rho = k->pk + (k+1)->pk;
      if(rho>0){
        k->z = ( k->pk * k->z + (k+1)->z * (k+1)->pk)/rho;
        k->t = ( k->pk * k->t + (k+1)->t * (k+1)->pk)/rho;
      }else{
        k->z = 0.5*(k->z + (k+1)->z);
        k->t = 0.5*(k->t + (k+1)->t);
      }
      k->pk = rho;

      y.erase(k+1);
      return true;
    }
  }

  return false;
}

//------------------------------------------------------------------------------

bool VertexFinderDA4D::merge(vector<vertex_t> & y, double & beta)const{
  // merge clusters that collapsed or never separated,
  // only merge if the estimated critical temperature of the merged vertex is below the current temperature
  // return true if vertices were merged, false otherwise
  if(y.size()<2) return false;

  for(vector<vertex_t>::iterator k=y.begin(); (k+1)!=y.end(); k++){
    if ( std::abs((k+1)->z - k->z) < 2.e-3 &&
         std::abs((k+1)->t - k->t) < 2.e-3 ) {
      double rho=k->pk + (k+1)->pk;
      double swE=k->swE+(k+1)->swE - k->pk * (k+1)->pk / rho * ( std::pow((k+1)->z - k->z,2.) +
                                                                 std::pow((k+1)->t - k->t,2.) );
      double Tc=2*swE/(k->sw+(k+1)->sw);

      if(Tc*beta<1){
        if(rho>0){
          k->z = ( k->pk * k->z + (k+1)->z * (k+1)->pk)/rho;
          k->t = ( k->pk * k->t + (k+1)->t * (k+1)->pk)/rho;
        }else{
          k->z = 0.5*(k->z + (k+1)->z);
          k->t = 0.5*(k->t + (k+1)->t);
        }
        k->pk = rho;
        k->sw += (k+1)->sw;
        k->swE = swE;
        k->Tc = Tc;
        y.erase(k+1);
        return true;
      }
    }
  }

  return false;
}

//------------------------------------------------------------------------------

bool VertexFinderDA4D::purge(vector<vertex_t> & y, vector<track_t> & tks, double & rho0, const double beta)const{
  // eliminate clusters with only one significant/unique track
  if(y.size()<2) return false;

  unsigned int nt=tks.size();
  double sumpmin=nt;
  vector<vertex_t>::iterator k0=y.end();
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    int nUnique=0;
    double sump=0;
    double pmax=k->pk/(k->pk+rho0*exp(-beta*fDzCutOff*fDzCutOff));
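    // pmax is the largest assignment probability a track can have for this
    // prototype once the outlier term rho0*exp(-beta*DzCutOff^2) enters Z;
    // a track counts as "unique" below when it reaches at least 90% of that
    // maximum.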
    for(unsigned int i=0; i<nt; i++){
      if(tks[i].Z > 0){
        double p = k->pk * std::exp(-beta*Eik(tks[i],*k)) / tks[i].Z ;
        sump+=p;
        if( (p > 0.9*pmax) && (tks[i].pi>0) ){ nUnique++; }
      }
    }

    if((nUnique<2)&&(sump<sumpmin)){
      sumpmin=sump;
      k0=k;
    }
  }

  if(k0!=y.end()){
    if(fVerbose){cout << "eliminating prototype at " << k0->z << "," << k0->t
                      << " with sump=" << sumpmin << endl;}
    //rho0+=k0->pk;
    y.erase(k0);
    return true;
  }else{
    return false;
  }
}

//------------------------------------------------------------------------------

double VertexFinderDA4D::beta0( double betamax,
                                vector<track_t> & tks,
                                vector<vertex_t> & y ) const {

  double T0=0; // max Tc for beta=0
  // estimate critical temperature from beta=0 (T=inf)
  unsigned int nt=tks.size();
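
  // At beta=0 all Boltzmann factors equal 1, so the prototype position is just
  // the globally weighted mean of the tracks, and its critical temperature is
  // Tc = 2*sum_i w_i*d_i^2 / sum_i w_i, with d_i^2 the error-normalized
  // distance to that mean (the two loops below). The returned beta lies on the
  // cooling schedule just below 1/T0, so the first critical temperature is
  // crossed after at most one cooling step.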

  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){

    // vertex fit at T=inf
    double sumwz=0.;
    double sumwt=0.;
    double sumw=0.;
    for(unsigned int i=0; i<nt; i++){
      double w = tks[i].pi/(tks[i].dz2 * tks[i].dt2);
      sumwz += w*tks[i].z;
      sumwt += w*tks[i].t;
      sumw += w;
    }
    k->z = sumwz/sumw;
    k->t = sumwt/sumw;

    // estimate Tcrit, eventually do this in the same loop
    double a=0, b=0;
    for(unsigned int i=0; i<nt; i++){
      double dx = tks[i].z-(k->z);
      double dt = tks[i].t-(k->t);
      double w = tks[i].pi/(tks[i].dz2 * tks[i].dt2);
      a += w*(std::pow(dx,2.)/tks[i].dz2 + std::pow(dt,2.)/tks[i].dt2);
      b += w;
    }
    double Tc= 2.*a/b; // the critical temperature of this vertex
    if(Tc>T0) T0=Tc;
  }// vertex loop (normally there should be only one vertex at beta=0)

  if (T0>1./betamax){
    return betamax/pow(fCoolingFactor, int(std::log(T0*betamax)/std::log(fCoolingFactor))-1 );
  }else{
    // ensure at least one annealing step
    return betamax/fCoolingFactor;
  }
}

//------------------------------------------------------------------------------

bool VertexFinderDA4D::split( double beta,
                              vector<track_t> & tks,
                              vector<vertex_t> & y) const {
  // split only critical vertices (Tc >~ T=1/beta <==> beta*Tc>~1)
  // an update must have been made just before doing this (same beta, no merging)
  // returns true if at least one cluster was split

  constexpr double epsilon=1e-3; // split all single vertices by 10 um
  bool split=false;

  // avoid left-right biases by splitting highest Tc first

  std::vector<std::pair<double, unsigned int> > critical;
  for(unsigned int ik=0; ik<y.size(); ik++){
    if (beta*y[ik].Tc > 1.){
      critical.push_back( make_pair(y[ik].Tc, ik));
    }
  }
  std::stable_sort(critical.begin(), critical.end(), std::greater<std::pair<double, unsigned int> >() );

  for(unsigned int ic=0; ic<critical.size(); ic++){
    unsigned int ik=critical[ic].second;
    // estimate subcluster positions and weight
    double p1=0, z1=0, t1=0, w1=0;
    double p2=0, z2=0, t2=0, w2=0;
    //double sumpi=0;
    for(unsigned int i=0; i<tks.size(); i++){
      if(tks[i].Z>0){
        //sumpi+=tks[i].pi;
        double p=y[ik].pk * exp(-beta*Eik(tks[i],y[ik])) / tks[i].Z*tks[i].pi;
        double w=p/(tks[i].dz2 * tks[i].dt2);
        if(tks[i].z < y[ik].z){
          p1+=p; z1+=w*tks[i].z; t1+=w*tks[i].t; w1+=w;
        }else{
          p2+=p; z2+=w*tks[i].z; t2+=w*tks[i].t; w2+=w;
        }
      }
    }
    if(w1>0){ z1=z1/w1; t1=t1/w1;} else{ z1=y[ik].z-epsilon; t1=y[ik].t-epsilon; }
    if(w2>0){ z2=z2/w2; t2=t2/w2;} else{ z2=y[ik].z+epsilon; t2=y[ik].t+epsilon;}

    // reduce split size if there is not enough room
    if( ( ik > 0 ) && ( y[ik-1].z>=z1 ) ){ z1=0.5*(y[ik].z+y[ik-1].z); t1=0.5*(y[ik].t+y[ik-1].t); }
    if( ( ik+1 < y.size()) && ( y[ik+1].z<=z2 ) ){ z2=0.5*(y[ik].z+y[ik+1].z); t2=0.5*(y[ik].t+y[ik+1].t); }

    // split if the new subclusters are significantly separated
    if( (z2-z1)>epsilon || std::abs(t2-t1) > epsilon){
      split=true;
      vertex_t vnew;
      vnew.pk = p1*y[ik].pk/(p1+p2);
      y[ik].pk= p2*y[ik].pk/(p1+p2);
      vnew.z = z1;
      vnew.t = t1;
      y[ik].z = z2;
      y[ik].t = t2;
      y.insert(y.begin()+ik, vnew);

      // adjust remaining pointers
      for(unsigned int jc=ic; jc<critical.size(); jc++){
        if (critical[jc].second>ik) {critical[jc].second++;}
      }
    }
  }

  // stable_sort(y.begin(), y.end(), clusterLessZ);
  return split;
}

//------------------------------------------------------------------------------

void VertexFinderDA4D::splitAll( vector<vertex_t> & y ) const {


  constexpr double epsilon=1e-3; // split all single vertices by 10 um
  constexpr double zsep=2*epsilon; // split vertices that are isolated by at least zsep (vertices that haven't collapsed)
  constexpr double tsep=2*epsilon; // check t as well

  vector<vertex_t> y1;

  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    if ( ( (k==y.begin())|| (k-1)->z < k->z - zsep) && (((k+1)==y.end() )|| (k+1)->z > k->z + zsep)) {
      // isolated prototype, split
      vertex_t vnew;
      vnew.z = k->z - epsilon;
      vnew.t = k->t - epsilon;
      (*k).z = k->z + epsilon;
      (*k).t = k->t + epsilon;
      vnew.pk= 0.5* (*k).pk;
      (*k).pk= 0.5* (*k).pk;
      y1.push_back(vnew);
      y1.push_back(*k);

    }else if( y1.empty() || (y1.back().z < k->z -zsep) || (y1.back().t < k->t - tsep) ){
      y1.push_back(*k);
    }else{
      y1.back().z -= epsilon;
      y1.back().t -= epsilon;
      k->z += epsilon;
      k->t += epsilon;
      y1.push_back(*k);
    }
  }// vertex loop

  y=y1;
}

