Changeset 4154bbd in git

- Timestamp: Sep 1, 2016, 12:56:40 PM
- Branches: ImprovedOutputFile, Timing, dual_readout, llp, master
- Children: 61569e0
- Parents: 95b4e9f
- Files: 4 edited
Legend: unchanged context lines are unprefixed; lines prefixed with '-' were removed in r4154bbd; lines prefixed with '+' were added.
Makefile (r95b4e9f → r4154bbd)

@@ -1669 +1669 @@
 
 modules/VertexFinderDA4D.h: \
-	classes/DelphesModule.h \
-	classes/DelphesClasses.h
+	classes/DelphesModule.h
 	@touch $@
 
@@ -1942 +1941 @@
 
 modules/VertexSorter.h: \
-	classes/DelphesModule.h \
-	classes/DelphesClasses.h
+	classes/DelphesModule.h
 	@touch $@
 
@@ -1951 +1949 @@
 
 modules/VertexFinder.h: \
-	classes/DelphesModule.h \
-	classes/DelphesClasses.h
+	classes/DelphesModule.h
 	@touch $@
modules/VertexFinderDA4D.cc (r95b4e9f → r4154bbd)

@@ -42 +42 @@
 static const Double_t c_light = 2.99792458e+8 * m/s;
 
-
-namespace {
-  bool recTrackLessZ1(const VertexFinderDA4D::track_t & tk1,
-                      const VertexFinderDA4D::track_t & tk2)
-  {
-    return tk1.z < tk2.z;
-  }
+struct track_t
+{
+  double z;      // z-coordinate at point of closest approach to the beamline
+  double t;      // t-coordinate at point of closest approach to the beamline
+  double dz2;    // square of the error of z(pca)
+  double dtz;    // covariance of z-t
+  double dt2;    // square of the error of t(pca)
+  Candidate *tt; // a pointer to the Candidate Track
+  double Z;      // Z[i] for DA clustering
+  double pi;     // track weight
+  double pt;
+  double eta;
+  double phi;
+};
+
+struct vertex_t
+{
+  double z;
+  double t;
+  double pk;     // vertex weight for "constrained" clustering
+  // --- temporary numbers, used during update
+  double ei;
+  double sw;
+  double swz;
+  double swt;
+  double se;
+  // ---for Tc
+  double swE;
+  double Tc;
+};
+
+static bool split(double beta, std::vector<track_t> &tks, std::vector<vertex_t> &y);
+static double update1(double beta, std::vector<track_t> &tks, std::vector<vertex_t> &y);
+static double update2(double beta, std::vector<track_t> &tks, std::vector<vertex_t> &y, double &rho0, const double dzCutOff);
+static void dump(const double beta, const std::vector<vertex_t> & y, const std::vector<track_t> & tks);
+static bool merge(std::vector<vertex_t> &);
+static bool merge(std::vector<vertex_t> &, double &);
+static bool purge(std::vector<vertex_t> &, std::vector<track_t> &, double &, const double, const double);
+static void splitAll(std::vector<vertex_t> &y);
+static double beta0(const double betamax, std::vector<track_t> &tks, std::vector<vertex_t> &y, const double coolingFactor);
+static double Eik(const track_t &t, const vertex_t &k);
+
+static bool recTrackLessZ1(const track_t & tk1, const track_t & tk2)
+{
+  return tk1.z < tk2.z;
 }
 

@@ -214 +252 @@
 
   if (fVerbose){
     std::cout << "x,y,z";
     std::cout << ",t";
     std::cout << "=" << candidate->Position.X()/10.0 <<" " << candidate->Position.Y()/10.0 << " " << candidate->Position.Z()/10.0;

@@ -242 +280 @@
 //------------------------------------------------------------------------------
 
-
-void VertexFinderDA4D::clusterize(const TObjArray & tracks, TObjArray & clusters)
+void VertexFinderDA4D::clusterize(const TObjArray &tracks, TObjArray &clusters)
 {
   if(fVerbose) {

@@ -294 +331 @@
 }
 
-
 //------------------------------------------------------------------------------
 

@@ -303 +339 @@
   vector< Candidate* > clusters;
 
-  vector<track_t> tks = fill();
+  vector<track_t> tks;
+  track_t tr;
+  Double_t z, dz, t, l, dt, d0, d0error;
+
+  // loop over input tracks
+  fItInputArray->Reset();
+  while((candidate = static_cast<Candidate*>(fItInputArray->Next())))
+  {
+    //TBC everything in cm
+    z = candidate->DZ/10;
+    tr.z = z;
+    dz = candidate->ErrorDZ/10;
+    tr.dz2 = dz*dz // track error
+    //TBC: beamspot size induced error, take 0 for now.
+    // + (std::pow(beamspot.BeamWidthX()*cos(phi),2.)+std::pow(beamspot.BeamWidthY()*sin(phi),2.))/std::pow(tantheta,2.) // beam-width induced
+    + fVertexSpaceSize*fVertexSpaceSize; // intrinsic vertex size, safer for outliers and short lived decays
+
+    // TBC: the time is in ns for now TBC
+    //t = candidate->Position.T()/c_light;
+    t = candidate->InitialPosition.T()/c_light;
+    l = candidate->L/c_light;
+    double pt = candidate->Momentum.Pt();
+    double eta = candidate->Momentum.Eta();
+    double phi = candidate->Momentum.Phi();
+
+    tr.pt = pt;
+    tr.eta = eta;
+    tr.phi = phi;
+    tr.t = t; //
+    tr.dtz = 0.;
+    dt = candidate->ErrorT/c_light;
+    tr.dt2 = dt*dt + fVertexTimeSize*fVertexTimeSize; // the ~injected~ timing error plus a small minimum vertex size in time
+    if(fD0CutOff>0)
+    {
+
+      d0 = TMath::Abs(candidate->D0)/10.0;
+      d0error = candidate->ErrorD0/10.0;
+
+      tr.pi=1./(1.+exp((d0*d0)/(d0error*d0error) - fD0CutOff*fD0CutOff)); // reduce weight for high ip tracks
+
+    }
+    else
+    {
+      tr.pi=1.;
+    }
+    tr.tt=&(*candidate);
+    tr.Z=1.;
+
+    // TBC now putting track selection here (> fPTMin)
+    if(tr.pi > 1e-3 && tr.pt > fMinPT)
+    {
+      tks.push_back(tr);
+    }
+  }
 
   //print out input tracks

@@ -341 +430 @@
 
   // estimate first critical temperature
-  double beta=beta0(fBetaMax, tks, y);
-  niter=0; while((update(beta, tks,y)>1.e-6) && (niter++ < fMaxIterations)){ }
+  double beta=beta0(fBetaMax, tks, y, fCoolingFactor);
+  niter=0; while((update1(beta, tks,y)>1.e-6) && (niter++ < fMaxIterations)){ }
 
   // annealing loop, stop when T<Tmin (i.e. beta>1/Tmin)

@@ -348 +437 @@
 
   if(fUseTc){
-    update(beta, tks,y);
-    while(merge(y,beta)){update(beta, tks,y);}
+    update1(beta, tks,y);
+    while(merge(y,beta)){update1(beta, tks,y);}
     split(beta, tks,y);
     beta=beta/fCoolingFactor;

@@ -358 +447 @@
 
     // make sure we are not too far from equilibrium before cooling further
-    niter=0; while((update(beta, tks,y)>1.e-6) && (niter++ < fMaxIterations)){ }
+    niter=0; while((update1(beta, tks,y)>1.e-6) && (niter++ < fMaxIterations)){ }
 
   }

@@ -364 +453 @@
   if(fUseTc){
     // last round of splitting, make sure no critical clusters are left
-    update(beta, tks,y);
-    while(merge(y,beta)){update(beta, tks,y);}
+    update1(beta, tks,y);
+    while(merge(y,beta)){update1(beta, tks,y);}
     unsigned int ntry=0;
     while( split(beta, tks,y) && (ntry++<10) ){
       niter=0;
-      while((update(beta, tks,y)>1.e-6) && (niter++ < fMaxIterations)){}
+      while((update1(beta, tks,y)>1.e-6) && (niter++ < fMaxIterations)){}
       merge(y,beta);
-      update(beta, tks,y);
+      update1(beta, tks,y);
     }
   }else{
     // merge collapsed clusters
-    while(merge(y,beta)){update(beta, tks,y);}
+    while(merge(y,beta)){update1(beta, tks,y);}
     if(fVerbose ){ cout << "dump after 1st merging " << endl; dump(beta,y,tks);}
   }

@@ -381 +470 @@
   // switch on outlier rejection
   rho0=1./nt; for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){ k->pk =1.; } // democratic
-  niter=0; while((update(beta, tks,y,rho0) > 1.e-8) && (niter++ < fMaxIterations)){ }
+  niter=0; while((update2(beta, tks,y,rho0, fDzCutOff) > 1.e-8) && (niter++ < fMaxIterations)){ }
   if(fVerbose ){ cout << "rho0=" << rho0 << " niter=" << niter << endl; dump(beta,y,tks);}
 

@@ -392 +481 @@
   // continue from freeze-out to Tstop (=1) without splitting, eliminate insignificant vertices
   while(beta<=fBetaStop){
-    while(purge(y,tks,rho0, beta)){
-      niter=0; while((update(beta, tks, y, rho0) > 1.e-6) && (niter++ < fMaxIterations)){ }
+    while(purge(y,tks,rho0, beta, fDzCutOff)){
+      niter=0; while((update2(beta, tks, y, rho0, fDzCutOff) > 1.e-6) && (niter++ < fMaxIterations)){ }
     }
     beta/=fCoolingFactor;
-    niter=0; while((update(beta, tks, y, rho0) > 1.e-6) && (niter++ < fMaxIterations)){ }
+    niter=0; while((update2(beta, tks, y, rho0, fDzCutOff) > 1.e-6) && (niter++ < fMaxIterations)){ }
   }

@@ -402 +491 @@
   // // new, one last round of cleaning at T=Tstop
   // while(purge(y,tks,rho0, beta)){
-  //   niter=0; while((update(beta, tks,y,rho0) > 1.e-6) && (niter++ < fMaxIterations)){ }
+  //   niter=0; while((update2(beta, tks,y,rho0, fDzCutOff) > 1.e-6) && (niter++ < fMaxIterations)){ }
   // }
 

@@ -440 +529 @@
       const double invdt = 1.0/std::sqrt(tks[i].dt2);
       if(tks[i].Z>0){
        double p = k->pk * exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
        if( (tks[i].pi>0) && ( p > 0.5 ) ){
          //std::cout << "pushing back " << i << ' ' << tks[i].tt << std::endl;
          //vertexTracks.push_back(*(tks[i].tt)); tks[i].Z=0;

@@ -482 +571 @@
 //------------------------------------------------------------------------------
 
-vector<VertexFinderDA4D::track_t>
-VertexFinderDA4D::fill()const{
-
-  Candidate *candidate;
-  track_t tr;
-  Double_t z, dz, t, l, dt, d0, d0error;
-
-  // prepare track data for clustering
-  vector< track_t > tks;
-
-  // loop over input tracks
-  fItInputArray->Reset();
-  while((candidate = static_cast<Candidate*>(fItInputArray->Next())))
-  {
-    //TBC everything in cm
-    z = candidate->DZ/10;
-    tr.z = z;
-    dz = candidate->ErrorDZ/10;
-    tr.dz2 = dz*dz // track error
-    // TBC: beamspot size induced error, take 0 for now.
-    // + (std::pow(beamspot.BeamWidthX()*cos(phi),2.)+std::pow(beamspot.BeamWidthY()*sin(phi),2.))/std::pow(tantheta,2.) // beam-width induced
-    + fVertexSpaceSize*fVertexSpaceSize; // intrinsic vertex size, safer for outliers and short lived decays
-
-    // TBC: the time is in ns for now TBC
-    //t = candidate->Position.T()/c_light;
-    t = candidate->InitialPosition.T()/c_light;
-    l = candidate->L/c_light;
-
-    double pt = candidate->Momentum.Pt();
-    double eta = candidate->Momentum.Eta();
-    double phi = candidate->Momentum.Phi();
-
-    tr.pt = pt;
-    tr.eta = eta;
-    tr.phi = phi;
-    tr.t = t; //
-    tr.dtz = 0.;
-    dt = candidate->ErrorT/c_light;
-    tr.dt2 = dt*dt + fVertexTimeSize*fVertexTimeSize; // the ~injected~ timing error plus a small minimum vertex size in time
-    if (fD0CutOff>0){
-
-      d0 = TMath::Abs(candidate->D0)/10.0;
-      d0error = candidate->ErrorD0/10.0;
-
-      tr.pi=1./(1.+exp((d0*d0)/(d0error*d0error) - fD0CutOff*fD0CutOff)); // reduce weight for high ip tracks
-
-    }
-    else{
-      tr.pi=1.;
-    }
-    tr.tt=&(*candidate);
-    tr.Z=1.;
-
-    // TBC now putting track selection here (> fPTMin)
-    if( tr.pi > 1e-3 && tr.pt > fMinPT) {
-      tks.push_back(tr);
-    }
-
-  }
-
-  return tks;
-}
-
-
-
-
-
-
-double VertexFinderDA4D::Eik(const track_t & t, const vertex_t &k ) const {
+static double Eik(const track_t & t, const vertex_t &k)
+{
   return std::pow(t.z-k.z,2.)/t.dz2 + std::pow(t.t - k.t,2.)/t.dt2;
 }
 
-//--------------------------------------------------------------------------------
-
-void VertexFinderDA4D::dump(const double beta, const vector<vertex_t> & y, const vector<track_t> & tks0)const{
-
+//------------------------------------------------------------------------------
+
+static void dump(const double beta, const vector<vertex_t> &y, const vector<track_t> &tks0)
+{
   // copy and sort for nicer printout
   vector<track_t> tks;

@@ -564 +586 @@
 
   cout << "-----DAClusterizerInZT::dump ----" << endl;
-  cout << " beta=" << beta << " betamax= " << fBetaMax <<endl;
+  cout << " beta=" << beta << endl;
   cout << " z= ";
   cout.precision(4);

@@ -587 +609 @@
   cout << endl;
 
-  if(fVerbose){
-    double E=0, F=0;
-    cout << endl;
-    cout << "---- z +/- dz t +/- dt ip +/-dip pt phi eta weights ----" << endl;
-    cout.precision(4);
-    for(unsigned int i=0; i<tks.size(); i++){
-      if (tks[i].Z>0){ F-=log(tks[i].Z)/beta;}
-      double tz= tks[i].z;
-      double tt= tks[i].t;
-      //cout << setw (3)<< i << ")" << setw (8) << fixed << setprecision(4)<< tz << " +/-" << setw (6)<< sqrt(tks[i].dz2)
-      //     << setw(8) << fixed << setprecision(4) << tt << " +/-" << setw(6) << std::sqrt(tks[i].dt2) ;
-
-      double sump=0.;
-      for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
-        if((tks[i].pi>0)&&(tks[i].Z>0)){
-          //double p=pik(beta,tks[i],*k);
-          double p=k->pk * std::exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
-          if( p > 0.0001){
-            //cout << setw (8) << setprecision(3) << p;
-          }else{
-            cout << " . ";
-          }
-          E+=p*Eik(tks[i],*k);
-          sump+=p;
-        }else{
-          cout << " ";
-        }
+  double E=0, F=0;
+  cout << endl;
+  cout << "---- z +/- dz t +/- dt ip +/-dip pt phi eta weights ----" << endl;
+  cout.precision(4);
+  for(unsigned int i=0; i<tks.size(); i++){
+    if (tks[i].Z>0){ F-=log(tks[i].Z)/beta;}
+    double tz= tks[i].z;
+    double tt= tks[i].t;
+    //cout << setw (3)<< i << ")" << setw (8) << fixed << setprecision(4)<< tz << " +/-" << setw (6)<< sqrt(tks[i].dz2)
+    //     << setw(8) << fixed << setprecision(4) << tt << " +/-" << setw(6) << std::sqrt(tks[i].dt2) ;
+
+    double sump=0.;
+    for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
+      if((tks[i].pi>0)&&(tks[i].Z>0)){
+        //double p=pik(beta,tks[i],*k);
+        double p=k->pk * std::exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
+        if( p > 0.0001){
+          //cout << setw (8) << setprecision(3) << p;
+        }else{
+          cout << " . ";
+        }
+        E+=p*Eik(tks[i],*k);
+        sump+=p;
+      }else{
+        cout << " ";
+      }
     }
     cout << endl;
   }
   cout << endl << "T=" << 1/beta << " E=" << E << " n="<< y.size() << " F= " << F << endl << "----------" << endl;
-  }
-}
-
-
-
-
-
-
-//------------------------------------------------------------------------------
-
-
-double VertexFinderDA4D::update( double beta,
-                                 vector<track_t> & tks,
-                                 vector<vertex_t> & y ) const {
+}
+
+//------------------------------------------------------------------------------
+
+static double update1(double beta, vector<track_t> &tks, vector<vertex_t> &y)
+{
   //update weights and vertex positions
   // mass constrained annealing without noise

@@ -661 +675 @@
       // accumulate weighted z and weights for vertex update
       for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); ++k){
        k->se += tks[i].pi* k->ei / Zi;
        const double w = k->pk * tks[i].pi* k->ei / ( Zi * ( tks[i].dz2 * tks[i].dt2 ) );
        k->sw += w;
        k->swz += w * tks[i].z;
        k->swt += w * tks[i].t;
        k->swE += w * Eik(tks[i],*k);
       }
     }else{

@@ -687 +701 @@
       k->Tc = 2.*k->swE/k->sw;
     }else{
-      if(fVerbose){cout << " a cluster melted away ? pk=" << k->pk << " sumw=" << k->sw << endl;}
+      // cout << " a cluster melted away ? pk=" << k->pk << " sumw=" << k->sw << endl
       k->Tc=-1;
     }

@@ -700 +714 @@
 //------------------------------------------------------------------------------
 
-double VertexFinderDA4D::update( double beta,
-                                 vector<track_t> & tks,
-                                 vector<vertex_t> & y,
-                                 double & rho0 ) const {
+static double update2(double beta, vector<track_t> &tks, vector<vertex_t> &y, double &rho0, double dzCutOff)
+{
   // MVF style, no more vertex weights, update tracks weights and vertex positions, with noise
   // returns the squared sum of changes of vertex positions

@@ -720 +732 @@
 
     // update pik and Zi and Ti
-    double Zi = rho0*std::exp(-beta*(fDzCutOff*fDzCutOff));// cut-off (eventually add finite size in time)
+    double Zi = rho0*std::exp(-beta*(dzCutOff*dzCutOff));// cut-off (eventually add finite size in time)
     //double Ti = 0.; // dt0*std::exp(-beta*fDtCutOff);
     for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){

@@ -732 +744 @@
       // accumulate weighted z and weights for vertex update
       for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
        k->se += tks[i].pi* k->ei / Zi;
        double w = k->pk * tks[i].pi * k->ei /( Zi * ( tks[i].dz2 * tks[i].dt2 ) );
        k->sw += w;
        k->swz += w * tks[i].z;
        k->swt += w * tks[i].t;
        k->swE += w * Eik(tks[i],*k);
       }
     }

@@ -754 +766 @@
       k->Tc = 2*k->swE/k->sw;
     }else{
-      if(fVerbose){cout << " a cluster melted away ? pk=" << k->pk << " sumw=" << k->sw << endl;}
+      // cout << " a cluster melted away ? pk=" << k->pk << " sumw=" << k->sw << endl;
       k->Tc = 0;
     }

@@ -766 +778 @@
 //------------------------------------------------------------------------------
 
-bool VertexFinderDA4D::merge(vector<vertex_t> & y)const{
+static bool merge(vector<vertex_t> &y)
+{
   // merge clusters that collapsed or never separated, return true if vertices were merged, false otherwise
 

@@ -794 +807 @@
 //------------------------------------------------------------------------------
 
-bool VertexFinderDA4D::merge(vector<vertex_t> & y, double & beta)const{
+static bool merge(vector<vertex_t> &y, double &beta)
+{
   // merge clusters that collapsed or never separated,
   // only merge if the estimated critical temperature of the merged vertex is below the current temperature

@@ -809 +823 @@
 
     if(Tc*beta<1){
       if(rho>0){
        k->z = ( k->pk * k->z + (k+1)->z * (k+1)->pk)/rho;
        k->t = ( k->pk * k->t + (k+1)->t * (k+1)->pk)/rho;
       }else{
        k->z = 0.5*(k->z + (k+1)->z);
        k->t = 0.5*(k->t + (k+1)->t);
       }
       k->pk = rho;
       k->sw += (k+1)->sw;
       k->swE = swE;
       k->Tc = Tc;
       y.erase(k+1);
       return true;
     }
   }

@@ -831 +845 @@
 //------------------------------------------------------------------------------
 
-bool VertexFinderDA4D::purge(vector<vertex_t> & y, vector<track_t> & tks, double & rho0, const double beta)const{
+static bool purge(vector<vertex_t> &y, vector<track_t> &tks, double & rho0, const double beta, const double dzCutOff)
+{
   // eliminate clusters with only one significant/unique track
   if(y.size()<2) return false;

@@ -841 +856 @@
     int nUnique=0;
     double sump=0;
-    double pmax=k->pk/(k->pk+rho0*exp(-beta*fDzCutOff*fDzCutOff));
+    double pmax=k->pk/(k->pk+rho0*exp(-beta*dzCutOff*dzCutOff));
     for(unsigned int i=0; i<nt; i++){
       if(tks[i].Z > 0){
        double p = k->pk * std::exp(-beta*Eik(tks[i],*k)) / tks[i].Z ;
        sump+=p;
        if( (p > 0.9*pmax) && (tks[i].pi>0) ){ nUnique++; }
       }
     }

@@ -857 +872 @@
 
   if(k0!=y.end()){
-    if(fVerbose){cout << "eliminating prototype at " << k0->z << "," << k0->t
-                      << " with sump=" << sumpmin << endl;}
+    //cout << "eliminating prototype at " << k0->z << "," << k0->t << " with sump=" << sumpmin << endl;
     //rho0+=k0->pk;
     y.erase(k0);

@@ -869 +883 @@
 //------------------------------------------------------------------------------
 
-double VertexFinderDA4D::beta0( double betamax,
-                                vector<track_t> & tks,
-                                vector<vertex_t> & y ) const {
+static double beta0(double betamax, vector<track_t> &tks, vector<vertex_t> &y, const double coolingFactor)
+{
 
   double T0=0; // max Tc for beta=0

@@ -906 +919 @@
 
   if (T0>1./betamax){
-    return betamax/pow( fCoolingFactor, int(std::log(T0*betamax)/std::log(fCoolingFactor))-1 );
+    return betamax/pow(coolingFactor, int(std::log(T0*betamax)/std::log(coolingFactor))-1 );
   }else{
     // ensure at least one annealing step
-    return betamax/fCoolingFactor;
-  }
-}
-
-//------------------------------------------------------------------------------
-
-bool VertexFinderDA4D::split( double beta,
-                              vector<track_t> & tks,
-                              vector<vertex_t> & y) const {
+    return betamax/coolingFactor;
+  }
+}
+
+//------------------------------------------------------------------------------
+
+static bool split(double beta, vector<track_t> &tks, vector<vertex_t> &y)
+{
   // split only critical vertices (Tc >~ T=1/beta <==> beta*Tc>~1)
   // an update must have been made just before doing this (same beta, no merging)
   // returns true if at least one cluster was split
 
-  constexpr double epsilon=1e-3; // split all single vertices by 10 um
-  bool split=false;
+  const double epsilon = 1e-3; // split all single vertices by 10 um
+  bool split = false;
 
   // avoid left-right biases by splitting highest Tc first

@@ -943 +955 @@
   for(unsigned int i=0; i<tks.size(); i++){
     if(tks[i].Z>0){
       //sumpi+=tks[i].pi;
       double p=y[ik].pk * exp(-beta*Eik(tks[i],y[ik])) / tks[i].Z*tks[i].pi;
       double w=p/(tks[i].dz2 * tks[i].dt2);
       if(tks[i].z < y[ik].z){
        p1+=p; z1+=w*tks[i].z; t1+=w*tks[i].t; w1+=w;
       }else{
        p2+=p; z2+=w*tks[i].z; t2+=w*tks[i].t; w2+=w;
       }
     }
   }

@@ -974 +986 @@
     // adjust remaining pointers
     for(unsigned int jc=ic; jc<critical.size(); jc++){
       if (critical[jc].second>ik) {critical[jc].second++;}
     }
   }

@@ -985 +997 @@
 //------------------------------------------------------------------------------
 
-void VertexFinderDA4D::splitAll( vector<vertex_t> & y ) const {
-
-
-  constexpr double epsilon=1e-3; // split all single vertices by 10 um
-  constexpr double zsep=2*epsilon; // split vertices that are isolated by at least zsep (vertices that haven't collapsed)
-  constexpr double tsep=2*epsilon; // check t as well
+void splitAll(vector<vertex_t> &y)
+{
+
+
+  const double epsilon=1e-3; // split all single vertices by 10 um
+  const double zsep=2*epsilon; // split vertices that are isolated by at least zsep (vertices that haven't collapsed)
+  const double tsep=2*epsilon; // check t as well
 
   vector<vertex_t> y1;
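Most of the VertexFinderDA4D.cc hunks above follow a single pattern: helpers that used to be const member functions reading module configuration from the object (fDzCutOff, fCoolingFactor) become file-local static free functions that take those values as explicit arguments, and the two update() overloads are disambiguated as update1()/update2(). A minimal sketch of that pattern, with hypothetical names (Widget, fScale, helper) rather than the actual Delphes classes:

#include <iostream>
#include <vector>

// Sketch only. Before the change, helper() would be a const member function
// silently reading fScale from the object; after it, the helper has internal
// linkage and receives the configuration it needs as a parameter.
static double helper(const std::vector<double> &values, double scale)
{
  double sum = 0.;
  for(std::vector<double>::const_iterator it = values.begin(); it != values.end(); ++it)
    sum += (*it) * scale; // same arithmetic as the member version, no hidden state
  return sum;
}

class Widget
{
public:
  explicit Widget(double scale) : fScale(scale) {}

  // the remaining member function forwards its configuration to the free helper
  double Run(const std::vector<double> &values) const { return helper(values, fScale); }

private:
  double fScale;
};

int main()
{
  std::vector<double> v;
  v.push_back(1.0);
  v.push_back(2.5);
  Widget w(2.0);
  std::cout << w.Run(v) << std::endl; // prints 7
  return 0;
}

Keeping the helpers out of the class is also what allows the struct definitions and the long list of private method declarations to disappear from VertexFinderDA4D.h in the next file.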
modules/VertexFinderDA4D.h (r95b4e9f → r4154bbd)

@@ -30 +30 @@
   void Finish();
 
-  struct track_t
-  {
-    double z;      // z-coordinate at point of closest approach to the beamline
-    double t;      // t-coordinate at point of closest approach to the beamline
-    double dz2;    // square of the error of z(pca)
-    double dtz;    // covariance of z-t
-    double dt2;    // square of the error of t(pca)
-    Candidate* tt; // a pointer to the Candidate Track
-    double Z;      // Z[i] for DA clustering
-    double pi;     // track weight
-    double pt;
-    double eta;
-    double phi;
-  };
-
-  struct vertex_t
-  {
-    double z;
-    double t;
-    double pk;     // vertex weight for "constrained" clustering
-    // --- temporary numbers, used during update
-    double ei;
-    double sw;
-    double swz;
-    double swt;
-    double se;
-    // ---for Tc
-    double swE;
-    double Tc;
-  };
-
-  void clusterize(const TObjArray & tracks, TObjArray & clusters);
-
+  void clusterize(const TObjArray &tracks, TObjArray &clusters);
   std::vector< Candidate* > vertices();
-
-  std::vector<track_t> fill() const;
-
-  bool split(double beta,
-             std::vector<track_t> & tks,
-             std::vector<vertex_t> & y) const;
-
-  double update(double beta,
-                std::vector<track_t> & tks,
-                std::vector<vertex_t> & y) const;
-
-  double update(double beta,
-                std::vector<track_t> & tks,
-                std::vector<vertex_t> & y,
-                double &)const;
-
-  void dump(const double beta, const std::vector<vertex_t> & y, const std::vector<track_t> & tks) const;
-  bool merge(std::vector<vertex_t> &) const;
-  bool merge(std::vector<vertex_t> &, double &) const;
-  bool purge(std::vector<vertex_t> &, std::vector<track_t> & , double &, const double) const;
-
-  void splitAll(std::vector<vertex_t> &y) const;
-
-  double beta0(const double betamax,
-               std::vector<track_t> &tks,
-               std::vector<vertex_t> &y)const;
-
-  double Eik(const track_t &t, const vertex_t &k)const;
 
  private:
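The Makefile hunks drop classes/DelphesClasses.h from these headers' dependency lists, which suggests the corresponding include was removed; with track_t and vertex_t gone from the header, the remaining interface only mentions Candidate through pointers, so a forward declaration suffices and the full class definition is needed only in the implementation file. A self-contained sketch of that idea, using stand-in types (Candidate below is a dummy struct, not the Delphes class):

#include <iostream>

// --- header side (sketch): a forward declaration covers pointer-only use ----
struct Candidate;

class VertexFinderSketch
{
public:
  double Process(Candidate *input) const; // Candidate appears only as a pointer
};

// --- implementation side (sketch): the full definition lives here -----------
struct Candidate { double z; }; // stand-in for the real class

namespace
{
  // file-local bookkeeping struct, analogous to track_t holding a Candidate*
  struct track_t
  {
    Candidate *tt; // pointer to the input object
    double z;      // cached z coordinate
  };
}

double VertexFinderSketch::Process(Candidate *input) const
{
  track_t tr;
  tr.tt = input;
  tr.z = input->z; // member access needs the full definition, available here
  return tr.z;
}

int main()
{
  Candidate c = { 1.5 };
  VertexFinderSketch finder;
  std::cout << finder.Process(&c) << std::endl; // prints 1.5
  return 0;
}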
modules/VertexSorter.cc (r95b4e9f → r4154bbd)

@@ -115 +115 @@
   Candidate *candidate, *jetCandidate, *beamSpotCandidate;
   map<Int_t, UInt_t> clusterIDToIndex;
+  map<Int_t, UInt_t>::const_iterator itClusterIDToIndex;
   map<Int_t, Double_t> clusterIDToSumPT2;
+  map<Int_t, Double_t>::const_iterator itClusterIDToSumPT2;
   vector<pair<Int_t, Double_t> > sortedClusterIDs;
+  vector<pair<Int_t, Double_t> >::const_iterator itSortedClusterIDs;
 
   for (Int_t iCluster = 0; iCluster < fInputArray->GetEntries (); iCluster++)

@@ -161 +164 @@
     }
 
-  for (const auto &clusterID : clusterIDToSumPT2)
-    sortedClusterIDs.push_back (make_pair (clusterID.first, clusterID.second));
+  for (itClusterIDToSumPT2 = clusterIDToSumPT2.begin(); itClusterIDToSumPT2 != clusterIDToSumPT2.end(); ++itClusterIDToSumPT2)
+    sortedClusterIDs.push_back (make_pair (itClusterIDToSumPT2->first, itClusterIDToSumPT2->second));
   sort (sortedClusterIDs.begin (), sortedClusterIDs.end (), secondDescending);
 }

@@ -193 +196 @@
       if (candidate->IsPU)
         continue;
-      for (const auto &clusterID : clusterIDToIndex)
+      for (itClusterIDToIndex = clusterIDToIndex.begin(); itClusterIDToIndex != clusterIDToIndex.end(); ++itClusterIDToIndex)
         {
-          if (candidate->ClusterIndex != clusterID.first)
+          if (candidate->ClusterIndex != itClusterIDToIndex->first)
             continue;
-          clusterIDToSumPT2.at (clusterID.first) += candidate->Momentum.Pt () * candidate->Momentum.Pt ();
+          clusterIDToSumPT2.at (itClusterIDToIndex->first) += candidate->Momentum.Pt () * candidate->Momentum.Pt ();
         }
     }
 
-  for (const auto &clusterID : clusterIDToSumPT2)
-    sortedClusterIDs.push_back (make_pair (clusterID.first, clusterID.second));
+  for (itClusterIDToSumPT2 = clusterIDToSumPT2.begin(); itClusterIDToSumPT2 != clusterIDToSumPT2.end(); ++itClusterIDToSumPT2)
+    sortedClusterIDs.push_back (make_pair (itClusterIDToSumPT2->first, itClusterIDToSumPT2->second));
   sort (sortedClusterIDs.begin (), sortedClusterIDs.end (), secondDescending);
 }

@@ -214 +217 @@
       throw 0;
     }
-
-  for (const auto &clusterID : sortedClusterIDs)
-    {
-      Candidate *cluster = (Candidate *) fInputArray->At (clusterIDToIndex.at (clusterID.first));
+  for (itSortedClusterIDs = sortedClusterIDs.begin(); itSortedClusterIDs != sortedClusterIDs.end(); ++itSortedClusterIDs)
+    {
+      Candidate *cluster = (Candidate *) fInputArray->At (clusterIDToIndex.at (itSortedClusterIDs->first));
       if (fMethod == "BTV")
-        cluster->BTVSumPT2 = clusterID.second;
+        cluster->BTVSumPT2 = itSortedClusterIDs->second;
       else if (fMethod == "GenClosest")
-        cluster->GenDeltaZ = clusterID.second;
+        cluster->GenDeltaZ = itSortedClusterIDs->second;
       else if (fMethod == "GenBest")
-        cluster->GenSumPT2 = clusterID.second;
+        cluster->GenSumPT2 = itSortedClusterIDs->second;
       fOutputArray->Add (cluster);
     }
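The VertexSorter.cc hunks are one mechanical change: the C++11 range-based for loops over the cluster maps are rewritten as explicit const_iterator loops, presumably so the module also builds without C++11 support. A minimal stand-alone illustration of the equivalence (hypothetical map contents, not Delphes data):

#include <iostream>
#include <map>

int main()
{
  std::map<int, double> clusterIDToSumPT2;
  clusterIDToSumPT2[1] = 10.5;
  clusterIDToSumPT2[2] = 3.2;

  // C++11 style, as in the old code:
  // for (const auto &clusterID : clusterIDToSumPT2)
  //   std::cout << clusterID.first << " " << clusterID.second << std::endl;

  // Iterator style, as in the new code; valid in C++98 as well:
  std::map<int, double>::const_iterator it;
  for (it = clusterIDToSumPT2.begin(); it != clusterIDToSumPT2.end(); ++it)
    std::cout << it->first << " " << it->second << std::endl;

  return 0;
}

Both forms visit the same key/value pairs in the same order; only the loop syntax (and the language standard it requires) differs.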