diff --git a/camb/Makefile b/camb/Makefile index 3a42267..8c94044 100644 --- a/camb/Makefile +++ b/camb/Makefile @@ -1,85 +1,136 @@ -#CAMB Makefile +# >>> DESIGNED FOR GMAKE <<< +#CAMB System unified Makefile + +ext=$(shell uname | cut -c1-3) +ifeq ($(ext),IRI) +F90C=f90 +FFLAGS = -Ofast -mp -n32 -LANG:recursive=ON +LFLAGS= +endif -#Set FISHER=Y to compile bispectrum fisher matrix code -FISHER= +ifeq ($(ext),Lin) +F90C=gfortran +FFLAGS= -O -fopenmp +LFLAGS= +endif -#Edit for your compiler -#Note there are many old ifort versions, some of which behave oddly +ifeq ($(ext),OSF) +F90C=f90 +FFLAGS= -omp -O -arch host -math_library fast -tune host -fpe1 +LFLAGS= +endif +ifeq ($(ext),Sun) +F90C=f90 +FFLAGS= -O4 -xarch=native64 -openmp -ftrap=%none +LFLAGS= +endif -#Intel , -openmp toggles mutli-processor: -#note version 10.0 gives wrong result for lensed when compiled with -openmp [fixed in 10.1] -F90C = ifort -FFLAGS = -openmp -fast -W0 -WB -fpp2 -vec_report0 -## This is flag is passed to the Fortran compiler allowing it to link C++ if required (not usually): -F90CRLINK = -cxxlib -ifneq ($(FISHER),) -FFLAGS += -mkl +ifeq ($(ext),AIX) +F90C=xlf90_r +FFLAGS= -O4 -qsmp=omp -qmaxmem=-1 -qstrict -qfree=f90 -qsuffix=f=f90:cpp=F90 +LFLAGS= endif -#Gfortran compiler: -#The options here work in v4.5, delete from RHS in earlier versions (15% slower) -#if pre v4.3 add -D__GFORTRAN__ -#With v4.6+ try -Ofast -march=native -fopenmp -#On my machine v4.5 is about 20% slower than ifort -#F90C = gfortran -#FFLAGS = -O3 -fopenmp -ffast-math -march=native -funroll-loops - - -#Old Intel ifc, add -openmp for multi-processor (some have bugs): -#F90C = ifc -#FFLAGS = -O2 -Vaxlib -ip -W0 -WB -quiet -fpp2 -#some systems can can also add e.g. -tpp7 -xW - -#G95 compiler -#F90C = g95 -#FFLAGS = -O2 - -#SGI, -mp toggles multi-processor. Use -O2 if -Ofast gives problems. -#F90C = f90 -#FFLAGS = -Ofast -mp - -#Digital/Compaq fortran, -omp toggles multi-processor -#F90C = f90 -#FFLAGS = -omp -O4 -arch host -math_library fast -tune host -fpe1 - -#Absoft ProFortran, single processor: -#F90C = f95 -#FFLAGS = -O2 -cpu:athlon -s -lU77 -w -YEXT_NAMES="LCS" -YEXT_SFX="_" - -#NAGF95, single processor: -#F90C = f95 -#FFLAGS = -DNAGF95 -O3 - -#PGF90 -#F90C = pgf90 -#FFLAGS = -O2 -DESCAPEBACKSLASH -Mpreprocess - -#Sun V880 -#F90C = mpf90 -#FFLAGS = -O4 -openmp -ftrap=%none -dalign -DMPI - -#Sun parallel enterprise: -#F90C = f95 -#FFLAGS = -O2 -xarch=native64 -openmp -ftrap=%none -#try removing -openmp if get bus errors. -03, -04 etc are dodgy. 
- -#IBM XL Fortran, multi-processor (run gmake) -#F90C = xlf90_r -#FFLAGS = -DESCAPEBACKSLASH -DIBMXL -qsmp=omp -qsuffix=f=f90:cpp=F90 -O3 -qstrict -qarch=pwr3 -qtune=pwr3 - -#Settings for building camb_fits -#Location of FITSIO and name of library -FITSDIR ?= /home/cpac/cpac-tools/lib -FITSLIB = cfitsio -#Location of HEALPIX for building camb_fits -HEALPIXDIR ?= /home/cpac/cpac-tools/healpix - -ifneq ($(FISHER),) + +#Files containing evolution equations initial power spectrum module +EQUATIONS = equations +POWERSPECTRUM = power_tilt +REIONIZATION = reionization +RECOMBINATION = recfast +#RECOMBINATION = cosmorec + + +BISPECTRUM = SeparableBispectrum + +DENABLE_FISHER= + +#Module doing non-linear scaling +NONLINEAR ?= halofit + +#Driver program +DRIVER ?= inidriver.F90 +#DRIVER = sigma8.f90 +#DRIVER = tester.f90 + + +CAMBBIN = camb.$(ext) +CAMBLIB = libcamb_$(RECOMBINATION).a + +ifneq ($(DENABLE_FISHER),) FFLAGS += -DFISHER +LFLAGS += -llapack -lblas EXTCAMBFILES = Matrix_utils.o else EXTCAMBFILES = endif -include ./Makefile_main +HEALPIXDIR= + +#Shouldn't need to change anything else... + +F90FLAGS = $(FFLAGS) +HEALPIXLD = -L$(HEALPIXDIR)/lib -lhealpix + +CAMBOBJ = constants.o utils.o $(EXTCAMBFILES) subroutines.o inifile.o $(POWERSPECTRUM).o $(RECOMBINATION).o $(REIONIZATION).o modules.o \ + bessels.o $(EQUATIONS).o $(NONLINEAR).o lensing.o $(BISPECTRUM).o \ + cmbmain.o camb.o + + +ifeq ($(RECOMBINATION),cosmorec) +COSMOREC_PATH =../CosmoRec/ +LFLAGS += -L$(COSMOREC_PATH) -lCosmoRec -lgsl -lgslcblas -lstdc++ +endif + + +ifeq ($(RECOMBINATION),hyrec) +HYREC_PATH = ../HyRec/ +LFLAGS += -L$(HYREC_PATH) -lhyrec -lstdc++ +endif + +default: $(CAMBBIN) +all: $(CAMBBIN) $(CAMBLIB) + + +subroutines.o: constants.o utils.o +$(POWERSPECTRUM).o: subroutines.o inifile.o +$(RECOMBINATION).o: subroutines.o inifile.o +$(REIONIZATION).o: constants.o inifile.o +modules.o: $(REIONIZATION).o $(POWERSPECTRUM).o $(RECOMBINATION).o +bessels.o: modules.o +$(EQUATIONS).o: bessels.o +$(NONLINEAR).o: modules.o +lensing.o: bessels.o +$(BISPECTRUM).o: lensing.o modules.o +cmbmain.o: lensing.o $(NONLINEAR).o $(EQUATIONS).o $(BISPECTRUM).o +camb.o: cmbmain.o +Matrix_utils.o: utils.o + + +$(CAMBBIN): $(CAMBOBJ) $(DRIVER) + $(F90C) $(F90FLAGS) $(CAMBOBJ) $(DRIVER) $(LFLAGS) -o $@ + +$(CAMBLIB): $(CAMBOBJ) + ar -r $@ $(CAMBOBJ) + +camb_fits: writefits.f90 $(CAMBOBJ) $(DRIVER) + $(F90C) $(F90FLAGS) -I$(HEALPIXDIR)/include $(CAMBOBJ) writefits.f90 $(DRIVER) $(HEALPIXLD) -DWRITE_FITS -o $@ + + +%.o: %.f90 + $(F90C) $(F90FLAGS) -c $*.f90 + +%.o: %.F90 + $(F90C) $(F90FLAGS) -c $*.F90 + +utils.o: + $(F90C) $(F90FLAGS) -c utils.F90 + +$(BISPECTRUM).o: + $(F90C) $(F90FLAGS) -c $(BISPECTRUM).F90 + +Matrix_utils.o: + $(F90C) $(F90FLAGS) -c Matrix_utils.F90 + +clean: + -rm -f *.o *.a *.d core *.mod *.$(ext) diff --git a/camb/reionization.f90 b/camb/reionization.f90 index 935e5db..f45290f 100644 --- a/camb/reionization.f90 +++ b/camb/reionization.f90 @@ -50,7 +50,7 @@ module Reionization end type ReionizationHistory - real(dl), parameter :: Reionization_maxz = 40._dl + real(dl), parameter :: Reionization_maxz = 500._dl real(dl), private, parameter :: Reionization_tol = 1d-5 real(dl), private, external :: dtauda, rombint,rombint2 diff --git a/multinest/LICENCE b/multinest/LICENCE new file mode 100644 index 0000000..4e35496 --- /dev/null +++ b/multinest/LICENCE @@ -0,0 +1,71 @@ +Copyright © 2010 Farhan Feroz & Mike Hobson + +Permission to include in application software or to make digital or +hard copies of part or all of this work is 
subject to the following +licensing agreement. + + +MultiNest License Agreement + +MultiNest (both binary and source) is copyrighted by Farhan Feroz and +Mike Hobson (hereafter, FF&MH) and ownership of all right, title and +interest in and to the Software remains with FF&MH. By using or +copying the Software, User agrees to abide by the terms of this +Agreement. + + +Non-commercial Use + +FF&MH grant to you (hereafter, User) a royalty-free, non-exclusive +right to execute, copy, modify and distribute both the binary and +source code solely for academic, research and other similar +non-commercial uses, subject to the following + +1. User acknowledges that the Software is still in the development +stage and that it is being supplied "as is," without any support +services from FF&MH. FF&MH do not make any representations or +warranties, express or implied, including, without limitation, any +representations or warranties of the merchantability or fitness for +any particular purpose, or that the application of the software, will +not infringe on any patents or other proprietary rights of others. + +2. FF&MH shall not be held liable for direct, indirect, incidental or +consequential damages arising from any claim by User or any third +party with respect to uses allowed under this Agreement, or from any +use of the Software. + +3. User agrees to fully indemnify and hold harmless FF&MH of the +original work from and against any and all claims, demands, suits, +losses, damages, costs and expenses arising out of the User's use of +the Software, including, without limitation, arising out of the User's +modification of the Software. + +4. User may modify the Software and distribute that modified work to +third parties provided that: (a) if posted separately, it clearly +acknowledges that it contains material copyrighted by FF&MH (b) no +charge is associated with such copies, (c) User agrees to notify FF&MH +of the distribution, and (d) User clearly notifies secondary users +that such modified work is not the original Software. + +5. User agrees that the authors of the original work and others may +enjoy a royalty-free, non-exclusive license to use, copy, modify and +redistribute these modifications to the Software made by the User and +distributed to third parties as a derivative work under this +agreement. + +6. This agreement will terminate immediately upon User's breach of, or +non-compliance with, any of its terms. User may be held liable for any +copyright infringement or the infringement of any other proprietary +rights in the Software that is caused or facilitated by the User's +failure to abide by the terms of this agreement. + + +Commercial Use + +Any User wishing to make a commercial use of the Software must contact +FF&MH at f.feroz@mrao.cam.ac.uk to arrange an appropriate license. +Commercial use includes (1) integrating or incorporating all or part +of the source code into a product for sale or license by, or on behalf +of, User to third parties, or (2) distribution of the binary or source +code to third parties for use with a commercial product sold or +licensed by, or on behalf of, User. 
diff --git a/multinest/README b/multinest/README
new file mode 100644
index 0000000..7c5b337
--- /dev/null
+++ b/multinest/README
@@ -0,0 +1,351 @@
+MultiNest v 2.18
+Farhan Feroz, Mike Hobson
+f.feroz@mrao.cam.ac.uk
+arXiv:0704.3704 & arXiv:0809.3437
+Released Aug 2012
+
+---------------------------------------------------------------------------
+
+MultiNest Licence
+
+Users are required to accept the licence agreement given in the LICENCE file.
+
+---------------------------------------------------------------------------
+
+Required Libraries:
+
+MultiNest requires LAPACK. To use MPI support, an MPI library must also be installed.
+
+---------------------------------------------------------------------------
+
+MPI Support:
+
+The code is MPI compatible. In order to disable the MPI parallelization, remove the -DMPI compilation flag.
+
+---------------------------------------------------------------------------
+
+gfortran compiler:
+
+You might need to use the following flag while compiling MultiNest with the gfortran compiler to remove the
+restriction imposed by gfortran on line length:
+
+-ffree-line-length-none
+
+---------------------------------------------------------------------------
+
+The subroutine to run MultiNest is as follows:
+
+subroutine nestRun(mmodal, ceff, nlive, tol, efr, ndims, nPar, nCdims, maxModes, updInt, Ztol, root, seed,
+pWrap, feedback, resume, outfile, initMPI, logZero, maxiter, loglike, dumper, context)
+
+logical mmodal !do mode separation?
+logical ceff !run in constant efficiency mode?
+integer nlive !number of live points
+double precision tol !evidence tolerance factor
+double precision efr !sampling efficiency
+integer ndims !number of dimensions
+integer nPar !total no. of parameters
+integer nCdims !no. of parameters on which clustering should be performed (read below)
+integer maxModes !maximum no. of modes (for memory allocation)
+integer updInt !iterations after which the output files should be written
+double precision Ztol !null log-evidence (read below)
+character(LEN=100) root !root for MultiNest output files
+integer seed !random no. generator seed, -ve value for seed from the sys clock
+integer pWrap[ndims] !wraparound parameters?
+logical feedback !need update on sampling progress?
+logical resume !resume from a previous run?
+logical outfile !write output files?
+logical initMPI !initialize MPI routines?, relevant only if compiling with MPI
+ !set it to F if you want your main program to handle MPI initialization
+double precision logZero !points with loglike < logZero will be ignored by MultiNest
+integer maxiter !max no. of iterations, a non-positive value means infinity. MultiNest will terminate if either it
+ !has done max no. of iterations or the convergence criterion (defined through tol) has been satisfied
+loglike(Cube,ndims,nPar,lnew,context) !subroutine which gives lnew=loglike(Cube(ndims))
+dumper(nSamples,nlive,nPar,physLive,posterior,paramConstr,maxloglike,logZ,logZerr,context) !subroutine called after every updInt*10 iterations with the posterior
+ !distribution, parameter constraints, max loglike & log evidence values
+integer context !not required by MultiNest, any additional information the user wants to pass
+
+---------------------------------------------------------------------------
+
+likelihood routine: slikelihood(Cube,ndims,nPar,lnew,context)
+
+On entry, Cube(1:nPar) contains the non-physical (unit hypercube) parameters.
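+
+For concreteness, here is a minimal calling sketch (an editorial illustration, not part of the MultiNest
+distribution). The module name toy_model, the routine names my_loglike & my_dumper, the uniform prior range
+[-5, 5] and all sampling parameter values are assumptions made only for this example; the argument order
+follows the nestRun signature above, the dumper routine is described in the next section, and the program must
+be linked against the MultiNest library & LAPACK.
+
+module toy_model
+   implicit none
+contains
+   subroutine my_loglike(Cube, n_dim, nPar, lnew, context)
+      integer n_dim, nPar, context, i
+      double precision Cube(nPar), lnew
+      !scale the unit-hypercube parameters to the assumed physical range [-5, 5]
+      do i = 1, n_dim
+         Cube(i) = -5d0 + 10d0*Cube(i)
+      enddo
+      !toy log-likelihood: an isotropic Gaussian centred on the origin
+      lnew = -0.5d0*sum(Cube(1:n_dim)**2)
+   end subroutine my_loglike
+
+   subroutine my_dumper(nSamples, nlive, nPar, physLive, posterior, paramConstr, maxLogLike, logZ, logZerr, context)
+      integer nSamples, nlive, nPar, context
+      double precision, pointer :: physLive(:,:), posterior(:,:), paramConstr(:)
+      double precision maxLogLike, logZ, logZerr
+      !report the running evidence estimate; a real application could store the posterior arrays here
+      write(*,*) 'ln(Z) =', logZ, '+/-', logZerr
+   end subroutine my_dumper
+end module toy_model
+
+program run_toy
+   use nested, only: nestRun
+   use toy_model
+   implicit none
+   integer, parameter :: ndims = 2, nPar = 2, nCdims = 2
+   integer :: pWrap(ndims), context
+   character(LEN=100) :: root
+   pWrap = 0          !no wraparound parameters
+   context = 0
+   root = 'chains/toy-'
+   !argument order: mmodal, ceff, nlive, tol, efr, ndims, nPar, nCdims, maxModes, updInt, Ztol, root, seed,
+   !                pWrap, feedback, resume, outfile, initMPI, logZero, maxiter, loglike, dumper, context
+   call nestRun(.false., .false., 1000, 0.5d0, 0.8d0, ndims, nPar, nCdims, 2, 100, -1.d90, root, -1, &
+                pWrap, .true., .false., .true., .true., -1.d90, 0, my_loglike, my_dumper, context)
+end program run_toy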
+
+Scale Cube(1:n_dim) & return the scaled (physical) parameters in Cube(1:n_dim); any additional parameters that you want
+MultiNest to return along with the actual parameters should be placed in Cube(n_dim+1:nPar).
+
+Return the log-likelihood in lnew.
+
+---------------------------------------------------------------------------
+
+dumper routine: dumper(nSamples,nlive,nPar,physLive,posterior,paramConstr,maxloglike,logZ,logZerr,context)
+
+This routine is called after every updInt*10 iterations & at the end of the sampling, allowing the posterior
+distribution & parameter constraints to be passed on to the user in memory. The arguments are as follows:
+
+nSamples = total number of samples in the posterior distribution
+nlive = total number of live points
+nPar = total number of parameters (free + derived)
+physLive(nlive, nPar+1) = 2D array containing the last set of live points (physical parameters plus derived parameters) along with their loglikelihood values
+posterior(nSamples, nPar+2) = posterior distribution containing nSamples points. Each sample has nPar parameters (physical + derived) along with their loglike
+value & posterior probability
+paramConstr(1, 4*nPar):
+ paramConstr(1, 1) to paramConstr(1, nPar) = mean values of the parameters
+ paramConstr(1, nPar+1) to paramConstr(1, 2*nPar) = standard deviations of the parameters
+ paramConstr(1, nPar*2+1) to paramConstr(1, 3*nPar) = best-fit (maxlike) parameters
+ paramConstr(1, nPar*3+1) to paramConstr(1, 4*nPar) = MAP (maximum-a-posteriori) parameters
+maxLogLike = maximum loglikelihood value
+logZ = log evidence value
+logZerr = error on the log evidence value
+context = not required by MultiNest, any additional information the user wants to pass
+
+The 2D arrays are Fortran arrays, which are laid out differently from C/C++ arrays. In the example dumper routine provided with the C & C++
+eggbox examples, the Fortran arrays are copied into C/C++ arrays.
+
+---------------------------------------------------------------------------
+
+Transformation from hypercube to physical parameters:
+
+MultiNest's native space is the unit hypercube, in which all the parameters are uniformly distributed in [0, 1]. The user
+is required to transform the hypercube parameters to physical parameters. This transformation is described in
+Sec 5.1 of arXiv:0809.3437. Routines to transform hypercube parameters to the most commonly used priors are
+provided in the module priors (in the file priors.f90).
+
+---------------------------------------------------------------------------
+
+Checkpointing:
+
+MultiNest is able to checkpoint. It creates the [root]resume.dat file & stores checkpointing information in it after every
+updInt iterations, where updInt is set by the user. If you don't want to resume your program from
+the last run, then make sure that you either delete the [root]resume.dat file or set the parameter resume to F
+before starting the sampling.
+
+---------------------------------------------------------------------------
+
+Periodic Boundary Conditions:
+
+In order to sample from parameters with periodic boundary conditions (or wraparound parameters), set pWrap[i],
+where i is the index of the parameter to be wrapped around, to a non-zero value. If pWrap[i] = 0, then the ith
+parameter is not treated as wraparound.
+
+---------------------------------------------------------------------------
+
+Constant Efficiency Mode:
+
+If ceff is set to T, then the enlargement factors of the bounding ellipsoids are tuned so that the sampling
+efficiency is as close to the target efficiency (set by efr) as possible.
+This does, however, mean that the evidence value may not be accurate.
+
+---------------------------------------------------------------------------
+
+Sampling Parameters:
+
+The recommended parameter values to be used with MultiNest are described below. For a detailed description please
+refer to arXiv:0809.3437.
+
+nPar:
+Total no. of parameters; it should be equal to ndims in most cases, but if you need to store some additional
+parameters with the actual parameters then you need to pass them through the likelihood routine.
+
+
+efr:
+Defines the sampling efficiency. 0.8 and 0.3 are recommended for parameter estimation & evidence evaluation
+respectively.
+
+
+tol:
+A value of 0.5 should give good enough accuracy.
+
+
+nCdims:
+If mmodal is T, MultiNest will attempt to separate out the modes. Mode separation is done through a clustering
+algorithm. Mode separation can be done on all the parameters (in which case nCdims should be set to ndims) & it
+can also be done on a subset of parameters (in which case nCdims < ndims), which might be advantageous as
+clustering becomes less accurate as the dimensionality increases. If nCdims < ndims then mode separation is done on
+the first nCdims parameters.
+
+
+Ztol:
+If mmodal is T, MultiNest can find multiple modes & also specify which samples belong to which mode. It might be
+desirable to have separate samples & mode statistics for modes with local log-evidence value greater than a
+particular value, in which case Ztol should be set to that value. If there isn't any particularly interesting
+Ztol value, then Ztol should be set to a very large negative number (e.g. -1.d90).
+
+---------------------------------------------------------------------------
+
+Progress Monitoring:
+
+MultiNest produces the [root]physlive.dat & [root]ev.dat files after every updInt iterations, which can be used to
+monitor the progress. The format & contents of these two files are as follows:
+
+[root]physlive.dat:
+This file contains the current set of live points. It has nPar+2 columns. The first nPar columns are the ndim
+parameter values along with the (nPar-ndim) additional parameters that are being passed by the likelihood
+routine for MultiNest to save along with the ndim parameters. The nPar+1 column is the log-likelihood value &
+the last column is the node no. (used for clustering).
+
+[root]ev.dat:
+This file contains the set of rejected points. It has nPar+3 columns. The first nPar columns are the ndim
+parameter values along with the (nPar-ndim) additional parameters that are being passed by the likelihood
+routine for MultiNest to save along with the ndim parameters. The nPar+1 column is the log-likelihood value,
+the nPar+2 column is the log(prior mass) & the last column is the node no. (used for clustering).
+
+---------------------------------------------------------------------------
+
+Posterior Files:
+
+These files are created after every updInt*10 iterations of the algorithm & at the end of sampling.
+
+MultiNest will produce five posterior sample files, with the root given by the user, as follows:
+
+
+[root].txt
+Compatible with getdist, with 2+nPar columns. Columns have sample probability, -2*loglikelihood, parameter values.
+Sample probability is the sample prior mass multiplied by its likelihood & normalized by the evidence.
+
+
+[root]post_separate.dat
+This file is only created if mmodal is set to T. Posterior samples for modes with local log-evidence value
+greater than Ztol, separated by 2 blank lines. Format is the same as the [root].txt file.
+
+
+[root]stats.dat
+Contains the global log-evidence, its error & the local log-evidence with error & parameter means & standard
+deviations as well as the best fit & MAP parameters of each of the modes found with local log-evidence > Ztol.
+
+
+[root]post_equal_weights.dat
+Contains the equally weighted posterior samples. Columns have parameter values followed by the loglike value.
+
+
+[root]summary.txt
+There is one line per mode with nPar*4+2 values in each line in this file. Each line has the following values
+in its columns: mean parameter values, standard deviations of the parameters, bestfit (maxlike) parameter values,
+MAP (maximum-a-posteriori) parameter values, local log evidence, maximum loglike value.
+
+---------------------------------------------------------------------------
+
+C/C++ Interface:
+
+Starting in MultiNest v2.18 there is a common C/C++ interface to MultiNest. The file is located in the
+"includes" directory. Examples of how it may be used are found in the example_eggboxC and
+example_eggboxC++ directories.
+
+You may need to check the symbol table for your platform (nm libmulitnest.a | grep nestrun) and edit the
+multinest.h file to define NESTRUN. Please let us know of any modifications made so they may be included
+in future releases.
+
+
+---------------------------------------------------------------------------
+
+Python Interface:
+
+Johannes Buchner has written an easy-to-use Python interface to MultiNest called PyMultiNest which
+provides integration with existing scientific Python code (numpy, scipy). It allows you to write prior &
+likelihood functions in Python. It is available from:
+
+https://github.com/JohannesBuchner/PyMultiNest
+
+
+---------------------------------------------------------------------------
+
+R Interface:
+
+Johannes Buchner has written an R bridge for MultiNest called RMultiNest. It allows likelihood functions
+written in R (http://www.r-project.org) to be used by MultiNest. It is available from:
+
+https://github.com/JohannesBuchner/RMultiNest
+
+
+---------------------------------------------------------------------------
+
+Matlab version of MultiNest:
+
+Matthew Pitkin and Joe Romano have created a simple Matlab version of MultiNest. It doesn't have all the
+bells-and-whistles of the full implementation and just uses the basic Algorithm 1 from arXiv:0809.3437. It
+is available for download from the MultiNest website:
+
+http://ccpforge.cse.rl.ac.uk/gf/project/multinest/
+
+
+---------------------------------------------------------------------------
+
+Visualization of MultiNest Output:
+
+The [root].txt file created by MultiNest is compatible with the format required by the getdist package, which is
+part of the CosmoMC package. Refer to the following website in order to download or get more information about
+getdist:
+http://cosmologist.info/cosmomc/readme.html#Analysing
+
+
+Johannes Buchner's PyMultiNest can also be used on existing MultiNest output to plot & visualize results.
+https://github.com/JohannesBuchner/PyMultiNest
+
+
+---------------------------------------------------------------------------
+
+Toy Problems
+
+There are 8 toy programs included with MultiNest.
+
+example_obj_detect: The object detection problem discussed in arXiv:0704.3704. The positions, amplitudes &
+widths of the Gaussian objects can be modified through the params.f90 file. Sampling parameters are also set in
+params.f90.
+
+example_gauss_shell: The Gaussian shells problem discussed in arXiv:0809.3437.
+The dimensionality, positions and thickness of these shells can be modified through the params.f90 file.
+Sampling parameters are also set in params.f90.
+
+example_gaussian: The Gaussian shells problem discussed in arXiv:1001.0719. The dimensionality of the problem can
+be modified through the params.f90 file. Sampling parameters are also set in params.f90.
+
+example_eggboxC/example_eggboxC++: The C/C++ interface includes the egg box problem discussed in arXiv:0809.3437.
+The toy problem and sampling parameters are set in the eggbox.c & eggbox.cc files.
+
+example_ackley: The Ackley minimization problem (see T. Bäck, Evolutionary Algorithms in Theory and Practice,
+Oxford University Press, New York (1996)).
+
+example_himmelblau: Himmelblau's minimization problem (see http://en.wikipedia.org/wiki/Himmelblau's_function).
+
+example_rosenbrock: The Rosenbrock minimization problem (see http://en.wikipedia.org/wiki/Rosenbrock_function).
+
+example_gaussian: Multivariate Gaussian with uncorrelated parameters.
+
+---------------------------------------------------------------------------
+
+Common Problems & FAQs:
+
+
+1. MultiNest crashes after a segmentation fault.
+
+Try increasing the stack size (ulimit -s unlimited on Linux) & resume your job.
+
+
+2. Output files (.txt & post_equal_weights.dat) have very few (of order tens) points.
+
+If tol is set to a reasonable value (tol <= 1) & the job hasn't finished, then it is possible for these files to
+have very few points in them. If even after the completion of the job these files have very few points, then increase the stack
+size (ulimit -s unlimited on Linux) & resume the job again.
+
+
+3. Not all modes are being reported in the stats file.
+
+The stats file reports all the modes with local log-evidence value greater than Ztol. Set Ztol to a very large
+negative number (e.g. -1.d90) in order for the stats file to report all the modes found.
+
+
+4. Compilation fails with an error something like 'can not find nestrun function'.
+
+Check the symbol table for your platform (nm libnest3.a | grep nestrun) & edit the multinest.h file to define NESTRUN
+appropriately.
+
+
+5. When to use the constant efficiency mode?
+
+If the sampling efficiency with the standard MultiNest (when ceff = F) is very low & the no. of free parameters is relatively
+high (roughly greater than 30), then constant efficiency mode can be used to get a higher sampling efficiency. Users should
+ask for a target efficiency of around 5% or lower (efr <= 0.05) in constant efficiency mode & ensure that the posteriors are stable
+when a slightly lower value is used for the target efficiency. Evidence values obtained with constant efficiency mode will not be
+accurate.
+
+---------------------------------------------------------------------------
diff --git a/multinest/kmeans_clstr.f90 b/multinest/kmeans_clstr.f90
new file mode 100755
index 0000000..a732500
--- /dev/null
+++ b/multinest/kmeans_clstr.f90
@@ -0,0 +1,1574 @@
+! soft k-means clustering with k=2
+! D.J.C. MacKay, Information Theory, Inference & Learning Algorithms, 2003, p.304
+! 
Aug 2006 + +module kmeans_clstr + use RandomNS + use utils1 + implicit none + + +contains + +!---------------------------------------------------------------------- + + !soft k-means corresponding to the model of axis aligned Gaussians (see + !MacKay, 2003) + subroutine kmeans(k,pt,npt,numdim,cluster,min_pt) + implicit none + + integer k,numdim,npt + double precision pt(numdim,npt) + double precision sigsq(k,numdim),means(k,numdim),wt(k),r(npt,k) + integer cluster(npt) + double precision sumr,totR(k),temp,covmat(numdim,numdim),mean(numdim) + integer i,j,indx(1),x,nochg,count,min_pt + logical clstrd + double precision urv + + if(k>npt/min_pt+1) k=npt/min_pt+1 + + clstrd=.false. + nochg=0 + count=0 + + do i=1,numdim + mean(i)=sum(pt(i,1:npt))/dble(npt) + end do + + call calc_covmat(npt,numdim,pt,mean,covmat) + + do i=1,k + wt(i)=ranmarns(0) + urv=ranmarns(0) + x=int(dble(npt)*urv)+1 + means(i,:)=pt(:,x) + end do + +! means(1,1)=0 +! means(1,2)=0 +! means(2,1)=1. +! means(2,2)=1. + + temp=huge(1.d0) + do j=1,numdim + if(covmat(j,j)npt/min_pt+1) k=npt/min_pt+1 + clstrd=.false. + nochg=0 + + do i=1,numdim + mean(i)=sum(pt(i,1:npt))/dble(npt) + end do + + call calc_covmat(npt,numdim,pt,mean,covmat) + + do i=1,k + wt(i)=ranmarns(0) + urv=ranmarns(0) + x=int(dble(npt)*urv)+1 + means(i,:)=pt(:,x) + end do + + temp=huge(1.d0) + do j=1,numdim + if(covmat(j,j)npt/min_pt+1) k=npt/min_pt+1 + + if(k==1) then + do i=1,numdim + means(1,i)=sum(pt(i,1:npt))/dble(npt) + enddo + cluster=1 + return + endif + + clstrd=.false. + nochg=0 + +! call ranmarns(urv) +! x=int(dble(npt)*urv)+1 +! means(1,:)=pt(:,x) +! temp=-1. +! do i=2,k +! lmean(:)=sum(means(1:i-1,:))/dble(i-1) +! do j=1,npt +! dist=sum((lmean(:)-pt(:,j))**2.) +! if(dist>temp) then +! temp=dist +! x=j +! end if +! end do +! means(i,:)=pt(:,x) +! end do + +! lmean(:)=means(1,:) +! do i=1,k/2+1 +! do +! flag=.false. +! call ranmarns(urv) +! x=int(dble(npt)*urv)+1 +! do j=1,i-1 +! if(x==scrap(j)) then +! flag=.true. +! exit +! end if +! end do +! if(.not.flag) exit +! end do +! scrap(i)=x +! means(i*2-1,1:numdim)=pt(1:numdim,x) +! if(i*2>k) exit +! means(i*2,1:numdim)=2.*lmean(1:numdim)-pt(1:numdim,x) +! end do + + !choose random points as starting positions + do i=1,k + do + flag=.false. + urv=ranmarns(0) + x=int(dble(npt)*urv)+1 + do j=1,i-1 + if(x==scrap(j)) then + flag=.true. + exit + endif + enddo + if(flag) then + cycle + else + scrap(i)=x + exit + endif + enddo + means(i,:)=pt(:,x) + enddo + + + old_cluster=0 + + do + do i=1,npt + temp=huge(1.d0) + do j=1,k + dist=sum((means(j,:)-pt(:,i))**2.) + if(distmin_pt) then + d1=sum((means(i,:)-pt(:,j))**2.) + i3=0 + do i2=i1,1,-1 + if(d1npt/min_pt+1) k=npt/min_pt+1 + distortion=0. + totR=0 + + !choose a random point to be the cluster center + cluster=1 + totR(1)=npt + do j=1,numdim + means(1,j)=sum(pt(j,:))/dble(npt) + end do + + if(k==1) return + + do m=2,k + clstrd=.false. + + !pick the cluster with the max distortion + indx=maxloc(distortion(1:m-1)) + + !pick a random point from this cluster as new cluster center + j=0 + urv=ranmarns(0) + x=int(dble(totR(indx(1)))*urv)+1 + do i=1,npt + if(cluster(i)==indx(1)) j=j+1 + if(j==x) exit + end do + means(cluster(i),:)=(means(cluster(i),:)*dble(totR(cluster(i)))-pt(:,i))/dble(totR(cluster(i))-1) + means(m,:)=pt(:,i) + totR(cluster(i))=totR(cluster(i))-1 + totR(m)=1 + cluster(i)=m + + do + clstrd=.true. 
+ + !check whether each point is closest to its own cluster or to the newly formed + !one & assign accordingly + do i=1,npt + if(sum((pt(1:numdim,i)-means(cluster(i),1:numdim))**2.)> & + sum((pt(1:numdim,i)-means(m,1:numdim))**2.))then + !point moved so not finished yet + clstrd=.false. + + !update cluster data + totR(cluster(i))=totR(cluster(i))-1 + cluster(i)=m + totR(m)=totR(m)+1 + end if + end do + + !update means & distortion of each cluster + means=0. + distortion=0. + do i=1,npt + means(cluster(i),1:numdim)=means(cluster(i),1:numdim)+pt(1:numdim,i) + distortion(cluster(i))=distortion(cluster(i))+sum(pt(1:numdim,i)**2.) + end do + do j=1,numdim + means(1:m,j)=means(1:m,j)/dble(totR(1:m)) + distortion(1:m)=distortion(1:m)-dble(totR(1:m))*(means(1:m,j)**2.) + end do + + if(clstrd) exit + end do + end do + + + end subroutine kmeans4 + +!---------------------------------------------------------------------- + + !2-means with k=2 & clusters placed in their expected positions as the starting points + subroutine kmeans6(pt,npt,numdim,cluster) + implicit none + + integer numdim,npt + double precision pt(numdim,npt) + double precision means(2,numdim) + integer cluster(npt) + integer totR(2) + integer i,j,x + logical clstrd + double precision covmat(numdim,numdim),evec(numdim,numdim),eval(numdim) + + !calculate the mean of the data + do j=1,numdim + means(1,j)=sum(pt(j,1:npt))/dble(npt) + end do + + !do principal component analysis + call calc_covmat(npt,numdim,pt,means(1,:),covmat) + evec=covmat + call diagonalize(evec,eval,numdim,.false.) + + !place the cluster centers in their ex[ected locations + means(2,:)=means(1,:)+evec(:,numdim)*sqrt(2.*eval(numdim)/3.1416) + means(1,:)=means(1,:)-evec(:,numdim)*sqrt(2.*eval(numdim)/3.1416) + + cluster=1 + totR(1)=npt + totR(2)=0 + + do + clstrd=.true. + + !check whether each point is closest to its own cluster or to the newly formed + !one & assign accordingly + do i=1,npt + if(cluster(i)==1) then + x=2 + else + x=1 + end if + if(sum((pt(1:numdim,i)-means(cluster(i),1:numdim))**2.)> & + sum((pt(1:numdim,i)-means(x,1:numdim))**2.))then + !point moved so not finished yet + clstrd=.false. + + !update cluster data + totR(cluster(i))=totR(cluster(i))-1 + cluster(i)=x + totR(x)=totR(x)+1 + end if + end do + + !update means of each cluster + means=0. 
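+		!accumulate each cluster's coordinate sums from its member points, then divide by the cluster populations (totR) below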
+ do i=1,npt + means(cluster(i),1:numdim)=means(cluster(i),1:numdim)+pt(1:numdim,i) + end do + do j=1,numdim + means(1:2,j)=means(1:2,j)/dble(totR(1:2)) + end do + + if(clstrd) exit + end do + + end subroutine kmeans6 + +!---------------------------------------------------------------------- + !Incremental K-means with Unknown K + !(Pham, Dimov, Nguyen, 2005, "Incremental K-means Algorithm") + !(Pham, Dimov, Nguyen, 2005, "Selection of K in K-means Clustering") + subroutine kmeans5(k,pt,npt,numdim,cluster,min_pt) + implicit none + + integer k,numdim,npt + double precision pt(numdim,npt) + double precision means(2,npt/(numdim+1),numdim),meanst(npt/(numdim+1),numdim) + double precision mean(npt/(numdim+1),numdim) + integer cls(2,npt),clst(npt),cluster(npt) + integer totR(2,npt/(numdim+1)),totRt(npt/(numdim+1))!total points in each cluster + integer i,j,x,m,min_pt + logical clstrd + !distortion of each cluster & total distortion of the recent two iterations + double precision distortion(2,npt/(numdim+1)),totDistor(2),distor(npt/(numdim+1)) + integer indx(1) + double precision f(npt/(numdim+1)),fmin,a(npt/(numdim+1))!evaluation function + double precision S(npt/(numdim+1))!total distortion + integer count!counter for evaluation function + + if(k>npt/min_pt+1) k=npt/min_pt+1 + distortion=0. + totR=0 + f=0. + a=0. + S=0. + count=0 + + !choose a random point to be the cluster center + cluster=1 + cls(2,:)=1 + totR(2,1)=npt + do j=1,numdim + means(2,1,j)=sum(pt(j,:))/dble(npt) + S(1)=S(1)+sum((pt(j,1:npt)-means(2,1,j))**2.) + end do + mean(:,:)=means(2,:,:) + + f(1)=1. + fmin=1. + k=1 + + a(2)=1.-3./dble(4*numdim) + m=1 + do + m=m+1 + clstrd=.false. + + !pick the cluster with the max distortion + indx=maxloc(distortion(2,1:m-1)) + !break this cluster + clst(:)=cls(2,:) + totRt(:)=totR(2,:) + meanst(:,:)=means(2,:,:) + x=m-1 + j=indx(1) + call kmeansDis(x,pt,npt,numdim,clst,j,totRt,distor,meanst) + cls(1,:)=clst(:) + totR(1,:)=totRt(:) + means(1,:,:)=meanst(:,:) + distortion(1,:)=distor(:) + totDistor(1)=sum(distor(1:m)) + + do i=1,m-1 + if(i==indx(1)) cycle + if(distortion(2,i)>1.5*(totDistor(2)-totDistor(1))) then + clst(:)=cls(2,:) + totRt(:)=totR(2,:) + meanst(:,:)=means(2,:,:) + x=m-1 + call kmeansDis(x,pt,npt,numdim,clst,i,totRt,distor,meanst) + if(sum(distor(1:m)) & + sum((pt(1:numdim,i)-means(k+1,1:numdim))**2))then + !point moved so not finished yet + clstrd=.false. + + !update cluster data + totR(cluster(i))=totR(cluster(i))-1 + cluster(i)=k+1 + totR(k+1)=totR(k+1)+1 + end if + end do + + !update means & distortion of each cluster + means=0. + distortion=0. + do i=1,npt + means(cluster(i),1:numdim)=means(cluster(i),1:numdim)+pt(1:numdim,i) + distortion(cluster(i))=distortion(cluster(i))+sum(pt(1:numdim,i)**2) + end do + do j=1,numdim + means(1:k+1,j)=means(1:k+1,j)/dble(totR(1:k+1)) + distortion(1:k+1)=distortion(1:k+1)-dble(totR(1:k+1))*(means(1:k+1,j)**2) + end do + + if(clstrd) exit + end do + + end subroutine kmeansDis + +!---------------------------------------------------------------------- + + subroutine cal_means(numdim,npt,p,mean) + implicit none + integer numdim,npt + double precision p(numdim,npt) + double precision mean(numdim) + integer i,j + + mean=0. 
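+	!component-wise sum over all npt points, normalised by npt to give the sample mean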
+ do i=1,numdim + do j=1,npt + mean(i)=mean(i)+p(i,j) + end do + mean(i)=mean(i)/dble(npt) + end do + + return + + end subroutine cal_means + +!---------------------------------------------------------------------- + +end module kmeans_clstr diff --git a/multinest/nested.F90 b/multinest/nested.F90 new file mode 100755 index 0000000..c617692 --- /dev/null +++ b/multinest/nested.F90 @@ -0,0 +1,2900 @@ +! Do nested sampling algorithm to calculate Bayesian evidence +! Aug 2012 +! Farhan Feroz + +module Nested + use utils1 + use kmeans_clstr + use xmeans_clstr + use posterior + use priors + implicit none + +#ifdef MPI + include 'mpif.h' + integer mpi_status(MPI_STATUS_SIZE), errcode +#endif + integer my_rank + integer maxCls,maxeCls + integer mpi_nthreads !total no. of mpi processors + integer min_pt,totPar + integer nCdims !total no. of parameters on which clustering should be done + integer nlive ! Number of live points + integer ndims ! Number of dimensions + integer nsc_def !no. of iterations per every sub-clustering step + integer updInt !update interval + double precision Ztol !lowest local evidence for which samples to produce + double precision tol ! tolerance at end + double precision ef + logical multimodal ! multimodal or unimodal sampling + logical ceff ! constant efficiency? + integer numlike,globff + double precision logZero + integer maxIter + logical fback,resumeFlag,dlive,genLive,dino + !output files name + character(LEN=100)physname,broot,rname,resumename,livename,evname + !output file units + integer u_ev,u_resume,u_phys,u_live + double precision gZ,ginfo !total log(evidence) & info + integer count,sCount + logical, dimension(:), allocatable :: pWrap + logical mWrap,aWrap !whether to do wraparound for mode separation + logical debug, prior_warning, resume, outfile + +contains + + subroutine nestRun(nest_mmodal,nest_ceff,nest_nlive,nest_tol,nest_ef,nest_ndims,nest_totPar,nest_nCdims,maxClst, & + nest_updInt,nest_Ztol,nest_root,seed,nest_pWrap,nest_fb,nest_resume,nest_outfile,initMPI,nest_logZero,nest_maxIter, & + loglike,dumper,context) + + implicit none + + integer nest_ndims,nest_nlive,nest_updInt,context,seed,i + integer maxClst,nest_nsc,nest_totPar,nest_nCdims,nest_pWrap(*),nest_maxIter + logical nest_mmodal,nest_fb,nest_resume,nest_ceff,nest_outfile,initMPI + character(LEN=100) nest_root + double precision nest_tol,nest_ef,nest_Ztol,nest_logZero + + INTERFACE + !the likelihood function + subroutine loglike(Cube,n_dim,nPar,lnew,context_pass) + integer n_dim,nPar,context_pass + double precision lnew,Cube(nPar) + end subroutine loglike + end INTERFACE + + INTERFACE + !the user dumper function + subroutine dumper(nSamples, nlive, nPar, physLive, posterior, paramConstr, maxLogLike, logZ, logZerr, context_pass) + integer nSamples, nlive, nPar, context_pass + double precision, pointer :: physLive(:,:), posterior(:,:), paramConstr(:) + double precision maxLogLike, logZ, logZerr + end subroutine dumper + end INTERFACE + +#ifdef MPI + if( initMPI ) then + !MPI initializations + call MPI_INIT(errcode) + if (errcode/=MPI_SUCCESS) then + write(*,*)'Error starting MPI. Terminating.' 
+ call MPI_ABORT(MPI_COMM_WORLD,errcode) + end if + endif + call MPI_COMM_RANK(MPI_COMM_WORLD, my_rank, errcode) + call MPI_COMM_SIZE(MPI_COMM_WORLD, mpi_nthreads, errcode) +#else + mpi_nthreads=1 + my_rank=0 +#endif + nest_nsc=50 + nlive=nest_nlive + Ztol=nest_Ztol + updInt=nest_updInt + logZero=nest_logZero + maxIter=nest_maxIter + if(maxIter<=0) maxIter=huge(1) + + ndims=nest_ndims + totPar=nest_totPar + nCdims=nest_nCdims + debug=.false. + prior_warning=.true. + resume=nest_resume + outfile=nest_outfile + if(.not.outfile) resume=.false. + + if(nCdims>ndims) then + if(my_rank==0) then + write(*,*)"ERROR: nCdims can not be greater than ndims." + write(*,*)"Aborting" + endif +#ifdef MPI + call MPI_ABORT(MPI_COMM_WORLD,errcode) +#endif + stop + endif + + if(my_rank==0) then + count=0 + sCount=0 + + broot=nest_root + rname = trim(broot) + fback = nest_fb + + !output file info + !setup the output files + resumename = trim(rname)//'resume.dat' + physname = trim(rname)//'phys_live.points' + livename = trim(rname)//'live.points' + evname = trim(rname)//'ev.dat' + !setup the output file units + u_ev=55 + u_phys=57 + u_live=59 + u_resume=61 + endif + + allocate(pWrap(ndims)) + mWrap=.false. + aWrap=.true. + do i=1,ndims + if(nest_pWrap(i)==0) then + pWrap(i)=.false. + if(i<=nCdims) aWrap=.false. + else + pWrap(i)=.true. + if(i<=nCdims) mWrap=.true. + endif + enddo + multimodal=nest_mmodal + ceff=nest_ceff + tol=nest_tol + ef=nest_ef + if(ef>=1d6) then + dino=.false. + ef=1d0 + min_pt=ndims+1 + elseif(ceff) then + if(ef>1d0) then + if(my_rank==0) then + write(*,*)"ERROR: Can not undersample in constant efficiency mode." + write(*,*)"Aborting" + endif +#ifdef MPI + call MPI_ABORT(MPI_COMM_WORLD,errcode) +#endif + stop + endif + dino=.true. + min_pt=2 + else + dino=.true. + min_pt=2 + endif + nsc_def=nest_nsc + + if(.not.multimodal) then + maxCls=1 + else + maxCls=maxClst*2 + endif + + maxeCls=nlive/min_pt + + if(seed<0) then + !take the seed from system clock + call InitRandomNS(mpi_nthreads) + else + call InitRandomNS(mpi_nthreads,seed) + endif + + if(my_rank==0) then + !set the resume flag to true if the resume file exists else to false + if(resume) then + inquire(file=resumename,exist=resumeFlag) + if(.not.resumeFlag) write(*,*)"MultiNest Warning: no resume file found, starting from scratch" + else + resumeFlag=.false. + endif + + write(*,*)"*****************************************************" + write(*,*)"MultiNest v2.18" + write(*,*)"Copyright Farhan Feroz & Mike Hobson" + write(*,*)"Release Aug 2012" + write(*,*) + write(*,'(a,i4)')" no. of live points = ",nest_nlive + write(*,'(a,i4)')" dimensionality = ",nest_ndims + if(resumeFlag) write(*,'(a)')" resuming from previous job" + write(*,*)"*****************************************************" + + if (fback) write (*,*) 'Starting MultiNest' + + + !create the output files + if(.not.resumeFlag .and. 
outfile) then + open(unit=u_ev,file=evname,status='replace') + close(u_ev) + endif + endif + + call Nestsample(loglike, dumper, context) + deallocate(pWrap) + call killRandomNS() +#ifdef MPI + if( initMPI ) call MPI_FINALIZE(errcode) +#endif + + end subroutine nestRun + +!---------------------------------------------------------------------- + + subroutine Nestsample(loglike, dumper, context) + + implicit none + + integer context + double precision, allocatable :: p(:,:), phyP(:,:) !live points + double precision, allocatable :: l(:) !log-likelihood + double precision vnow1!current vol + double precision ltmp(totPar+2) + character(len=100) fmt + integer np,i,j,k,ios + + + INTERFACE + !the likelihood function + subroutine loglike(Cube,n_dim,nPar,lnew,context_pass) + integer n_dim,nPar,context_pass + double precision lnew,Cube(nPar) + end subroutine loglike + end INTERFACE + + INTERFACE + !the user dumper function + subroutine dumper(nSamples, nlive, nPar, physLive, posterior, paramConstr, maxLogLike, logZ, logZerr, context_pass) + integer nSamples, nlive, nPar, context_pass + double precision, pointer :: physLive(:,:), posterior(:,:), paramConstr(:) + double precision maxLogLike, logZ, logZerr + end subroutine dumper + end INTERFACE + + + allocate( p(ndims,nlive+1), phyP(totPar,nlive+1), l(nlive+1) ) + + if(my_rank==0) then + np=ndims + globff=0 + numlike=0 + vnow1=1.d0 + + write(fmt,'(a,i5,a)') '(',np+1,'E28.18)' + + genLive=.true. + + if(resumeflag) then + !check if the last run was aborted during the live points generation + open(unit=u_resume,file=resumename,status='old') + read(u_resume,*)genLive + + if( .not.genLive ) then + read(u_resume,*)i,j,j,j + + if( j /= nlive ) then + write(*,*)"ERROR: no. of live points in the resume file is not equal to the the no. passed to nestRun." + write(*,*)"Aborting" +#ifdef MPI + call MPI_ABORT(MPI_COMM_WORLD,errcode) +#endif + stop + endif + endif + close(u_resume) + + if( .not.genLive ) then + j = 0 + open(unit=u_ev,file=evname,status='old') + write(fmt,'(a,i2.2,a)') '(',totPar+2,'E28.18,i3)' + do + read(55,*,IOSTAT=ios) ltmp(1:totPar+2),k + + !end of file? + if(ios<0) exit + + j = j + 1 + enddo + + close(u_ev) + + if( j + nlive /= i ) then + write(*,*)"ERROR: no. of points in ev.dat file is not equal to the no. specified in resume file." + write(*,*)"Aborting" +#ifdef MPI + call MPI_ABORT(MPI_COMM_WORLD,errcode) +#endif + stop + endif + endif + + endif + + gZ=logZero + ginfo=0.d0 + endif + +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) + call MPI_BCAST(genLive,1,MPI_LOGICAL,0,MPI_COMM_WORLD,errcode) +#endif + + if(genLive) then + if(my_rank==0 .and. fback) write(*,*) 'generating live points' + + call gen_initial_live(p,phyP,l,loglike,dumper,context) + + if(my_rank==0) then + globff=nlive + numlike=nlive + if(fback) write(*,*) 'live points generated, starting sampling' + endif + endif + +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) +#endif + + call clusteredNest(p,phyP,l,loglike,dumper,context) + + if(my_rank==0) then + write(*,*)"ln(ev)=",gZ,"+/-",sqrt(ginfo/dble(nlive)) + write(*,'(a,i12)')' Total Likelihood Evaluations: ', numlike + write(*,*)"Sampling finished. Exiting MultiNest" + setBlk=.false. 
+ endif + + deallocate( p, phyP, l ) + + end subroutine Nestsample + +!---------------------------------------------------------------------- + + subroutine gen_initial_live(p,phyP,l,loglike,dumper,context) + + implicit none + + integer i,j,iostatus,idum,k,m,nptPerProc,nGen,nstart,nend,context + double precision, allocatable :: pnewP(:,:), phyPnewP(:,:), lnewP(:) + double precision p(ndims,nlive+1), phyP(totPar,nlive+1), l(nlive+1) + integer id + character(len=100) fmt,fmt2 +#ifdef MPI + double precision, allocatable :: tmpl(:), tmpp(:,:), tmpphyP(:,:) + integer q +#endif + + INTERFACE + !the likelihood function + subroutine loglike(Cube,n_dim,nPar,lnew,context_pass) + integer n_dim,nPar,context_pass + double precision lnew,Cube(nPar) + end subroutine loglike + end INTERFACE + + INTERFACE + !the user dumper function + subroutine dumper(nSamples, nlive, nPar, physLive, posterior, paramConstr, maxLogLike, logZ, logZerr, context_pass) + integer nSamples, nlive, nPar, context_pass + double precision, pointer :: physLive(:,:), posterior(:,:), paramConstr(:) + double precision maxLogLike, logZ, logZerr + end subroutine dumper + end INTERFACE + + allocate( pnewP(ndims,10), phyPnewP(totPar,10), lnewP(10) ) +#ifdef MPI + allocate( tmpl(10), tmpp(ndims,10), tmpphyP(totPar,10) ) +#endif + + if(my_rank==0) then + + if(outfile) then + open(unit=u_resume,file=resumename,form='formatted',status='replace') + write(u_resume,'(l2)')genLive + close(u_resume) + write(fmt,'(a,i5,a)') '(',ndims+1,'E28.18)' + write(fmt2,'(a,i5,a)') '(',totPar+1,'E28.18,i4)' + endif + + id=0 + i=0 + + !resume from previous live points generation? + if(outfile) then + if(resumeflag) then + !read hypercube-live file + open(unit=u_live,file=livename,status='old') + do + i=i+1 + read(u_live,*,IOSTAT=iostatus) p(:,i),l(i) + if(iostatus<0) then + i=i-1 + if(i>nlive) then + write(*,*)"ERROR: more than ",nlive," points in the live points file." + write(*,*)"Aborting" +#ifdef MPI + call MPI_ABORT(MPI_COMM_WORLD,errcode) +#endif + stop + endif + exit + endif + enddo + close(u_live) + + if(i>0) then + !read physical live file + open(unit=u_phys,file=physname,status='old') + do j=1,i + read(u_phys,*) phyP(:,j),l(j),idum + enddo + close(u_phys) + endif + + open(unit=u_live,file=livename,form='formatted',status='old',position='append') + open(unit=u_phys,file=physname,form='formatted',status='old',position='append') + else + open(unit=u_live,file=livename,form='formatted',status='replace') + open(unit=u_phys,file=physname,form='formatted',status='replace') + endif + endif + + j=i + nend=i + + nGen = nlive - j + nptPerProc = ceiling( dble(nGen) / dble(mpi_nthreads) ) + endif + +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) + call MPI_BCAST(nGen,1,MPI_INTEGER,0,MPI_COMM_WORLD,errcode) + call MPI_BCAST(nptPerProc,1,MPI_INTEGER,0,MPI_COMM_WORLD,errcode) +#endif + + if(nGen==0) then + if(my_rank==0) then + genLive=.false. + resumeFlag=.false. + endif + + deallocate( pnewP, phyPnewP, lnewP ) +#ifdef MPI + deallocate( tmpl, tmpp, tmpphyP ) +#endif + if( outfile ) then + close(u_live) + close(u_phys) + endif + + return + + elseif(nGen<0) then + if(my_rank==0) then + write(*,*)"ERROR: live points files have more live points than required." + write(*,*)"Aborting" + endif +#ifdef MPI + call MPI_ABORT(MPI_COMM_WORLD,errcode) +#endif + stop + endif + + k=0 + j=0 + id=my_rank + do + k=k+1 + j=j+1 + do + call getrandom(ndims,pnewP(:,j),id) ! 
start points + phyPnewP(1:ndims,j)=pnewP(1:ndims,j) + lnewP(j)=logZero + call loglike(phyPnewP(:,j),ndims,totPar,lnewP(j),context) + if(lnewP(j)>logZero) exit + enddo + if(k==nptPerProc .or. j==10) then + if(k==nptPerProc) then + i=mod(nptPerProc,10) + if(i==0) i=10 + else + i=10 + j=0 + endif + +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) +#endif + + if(id/=0) then +#ifdef MPI + !send the generated points to the root node + call MPI_SEND(lnewP(1:i),i,MPI_DOUBLE_PRECISION,0,id,MPI_COMM_WORLD,errcode) + call MPI_SEND(pnewP(1:ndims,1:i),ndims*i,MPI_DOUBLE_PRECISION,0,id,MPI_COMM_WORLD,errcode) + call MPI_SEND(phyPnewP(1:totPar,1:i),totPar*i,MPI_DOUBLE_PRECISION,0,id,MPI_COMM_WORLD,errcode) +#endif + else + !first write the points generated by the root node + nstart=nend+1 + p(1:ndims,nstart:nstart+i-1)=pnewP(1:ndims,1:i) + phyP(1:totPar,nstart:nstart+i-1)=phyPnewP(1:totPar,1:i) + l(nstart:nstart+i-1)=lnewP(1:i) + nend=nstart+i-1 + +#ifdef MPI + !receive the points from other nodes + do m=1,mpi_nthreads-1 + call MPI_RECV(tmpl(1:i),i,MPI_DOUBLE_PRECISION,m,m,MPI_COMM_WORLD,mpi_status,errcode) + call MPI_RECV(tmpp(1:ndims,1:i),i*ndims,MPI_DOUBLE_PRECISION,m,m,MPI_COMM_WORLD,mpi_status,errcode) + call MPI_RECV(tmpphyP(1:totPar,1:i),i*totPar,MPI_DOUBLE_PRECISION,m,m,MPI_COMM_WORLD,mpi_status,errcode) + do q = 1 , i + if( nend + 1 <= nlive ) then + l(nend + 1) = tmpl(q) + p(1 : ndims, nend + 1) = tmpp(1 : ndims, q) + phyP(1 : totPar, nend + 1) = tmpphyP(1 : totPar, q) + nend = nend + 1 + endif + enddo + enddo +#endif + + if( outfile ) then + !now write this batch to the files + do m=nstart,nend + write(u_live,fmt) p(1:ndims,m),l(m) + write(u_phys,fmt2) phyP(:,m),l(m),1 + enddo + endif + endif + endif + if(k==nptPerProc) exit + enddo + + + + deallocate( pnewP, phyPnewP, lnewP ) +#ifdef MPI + deallocate( tmpl, tmpp, tmpphyP ) +#endif + + if( outfile ) then + close(u_live) + close(u_phys) + endif + genLive=.false. + resumeFlag=.false. +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) +#endif + + end subroutine gen_initial_live + +!---------------------------------------------------------------------- + + subroutine getrandom(n,x,id) + + implicit none + + integer, intent(in) :: n + double precision, intent(out) :: x(:) + integer i,id + + ! 
--- uniform prior ---- + do i = 1,n + x(i)=ranmarNS(id) + enddo + return + end subroutine getrandom + +!---------------------------------------------------------------------- + + subroutine clusteredNest(p,phyP,l,loglike,dumper,context) + + implicit none + + + !input variables + + integer context + double precision p(ndims,nlive+1) !live points + double precision phyP(totPar,nlive+1) !physical live points + double precision l(nlive+1) !log-likelihood + + + !work variables + + !misc + integer i, j, k, m, n, j1, i1, i2, i3, i4, ff, sff, n1, n2, q, nd, nd_i, nd_j, iostatus + integer num_old + integer, allocatable :: eswitchff(:), escount(:), dmin(:) + double precision d1, d2, d3, d4, d5, urv + double precision h, logX, vprev, vnext, shrink !prior volume + double precision mar_r !marginal acceptance rate + double precision gZOld !global evidence & info + logical eswitch,peswitch,cSwitch !whether to do ellipsoidal sampling or not + logical remFlag, acpt, flag, flag2 + integer funit1, funit2 !file units + character(len=100) fName1, fName2 !file names + character(len=100) fmt,fmt1 + + !diagnostics for determining when to do eigen analysis + integer neVol + parameter(neVol=4) + double precision, dimension(:), allocatable :: totVol, x1, x2, y1, y2, slope, intcpt, cVolFrac, pVolFrac + double precision, dimension(:,:,:), allocatable :: eVolFrac + + !info for output file update + double precision, allocatable :: evData(:,:), evDataAll(:), evDataTemp(:) + + !isolated cluster info + integer ic_n !no. of nodes + integer, allocatable :: ic_sc(:), ic_npt(:) + logical, allocatable :: ic_done(:) + integer, dimension(:), allocatable :: ic_fNode, ic_nsc, ic_nBrnch + double precision, dimension(:,:,:), allocatable :: ic_brnch, ic_llimits, ic_plimits + double precision, allocatable :: ic_climits(:,:,:), ic_volFac(:) + double precision, dimension(:), allocatable :: ic_Z, ic_Zold, ic_info, ic_vnow, ic_hilike, ic_inc + double precision, dimension(:,:), allocatable :: ic_eff + logical, dimension(:), allocatable :: ic_reme, ic_rFlag, ic_chk + logical modeFound + + !means & standard deviations of the live points (for prior edge detection) + double precision, dimension(:,:), allocatable :: ic_mean, ic_sigma + double precision lPts(ndims) + + !sub-cluster properties + integer sc_n !no. of sub-clusters + integer, dimension(:), allocatable :: sc_npt, nptk, nptx ,sc_node, nodek, sck + double precision, dimension(:,:), allocatable :: meank, sc_eval, evalk + double precision, dimension(:,:,:), allocatable :: sc_invcov, invcovk, sc_evec, eveck, tMatk + double precision, dimension(:), allocatable :: kfack, volk, effk + double precision, allocatable :: sc_mean(:,:), sc_tmat(:,:,:), sc_kfac(:), sc_eff(:), sc_vol(:) + + !auxiliary points (to be re-arranged with main points during clustering) + integer naux !dimensionality of aux points + double precision, dimension(:,:), allocatable :: aux, pt + + !rejected point info + double precision lowlike !lowest log-like + double precision, allocatable :: lowp(:), lowphyP(:) !point with the lowlike + integer indx(1) !point no. of lowlike + + !new point + double precision lnew + double precision, allocatable :: pnew(:), phyPnew(:) ! 
new point + double precision, dimension(:,:,:), allocatable :: pnewa, phyPnewa + double precision, dimension(:,:), allocatable :: lnewa + integer, dimension(:), allocatable :: rIdx + integer, dimension(:,:), allocatable :: sEll + logical, dimension(:), allocatable :: remain + + !mode separation + integer nCdim + + INTERFACE + !the likelihood function + subroutine loglike(Cube,n_dim,nPar,lnew,context_pass) + integer n_dim,nPar,context_pass + double precision lnew,Cube(nPar) + end subroutine loglike + end INTERFACE + + INTERFACE + !the user dumper function + subroutine dumper(nSamples, nlive, nPar, physLive, posterior, paramConstr, maxLogLike, logZ, logZerr, context_pass) + integer nSamples, nlive, nPar, context_pass + double precision, pointer :: physLive(:,:), posterior(:,:), paramConstr(:) + double precision maxLogLike, logZ, logZerr + end subroutine dumper + end INTERFACE + + + allocate( eswitchff(maxCls), escount(maxCls), dmin(maxCls) ) + allocate( evData(updInt,totPar+3) ) + allocate( ic_sc(maxCls), ic_npt(maxCls) ) + allocate( ic_done(0:maxCls) ) + allocate( ic_climits(maxCls,ndims,2), ic_volFac(maxCls) ) + allocate( sc_mean(maxeCls,ndims), sc_tmat(maxeCls,ndims,ndims), sc_kfac(maxeCls), sc_eff(maxeCls), sc_vol(maxeCls) ) + allocate( lowp(ndims), lowphyP(totPar) ) + allocate( pnew(ndims), phyPnew(totPar) ) + + + !initializations + ic_done=.false. + ic_npt=nlive + ic_climits(:,:,2)=1d0 + ic_climits(:,:,1)=0d0 + ic_volFac(:)=1d0 + + if(my_rank==0) then + !memory allocation + allocate(evDataAll(1)) + allocate(sc_npt(maxeCls), nptk(maxeCls), nptx(nlive), meank(maxeCls,ndims), & + sc_eval(maxeCls,ndims), evalk(maxeCls,ndims), sc_invcov(maxeCls,ndims,ndims), & + invcovk(maxeCls,ndims,ndims), tMatk(maxeCls,ndims,ndims), & + sc_evec(maxeCls,ndims,ndims), eveck(maxeCls,ndims,ndims), kfack(maxeCls), & + effk(maxeCls), volk(maxeCls), & + sc_node(maxeCls),nodek(maxeCls),sck(maxCls)) + allocate(pt(ndims,nlive), aux(ndims+totPar+4-nCdims,nlive)) + allocate(ic_fNode(maxCls),ic_nsc(maxCls),ic_nBrnch(maxCls), & + ic_brnch(maxCls,maxCls,2),ic_reme(maxCls),ic_rFlag(maxCls),ic_z(maxCls),ic_zold(maxCls),ic_info(maxCls), & + ic_vnow(maxCls),ic_hilike(maxCls),ic_inc(maxCls),ic_chk(maxCls),ic_llimits(maxCls,ndims,2)) + if(multimodal) allocate(ic_plimits(maxCls,nCdims,2)) + if(ceff) then + allocate(ic_eff(maxCls,4)) + ic_eff(:,1:2)=0d0 + ic_eff(:,3)=1d0 + ic_eff(:,4)=1d0 + endif + allocate(pnewa(maxCls,mpi_nthreads,ndims),phyPnewa(maxCls,mpi_nthreads,totPar),lnewa(maxCls,mpi_nthreads), & + sEll(maxCls,mpi_nthreads),remain(maxCls),rIdx(maxCls)) + allocate(totVol(maxCls),eVolFrac(maxCls,neVol*2,neVol),x1(maxCls),x2(maxCls),y1(maxCls), & + y2(maxCls),slope(maxCls),intcpt(maxCls),cVolFrac(maxCls),pVolFrac(maxCls)) + if( prior_warning ) allocate( ic_mean(maxCls, ndims), ic_sigma(maxCls, ndims) ) + + !global logZ = log(0) + gZ=logZero + gZOld=logZero + ic_Z=logZero + ic_zold=logZero + ginfo=0.d0 + ic_info=0.d0 + + ic_llimits(:,:,2)=1d0 + ic_llimits(:,:,1)=0d0 + if(multimodal) then + ic_plimits(:,:,2)=huge(1d0) + ic_plimits(:,:,1)=-huge(1d0) + endif + + !just one node to sttart with + ic_n=1 + ic_fNode(1)=0 + ic_sc(1)=0 + ic_nsc=nsc_def + ic_reme=.false. + ic_nBrnch=0 + + !set the prior volume + ic_vnow=0.d0 + ic_vnow(1)=1.d0 + + !no ellipsoidal sampling to start with + eswitch=.false. + peswitch=.false. + escount=0 + + !no leftover points + remain=.false. 
+ rIdx=1 + + !no sub-clusters at the beginning + sc_n=0 + totVol=1.d0 + sc_node=1 + + !eigen analysis diagnostics + eVolFrac=0.d0 + + dmin=ndims+1 + ic_rFlag=.false. + sff=0 + + if(resumeFlag) then + !read the resume file + funit1=u_resume + + open(unit=funit1,file=resumename,status='old') + + read(funit1,*)genLive + read(funit1,*)globff,numlike,ic_n,nlive + read(funit1,*)gZ,ginfo + ic_rFlag(1:ic_n)=.true. + read(funit1,*)eswitch + peswitch=eswitch + if(eswitch) totVol=1.d0 + + !read branching info + do i=1,ic_n + read(funit1,*)ic_nBrnch(i) + if(ic_nBrnch(i)>0) read(funit1,*)ic_brnch(i,1:ic_nBrnch(i),1),ic_brnch(i,1:ic_nBrnch(i),2) + enddo + + !read the node info + do i=1,ic_n + read(funit1,*)ic_done(i),ic_reme(i),ic_fNode(i),ic_npt(i) + read(funit1,*)ic_vnow(i),ic_Z(i),ic_info(i) + if(ceff) then + read(funit1,*)ic_eff(i,4) + ic_eff(i,3)=ic_eff(i,4) + endif + enddo + if(.not.eswitch .or. ic_n==1) ic_npt(1)=nlive + close(funit1) + eswitchff=0 + + !read hypercube-live file + open(unit=u_live,file=livename,status='old') + i=0 + do + i=i+1 + read(u_live,*,IOSTAT=iostatus) p(1:ndims,i),l(i) + if(iostatus<0) then + i=i-1 + if(inlive) then + write(*,*)"ERROR: live points file has greater than ",nlive," points." + write(*,*)"Aborting" +#ifdef MPI + call MPI_ABORT(MPI_COMM_WORLD,errcode) +#endif + stop + endif + enddo + close(u_live) + + !read physical-live file + open(unit=u_phys,file=physname,status='old') + i=0 + do + i=i+1 + read(u_phys,*,IOSTAT=iostatus) phyP(1:totPar,i),d1,j + if(iostatus<0) then + i=i-1 + if(inlive) then + write(*,*)"ERROR: phys live points file has greater than ",nlive," points." + write(*,*)"Aborting" +#ifdef MPI + call MPI_ABORT(MPI_COMM_WORLD,errcode) +#endif + stop + endif + enddo + close(u_phys) + endif + + !find the highest likelihood for each node + j=0 + ic_done(0)=.true. + do i=1,ic_n + ic_hilike(i)=maxval(l(j+1:j+ic_npt(i))) + lowlike=minval(l(j+1:j+ic_npt(i))) + ic_inc(i)=ic_hilike(i)+log(ic_vnow(i))-ic_Z(i) + if(ic_npt(i)50)) then + ic_done(i)=.true. + else + ic_done(i)=.false. + ic_done(0)=.false. + endif + + !set the limits + if(.not.ic_done(i)) then + ic_llimits(i,:,1)=p(:,j+1) + ic_llimits(i,:,2)=p(:,j+1) + if(multimodal) then + ic_plimits(i,1:nCdims,1)=phyP(1:nCdims,j+1) + ic_plimits(i,1:nCdims,2)=phyP(1:nCdims,j+1) + endif + do k=j+2,j+ic_npt(i) + if(multimodal) then + call setLimits(multimodal,ndims,nCdims,ic_llimits(i,:,:), & + ic_plimits(i,:,:),p(:,k),phyP(1:nCdims,k),ic_climits(i,:,:)) + else + call setLimits(multimodal,ndims,nCdims,ic_llimits(i,:,:), & + ic_climits(i,:,:),p(:,k),phyP(1:nCdims,k),ic_climits(i,:,:)) + endif + enddo + endif + + j=j+ic_npt(i) + enddo + endif + +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) + call MPI_BCAST(ic_n,1,MPI_INTEGER,0,MPI_COMM_WORLD,errcode) + call MPI_BCAST(ic_npt(1:ic_n),ic_n,MPI_INTEGER,0,MPI_COMM_WORLD,errcode) +#endif + + do ff=1,maxIter + +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) + call MPI_BCAST(ic_done(0:ic_n),ic_n+1,MPI_LOGICAL,0,MPI_COMM_WORLD,errcode) +#endif + !stopping condition reached + if(ic_done(0)) then + if(my_rank==0) then + + if(outfile) then + !write the resume file + funit1=u_resume + fName1=resumename + write(fmt,'(a,i5,a)') '(',totPar+1,'E28.18,i4)' + open(unit=funit1,file=fName1,form='formatted',status='replace') + write(funit1,'(l2)').false. 
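+				!followed by the counters (globff, numlike, no. of nodes, nlive), the global evidence & information, the ellipsoidal-sampling flag, and the branching & per-node state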
+ write(funit1,'(4i12)')globff,numlike,ic_n,nlive + write(funit1,'(2E28.18)')gZ,ginfo + write(funit1,'(l2)')eswitch + !write branching info + do i=1,ic_n + write(funit1,'(i4)')ic_nBrnch(i) + if(ic_nBrnch(i)>0) then + write(fmt,'(a,i5,a)') '(',2*ic_nBrnch(i),'E28.18)' + write(funit1,fmt)ic_brnch(i,1:ic_nBrnch(i),1),ic_brnch(i,1:ic_nBrnch(i),2) + endif + enddo + !write the node info + do i=1,ic_n + write(funit1,'(2l2,2i6)')ic_done(i),ic_reme(i),ic_fNode(i),ic_npt(i) + write(funit1,'(3E28.18)')ic_vnow(i),ic_Z(i),ic_info(i) + if(ceff) write(funit1,'(1E28.18)')ic_eff(i,4) + enddo + close(funit1) + endif + + !fback + if(fback) call gfeedback(gZ,numlike,globff,.false.) + call pos_samp(Ztol,globff,broot,nlive,ndims,nCdims,totPar,multimodal,outfile,gZ,ginfo,ic_n,ic_Z(1:ic_n), & + ic_info(1:ic_n),ic_reme(1:ic_n),ic_vnow(1:ic_n),ic_npt(1:ic_n),ic_nBrnch(1:ic_n),ic_brnch(1:ic_n,:,1),phyP(:,1:nlive), & + l(1:nlive),evDataAll,dumper,context) + + !if done then add in the contribution to the global evidence from live points + j=0 + do i1=1,ic_n + !live point's contribution to the evidence + logX=log(ic_vnow(i1)/dble(ic_npt(i1))) + do i=j+1,j+ic_npt(i1) + d1=l(i)+logX + !global evidence + gZold=gZ + gZ=LogSumExp(gZ,d1) + !ginfo=exp(d1-gZ)*l(i)+exp(gZold-gZ)*(ginfo+gZold)-gZ + ginfo=ginfo*exp(gZold-gZ)+exp(d1-gZ)*l(i) + !local evidence + !gZold=ic_Z(i1) + ic_Zold(i1)=ic_Z(i1) + ic_Z(i1)=LogSumExp(ic_Z(i1),d1) + !ic_info(i1)=exp(d1-ic_Z(i1))*l(i)+ & + !exp(gZold-ic_Z(i1))*(ic_info(i1)+gZold)-ic_Z(i1) + ic_info(i1)=ic_info(i1)*exp(ic_zold(i1)-ic_Z(i1))+exp(d1-ic_Z(i1))*l(i) + enddo + j=j+ic_npt(i1) + enddo + + ginfo=ginfo-gZ + + !memory deallocation + deallocate(sc_npt, sc_invcov, sc_node, sc_eval, sc_evec) + deallocate(nptk, nptx, meank, evalk, invcovk, tMatk, eveck, kfack, effk, volk, & + nodek ,sck) + deallocate(pt, aux) + deallocate(ic_fNode,ic_nsc,ic_nBrnch,ic_brnch,ic_reme,ic_rFlag,ic_Z,ic_zold, & + ic_info,ic_vnow,ic_hilike,ic_inc,ic_chk,ic_llimits) + if(multimodal) deallocate(ic_plimits) + if(ceff) deallocate(ic_eff) + deallocate(pnewa,phyPnewa,lnewa,sEll,remain,rIdx) + deallocate(totVol,eVolFrac,x1,x2,y1,y2,slope,intcpt,cVolFrac,pVolFrac) + if( prior_warning ) deallocate( ic_mean, ic_sigma ) + deallocate( evDataAll ) + endif + + deallocate( eswitchff, escount, dmin, evData, ic_sc, ic_npt, ic_done, ic_climits, & + ic_volFac, sc_mean, sc_tmat, sc_kfac, sc_eff, sc_vol, lowp, lowphyP, pnew, phyPnew ) + return + endif + + modeFound=.false. + + if(my_rank==0) then + !mode separation + if(ff/=1 .and. eswitch .and. multimodal .and. mod(ff,15)==0 .and. ic_n=ndims+1 .and. totVol(i1)==0.d0) ic_rFlag(i1)=.true. + + if(.not.eswitch) then + if(mod(ff-1-eswitchff(i1),ic_nsc(i1))==0) then + flag=.true. + else + flag=.false. + endif + elseif(ic_npt(i1)1.1 .or. .not.dino) .and. 
& + (pVolFrac(i1)d1) + elseif( abs( d1 - ic_llimits(nd,i3,2) ) < d4 * 1d-5 ) then + pt(1,1:ic_npt(nd))=ic_climits(nd,i3,1)+d4*p(i3,nd_j+1:nd_j+ic_npt(nd)) + ic_llimits(nd,i3,2)=maxval(pt(1,1:ic_npt(nd)),MASK=pt(1,1:ic_npt(nd))lowPhyP(i3)) + elseif( abs( lowPhyP(i3) - ic_plimits(nd,i3,2) ) < d4 * 1d-5 ) then + ic_plimits(nd,i3,2)=maxval(phyP(i3,nd_j+1:nd_j+ic_npt(nd)), & + MASK=phyP(i3,nd_j+1:nd_j+ic_npt(nd))logZero) then + pnewa(nd,1,:)=pnew(:) + phyPnewa(nd,1,:)=phyPnew(:) + endif + endif +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) + !now send the points to the root node + if(my_rank/=0) then + call MPI_SEND(lnew,1,MPI_DOUBLE_PRECISION,0,my_rank,MPI_COMM_WORLD,errcode) + if(lnew>logZero) then + call MPI_SEND(pnew,ndims,MPI_DOUBLE_PRECISION,0,my_rank,MPI_COMM_WORLD,errcode) + call MPI_SEND(phyPnew,totPar,MPI_DOUBLE_PRECISION,0,my_rank,MPI_COMM_WORLD,errcode) + endif + else + do i2=1,mpi_nthreads-1 + call MPI_RECV(lnewa(nd,i2+1),1,MPI_DOUBLE_PRECISION,i2,i2,MPI_COMM_WORLD,mpi_status,errcode) + if(lnewa(nd,i2+1)>logZero) then + call MPI_RECV(pnewa(nd,i2+1,1:ndims),ndims,MPI_DOUBLE_PRECISION,i2,i2,MPI_COMM_WORLD,mpi_status,errcode) + call MPI_RECV(phyPnewa(nd,i2+1,1:totPar),totPar,MPI_DOUBLE_PRECISION,i2,i2,MPI_COMM_WORLD,mpi_status,errcode) + endif + enddo + endif + call MPI_BARRIER(MPI_COMM_WORLD,errcode) +#endif + endif + + if(my_rank==0) then + !now check if any of them is a good one + do i=rIdx(nd),mpi_nthreads + numlike=numlike+1 + if(lnewa(nd,i)>lowlike) then + pnew(:)=pnewa(nd,i,:) + phyPnew(:)=phyPnewa(nd,i,:) + lnew=lnewa(nd,i) + acpt=.true. + !leftover points? + if(i==mpi_nthreads) then + remain(nd)=.false. + rIdx(nd)=1 + else + remain(nd)=.true. + rIdx(nd)=i+1 + endif + exit + endif + if(i==mpi_nthreads) then + remain(nd)=.false. + rIdx(nd)=1 + endif + enddo + endif + +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) + call MPI_BCAST(acpt,1,MPI_LOGICAL,0,MPI_COMM_WORLD,errcode) +#endif + + if(acpt) exit + enddo + + if(my_rank==0) then + p(:,indx(1))=pnew(:) + phyP(:,indx(1))=phyPnew(:) + l(indx(1))=lnew + if(lnew>ic_hilike(nd)) ic_hilike(nd)=lnew + + !set the limits + if(multimodal) then + call setLimits(multimodal,ndims,nCdims,ic_llimits(nd,:,:), & + ic_plimits(nd,:,:),pnew,phyPnew(1:nCdims),ic_climits(nd,:,:)) + else + call setLimits(multimodal,ndims,nCdims,ic_llimits(nd,:,:), & + ic_climits(nd,:,:),pnew,phyPnew(1:nCdims),ic_climits(nd,:,:)) + endif + endif + else + num_old=numlike + + acpt=.false. + do + if(my_rank==0) remFlag=remain(nd) +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) + call MPI_BCAST(remFlag,1,MPI_LOGICAL,0,MPI_COMM_WORLD,errcode) +#endif + if(.not.remFlag) then + !first pick an ellipsoid according to the vol + do + !pick a sub-cluster according to the vol + call selectEll(ic_sc(nd),sc_vol(nd_i+1:nd_i+ic_sc(nd)),i) + i=i+nd_i + if(sc_kfac(i)==0.d0 .or. 
sc_vol(i)==0.d0) then + cycle + else + exit + endif + enddo + + !generate mpi_nthreads potential points + d1=sc_kfac(i)*sc_eff(i) + call samp(pnew,phyPnew,lnew,sc_mean(i,:),d1,sc_tMat(i,:,:),ic_climits(nd,:,:),loglike,eswitch,lowlike,context) + if(my_rank==0) then + lnewa(nd,1)=lnew + + if(lnew>logZero) then + sEll(nd,1)=i-nd_i + pnewa(nd,1,:)=pnew(:) + phyPnewa(nd,1,:)=phyPnew + endif + endif +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) + !now send the points to the root node + if(my_rank/=0) then + call MPI_SEND(lnew,1,MPI_DOUBLE_PRECISION,0,my_rank,MPI_COMM_WORLD,errcode) + if(lnew>logZero) then + call MPI_SEND(i-nd_i,1,MPI_INTEGER,0,my_rank,MPI_COMM_WORLD,errcode) + call MPI_SEND(pnew,ndims,MPI_DOUBLE_PRECISION,0,my_rank,MPI_COMM_WORLD,errcode) + call MPI_SEND(phyPnew,totPar,MPI_DOUBLE_PRECISION,0,my_rank,MPI_COMM_WORLD,errcode) + endif + else + do i2=1,mpi_nthreads-1 + call MPI_RECV(lnewa(nd,i2+1),1,MPI_DOUBLE_PRECISION,i2,i2,MPI_COMM_WORLD,mpi_status,errcode) + if(lnewa(nd,i2+1)>logZero) then + call MPI_RECV(sEll(nd,i2+1),1,MPI_INTEGER,i2,i2,MPI_COMM_WORLD,mpi_status,errcode) + call MPI_RECV(pnewa(nd,i2+1,1:ndims),ndims,MPI_DOUBLE_PRECISION,i2,i2,MPI_COMM_WORLD,mpi_status,errcode) + call MPI_RECV(phyPnewa(nd,i2+1,1:totPar),totPar,MPI_DOUBLE_PRECISION,i2,i2,MPI_COMM_WORLD,mpi_status,errcode) + endif + enddo + endif + call MPI_BARRIER(MPI_COMM_WORLD,errcode) +#endif + endif + + if(my_rank==0) then + !check if any of them is inside the hard edge + do j1=rIdx(nd),mpi_nthreads + numlike=numlike+1 + if(my_rank==0 .and. ceff) then + if(.not.remain(nd)) ic_eff(nd,2)=ic_eff(nd,2)+1d0 + endif + + if(lnewa(nd,j1)>lowlike) then + if(sc_npt(sEll(nd,j1)+nd_i)>0) then + !find the no. of ellipsoids, j, the new points lies in + j=1 + acpt=.true. + do m=nd_i+1,nd_i+ic_sc(nd) + if(m==sEll(nd,j1)+nd_i .or. sc_npt(m)==0) cycle + if(ptIn1Ell(ndims,pnewa(nd,j1,:),sc_mean(m,:), & + sc_invcov(m,:,:),sc_kfac(m)*sc_eff(m))) j=j+1 + enddo + endif + + if(acpt) then + !accept it with probability 1/j + if(j>1) then + urv=ranmarNS(0) + if(urv<=(1.d0/dble(j))) then + acpt=.true. + else + acpt=.false. + endif + else + acpt=.true. + endif + + !point accepted + if(acpt) then + if(my_rank==0 .and. ceff) then + if(.not.remain(nd)) ic_eff(nd,1)=ic_eff(nd,1)+1d0 + endif + !leftover points? + if(j1==mpi_nthreads) then + remain(nd)=.false. + rIdx(nd)=1 + else + remain(nd)=.true. + rIdx(nd)=j1+1 + endif + exit + endif + endif + endif + + if(j1==mpi_nthreads) then + remain(nd)=.false. + rIdx(nd)=1 + endif + enddo + endif + +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) + call MPI_BCAST(acpt,1,MPI_LOGICAL,0,MPI_COMM_WORLD,errcode) +#endif + + if(acpt) then + if(my_rank==0) then + pnew=pnewa(nd,j1,:) + phyPnew=phyPnewa(nd,j1,:) + lnew=lnewa(nd,j1) + i=sEll(nd,j1)+nd_i + + if(lnew>ic_hilike(nd)) ic_hilike(nd)=lnew + + !set the limits + if(multimodal) then + call setLimits(multimodal,ndims,nCdims,ic_llimits(nd,:,:), & + ic_plimits(nd,:,:),pnew,phyPnew(1:nCdims),ic_climits(nd,:,:)) + else + call setLimits(multimodal,ndims,nCdims,ic_llimits(nd,:,:), & + ic_climits(nd,:,:),pnew,phyPnew(1:nCdims),ic_climits(nd,:,:)) + endif + endif + exit + endif + enddo + + + if(my_rank==0) then + + if(ceff) then + if(ic_eff(nd,1)>0d0 .and. 
mod(int(ic_eff(nd,1)),10)==0) then + d1=ic_eff(nd,1)/ic_eff(nd,2) + ic_eff(nd,1)=0d0 + ic_eff(nd,2)=0d0 + + if(1.2d0*d11.2d0*ef) then + ic_eff(nd,3)=max(ef,ic_eff(nd,4)/(1d0+0.2d0*sqrt(1000d0/dble(ic_npt(nd))))) + !ic_eff(nd,3)=ic_eff(nd,4)/(1d0+0.2d0*sqrt(1000d0/dble(ic_npt(nd)))) + ic_eff(nd,4)=ic_eff(nd,3) + endif + endif + endif + + !find the sub-cluster in which the rejected point lies + n1=nd_j + do q=nd_i+1,nd_i+ic_sc(nd) + if(sc_npt(q)==0) cycle + n1=n1+sc_npt(q) + if(indx(1)<=n1) then + n1=n1-sc_npt(q) + exit + endif + enddo + + i1=nlive + + !remove the rejected point & its likelihood & update the no. of points in the cluster + if(indx(1)0.d0 .and. sc_kfac(i)>0.d0 .and. (i/=q .or. (i==q .and. sc_npt(q)>0))) then + + !min vol this ellipsoid should occupy + if(dino) then + if(ceff) then + d4=ic_eff(nd,3) + else + d4=ef + endif + + d1=(ic_vnow(nd)*ic_volFac(nd)*shrink/d4)*(sc_npt(q))/dble(ic_npt(nd)) + else + d1=tiny(1.d0) + endif + + !now evolve the ellipsoid with the rejected point + call evolveEll(0,sc_npt(q),ndims,lowp,p(:,n1+1:n1+sc_npt(q)),sc_mean(q,:), & + sc_eval(q,:),sc_invcov(q,:,:),sc_kfac(q),sc_eff(q),sc_vol(q),d1) + endif + + n1=sum(sc_npt(1:i-1)) + + if(i/=q .or. (i==q .and. sc_npt(q)>0)) then + !min vol this ellipsoid should occupy + if(dino) then + if(ceff) then + d4=ic_eff(nd,3) + else + d4=ef + endif + + d1=(ic_vnow(nd)*ic_volFac(nd)*shrink/d4)*(dble(sc_npt(i))+1.d0)/dble(ic_npt(nd)) + else + d1=tiny(1.d0) + endif + + !now evolve the ellipsoid with the inserted point + call evolveEll(1,sc_npt(i),ndims,pnew,p(:,n1+1:n1+sc_npt(i)),sc_mean(i,:), & + sc_eval(i,:),sc_invcov(i,:,:),sc_kfac(i),sc_eff(i),sc_vol(i),d1) + endif + endif + +#ifdef MPI + call MPI_BARRIER(MPI_COMM_WORLD,errcode) + call MPI_BCAST(q,1,MPI_INTEGER,0,MPI_COMM_WORLD,errcode) + call MPI_BCAST(i,1,MPI_INTEGER,0,MPI_COMM_WORLD,errcode) + call MPI_BCAST(sc_kfac(i),1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,errcode) + call MPI_BCAST(sc_eff(i),1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,errcode) + call MPI_BCAST(sc_vol(i),1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,errcode) + call MPI_BCAST(sc_kfac(q),1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,errcode) + call MPI_BCAST(sc_eff(q),1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,errcode) + call MPI_BCAST(sc_vol(q),1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,errcode) +#endif + + !now evolve the rest of the sub-clusters + !only if sub-clustering wasn't done in the current iteration + do j=nd_i+1,nd_i+ic_sc(nd) + !sub-clusters with rejected/inserted point already evolved + if(j==i .or. j==q) cycle + if(sc_eff(j)>1.d0 .and. sc_vol(j)>0.d0 .and. sc_kfac(i)>0.d0) then + d1=sc_eff(j) + sc_eff(j)=max(1.d0,sc_eff(j)*(shrink**(2.d0/dble(ndims)))) + d1=sc_eff(j)/d1 + sc_vol(j)=sc_vol(j)*(d1**(dble(ndims)/2.d0)) + endif + enddo + + if(my_rank==0) then + !add the new point & its likelihood & update the no. of points + !in the cluster new point's index + n1=n1+sc_npt(i)+1 + !make room + p(:,n1+1:nd_j+ic_npt(nd))=p(:,n1:nd_j+ic_npt(nd)-1) + phyP(:,n1+1:nd_j+ic_npt(nd))=phyP(:,n1:nd_j+ic_npt(nd)-1) + l(n1+1:nd_j+ic_npt(nd))=l(n1:nd_j+ic_npt(nd)-1) + !insert the new point + p(:,n1)=pnew(:) + phyP(:,n1)=phyPnew(:) + l(n1)=lnew + !increment no. of points in sub-cluster i + sc_npt(i)=sc_npt(i)+1 + endif + endif + + if(my_rank==0) then + !update evidence, info, prior vol, sampling statsitics + globff=globff+1 + sff=sff+1 + gZold=gZ + ic_zold(nd)=ic_z(nd) + d1=lowlike+log(h) + gZ=LogSumExp(gZ,d1) + ic_Z(nd)=LogSumExp(ic_Z(nd),d1) +! ic_info(nd)=exp(d1-ic_Z(nd))*lowlike+exp(gZold-ic_Z(nd))* & +! 
(ic_info(nd)+gZold)-ic_Z(nd) + ginfo=ginfo*exp(gzold-gz)+exp(d1-gz)*lowlike + ic_info(nd)=ic_info(nd)*exp(ic_zold(nd)-ic_z(nd))+exp(d1-ic_z(nd))*lowlike + + !data for ev.dat file + j1=mod(sff-1,updInt)+1 + evData(j1,1:totPar)=lowphyP(1:totPar) + evData(j1,totPar+1)=lowlike + evData(j1,totPar+2)=log(h) + evData(j1,totPar+3)=dble(nd) + + lowlike=minval(l(nd_j+1:nd_j+ic_npt(nd))) + + if(abs(lowlike-ic_hilike(nd))<= 0.0001 .or. (ic_inc(nd)50) .or. ff==maxIter) then + ic_done(nd)=.true. + + !check if all done + ic_done(0)=.true. + do i=1,ic_n + if(.not.ic_done(i)) then + ic_done(0)=.false. + exit + endif + enddo + endif + endif + endif + + if(my_rank==0) then + if(sff>0 .and. (mod(sff,updInt)==0 .or. ic_done(0))) then + + if( .not.outfile ) then + k=0 + if( sff > updInt ) then + k=size(evDataAll) + allocate( evDataTemp(k) ) + evDataTemp=evDataAll + deallocate( evDataAll ) + allocate( evDataAll(k+j1*(totPar+3)) ) + evDataAll(1:k)=evDataTemp(1:k) + deallocate( evDataTemp ) + else + deallocate( evDataAll ) + allocate( evDataAll(j1*(totPar+3)) ) + endif + do i=1,j1 + evDataAll(k+1:k+totPar+3) = evData(i,1:totPar+3) + k=k+totPar+3 + enddo + else + !write the evidence file + funit1=u_ev + fName1=evname + open(unit=funit1,file=fName1,form='formatted',status='old', position='append') + write(fmt,'(a,i5,a)') '(',totPar+2,'E28.18,i5)' + do i=1,j1 + write(funit1,fmt) evData(i,1:totPar+2),int(evData(i,totPar+3)) + enddo + !close the files + close(funit1) + + !write the live file + funit1=u_phys + fName1=physname + funit2=u_live + fName2=livename + open(unit=funit1,file=fName1,form='formatted',status='replace') + open(unit=funit2,file=fName2,form='formatted',status='replace') + + write(fmt,'(a,i5,a)') '(',totPar+1,'E28.18,i4)' + write(fmt1,'(a,i5,a)') '(',ndims+1,'E28.18)' + k=0 + do i=1,ic_n + do j=1,ic_npt(i) + k=k+1 + write(funit1,fmt) phyP(1:totPar,k),l(k),i + lPts(1:ndims) = ic_climits(i,1:ndims,1)+(ic_climits(i,1:ndims,2)-ic_climits(i,1:ndims,1))*p(1:ndims,k) + write(funit2,fmt1) lPts(1:ndims),l(k) + enddo + enddo + !close the files + close(funit1) + close(funit2) + + + !write the resume file + funit1=u_resume + fName1=resumename + open(unit=funit1,file=fName1,form='formatted',status='replace') + + write(funit1,'(l2)')genLive + write(funit1,'(4i12)')globff,numlike,ic_n,nlive + write(funit1,'(2E28.18)')gZ,ginfo + write(funit1,'(l2)')eswitch + + !write branching info + do i=1,ic_n + write(funit1,'(i4)')ic_nBrnch(i) + if(ic_nBrnch(i)>0) then + write(fmt,'(a,i5,a)') '(',2*ic_nBrnch(i),'E28.18)' + write(funit1,fmt)ic_brnch(i,1:ic_nBrnch(i),1), ic_brnch(i,1:ic_nBrnch(i),2) + endif + enddo + + !write the node info + do i=1,ic_n + write(funit1,'(2l2,2i6)')ic_done(i),ic_reme(i),ic_fNode(i), ic_npt(i) + write(funit1,'(3E28.18)')ic_vnow(i),ic_Z(i),ic_info(i) + if(ceff) write(funit1,'(1E28.18)')ic_eff(i,4) + enddo + close(funit1) + endif + + !check if the parameters are close the prior edges + if( prior_warning .and. mod(ff,50)== 0 ) then + flag = .false. + k=0 + do i=1,ic_n + if( ic_npt(i) == 0 .or. ic_done(i) ) cycle + + ic_mean(i,1:ndims) = 0d0 + ic_sigma(i,1:ndims) = 0d0 + + do j=1,ic_npt(i) + k=k+1 + lPts(1:ndims) = ic_climits(i,1:ndims,1)+(ic_climits(i,1:ndims,2)-ic_climits(i,1:ndims,1))*p(1:ndims,k) + if( prior_warning .and. 
mod(ff,50)== 0 ) then + ic_mean(i,1:ndims) = ic_mean(i,1:ndims) + lPts(1:ndims) + ic_sigma(i,1:ndims) = ic_sigma(i,1:ndims) + lPts(1:ndims) * lPts(1:ndims) + endif + enddo + + ic_mean(i,1:ndims) = ic_mean(i,1:ndims) / dble(ic_npt(i)) + ic_sigma(i,1:ndims) = sqrt(max(0d0, ic_sigma(i,1:ndims) / dble(ic_npt(i)) + ic_mean(i,1:ndims) * ic_mean(i,1:ndims))) + + do j = 1, ndims + if( ic_sigma(i,j) <= 0.05 .and. ( ic_sigma(i,j) <= 0.05 .or. ic_sigma(i,j) >= 0.95 ) ) then + if( .not. flag ) then + write(*,*) + write(*,*)"MultiNest Warning!" + flag = .true. + endif + write(*,*)"Parameter ", j, " of mode ", i, " is converging towards the edge of the prior." + endif + enddo + enddo + endif + + if(mod(sff,updInt*10)==0 .or. ic_done(0)) call pos_samp(Ztol,globff,broot,nlive,ndims,nCdims,totPar, & + multimodal,outfile,gZ,ginfo,ic_n,ic_Z(1:ic_n),ic_info(1:ic_n),ic_reme(1:ic_n),ic_vnow(1:ic_n), & + ic_npt(1:ic_n),ic_nBrnch(1:ic_n),ic_brnch(1:ic_n,:,1),phyP(:,1:nlive),l(1:nlive),evDataAll,dumper,context) + endif + endif + + nd_i=nd_i+ic_sc(nd) + + !prior volume shrinks + if(my_rank==0) then + if(.not.ic_done(nd)) ic_vnow(nd)=ic_vnow(nd)*shrink + nd_j=nd_j+ic_npt(nd) + endif + enddo + + if(my_rank==0) then + !update the total volume + if(eswitch) then + k=0 + do i=1,ic_n + if(ic_done(i)) then + totVol(i)=0.d0 + else + totVol(i)=sum(sc_vol(k+1:k+ic_sc(i))) + if(ceff) ic_eff(i,4)=ic_vnow(i)*ic_volFac(i)/totVol(i) + endif + k=k+ic_sc(i) + enddo + endif + + !calculate the global evidence & info + if(mod(ff,50)==0) then + + if(fback) then + call gfeedback(gZ,numlike,globff,.false.) + + if(debug) then + d1=0.d0 + d2=0.d0 + j=0 + do i=1,ic_n + if(ic_done(i)) then + j=j+ic_sc(i) + cycle + endif + d1=d1+ic_vnow(i)*ic_volFac(i) + d2=d2+sum(sc_vol(j+1:j+ic_sc(i))) + j=j+ic_sc(i) + enddo + write(*,*)ic_n,sc_n,d2/d1,count,scount + if(ceff) write(*,*)ic_eff(1:ic_n,4) + endif + endif + endif + + !switch ellipsoidal sampling on if the sum of the volumes of all the + !clusters/sub-cluster is less than 1 + if(.not.peswitch) then + !marginal acceptance rate + if(.not.peswitch) mar_r=1.d0/dble(numlike-num_old) + + if(mar_rlboundary + subroutine samp(pnew,phyPnew,lnew,mean,ekfac,TMat,limits,loglike,eswitch,lowlike,context) + + implicit none + double precision lnew + double precision pnew(ndims),spnew(ndims),phyPnew(totPar),ekfac + double precision mean(ndims),TMat(ndims,ndims) + double precision limits(ndims,2) + double precision lowlike !likelihood threshold + logical eswitch + integer id,i,context + + INTERFACE + !the likelihood function + subroutine loglike(Cube,n_dim,nPar,lnew,context_pass) + integer n_dim,nPar,context_pass + double precision lnew,Cube(nPar) + end subroutine loglike + end INTERFACE + + id=my_rank + + do + if(.not.eswitch) then + !generate a random point inside unit hypercube + call getrandom(ndims,pnew(1:ndims),id) + phyPnew(1:ndims)=pnew(1:ndims) + lnew=lowlike + call loglike(phyPnew,ndims,totPar,lnew,context) + else + !generate a point uniformly inside the given ellipsoid + call genPtInEll(ndims,mean,ekfac,TMat,id,pnew(1:ndims)) + spnew(:)=limits(:,1)+(limits(:,2)-limits(:,1))*pnew(:) + do i=1,ndims + if(pWrap(i)) then + call wraparound(spnew(i),spnew(i)) + endif + enddo + if(.not.inprior(ndims,spnew(1:ndims))) cycle + phyPnew(1:ndims)=spnew(1:ndims) + lnew=lowlike + call loglike(phyPnew,ndims,totPar,lnew,context) + endif + if(lnew>logZero) exit + enddo + + end subroutine samp + + !---------------------------------------------------------------------- + + subroutine selectEll(n,volx,sEll) + + implicit none 
+ + integer n!total no. of ellipsoids + double precision volx(n)!no. points in each ellipsoid + integer sEll!answer, the ellipsoid to sample from + double precision volTree(n),totvol,urv + integer i + + totvol=0.d0 + volTree=0.d0 + do i=1,n + volTree(i)=totvol+volx(i) + totvol=volTree(i) + enddo + volTree=volTree/totvol + urv=ranmarNS(0) + do i=1,n + if(urv<=volTree(i)) exit + enddo + sEll=i + end subroutine selectEll + +!---------------------------------------------------------------------- + + !provide fback to the user + subroutine gfeedback(logZ,nlike,nacc,dswitch) + + implicit none + !input variables + double precision logZ !log-evidence + integer nlike !no. of likelihood evaluations + integer nacc !no. of accepted samples + logical dswitch !dynamic live points + + write(*,'(a,F14.6)')'Acceptance Rate:',dble(nacc)/dble(nlike) + write(*,'(a,i14)') 'Replacements: ',nacc + write(*,'(a,i14)') 'Total Samples: ',nlike + write(*,'(a,F14.6)')'ln(Z): ',logZ + if(dswitch) write(*,'(a,i5)')'Total No. of Live Points:',nlive + + end subroutine gfeedback + +!---------------------------------------------------------------------- + + !isolate the modes + logical function isolateModes2(npt,ndim,nCdim,pt,naux,aux,ic_n,ic_fnode,ic_npt,ic_reme,ic_chk,ic_vnow,reCluster,limits) + implicit none + + !input parameters + integer npt !total no. of points + integer ndim !dimensionality + integer nCdim !clustering dimensionality + double precision pt(nCdim,npt) + integer naux !no. of dimensions of the aux array + double precision aux(naux,npt) + logical ic_chk(ic_n) !whether to check a node for mode separation + double precision ic_vnow(ic_n) !prior volumes + double precision limits(maxCls,nCdim,2) !physical parameter ranges + + !input/output parameters + !mode info + integer ic_n !input: no. of existing modes. output: updated no. of existing modes + integer ic_fnode(maxCls),ic_npt(maxCls) + logical ic_reme(maxCls) + + !output variables + logical reCluster(maxCls) + + !work variables + integer i,j,k,i1,j2,j3,j4,i2,i3,i4,i5,n,n1,n2,n_mode,npt_mode,m,nLost + integer, allocatable :: order(:), nptx(:), nodex(:) + double precision d1,d2,d4,ef0, ef1, ef2 + integer nN,sc_n + logical, allocatable :: gList(:), lList(:), toBeChkd(:), overlapk(:,:) + logical flag,intFlag + double precision, allocatable :: ptk(:,:), auxk(:,:), ptx(:,:), auxx(:,:) + double precision, allocatable :: mMean(:), lMean(:), mStdErr(:), lStdErr(:) + double precision, allocatable :: mean1(:), mean2(:), mean1w(:), mean2w(:) + double precision, allocatable :: eval1(:), evec1(:,:), invcov1(:,:), invcov2(:,:) + logical, allocatable :: wrapEll(:), wrapDim(:,:,:) + integer, allocatable :: wrapN(:) + double precision, allocatable :: meanw(:,:),meank(:,:),evalk(:,:),eveck(:,:,:),invcovk(:,:,:),tmatk(:,:,:),kfack(:) + + + allocate( order(nCdim), nptx(npt/(ndim+1)+1), nodex(npt/(ndim+1)+1) ) + allocate( gList(npt/(ndim+1)+1), lList(npt/(ndim+1)+1), toBeChkd(npt/(ndim+1)+1), overlapk(npt/(ndim+1)+1,npt/(ndim+1)+1) ) + allocate( ptk(nCdim,npt), auxk(naux,npt), ptx(nCdim,npt), auxx(naux,npt), mMean(nCdim), lMean(nCdim), mStdErr(nCdim), & + lStdErr(nCdim), mean1(nCdim), mean2(nCdim), mean1w(nCdim), mean2w(nCdim), eval1(nCdim), evec1(nCdim,nCdim), & + invcov1(nCdim,nCdim), invcov2(nCdim,nCdim) ) + + nN=ic_n + isolateModes2=.false. + reCluster=.false. + + i1=0 !no. of points traversed + sc_n=0 !no. 
of clusters created so far + + do i=1,nN + !enlargement factor + ef0 = max( ( ic_vnow(i) * 100d0 / 111d0 ) + ( 11d0 / 111d0 ) , 0.4d0 ) + + i3=ic_n + + if(ic_npt(i)==0) cycle + + if(.not.ic_chk(i)) then + do j=1,nCdim + ptk(j,i1+1:i1+ic_npt(i))=pt(j,i1+1:i1+ic_npt(i)) + enddo + auxk(1:naux,i1+1:i1+ic_npt(i))=aux(1:naux,i1+1:i1+ic_npt(i)) + nptx(sc_n+1)=ic_npt(i) + nodex(sc_n+1)=i + i1=i1+ic_npt(i) + sc_n=sc_n+1 + cycle + endif + + nLost=0 + overlapk=.true. + gList=.false. + + !cluster using G-means + do j=1,nCdim + d4=limits(i,j,2)-limits(i,j,1) + ptk(j,i1+1:i1+ic_npt(i))=pt(j,i1+1:i1+ic_npt(i)) + + if(pWrap(j)) then + do i2=i1+1,i1+ic_npt(i) + !scale to unit hypercube + d2=limits(i,j,1)+d4*ptk(j,i2) + + call wraparound(d2,d1) + + !scale back to the limits + if(d1 /= d2) ptk(j,i2)=(d1-limits(i,j,1))/d4 + enddo + endif + enddo + + auxk(1:naux,i1+1:i1+ic_npt(i))=aux(1:naux,i1+1:i1+ic_npt(i)) + n1=max(nCdim+1,3) !min no. of points allowed in a cluster + n2=ic_npt(i)/n1+1 !max no. of clusters possible + + call doGmeans(ptk(1:nCdim,i1+1:i1+ic_npt(i)),ic_npt(i),nCdim,k,nptx(sc_n+1:sc_n+n2), & + naux,auxk(:,i1+1:i1+ic_npt(i)),n1,n2) + + allocate(wrapN(k),wrapEll(k),wrapDim(k,nCdim,2),meank(k,nCdim),evalk(k,nCdim), & + eveck(k,nCdim,nCdim),invcovk(k,nCdim,nCdim),tmatk(k,nCdim,nCdim),kfack(k),meanw(k,nCdim)) + wrapEll=.false. + wrapDim=.false. + + nodex(sc_n+1:sc_n+k)=i + + !enclose sub-clusters in bounding ellipsoids + n=i1 + wrapN=1 + do j=1,k + call CalcBEllInfo(nptx(sc_n+j),nCdim,ptk(1:nCdim,n+1:n+nptx(sc_n+j)),meank(j,:), & + evalk(j,:),eveck(j,:,:),invcovk(j,:,:),tmatk(j,:,:),kfack(j),n1) + + if(mWrap) then + ef1=kfack(j)*max(2d0,((1.d0+ef0*sqrt(50.d0/dble(nptx(sc_n+j))))**(1d0/nCdim))) + call wrapEllCheck(nCdim,meank(j,:),tmatk(j,:,:),ef1,limits(i,:,:), & + wrapEll(j),wrapDim(j,:,:)) + + !transform the mean to hypercube + call Scaled2Cube(nCdim,limits(j,:,:),meank(j,:),meanw(j,:)) + + if(wrapEll(j)) then + do i2=1,nCdim + if(wrapDim(j,i2,1)) then + wrapN(j)=wrapN(j)+1 + meanw(j,i2)=1d0+meanw(j,i2) + elseif(wrapDim(j,i2,2)) then + wrapN(j)=wrapN(j)+1 + meanw(j,i2)=meanw(j,i2)-1d0 + endif + enddo + endif + + !scale the mean + call Cube2Scaled(nCdim,limits(j,:,:),meanw(j,:),meanw(j,:)) + endif + + n=n+nptx(sc_n+j) + enddo + + !calculate the standard error, required for localization + if(.not.aWrap) then + mMean=0.d0 + mStdErr=0.d0 + do j=i1+1,i1+ic_npt(i) + mMean(:)=mMean(:)+ptk(:,j) + mStdErr(:)=mStdErr(:)+ptk(:,j)**2 + enddo + mMean=mMean/dble(ic_npt(i)) + mStdErr=mStdErr/dble(ic_npt(i)) + mStdErr=sqrt(mStdErr-mMean**2) + endif + + do + npt_mode=0 + lList=.false. + toBeChkd=.false. + n_mode=0 !no. of clusters in the mode + + !find a starting sub-cluster + do j=1,k + if(gList(j)) then + cycle + else + n_mode=1 + npt_mode=nptx(sc_n+j) + lList(j)=.true. + gList(j)=.true. + toBeChkd(j)=.true. + m=j + exit + endif + enddo + + !didn't find a starting position? + if(n_mode==0) exit + + do + flag=.false. + do j=1,k + if(.not.toBeChkd(j)) cycle + flag=.true. + toBeChkd(j)=.false. + mean1(:)=meank(j,:) + eval1(:)=evalk(j,:) + ef1=kfack(j)*((1.d0+ef0*sqrt(40.d0/dble(nptx(sc_n+j))))**(1d0/nCdim)) + invcov1(:,:)=invcovk(j,:,:) + evec1(:,:)=eveck(j,:,:) + exit + enddo + + if(.not.flag) exit + + do n=1,k + if(lList(n) .or. n==j .or. .not.overlapk(n,j)) cycle + mean2(:)=meank(n,:) + ef2=kfack(n)*((1.d0+ef0*sqrt(40.d0/dble(nptx(sc_n+n))))**(1d0/nCdim)) + invcov2(:,:)=invcovk(n,:,:) + + intFlag=.false. + + if(mWrap .and. (wrapEll(j) .or. 
wrapEll(n))) then + do i2=0,2**(wrapN(j)-1)-1 + call returnOrder(wrapN(j)-1,i2,order(1:wrapN(j)-1)) + + i4=0 + do i5=1,nCdim + if(wrapDim(j,i5,1) .or. wrapDim(j,i5,2)) then + i4=i4+1 + if(order(i4)==0) then + mean1w(i5)=meanw(j,i5) + else + mean1w(i5)=mean1(i5) + endif + else + mean1w(i5)=mean1(i5) + endif + enddo + + + do j2=0,2**(wrapN(n)-1)-1 + call returnOrder(wrapN(n)-1,j2,order(1:wrapN(n)-1)) + + j4=0 + do j3=1,nCdim + if(wrapDim(n,j3,1) .or. wrapDim(n,j3,2)) then + j4=j4+1 + if(order(j4)==0) then + mean2w(j3)=meanw(n,j3) + else + mean2w(j3)=mean2(j3) + endif + else + mean2w(j3)=mean2(j3) + endif + enddo + + if(ellIntersect(nCdim,mean1w,mean2w,eval1,evec1,ef1,ef2,invcov1,invcov2)) then + intFlag=.true. + exit + endif + enddo + + if(intFlag) exit + enddo + elseif(ellIntersect(nCdim,mean1,mean2,eval1,evec1,ef1,ef2,invcov1,invcov2)) then + intFlag=.true. + endif + + if(intFlag) then + lList(n)=.true. + gList(n)=.true. + toBeChkd(n)=.true. + n_mode=n_mode+1 + npt_mode=npt_mode+nptx(sc_n+n) + else + overlapk(n,j)=.false. + overlapk(j,n)=.false. + endif + enddo + enddo + + !found a candidate? + if((n_mode=2*(ndim+1) .and. ((ic_npt(i)-npt_mode)>=2*(ndim+1) & + .or. (ic_reme(i) .and. ic_npt(i)-npt_mode==0))) then + flag=.true. + else + flag=.false. + endif + + !now check for localization in ndim parameters + !calculate the standard error + if(flag .and. n_modemaxCls) then + write(*,*)"ERROR: More modes found than allowed memory." + write(*,*)"Increase maxmodes in the call to nestrun and run MultiNest again." + write(*,*)"Aborting" +#ifdef MPI + call MPI_ABORT(MPI_COMM_WORLD,errcode) +#endif + stop + endif + ic_fnode(ic_n)=i + ic_reme(ic_n)=.false. + ic_reme(i)=.true. + isolateModes2=.true. + reCluster(i)=.true. + reCluster(ic_n)=.true. + do j=1,k + if(lList(j)) nodex(sc_n+j)=ic_n + enddo + endif + enddo + + !make the leftover points into a new node + if(ic_reme(i) .and. ic_npt(i)-nLost>=ndim+1 .and. .false.) then + ic_n=ic_n+1 + ic_fnode(ic_n)=i + ic_reme(ic_n)=.false. + reCluster(ic_n)=.true. + do j=1,k + if(nodex(sc_n+j)==i) nodex(sc_n+j)=ic_n + enddo + endif + + if(ic_n==i3) then + do j=1,nCdim + ptk(j,i1+1:i1+ic_npt(i))=pt(j,i1+1:i1+ic_npt(i)) + enddo + auxk(1:naux,i1+1:i1+ic_npt(i))=aux(1:naux,i1+1:i1+ic_npt(i)) + endif + + i1=i1+ic_npt(i) + sc_n=sc_n+k + deallocate(wrapN,wrapEll,wrapDim,meank,evalk,eveck,invcovk,tmatk,kfack,meanw) + enddo + + !if modes found then re-arrange everything + if(isolateModes2) then + i1=0 !no. of points re-arranged + do i=1,ic_n + ic_npt(i)=0 + k=0 !no. of points traversed + do j=1,sc_n + if(nodex(j)==i) then + !arrange the points + do i2=1,nCdim + ptx(i2,i1+1:i1+nptx(j))=ptk(i2,k+1:k+nptx(j)) + enddo + auxx(1:naux,i1+1:i1+nptx(j))=auxk(1:naux,k+1:k+nptx(j)) + i1=i1+nptx(j) + ic_npt(i)=ic_npt(i)+nptx(j) + endif + k=k+nptx(j) + enddo + enddo + pt=ptx + aux=auxx + endif + + deallocate( order, nptx, nodex ) + deallocate( gList, lList, toBeChkd, overlapk ) + deallocate( ptk, auxk, ptx, auxx, mMean, lMean, mStdErr, lStdErr, mean1, mean2, mean1w, & + mean2w, eval1, evec1, invcov1, invcov2 ) + + end function isolateModes2 + +!---------------------------------------------------------------------- + + subroutine setLimits(multimodal,ndim,nCdim,llimits,plimits,pnew,phyPnew,climits) + + implicit none + + !input variables + logical multimodal !set clustering limits? 
+ integer ndim !dimensionality + integer nCdim !clustering dimension + double precision pnew(ndim) !new point + double precision phyPnew(nCdim) !new physical point + double precision climits(ndim,2) !current scaling limits + + !input/output variables + double precision llimits(ndim,2) !current limits + double precision plimits(nCdim,2) !current clustering limits + + !work variables + integer i + double precision pt(ndim) + + + !first scale the point + do i=1,ndim + pt(i)=climits(i,1)+(climits(i,2)-climits(i,1))*pnew(i) + + !wraparound + if(pWrap(i)) call wraparound(pt(i),pt(i)) + enddo + + do i=1,ndim + if(pt(i)llimits(i,2)) then + llimits(i,2)=pt(i) + endif + enddo + + if(multimodal) then + do i=1,nCdim + if(phyPnew(i)plimits(i,2)) then + plimits(i,2)=phyPnew(i) + endif + enddo + endif + + end subroutine setLimits + +!---------------------------------------------------------------------- + + subroutine wraparound(oPt,wPt) + + implicit none + double precision oPt !actual point + double precision wPt !wrapped-around point + + wPt=oPt + do + if(wPt<0.d0 .or. wPt>1.d0) then + wPt=wPt-floor(wPt) + else + exit + endif + enddo + + end subroutine wraparound + +!---------------------------------------------------------------------- + + subroutine wrapEllCheck(ndim,mean,TMat,ef,limits,wrapEll,wrapDim) + + implicit none + + !input variables + integer ndim !dimensionality + double precision mean(ndim) + double precision TMat (ndim,ndim) !transformation matrix + double precision ef !enlargement factor + double precision limits(ndim,2) !current scale limits + + !output variable + logical wrapEll + logical wrapDim(ndim,2) !which dimensions in which directions to wrap + + !work variable + integer i,j,k + double precision cubeEdge(ndim),sCubeEdge + double precision pnewM(1,ndim),u(1,ndim) + + + wrapEll=.false. + wrapDim=.false. + + !loop over the principle directions + do i=1,ndim + !loop over the two edges + do k=1,2 + u(1,:)=0d0 + if(k==1) then + u(1,i)=1d0 + else + u(1,i)=-1d0 + endif + + pnewM=MatMul(u,TMat) + cubeEdge(:)=sqrt(ef)*pnewM(1,:)+mean(:) + + !loop over dimensions + do j=1,ndim + if(pWrap(j) .and. (.not.wrapDim(j,1) .or. .not.wrapDim(j,2))) then + !transform back to unit hypercube + sCubeEdge=limits(j,1)+(limits(j,2)-limits(j,1))*cubeEdge(j) + + if(sCubeEdge<0d0) then + wrapDim(j,1)=.true. + wrapEll=.true. + endif + if(sCubeEdge>1d0) then + wrapDim(j,2)=.true. + wrapEll=.true. + endif + endif + enddo + enddo + enddo + + !sanity check + !no wraparound if both edges are outside the hyper cube boundary + if(wrapEll) then + wrapEll=.false. + do i=1,ndim + if(wrapDim(i,1) .and. wrapDim(i,2)) then + wrapDim(i,1)=.false. + wrapDim(i,2)=.false. + else + wrapEll=.true. 
+ endif + enddo + endif + + + end subroutine wrapEllCheck + +!---------------------------------------------------------------------- + + subroutine scaled2Cube(ndim,limits,sP,cP) + + implicit none + + !input variables + integer ndim + double precision limits(ndim,2) !current limits + double precision sP(ndim) !scaled point + + !output variable + double precision cP(ndim) !point in unit hypercube + + !work variables + integer i + + + do i=1,ndim + cP(i)=limits(i,1)+(limits(i,2)-limits(i,1))*sP(i) + enddo + + end subroutine scaled2Cube + +!---------------------------------------------------------------------- + + subroutine Cube2Scaled(ndim,limits,sP,cP) + + implicit none + + !input variables + integer ndim + double precision limits(ndim,2) !current limits + double precision sP(ndim) !scaled point + + !output variable + double precision cP(ndim) !point in unit hypercube + + !work variables + integer i + + + do i=1,ndim + sP(i)=(cP(i)-limits(i,1))/(limits(i,2)-limits(i,1)) + enddo + + end subroutine Cube2Scaled + + +!---------------------------------------------------------------------- + + subroutine returnOrder(nBits,n,order) + + implicit none + + !input variables + integer nBits !no. of bits + integer n !number under consideration + + !output variable + integer order(nBits) !list with the order + + !work variables + integer i,j + + + order=0 + + do i=1,nBits + j=2**(i-1) + if(mod(n/j,2)==0) then + order(i)=0 + else + order(i)=1 + endif + enddo + + end subroutine returnOrder +!---------------------------------------------------------------------- +end module Nested diff --git a/multinest/posterior.F90 b/multinest/posterior.F90 new file mode 100644 index 0000000..2c3695c --- /dev/null +++ b/multinest/posterior.F90 @@ -0,0 +1,707 @@ +! posterior samples code + +module posterior + use utils1 + + implicit none + + double precision, dimension(:,:,:), allocatable :: evdatp + integer, dimension(:), allocatable :: nbranchp,nPtPerNode,ncon,nSamp,clstrdNode + double precision, dimension(:,:), allocatable :: branchp + double precision, dimension(:,:), allocatable :: pts,pts2,consP,unconsP,pwt,pNwt + logical, dimension(:), allocatable :: check + integer nClst,nUncon + double precision, dimension(:,:), allocatable :: stMu,stSigma + +contains + +!------------------------------------------------------------------------ + + subroutine pos_samp(Ztol,nIter,root,nLpt,ndim,nCdim,nPar,multimodal,outfile,globZ,globinfo,ic_n,ic_Z,ic_info,ic_reme, & + ic_vnow,ic_npt,ic_nBrnch,ic_brnch,phyP,l,evDataAll,dumper,context) + !subroutine pos_samp + implicit none + + double precision Ztol !null evidence + integer nIter !globff (total no. replacements) + character(LEN=100)root !base root + integer nLpt !no. of live points + integer ndim !dimensionality + integer nCdim !no. of parameters to cluster on + integer nPar !total no. of parameters to save + logical multimodal + logical outfile !write output files? 
+ integer i,j,k,i1,ios,ic_n,m,indx + integer ic_npt(ic_n),ic_nptloc(ic_n),ic_nBrnch(ic_n) + double precision ic_Z(ic_n),ic_info(ic_n),ic_vnow(ic_n),ic_brnch(ic_n,ic_n),phyP(nPar,nLpt),l(nLpt),evDataAll(:) + logical ic_reme(ic_n) + character(len=100) evfile,livefile,postfile,resumefile + character(len=100) sepFile,statsFile,postfile4,strictSepFile,summaryFile + character(len=32) fmt,fmt2 + logical l1 + double precision d1,d2,urv + double precision ltmp(nPar+2),ic_zloc(ic_n),ic_infoloc(ic_n),gzloc,ginfoloc + integer phyID(nLpt) + integer context + + !posterior info + double precision lognpost,globZ,globInfo,gZold,maxWt + integer npost !no. of equally weighted posterior samples + double precision, dimension(:,:), allocatable :: wt,temp !probability weight of each posterior sample + double precision, dimension(:), allocatable :: ic_zold,llike !local evidence + + ! parameters for dumper + double precision, pointer :: physLive(:,:), posterior(:,:), paramConstr(:) + double precision maxLogLike, logZ, logZerr + integer nSamples + + INTERFACE + !the user dumper function + subroutine dumper(nSamples, nlive, nPar, physLive, posterior, paramConstr, maxLogLike, logZ, logZerr, context_pass) + integer nSamples, nlive, nPar, context_pass + double precision, pointer :: physLive(:,:), posterior(:,:), paramConstr(:) + double precision maxLogLike, logZ, logZerr + end subroutine dumper + end INTERFACE + + + ic_zloc=ic_z + ic_infoloc=ic_info + gzloc=globz + ginfoloc=globinfo + ic_nptloc=ic_npt + + + !Ztol=-1.d99 + !file names + postfile=TRIM(root)//'.txt' + statsFile=TRIM(root)//'stats.dat' + postfile4=TRIM(root)//'post_equal_weights.dat' + strictSepFile=TRIM(root)//'post_separate_strict.dat' + sepFile=TRIM(root)//'post_separate.dat' + evfile=TRIM(root)//'ev.dat' + livefile = TRIM(root)//'phys_live.points' + resumefile = TRIM(root)//'resume.dat' + summaryFile = TRIM(root)//'summary.txt' + + allocate(branchp(0:ic_n,ic_n),evdatp(ic_n,nIter,nPar+2),wt(ic_n,nIter)) + allocate(nbranchp(0:ic_n),nPtPerNode(ic_n)) + allocate(pts2(ndim+1,nIter),pts(nPar+2,nIter),consP(nIter,nPar+2),unconsP(nIter,nPar+2),pwt(nIter,ic_n), & + pNwt(nIter,ic_n)) + allocate(ncon(ic_n),nSamp(ic_n),clstrdNode(ic_n)) + allocate(check(ic_n),ic_zold(ic_n),temp(ic_n,3)) + allocate(stMu(ic_n,nPar),stSigma(ic_n,nPar),llike(ic_n)) + + nPtPerNode=0 + clstrdNode=0 + nbranchp=0 + check=.false. + nSamp=0 + + nbranchp(1:ic_n)=ic_nbrnch(1:ic_n) + branchp(1:ic_n,:)=ic_brnch(1:ic_n,:) + + + !set the membership of live points to their coresponding nodes + k=0 + do i=1,ic_n + if(ic_nptloc(i)==0) cycle + phyID(k+1:k+ic_nptloc(i))=i + k=k+ic_nptloc(i) + enddo + + + !add in the contribution of the remaining live points to the evidence + i1=1 + do i=1,nLpt + ltmp(1:nPar)=phyP(1:nPar,i) + ltmp(nPar+1)=l(i) + i1=phyID(i) + + d1=ltmp(nPar+1)+log(ic_vnow(i1)/dble(ic_nptloc(i1))) + + !global evidence & info + gZold=gzloc + gzloc=LogSumExp(gzloc,d1) +! ginfoloc=exp(d1-gzloc)*ltmp(nPar+1)+exp(gZold-gzloc)*(ginfoloc+gZold)-gzloc + ginfoloc=ginfoloc*exp(gzold-gzloc)+exp(d1-gzloc)*ltmp(nPar+1) + + !local evidence & info +! gZold=ic_Z(i1) + ic_zold(i1)=ic_zloc(i1) + ic_zloc(i1)=LogSumExp(ic_zloc(i1),d1) +! 
ic_infoloc(i1)=exp(d1-ic_zloc(i1))*ltmp(nPar+1)+exp(gZold-ic_zloc(i1))*(ic_infoloc(i1)+gZold)-ic_zloc(i1) + ic_infoloc(i1)=ic_infoloc(i1)*exp(ic_zold(i1)-ic_zloc(i1))+exp(d1-ic_zloc(i1))*ltmp(nPar+1) + enddo + + ginfoloc=ginfoloc-gzloc + ic_infoloc(1:ic_n)=ic_infoloc(1:ic_n)-ic_zloc(1:ic_n) + + !make the top level branch + nbranchp(0)=1 + branchp(0,1)=1.d0 + + lognpost=0.d0 + if(outfile) then + !read the ev.dat file & calculate the probability weights + open(unit=55,file=evfile,status='old') + endif + i=0 + do + if(outfile) then + read(55,*,IOSTAT=ios) ltmp(1:nPar+2),i1 + + !end of file? + if(ios<0) then + close(55) + exit + endif + else + i=i+1 + if(i*(nPar+3)>size(evDataAll)) exit + ltmp(1:nPar+2)=evDataAll((i-1)*(nPar+3)+1:i*(nPar+3)-1) + i1=int(evDataAll(i*(nPar+3))) + endif + + nPtPerNode(i1)=nPtPerNode(i1)+1 + evdatp(i1,nPtPerNode(i1),1:nPar+2)=ltmp(1:nPar+2) + + !probability weight + wt(i1,nPtPerNode(i1))=exp(evdatp(i1,nPtPerNode(i1),nPar+1)+evdatp(i1,nPtPerNode(i1),nPar+2)-gzloc) + if(wt(i1,nPtPerNode(i1))>0.d0) lognpost=lognpost-wt(i1,nPtPerNode(i1))*log(wt(i1,nPtPerNode(i1))) + enddo + + allocate(posterior(nIter, nPar+2), physLive(nLpt, nPar+1), paramConstr(4*nPar)) + paramConstr(1:2*nPar) = 0d0 + maxLogLike = -huge(1d0) + logZ = gzloc + nSamples = nIter + maxWt = 0d0 + m = 0 + + !read in final remaining points & calculate their probability weights + do i=1,nLpt + ltmp(1:nPar)=phyP(1:nPar,i) + ltmp(nPar+1)=l(i) + i1=phyID(i) + + physLive(i,1:nPar+1) = ltmp(1:nPar+1) + if( ltmp(nPar+1) > maxLogLike ) then + maxLogLike = ltmp(nPar+1) + indx = i + endif + nPtPerNode(i1)=nPtPerNode(i1)+1 + evdatp(i1,nPtPerNode(i1),1:nPar+1)=ltmp(1:nPar+1) + evdatp(i1,nPtPerNode(i1),nPar+2)=log(ic_vnow(i1)/dble(ic_nptloc(i1))) + wt(i1,nPtPerNode(i1))=exp(evdatp(i1,nPtPerNode(i1),nPar+1)+evdatp(i1,nPtPerNode(i1),nPar+2)-gzloc) + if(wt(i1,nPtPerNode(i1))>0.d0) lognpost=lognpost-wt(i1,nPtPerNode(i1))*log(wt(i1,nPtPerNode(i1))) + enddo + ! global maxlike parameters + paramConstr(nPar*2+1:nPar*3) = physLive(indx,1:nPar) + + !no. of equally weighted posterior samples + npost=nint(exp(lognpost)) + + !write the global posterior files + if(outfile) then + open(55,file=postfile,form='formatted',status='replace') + open(56,file=postfile4,form='formatted',status='replace') + write(fmt,'(a,i4,a)') '(',nPar+2,'E28.18)' + write(fmt2,'(a,i4,a)') '(',nPar+1,'E28.18)' + endif + do i=1,ic_n + do j=1,nPtPerNode(i) + m = m + 1 + posterior(m, 1:nPar+1) = evdatp(i, j, 1:nPar+1) + posterior(m, nPar+2) = wt(i,j) + if( wt(i,j) > maxWt ) then + indx = m + maxWt = wt(i,j) + endif + ! global paramater means + paramConstr(1:nPar) = paramConstr(1:nPar) + evdatp(i, j, 1:nPar) * wt(i,j) + ! global paramater standard deviations + paramConstr(nPar+1:2*nPar) = paramConstr(nPar+1:2*nPar) + (evdatp(i, j, 1:nPar)**2.0) * wt(i,j) + + if(wt(i,j)>1.d-99) then + if(outfile) write(55,fmt) wt(i,j),-2.d0*evdatp(i,j,nPar+1),evdatp(i,j,1:nPar) + + !find the multiplicity + d1=wt(i,j)*npost + !calculate the integer part of multiplicity + k=int(d1) + !calculate the remaining part of multiplicity + d2=d1-dble(k) + !increase the multiplicity by one with probability d2 + urv=ranmarns(0) + if(urv<=d2) k=k+1 + if(outfile) then + do i1=1,k + write(56,fmt) evdatp(i,j,1:nPar+1) + enddo + endif + endif + enddo + enddo + + ! global paramater standard deviations + paramConstr(nPar+1:2*nPar) = sqrt( paramConstr(nPar+1:2*nPar) - paramConstr(1:nPar)**2.0 ) + ! 
global MAP parameters + paramConstr(nPar*3+1:nPar*4) = posterior(indx,1:nPar) + + if(outfile) then + close(55) + close(56) + + open(unit=57,file=statsFile,form='formatted',status='replace') + !stats file + write(57,'(a,E28.18,a,E28.18)')"Global Evidence:",gzloc," +/-",sqrt(ginfoloc/dble(nLpt)) + + !now the separated posterior samples + + !generate the point set to be used by the constrained clustering algorithm + nUncon=0 + nClst=0 + i=0 + j=1 + call genPoints(i,j,nPar,ic_reme) + + do i=1,nClst + temp(i,1)=ic_zloc(clstrdNode(i)) + temp(i,2)=ic_infoloc(clstrdNode(i)) + temp(i,3)=ic_nptloc(clstrdNode(i)) + enddo + ic_zloc(1:nClst)=temp(1:nClst,1) + ic_infoloc(1:nClst)=temp(1:nClst,2) + ic_nptloc(1:nClst)=temp(1:nClst,3) + + !now arrange the point set, constrained points first, unconstrained later + pNwt=0.d0 + pwt=0.d0 + k=0 + do i=1,nClst + llike(i)=minval(consP(k+1:k+nCon(i),nPar+1)) + do j=1,nCon(i) + k=k+1 + pts(1:nPar+2,k)=consP(k,1:nPar+2) + pwt(k,i)=1.d0 + enddo + enddo + do i=1,nUncon + k=k+1 + pts(1:nPar+2,k)=unconsP(i,1:nPar+2) + enddo + + i1=0 + do + if(.false.) then + i1=i1+1 + + do i=1,k + pts2(1:ndim,i)=pts(1:ndim,i) + pts2(ndim+1,i)=pts(nPar+1,i) + enddo + + !perform constrained clustering + i=nCdim + l1=.true. + + call GaussMixExpMaxLike(i,nClst,k,nCon(1:nClst),.true.,pts2(1:i,1:k), & + pts(nPar+1:nPar+2,1:k),pwt(1:k,1:nClst),pNwt(1:k,1:nClst),ic_zloc(1:nClst), & + llike(1:nClst),l1) + endif + + !calculate cluster properties + do i=1,nClst + call rGaussProp(k,nCdim,pts(1:nCdim,1:k),pts(nPar+1:nPar+2,1:k), & + pwt(1:k,i),pNwt(1:k,i),stMu(i,1:nCdim),stSigma(i,1:nCdim),ic_zloc(i)) + enddo + + i=sum(nCon(1:nClst)) + j=nCdim + + !if(.not.merge(nClst,j,nPar,i,k,nCon(1:nClst),pts(:,1:k),stMu(1:nClst,1:j), & + !stSigma(1:nClst,1:j),ic_zloc(1:nClst),Ztol,pwt(1:k,1:nClst),pNwt(1:k,1:nClst), & + !llike(1:nClst))) exit + exit + enddo + + !open the output file + if(multimodal .and. outfile) then + !open(unit=56,file=strictSepFile,form='formatted',status='replace') + open(unit=55,file=sepFile,form='formatted',status='replace') + write(57,'(a)') + write(57,'(a)')"Local Mode Properties" + write(57,'(a)')"-------------------------------------------" + write(57,'(a)') + write(57,'(a,i12)')"Total Modes Found:",nClst + endif + + open(unit=58,file=summaryFile,status='unknown') + call genSepFiles(k,nPar,nClst,Ztol,pts,pNwt(1:k,1:nClst),nCon(1:nClst),ic_zloc(1:nClst), & + ic_infoloc(1:nClst), ic_nptloc(1:nClst),55,56,57,58,multimodal) + close(58) + + if(multimodal) close(55) + close(57) + endif + + !error on global evidence + logZerr=sqrt(ginfoloc/dble(nLpt)) + + ! call the dumper + call dumper(nSamples, nLpt, nPar, physLive, posterior, paramConstr, maxLogLike, logZ, logZerr, context) + + deallocate(branchp,evdatp,wt) + deallocate(nbranchp,nPtPerNode) + deallocate(pts2,pts,consP,unconsP,pwt,pNwt) + deallocate(ncon,nSamp,clstrdNode) + deallocate(check,ic_zold,temp) + deallocate(stMu,stSigma,llike) + deallocate(posterior, physLive, paramConstr) + + end subroutine pos_samp + +!------------------------------------------------------------------------ + + recursive subroutine genPoints(br,brNum,nPar,ic_reme) + + implicit none + + integer br !branch to be analyzed + integer brNum !branching no. of the branch to be analyzed + integer nPar !dimensionality + logical ic_reme(:) + !work variables + integer i,j,k,i1,i2,node + + node=int(branchp(br,brNum)) + + !find starting node + i1=1 + + !find out the ending position + i2=nPtPerNode(node) + + if(nbranchp(node)==0 .and. .not.ic_reme(node) .and. 
nPtPerNode(node)>0) then + !add the points to the constrained point set if encountered the leaf + !& calculate the means & sigmas + nClst=nClst+1 + nCon(nClst)=i2-i1+1 + j=sum(nCon(1:nClst-1)) + stMu(nClst,:)=0.d0 + stSigma(nClst,:)=0.d0 + ClstrdNode(nClst)=node + do i=i1,i2 + j=j+1 + consP(j,1:nPar+2)=evdatp(node,i,1:nPar+2) + stMu(nClst,1:nPar)=stMu(nClst,1:nPar)+evdatp(node,i,1:nPar) + stSigma(nClst,1:nPar)=stSigma(nClst,1:nPar)+evdatp(node,i,1:nPar)**2 + enddo + stMu(nClst,1:nPar)=stMu(nClst,1:nPar)/dble(nCon(nClst)) + stSigma(nClst,1:nPar)=stSigma(nClst,1:nPar)/dble(nCon(nClst)) + stSigma(nClst,1:nPar)=sqrt(stSigma(nClst,1:nPar)-stMu(nClst,1:nPar)**2) + elseif(.not.(nbranchp(node)==0)) then + if(.not.check(node)) then + !add the points to the un-constrained point set + do i=i1,i2 + nUncon=nUncon+1 + unconsP(nUncon,1:nPar+2)=evdatp(node,i,1:nPar+2) + enddo + check(node)=.true. + endif + !now parse the daughter branches + do i=1,nbranchp(node) + k=node + i1=i + call genPoints(k,i1,nPar,ic_reme) + enddo + endif + + end subroutine genPoints + +!------------------------------------------------------------------------ + + subroutine genSepFiles(npt,nPar,nCls,Ztol,pt,pwt,nCon,locZ,locInfo,locNpt,funit1,funit2,funit3,funit4,multimodal) + + implicit none + + !input variables + integer npt !total no. of points + integer nPar !dimensionality + integer nCls !no. of modes + double precision pt(nPar+2,npt) !points + integer nCon(nCls) !no. of constrained points + double precision pwt(npt,nCls) !ptrobability weights + double precision locZ(nCls), locInfo(nCls) !local evidence + integer locNpt(nCls) + double precision Ztol + integer funit1 !file having strictly separated samples + integer funit2 !file having separated samples + integer funit3 !stats file + integer funit4 !summary file + logical multimodal + !work variables + integer i,j,k,indx(1),nliveP + double precision d1,d2,d3,mean(nCls,nPar),sigma(nCls,nPar),maxLike(nPar),MAP(nPar) + double precision old_slocZ,sinfo,slocZ + character*30 fmt,stfmt + + nliveP=sum(locNpt(1:nCls)) + write(stfmt,'(a,i4,a)') '(',nPar*4+2,'E28.18)' + + !calculate the weights including the posterior component + do i=1,nCls + k=sum(nCon(1:i-1)) + !first calculate the evidence & info for points strictly lying the cluster + slocZ=-huge(1.d0)*epsilon(1.d0) !logZero + sinfo=0.d0 + do j=k+1,k+nCon(i) + !local evidence + old_slocZ=slocZ + slocZ=logSumExp(slocZ,pt(nPar+1,j)+pt(nPar+2,j)) + !local info + sinfo=exp(pt(nPar+1,j)+pt(nPar+2,j)-slocZ)*pt(nPar+1,j) & + +exp(old_slocZ-slocZ)*(sinfo+old_slocZ)-slocZ + enddo + + if(locZ(i)k .and. j1.d-99) then +! write(funit2,fmt)swt,-2.d0*pt(nPar+1,j),pt(1:nPar,j) +! endif +! endif + + !write the separate file + if(pwt(j,i)>1.d-99) then + nSamp(i)=nSamp(i)+1 + write(funit1,fmt)pwt(j,i),-2.d0*pt(nPar+1,j),pt(1:nPar,j) + endif + endif + enddo + sigma(i,1:nPar)=sqrt(sigma(i,1:nPar)-mean(i,1:nPar)**2.) 
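+ !----------------------------------------------------------------------
+ !Editor's note: illustrative sketch only.  The post_equal_weights.dat writing
+ !further above turns each posterior weight into an integer multiplicity by
+ !keeping the integer part of wt*npost and promoting the fractional part with
+ !matching probability (stochastic rounding); this function shows that step in
+ !isolation.  Names are local to this sketch.
+ integer function toy_multiplicity(wt,npost,urv)
+ implicit none
+ double precision wt !posterior probability weight of the sample
+ integer npost !target no. of equally weighted samples
+ double precision urv !uniform random deviate in [0,1)
+ double precision d1,d2
+
+ d1=wt*dble(npost)
+ toy_multiplicity=int(d1)
+ d2=d1-dble(int(d1))
+ !promote to the next integer with probability equal to the fractional part
+ if(urv<=d2) toy_multiplicity=toy_multiplicity+1
+ end function toy_multiplicity
+ !----------------------------------------------------------------------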
+ + !stMu(i,:)=mean(i,:) + !stSigma(i,:)=sigma(i,:) + + !find maxLike parameters + k=sum(nCon(1:i-1)) + indx=maxloc(pt(nPar+1,k+1:k+nCon(i))) + maxLike(1:nPar)=pt(1:nPar,indx(1)+k) + d2=pt(nPar+1,indx(1)+k) + + !find MAP parameters + indx=maxloc(pwt(1:npt,i)) + MAP(1:nPar)=pt(1:nPar,indx(1)) + + !write the stats file + if(multimodal) then + write(funit3,*) + write(funit3,*) + write(funit3,'(a,i4)')'Mode',i + d3=(nliveP-locNpt(i))*sinfo/locInfo(i)+locNpt(i) + write(funit3,'(a,E28.18,a,E28.18)')"Strictly Local Evidence",slocZ," +/-",sqrt(sinfo/locNpt(i)) + write(funit3,'(a,E28.18,a,E28.18)')"Local Evidence",locZ(i)," +/-",sqrt(locInfo(i)/d3) + endif + write(funit3,'(a)')"" + write(funit3,'(a)')"Dim No. Mean Sigma" + do j=1,nPar + !write(funit3,'(i4,2E28.18)')j,stMu(i,j),stSigma(i,j) + write(funit3,'(i4,2E28.18)')j,mean(i,j),sigma(i,j) + enddo + write(funit3,'(a)')"" + write(funit3,'(a)')"Maximum Likelihood Parameters" + write(funit3,'(a)')"Dim No. Parameter" + do j=1,nPar + write(funit3,'(i4,1E28.18)')j,maxLike(j) + enddo + write(funit3,'(a)')"" + write(funit3,'(a)')"MAP Parameters" + write(funit3,'(a)')"Dim No. Parameter" + do j=1,nPar + write(funit3,'(i4,1E28.18)')j,MAP(j) + enddo + write(funit4,stfmt)mean(i,1:nPar),sigma(i,1:nPar),maxLike(1:nPar),MAP(1:nPar),locZ(i),d2 + enddo + + end subroutine genSepFiles + +!------------------------------------------------------------------------ + + logical function merge(n,nCdim,nPar,npt,gnpt,nptx,pt,mean,sigma,locEv,nullEv,wt,nWt,llike) + + implicit none + + !input variables + integer n !no. of clusters + integer nCdim,nPar !dimensionality + integer npt !total no. of constrained points + integer gnpt !total no. of points + integer nptx(n) !no. of points in each cluster + double precision pt(nPar+2,gnpt) !points + double precision mean(n,nCdim),sigma(n,nCdim) + double precision locEv(n) !local evidence + double precision nullEv !null evidence + double precision wt(gnpt,n) + double precision nWt(gnpt,n),llike(n) + + !work variables + integer i,j,k,l,m + double precision tP(nPar+2,npt),tWt(npt,n) + logical check(n),flag + + + flag = .false. + merge=.false. + check=.false. + do i=1,n + if(nptx(i)==0 .or. locEv(i)0) then + j=j+1 + nptx(j)=nptx(i) + locEv(j)=locEv(i) + mean(j,:)=mean(i,:) + sigma(j,:)=sigma(i,:) + wt(:,j)=wt(:,i) + nWt(:,j)=nWt(:,i) + llike(j)=llike(i) + endif + enddo + n=j + + end function merge + +!------------------------------------------------------------------------ + + subroutine rGaussProp(n,d,p,LX,wt,nWt,mean,sigma,Z) + + implicit none + + !input variables + integer n !no. of points + integer d !dimensionality + double precision p(d,n) !points + double precision LX(2,n) !log-like & log-dX of points + double precision wt(n) + double precision nWt(n) + + !output variables + double precision mean(d) !mean + double precision sigma(d) !standard deviations + double precision Z !local evidence + + + !work variables + integer i + + + nWt=wt + !calculate the evidence +! Z=-huge(1.d0)*epsilon(1.d0) !logZero +! do i=1,n +! if(nWt(i)>0.d0) Z=logSumExp(Z,LX(1,i)+LX(2,i)+log(nWt(i))) +! 
enddo + + !now calculate the normalized posterior probabilty weights + do i=1,n + if(nWt(i)>0.d0) nWt(i)=nWt(i)*exp(LX(1,i)+LX(2,i)-Z) + enddo + + !mean & sigma + mean=0.d0 + sigma=0.d0 + do i=1,n + mean(1:d)=mean(1:d)+p(1:d,i)*nWt(i) + sigma(1:d)=sigma(1:d)+(p(1:d,i)**2)*nWt(i) + enddo + sigma(1:d)=sqrt(sigma(1:d)-mean(1:d)**2) + + end subroutine rGaussProp + +!---------------------------------------------------------------------- + + +end module posterior diff --git a/multinest/priors.f90 b/multinest/priors.f90 new file mode 100644 index 0000000..3c8cd4d --- /dev/null +++ b/multinest/priors.f90 @@ -0,0 +1,193 @@ +module priors + + use utils1 + +contains + +!======================================================================= +! Prior distribution functions: r is a uniform deviate from the unit +! +! + +! Uniform[0:1] -> Delta[x1] + +function DeltaFunctionPrior(r,x1,x2) + + implicit none + + double precision r,x1,x2,DeltaFunctionPrior + + DeltaFunctionPrior=x1 + +end function DeltaFunctionPrior + +!======================================================================= +! Uniform[0:1] -> Uniform[x1:x2] + +function UniformPrior(r,x1,x2) + + implicit none + + double precision r,x1,x2,UniformPrior + + UniformPrior=x1+r*(x2-x1) + +end function UniformPrior + +!======================================================================= +! Uniform[0:1] -> LogUniform[x1:x2] + +function LogPrior(r,x1,x2) + + implicit none + + double precision r,x1,x2,LogPrior + double precision lx1,lx2 + + if (r.le.0.0d0) then + LogPrior=-1.0d32 + else + lx1=dlog10(x1) + lx2=dlog10(x2) + LogPrior=10.d0**(lx1+r*(lx2-lx1)) + endif + +end function LogPrior + +!======================================================================= +! Uniform[0:1] -> Sin[x1:x2] (angles in degrees): + +function SinPrior(r,x1,x2) + + implicit none + + double precision r,x1,x2,SinPrior + real cx1,cx2,deg2rad + parameter(deg2rad=0.017453292) + + cx1=cos(x1*deg2rad) + cx2=cos(x2*deg2rad) + SinPrior=1.d0*acos(cx1+r*(cx2-cx1)) + +end function SinPrior + +!======================================================================= +! Uniform[0:1] -> Cauchy[mean=x0,FWHM=2*gamma] + +function CauchyPrior(r,x0,gamma) + + implicit none + + double precision r,x0,gamma,CauchyPrior + real Pi + parameter(Pi=3.141592654) + + CauchyPrior=x0+gamma*tan(Pi*(r-0.5)) + +end function CauchyPrior + +!======================================================================= +! Uniform[0:1] -> Gaussian[mean=mu,variance=sigma**2] + +function GaussianPrior(r,mu,sigma) + + implicit none + + double precision r,mu,sigma,GaussianPrior + double precision SqrtTwo + parameter(SqrtTwo=1.414213562d0) + + if (r.le.1.0d-16.or.(1.0d0-r).le.1.0d-16) then + GaussianPrior=-1.0d32 + else + GaussianPrior=mu+sigma*SqrtTwo*dierfc(2.d0*(1.d0-r)) + endif + +end function GaussianPrior + +!======================================================================= + +! Uniform[0:1] -> LogNormal[mode=a,width parameter=sigma] + +function LogNormalPrior(r,a,sigma) + + implicit none + + double precision r,a,sigma,LogNormalPrior + double precision SqrtTwo,bracket + parameter(SqrtTwo=1.414213562d0) + + bracket=sigma*sigma+sigma*SqrtTwo*dierfc(2.d0*r) + LogNormalPrior=a*dexp(bracket) + +end function LogNormalPrior + +!======================================================================= +! 
Inverse of complimentary error function in double precision + +function dierfc(y) + + implicit none + double precision y,dierfc + double precision qa,qb,qc,qd,q0,q1,q2,q3,q4,pa,pb,p0,p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11,p12,p13,p14,p15,p16,p17,p18 + double precision p19,p20,p21,p22,x,z,w,u,s,t + double precision infinity + parameter (infinity=5.0d0) + parameter (qa=9.16461398268964d-01, & + qb=2.31729200323405d-01, & + qc=4.88826640273108d-01, & + qd=1.24610454613712d-01, & + q0=4.99999303439796d-01, & + q1=1.16065025341614d-01, & + q2=1.50689047360223d-01, & + q3=2.69999308670029d-01, & + q4=-7.28846765585675d-02) + parameter (pa=3.97886080735226000d+00, & + pb=1.20782237635245222d-01, & + p0=2.44044510593190935d-01, & + p1=4.34397492331430115d-01, & + p2=6.86265948274097816d-01, & + p3=9.56464974744799006d-01, & + p4=1.16374581931560831d+00, & + p5=1.21448730779995237d+00, & + p6=1.05375024970847138d+00, & + p7=7.13657635868730364d-01, & + p8=3.16847638520135944d-01, & + p9=1.47297938331485121d-02, & + p10=-1.05872177941595488d-01, & + p11=-7.43424357241784861d-02) + parameter (p12=2.20995927012179067d-03, & + p13=3.46494207789099922d-02, & + p14=1.42961988697898018d-02, & + p15=-1.18598117047771104d-02, & + p16=-1.12749169332504870d-02, & + p17=3.39721910367775861d-03, & + p18=6.85649426074558612d-03, & + p19=-7.71708358954120939d-04, & + p20=-3.51287146129100025d-03, & + p21=1.05739299623423047d-04, & + p22=1.12648096188977922d-03) + if (y==0.0) then + dierfc=infinity + return + endif + z=y + if (y .gt. 1) z=2-y + w=qa-log(z) + u=sqrt(w) + s=(qc+log(u))/w + t=1/(u+qb) + x=u*(1-s*(0.5d0+s*qd))-((((q4*t+q3)*t+q2)*t+q1)*t+q0)*t + t=pa/(pa+x) + u=t-0.5d0 + s=(((((((((p22*u+p21)*u+p20)*u+p19)*u+p18)*u+p17)*u+p16)*u+p15)*u+p14)*u+p13)*u+p12 + s=((((((((((((s*u+p11)*u+p10)*u+p9)*u+p8)*u+p7)*u+p6)*u+p5)*u+p4)*u+p3)*u+p2) & + *u+p1)*u+p0)*t-z*exp(x*x-pb) + x=x+s*(1+x*s) + if (y .gt. 1) x=-x + dierfc=x + +end function dierfc + +!======================================================================= +end module priors diff --git a/multinest/utils0.f90 b/multinest/utils0.f90 new file mode 100644 index 0000000..7ecc434 --- /dev/null +++ b/multinest/utils0.f90 @@ -0,0 +1,165 @@ + module RandomNS + integer :: rand_instNS = 0 + double precision, dimension(:), allocatable :: C, CD, CM, GSET + double precision, dimension(:,:), allocatable :: U + integer, dimension(:), allocatable :: I97, J97, ISET + integer numNodes + + contains + + subroutine initRandomNS(n,i) + implicit none + integer n !no. of nodes + integer, optional, intent(IN) :: i + integer kl,ij,k + character(len=10) :: fred + real :: klr + + !sanity check + if(n<=0) then + write(*,*)'you have asked for ',n,'nodes' + stop + endif + + numNodes=n + + !memory allocation + allocate(C(n),CD(n),CM(n),U(n,97),I97(n),J97(n),ISET(n),GSET(n)) + + ISET=0 + + do k=1,n + if(present(i)) then + kl = 9373 + ij = (i+(k-1))*45 + else + call system_clock(count=ij) + ij = mod(ij + rand_instNS*100, 31328)+(k-1)*45 + call date_and_time(time=fred) + read (fred,'(e10.3)') klr + kl = mod(int(klr*1000), 30081) + end if + + !write(*,'(" randomNS seeds:",1I6,",",1I6," rand_instNS:",1I4)")') ij,kl,rand_instNS + call RMARINNS(ij,kl,k) + enddo + end subroutine initRandomNS + + + subroutine killRandomNS + deallocate(C,CD,CM,U,I97,J97,ISET,GSET) + end subroutine killRandomNS + + + double precision function GAUSSIAN1NS(idg) + implicit none + integer idg,id !node no. 
+ double precision urv + double precision R, V1, V2, FAC + + id=idg+1 + if(ISET(id)==0) then + R=2 + do while (R >=1.d0) + urv=ranmarns(id-1) + V1=2.d0*urv-1.d0 + urv=ranmarns(id-1) + V2=2.d0*urv-1.d0 + R=V1**2+V2**2 + end do + FAC=sqrt(-2.d0*log(R)/R) + GSET(id)=V1*FAC + gaussian1ns=V2*FAC + ISET(id)=1 + else + gaussian1ns=GSET(id) + ISET(id)=0 + endif + + end function GAUSSIAN1NS + + + + subroutine RMARINNS(IJ,KL,id) +! This is the initialization routine for the randomNS number generator ranmarns() +! NOTE: The seed variables can have values between: 0 <= IJ <= 31328 +! 0 <= KL <= 30081 +!The randomNS number sequences created by these two seeds are of sufficient +! length to complete an entire calculation with. For example, if sveral +! different groups are working on different parts of the same calculation, +! each group could be assigned its own IJ seed. This would leave each group +! with 30000 choices for the second seed. That is to say, this randomNS +! number generator can create 900 million different subsequences -- with +! each subsequence having a length of approximately 10^30. +! +! Use IJ = 1802 & KL = 9373 to test the randomNS number generator. The +! subroutine ranmarns should be used to generate 20000 randomNS numbers. +! Then display the next six randomNS numbers generated multiplied by 4096*4096 +! If the randomNS number generator is working properly, the randomNS numbers +! should be: +! 6533892.0 14220222.0 7275067.0 +! 6172232.0 8354498.0 10633180.0 + + if(IJ<0) IJ=31328+IJ + if(IJ>31328) IJ=mod(IJ,31328) + if(KL<0) KL=30081+KL + if(KL>30081) KL=mod(KL,30081) + + if(IJ<0 .or. IJ>31328 .or. KL<0 .or. KL>30081 ) then + print '(A)', ' The first randomNS number seed must have a value between 0 and 31328' + print '(A)',' The second seed must have a value between 0 and 30081' + stop + endif + I = mod(IJ/177, 177) + 2 + J = mod(IJ , 177) + 2 + K = mod(KL/169, 178) + 1 + L = mod(KL, 169) + do II = 1, 97 + S = 0.0 + T = 0.5 + do JJ = 1, 24 + M = mod(mod(I*J, 179)*K, 179) + I = J + J = K + K = M + L = mod(53*L+1, 169) + if (mod(L*M, 64)>=32) S = S + T + T = 0.5 * T + enddo + U(id,II) = S + enddo + C(id) = 362436.0 / 16777216.0 + CD(id) = 7654321.0 / 16777216.0 + CM(id) = 16777213.0 /16777216.0 + I97(id) = 97 + J97(id) = 33 + + end subroutine RMARINNS + + + + double precision function ranmarns(idg) +! This is the random number generator proposed by George Marsaglia in +! Florida State University Report: FSU-SCRI-87-50 +! It was slightly modified by F. James to produce an array of pseudorandom +! numbers. + + id=idg+1 + + UNI = U(id,I97(id)) - U(id,J97(id)) + if(UNI<0.) UNI = UNI + 1. + U(id,I97(id)) = UNI + I97(id) = I97(id) - 1 + if(I97(id)==0) I97(id) = 97 + J97(id) = J97(id) - 1 + if(J97(id)==0) J97(id) = 97 + C(id) = C(id) - CD(id) + if(C(id)<0.) C(id) = C(id) + CM(id) + UNI = UNI - C(id) + if(UNI<0.) UNI = UNI + 1. ! bug? + ranmarns = UNI + + end function ranmarns + + + end module RandomNS diff --git a/multinest/utils1.f90 b/multinest/utils1.f90 new file mode 100755 index 0000000..0cc3457 --- /dev/null +++ b/multinest/utils1.f90 @@ -0,0 +1,1086 @@ +! 
Covariance matrix, diagonalization + +module utils1 + !$ use omp_lib + use RandomNS + implicit none + + integer lwork,liwork + data lwork,liwork /1,1/ + logical setBlk + data setBlk /.false./ + +contains + +!---------------------------------------------------------------------- + + subroutine Diagonalize(a,diag,n,switch) + !Does m = U diag U^T, returning U in m + integer n,id + double precision a(n,n),diag(n) + logical switch + integer i,j,ierr,il,iu,m,isuppz(2*n) + double precision vl,vu,abstol,z(n,n) + double precision, dimension(:), allocatable :: work + integer, dimension(:), allocatable :: iwork + + diag=0. + abstol=0. + + id=0 + !$ id=omp_get_thread_num() + + if(.not.setBlk .or. switch) then + if(id==0) then + !find optimal block sizes + allocate(work(1)) + allocate(iwork(1)) + lwork=-1 + liwork=-1 + + call DSYEVR( 'V', 'A', 'U', n, a, n, vl, vu, il, iu, & + abstol, m, diag, Z, n, isuppz, work, lwork, iwork, liwork, ierr ) + + lwork=int(work(1)) + liwork=iwork(1) + deallocate(work,iwork) + setBlk=.true. + else + do + !$OMP FLUSH(setBlk) + if(setBlk) exit + enddo + end if + endif + + allocate(work(lwork),iwork(liwork)) + call DSYEVR( 'V', 'A', 'U', n, a, n, vl, vu, il, iu, & + abstol, m, diag, Z, n, isuppz, work, lwork, iwork, liwork, ierr ) + a=z + deallocate(work,iwork) + + !check for inf & nan + do i=1,n + if(diag(i)/=diag(i) .or. diag(i)>huge(1d0)) then + diag=1d0 + a=0d0 + do j=1,n + a(j,j)=1d0 + enddo + exit + endif + enddo + + end subroutine Diagonalize + +!---------------------------------------------------------------------- + +! LogSumExp(x,y)=log(exp(x)+exp(y)) + double precision function LogSumExp(x,y) + double precision x,y + + if (x.gt.y) then + LogSumExp=x+log(1+exp(y-x)) + else + LogSumExp=y+log(1+exp(x-y)) + end if + return + + end function LogSumExp + +!---------------------------------------------------------------------- + + subroutine calc_covmat(npt,np,p,mean,covmat) + implicit none + integer np,npt + double precision p(:,:) + double precision covmat(:,:) + double precision mean(:) + integer i,j,ip + + covmat=0. + do i=1,np + do j=i,np + do ip=1,npt + covmat(i,j)=covmat(i,j)+((p(i,ip)-mean(i))*(p(j,ip)-mean(j))) + end do + if(j/=i) covmat(j,i)=covmat(i,j) + end do + covmat(i,1:np)=covmat(i,1:np)/dble(npt-1) + end do + + return + + end subroutine calc_covmat + +!---------------------------------------------------------------------- + + subroutine calc_covmat_wt(npt,np,p,wt,mean,covmat) + implicit none + integer np,npt + double precision p(:,:),wt(:) + double precision covmat(:,:) + double precision mean(:) + integer i,j,ip + + covmat=0. 
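+    ! Weighted covariance: covmat(i,j) = sum_p wt(p)*(p(i,p)-mean(i))*(p(j,p)-mean(j)).
+    ! No normalisation by the weight sum is applied here (calc_covmat above divides
+    ! by npt-1 instead), so the result is a proper covariance only when the supplied
+    ! weights already sum to one. Illustrative call (hypothetical names):
+    !    call calc_covmat_wt(npt,nd,pts,w/sum(w),mu,cov)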
+ do i=1,np + do j=i,np + do ip=1,npt + covmat(i,j)=covmat(i,j)+((p(i,ip)-mean(i))*(p(j,ip)-mean(j)))*wt(ip) + end do + if(j/=i) covmat(j,i)=covmat(i,j) + end do + end do + + return + + end subroutine calc_covmat_wt + +!---------------------------------------------------------------------- + + !inv_cov=(evec).(inv_eval).Transpose(evec) + subroutine calc_invcovmat(numdim,evec,eval,invcov) + implicit none + integer numdim !num of dimensions & points + double precision evec(:,:),eval(:) !eigenvectors & eigenvalues + double precision invcov(:,:) + integer i,j,k + + invcov=0d0 + do i=1,numdim + do j=i,numdim + do k=1,numdim + invcov(i,j)=invcov(i,j)+(evec(i,k)*evec(j,k)/eval(k)) + end do + if(i/=j) invcov(j,i)=invcov(i,j) + end do + end do + + return + + end subroutine calc_invcovmat + +!---------------------------------------------------------------------- + + subroutine ScaleFactor(npt,ndim,pt,mean,inv_cov,kmax) + implicit none + + double precision kmax + integer npt,ndim + double precision pt(ndim,npt),ptM(1,ndim),sf(1,1) + integer i,k + double precision inv_cov(ndim,ndim) + double precision mean(ndim) + double precision temp_p(ndim,npt) + + do i=1,ndim + temp_p(i,1:npt)=pt(i,1:npt)-mean(i) + enddo + + kmax=0d0 + do k=1,npt + ptM(1,:)=temp_p(:,k) + sf=MatMul(MatMul(ptM,inv_cov),Transpose(ptM)) + if(sf(1,1)>kmax) kmax=sf(1,1) + enddo + + !kmax=sqrt(kmax) + + end subroutine ScaleFactor + +!---------------------------------------------------------------------- + + double precision function ptScaleFac(ndim,pt,meanx,invcovx) + + implicit none + + integer ndim!dimensionality + double precision pt(ndim),point(1,ndim)!point to be checked + double precision meanx(ndim),invcovx(ndim,ndim)!cluster attributes + double precision kfac(1,1)!k factor of present point + + point(1,:)=pt(:)-meanx(:) + kfac=MATMUL(MATMUL(point,invcovx),Transpose(point)) + ptScaleFac=kfac(1,1) + + end function ptScaleFac + +!---------------------------------------------------------------------- + + subroutine getTMatrix(ndim,evec,eval,TMatrix) + + implicit none + + double precision TMatrix(ndim,ndim),evec(ndim,ndim),eval(ndim) + integer ndim,np,i,j + double precision TMat(ndim,ndim) + + np=ndim + TMatrix=evec + + do i=1,np + do j=1,np + TMatrix(i,j) = evec(i,j) * sqrt(eval(j)) + enddo + enddo + TMat=Transpose(TMatrix) + TMatrix=TMat + + end subroutine getTMatrix + +!---------------------------------------------------------------------- + + !return the Mahalanobis distance of a point from the given ellipsoid + double precision function MahaDis(ndim,pt,mean,inv_cov,kfac) + implicit none + + !input varaibles + integer ndim !dimensionality + double precision pt(ndim) !point + double precision mean(ndim) !centroid of the ellipsoid + double precision inv_cov(ndim,ndim) !inv covariance matrix of the ellipsoid + double precision kfac !enlargement factor + + MahaDis=ptScaleFac(ndim,pt,mean,inv_cov)/kfac + + end function MahaDis + +!---------------------------------------------------------------------- + + double precision function ellVol(ndim,eval,k_fac) + + implicit none + + integer ndim + double precision eval(ndim),k_fac,pi + integer i + + pi=4.d0*atan(1.d0) + + ellVol=sqrt(product(eval)*(k_fac**dble(ndim))) + + if(mod(ndim,2)==0) then + do i=2,ndim,2 + ellVol=ellVol*2.d0*pi/dble(i) + enddo + else + ellVol=ellVol*2. 
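+        ! Both branches apply the unit n-ball volume recurrence V_n = (2*pi/n)*V_(n-2)
+        ! (with V_0 = 1, V_1 = 2); together with the sqrt(product(eval)*k_fac**ndim)
+        ! prefactor above this yields the volume of the enlarged ellipsoid.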
+ do i=3,ndim,2 + ellVol=ellVol*2.d0*pi/dble(i) + enddo + endif + end function ellVol + +!---------------------------------------------------------------------- + + subroutine genPtInSpheroid(np,u,id) + + implicit none + + integer np,i,id + double precision u(:), mod, urv + + mod=0d0 + do i=1,np + u(i)=gaussian1NS(id) + mod=mod+u(i)**2. + enddo + urv=ranmarNS(id) + mod=(urv**dble(1./np))/sqrt(mod) + u(:)=mod*u(:) + + end subroutine genPtInSpheroid + +!---------------------------------------------------------------------- + + subroutine genPtOnSpheroid(np,u,id) + + implicit none + + integer np,i,id + double precision u(:), mod + + mod=0.d0 + do i=1,np + u(i)=gaussian1NS(id) + mod=mod+u(i)**2. + enddo + mod=1.d0/sqrt(mod) + u(:)=mod*u(:) + + end subroutine genPtOnSpheroid + +!---------------------------------------------------------------------- + + !calculate the mean, covariance matrix, inv covariance matrix, evalues, + !evects, det(cov) & enlargement of a given point set + subroutine CalcEllProp(npt,ndim,pt,mean,covmat,invcov,tMat,evec,eval,detcov, & + kfac,eff,vol,pVol,switch) + implicit none + !input variables + integer npt !no. of points + integer ndim !dimensionality + double precision pt(ndim,npt) !points + double precision pVol !min vol this ellipsoid should occupy + logical switch !initialize the LAPACK eigen analysis routines? + !output variables + double precision mean(ndim) !mean + double precision covmat(ndim,ndim) !covariance matrix + double precision invcov(ndim,ndim) !inverse covariance matrix + double precision tMat(ndim,ndim) !transformation matrix + double precision evec(ndim,ndim) !eigenvectors + double precision eval(ndim) + double precision detcov !determinant of the covariance matrix + double precision kfac !enlargement point factor + double precision eff !enlargement volume factor + double precision vol !ellipsoid volume + !work variables + integer i,j + + !calculate the mean + do i=1,ndim + mean(i)=sum(pt(i,1:npt))/npt + enddo + + if(npt<=1) then + kfac=0.d0 + vol=0.d0 + eval=0.d0 + eff=1.d0 + return + endif + + !calculate the covariance matrix + call calc_covmat(npt,ndim,pt,mean,covmat) + + !eigen analysis + evec=covmat + call Diagonalize(evec,eval,ndim,switch) + + !eigenvalues of covariance matrix can't be zero + do i=1,ndim-1 + if(eval(i)<=0.d0) eval(1:i)=eval(i+1)/2. + enddo + + !if the no. of points is less than ndim+1 then set the eigenvalues of unconstrained + !dimensions equal to the min constrained eigenvalue + if(npt0d0 .and. volminPt) then + k=min(n,npt-minPt) + if(k>0) then + maxIndx(:,2)=0d0 + do i=1,npt + dist(i)=0d0 + do j=1,ndim + dist(i)=dist(i)+abs((pt(j,i)-mean(j)))/sigma(j) + enddo + + do j=1,k+1 + if(j==k+1) exit + if(dist(i)0) then + maxIndx(1:j-1,:) = maxIndx(2:j,:) + maxIndx(j,1) = dble(i) + maxIndx(j,2) = dist(i) + endif + enddo + + nOut=0 + do i=1,k + if(maxIndx(i,2)>3d0) then + nOut=nOut+1 + ptOut(nOut)=int(maxIndx(i,1)) + endif + enddo + else + nOut=0 + endif + + if(nOut>0) then + j=0 + do i=1,npt + flag=.false. + do k=1,nOut + if(i == ptOut(k)) then + flag=.true. + exit + endif + enddo + + if(flag) cycle + + ptk(:,j+1)=pt(:,i) + j=j+1 + enddo + nptk=npt-nOut + else + ptk=pt + nptk=npt + endif + else + ptk=pt + nptk=npt + endif + + + !calculate the covariance matrix + call calc_covmat(nptk,ndim,ptk,mean,covmat) + + !eigen analysis + evec=covmat + call Diagonalize(evec,eval,ndim,.false.) + !eigenvalues of covariance matrix can't be zero + do i=1,ndim-1 + if(eval(i)<=0.d0) eval(1:i)=eval(i+1)/2. + enddo + + !if the no. 
of points is less than ndim+1 then set the eigenvalues of unconstrained + !dimensions equal to the min constrained eigenvalue + if(nptk 1) then + write(*,*)"a_r in evolveEll cannot be", a_r + stop + endif + + if(pVol==0.d0 .or. eval(ndim)==0.d0) then + kfac=0.d0 + eff=1.d0 + vol=0.d0 + return + endif + + + !point inserted + if(a_r==1) then + if(npt==0) then + write(*,*)"can not insert point in an ellipsoid with no points" + stop + else + !calculate the scale factor + new_pt(:,1)=newpt(:) + call ScaleFactor(1,ndim,new_pt,mean,invcov,new_kfac) + if(new_kfac>kfac) then + eff=eff*kfac/new_kfac + kfac=new_kfac + if(eff<1d0) then + eff=1d0 + vol=ellVol(ndim,eval,kfac) + endif + endif + + !if the point has been inserted to a cluster with npt > 0 then kfac remains the same + !scale eff if target vol is bigger + if(pVol>vol) then + eff=eff*(pVol/(vol))**(2.d0/dble(ndim)) + vol=pVol + else + !if target vol is smaller then calculate the point volume & + !scale eff if required + vol=ellVol(ndim,eval,kfac) + + !scale the enlargement factor if volnew_kfac*eff .and. npt>1) then + write(*,*)"Problem in evoleell" + write(*,*)kfac,new_kfac,eff,npt + stop + endif + eff=1d0 + vol=ellVol(ndim,eval,kfac) + endif + + !scale the enlargement factor if volkfac) then + kfac=d1 + eff=1.d0 + vol=ellVol(ndim,eval,kfac) + endif + + if(pVol>vol) then + eff=eff*(pVol/(vol))**(2.d0/dble(ndim)) + vol=pVol + endif + + end subroutine enlargeEll + +!---------------------------------------------------------------------- + + + function inprior(np,p) + + implicit none + + logical inprior + integer np, i + double precision p(np) + + inprior = .true. + + do i=1,np + if(p(i)<0d0 .or. p(i)>1d0) then + inprior = .false. + return + endif + enddo + + end function inprior + +!---------------------------------------------------------------------- + + !Returns the value gamma(ln[(xx)]) for xx > 0. + FUNCTION gammln(xx) + REAL gammln,xx + INTEGER j + DOUBLE PRECISION ser,stp,tmp,x,y,cof(6) + !Internal arithmetic will be done in double precision, + !a nicety that you can omit if ve- gure accuracy is good enough. + SAVE cof,stp + + DATA cof,stp/76.18009172947146d0,-86.50532032941677d0,24.01409824083091d0, & + -1.231739572450155d0,.1208650973866179d-2,-.5395239384953d-5,2.5066282746310005d0/ + + x=xx + y=x + tmp=x+5.5d0 + tmp=(x+0.5d0)*log(tmp)-tmp + ser=1.000000000190015d0 + do j=1,6 + y=y+1.d0 + ser=ser+cof(j)/y + enddo + gammln=tmp+log(stp*ser/x) + return + END FUNCTION gammln + + + + + !USES gcf,gser Returns the incomplete gamma function P(a, x). + FUNCTION gammp(a,x) + REAL a,gammp,x + REAL gammcf,gamser,gln + + if(x.lt.0..or.a.le.0.) then + write(*,*) 'bad arguments in gammp' + stop + endif + if(x.lt.a+1.)then + !Use the series representation. + call gser(gamser,a,x,gln) + gammp=gamser + else + !Use the continued fraction representation + call gcf(gammcf,a,x,gln) + gammp=1.-gammcf !and take its complement. + endif + return + END FUNCTION gammp + + + !USES gcf,gser Returns the incomplete gamma function Q(a,x)=1-P(a,x). + FUNCTION gammq(a,x) + REAL a,gammq,x + REAL gammcf,gamser,gln + + if(x.lt.0..or.a.le.0.) then + write(*,*) 'bad arguments in gammq' + stop + endif + if(x.lt.a+1.)then + !Use the series representation + call gser(gamser,a,x,gln) + gammq=1.-gamser !and take its complement. + else + !Use the continued fraction representation. + call gcf(gammcf,a,x,gln) + gammq=gammcf + endif + return + END FUNCTION gammq + + + !USES gammln Returns the incomplete gamma function P(a,x) evaluated by its series + !representation as gamser. 
+ !Also returns ln (a) as gln. + SUBROUTINE gser(gamser,a,x,gln) + INTEGER ITMAX + REAL a,gamser,gln,x,EPS + PARAMETER (ITMAX=100,EPS=3.e-7) + INTEGER n + REAL ap,del,sum + + gln=gammln(a) + if(x.le.0.)then + if(x.lt.0.) write(*,*) 'x < 0 in gser' + gamser=0. + return + endif + ap=a + sum=1./a + del=sum + do n=1,ITMAX + ap=ap+1. + del=del*x/ap + sum=sum+del + if(abs(del).lt.abs(sum)*EPS)goto 91 + enddo + write(*,*) 'a too large, ITMAX too small in gser' + 91 gamser=sum*exp(-x+a*log(x)-gln) + return + END SUBROUTINE gser + + + !USES gammln Returns the incomplete gamma function Q(a, x) evaluated by its continued + !fraction representation as gammcf. Also returns ln (a) as gln. + !Parameters: ITMAX is the maximum allowed number of iterations; + !EPS is the relative accuracy; FPMIN is a number near the smallest representable + !floating-point number. + SUBROUTINE gcf(gammcf,a,x,gln) + INTEGER ITMAX + REAL a,gammcf,gln,x,EPS,FPMIN + PARAMETER (ITMAX=100,EPS=3.e-7,FPMIN=1.e-30) + INTEGER i + REAL an,b,c,d,del,h + + gln=gammln(a) + b=x+1.-a + c=1./FPMIN + d=1./b + h=d + do i=1,ITMAX !Iterate to convergence. + an=-i*(i-a) + b=b+2. + d=an*d+b + if(abs(d).lt.FPMIN)d=FPMIN + c=b+an/c + if(abs(c).lt.FPMIN)c=FPMIN + d=1./d + del=d*c + h=h*del + if(abs(del-1.).lt.EPS)goto 91 + enddo + write(*,*) 'a too large, ITMAX too small in gcf' + 91 gammcf=exp(-x+a*log(x)-gln)*h !Put factors in front. + return + END SUBROUTINE gcf + + + !USES gammp Returns the error function erf(x). + FUNCTION erf(x) + REAL erf,x + + if(x.lt.0.)then + erf=-gammp(.5,x**2) + else + erf=gammp(.5,x**2) + endif + return + END FUNCTION erf + + + double precision function stNormalCDF(x) + double precision x + + if(x>6.) then + stNormalCDF=1. + else if(x<-6.) then + stNormalCDF=0. + else + stNormalCDF=0.5*(1.+erf(real(x)/1.41421356)) + end if + + end function stNormalCDF + +!---------------------------------------------------------------------- + + integer function binSearch(array,npt,x) + implicit none + integer npt,array(npt) !total no. of points in the array & the array + integer x !no. whose position has to be found + integer sp,ep !starting, end & insertion points + + binSearch = 1 + if(npt==0) then + return + elseif(x>=array(1)) then + return + end if + if(x<=array(npt)) then + binSearch=npt+1 + return + end if + sp=1 + ep=npt + do + if(sp==ep) return + if(x>array((ep+sp)/2)) then + ep=(ep+sp)/2 + binSearch=ep + elseif(x1d0) then + d1=eff + eff=max(1d0,eff*exp(-2d0/dble(npt*ndim))) + vol=vol*((eff/d1)**(dble(ndim)/2d0)) + endif + + + end subroutine ceffEvolve + +!---------------------------------------------------------------------- + +end module utils1 diff --git a/multinest/xmeans_clstr.f90 b/multinest/xmeans_clstr.f90 new file mode 100755 index 0000000..7c37c6a --- /dev/null +++ b/multinest/xmeans_clstr.f90 @@ -0,0 +1,2854 @@ +! 
X-means, Pelleg, Moore + +module xmeans_clstr + use kmeans_clstr + use utils1 + implicit none + + integer n_dim + double precision LogTwoPi + parameter(LogTwoPi=1.83787706641) + double precision larg + parameter(larg=huge(1.d0)) + !information about ellipses at each node + integer numClstr,nCls,ptClstrd,maxClstr !total clusters found yet & total pts in clstrs yet + double precision, dimension(:,:), allocatable :: p,xclsMean,xclsEval,aux + double precision, dimension(:), allocatable :: xclsBIC,loglike,xclsVar,xclsVol,xclsKfac,xclsEff,xclsDetcov + double precision, dimension(:,:,:), allocatable :: xclsInvCov,xclsTMat,xclsCovmat,xclsEvec + integer, dimension(:), allocatable :: revcPos,xclsPos,ptInClstr + integer, dimension(:,:), allocatable :: pt_Clstr + + contains + + !simple X-means with likelihoods also being rearranged + subroutine doXmeans(points,npt,np,nClstr,ptClstr,cMean,cCovmat,cInvCov,cEval,cEvec,like,min_pt) + implicit none + + double precision points(:,:),like(:) + integer npt,np !num of points & dimensionality + integer nClstr !total clusters found + integer ptClstr(:),min_pt + double precision cMean(:,:),cCovmat(:,:,:),cInvCov(:,:,:),cEval(:,:),cEvec(:,:,:) + integer i,j + + n_dim=np + maxClstr=npt/min_pt+1 + allocate (p(n_dim,npt),loglike(npt),xclsBIC(maxClstr),revcPos(maxClstr), & + xclsPos(maxClstr),ptInClstr(maxClstr)) + allocate (xclsMean(maxClstr,n_dim),xclsCovmat(maxClstr,n_dim,n_dim), & + xclsInvCov(maxClstr,n_dim,n_dim),xclsEvec(maxClstr,n_dim,n_dim), & + xclsEval(maxClstr,n_dim)) + nCls=1 + numClstr=0 + ptClstrd=0 + ptInClstr=0 + revcPos=0 + + call xmeans(points,npt,like,min_pt) + !if(numClstr>1) call postprocess + j=0 + do i=1,numClstr + if(ptInClstr(xclsPos(i))>0) then + j=j+1 + ptClstr(j)=ptInClstr(xclsPos(i)) + cMean(j,1:n_dim)=xclsMean(i,1:n_dim) + cEvec(j,1:n_dim,1:n_dim)=xclsEvec(i,1:n_dim,1:n_dim) + cEval(j,1:n_dim)=xclsEval(i,1:n_dim) + cCovmat(j,1:n_dim,1:n_dim)=xclsCovmat(i,1:n_dim,1:n_dim) + cInvCov(j,1:n_dim,1:n_dim)=xclsInvCov(i,1:n_dim,1:n_dim) + end if + end do + nClstr=j + points=p + like=loglike + deallocate(p,loglike,xclsBIC,revcPos,xclsPos,ptInClstr) + deallocate(xclsMean,xclsInvCov,xclsCovmat,xclsEvec,xclsEval) + + end subroutine doXmeans + +!---------------------------------------------------------------------- + !sub-clustering X-means with shared points + !points rearranged with the cluster index of primary points also returned + subroutine doXmeans5(points,npt,np,nClstr,ptClstr,ad,naux,auxa,min_pt) + implicit none + !input/output + double precision points(:,:) !actual points which are rearranged after clustering + !note: the clusters have shared points too + double precision auxa(:,:) !auxilliary variables which are rearranged after clustering + !input + integer npt,np !num of points & dimensionality + integer ad !no. of shared points required per cluster + integer min_pt !min points allowed per cluster + integer naux !no. of auxilliary variables to be a re-arranged + !ouput + integer nClstr !total clusters found + integer ptClstr(:) !total no. 
of points in each cluster (including shared points) + integer cluster(npt) !cluster index of primary points (before re-arrangement) + + !work + integer i,j,maxec,k + + if(npt<2*min_pt) then + nClstr=1 + ptClstr(1)=npt + ad=0 + return + end if + + if(ad<0) ad=0 + n_dim=np + maxec=npt/min_pt+1 + + allocate (p(n_dim,npt+maxec*ad),revcPos(maxec),xclsPos(maxec), & + ptInClstr(maxec),aux(naux,npt+maxec*ad)) + + ptInClstr=0 + nCls=1 + numClstr=0 + ptClstrd=0 + revcPos=0 + + call xmeans5(points(:,1:npt),npt,cluster,ad,naux,auxa,min_pt) + + j=0 + k=0 + do i=1,numClstr + if(ptInClstr(xclsPos(i))>0) then + j=j+1 + ptClstr(j)=ptInClstr(xclsPos(i)) + k=k+ptInClstr(xclsPos(i)) + end if + end do + + nClstr=j + points(1:n_dim,1:k)=p(1:n_dim,1:k) + auxa(1:naux,1:k)=aux(1:naux,1:k) + + deallocate(p,revcPos,xclsPos,ptInClstr,aux) + + end subroutine doXmeans5 + +!---------------------------------------------------------------------- + !sub-clustering X-means with + !nClstr = no. of clusters found by previous clustering + !ptClstr(nClstr) = no. of pts in each cluster found by previous clustering + !nb = every cluster is divided into nb sub-clusters + !ad = shared pts per sub-cluster + subroutine doXmeans8(points,like,npt,np,nClstr,ptClstr,ad,min_pt) + implicit none + + double precision points(:,:),like(:) + integer npt,np !num of points & dimensionality + integer nClstr !total clusters found + integer ptClstr(:),min_pt + integer i,j,maxec,k + integer ad !no. of shared points per cluster + + if(npt<2*min_pt) then + nClstr=1 + ptClstr(1)=npt + ad=0 + return + end if + + if(ad<0) ad=0 + + n_dim=np + maxec=npt/min_pt+1 + + allocate (p(n_dim,npt+maxec*ad)) + allocate (loglike(npt+maxec*ad)) + allocate (revcPos(maxec)) + allocate (xclsPos(maxec)) + allocate (ptInClstr(maxec)) + + ptInClstr=0 + nCls=1 + numClstr=0 + ptClstrd=0 + revcPos=0 + + call Xmeans8(nClstr,points(:,1:npt),like(1:npt),ptClstr,npt,ad,min_pt) + + j=0 + k=0 + do i=1,numClstr + if(ptInClstr(xclsPos(i))>0) then + j=j+1 + ptClstr(j)=ptInClstr(xclsPos(i)) + k=k+ptInClstr(xclsPos(i)) + end if + end do + + nClstr=j + points(1:n_dim,1:k)=p(1:n_dim,1:k) + like(1:k)=loglike(1:k) + + deallocate(p) + deallocate(revcPos) + deallocate(loglike) + deallocate(xclsPos) + deallocate(ptInClstr) + + end subroutine doXmeans8 + +!---------------------------------------------------------------------- + + !simple X-means with likelihoods also being rearranged + subroutine doXmeans6(points,npt,np,nClstr,ptClstr,naux,auxa,min_pt,maxC) + implicit none + + double precision points(:,:),auxa(:,:) + integer npt,np !num of points & dimensionality + integer naux !dimension of auxilliary array + integer nClstr !total clusters found + integer ptClstr(:),min_pt,maxC + integer i,j + + if(npt<2*min_pt) then + nClstr=1 + ptClstr(1)=npt + return + end if + + n_dim=np + maxClstr=maxC + allocate (p(n_dim,npt),aux(naux,npt),xclsPos(maxClstr),ptInClstr(maxClstr), & + xclsMean(maxClstr,n_dim),xclsVar(maxClstr),xclsBIC(maxClstr),revcPos(maxClstr)) + nCls=1 + numClstr=0 + ptClstrd=0 + ptInClstr=0 + + call xmeans6(points,npt,naux,auxa,min_pt) + + j=0 + do i=1,numClstr + if(ptInClstr(xclsPos(i))>0) then + j=j+1 + ptClstr(j)=ptInClstr(xclsPos(i)) + end if + end do + + nClstr=j + points(1:n_dim,1:npt)=p(1:n_dim,1:npt) + auxa(1:naux,1:npt)=aux(1:naux,1:npt) + deallocate(p,aux,xclsPos,ptInClstr,xclsMean,xclsVar,xclsBIC,revcPos) + + end subroutine doXmeans6 + +!---------------------------------------------------------------------- + + !simple X-means + subroutine 
doXmeans7(points,npt,np,nClstr,ptClstr,min_pt) + implicit none + + double precision points(:,:) + integer npt,np !num of points & dimensionality + integer nClstr !total clusters found + integer ptClstr(:),min_pt + integer i,j + + if(npt<2*min_pt) then + nClstr=1 + ptClstr(1)=npt + return + end if + + n_dim=np + maxClstr=npt/min_pt+1 + allocate (p(n_dim,npt),xclsPos(maxClstr),ptInClstr(maxClstr), & + xclsMean(maxClstr,n_dim),xclsVar(maxClstr),xclsBIC(maxClstr),revcPos(maxClstr)) + nCls=1 + numClstr=0 + ptClstrd=0 + ptInClstr=0 + + call Xmeans7(points,npt,min_pt) + + j=0 + do i=1,numClstr + if(ptInClstr(xclsPos(i))>0) then + j=j+1 + ptClstr(j)=ptInClstr(xclsPos(i)) + end if + end do + + nClstr=j + points(1:n_dim,1:npt)=p(1:n_dim,1:npt) + deallocate(p,xclsPos,ptInClstr,xclsMean,xclsVar,xclsBIC,revcPos) + + end subroutine doXmeans7 + +!---------------------------------------------------------------------- + + subroutine doGmeans(points,npt,np,nClstr,ptClstr,naux,auxa,min_pt,maxC) + implicit none + + double precision points(:,:),auxa(:,:) + integer npt,naux,np !num of points & dimensionality + integer nClstr !total clusters found + integer ptClstr(:),min_pt,maxC + integer i,j + + if(npt<2*min_pt) then + nClstr=1 + ptClstr(1)=npt + return + endif + + n_dim=np + maxClstr=maxC + allocate (p(n_dim,npt),aux(naux,npt),xclsPos(maxClstr),ptInClstr(maxClstr)) + nCls=1 + numClstr=0 + ptClstrd=0 + ptInClstr=0 + + call Gmeans(points,npt,naux,auxa,min_pt) + + j=0 + do i=1,numClstr + if(ptInClstr(xclsPos(i))>0) then + j=j+1 + ptClstr(j)=ptInClstr(xclsPos(i)) + endif + enddo + + nClstr=j + points(1:n_dim,1:npt)=p(1:n_dim,1:npt) + auxa(1:naux,1:npt)=aux(1:naux,1:npt) + + deallocate(p,aux,xclsPos,ptInClstr) + + end subroutine doGmeans + +!---------------------------------------------------------------------- + + subroutine doGmeans2(points,npt,np,nClstr,ptClstr,min_pt) + implicit none + + double precision points(:,:) + integer npt,np !num of points & dimensionality + integer nClstr !total clusters found + integer ptClstr(:),min_pt + integer i,j + + if(npt<2*min_pt) then + nClstr=1 + ptClstr(1)=npt + return + end if + + n_dim=np + maxClstr=npt/min_pt+1 + allocate (p(n_dim,npt),xclsPos(maxClstr),ptInClstr(maxClstr)) + nCls=1 + numClstr=0 + ptClstrd=0 + ptInClstr=0 + + call Gmeans2(points,npt,min_pt) + + j=0 + do i=1,numClstr + if(ptInClstr(xclsPos(i))>0) then + j=j+1 + ptClstr(j)=ptInClstr(xclsPos(i)) + end if + end do + nClstr=j + points=p + deallocate(p,xclsPos,ptInClstr) + + end subroutine doGmeans2 + +!---------------------------------------------------------------------- + + recursive subroutine Xmeans(pt,npt,like,min_pt) + implicit none + + integer min_pt,npt !num of points + double precision pt(:,:) !points + double precision like(:) !likelihood values + double precision ptk(2,n_dim,npt)!points in clusters + double precision likek(2,npt)!loglike, to change order only + integer nptk(2) !no. of points in the clusters + integer cluster(npt) !cluster array having cluster num of each pt + integer i,j,ip + double precision covar(n_dim,n_dim),covark(2,n_dim,n_dim),invcov(n_dim,n_dim),invcovk(2,n_dim,n_dim) + double precision evec(n_dim,n_dim),eveck(2,n_dim,n_dim),eval(n_dim),evalk(2,n_dim),detcov,detcovk(2) + double precision mean(n_dim),meank(2,n_dim) + double precision BIC1,BIC2 !BIC for 1 & 2 clusters respectively + integer i1 + logical flag + + flag=.false. 
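+    ! X-means node step: score this point set with the single-Gaussian BIC
+    ! (calcBIC1), attempt a 2-way k-means split scored with calcBIC2, and recurse
+    ! on the two children only when the split lowers the BIC; otherwise the set is
+    ! stored unsplit as one cluster.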
+ + do i=1,n_dim + mean(i)=sum(pt(i,1:npt))/dble(npt) + enddo + !calculate cov,inv_cov,evec matrices & eval & det(cov) + call calc_covmat(npt,n_dim,pt,mean,covar) + evec=covar + call diagonalize(evec,eval,n_dim,flag) + !if any of the eval are 0 or -ve then use the lowest +ve eval + do j=1,n_dim + if(eval(j)<=0.) eval(1:j)=eval(j+1) + enddo + detcov=product(eval) + call calc_invcovmat(n_dim,evec,eval,invcov) + + !don't cluster + !if it produces a cluster with less than 2*(min_pt-1)+1 points + !since it would cluster into clusters having less than min_pt points + !or + !if the number of clusters has reached maxCls + if(npt<2*min_pt.or.nCls==maxClstr) then + BIC2=larg + BIC1=-larg + flag=.true. + end if + + if(.not.flag) then + !calculate BIC for all the points, no clustering + BIC1=calcBIC1(pt,npt,mean,invcov,detcov) + + !breakup the points in 2 clusters + i1=2 + meank(1,1:n_dim)=mean(1:n_dim) + call kmeans3(i1,pt,npt,n_dim,meank(1:i1,1:n_dim),cluster,min_pt) + !calculate the no. of points in each cluster & separate them + nptk=0 + ptk=0. + do i=1,npt + nptk(cluster(i))=nptk(cluster(i))+1 + ptk(cluster(i),:,nptk(cluster(i)))=pt(:,i) + likek(cluster(i),nptk(cluster(i)))=like(i) + end do + + !don't cluster if either of the clusters have less than min_pt points + !since that might be noise + do i=1,2 + if(nptk(i)BIC2) then + nCls=nCls+1 + call xmeans(ptk(1,:,1:nptk(1)),nptk(1),likek(1,1:nptk(1)),min_pt) + call xmeans(ptk(2,:,1:nptk(2)),nptk(2),likek(2,1:nptk(2)),min_pt) + return + else + p(:,ptClstrd+1:ptClstrd+npt)=pt(:,1:npt) + loglike(ptClstrd+1:ptClstrd+npt)=like(1:npt) + ip=binSearch(ptInClstr,numClstr,npt) + ptInClstr(ip+1:numClstr+1)=ptInClstr(ip:numClstr) + ptInClstr(ip)=npt + do i=1,numClstr + if(xclsPos(i)>=ip) xclsPos(i)=xclsPos(i)+1 + end do + xclsPos(numClstr+1)=ip + ptClstrd=ptClstrd+npt + numClstr=numClstr+1 + !save cluster info + xclsBIC(numClstr)=BIC1 + xclsMean(numClstr,1:n_dim)=mean(1:n_dim) + xclsCovmat(numClstr,1:n_dim,1:n_dim)=covar(1:n_dim,1:n_dim) + xclsInvCov(numClstr,1:n_dim,1:n_dim)=invcov(1:n_dim,1:n_dim) + xclsEvec(numClstr,1:n_dim,1:n_dim)=evec(1:n_dim,1:n_dim) + xclsEval(numClstr,1:n_dim)=eval(1:n_dim) + end if + + end subroutine Xmeans + +!---------------------------------------------------------------------- + + recursive subroutine Xmeans4(pt,npt,ad,min_pt) + implicit none + + integer min_pt,npt !num of points + integer ad !no. of shared points required, returns the no. of shared points found + double precision pt(:,:) !points + double precision ptk(npt/(n_dim+1),n_dim,npt+ad)!points in clusters + integer nptk(npt/(n_dim+1)) !no. of points in the clusters + integer cluster(npt) !cluster array having cluster num of each pt + integer i,j,ip,q + double precision covar(n_dim,n_dim),covark(2,n_dim,n_dim),invcov(n_dim,n_dim),invcovk(2,n_dim,n_dim) + double precision evec(n_dim,n_dim),eveck(2,n_dim,n_dim),eval(n_dim),evalk(2,n_dim),detcov,detcovk(2) + double precision mean(n_dim),meank(2,n_dim) + double precision BIC1,BIC2 !BIC for 1 & 2 clusters respectively + integer i1 + integer cluster2(npt/(n_dim+1),ad) + logical flag + + flag=.false. + + do i=1,n_dim + mean(i)=sum(pt(i,1:npt))/dble(npt) + enddo + + !don't cluster if it produces a cluster with less than 2*(min_pt-1)+1 points + !since it would cluster into clusters having less than min_pt points + !don't cluster if the number of clusters has reached maxCls + if(npt<2*min_pt .or. nCls==maxClstr) then + BIC2=larg + BIC1=-larg + flag=.true. 
+ end if + + if(.not.flag) then + !calculate cov,inv_cov,evec matrices & eval & det(cov) + call calc_covmat(npt,n_dim,pt,mean,covar) + evec=covar + call diagonalize(evec,eval,n_dim,flag) + !if any of the eval are 0 or -ve then use the lowest +ve eval + do j=1,n_dim + if(eval(j)<=0.) eval(1:j)=eval(j+1) + enddo + detcov=product(eval) + call calc_invcovmat(n_dim,evec,eval,invcov) + + !calculate BIC for all the points, no clustering + BIC1=calcBIC1(pt,npt,mean,invcov,detcov) + + + !breakup the points in 2 clusters + i1=2 + meank(1,1:n_dim)=mean(1:n_dim) + call kmeans3(i1,pt,npt,n_dim,meank(1:i1,1:n_dim),cluster,min_pt) + !calculate the no. of points in each cluster & separate them + nptk=0 + ptk=0. + do i=1,npt + nptk(cluster(i))=nptk(cluster(i))+1 + ptk(cluster(i),:,nptk(cluster(i)))=pt(:,i) + end do + + !don't cluster if either of the clusters have less than min_pt points + !since that might be noise + do i=1,2 + if(nptk(i)BIC2) then + nCls=nCls+1 + call xmeans4(ptk(1,:,1:nptk(1)),nptk(1),ad,min_pt) + call xmeans4(ptk(2,:,1:nptk(2)),nptk(2),ad,min_pt) + return + else + !i1=min(nb,npt/max(min_pt,min(10,2*min_pt))) + i1=npt/min_pt+1 + !if(npt>=2*max(min_pt,min(10,2*min_pt)).and.i1>1) then + if(npt>=2*min_pt.and.i1>1) then + !break up the cluster into nb clusters + call kmeans7(i1,pt,npt,n_dim,cluster,ad,cluster2,min_pt) + !calculate the no. of points in each cluster & separate them + + nptk=0 + ptk=0. + do i=1,npt + nptk(cluster(i))=nptk(cluster(i))+1 + ptk(cluster(i),:,nptk(cluster(i)))=pt(:,i) + end do + + do i=1,i1 + do j=1,ad + !if(cluster2(i,j)==0) exit + nptk(i)=nptk(i)+1 + ptk(i,:,nptk(i))=pt(:,cluster2(i,j)) + end do + end do + + !arrange the points + do i=1,i1 + p(:,ptClstrd+1:ptClstrd+nptk(i))=ptk(i,:,1:nptk(i)) + ip=binSearch(ptInClstr,numClstr,nptk(i)) + ptInClstr(ip+1:numClstr+1)=ptInClstr(ip:numClstr) + ptInClstr(ip)=nptk(i) + do q=1,numClstr + if(xclsPos(q)>=ip) xclsPos(q)=xclsPos(q)+1 + end do + xclsPos(numClstr+1)=ip + ptClstrd=ptClstrd+nptk(i) + numClstr=numClstr+1 + !save cluster info + do q=1,n_dim + xclsMean(numClstr,q)=sum(ptk(i,q,1:nptk(i)))/dble(nptk(i)) + end do + end do + else + p(:,ptClstrd+1:ptClstrd+npt)=pt(:,1:npt) + ip=binSearch(ptInClstr,numClstr,npt) + ptInClstr(ip+1:numClstr+1)=ptInClstr(ip:numClstr) + ptInClstr(ip)=npt + do i=1,numClstr + if(xclsPos(i)>=ip) xclsPos(i)=xclsPos(i)+1 + end do + xclsPos(numClstr+1)=ip + ptClstrd=ptClstrd+npt + numClstr=numClstr+1 + !save cluster info + xclsInvCov(numClstr,1:n_dim,1:n_dim)=invcov(1:n_dim,1:n_dim) + xclsBIC(numClstr)=BIC1 + xclsMean(numClstr,1:n_dim)=mean(1:n_dim) + end if + end if + + end subroutine xmeans4 + +!---------------------------------------------------------------------- + + recursive subroutine Xmeans5(ptf,tnpt,cluster,ad,naux,auxa,min_pt) + implicit none + + integer min_pt,npt !num of points + integer ad !no. of shared points required, returns the no. of shared points found + integer tnpt + double precision ptf(:,:),pt(n_dim,tnpt) !points + double precision ptk(tnpt/(n_dim+1),n_dim,tnpt+ad)!points in clusters + integer nptk(tnpt/(n_dim+1)) !no. 
of points in the clusters + integer cluster(tnpt) !cluster array having cluster num of each pt + integer i,j,ip + integer i1 + integer cluster2(tnpt/(n_dim+1),ad) + integer naux + double precision auxa(:,:),auxk(tnpt/(n_dim+1),naux,tnpt+ad) + + npt=tnpt + pt(:,1:npt)=ptf(:,1:npt) + + i1=npt/min_pt+1 + + if(npt>=2*min_pt.and.i1>1) then + !break up the cluster into nb clusters + call kmeans7(i1,pt(:,1:npt),npt,n_dim,cluster(1:npt),ad,cluster2,min_pt) + !calculate the no. of points in each cluster & separate them + nptk=0 + do i=1,npt + nptk(cluster(i))=nptk(cluster(i))+1 + ptk(cluster(i),:,nptk(cluster(i)))=pt(:,i) + auxk(cluster(i),:,nptk(cluster(i)))=auxa(:,i) + end do + + do i=1,i1 + do j=1,ad + !if(cluster2(i,j)==0) exit + nptk(i)=nptk(i)+1 + ptk(i,:,nptk(i))=pt(:,cluster2(i,j)) + auxk(i,:,nptk(i))=auxa(:,cluster2(i,j)) + end do + end do + + !arrange the points + do i=1,i1 + p(:,ptClstrd+1:ptClstrd+nptk(i))=ptk(i,:,1:nptk(i)) + aux(:,ptClstrd+1:ptClstrd+nptk(i))=auxk(i,:,1:nptk(i)) + !ip=binSearch(ptInClstr,numClstr,nptk(i)) + ip=numClstr+1 + !ptInClstr(ip+1:numClstr+1)=ptInClstr(ip:numClstr) + ptInClstr(ip)=nptk(i) + !do q=1,numClstr + !if(xclsPos(q)>=ip) xclsPos(q)=xclsPos(q)+1 + !end do + xclsPos(numClstr+1)=ip + ptClstrd=ptClstrd+nptk(i) + numClstr=numClstr+1 + end do + else + p(:,ptClstrd+1:ptClstrd+npt)=pt(:,1:npt) + aux(:,ptClstrd+1:ptClstrd+npt)=auxa(:,1:npt) + !ip=binSearch(ptInClstr,numClstr,npt) + ip=numClstr+1 + !ptInClstr(ip+1:numClstr+1)=ptInClstr(ip:numClstr) + ptInClstr(ip)=npt + !do i=1,numClstr + !if(xclsPos(i)>=ip) xclsPos(i)=xclsPos(i)+1 + !end do + xclsPos(numClstr+1)=ip + ptClstrd=ptClstrd+npt + numClstr=numClstr+1 + end if + + end subroutine xmeans5 + +!---------------------------------------------------------------------- + + recursive subroutine Xmeans8(nClsf,ptf,lkf,ptInClsf,tnpt,ad,min_pt) + implicit none + + integer min_pt,npt !num of points + integer ad !no. of shared points required, returns the no. of shared points found + integer tnpt,nClsf,ptInClsf(:) + double precision ptf(:,:),pt(n_dim,tnpt),lkf(:),lk(tnpt) !points & log-like + double precision ptk(tnpt/(n_dim+1),n_dim,tnpt+ad)!points in clusters + double precision likek(tnpt/(n_dim+1),tnpt+ad)!log-like of points in clusters + integer nptk(tnpt/(n_dim+1)) !no. of points in the clusters + integer cluster(tnpt) !cluster array having cluster num of each pt + integer i,j,ip,q,l,m + integer i1 + integer cluster2(tnpt/(n_dim+1),ad) + + m=1 + do l=1,nClsf + npt=ptInClsf(l) + pt(:,1:npt)=ptf(:,m:m+ptInClsf(l)) + lk(1:npt)=lkf(m:m+ptInClsf(l)) + m=m+ptInClsf(l) + + i1=npt/min_pt+1 + + if(npt>=2*min_pt.and.i1>1) then + !break up the cluster into nb clusters + call kmeans7(i1,pt(:,1:npt),npt,n_dim,cluster(1:npt),ad,cluster2,min_pt) + !calculate the no. of points in each cluster & separate them + + nptk=0 + ptk=0. 
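+            ! Distribute the points over the i1 sub-clusters returned by kmeans7;
+            ! the ad shared points listed in cluster2 are then appended to every
+            ! sub-cluster, so the per-cluster counts stored in ptInClstr include
+            ! shared points and can sum to more than npt.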
+ do i=1,npt + nptk(cluster(i))=nptk(cluster(i))+1 + ptk(cluster(i),:,nptk(cluster(i)))=pt(:,i) + likek(cluster(i),nptk(cluster(i)))=lk(i) + end do + + if(ad>0) then + do i=1,i1 + do j=1,ad + !if(cluster2(i,j)==0) exit + nptk(i)=nptk(i)+1 + ptk(i,:,nptk(i))=pt(:,cluster2(i,j)) + likek(i,nptk(i))=lk(cluster2(i,j)) + end do + end do + end if + + !arrange the points + do i=1,i1 + p(:,ptClstrd+1:ptClstrd+nptk(i))=ptk(i,:,1:nptk(i)) + loglike(ptClstrd+1:ptClstrd+nptk(i))=likek(i,1:nptk(i)) + ip=binSearch(ptInClstr,numClstr,nptk(i)) + ptInClstr(ip+1:numClstr+1)=ptInClstr(ip:numClstr) + ptInClstr(ip)=nptk(i) + do q=1,numClstr + if(xclsPos(q)>=ip) xclsPos(q)=xclsPos(q)+1 + end do + xclsPos(numClstr+1)=ip + ptClstrd=ptClstrd+nptk(i) + numClstr=numClstr+1 + end do + else + p(:,ptClstrd+1:ptClstrd+npt)=pt(:,1:npt) + loglike(ptClstrd+1:ptClstrd+npt)=lk(1:npt) + ip=binSearch(ptInClstr,numClstr,npt) + ptInClstr(ip+1:numClstr+1)=ptInClstr(ip:numClstr) + ptInClstr(ip)=npt + do i=1,numClstr + if(xclsPos(i)>=ip) xclsPos(i)=xclsPos(i)+1 + end do + xclsPos(numClstr+1)=ip + ptClstrd=ptClstrd+npt + numClstr=numClstr+1 + end if + end do + + end subroutine Xmeans8 + +!---------------------------------------------------------------------- + + recursive subroutine Xmeans6(pt,npt,naux,auxa,min_pt) + implicit none + + integer min_pt,npt,naux !num of points + double precision pt(:,:) !points + double precision auxa(:,:) !auxilliary array + double precision ptk(2,n_dim,npt)!points in clusters + double precision auxk(2,naux,npt)!aux, to change order only + integer nptk(2) !no. of points in the clusters + integer cluster(npt) !cluster array having cluster num of each pt + double precision mean(3,n_dim),var(3) + integer i,j,ip + double precision BIC1,BIC2 !BIC for 1 & 2 clusters respectively + integer i1 + logical flag + + flag=.false. + + !don't cluster + !if it produces a cluster with less than 2*(min_pt-1)+1 points + !since it would cluster into clusters having less than min_pt points + !or + !if the number of clusters has reached maxCls + if(npt<2*min_pt.or.nCls==maxClstr) then + BIC2=larg + BIC1=-larg + flag=.true. + end if + + if(.not.flag) then + var=0. + !calculate mean & variance + do i=1,n_dim + mean(3,i)=sum(pt(i,1:npt))/npt + var(3)=var(3)+sum(pt(i,1:npt)**2.)/npt-mean(3,i)**2. + end do + + !calculate BIC for all the points, no clustering + BIC1=calcBIC1_iso(npt,var(3)) + + !breakup the points in 2 clusters + i1=2 + mean(1,:)=mean(3,:) + call kmeans3(i1,pt(1:n_dim,1:npt),npt,n_dim,mean(1:i1,1:n_dim),cluster,min_pt) + !calculate the no. of points in each cluster & separate them + nptk=0 + ptk=0. 
+ do i=1,npt + nptk(cluster(i))=nptk(cluster(i))+1 + ptk(cluster(i),:,nptk(cluster(i)))=pt(:,i) + auxk(cluster(i),:,nptk(cluster(i)))=auxa(:,i) + end do + + !don't cluster if either of the clusters have less than min_pt points + !since that might be noise + do i=1,2 + if(nptk(i)BIC2) then + nCls=nCls+1 + call Xmeans6(ptk(1,:,1:nptk(1)),nptk(1),naux,auxk(1,:,1:nptk(1)),min_pt) + call Xmeans6(ptk(2,:,1:nptk(2)),nptk(2),naux,auxk(2,:,1:nptk(2)),min_pt) + return + else + p(:,ptClstrd+1:ptClstrd+npt)=pt(:,1:npt) + aux(:,ptClstrd+1:ptClstrd+npt)=auxa(:,1:npt) + ip=binSearch(ptInClstr,numClstr,npt) + ptInClstr(ip+1:numClstr+1)=ptInClstr(ip:numClstr) + ptInClstr(ip)=npt + do i=1,numClstr + if(xclsPos(i)>=ip) xclsPos(i)=xclsPos(i)+1 + end do + xclsPos(numClstr+1)=ip + xclsMean(numClstr+1,1:n_dim)=mean(3,1:n_dim) + xclsVar(numClstr+1)=var(3) + xclsBIC(numClstr+1)=BIC1 + ptClstrd=ptClstrd+npt + numClstr=numClstr+1 + end if + + end subroutine Xmeans6 + +!---------------------------------------------------------------------- + + recursive subroutine Xmeans7(pt,npt,min_pt) + implicit none + + integer min_pt,npt !num of points + double precision pt(:,:) !points + double precision ptk(2,n_dim,npt)!points in clusters + integer nptk(2) !no. of points in the clusters + integer cluster(npt) !cluster array having cluster num of each pt + double precision mean(3,n_dim),var(3) + integer i,j,ip + double precision BIC1,BIC2 !BIC for 1 & 2 clusters respectively + integer i1 + logical flag + + flag=.false. + + !don't cluster + !if it produces a cluster with less than 2*(min_pt-1)+1 points + !since it would cluster into clusters having less than min_pt points + !or + !if the number of clusters has reached maxCls + if(npt<2*min_pt.or.nCls==maxClstr) then + BIC2=larg + BIC1=-larg + flag=.true. + end if + + if(.not.flag) then + var=0. + !calculate mean & variance + do i=1,n_dim + mean(3,i)=sum(pt(i,1:npt))/npt + var(3)=var(3)+sum(pt(i,1:npt)**2.)/npt-mean(3,i)**2. + end do + + !calculate BIC for all the points, no clustering + BIC1=calcBIC1_iso(npt,var(3)) + + !breakup the points in 2 clusters + i1=2 + mean(1,:)=mean(3,:) + call kmeans3(i1,pt(1:n_dim,1:npt),npt,n_dim,mean(1:i1,1:n_dim),cluster,min_pt) + !calculate the no. of points in each cluster & separate them + nptk=0 + ptk=0. 
+ do i=1,npt + nptk(cluster(i))=nptk(cluster(i))+1 + ptk(cluster(i),:,nptk(cluster(i)))=pt(:,i) + end do + + !don't cluster if either of the clusters have less than min_pt points + !since that might be noise + do i=1,2 + if(nptk(i)BIC2) then + nCls=nCls+1 + call Xmeans7(ptk(1,:,1:nptk(1)),nptk(1),min_pt) + call Xmeans7(ptk(2,:,1:nptk(2)),nptk(2),min_pt) + return + else + p(:,ptClstrd+1:ptClstrd+npt)=pt(:,1:npt) + ip=binSearch(ptInClstr,numClstr,npt) + ptInClstr(ip+1:numClstr+1)=ptInClstr(ip:numClstr) + ptInClstr(ip)=npt + do i=1,numClstr + if(xclsPos(i)>=ip) xclsPos(i)=xclsPos(i)+1 + end do + xclsPos(numClstr+1)=ip + xclsMean(numClstr+1,1:n_dim)=mean(3,1:n_dim) + xclsVar(numClstr+1)=var(3) + xclsBIC(numClstr+1)=BIC1 + ptClstrd=ptClstrd+npt + numClstr=numClstr+1 + end if + + end subroutine Xmeans7 + +!---------------------------------------------------------------------- + !sub-clustering X-means with shared points + !points rearranged with the cluster index of primary points also returned + subroutine doXmeans9(points,npt,np,nClstr,ptClstr,ad,min_pt) + implicit none + !input/output + double precision points(:,:) !actual points which are rearranged after clustering + !note: the clusters have shared points too + !input + integer npt,np !num of points & dimensionality + integer ad !no. of shared points required per cluster + integer min_pt !min points allowed per cluster + !ouput + integer nClstr !total clusters found + integer ptClstr(:) !total no. of points in each cluster (including shared points) + integer cluster(npt) !cluster index of primary points (before re-arrangement) + + !work + integer i,j,maxec,k + + if(npt<2*min_pt) then + nClstr=1 + ptClstr(1)=npt + ad=0 + return + end if + + if(ad<0) ad=0 + n_dim=np + maxec=npt/min_pt+1 + + allocate (p(n_dim,npt+maxec*ad)) + allocate (revcPos(maxec)) + allocate (xclsPos(maxec)) + allocate (ptInClstr(maxec)) + + ptInClstr=0 + nCls=1 + numClstr=0 + ptClstrd=0 + revcPos=0 + + call Xmeans9(points(:,1:npt),npt,cluster,ad,min_pt) + + j=0 + k=0 + do i=1,numClstr + if(ptInClstr(xclsPos(i))>0) then + j=j+1 + ptClstr(j)=ptInClstr(xclsPos(i)) + k=k+ptInClstr(xclsPos(i)) + end if + end do + + nClstr=j + points(1:n_dim,1:k)=p(1:n_dim,1:k) + + deallocate(p) + deallocate(revcPos) + deallocate(xclsPos) + deallocate(ptInClstr) + + end subroutine doXmeans9 + +!---------------------------------------------------------------------- + + recursive subroutine Xmeans9(ptf,tnpt,cluster,ad,min_pt) + implicit none + + integer min_pt,npt !num of points + integer ad !no. of shared points required, returns the no. of shared points found + integer tnpt + double precision ptf(:,:),pt(n_dim,tnpt) !points + double precision ptk(tnpt/(n_dim+1),n_dim,tnpt+ad)!points in clusters + integer nptk(tnpt/(n_dim+1)) !no. of points in the clusters + integer cluster(tnpt) !cluster array having cluster num of each pt + integer i,j,ip + integer i1 + integer cluster2(tnpt/(n_dim+1),ad) + + npt=tnpt + pt(:,1:npt)=ptf(:,1:npt) + + i1=npt/min_pt+1 + + if(npt>=2*min_pt.and.i1>1) then + !break up the cluster into nb clusters + call kmeans7(i1,pt(:,1:npt),npt,n_dim,cluster(1:npt),ad,cluster2,min_pt) + !calculate the no. 
of points in each cluster & separate them + nptk=0 + do i=1,npt + nptk(cluster(i))=nptk(cluster(i))+1 + ptk(cluster(i),:,nptk(cluster(i)))=pt(:,i) + end do + + do i=1,i1 + do j=1,ad + !if(cluster2(i,j)==0) exit + nptk(i)=nptk(i)+1 + ptk(i,:,nptk(i))=pt(:,cluster2(i,j)) + end do + end do + + !arrange the points + do i=1,i1 + p(:,ptClstrd+1:ptClstrd+nptk(i))=ptk(i,:,1:nptk(i)) + !ip=binSearch(ptInClstr,numClstr,nptk(i)) + ip=numClstr+1 + !ptInClstr(ip+1:numClstr+1)=ptInClstr(ip:numClstr) + ptInClstr(ip)=nptk(i) + !do q=1,numClstr + !if(xclsPos(q)>=ip) xclsPos(q)=xclsPos(q)+1 + !end do + xclsPos(numClstr+1)=ip + ptClstrd=ptClstrd+nptk(i) + numClstr=numClstr+1 + end do + else + p(:,ptClstrd+1:ptClstrd+npt)=pt(:,1:npt) + !ip=binSearch(ptInClstr,numClstr,npt) + ip=numClstr+1 + !ptInClstr(ip+1:numClstr+1)=ptInClstr(ip:numClstr) + ptInClstr(ip)=npt + !do i=1,numClstr + !if(xclsPos(i)>=ip) xclsPos(i)=xclsPos(i)+1 + !end do + xclsPos(numClstr+1)=ip + ptClstrd=ptClstrd+npt + numClstr=numClstr+1 + end if + + end subroutine Xmeans9 + +!---------------------------------------------------------------------- + + recursive subroutine Gmeans(pt,npt,naux,auxa,min_pt) + implicit none + + integer min_pt,npt,naux !num of points + double precision pt(:,:) !points + double precision auxa(:,:) !auxilliary points + double precision ptk(2,n_dim,npt)!points in clusters + double precision auxk(2,naux,npt)!loglike, to change order only + integer nptk(2) !no. of points in the clusters + integer cluster(npt) !cluster array having cluster num of each pt + integer i,ip + integer i1 + double precision alpha !confidence level for Anderson-Darlind test + parameter(alpha=0.0001) + double precision mean(n_dim),meank(2,n_dim) + double precision delMean(n_dim) !difference between the means of the two clusters + logical flag + + flag=.false. + + !calculate the mean + do i=1,n_dim + mean(i)=sum(pt(i,1:npt))/npt + enddo + + !don't cluster if it produces a cluster with less than 2*(min_pt-1)+1 points + !since it would cluster into clusters having less than min_pt points + if(npt<2*min_pt.or.nCls==maxClstr) flag=.true. + + if(.not.flag) then + !breakup the points in 2 clusters + i1=2 + meank(1,1:n_dim)=mean(1:n_dim) + call kmeans3(i1,pt,npt,n_dim,meank(1:i1,1:n_dim),cluster,min_pt) + !calculate the no. of points in each cluster & separate them + nptk=0 + do i=1,npt + nptk(cluster(i))=nptk(cluster(i))+1 + ptk(cluster(i),:,nptk(cluster(i)))=pt(:,i) + auxk(cluster(i),:,nptk(cluster(i)))=auxa(:,i) + end do + + !don't cluster if either of the clusters have less than min_pt points + !since that might be noise + do i=1,2 + if(nptk(i)=ip) xclsPos(i)=xclsPos(i)+1 + end do + xclsPos(numClstr+1)=ip + ptClstrd=ptClstrd+npt + numClstr=numClstr+1 + end if + + end subroutine Gmeans + +!---------------------------------------------------------------------- + + recursive subroutine Gmeans2(pt,npt,min_pt) + implicit none + + integer min_pt,npt !num of points + double precision pt(:,:) !points + double precision ptk(2,n_dim,npt)!points in clusters + integer nptk(2) !no. of points in the clusters + integer cluster(npt) !cluster array having cluster num of each pt + integer i,ip + integer i1 + double precision alpha !confidence level for Anderson-Darlind test + parameter(alpha=0.0001) + double precision mean(n_dim),meank(2,n_dim) + double precision delMean(n_dim) !difference between the means of the two clusters + logical flag + + flag=.false. 
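+    ! G-means node step (as in Gmeans above): split with 2-means, project the
+    ! points onto the vector joining the two child centroids, and keep the split
+    ! only if the projected sample fails the Anderson-Darling normality test
+    ! (AndersonDarling below returns .true. when Gaussianity cannot be rejected,
+    ! i.e. no split is needed).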
+ + !calculate the mean + do i=1,n_dim + mean(i)=sum(pt(i,1:npt))/npt + enddo + + !don't cluster if it produces a cluster with less than 2*(min_pt-1)+1 points + !since it would cluster into clusters having less than min_pt points + if(npt<2*min_pt.or.nCls==maxClstr) flag=.true. + + if(.not.flag) then + !breakup the points in 2 clusters + i1=2 + meank(1,1:n_dim)=mean(1:n_dim) + call kmeans3(i1,pt,npt,n_dim,meank(1:i1,1:n_dim),cluster,min_pt) + !calculate the no. of points in each cluster & separate them + nptk=0 + do i=1,npt + nptk(cluster(i))=nptk(cluster(i))+1 + ptk(cluster(i),:,nptk(cluster(i)))=pt(:,i) + end do + + !don't cluster if either of the clusters have less than min_pt points + !since that might be noise + do i=1,2 + if(nptk(i)=ip) xclsPos(i)=xclsPos(i)+1 + end do + xclsPos(numClstr+1)=ip + ptClstrd=ptClstrd+npt + numClstr=numClstr+1 + end if + + end subroutine Gmeans2 + +!---------------------------------------------------------------------- + + !calculate BIC for no clustering in the data + double precision function calcBIC1(pt,npt,mean,invcov,detcov) + implicit none + integer npt !num of points + double precision pt(:,:) + double precision detcov !determinant of the covariance matrix + double precision mean(:),invcov(:,:) + double precision tpt(n_dim,npt),a,nd,nn + integer i,j,k + + nd=n_dim + nn=npt + + !calculate point-mean + do i=1,npt + do j=1,n_dim + tpt(j,i)=pt(j,i)-mean(j) + end do + end do + + !a=sum(Transpose(pt(i)-mean).invcov.(pt(i)-mean)) + a=0. + do i=1,npt + do j=1,n_dim + a=a+tpt(j,i)*tpt(j,i)*invcov(j,j) + do k=j+1,n_dim + a=a+2.*tpt(j,i)*tpt(k,i)*invcov(j,k) + end do + end do + end do + calcBIC1=nn*nd*LogTwoPi+nn*log(detcov)+a+0.5*nd*(nd+3.)*log(nn) + + end function calcBIC1 + +!---------------------------------------------------------------------- + + !calculate BIC for no clustering in the data + double precision function calcBIC1_iso(npt,variance) + implicit none + integer npt !num of points + double precision variance,var + double precision nd,nn + + nd=n_dim + nn=npt + var=variance + + !convert into unbiased variance + var=var*nn/(nn-1.) + + calcBIC1_iso=nn*LogTwoPi+nn*nd*log(var)+nn-1.+(nd+1.)*log(nn) + + end function calcBIC1_iso + +!---------------------------------------------------------------------- + + !calculate BIC for clustering in the data + double precision function calcBIC2(ptk,nptk,meank,invcovk,detcovk) + implicit none + integer nptk(2) !num of points + double precision ptk(2,n_dim,nptk(1)+nptk(2)) + double precision detcovk(2) !determinant of the covariance matrix + double precision meank(2,n_dim),invcovk(2,n_dim,n_dim) + double precision tptk(2,n_dim,nptk(1)+nptk(2)),ak(2) + integer i,j,k,l + double precision nd,np1,np2,npt + + np1=nptk(1) + np2=nptk(2) + npt=np1+np2 + nd=n_dim + + !calculate point-mean + do l=1,2 + do i=1,nptk(l) + do j=1,n_dim + tptk(l,j,i)=ptk(l,j,i)-meank(l,j) + end do + end do + end do + + !a=sum(Transpose(pt(i)-mean).invcov.(pt(i)-mean)) + ak=0. 
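+    ! The symmetric quadratic form (x-mean)^T C^-1 (x-mean) is accumulated below
+    ! with diagonal terms counted once and off-diagonal terms twice. The resulting
+    ! BIC is -2 ln(Lmax) for the hard-assigned two-component mixture plus a penalty
+    ! (nd**2+3*nd+1)*log(npt), counting two means, two covariances and one mixing weight.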
+ do l=1,2 + do i=1,nptk(l) + do j=1,n_dim + ak(l)=ak(l)+tptk(l,j,i)*tptk(l,j,i)*invcovk(l,j,j) + do k=j+1,n_dim + ak(l)=ak(l)+2.*tptk(l,j,i)*tptk(l,k,i)*invcovk(l,j,k) + end do + end do + end do + end do + + calcBIC2=npt*nd*LogTwoPi+np1*log(detcovk(1))+np2*log(detcovk(2))+ak(1)+ak(2) & + -2.*(np1*log(np1)+np2*log(np2))+2.*npt*log(npt)+(nd**2.+3.*nd+1.)*log(npt) + end function calcBIC2 + +!---------------------------------------------------------------------- + + !calculate BIC for clustering in the data + double precision function calcBIC2_iso(nptk,var) + implicit none + integer nptk(2) !num of points + double precision var(2) + double precision variance + integer i + double precision nd,np1,np2,npt + + np1=nptk(1) + np2=nptk(2) + npt=np1+np2 + nd=n_dim + + variance=0. + do i=1,2 + variance=variance+var(i)*nptk(i) + end do + variance=variance/(npt-2.) + + calcBIC2_iso=npt*LogTwoPi+npt*nd*log(variance)+(npt-2.)-2.*np1*log(np1)- & + 2.*np2*log(np2)+(2.+2.*nd+2.*npt)*log(npt) + end function calcBIC2_iso + +!---------------------------------------------------------------------- + + !calculate BIC for no clustering of the points constituting the given clusters + double precision function caltBIC(cls1,cls2) + implicit none + integer cls1,cls2 + integer n1,n2,n !total num of points + double precision mean(n_dim),covar(n_dim,n_dim),invcov(n_dim,n_dim),evec(n_dim,n_dim),eval(n_dim),detcov + integer i,j + integer ind1,ind2!indices of starting points of the 2 clusters + double precision, dimension(:,:), allocatable :: ptt + logical flag + + !find total number of points + n1=ptInClstr(xclsPos(cls1)) + n2=ptInClstr(xclsPos(cls2)) + n=n1+n2 + + !find the starting indices of points in both the clusters + ind1=0 + ind2=0 + do i=1,cls1-1 + ind1=ind1+ptInClstr(xclsPos(i)) + end do + do i=1,cls2-1 + ind2=ind2+ptInClstr(xclsPos(i)) + end do + + !combine points from both the clusters + allocate(ptt(n_dim,n)) + ptt(:,1:n1)=p(:,ind1+1:ind1+n1) + ptt(:,n1+1:n)=p(:,ind2+1:ind2+n2) + + !find mean of the merged cluster + mean(1:n_dim)=(n1*xclsMean(cls1,1:n_dim)+n2*xclsMean(cls2,1:n_dim))/n + !calculate invcov & det(cov) of the merged cluster + call calc_covmat(n,n_dim,ptt,mean,covar) + evec=covar + flag=.false. + call diagonalize(evec,eval,n_dim,flag) + + !if any of the eval are 0 or -ve then use the lowest +ve eval + do j=1,n_dim + if(eval(j)<=0.) eval(1:j)=eval(j+1) + enddo + + detcov=product(eval) + call calc_invcovmat(n_dim,evec,eval,invcov) + + caltBIC=calcBIC1(ptt,n,mean,invcov,detcov) + + deallocate(ptt) + + end function caltBIC + +!---------------------------------------------------------------------- + + !Anderson Darling test for normal distribution + logical function AndersonDarling(npt,pt,delMean,alpha) + implicit none + integer npt !no. of points + double precision pt(n_dim,npt) !points + double precision alpha !significance level + double precision delMean(n_dim) !difference between the two cluster means + double precision ppt(npt) !projected & normalized points + double precision mean,sigma !mean & st.dev. of the projected & normalized points + double precision A !Anderson Darling statistic + double precision stn1,stn2 + integer i + double precision mode + integer*8 nn,info + + mean=0. + sigma=0. + !project the points onto delMean & calculate the mean & sigma of the + !projected points + mode=sqrt(sum((delMean(1:n_dim)**2))) + do i=1,npt + ppt(i)=sum(pt(1:n_dim,i)*delMean(1:n_dim))/mode + mean=mean+ppt(i) + sigma=sigma+ppt(i)**2 + end do + mean=mean/npt + sigma=sqrt(sigma/npt-mean**2)*npt/(npt-1.) 
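+    ! Anderson-Darling statistic on the sorted, standardised projections:
+    !    A^2 = -n - (1/n) * sum_i (2i-1)*[ ln F(x_(i)) + ln(1 - F(x_(n+1-i))) ]
+    ! followed by the finite-sample correction factor (1 + 4/n - 25/n^2); the
+    ! 1.8692 threshold used at the end corresponds to the alpha ~ 1e-4 level.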
+ + !transform the projected points into ones with mean=0 & sigma=1 + ppt(1:npt)=(ppt(1:npt)-mean)/sigma + + !sort ppt in ascending order + !CALL QSORT(ppt,npt,8,COMPARE) + nn=npt + call DLASRT('I',nn,ppt,info) + + !calculate Anderson Darling statistic + A=0. + do i=1,npt + if(ppt(i)>5.) then + stn1=1. + else if(ppt(i)<-5.) then + stn1=0. + else + stn1=stNormalCDF(ppt(i)) + end if + + if(ppt(npt+1-i)>5.) then + stn2=1. + else if(ppt(npt+1-i)<-5.) then + stn2=0. + else + stn2=stNormalCDF(ppt(npt+1-i)) + end if + + A=A+(2.*i-1.)*(log(stn1)+log(1.-stn2)) + end do + A=A/(-1.*npt)-npt + A=A*(1.+4./npt-25./(npt**2)) + + !inference at alpha=0.0001 + if(A>1.8692) then + AndersonDarling=.false. + else + AndersonDarling=.true. + end if + + end function AndersonDarling + +!---------------------------------------------------------------------- + + INTEGER*2 FUNCTION COMPARE(N1, N2) + double precision N1 + double precision N2 + + IF (N1 .LT. N2) THEN + COMPARE = -1 + ELSE IF (N1 .EQ. N2) THEN + COMPARE = 0 + ELSE + COMPARE = 1 + END IF + + RETURN + + END FUNCTION COMPARE + +!---------------------------------------------------------------------- + + !learn the Gaussian mixture by expectation maximization + !some points are constrained to be in specific clusters + !likelihood constraint + subroutine GaussMixExpMaxLike(ndim,nCls,npt,ncon,norm,pt,like,wt,wtNorm,locZ,lowlike,switch) + implicit none + + !input variables + integer ndim !dimensionality + integer nCls !no. of clusters + integer npt !total no. of points + integer ncon(nCls) !no. constrained points in each cluster + double precision pt(ndim,npt) !points + double precision like(2,npt) !log-like & log of the dx + logical norm !rescale the points so that all have the same range? + double precision lowlike(nCls) + logical switch !initialize the LAPACK eigenanalysis routines? + + !output variables + double precision wt(npt,nCls) !probability weights of points for each cluster + double precision wtNorm(npt,nCls) !normalized probability weights of points for each cluster + double precision locZ(nCls) !local evidence + + !work variables + integer i,k,count + double precision d1 + double precision pts(ndim,npt) + double precision mean(nCls,ndim),eval(nCls,ndim),evec(nCls,ndim,ndim),cwt(nCls) + double precision old_locZ(nCls),covmat(nCls,ndim,ndim),invcov(nCls,ndim,ndim),detcov(nCls) + + n_dim=ndim + count=0 + !rescaling + pts=pt + if(norm) call rescale(ndim,npt,pts) + + wt=0.d0 + !set the initial probability weights + k=0 + do i=1,nCls + wt(k+1:k+ncon(i),i)=1.d0 + + k=k+ncon(i) + enddo + + !calculate the initial Gaussian mixture model + wtNorm=wt + do i=1,nCls + !calculate the evidence & normalized weights + call setWt(npt,like,wtNorm(:,i),locZ(i),.true.) + !now the Gaussian with normalized weights + call GaussProp(npt,ndim,pts,wt(:,i),wtNorm(:,i),cwt(i),mean(i,:),covmat(i,:,:), & + invcov(i,:,:),eval(i,:),evec(i,:,:),detcov(i),switch,.true.) + enddo + !normalize cluster prior probabilities + d1=sum(cwt(1:nCls)) + cwt(1:nCls)=cwt(1:nCls)/d1 + + k=sum(ncon(1:nCls)) + !expectation-maximization + do + count=count+1 + old_locZ=locZ + !check all the points now + do i=k+1,npt + !calculate the probability of points lying in each cluster + call normalProbClsGivPt(nCls,ndim,pts(:,i),like(1,i), & + lowlike(1:nCls),mean(1:nCls,:),invcov(1:nCls,:,:), & + detcov(1:nCls),cwt(1:nCls),wt(i,1:nCls)) + enddo + + !re-calculate the Gaussian mixture model + wtNorm=wt + do i=1,nCls + !calculate the evidence & normalized weights + call setWt(npt,like,wtNorm(:,i),locZ(i),.true.) 
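+            ! M-step: refit component i as a weighted Gaussian (GaussProp) from the
+            ! responsibilities assigned in the E-step above; the enclosing EM loop
+            ! stops once the local log-evidences change by less than 1e-4 (in norm)
+            ! or after 100 iterations.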
+ !now the Gaussian with normalized weights + call GaussProp(npt,ndim,pts,wt(:,i),wtNorm(:,i),cwt(i),mean(i,:),covmat(i,:,:), & + invcov(i,:,:),eval(i,:),evec(i,:,:),detcov(i),.false.,.true.) + enddo + !normalize cluster prior probabilities + d1=sum(cwt(1:nCls)) + cwt(1:nCls)=cwt(1:nCls)/d1 + + !check for convergence + d1=sqrt(sum((old_locZ(1:nCls)-locZ(1:nCls))**2.)) + if(d1<0.0001 .or. count==100) then + !wt=wtNorm + exit + endif + enddo + + end subroutine GaussMixExpMaxLike + +!---------------------------------------------------------------------- + + !returns the normalized normal probability for the given point + double precision function normalProb(d,p,mean,invcov,detcov) + + implicit none + + !input variables + integer d !dimensionality + double precision p(d) !point + double precision mean(d) !mean + double precision invcov(d,d) !inverse covariance matrix + double precision detcov !determinant of the covariance matrix + + !work variables + integer j,k + double precision a,tpt(d),pi + + pi=4.*atan(1.d0) + + !calculate point-mean + tpt(1:d)=p(1:d)-mean(1:d) + + !a=sum(Transpose(pt-mean).invcov.(pt-mean)) + a=0. + do j=1,n_dim + a=a+tpt(j)*tpt(j)*invcov(j,j) + do k=j+1,n_dim + a=a+2.*tpt(j)*tpt(k)*invcov(j,k) + end do + end do + normalProb=exp(-a/2.)/((detcov**0.5)*((2.*pi)**(d/2.))) + + end function normalProb + +!---------------------------------------------------------------------- + + !returns the normalized normal probability for the points given a cluster + subroutine setWt(npt,like,wt,locZ,flag) + + implicit none + + !input variables + integer npt !no. of points + double precision like(2,npt) !log-like & log of dX of points + logical flag !set the weights + !input/output variables + double precision wt(npt) !weights + double precision locZ !local evidence + !work variables + integer j + + + locZ=-huge(1.d0)*epsilon(1.d0) !logZero + do j=1,npt + if(wt(j)>0.d0) locZ=logSumExp(locZ,like(1,j)+like(2,j)+log(wt(j))) + enddo + + if(flag) then + !now calculate the posterior probabilty weights + do j=1,npt + if(wt(j)>0.d0) wt(j)=wt(j)*exp(like(1,j)+like(2,j)-locZ) + enddo + endif + + end subroutine setWt + +!---------------------------------------------------------------------- + + !assuming the model is Gaussian Mixuture, return the probability of each cluster given a particular point + subroutine normalProbClsGivPt(nCls,d,p,like,lowlike,mean,invcov,detcov,cwt,prob) + + implicit none + + !input variables + integer nCls !no. of clusters + integer d !dimensionality + double precision p(d) !point + double precision like !log-like of the point + double precision lowlike(nCls) !lowest log-like of each cluster + double precision mean(nCls,d) !mean + double precision invcov(nCls,d,d) !inverse covariance matrix + double precision detcov(nCls) !determinant of the covariance matrix + double precision cwt(nCls) !cluster prior probabilities + + !output variables + double precision prob(nCls) + + !work variables + integer i,j,k + double precision a(nCls),tpt(nCls,d),pi,d2 + logical flag + + pi=4.d0*atan(1.d0) + + do i=1,nCls + !calculate point-mean + tpt(i,1:d)=p(1:d)-mean(i,1:d) + enddo + + !a=sum(Transpose(pt-mean).invcov.(pt-mean)) + flag=.false. + a=0.d0 + do i=1,nCls + if(lowlike(i)>=like) then + do j=1,d + a(i)=a(i)+tpt(i,j)*tpt(i,j)*invcov(i,j,j) + do k=j+1,d + a(i)=a(i)+2.d0*tpt(i,j)*tpt(i,k)*invcov(i,j,k) + enddo + enddo + prob(i)=log(cwt(i))-log(detcov(i))/2.d0-a(i)/2.d0 + flag=.true. 
+ else + prob(i)=-huge(1.d0)*epsilon(1.d0) !logZero + endif + enddo + + if(.not.flag) then + prob(1:nCls)=0.d0 + return + endif + + d2=prob(1) + do i=2,nCls + d2=logSumExp(d2,prob(i)) + enddo + + if(prob(i)==-huge(1.d0)*epsilon(1.d0)) then + prob(i)=0.d0 + else + prob(1:nCls)=exp(prob(1:nCls)-d2) + endif + + end subroutine normalProbClsGivPt + +!---------------------------------------------------------------------- + + subroutine GaussProp(n,d,p,wt,wtNorm,cwt,mean,covmat,invcov,eval,evec,detcov,switch,calcMu) + + implicit none + + !input variables + integer n !no. of points + integer d !dimensionality + double precision p(d,n) !points + double precision wt(n) !probability weights of each point + double precision wtNorm(n) !normalized (with the evidence) probability weights of each point + logical switch !initialize the LAPACK eigenanalysis routine? + logical calcMu !calculate the mean? + + !output variables + double precision mean(d) !mean + double precision covmat(d,d) !covariance matrix + double precision invcov(d,d) !inverse covariance matrix + double precision eval(d) !eigen values + double precision evec(d,d) !eigen vectors + double precision detcov !determinant of the covariance matrix + double precision cwt !no. of points in the cluster + + !work variables + integer i + + + !total no. of points in the cluster + cwt=sum(wt(1:n)) + + if(calcMu) then + mean=0.d0 + do i=1,n + mean(1:d)=mean(1:d)+p(1:d,i)*wtNorm(i) + enddo + endif + + !covariance matrix + call calc_covmat_wt(n,d,p,wtNorm,mean,covmat) + !eigen analysis + evec=covmat + call diagonalize(evec,eval,d,switch) + !eigenvalues of covariance matrix can't be zero + do i=1,d + if(eval(i)<=0.d0) eval(1:i)=eval(i+1)/100. + enddo + + !determinant of the covariance matrix + detcov=product(eval) + + !inverse of covariance matrix + call calc_invcovmat(d,evec,eval,invcov) + + end subroutine GaussProp + +!---------------------------------------------------------------------- + + !rescale the points so that all have the same range + subroutine rescale(ndim,npt,pt) + implicit none + + !input variables + integer ndim !dimensionality + integer npt !total no. of points + + !input/output varialbe + double precision pt(ndim,npt) !points + + !work variables + integer i + double precision d1,d2 + + + do i=1,ndim + d2=maxval(pt(i,1:npt)) + d1=minval(pt(i,1:npt)) + pt(i,1:npt)=(pt(i,1:npt)-d1)/(d2-d1) + enddo + + end subroutine rescale + +!---------------------------------------------------------------------- + + function Dinosaur(points,npt,ndim,nClstr,ptClstr,naux,auxa,min_pt,maxC, & + meanx,invcovx,tmatx,evalx,evecx,kfacx,effx,volx,pVol,cVol,neVol,eVolFrac, & + nIter,dTol,switch,rFlag,cSwitch,nCdim) + implicit none + + !input variables + integer npt !total no. of points + integer ndim !dimensionality + double precision points(ndim,npt) !points + integer naux !dimensionality of auxiliary points + double precision auxa(naux,npt) !auxiliary points + integer min_pt !min no. of points per cluster + integer maxC !max no. of clusters + double precision pVol !prior volume + double precision cVol !current vol + integer neVol + double precision eVolFrac(2*neVol,2) + integer nIter + double precision dTol !volume tolerance + logical switch !initialize the LAPACK eigen analysis routines? + logical cSwitch + integer nCdim + logical rFlag !desperate acceptance + + !output variables + integer nClstr !total no. 
clusters found + integer ptClstr(maxC) !points in each cluster + double precision meanx(maxC,ndim) !cluster means + double precision invcovx(maxC,ndim,ndim) !cluster inv covarianc matrices + double precision tmatx(maxC,ndim,ndim) !cluster transformation matrices + double precision evalx(maxC,ndim) !cluster eigenvalues + double precision evecx(maxC,ndim,ndim) !cluster eigenvectors + double precision kfacx(maxC) !cluster point enlargement factors + double precision effx(maxC) !cluster volume enlargement factors + double precision volx(maxC) !cluster volumes + logical Dinosaur !successful? + + !work variables + integer i,i2,j,k,n1,n2,n3,j1,k1,m,l,logLloc(1) + integer, allocatable :: nptx(:) + logical flag + double precision, allocatable :: mean(:), covmat(:,:), invcov(:,:), tmat(:,:), evec(:,:), eval(:), p2(:,:), aux2(:,:) + double precision kfac,eff,detcov,vol + double precision d1,d2 + logical, allocatable :: updEll(:) + integer, allocatable :: updPt(:), cluster(:) + integer nClstrk !total no. clusters found + integer, allocatable :: ptClstrk(:) !points in each cluster + double precision, allocatable :: meank(:,:) !cluster means + double precision, allocatable :: covmatk(:,:,:) !cluster covarianc matrices + double precision, allocatable :: invcovk(:,:,:) !cluster inv covarianc matrices + double precision, allocatable :: tmatk(:,:,:) !cluster transformation matrices + double precision, allocatable :: evalk(:,:) !cluster eigenvalues + double precision, allocatable :: eveck(:,:,:) !cluster eigenvectors + double precision, allocatable :: kfack(:) !cluster point enlargement factors + double precision, allocatable :: effk(:) !cluster volume enlargement factors + double precision, allocatable :: detcovk(:) !cluster covariance matrix determinants + double precision, allocatable :: volk(:) !cluster volumes + double precision, allocatable :: pointsk(:,:), auxk(:,:) + !merge operation variables + integer mChk + double precision, allocatable :: dis(:,:) + logical, allocatable :: check(:,:) + logical gflag + + + n_dim=ndim + + !sanity checks + if(min_pt<2) min_pt=2 + if(npt 1) then + j=0 + do i=1,numClstr + cluster(j+1:j+ptInClstr(i))=i + j=j+ptInClstr(i) + enddo + + d1=pVol*ptClstrk(nClstrk)/npt + + flag=postDino(numClstr,p(:,1:ptClstrk(nClstrk)),ptClstrk(nClstrk), & + aux(:,1:ptClstrk(nClstrk)),naux,ndim,cluster,min_pt,ptInClstr(1:numClstr), & + xclsMean(1:numClstr,:),xclsCovmat(1:numClstr,:,:),xclsInvcov(1:numClstr,:,:), & + xclsTmat(1:numClstr,:,:),xclsEval(1:numClstr,:),xclsEvec(1:numClstr,:,:), & + xclsKfac(1:numClstr),xclsEff(1:numClstr),xclsDetcov(1:numClstr),xclsVol(1:numClstr), & + d1) + + j=sum(ptClstrk(1:nClstrk-1)) + + pointsk(:,j+1:npt)=p(:,1:ptClstrk(nClstrk)) + auxk(:,j+1:npt)=aux(:,1:ptClstrk(nClstrk)) + ptClstrk(nClstrk:nClstrk+numClstr-1)=ptInClstr(1:numClstr) + meank(nClstrk:nClstrk+numClstr-1,:)=xclsMean(1:numClstr,:) + covmatk(nClstrk:nClstrk+numClstr-1,:,:)=xclsCovmat(1:numClstr,:,:) + invcovk(nClstrk:nClstrk+numClstr-1,:,:)=xclsInvcov(1:numClstr,:,:) + tmatk(nClstrk:nClstrk+numClstr-1,:,:)=xclsTmat(1:numClstr,:,:) + evalk(nClstrk:nClstrk+numClstr-1,:)=xclsEval(1:numClstr,:) + eveck(nClstrk:nClstrk+numClstr-1,:,:)=xclsEvec(1:numClstr,:,:) + kfack(nClstrk:nClstrk+numClstr-1)=xclsKfac(1:numClstr) + effk(nClstrk:nClstrk+numClstr-1)=xclsEff(1:numClstr) + detcovk(nClstrk:nClstrk+numClstr-1)=xclsDetcov(1:numClstr) + volk(nClstrk:nClstrk+numClstr-1)=xclsVol(1:numClstr) + else + flag=.true. 
+ endif + + numClstr=nClstrk+numClstr-1 + p=pointsk + aux=auxk + ptInClstr(1:numClstr)=ptClstrk(1:numClstr) + xclsMean(1:numClstr,:)=meank(1:numClstr,:) + xclsCovmat(1:numClstr,:,:)=covmatk(1:numClstr,:,:) + xclsInvcov(1:numClstr,:,:)=invcovk(1:numClstr,:,:) + xclsTmat(1:numClstr,:,:)=tmatk(1:numClstr,:,:) + xclsEval(1:numClstr,:)=evalk(1:numClstr,:) + xclsEvec(1:numClstr,:,:)=eveck(1:numClstr,:,:) + xclsKfac(1:numClstr)=kfack(1:numClstr) + xclsEff(1:numClstr)=effk(1:numClstr) + xclsDetcov(1:numClstr)=detcovk(1:numClstr) + xclsVol(1:numClstr)=volk(1:numClstr) + + logLloc=maxloc(aux(1,1:npt)) + if(logLloc(1)>npt-ptInClstr(numClstr)) then + d2=0d0 + else + d2=0.005d0 + endif + + if(flag) then + xclsVol(numClstr)=0.d0 + if(dble(ptInClstr(numClstr))/dble(npt)1) then + updPt=0 + updEll=.false. + nptx(1:numClstr)=ptInClstr(1:numClstr) + gflag=.false. + k=0 + do i=1,numClstr + if(ptInClstr(i)0.d0) then + do j=1,ptInClstr(i) + d1=1.d99 + do l=1,numClstr + if(l==i .or. ptInClstr(l)=min_pt .or. xclsVol(i)==0.d0) then + n1=n1+1 + p2(:,j+1:j+ptInClstr(i))=p(:,k+1:k+ptInClstr(i)) + aux2(:,j+1:j+ptInClstr(i))=aux(:,k+1:k+ptInClstr(i)) + j=j+ptInClstr(i) + k=k+ptInClstr(i) + + if(updEll(i)) then + do m=1,npt + if(updPt(m)==i) then + p2(:,j+1)=p(:,m) + aux2(:,j+1)=aux(:,m) + j=j+1 + ptInClstr(i)=ptInClstr(i)+1 + endif + enddo + endif + else + k=k+ptInClstr(i) + ptInClstr(i)=0 + xclsVol(i)=0.d0 + endif + enddo + p=p2 + aux=aux2 + else + n1=numClstr + endif + + if(xclsVol(numClstr)==0.d0) then + i2=numClstr-1 + else + i2=numClstr + endif + + n1=0 + do i=1,i2 + if(ptInClstr(i)>0) n1=n1+1 + enddo + + j1=0 + n3=0 + do i=1,i2 + if(ptInClstr(i)==0) cycle + n3=n3+1 + + do + mChk=min(10,n1-1) + dis=huge(1.d0) + do j=1,i2 + if(ptInClstr(j)==0 .or. j==i) cycle + + d1=sum((xclsMean(i,:)-xclsMean(j,:))**2) + + k1=0 + do k=mChk,1,-1 + if(d1>dis(k,1)) then + exit + else + k1=k + endif + enddo + if(k1/=0) then + dis(k1+1:mChk,:)=dis(k1:mChk-1,:) + dis(k1,1)=d1 + dis(k1,2)=dble(j) + endif + enddo + + !now do the merge check + flag=.false. + p2(:,1:ptInClstr(i))=p(:,j1+1:j1+ptInClstr(i)) + aux2(:,1:ptInClstr(i))=aux(:,j1+1:j1+ptInClstr(i)) + do j=1,mChk + k=int(dis(j,2)) !ellipsoid to check + + !already checked + if(check(i,k)) cycle + + check(i,k)=.true. + check(k,i)=.true. + + k1=sum(ptInClstr(1:k-1)) + n2=ptInClstr(i)+ptInClstr(k) + p2(:,ptInClstr(i)+1:n2)=p(:,k1+1:k1+ptInClstr(k)) + aux2(:,ptInClstr(i)+1:n2)=aux(:,k1+1:k1+ptInClstr(k)) + d1=pVol*dble(n2)/dble(npt) + + call CalcEllProp(n2,ndim,p2(:,1:n2),mean,covmat,invcov,tmat,evec,eval,detcov,kfac,eff,vol, & + d1,switch) + + !success? 
+ if(vol/1.10) then + j=j+1 + ptClstr(j)=ptInClstr(i) + meanx(j,:)=xclsMean(i,:) + invcovx(j,:,:)=xclsInvcov(i,:,:) + tmatx(j,:,:)=xclsTMat(i,:,:) + evalx(j,:)=xclsEval(i,:) + evecx(j,:,:)=xclsEvec(i,:,:) + kfacx(j)=xclsKfac(i) + effx(j)=xclsEff(i) + volx(j)=xclsVol(i) + endif + enddo + + nClstr=j + points(1:ndim,1:npt)=p(1:ndim,1:npt) + auxa(1:naux,1:npt)=aux(1:naux,1:npt) + endif + + + deallocate( updEll, check ) + deallocate( mean, covmat, invcov, tmat, evec, eval, p2, aux2 ) + deallocate( nptx, updPt, cluster, ptClstrk ) + deallocate( meank, covmatk, invcovk, tmatk, evalk, eveck, kfack, effk, detcovk, volk, pointsk, auxk, dis ) + + deallocate(xclsMean,xclsInvcov,xclsTMat,xclsEval,xclsEvec,xclsKfac,xclsEff, & + xclsVol,xclsCovmat,xclsDetcov,p,aux,ptInClstr) + + end function Dinosaur + +!---------------------------------------------------------------------- + + recursive subroutine makeDino(pt,npt,ndim,naux,auxa,min_pt,mean,covmat,invcov, & + tmat,evec,eval,detcov,kfac,eff,vol,pVol,cSwitch,nCdim) + implicit none + + !input variables + integer npt !no. of points to analyze + double precision pVol !total prior volume + integer ndim !dimensionality + double precision pt(ndim,npt) !points + integer naux !dimensionality of auxiliary points + double precision auxa(naux,npt) !auxiliary points + integer min_pt !min no. of points per cluster + double precision mean(ndim) !overall mean + double precision covmat(ndim,ndim) !overall covariance matrix + double precision invcov(ndim,ndim) !overall inv covariance matrix + double precision tmat(ndim,ndim) !overall transformation matrix + double precision evec(ndim,ndim) !overall eigenvector + double precision eval(ndim) !overall eigenvalues + double precision detcov !overall covariance matrix determinant + double precision kfac !overall cluster point enlargement factor + double precision eff !overall cluster volume enlargement factor + double precision vol !overall volume + logical cSwitch + integer nCdim + + !work variables + integer nk + parameter(nk=2) + integer i,k,d,ip + integer, allocatable :: cluster(:), nptk(:) + logical flag + double precision, allocatable :: meank(:,:), covmatk(:,:,:), invcovk(:,:,:), tmatk(:,:,:) + double precision, allocatable :: eveck(:,:,:), evalk(:,:), detcovk(:),kfack(:), effk(:),volk(:) + double precision, allocatable :: ptk(:,:,:), auxk(:,:,:) + double precision d1 + + if(npt==0) return + + allocate( cluster(npt), nptk(nk) ) + allocate( meank(nk,ndim), covmatk(nk,ndim,ndim), invcovk(nk,ndim,ndim), tmatk(nk,ndim,ndim), eveck(nk,ndim,ndim), & + evalk(nk,ndim), detcovk(nk), kfack(nk), effk(nk), volk(nk), ptk(nk,ndim,npt), auxk(nk,naux,npt) ) + + if(abs((vol-pVol)/pVol)<0.01 .or. npt<2*min_pt .or. nCls>=maxClstr) then + if(abs((vol-pVol)/pVol)<0.01) d=0 + flag=.true. + else + flag=.false. + endif + + if(.not.flag) then + !assign all points to the same cluster & calculate its properties + cluster=1 + meank(1,:)=mean(:) + covmatk(1,:,:)=covmat(:,:) + invcovk(1,:,:)=invcov(:,:) + tmatk(1,:,:)=tmat(:,:) + evalk(1,:)=eval(:) + eveck(1,:,:)=evec(:,:) + kfack(1)=kfac + effk(1)=eff + detcovk(1)=detcov + volk(1)=vol + nptk(1)=npt + nptk(2:nk)=0 + volk(2:nk)=0.d0 + + !breakup the points in 2 or 3 clusters + k=2 + do + if(k>nk) then + flag=.true. 
+ exit + endif + + nptk(k)=0 + + d=Dmeans(k,pt,npt,auxa(naux,:),ndim,cluster,min_pt,nptk(1:k),meank(1:k,:),covmatk(1:k,:,:), & + invcovk(1:k,:,:),tmatk(1:k,:,:),evalk(1:k,:),eveck(1:k,:,:),kfack(1:k),effk(1:k), & + detcovk(1:k),volk(1:k),pVol,vol,cSwitch,nCdim) + + if(d==2) then + !couldn't partition at all, exit the loop + flag=.true. + exit + else + !found a partition + !calculate the no. of points in each partition & separate them + nptk=0 + do i=1,npt + nptk(cluster(i))=nptk(cluster(i))+1 + ptk(cluster(i),:,nptk(cluster(i)))=pt(:,i) + auxk(cluster(i),:,nptk(cluster(i)))=auxa(:,i) + enddo + + !was it a better partition? + if(d==0 .or. vol>2.*pVol) then + !yes, then exit + flag=.false. + exit + else + !no, then partition further + k=k+1 + endif + endif + enddo + endif + + if(.not.flag) then + nCls=nCls+k-1 + do i=1,k + d1=pVol*dble(nptk(i))/dble(npt) + call makeDino(ptk(i,:,1:nptk(i)),nptk(i),ndim,naux,auxk(i,:,1:nptk(i)),min_pt, & + meank(i,:),covmatk(i,:,:),invcovk(i,:,:),tmatk(i,:,:),eveck(i,:,:),evalk(i,:), & + detcovk(i),kfack(i),effk(i),volk(i),d1,cSwitch,nCdim) + enddo + else + p(:,ptClstrd+1:ptClstrd+npt)=pt(:,1:npt) + aux(:,ptClstrd+1:ptClstrd+npt)=auxa(:,1:npt) + ip=numClstr+1 + ptInClstr(ip)=npt + xclsMean(ip,:)=mean(:) + xclsInvcov(ip,:,:)=invcov(:,:) + xclsCovmat(ip,:,:)=covmat(:,:) + xclsTMat(ip,:,:)=tmat(:,:) + xclsEval(ip,:)=eval(:) + xclsEvec(ip,:,:)=evec(:,:) + xclsKfac(ip)=kfac + xclsEff(ip)=eff + xclsVol(ip)=vol + xclsDetcov(ip)=detcov + ptClstrd=ptClstrd+npt + numClstr=numClstr+1 + endif + + deallocate( cluster, nptk ) + deallocate( meank, covmatk, invcovk, tmatk, eveck, evalk, detcovk, kfack, effk, volk, ptk, auxk ) + + end subroutine makeDino + +!---------------------------------------------------------------------- + + logical function kDinosaur(points,npt,ndim,nClstr,ptClstr,naux,auxa,min_pt,maxC, & + meanx,invcovx,tmatx,evalx,evecx,kfacx,effx,volx,pVol,cVol,neVol,eVolFrac, & + nIter,dTol,switch,rFlag) + implicit none + + !input variables + integer npt !total no. of points + integer ndim !dimensionality + double precision points(ndim,npt) !points + integer naux !dimensionality of auxiliary points + double precision auxa(naux,npt) !auxiliary points + integer min_pt !min no. of points per cluster + integer maxC !max no. of clusters + double precision pVol !prior volume + double precision cVol !current vol + integer neVol + double precision eVolFrac(2*neVol,2) + integer nIter + double precision dTol !volume tolerance + logical switch !initialize the LAPACK eigen analysis routines? + logical rFlag !desperate acceptance + !output variables + integer nClstr !total no. 
clusters found + integer ptClstr(maxC) !points in each cluster + double precision meanx(maxC,ndim) !cluster means + double precision invcovx(maxC,ndim,ndim) !cluster inv covarianc matrices + double precision tmatx(maxC,ndim,ndim) !cluster tranformation matrices + double precision evalx(maxC,ndim) !cluster eigenvalues + double precision evecx(maxC,ndim,ndim) !cluster eigenvectors + double precision kfacx(maxC) !cluster point enlargement factors + double precision effx(maxC) !cluster volume enlargement factors + double precision volx(maxC) !cluster volumes + !work variables + integer i,j,k,nChkd + logical flag + integer cluster(npt),nptk(maxC) + double precision meank(maxC,ndim),covmatk(maxC,ndim,ndim),invcovk(maxC,ndim,ndim),tmatk(maxC,ndim,ndim) + double precision evalk(maxC,ndim),eveck(maxC,ndim,ndim),kfack(maxC),effk(maxC),detcovk(maxC),volk(maxC) + double precision pts(ndim,npt),auxk(naux,npt) + + kDinosaur = .false. + n_dim=ndim + + !sanity checks + if(min_ptfTol) then + postDino=.true. + exit + endif + enddo + + if(.not.postDino) then + deallocate(fVal) + return + else + allocate( h(k,npt), mdis(k,npt), ptk(ndim,npt), auxk(naux,npt) ) + allocate( nptx(k), clusterx(npt) ) + allocate( meanx(k,ndim), covmatx(k,ndim,ndim), invcovx(k,ndim,ndim), tmatx(k,ndim,ndim), evalx(k,ndim), & + evecx(k,ndim,ndim), kfacx(k), effx(k), detcovx(k), volx(k) ) + endif + + !initialization + nptx=nptk + clusterx=cluster + meanx=meank + covmatx=covmatk + invcovx=invcovk + tmatx=tmatk + evalx=evalk + evecx=eveck + kfacx=kfack + effx=effk + detcovx=detcovk + volx=volk + count=0 + doCal=.false. + + do i=1,k + if(fVal(i)>fTol) then + h(i,:)=1.d99 + else + do j=1,npt + if(fVal(clusterx(j))<=fTol) cycle + mdis(i,j)=MahaDis(ndim,pt(:,j),meanx(i,:),invcovx(i,:,:),kfacx(i)*effx(i)) + if(mdis(i,j)>1.d0) then + h(i,j)=1.d99 + else + h(i,j)=volx(i)*mdis(i,j)/nptx(i) + h(i,j)=mdis(i,j) + endif + enddo + endif + enddo + + !now assign point j to the ellipsoid i such that h(i,j) is min + flag=.false. + do j=1,npt + if(fVal(clusterx(j))<=fTol) cycle + + indx=minloc(h(1:k,j)) + if(h(indx(1),j)/=1.d99) then + doCal(indx(1))=.true. + flag=.true. + nptx(clusterx(j))=nptx(clusterx(j))-1 + if(nptx(clusterx(j))==0) then + doCal(clusterx(j))=.false. + fVal(clusterx(j))=0.d0 + else + doCal(clusterx(j))=.true. + endif + nptx(indx(1))=nptx(indx(1))+1 + clusterx(j)=indx(1) + endif + enddo + + if(flag) then + !point reassigned, recalculate the ellipsoid properties + do i=1,k + if(.not.doCal(i)) cycle + i1=0 + do j=1,npt + if(clusterx(j)==i) then + i1=i1+1 + ptk(:,i1)=pt(:,j) + auxk(:,i1)=auxa(:,j) + endif + enddo + + d1=cVol*dble(i1)/dble(npt) + call CalcEllProp(i1,ndim,ptk(:,1:i1),meanx(i,:),covmatx(i,:,:), & + invcovx(i,:,:),tmatx(i,:,:),evecx(i,:,:),evalx(i,:),detcovx(i),kfacx(i), & + effx(i),volx(i),d1,.false.) + + fVal(i)=volx(i)/(cVol*nptx(i)/npt) + enddo + + nptk=nptx + cluster=clusterx + meank=meanx + covmatk=covmatx + invcovk=invcovx + tmatk=tmatx + evalk=evalx + eveck=evecx + kfack=kfacx + effk=effx + detcovk=detcovx + volk=volx + endif + + !rearrange + i1=0 + i2=0 + + postDino=.false. + do i=1,k + if(fVal(i)<=fTol .and. nptk(i)>=min_pt) then + i2=i2+1 + nptx(i2)=nptk(i) + meanx(i2,:)=meank(i,:) + covmatx(i2,:,:)=covmatk(i,:,:) + invcovx(i2,:,:)=invcovk(i,:,:) + tmatx(i2,:,:)=tmatk(i,:,:) + evalx(i2,:)=evalk(i,:) + evecx(i2,:,:)=eveck(i,:,:) + kfacx(i2)=kfack(i) + effx(i2)=effk(i) + detcovx(i2)=detcovk(i) + volx(i2)=volk(i) + else + postDino=.true. 
+ cycle + endif + + do j=1,npt + if(cluster(j)==i) then + i1=i1+1 + clusterx(i1)=i + ptk(:,i1)=pt(:,j) + auxk(:,i1)=auxa(:,j) + endif + enddo + enddo + + if(postDino) then + nptx(i2+1)=0 + do i=1,k + if(fVal(i)>fTol .or. nptk(i)>> DESIGNED FOR GMAKE <<< + +ext=$(shell uname | cut -c1-3) + +CC=cc + +ifeq ($(ext),IRI) +F90C= f90 +FFLAGS= -Ofast -mp -n32 -LANG:recursive=ON -lmpi -DMPI +LAPACKL = -lcomplib.sgimath_mp +INCLUDE = -I../camb +LFLAGS= +endif + +ifeq ($(ext),Lin) +F90C=gfortran +FFLAGS= -O -cpp -fopenmp +LAPACKL = -llapack -lblas +INCLUDE = -I../camb +LFLAGS= +endif + +ifeq ($(ext),OSF) +F90C=f90 +FFLAGS= -omp -O -arch host -math_library fast -tune host -fpe1 +LAPACKL = -lcxml +INCLUDE = -I../camb +LFLAGS= +endif + + +ifeq ($(ext),Sun) +F90C=f90 +FFLAGS= -O4 -xarch=native64 -openmp -ftrap=%none +LAPACKL = -lsunperf -lfsu +INCLUDE = -I../camb -M../camb +LFLAGS= endif -#G95; make sure LAPACK and MPI libs also compiled with g95 -#F90C = mpif90 -#FFLAGS = -O2 -cpp -DMPI -#LAPACKL = /LAPACK/lapack_LINUX.a /LAPACK/blas_LINUX.a - -#SGI, -mp toggles multi-processor. Use -O2 if -Ofast gives problems. -#Not various versions of the compiler are buggy giving erroneous seg faults with -mp. -#Version 7.3 is OK, currently version 7.4 is bugged, as are some earlier versions. -#F90C = f90 -#LAPACKL = -lcomplib.sgimath -#FFLAGS = -Ofast -mp - -#Digital/Compaq fortran, -omp toggles multi-processor -#F90C = f90 -#FFLAGS = -omp -O4 -arch host -math_library fast -tune host -fpe1 -#LAPACKL = -lcxml - -#Absoft ProFortran, single processor, set -cpu:[type] for your local system -#F90C = f95 -#FFLAGS = -O2 -s -cpu:athlon -lU77 -w -YEXT_NAMES="LCS" -YEXT_SFX="_" -#LAPACKL = -llapack -lblas -lg2c -#IFLAG = -p - -#NAGF95, single processor: -#F90C = f95 -#FFLAGS = -DNAGF95 -O3 -#LAPACKL = -llapack -lblas -lg2c - -#PGF90 -#F90C = pgf90 -#FFLAGS = -O2 -DESCAPEBACKSLASH -Mpreprocess -#LAPACKL = -llapack -lblas - - -#Sun, single processor: -#F90C = f90 -#FFLAGS = -fast -ftrap=%none -#LAPACKL = -lsunperf -lfsu -#LAPACKL = -lsunperf -lfsu -lsocket -lm -#IFLAG = -M - -#Sun MPI -#F90C = mpf90 -#FFLAGS = -O4 -openmp -ftrap=%none -dalign -DMPI -#LAPACKL = -lsunperf -lfsu -lmpi_mt -#IFLAG = -M - -#Sun parallel enterprise: -#F90C = f95 -#FFLAGS = -O4 -xarch=native64 -openmp -ftrap=%none -#LAPACKL = -lsunperf -lfsu -#IFLAG = -M - - -#IBM XL Fortran, multi-processor (run "module load lapack" then run "gmake") -# See also http://cosmocoffee.info/viewtopic.php?t=326 -#F90C = xlf90_r $(LAPACK) -#FFLAGS = -WF,-DIBMXL -qsmp=omp -qsuffix=f=f90:cpp=F90 -O3 -qstrict -qarch=pwr3 -qtune=pwr3 -#INCLUDE = -lessl -#LAPACKL = +ifeq ($(ext),AIX) +F90C = mpxlf90_r +FFLAGS = -O4 -WF,-DIBMXL,-DMPI -qstrict -qsmp=omp -qmaxmem=-1 -qsuffix=f=f90:cpp=F90 +LAPACKL = -lessl +INCLUDE = -I../camb +LFLAGS= +endif + +EXTDATA = LRG +EXTINCLUDE = -I$(GSLDIR)/include +EXTOBJS = bsplinepk.o + + +NESTDIR = ../multinest +NESTINCLUDE = -I ../multinest -I../camb -I/usr/include +NESTOBJS = utils0.o utils1.o priors.o kmeans_clstr.o xmeans_clstr.o \ +posterior.o nested.o +NESTFLAGS = $(FFLAGS) -ffree-line-length-none + +#WMAPDIR = ../WMAP +WMAPINCLUDE = -I$(CFITSIODIR)/include +WMAPOBJS = read_archive_map.o \ + read_fits.o \ + healpix_types.o \ + br_mod_dist.o \ + WMAP_7yr_options.o \ + WMAP_7yr_util.o \ + WMAP_7yr_gibbs.o \ + WMAP_7yr_tt_pixlike.o \ + WMAP_7yr_tt_beam_ptsrc_chisq.o \ + WMAP_7yr_teeebb_pixlike.o \ + WMAP_7yr_tetbeebbeb_pixlike.o \ + WMAP_7yr_likelihood.o + + +RECOMBINATION =recfast PROPOSE = propose.o CLSFILE = CMB_Cls_simple.o @@ -124,70 +80,61 
@@ CLSFILE = CMB_Cls_simple.o #Can use params_H if you prefer more generic parameters PARAMETERIZATION = params_CMB.o -F90FLAGS = -DMATRIX_SINGLE $(FFLAGS) $(IFLAG)../camb $(INCLUDE) -LINKFLAGS = -L../camb -lcamb_$(RECOMBINATION) $(LAPACKL) $(F90CRLINK) +F90FLAGS = -DMATRIX_SINGLE $(FFLAGS) -I../camb $(INCLUDE) +LFLAGS += -L../camb -lcamb_$(RECOMBINATION) $(LAPACKL) + +DISTFILES = utils.o ParamNames.o Matrix_utils.o settings.o IO.o GetDist.o -DISTFILES = ParamNames.o Matrix_utils.o settings.o IO.o GetDist.o LIKEFILES= calclike.o -OBJFILES = ParamNames.o Matrix_utils.o settings.o IO.o cmbtypes.o Planck_like.o \ +OBJFILES = utils.o ParamNames.o Matrix_utils.o settings.o IO.o cmbtypes.o Planck_like.o \ cmbdata.o WeakLen.o bbn.o lrggettheory.o mpk.o bao.o supernovae.o HST.o SDSSLy-a-v3.o \ $(CLSFILE) paramdef.o $(PROPOSE) $(PARAMETERIZATION) $(LIKEFILES) \ - EstCovmat.o PowellConstrainedMinimize.o minimize.o postprocess.o MCMC.o driver.o + EstCovmat.o PowellConstrainedMinimize.o minimize.o postprocess.o MCMC.o \ + $(NESTOBJS) nestwrap.o driver.o -ifeq ($(EXTDATA),LRG) +ifeq ($(EXTDATA),LRG) F90FLAGS += -DDR71RG -OBJFILES += bsplinepk.o -GSLINC = -I$(GSLPATH)/include -LINKFLAGS += -L$(GSLPATH)/lib -lgsl -lgslcblas -endif +LFLAGS += -lgsl -lgslcblas +OBJFILES += $(EXTOBJS) +endif -F90CRLINK = ifeq ($(RECOMBINATION),cosmorec) -## This is flag is passed to the Fortran compiler allowing it to link C++ (uncomment the right one). -# GCC (gfortran/g++) -COSMOREC_PATH ?= ../CosmoRec/ -F90CRLINK = -lstdc++ -L$(COSMOREC_PATH) -lCosmoRec -L$(GSLPATH)/lib -lgsl -lgslcblas -# Intel Compilers (ifort/icpc) -#F90CRLINK = -cxxlib -L$(COSMOREC_PATH) -lCosmoRec -L$(GSLPATH)/lib -lgsl -lgslcblas +COSMOREC_PATH = ../CosmoRec/ FFLAGS += -DCOSMOREC +LFLAGS += -lstdc++ -L$(COSMOREC_PATH) -lCosmoRec -lgsl -lgslcblas endif + ifeq ($(RECOMBINATION),hyrec) -HYREC_PATH ?= ../HyRec/ -F90CRLINK += -L$(HYREC_PATH) -lhyrec +HYREC_PATH = ../HyRec/ +LFLAGS += -L$(HYREC_PATH) -lhyrec endif -ifneq ($(WMAP),) -F90FLAGS += $(IFLAG)$(cfitsio)/include $(IFLAG)$(WMAP) -LINKFLAGS += -L$(cfitsio)/lib -L$(WMAP) -lcfitsio - -OBJFILES += $(WMAP)/read_archive_map.o \ - $(WMAP)/read_fits.o \ - $(WMAP)/healpix_types.o \ - $(WMAP)/WMAP_7yr_options.o \ - $(WMAP)/WMAP_7yr_util.o \ - $(WMAP)/WMAP_7yr_tt_pixlike.o \ - $(WMAP)/WMAP_7yr_teeebb_pixlike.o \ - $(WMAP)/WMAP_7yr_likelihood.o \ - $(WMAP)/WMAP_7yr_gibbs.o \ - $(WMAP)/WMAP_7yr_tt_beam_ptsrc_chisq.o \ - $(WMAP)/br_mod_dist.o + +ifneq ($(WMAPDIR),) +F90FLAGS += $(WMAPINCLUDE) +OBJFILES += $(WMAPOBJS) +CMBDATAOBJS = $(WMAPOBJS) +LFLAGS += -lgsl -lgslcblas -lcfitsio else F90FLAGS += -DNOWMAP endif -default: cosmomc -all : cosmomc getdist + + +default: cosmomc.$(ext) + +all : cosmomc.$(ext) getdist.$(ext) settings.o: ../camb/libcamb_$(RECOMBINATION).a cmbtypes.o: settings.o Planck_like.o: cmbtypes.o -cmbdata.o: Planck_like.o $(WMAPOBJS) +cmbdata.o: Planck_like.o $(CMBDATAOBJS) WeakLen.o: cmbtypes.o bbn.o: settings.o mpk.o: cmbtypes.o lrggettheory.o @@ -204,15 +151,28 @@ postprocess.o: calclike.o MCMC.o: calclike.o driver.o: EstCovmat.o MCMC.o minimize.o minimize.o: PowellConstrainedMinimize.o calclike.o - -export FFLAGS -export F90C +nestwrap.o:$(NESTOBJS) .f.o: f77 $(F90FLAGS) -c $< %.o: %.c - $(CC) $(GSLINC) -c $*.c + $(CC) $(EXTINCLUDE) -c $*.c + +%.o: $(NESTDIR)/%.F90 + $(F90C) $(NESTFLAGS) $(NESTINCLUDE) -c $< + +%.o: $(NESTDIR)/%.f90 + $(F90C) $(NESTFLAGS) $(NESTINCLUDE) -c $< + +%.o: $(WMAPDIR)/%.f90 + $(F90C) $(FFLAGS) $(WMAPINCLUDE) -c $< + +%.o: $(WMAPDIR)/%.F90 + $(F90C) $(FFLAGS) 
$(WMAPINCLUDE) -c $< + +utils.o: ../camb/utils.F90 + $(F90C) $(F90FLAGS) -c $< %.o: %.f90 $(F90C) $(F90FLAGS) -c $*.f90 @@ -221,19 +181,12 @@ export F90C $(F90C) $(F90FLAGS) -c $*.F90 -cosmomc: camb $(OBJFILES) - $(F90C) -o ../cosmomc $(OBJFILES) $(LINKFLAGS) $(F90FLAGS) - - -clean: cleancosmomc - rm -f ../camb/*.o ../camb/*.obj ../camb/*.mod - -cleancosmomc: - rm -f *.o *.mod *.d *.pc *.obj ../core +cosmomc.$(ext): $(OBJFILES) ../camb/libcamb_$(RECOMBINATION).a + $(F90C) -o ../$@ $(OBJFILES) $(F90FLAGS) $(LFLAGS) -getdist: camb $(DISTFILES) - $(F90C) -o ../getdist $(DISTFILES) $(LINKFLAGS) $(F90FLAGS) +getdist.$(ext): $(DISTFILES) + $(F90C) -o ../$@ $(DISTFILES) $(F90FLAGS) $(LFLAGS) -camb: - cd ../camb && $(MAKE) --file=Makefile_main libcamb_$(RECOMBINATION).a RECOMBINATION=$(RECOMBINATION) +clean: + rm -f *.o *.mod *.d *.pc *.obj ../*.$(ext) ../core \ No newline at end of file diff --git a/source/driver.F90 b/source/driver.F90 index 2e39ef6..053733c 100644 --- a/source/driver.F90 +++ b/source/driver.F90 @@ -16,6 +16,8 @@ program SolveCosmology use mpk use MatrixUtils use IO +!nest + use nestwrap use ParamNames #ifdef WMAP_PARAMS use WMAP_OPTIONS @@ -23,8 +25,10 @@ program SolveCosmology implicit none character(LEN=Ini_max_string_len) InputFile, LogFile +!nest + integer seed + logical bad, est_bfp_before_covmat - logical bad integer numsets, nummpksets, i, numtoget, action character(LEN=Ini_max_string_len) baseroot, filename(100), & mpk_filename(100), SZTemplate(100), numstr, fname, keyname @@ -37,16 +41,17 @@ program SolveCosmology real bestfit_loglike real max_like_radius integer, parameter :: action_MCMC=0, action_importance=1, action_maxlike=2 - +!nest + integer, parameter :: action_NEST = 3 #ifdef MPI double precision intime integer ierror - +#ifndef MPINEST call mpi_init(ierror) if (ierror/=MPI_SUCCESS) stop 'MPI fail: rank' #endif - +#endif instance = 0 #ifndef MPI @@ -60,7 +65,7 @@ program SolveCosmology end if #ifdef MPI - +#ifndef MPINEST if (instance /= 0) call DoAbort('With MPI should not have second parameter') call mpi_comm_rank(mpi_comm_world,MPIrank,ierror) @@ -76,9 +81,10 @@ program SolveCosmology InputFile=GetParam(1) end if - CALL MPI_Bcast(InputFile, LEN(InputFile), MPI_CHARACTER, 0, MPI_COMM_WORLD, ierror) - +#else + InputFile=GetParam(1) +#endif #endif call IO_Ini_Load(DefIni,InputFile, bad) @@ -115,6 +121,9 @@ program SolveCosmology checkpoint = Ini_Read_Logical('checkpoint',.false.) if (checkpoint) flush_write = .true. +!nest + nest_resume = checkpoint +!end nest #ifdef WMAP_PARAMS use_TT_beam_ptsrc = Ini_read_Logical('use_TT_beam_ptsrc') @@ -153,6 +162,9 @@ program SolveCosmology baseroot = ReadIniFileName(DefIni,'file_root') rootname = trim(baseroot) +!nest + nest_root = trim(baseroot)//'_nest' +!end nest FileChangeIniAll = trim(rootname)//'.read' @@ -162,8 +174,12 @@ program SolveCosmology new_chains = .true. + + action = Ini_Read_Int('action',action_MCMC) + if (action == action_NEST) checkpoint=.false. + if (checkpoint) then if (action /= action_MCMC) call DoAbort('checkpoint only with action =0') #ifdef MPI @@ -210,12 +226,37 @@ program SolveCosmology numstr = Ini_Read_String('rand_seed') if (numstr /= '') then - read(numstr,*) i - call InitRandom(i) + read(numstr,*) seed + call InitRandom(seed) else + seed = 0 call InitRandom() end if +!nest + if(action==action_NEST) then + nest_mmodal = Ini_Read_Logical('multimodal',.false.) 
+ nest_nlive = Ini_Read_Int('nlive',400) + nest_maxmodes = Ini_Read_Int('maxmodes',1) + nest_efr = Ini_Read_Double('eff',0.3d0) + nest_tol = Ini_Read_Double('tol',0.5d0) + nest_ceff = Ini_Read_Logical('ceff',.false.) + nest_updInt = Ini_Read_Int('updInt',50) + + !set up the seed for the nested sampler + if (seed /= 0) then + nest_rseed = seed + else + nest_rseed=-1 !take it from system clock + end if + !feedback for the nested sampler + if(FeedBack>0) then + nest_fb=.true. + else + nest_fb=.false. + end if + endif + use_nonlinear = Ini_Read_Logical('nonlinear_pk',.false.) pivot_k = Ini_Read_Real('pivot_k',0.05) inflation_consistency = Ini_read_Logical('inflation_consistency',.false.) @@ -384,8 +425,9 @@ program SolveCosmology else if (action==action_importance) then if (Feedback > 0) write (*,*) 'starting post processing' call postprocess(rootname, baseroot) - call DoStop('Postprocesing done',.false.) - + else if (action==action_NEST) then + if (Feedback > 0) write (*,*) 'starting Nestsampler' + call nest_sample else call DoAbort('undefined action') end if diff --git a/source/nestwrap.f90 b/source/nestwrap.f90 new file mode 100644 index 0000000..a9a9280 --- /dev/null +++ b/source/nestwrap.f90 @@ -0,0 +1,216 @@ +! The wrapper for MultiNest +! From Farhan Feroz version Apr 2008 + +module nestwrap + use ParamDef + use CalcLike + use propose + use nested + implicit none + + !nested sampling parameters + + !multiple modes expected? + logical, save :: nest_mmodal = .true. + + !constant efficiency + logical, save:: nest_ceff = .false. + + !dump output file + logical, save :: nest_outfile = .true. + + !if false MPI is not initialized in nested + logical, save :: nest_initMPI = .false. + + !number of live points + integer, save :: nest_nlive = 1000 + + ! total number of parameters (derived included) + integer, save :: nest_npar = 0 + + !param to wrap around + integer, dimension(:), allocatable, save :: nest_pwrap + + !seed for nested sampler, -ve means take it from sys clock + integer, save :: nest_rseed = -1 + + !evidence tolerance factor + real(kind=8), save :: nest_tol = 0.5 + + !sampling efficiency + real(kind=8), save :: nest_efr = 0.1 + + real(kind=8), save :: nest_logZero = -1d90 + + integer, parameter :: nest_maxIter = 1000000 + + real(kind=8), parameter :: nest_nullZ = -1.d90 + + !no. of iterations after which to update on the progress + integer, save :: nest_updInt = 1 + + !root for saving posterior files + character(LEN=100), save :: nest_root + + !max modes expected, for memory allocation + integer, save :: nest_maxModes = 10 + +!no. of parameters to cluster (for mode detection) + integer, save :: nest_nClsPar = 2 + + !feedback on the sampling progress? + logical, save :: nest_fb = .true. + + !resume from a previous run? + logical, save :: nest_resume = .true. 
+ + !dimensionality + integer, save :: sdim = 0 + + !retain Curparams throughout, for the code to be run in the only fast params mode + Type(ParamSet), save :: Curparams + +contains + + + subroutine setup() + implicit none + + real(kind=8) :: cosparams(num_params),lnew + Type(ParamSet) :: Trial + integer :: i + + cosparams = Scales%center + Trial%P = cosparams + lnew = -GetLogLike(Trial) + Curparams=Trial !retain Curparams throughout, for the code to be run in the only fast params mode + write(*,*) 'LIKELIHOOD TEST', lnew, cosparams + +!set up the prior ranges & the dimensionality + sdim=0 + do i=1,num_params_used + if(Scales%Pmin(params_used(i))/=Scales%Pmax(params_used(i))) sdim=sdim+1 + enddo + +!set dimensionaity + nest_nPar=num_params_used + + +#ifdef MPINEST + nest_initMPI = .true. +#else + nest_initMPI = .false. +#endif + + nest_logZero = -huge(1d0) + + if (Feedback > 1) then + write(*,*) 'Dimensionality of param space = ', sdim + write(*,*) 'Number of derived parameters = ', nest_nPar-sdim + write(*,*) 'Location of ln(like) in live points file, column = ', nest_nPar-sdim+1 + endif + + end subroutine setup + + + + subroutine getLogLikeNS(Cube,n_dim,nPar,lnew,context) + implicit none + + integer i,j,context,n_dim,nPar + real(kind=8) :: cosparams(num_params),Cube(nPar),lnew + logical accept + Type(ParamSet) Trial + real(kind=8), parameter :: logZero = -huge(1.d0)*epsilon(1.d0) + + accept=.false. + + cosparams = Scales%center + j=0 + do i=1,num_params_used + if(Scales%Pmin(params_used(i))/=Scales%Pmax(params_used(i))) then + j=j+1 + Cube(j)=Scales%Pmin(params_used(i))+(Scales%Pmax(params_used(i))-Scales%Pmin(params_used(i)))*Cube(j) + cosparams(params_used(i))=Cube(j) + else + cosparams(params_used(i))=Scales%Pmin(params_used(i)) + endif + enddo + + Trial=Curparams + Trial%P=cosparams + lnew=-GetLogLike(Trial) + if(lnew<-1.d20) lnew=logZero + call AcceptReject(accept,CurParams%Info,Trial%Info) + + end subroutine getLogLikeNS + + + + subroutine nest_Sample + implicit none +!total number of clusters found + integer :: nclusters + integer :: context + + + call setup() + + allocate(nest_pwrap(sdim)) + nest_pwrap = 0 + + call nestRun(nest_mmodal,nest_ceff,nest_nlive,nest_tol,nest_efr,sdim,nest_nPar, & + nest_nClsPar,nest_maxModes,nest_updInt,nest_nullZ,nest_root,nest_rseed,nest_pWrap, & + nest_fb,nest_resume,nest_outfile,nest_initMPI,nest_logZero,nest_maxIter & + ,getLogLikeNS,dumper,context) + + deallocate(nest_pwrap) + + end subroutine nest_Sample + + + + subroutine dumper(nSamples, nlive, nPar, physLive, posterior, paramConstr, maxLogLike & + , logZ, logZerr, context_pass) + + implicit none + +! number of samples in posterior array + integer nSamples +! number of live points + integer nlive +! number of parameters saved (physical plus derived) + integer nPar +! array containing the last set of live points + double precision, pointer :: physLive(:,:) +! array with the posterior distribution + double precision, pointer :: posterior(:,:) +! array with mean, sigmas, maxlike & MAP parameters + double precision, pointer :: paramConstr(:) +! max loglikelihood value + double precision maxLogLike +! log evidence + double precision logZ +! 
error on log evidence + double precision logZerr + + integer :: context_pass + + integer :: i + + write(*,*) + write(*,*)'***********************************' + write(*,*)'nestwrap dumper: ' + write(*,*)'nSamples= logZ= logZerr= ',nSamples, logZ, logZerr + write(*,*)'maxLogLike= ',maxLogLike + write(*,*)'***********************************' + write(*,*) + write(*,*)'MEAN= SIGMA= ' + do i=1,nPar + write(*,*)paramConstr(i), paramConstr(i+nPar) + enddo + write(*,*) + + end subroutine dumper + +end module nestwrap
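
Note (editorial, not part of the patch): the AndersonDarling function earlier in this patch implements the standard Anderson-Darling test of the projected, standardized and ascending-sorted points y_(1) <= ... <= y_(n) against a standard normal CDF Phi (stNormalCDF, clamped at +/-5). In LaTeX notation (mine, for reference only), the quantity it assembles is

    A^2 = -n - \frac{1}{n}\sum_{i=1}^{n} (2i-1)\left[\ln\Phi\!\left(y_{(i)}\right) + \ln\!\left(1-\Phi\!\left(y_{(n+1-i)}\right)\right)\right],
    \qquad
    A^{*2} = A^2\left(1 + \frac{4}{n} - \frac{25}{n^2}\right),

and the function returns .false. (normality rejected) when A^{*2} > 1.8692, the alpha = 0.0001 critical value quoted in the code's comment.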
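
Note (editorial, not part of the patch): the nested-sampling branch added to driver.F90 is selected with action = 3 (action_NEST) and configured through the same .ini mechanism as the existing settings. The key names below are taken directly from the Ini_Read_* calls in this diff; the values are simply the defaults used there and the layout is only a sketch, not a recommended configuration:

    action = 3
    multimodal = F
    nlive = 400
    maxmodes = 1
    eff = 0.3
    tol = 0.5
    ceff = F
    updInt = 50
    checkpoint = F
    rand_seed =

Here multimodal, nlive, maxmodes, eff, tol, ceff and updInt map onto nest_mmodal, nest_nlive, nest_maxModes, nest_efr, nest_tol, nest_ceff and nest_updInt in nestwrap.f90; checkpoint is copied into nest_resume before being forced to .false. for action = 3; an empty rand_seed gives nest_rseed = -1, i.e. MultiNest seeds from the system clock. MultiNest output is written with the root file_root//'_nest' (nest_root), and sampler feedback follows the usual Feedback setting (nest_fb). With the unified Makefiles, running gmake all in camb/ (to build libcamb_recfast.a) and then gmake in source/ should produce a binary named cosmomc.$(ext), e.g. cosmomc.Lin on Linux.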