Diffstat (limited to 'utils')
 utils/enums.py      | 13
 utils/fr.py         | 49
 utils/gf.py         | 22
 utils/likelihood.py | 30
 utils/misc.py       |  5
 utils/multinest.py  | 37
 utils/param.py      |  9
 utils/plot.py       |  9
8 files changed, 89 insertions, 85 deletions
diff --git a/utils/enums.py b/utils/enums.py
index 8a9e868..b80b712 100644
--- a/utils/enums.py
+++ b/utils/enums.py
@@ -39,8 +39,9 @@ class ParamTag(Enum):
 
 
 class PriorsCateg(Enum):
-    UNIFORM = 1
-    GAUSSIAN = 2
+    UNIFORM   = 1
+    GAUSSIAN  = 2
+    HALFGAUSS = 3
 
 
 class MCMCSeedType(Enum):
@@ -49,9 +50,11 @@ class MCMCSeedType(Enum):
 
 
 class SensitivityCateg(Enum):
-    FULL = 1
-    FIXED_ANGLE = 2
-    CORR_ANGLE = 3
+    FULL            = 1
+    FIXED_ANGLE     = 2
+    CORR_ANGLE      = 3
+    FIXED_ONE_ANGLE = 4
+    CORR_ONE_ANGLE  = 5
 
 
 class StatCateg(Enum):
diff --git a/utils/fr.py b/utils/fr.py
index 342a848..a82e081 100644
--- a/utils/fr.py
+++ b/utils/fr.py
@@ -13,9 +13,9 @@ import argparse
 from functools import partial
 
 import numpy as np
-from scipy import linalg
 
 from utils.enums import EnergyDependance
+from utils.misc import DTYPE, CDTYPE, PI
 from utils.misc import enum_parse, parse_bool
 
 
@@ -44,11 +44,11 @@ def angles_to_fr(src_angles):
     """
     sphi4, c2psi = src_angles
 
-    psi = (0.5)*np.arccos(c2psi)
+    psi = (0.5)*np.arccos(c2psi, dtype=DTYPE)
 
-    sphi2 = np.sqrt(sphi4)
+    sphi2 = np.sqrt(sphi4, dtype=DTYPE)
     cphi2 = 1. - sphi2
-    spsi2 = np.sin(psi)**2
+    spsi2 = np.sin(psi, dtype=DTYPE)**2
     cspi2 = 1. - spsi2
 
     x = sphi2*cspi2
@@ -80,16 +80,16 @@ def angles_to_u(bsm_angles):
     """
     s12_2, c13_4, s23_2, dcp = bsm_angles
-    dcp = np.complex128(dcp)
+    dcp = CDTYPE(dcp)
 
     c12_2 = 1. - s12_2
-    c13_2 = np.sqrt(c13_4)
+    c13_2 = np.sqrt(c13_4, dtype=DTYPE)
     s13_2 = 1. - c13_2
     c23_2 = 1. - s23_2
 
-    t12 = np.arcsin(np.sqrt(s12_2))
-    t13 = np.arccos(np.sqrt(c13_2))
-    t23 = np.arcsin(np.sqrt(s23_2))
+    t12 = np.arcsin(np.sqrt(s12_2, dtype=DTYPE))
+    t13 = np.arccos(np.sqrt(c13_2, dtype=DTYPE))
+    t23 = np.arcsin(np.sqrt(s23_2, dtype=DTYPE))
 
     c12 = np.cos(t12)
     s12 = np.sin(t12)
@@ -98,9 +98,9 @@ def angles_to_u(bsm_angles):
     c23 = np.cos(t23)
     s23 = np.sin(t23)
 
-    p1 = np.array([[1 , 0 , 0] , [0 , c23 , s23] , [0 , -s23 , c23]] , dtype=np.complex128)
-    p2 = np.array([[c13 , 0 , s13*np.exp(-1j*dcp)] , [0 , 1 , 0] , [-s13*np.exp(1j*dcp) , 0 , c13]] , dtype=np.complex128)
-    p3 = np.array([[c12 , s12 , 0] , [-s12 , c12 , 0] , [0 , 0 , 1]] , dtype=np.complex128)
+    p1 = np.array([[1 , 0 , 0] , [0 , c23 , s23] , [0 , -s23 , c23]] , dtype=CDTYPE)
+    p2 = np.array([[c13 , 0 , s13*np.exp(-1j*dcp)] , [0 , 1 , 0] , [-s13*np.exp(1j*dcp) , 0 , c13]] , dtype=CDTYPE)
+    p3 = np.array([[c12 , s12 , 0] , [-s12 , c12 , 0] , [0 , 0 , 1]] , dtype=CDTYPE)
 
     u = np.dot(np.dot(p1, p2), p3)
     return u
@@ -142,15 +142,15 @@ def cardano_eqn(ham):
     a = -np.trace(ham)
     b = (0.5) * ((np.trace(ham))**2 - np.trace(np.dot(ham, ham)))
-    c = -linalg.det(ham)
+    c = -np.linalg.det(ham)
 
     Q = (1/9.) * (a**2 - 3*b)
     R = (1/54.) * (2*a**3 - 9*a*b + 27*c)
     theta = np.arccos(R / np.sqrt(Q**3))
 
     E1 = -2 * np.sqrt(Q) * np.cos(theta/3.) - (1/3.)*a
-    E2 = -2 * np.sqrt(Q) * np.cos((theta - 2.*np.pi)/3.) - (1/3.)*a
-    E3 = -2 * np.sqrt(Q) * np.cos((theta + 2.*np.pi)/3.) - (1/3.)*a
+    E2 = -2 * np.sqrt(Q) * np.cos((theta - 2.*PI)/3.) - (1/3.)*a
+    E3 = -2 * np.sqrt(Q) * np.cos((theta + 2.*PI)/3.) - (1/3.)*a
 
     A1 = ham[1][2] * (ham[0][0] - E1) - ham[1][0]*ham[0][2]
     A2 = ham[1][2] * (ham[0][0] - E2) - ham[1][0]*ham[0][2]
@@ -164,9 +164,9 @@ def cardano_eqn(ham):
     C2 = ham[1][0] * (ham[2][2] - E2) - ham[1][2]*ham[2][0]
     C3 = ham[1][0] * (ham[2][2] - E3) - ham[1][2]*ham[2][0]
 
-    N1 = np.sqrt(abs(A1*B1)**2 + abs(A1*C1)**2 + abs(B1*C1)**2)
-    N2 = np.sqrt(abs(A2*B2)**2 + abs(A2*C2)**2 + abs(B2*C2)**2)
-    N3 = np.sqrt(abs(A3*B3)**2 + abs(A3*C3)**2 + abs(B3*C3)**2)
+    N1 = np.sqrt(np.abs(A1*B1, dtype=DTYPE)**2 + np.abs(A1*C1, dtype=DTYPE)**2 + np.abs(B1*C1, dtype=DTYPE)**2)
+    N2 = np.sqrt(np.abs(A2*B2, dtype=DTYPE)**2 + np.abs(A2*C2, dtype=DTYPE)**2 + np.abs(B2*C2, dtype=DTYPE)**2)
+    N3 = np.sqrt(np.abs(A3*B3, dtype=DTYPE)**2 + np.abs(A3*C3, dtype=DTYPE)**2 + np.abs(B3*C3, dtype=DTYPE)**2)
 
     mm = np.array([
         [np.conjugate(B1)*C1 / N1, np.conjugate(B2)*C2 / N2, np.conjugate(B3)*C3 / N3],
@@ -195,7 +195,7 @@ def normalise_fr(fr):
     array([ 0.16666667, 0.33333333, 0.5 ])
 
     """
-    return np.array(fr) / float(np.sum(fr))
+    return np.array(fr) / np.sum(fr)
 
 
 def estimate_scale(args):
@@ -300,7 +300,7 @@ NUFIT_U = angles_to_u((0.307, (1-0.02195)**2, 0.565, 3.97935))
 def params_to_BSMu(theta, dim, energy, mass_eigenvalues=MASS_EIGENVALUES,
                    sm_u=NUFIT_U, no_bsm=False, fix_mixing=False,
                    fix_mixing_almost=False, fix_scale=False, scale=None,
-                   check_uni=True, epsilon=1e-9):
+                   check_uni=True, epsilon=1e-7):
     """Construct the BSM mixing matrix from the BSM parameters.
 
     Parameters
@@ -393,8 +393,8 @@ def params_to_BSMu(theta, dim, energy, mass_eigenvalues=MASS_EIGENVALUES,
     if check_uni:
         tu = test_unitarity(eg_vector)
-        if not abs(np.trace(tu) - 3.) < epsilon or \
-           not abs(np.sum(tu) - 3.) < epsilon:
+        if not np.abs(np.trace(tu) - 3.) < epsilon or \
+           not np.abs(np.sum(tu) - 3.) < epsilon:
             raise AssertionError(
                 'Matrix is not unitary!\neg_vector\n{0}\ntest '
                 'u\n{1}'.format(eg_vector, tu)
             )
@@ -427,7 +427,7 @@ def test_unitarity(x, prnt=False):
            [ 0., 0., 1.]])
 
     """
-    f = abs(np.dot(x, x.conj().T))
+    f = np.abs(np.dot(x, x.conj().T), dtype=DTYPE)
     if prnt:
         print 'Unitarity test:\n{0}'.format(f)
     return f
@@ -456,7 +456,8 @@ def u_to_fr(source_fr, matrix):
     """
     composition = np.einsum(
-        'ai, bi, a -> b', abs(matrix)**2, abs(matrix)**2, source_fr
+        'ai, bi, a -> b',
+        np.abs(matrix)**2, np.abs(matrix)**2, source_fr,
     )
     ratio = composition / np.sum(source_fr)
     return ratio
diff --git a/utils/gf.py b/utils/gf.py
index 59d1033..17ac029 100644
--- a/utils/gf.py
+++ b/utils/gf.py
@@ -16,6 +16,7 @@ import GolemFitPy as gf
 
 from utils.enums import DataType, SteeringCateg
 from utils.misc import enum_parse, thread_factors
+from utils.param import ParamSet
 
 
 def fit_flags(llh_paramset):
@@ -23,9 +24,6 @@ def fit_flags(llh_paramset):
         # False means it's not fixed in minimization
         'astroFlavorAngle1' : True,
         'astroFlavorAngle2' : True,
-        'astroENorm' : True,
-        'astroMuNorm' : True,
-        'astroTauNorm' : True,
         'convNorm' : True,
         'promptNorm' : True,
         'muonNorm' : True,
@@ -44,9 +42,14 @@ def fit_flags(llh_paramset):
         'astroDeltaGammaSec' : True
     }
     flags = gf.FitParametersFlag()
+    gf_nuisance = []
     for param in llh_paramset:
-        flags.__setattr__(param.name, False)
-    return flags
+        if param.name in default_flags:
+            print 'Setting param {0:<15} to float in the ' \
+                'minimisation'.format(param.name)
+            flags.__setattr__(param.name, False)
+            gf_nuisance.append(param)
+    return flags, ParamSet(gf_nuisance)
 
 
 def steering_params(args):
@@ -68,7 +71,7 @@ def set_up_as(fitter, params):
     print 'Injecting the model', params
     asimov_params = gf.FitParameters(gf.sampleTag.HESE)
     for parm in params:
-        asimov_params.__setattr__(parm.name, parm.value)
+        asimov_params.__setattr__(parm.name, float(parm.value))
     fitter.SetupAsimov(asimov_params)
 
 
@@ -84,7 +87,7 @@ def setup_fitter(args, asimov_paramset):
 def get_llh(fitter, params):
     fitparams = gf.FitParameters(gf.sampleTag.HESE)
     for parm in params:
-        fitparams.__setattr__(parm.name, parm.value)
+        fitparams.__setattr__(parm.name, float(parm.value))
     llh = -fitter.EvalLLH(fitparams)
     return llh
 
@@ -93,9 +96,10 @@ def get_llh_freq(fitter, params):
     print 'setting to {0}'.format(params)
     fitparams = gf.FitParameters(gf.sampleTag.HESE)
     for parm in params:
-        fitparams.__setattr__(parm.name, parm.value)
+        fitparams.__setattr__(parm.name, float(parm.value))
     fitter.SetFitParametersSeed(fitparams)
-    return -fitter.MinLLH().likelihood
+    llh = -fitter.MinLLH().likelihood
+    return llh
 
 
 def data_distributions(fitter):
diff --git a/utils/likelihood.py b/utils/likelihood.py
index 04828e8..6387a1e 100644
--- a/utils/likelihood.py
+++ b/utils/likelihood.py
@@ -12,7 +12,7 @@ from __future__ import absolute_import, division
 from functools import partial
 
 import numpy as np
-from scipy.stats import multivariate_normal
+from scipy.stats import multivariate_normal, rv_continuous
 
 import GolemFitPy as gf
 
@@ -22,15 +22,16 @@ from utils.enums import EnergyDependance, Likelihood, ParamTag, PriorsCateg
 from utils.misc import enum_parse
 
 
-def multi_gaussian(fr, fr_bf, sigma):
-    """Multivariate gaussian likelihood."""
-    cov_fr = np.identity(3) * sigma
-    return np.log(multivariate_normal.pdf(fr, mean=fr_bf, cov=cov_fr))
+class Gaussian(rv_continuous):
+    """Gaussian for one dimension."""
+    def _pdf(self, x, mu, sig):
+        return (1./np.sqrt(2*np.pi*sig**2))*np.exp(-((x-mu)**2)/(2*sig**2))
 
 
-def log_gaussian(x, mu, sig):
-    """Log gaussian for one dimension."""
-    return np.log((1./np.sqrt(2*np.pi*sig**2))*np.exp(-((x-mu)**2)/(2*sig**2)))
+def multi_gaussian(fr, fr_bf, sigma, offset=-320):
+    """Multivariate gaussian likelihood."""
+    cov_fr = np.identity(3) * sigma
+    return np.log(multivariate_normal.pdf(fr, mean=fr_bf, cov=cov_fr)) + offset
 
 
 def likelihood_argparse(parser):
@@ -46,6 +47,11 @@ def likelihood_argparse(parser):
 
 def lnprior(theta, paramset):
     """Priors on theta."""
+    if len(theta) != len(paramset):
+        raise AssertionError(
+            'Length of MCMC scan is not the same as the input '
+            'params\ntheta={0}\nparamset={1}'.format(theta, paramset)
+        )
     for idx, param in enumerate(paramset):
         param.value = theta[idx]
     ranges = paramset.ranges
@@ -57,9 +63,13 @@ def lnprior(theta, paramset):
     prior = 0
     for param in paramset:
         if param.prior is PriorsCateg.GAUSSIAN:
-            prior += log_gaussian(
+            prior += Gaussian().logpdf(
                 param.nominal_value, param.value, param.std
             )
+        elif param.prior is PriorsCateg.HALFGAUSS:
+            prior += Gaussian().logpdf(
+                param.nominal_value, param.value, param.std
+            ) + Gaussian().logcdf(1, param.value, param.std)
     return prior
 
 
@@ -68,7 +78,7 @@ def triangle_llh(theta, args, asimov_paramset, llh_paramset, fitter):
     if len(theta) != len(llh_paramset):
         raise AssertionError(
             'Length of MCMC scan is not the same as the input '
-            'params\ntheta={0}\nmcmc_paramset]{1}'.format(theta, llh_paramset)
+            'params\ntheta={0}\nparamset]{1}'.format(theta, llh_paramset)
         )
     for idx, param in enumerate(llh_paramset):
         param.value = theta[idx]
diff --git a/utils/misc.py b/utils/misc.py
index 59c5edb..970c693 100644
--- a/utils/misc.py
+++ b/utils/misc.py
@@ -22,6 +22,11 @@ import numpy as np
 from utils.enums import Likelihood
 
 
+DTYPE = np.float128
+CDTYPE = np.complex128
+PI = np.arccos(DTYPE(-1))
+
+
 class SortingHelpFormatter(argparse.HelpFormatter):
     """Sort argparse help options alphabetically."""
     def add_arguments(self, actions):
diff --git a/utils/multinest.py b/utils/multinest.py
index 426a951..005a43a 100644
--- a/utils/multinest.py
+++ b/utils/multinest.py
@@ -16,26 +16,11 @@ import numpy as np
 from pymultinest import analyse, run
 
 from utils import likelihood
-from utils.misc import make_dir, parse_bool
+from utils.misc import gen_outfile_name, make_dir, parse_bool
 
 
-def CubePrior(cube, ndim, n_params, mn_paramset):
-    if ndim != len(mn_paramset):
-        raise AssertionError(
-            'Length of MultiNest scan paramset is not the same as the input '
-            'params\ncube={0}\nmn_paramset]{1}'.format(cube, mn_paramset)
-        )
-    pranges = mn_paramset.seeds
-    for i in xrange(ndim):
-        mn_paramset[i].value = (pranges[i][1]-pranges[i][0])*cube[i] + pranges[i][0]
-    prior = 0
-    for parm in mn_paramset:
-        if parm.prior is PriorsCateg.GAUSSIAN:
-            prior_penatly += multivariate_normal(
-                parm.nominal_value, mean=parm.value, cov=parm.std
-            )
-    print 'prior', prior
-    return prior
+def CubePrior(cube, ndim, n_params):
+    pass
 
 
 def lnProb(cube, ndim, n_params, mn_paramset, llh_paramset, asimov_paramset,
@@ -63,18 +48,6 @@ def lnProb(cube, ndim, n_params, mn_paramset, llh_paramset, asimov_paramset,
 
 def mn_argparse(parser):
     parser.add_argument(
-        '--mn-run', type=parse_bool, default='True',
-        help='Run MultiNest'
-    )
-    parser.add_argument(
-        '--mn-bins', type=int, default=10,
-        help='Number of bins for the Bayes factor plot'
-    )
-    parser.add_argument(
-        '--mn-eval-bin', type=str, default='all',
-        help='Which bin to evalaute for Bayes factor plot'
-    )
-    parser.add_argument(
         '--mn-live-points', type=int, default=3000,
        help='Number of live points for MultiNest evaluations'
     )
@@ -104,8 +77,8 @@ def mn_evidence(mn_paramset, llh_paramset, asimov_paramset, args, fitter):
         fitter = fitter
     )
 
-    prefix = './mnrun/DIM{0}/{1:>019}_'.format(
-        args.dimension, np.random.randint(0, 2**63)
+    prefix = './mnrun/DIM{0}/{1}_{2:>019}_'.format(
+        args.dimension, gen_outfile_name(args), np.random.randint(0, 2**63)
     )
     make_dir(prefix)
     print 'Running evidence calculation for {0}'.format(prefix)
diff --git a/utils/param.py b/utils/param.py
index 13f1a63..4d8106e 100644
--- a/utils/param.py
+++ b/utils/param.py
@@ -205,6 +205,13 @@ class ParamSet(Sequence):
         else:
             return ParamSet([io[1] for io in ps])
 
+    def remove_params(self, params):
+        rm_paramset = []
+        for parm in self.params:
+            if parm.name not in params.names:
+                rm_paramset.append(parm)
+        return ParamSet(rm_paramset)
+
 
 def get_paramsets(args, nuisance_paramset):
     """Make the paramsets for generating the Asmimov MC sample and also running
@@ -216,7 +223,7 @@ def get_paramsets(args, nuisance_paramset):
     llh_paramset.extend(
         [x for x in nuisance_paramset.from_tag(ParamTag.SM_ANGLES)]
     )
-    if args.likelihood is Likelihood.GOLEMFIT:
+    if args.likelihood in [Likelihood.GOLEMFIT, Likelihood.GF_FREQ]:
         gf_nuisance = [x for x in nuisance_paramset.from_tag(ParamTag.NUISANCE)]
         asimov_paramset.extend(gf_nuisance)
         llh_paramset.extend(gf_nuisance)
diff --git a/utils/plot.py b/utils/plot.py
index 6ae8dc2..0c431cf 100644
--- a/utils/plot.py
+++ b/utils/plot.py
@@ -233,10 +233,11 @@ def chainer_plot(infile, outfile, outformat, args, llh_paramset):
 
 def plot_statistic(data, outfile, outformat, args, scale_param, label=None):
     """Make MultiNest factor or LLH value plot."""
-    print "Making MultiNest Factor plot"
+    print "Making Statistic plot"
     fig_text = gen_figtext(args)
     if label is not None: fig_text += '\n' + label
 
+    print 'data', data
     print 'data.shape', data.shape
     scales, statistic = data.T
     if args.stat_method is StatCateg.BAYESIAN:
@@ -244,9 +245,9 @@ def plot_statistic(data, outfile, outformat, args, scale_param, label=None):
         null = statistic[min_idx]
         reduced_ev = -(statistic - null)
     elif args.stat_method is StatCateg.FREQUENTIST:
-        min_idx = np.argmin(statistic)
+        min_idx = np.argmin(scales)
         null = statistic[min_idx]
-        reduced_ev = 2*(statistic - null)
+        reduced_ev = -2*(statistic - null)
 
     fig = plt.figure(figsize=(7, 5))
     ax = fig.add_subplot(111)
@@ -256,7 +257,7 @@ def plot_statistic(data, outfile, outformat, args, scale_param, label=None):
     if args.stat_method is StatCateg.BAYESIAN:
         ax.set_ylabel(r'Bayes Factor')
     elif args.stat_method is StatCateg.FREQUENTIST:
-        ax.set_ylabel(r'$\Delta {\rm LLH}$')
+        ax.set_ylabel(r'$-2\Delta {\rm LLH}$')
 
     ax.plot(scales, reduced_ev)
