diff options
| author | Shivesh Mandalia <shivesh.mandalia@outlook.com> | 2020-02-28 18:39:45 +0000 |
|---|---|---|
| committer | Shivesh Mandalia <shivesh.mandalia@outlook.com> | 2020-02-28 18:39:45 +0000 |
| commit | 402f8b53dd892b8fd44ae5ad45eac91b5f6b3750 (patch) | |
| tree | b619c6efb0eb303e164bbd27691cdd9f8fce36a2 /golemflavor | |
| parent | 3a5a6c658e45402d413970e8d273a656ed74dcf5 (diff) | |
| download | GolemFlavor-402f8b53dd892b8fd44ae5ad45eac91b5f6b3750.tar.gz GolemFlavor-402f8b53dd892b8fd44ae5ad45eac91b5f6b3750.zip | |
reogranise into a python package
Diffstat (limited to 'golemflavor')
| -rw-r--r-- | golemflavor/__init__.py | 0 | ||||
| -rw-r--r-- | golemflavor/__version__.py | 1 | ||||
| -rw-r--r-- | golemflavor/enums.py | 63 | ||||
| -rw-r--r-- | golemflavor/fr.py | 531 | ||||
| -rw-r--r-- | golemflavor/gf.py | 156 | ||||
| -rw-r--r-- | golemflavor/llh.py | 111 | ||||
| -rw-r--r-- | golemflavor/mcmc.py | 120 | ||||
| -rw-r--r-- | golemflavor/misc.py | 226 | ||||
| -rw-r--r-- | golemflavor/mn.py | 106 | ||||
| -rw-r--r-- | golemflavor/param.py | 213 | ||||
| -rw-r--r-- | golemflavor/plot.py | 1030 |
11 files changed, 2557 insertions, 0 deletions
diff --git a/golemflavor/__init__.py b/golemflavor/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/golemflavor/__init__.py diff --git a/golemflavor/__version__.py b/golemflavor/__version__.py new file mode 100644 index 0000000..b794fd4 --- /dev/null +++ b/golemflavor/__version__.py @@ -0,0 +1 @@ +__version__ = '0.1.0' diff --git a/golemflavor/enums.py b/golemflavor/enums.py new file mode 100644 index 0000000..e85158d --- /dev/null +++ b/golemflavor/enums.py @@ -0,0 +1,63 @@ +# author : S. Mandalia +# s.p.mandalia@qmul.ac.uk +# +# date : March 17, 2018 + +""" +Define Enums for the BSM flavour ratio analysis +""" + +from enum import Enum + + +def str_enum(x): + return '{0}'.format(str(x).split('.')[-1]) + + +class DataType(Enum): + REAL = 1 + ASIMOV = 2 + REALISATION = 3 + + +class Likelihood(Enum): + GOLEMFIT = 1 + GF_FREQ = 2 + + +class ParamTag(Enum): + NUISANCE = 1 + SM_ANGLES = 2 + MMANGLES = 3 + SCALE = 4 + SRCANGLES = 5 + BESTFIT = 6 + NONE = 7 + + +class PriorsCateg(Enum): + UNIFORM = 1 + GAUSSIAN = 2 + LIMITEDGAUSS = 3 + + +class MCMCSeedType(Enum): + UNIFORM = 1 + GAUSSIAN = 2 + + +class StatCateg(Enum): + BAYESIAN = 1 + FREQUENTIST = 2 + + +class SteeringCateg(Enum): + P2_0 = 1 + P2_1 = 2 + + +class Texture(Enum): + OEU = 1 + OET = 2 + OUT = 3 + NONE = 4 diff --git a/golemflavor/fr.py b/golemflavor/fr.py new file mode 100644 index 0000000..bf0fb56 --- /dev/null +++ b/golemflavor/fr.py @@ -0,0 +1,531 @@ +# author : S. Mandalia +# s.p.mandalia@qmul.ac.uk +# +# date : March 17, 2018 + +""" +Useful functions for the BSM flavour ratio analysis +""" + +from __future__ import absolute_import, division + +from functools import partial + +import numpy as np + +from utils.enums import ParamTag, Texture +from utils.misc import enum_parse, parse_bool + +import mpmath as mp +mp.mp.dps = 100 # Computation precision + +# DTYPE = np.float128 +# CDTYPE = np.complex256 +# PI = np.arccos(DTYPE(-1)) +# SQRT = np.sqrt +# COS = np.cos +# SIN = np.sin +# ACOS = np.arccos +# ASIN = np.arcsin +# EXP = np.exp + +DTYPE = mp.mpf +CDTYPE = mp.mpc +PI = mp.pi +SQRT = mp.sqrt +COS = mp.cos +SIN = mp.sin +ACOS = mp.acos +ASIN = mp.asin +EXP = mp.exp + +MASS_EIGENVALUES = [7.40E-23, 2.515E-21] +"""SM mass eigenvalues.""" + +SCALE_BOUNDARIES = { + 3 : (-32, -20), + 4 : (-40, -24), + 5 : (-48, -27), + 6 : (-56, -30), + 7 : (-64, -33), + 8 : (-72, -36) +} +"""Boundaries to scan the NP scale for each dimension.""" + + +def determinant(x): + """Calculate the determininant of a 3x3 matrix. + + Parameters + ---------- + x : ndarray, shape = (3, 3) + + Returns + ---------- + float determinant + + Examples + ---------- + >>> print determinant( + >>> [[-1.65238188-0.59549718j, 0.27486548-0.18437467j, -1.35524534-0.38542072j], + >>> [-1.07480906+0.29630449j, -0.47808456-0.80316821j, -0.88609356-1.50737308j], + >>> [-0.14924144-0.99230446j, 0.49504234+0.63639805j, 2.29258915-0.36537507j]] + >>> ) + (2.7797571563274688+3.0841795325804848j) + + """ + return (x[0][0] * (x[1][1] * x[2][2] - x[2][1] * x[1][2]) + -x[1][0] * (x[0][1] * x[2][2] - x[2][1] * x[0][2]) + +x[2][0] * (x[0][1] * x[1][2] - x[1][1] * x[0][2])) + + +def angles_to_fr(src_angles): + """Convert angular projection of the source flavour ratio back into the + flavour ratio. 
+ + Parameters + ---------- + src_angles : list, length = 2 + sin(phi)^4 and cos(psi)^2 + + Returns + ---------- + flavour ratios (nue, numu, nutau) + + Examples + ---------- + >>> print angles_to_fr((0.3, 0.4)) + (0.38340579025361626, 0.16431676725154978, 0.45227744249483393) + + """ + sphi4, c2psi = map(DTYPE, src_angles) + + psi = (0.5)*ACOS(c2psi) + + sphi2 = SQRT(sphi4) + cphi2 = 1. - sphi2 + spsi2 = SIN(psi)**2 + cspi2 = 1. - spsi2 + + x = float(abs(sphi2*cspi2)) + y = float(abs(sphi2*spsi2)) + z = float(abs(cphi2)) + return x, y, z + + +def angles_to_u(bsm_angles): + """Convert angular projection of the mixing matrix elements back into the + mixing matrix elements. + + Parameters + ---------- + bsm_angles : list, length = 4 + sin(12)^2, cos(13)^4, sin(23)^2 and deltacp + + Returns + ---------- + unitary numpy ndarray of shape (3, 3) + + Examples + ---------- + >>> from fr import angles_to_u + >>> print angles_to_u((0.2, 0.3, 0.5, 1.5)) + array([[ 0.66195018+0.j , 0.33097509+0.j , 0.04757188-0.6708311j ], + [-0.34631487-0.42427084j, 0.61741198-0.21213542j, 0.52331757+0.j ], + [ 0.28614067-0.42427084j, -0.64749908-0.21213542j, 0.52331757+0.j ]]) + + """ + s12_2, c13_4, s23_2, dcp = map(DTYPE, bsm_angles) + dcp = CDTYPE(dcp) + + c12_2 = 1. - s12_2 + c13_2 = SQRT(c13_4) + s13_2 = 1. - c13_2 + c23_2 = 1. - s23_2 + + t12 = ASIN(SQRT(s12_2)) + t13 = ACOS(SQRT(c13_2)) + t23 = ASIN(SQRT(s23_2)) + + c12 = COS(t12) + s12 = SIN(t12) + c13 = COS(t13) + s13 = SIN(t13) + c23 = COS(t23) + s23 = SIN(t23) + + p1 = np.array([[1 , 0 , 0] , [0 , c23 , s23] , [0 , -s23 , c23]] , dtype=CDTYPE) + p2 = np.array([[c13 , 0 , s13*EXP(-1j*dcp)] , [0 , 1 , 0] , [-s13*EXP(1j*dcp) , 0 , c13]] , dtype=CDTYPE) + p3 = np.array([[c12 , s12 , 0] , [-s12 , c12 , 0] , [0 , 0 , 1]] , dtype=CDTYPE) + + u = np.dot(np.dot(p1, p2), p3) + return u + + +def cardano_eqn(ham): + """Diagonalise the effective Hamiltonian 3x3 matrix into the form + h_{eff} = UE_{eff}U^{dagger} using the procedure in PRD91, 052003 (2015). 
+ + Parameters + ---------- + ham : numpy ndarray of shape (3, 3) + sin(12)^2, cos(13)^4, sin(23)^2 and deltacp + + Returns + ---------- + unitary numpy ndarray of shape (3, 3) + + Examples + ---------- + >>> import numpy as np + >>> from fr import cardano_eqn + >>> ham = np.array( + >>> [[ 0.66195018+0.j , 0.33097509+0.j , 0.04757188-0.6708311j ], + >>> [-0.34631487-0.42427084j, 0.61741198-0.21213542j, 0.52331757+0.j ], + >>> [ 0.28614067-0.42427084j, -0.64749908-0.21213542j, 0.52331757+0.j ]] + >>> ) + >>> print cardano_eqn(ham) + array([[-0.11143379-0.58863683j, -0.09067747-0.48219068j, 0.34276625-0.08686465j], + [ 0.14835519+0.47511473j, -0.18299305+0.40777481j, 0.31906300+0.82514223j], + [-0.62298966+0.07231745j, -0.61407815-0.42709603j, 0.03660313+0.30160428j]]) + + """ + if np.shape(ham) != (3, 3): + raise ValueError( + 'Input matrix should be a square and dimension 3, ' + 'got\n{0}'.format(ham) + ) + + a = -np.trace(ham) + b = DTYPE(1)/2 * ((np.trace(ham))**DTYPE(2) - np.trace(np.dot(ham, ham))) + c = -determinant(ham) + + Q = (DTYPE(1)/9) * (a**DTYPE(2) - DTYPE(3)*b) + R = (DTYPE(1)/54) * (DTYPE(2)*a**DTYPE(3) - DTYPE(9)*a*b + DTYPE(27)*c) + theta = ACOS(R / SQRT(Q**DTYPE(3))) + + E1 = -DTYPE(2) * SQRT(Q) * COS(theta/DTYPE(3)) - (DTYPE(1)/3)*a + E2 = -DTYPE(2) * SQRT(Q) * COS((theta - DTYPE(2)*PI)/DTYPE(3)) - (DTYPE(1)/3)*a + E3 = -DTYPE(2) * SQRT(Q) * COS((theta + DTYPE(2)*PI)/DTYPE(3)) - (DTYPE(1)/3)*a + + A1 = ham[1][2] * (ham[0][0] - E1) - ham[1][0]*ham[0][2] + A2 = ham[1][2] * (ham[0][0] - E2) - ham[1][0]*ham[0][2] + A3 = ham[1][2] * (ham[0][0] - E3) - ham[1][0]*ham[0][2] + + B1 = ham[2][0] * (ham[1][1] - E1) - ham[2][1]*ham[1][0] + B2 = ham[2][0] * (ham[1][1] - E2) - ham[2][1]*ham[1][0] + B3 = ham[2][0] * (ham[1][1] - E3) - ham[2][1]*ham[1][0] + + C1 = ham[1][0] * (ham[2][2] - E1) - ham[1][2]*ham[2][0] + C2 = ham[1][0] * (ham[2][2] - E2) - ham[1][2]*ham[2][0] + C3 = ham[1][0] * (ham[2][2] - E3) - ham[1][2]*ham[2][0] + + N1 = SQRT(np.abs(A1*B1)**2 + np.abs(A1*C1)**2 + np.abs(B1*C1)**2) + N2 = SQRT(np.abs(A2*B2)**2 + np.abs(A2*C2)**2 + np.abs(B2*C2)**2) + N3 = SQRT(np.abs(A3*B3)**2 + np.abs(A3*C3)**2 + np.abs(B3*C3)**2) + + mm = np.array([ + [np.conjugate(B1)*C1 / N1, np.conjugate(B2)*C2 / N2, np.conjugate(B3)*C3 / N3], + [A1*C1 / N1, A2*C2 / N2, A3*C3 / N3], + [A1*B1 / N1, A2*B2 / N2, A3*B3 / N3] + ]) + return mm + + +def normalise_fr(fr): + """Normalise an input flavour combination to a flavour ratio. 
+ + Parameters + ---------- + fr : list, length = 3 + flavour combination + + Returns + ---------- + numpy ndarray flavour ratio + + Examples + ---------- + >>> from fr import normalise_fr + >>> print normalise_fr((1, 2, 3)) + array([ 0.16666667, 0.33333333, 0.5 ]) + + """ + return np.array(fr) / float(np.sum(fr)) + + +def fr_argparse(parser): + parser.add_argument( + '--injected-ratio', type=float, nargs=3, required=False, + help='Injected ratio if not using data' + ) + parser.add_argument( + '--source-ratio', type=float, nargs=3, default=[1, 2, 0], + help='Set the source flavour ratio for the case when you want to fix it' + ) + parser.add_argument( + '--no-bsm', type=parse_bool, default='False', + help='Turn off BSM terms' + ) + parser.add_argument( + '--dimension', type=int, default=3, + help='Set the new physics dimension to consider' + ) + parser.add_argument( + '--texture', type=partial(enum_parse, c=Texture), + default='none', choices=Texture, help='Set the BSM mixing texture' + ) + parser.add_argument( + '--binning', default=[6e4, 1e7, 20], type=float, nargs=3, + help='Binning for spectral energy dependance' + ) + + +def fr_to_angles(ratios): + """Convert from flavour ratio into the angular projection of the flavour + ratios. + + Parameters + ---------- + TODO(shivesh) + """ + fr0, fr1, fr2 = normalise_fr(ratios) + + cphi2 = fr2 + sphi2 = (1.0 - cphi2) + + if sphi2 == 0.: + return (0., 0.) + else: + cpsi2 = fr0 / sphi2 + + sphi4 = sphi2**2 + c2psi = COS(ACOS(SQRT(cpsi2))*2) + + return map(float, (sphi4, c2psi)) + + +NUFIT_U = angles_to_u((0.307, (1-0.02195)**2, 0.565, 3.97935)) +"""NuFIT mixing matrix (s_12^2, c_13^4, s_23^2, dcp)""" + + +def params_to_BSMu(bsm_angles, dim, energy, mass_eigenvalues=MASS_EIGENVALUES, + sm_u=NUFIT_U, no_bsm=False, texture=Texture.NONE, + check_uni=True, epsilon=1e-7): + """Construct the BSM mixing matrix from the BSM parameters. + + Parameters + ---------- + bsm_angles : list, length > 3 + BSM parameters + + dim : int + Dimension of BSM physics + + energy : float + Energy in GeV + + mass_eigenvalues : list, length = 2 + SM mass eigenvalues + + sm_u : numpy ndarray, dimension 3 + SM mixing matrix + + no_bsm : bool + Turn off BSM behaviour + + texture : Texture + BSM mixing texture + + check_uni : bool + Check the resulting BSM mixing matrix is unitary + + Returns + ---------- + unitary numpy ndarray of shape (3, 3) + + Examples + ---------- + >>> from fr import params_to_BSMu + >>> print params_to_BSMu((0.2, 0.3, 0.5, 1.5, -20), dim=3, energy=1000) + array([[ 0.18658169 -6.34190523e-01j, -0.26460391 +2.01884200e-01j, 0.67247096 -9.86808417e-07j], + [-0.50419832 +2.14420570e-01j, -0.36013768 +5.44254868e-01j, 0.03700961 +5.22039894e-01j], + [-0.32561308 -3.95946524e-01j, 0.64294909 -2.23453580e-01j, 0.03700830 +5.22032403e-01j]]) + + """ + if np.shape(sm_u) != (3, 3): + raise ValueError( + 'Input matrix should be a square and dimension 3, ' + 'got\n{0}'.format(sm_u) + ) + + if not isinstance(bsm_angles, (list, tuple)): + bsm_angles = [bsm_angles] + + z = 0.+1e-9 + if texture is Texture.OEU: + np_s12_2, np_c13_4, np_s23_2, np_dcp, sc2 = 0.5, 1.0, z, z, bsm_angles + elif texture is Texture.OET: + np_s12_2, np_c13_4, np_s23_2, np_dcp, sc2 = z, 0.25, z, z, bsm_angles + elif texture is Texture.OUT: + np_s12_2, np_c13_4, np_s23_2, np_dcp, sc2 = z, 1.0, 0.5, z, bsm_angles + else: + np_s12_2, np_c13_4, np_s23_2, np_dcp, sc2 = bsm_angles + + sc2 = np.power(10., sc2) + sc1 = sc2 / 100. 
+ + mass_matrix = np.array( + [[0, 0, 0], [0, mass_eigenvalues[0], 0], [0, 0, mass_eigenvalues[1]]] + ) + sm_ham = (1./(2*energy))*np.dot(sm_u, np.dot(mass_matrix, sm_u.conj().T)) + if no_bsm: + eg_vector = cardano_eqn(sm_ham) + else: + NP_U = angles_to_u((np_s12_2, np_c13_4, np_s23_2, np_dcp)) + SC_U = np.array( + [[0, 0, 0], [0, sc1, 0], [0, 0, sc2]] + ) + bsm_term = (energy**(dim-3)) * np.dot(NP_U, np.dot(SC_U, NP_U.conj().T)) + bsm_ham = sm_ham + bsm_term + eg_vector = cardano_eqn(bsm_ham) + + if check_uni: + test_unitarity(eg_vector, rse=True, epsilon=epsilon) + return eg_vector + + +def flux_averaged_BSMu(theta, args, spectral_index, llh_paramset): + if len(theta) != len(llh_paramset): + raise AssertionError( + 'Length of MCMC scan is not the same as the input ' + 'params\ntheta={0}\nparamset]{1}'.format(theta, llh_paramset) + ) + + for idx, param in enumerate(llh_paramset): + param.value = theta[idx] + + bin_centers = np.sqrt(args.binning[:-1]*args.binning[1:]) + bin_width = np.abs(np.diff(args.binning)) + + source_flux = np.array( + [fr * np.power(bin_centers, spectral_index) + for fr in args.source_ratio] + ).T + + bsm_angles = llh_paramset.from_tag( + [ParamTag.SCALE, ParamTag.MMANGLES], values=True + ) + + m_eig_names = ['m21_2', 'm3x_2'] + ma_names = ['s_12_2', 'c_13_4', 's_23_2', 'dcp'] + + if set(m_eig_names+ma_names).issubset(set(llh_paramset.names)): + mass_eigenvalues = [x.value for x in llh_paramset if x.name in m_eig_names] + sm_u = angles_to_u( + [x.value for x in llh_paramset if x.name in ma_names] + ) + else: + mass_eigenvalues = MASS_EIGENVALUES + sm_u = NUFIT_U + + if args.no_bsm: + fr = u_to_fr(source_flux, np.array(sm_u, dtype=np.complex256)) + else: + mf_perbin = [] + for i_sf, sf_perbin in enumerate(source_flux): + u = params_to_BSMu( + bsm_angles = bsm_angles, + dim = args.dimension, + energy = bin_centers[i_sf], + mass_eigenvalues = mass_eigenvalues, + sm_u = sm_u, + no_bsm = args.no_bsm, + texture = args.texture, + ) + fr = u_to_fr(sf_perbin, u) + mf_perbin.append(fr) + measured_flux = np.array(mf_perbin).T + intergrated_measured_flux = np.sum(measured_flux * bin_width, axis=1) + averaged_measured_flux = (1./(args.binning[-1] - args.binning[0])) * \ + intergrated_measured_flux + fr = averaged_measured_flux / np.sum(averaged_measured_flux) + return fr + + +def test_unitarity(x, prnt=False, rse=False, epsilon=None): + """Test the unitarity of a matrix. + + Parameters + ---------- + x : numpy ndarray + Matrix to evaluate + + prnt : bool + Print the result + + rse : bool + Raise Assertion if matrix is not unitary + + Returns + ---------- + numpy ndarray + + Examples + ---------- + >>> from fr import test_unitarity + >>> x = np.identity(3) + >>> print test_unitarity(x) + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + """ + f = np.abs(np.dot(x, x.conj().T), dtype=DTYPE) + if prnt: + print 'Unitarity test:\n{0}'.format(f) + if rse: + if not np.abs(np.trace(f) - 3.) < epsilon or \ + not np.abs(np.sum(f) - 3.) < epsilon: + raise AssertionError( + 'Matrix is not unitary!\nx\n{0}\ntest ' + 'u\n{1}'.format(x, f) + ) + return f + + +def u_to_fr(source_fr, matrix): + """Compute the observed flavour ratio assuming decoherence. 
+ + Parameters + ---------- + source_fr : list, length = 3 + Source flavour ratio components + + matrix : numpy ndarray, dimension 3 + Mixing matrix + + Returns + ---------- + Measured flavour ratio + + Examples + ---------- + >>> from fr import params_to_BSMu, u_to_fr + >>> print u_to_fr((1, 2, 0), params_to_BSMu((0.2, 0.3, 0.5, 1.5, -20), 3, 1000)) + array([ 0.33740075, 0.33176584, 0.33083341]) + + """ + try: + composition = np.einsum( + 'ai, bi, a -> b', np.abs(matrix)**2, np.abs(matrix)**2, source_fr, + ) + except: + matrix = np.array(matrix, dtype=np.complex256) + composition = np.einsum( + 'ai, bi, a -> b', np.abs(matrix)**2, np.abs(matrix)**2, source_fr, + ) + pass + + ratio = composition / np.sum(source_fr) + return ratio diff --git a/golemflavor/gf.py b/golemflavor/gf.py new file mode 100644 index 0000000..de21cc5 --- /dev/null +++ b/golemflavor/gf.py @@ -0,0 +1,156 @@ +# author : S. Mandalia +# s.p.mandalia@qmul.ac.uk +# +# date : March 17, 2018 + +""" +Useful GolemFit wrappers for the BSM flavour ratio analysis +""" + +from __future__ import absolute_import, division + +from functools import partial + +import numpy as np + +try: + import GolemFitPy as gf +except: + print 'Running without GolemFit' + pass + +from utils.enums import DataType, Likelihood, SteeringCateg +from utils.misc import enum_parse, parse_bool, thread_factors +from utils.param import ParamSet + + +FITTER = None + + +def fit_flags(llh_paramset): + default_flags = { + # False means it's not fixed in minimization + 'astroFlavorAngle1' : True, + 'astroFlavorAngle2' : True, + 'convNorm' : True, + 'promptNorm' : True, + 'muonNorm' : True, + 'astroNorm' : True, + 'astroParticleBalance' : True, + # 'astroDeltaGamma' : True, + 'cutoffEnergy' : True, + 'CRDeltaGamma' : True, + 'piKRatio' : True, + 'NeutrinoAntineutrinoRatio' : True, + 'darkNorm' : True, + 'domEfficiency' : True, + 'holeiceForward' : True, + 'anisotropyScale' : True, + 'astroNormSec' : True, + 'astroDeltaGammaSec' : True + } + flags = gf.FitParametersFlag() + gf_nuisance = [] + for param in llh_paramset: + if param.name in default_flags: + print 'Setting param {0:<15} to float in the ' \ + 'minimisation'.format(param.name) + flags.__setattr__(param.name, False) + gf_nuisance.append(param) + return flags, ParamSet(gf_nuisance) + + +def steering_params(args): + steering_categ = args.ast + params = gf.SteeringParams(gf.sampleTag.MagicTau) + params.quiet = False + if args.debug: + params.fastmode = False + else: + params.fastmode = True + params.simToLoad= steering_categ.name.lower() + params.evalThreads = args.threads + + if hasattr(args, 'binning'): + params.minFitEnergy = args.binning[0] # GeV + params.maxFitEnergy = args.binning[-1] # GeV + else: + params.minFitEnergy = 6E4 # GeV + params.maxFitEnergy = 1E7 # GeV + params.load_data_from_text_file = False + params.do_HESE_reshuffle=False + params.use_legacy_selfveto_calculation = False + + return params + + +def setup_asimov(params): + print 'Injecting the model', params + asimov_params = gf.FitParameters(gf.sampleTag.MagicTau) + for parm in params: + asimov_params.__setattr__(parm.name, float(parm.value)) + FITTER.SetupAsimov(asimov_params) + + +def setup_realisation(params, seed): + print 'Injecting the model', params + asimov_params = gf.FitParameters(gf.sampleTag.MagicTau) + for parm in params: + asimov_params.__setattr__(parm.name, float(parm.value)) + FITTER.Swallow(FITTER.SpitRealization(asimov_params, seed)) + + +def setup_fitter(args, asimov_paramset): + global FITTER + datapaths = gf.DataPaths() 
+ sparams = steering_params(args) + npp = gf.NewPhysicsParams() + FITTER = gf.GolemFit(datapaths, sparams, npp) + if args.data is DataType.ASIMOV: + setup_asimov(FITTER, asimov_paramset) + elif args.data is DataType.REALISATION: + seed = args.seed if args.seed is not None else 1 + setup_realisation(FITTER, asimov_paramset, seed) + elif args.data is DataType.REAL: + print 'Using MagicTau DATA' + + +def get_llh(params): + fitparams = gf.FitParameters(gf.sampleTag.MagicTau) + for parm in params: + fitparams.__setattr__(parm.name, float(parm.value)) + llh = -FITTER.EvalLLH(fitparams) + return llh + + +def get_llh_freq(params): + print 'setting to {0}'.format(params) + fitparams = gf.FitParameters(gf.sampleTag.MagicTau) + for parm in params: + fitparams.__setattr__(parm.name, float(parm.value)) + FITTER.SetFitParametersSeed(fitparams) + llh = -FITTER.MinLLH().likelihood + return llh + + +def data_distributions(): + hdat = FITTER.GetDataDistribution() + binedges = np.asarray( + [FITTER.GetZenithBinsData(), FITTER.GetEnergyBinsData()] + ) + return hdat, binedges + + +def gf_argparse(parser): + parser.add_argument( + '--debug', default='False', type=parse_bool, help='Run without fastmode' + ) + parser.add_argument( + '--data', default='asimov', type=partial(enum_parse, c=DataType), + choices=DataType, help='select datatype' + ) + parser.add_argument( + '--ast', default='p2_0', type=partial(enum_parse, c=SteeringCateg), + choices=SteeringCateg, + help='use asimov/fake dataset with specific steering' + ) diff --git a/golemflavor/llh.py b/golemflavor/llh.py new file mode 100644 index 0000000..5a0eea7 --- /dev/null +++ b/golemflavor/llh.py @@ -0,0 +1,111 @@ +# author : S. Mandalia +# s.p.mandalia@qmul.ac.uk +# +# date : April 04, 2018 + +""" +Likelihood functions for the BSM flavour ratio analysis +""" + +from __future__ import absolute_import, division + +from copy import deepcopy +from functools import partial + +import numpy as np +import scipy +from scipy.stats import multivariate_normal, truncnorm + +from utils import fr as fr_utils +from utils import gf as gf_utils +from utils.enums import Likelihood, ParamTag, PriorsCateg, StatCateg +from utils.misc import enum_parse, gen_identifier, parse_bool + + +def GaussianBoundedRV(loc=0., sigma=1., lower=-np.inf, upper=np.inf): + """Normalised gaussian bounded between lower and upper values""" + low, up = (lower - loc) / sigma, (upper - loc) / sigma + g = scipy.stats.truncnorm(loc=loc, scale=sigma, a=low, b=up) + return g + + +def multi_gaussian(fr, fr_bf, sigma, offset=-320): + """Multivariate gaussian likelihood.""" + cov_fr = np.identity(3) * sigma + return np.log(multivariate_normal.pdf(fr, mean=fr_bf, cov=cov_fr)) + offset + + +def llh_argparse(parser): + parser.add_argument( + '--stat-method', default='bayesian', + type=partial(enum_parse, c=StatCateg), choices=StatCateg, + help='Statistical method to employ' + ) + + +def lnprior(theta, paramset): + """Priors on theta.""" + if len(theta) != len(paramset): + raise AssertionError( + 'Length of MCMC scan is not the same as the input ' + 'params\ntheta={0}\nparamset={1}'.format(theta, paramset) + ) + for idx, param in enumerate(paramset): + param.value = theta[idx] + ranges = paramset.ranges + for value, range in zip(theta, ranges): + if range[0] <= value <= range[1]: + pass + else: return -np.inf + + prior = 0 + for param in paramset: + if param.prior is PriorsCateg.GAUSSIAN: + prior += GaussianBoundedRV( + loc=param.nominal_value, sigma=param.std + ).logpdf(param.value) + elif param.prior is 
PriorsCateg.LIMITEDGAUSS: + prior += GaussianBoundedRV( + loc=param.nominal_value, sigma=param.std, + lower=param.ranges[0], upper=param.ranges[1] + ).logpdf(param.value) + return prior + + +def triangle_llh(theta, args, asimov_paramset, llh_paramset): + """Log likelihood function for a given theta.""" + if len(theta) != len(llh_paramset): + raise AssertionError( + 'Length of MCMC scan is not the same as the input ' + 'params\ntheta={0}\nparamset]{1}'.format(theta, llh_paramset) + ) + hypo_paramset = asimov_paramset + for param in llh_paramset.from_tag(ParamTag.NUISANCE): + hypo_paramset[param.name].value = param.value + + spectral_index = -hypo_paramset['astroDeltaGamma'].value + # Assigning llh_paramset values from theta happens in this function. + fr = fr_utils.flux_averaged_BSMu(theta, args, spectral_index, llh_paramset) + + flavour_angles = fr_utils.fr_to_angles(fr) + # print 'flavour_angles', map(float, flavour_angles) + for idx, param in enumerate(hypo_paramset.from_tag(ParamTag.BESTFIT)): + param.value = flavour_angles[idx] + + if args.likelihood is Likelihood.GOLEMFIT: + llh = gf_utils.get_llh(hypo_paramset) + elif args.likelihood is Likelihood.GF_FREQ: + llh = gf_utils.get_llh_freq(hypo_paramset) + return llh + + +def ln_prob(theta, args, asimov_paramset, llh_paramset): + dc_asimov_paramset = deepcopy(asimov_paramset) + dc_llh_paramset = deepcopy(llh_paramset) + lp = lnprior(theta, paramset=dc_llh_paramset) + if not np.isfinite(lp): + return -np.inf + return lp + triangle_llh( + theta, args=args, asimov_paramset=dc_asimov_paramset, + llh_paramset=dc_llh_paramset + ) diff --git a/golemflavor/mcmc.py b/golemflavor/mcmc.py new file mode 100644 index 0000000..49e5022 --- /dev/null +++ b/golemflavor/mcmc.py @@ -0,0 +1,120 @@ +# author : S. Mandalia +# s.p.mandalia@qmul.ac.uk +# +# date : March 17, 2018 + +""" +Useful functions to use an MCMC for the BSM flavour ratio analysis +""" + +from __future__ import absolute_import, division + +from functools import partial + +import emcee +import tqdm + +import numpy as np + +from utils.enums import MCMCSeedType +from utils.misc import enum_parse, make_dir, parse_bool + + +def mcmc(p0, ln_prob, ndim, nwalkers, burnin, nsteps, args, threads=1): + """Run the MCMC.""" + sampler = emcee.EnsembleSampler( + nwalkers, ndim, ln_prob, threads=threads + ) + + print "Running burn-in" + for result in tqdm.tqdm(sampler.sample(p0, iterations=burnin), total=burnin): + pos, prob, state = result + sampler.reset() + print "Finished burn-in" + args.burnin = False + + print "Running" + for _ in tqdm.tqdm(sampler.sample(pos, iterations=nsteps), total=nsteps): + pass + print "Finished" + + samples = sampler.chain.reshape((-1, ndim)) + print 'acceptance fraction', sampler.acceptance_fraction + print 'sum of acceptance fraction', np.sum(sampler.acceptance_fraction) + print 'np.unique(samples[:,0]).shape', np.unique(samples[:,0]).shape + try: + print 'autocorrelation', sampler.acor + except: + print 'WARNING : NEED TO RUN MORE SAMPLES' + + return samples + + +def mcmc_argparse(parser): + parser.add_argument( + '--run-mcmc', type=parse_bool, default='True', + help='Run the MCMC' + ) + parser.add_argument( + '--burnin', type=int, default=100, + help='Amount to burnin' + ) + parser.add_argument( + '--nwalkers', type=int, default=60, + help='Number of walkers' + ) + parser.add_argument( + '--nsteps', type=int, default=2000, + help='Number of steps to run' + ) + parser.add_argument( + '--mcmc-seed-type', default='uniform', + type=partial(enum_parse, c=MCMCSeedType), 
choices=MCMCSeedType, + help='Type of distrbution to make the initial MCMC seed' + ) + parser.add_argument( + '--plot-angles', type=parse_bool, default='False', + help='Plot MCMC triangle in the angles space' + ) + parser.add_argument( + '--plot-elements', type=parse_bool, default='False', + help='Plot MCMC triangle in the mixing elements space' + ) + + +def flat_seed(paramset, nwalkers): + """Get gaussian seed values for the MCMC.""" + ndim = len(paramset) + low = np.array(paramset.seeds).T[0] + high = np.array(paramset.seeds).T[1] + p0 = np.random.uniform( + low=low, high=high, size=[nwalkers, ndim] + ) + return p0 + + +def gaussian_seed(paramset, nwalkers): + """Get gaussian seed values for the MCMC.""" + ndim = len(paramset) + p0 = np.random.normal( + paramset.values, paramset.stds, size=[nwalkers, ndim] + ) + return p0 + + +def save_chains(chains, outfile): + """Save the chains. + + Parameters + ---------- + chains : numpy ndarray + MCMC chains to save + + outfile : str + Output file location of chains + + """ + make_dir(outfile) + print 'Saving chains to location {0}'.format(outfile+'.npy') + np.save(outfile+'.npy', chains) + diff --git a/golemflavor/misc.py b/golemflavor/misc.py new file mode 100644 index 0000000..630aaf6 --- /dev/null +++ b/golemflavor/misc.py @@ -0,0 +1,226 @@ +# author : S. Mandalia +# s.p.mandalia@qmul.ac.uk +# +# date : March 17, 2018 + +""" +Misc functions for the BSM flavour ratio analysis +""" + +from __future__ import absolute_import, division + +import os +import errno +import multiprocessing +from fractions import gcd + +import argparse +from operator import attrgetter + +import numpy as np + +from utils.enums import str_enum +from utils.enums import DataType, Likelihood, Texture + + +class SortingHelpFormatter(argparse.HelpFormatter): + """Sort argparse help options alphabetically.""" + def add_arguments(self, actions): + actions = sorted(actions, key=attrgetter('option_strings')) + super(SortingHelpFormatter, self).add_arguments(actions) + + +def solve_ratio(fr): + denominator = reduce(gcd, fr) + f = [int(x/denominator) for x in fr] + allow = (1, 2, 0) + if f[0] not in allow or f[1] not in allow or f[2] not in allow: + return '{0:.2f}_{1:.2f}_{2:.2f}'.format(fr[0], fr[1], fr[2]) + else: + return '{0}_{1}_{2}'.format(f[0], f[1], f[2]) + + +def gen_identifier(args): + f = '_DIM{0}'.format(args.dimension) + f += '_sfr_' + solve_ratio(args.source_ratio) + if args.data in [DataType.ASIMOV, DataType.REALISATION]: + f += '_mfr_' + solve_ratio(args.injected_ratio) + if args.texture is not Texture.NONE: + f += '_{0}'.format(str_enum(args.texture)) + return f + + +def gen_outfile_name(args): + """Generate a name for the output file based on the input args. + + Parameters + ---------- + args : argparse + argparse object to print + + """ + return args.outfile + gen_identifier(args) + + +def parse_bool(s): + """Parse a string to a boolean. + + Parameters + ---------- + s : str + String to parse + + Returns + ---------- + bool + + Examples + ---------- + >>> from misc import parse_bool + >>> print parse_bool('true') + True + + """ + if s.lower() == 'true': + return True + elif s.lower() == 'false': + return False + else: + raise ValueError + + +def parse_enum(e): + return '{0}'.format(e).split('.')[1].lower() + + +def print_args(args): + """Print the input arguments. 
+ + Parameters + ---------- + args : argparse + argparse object to print + + """ + arg_vars = vars(args) + for key in sorted(arg_vars): + print '== {0:<25} = {1}'.format(key, arg_vars[key]) + + +def enum_parse(s, c): + return c[s.upper()] + + +def make_dir(outfile): + try: + os.makedirs(outfile[:-len(os.path.basename(outfile))]) + except OSError as exc: # Python >2.5 + if exc.errno == errno.EEXIST and os.path.isdir(outfile[:-len(os.path.basename(outfile))]): + pass + else: + raise + + +def remove_option(parser, arg): + for action in parser._actions: + if (vars(action)['option_strings'] + and vars(action)['option_strings'][0] == arg) \ + or vars(action)['dest'] == arg: + parser._remove_action(action) + + for action in parser._action_groups: + vars_action = vars(action) + var_group_actions = vars_action['_group_actions'] + for x in var_group_actions: + if x.dest == arg: + var_group_actions.remove(x) + return + + +def seed_parse(s): + if s.lower() == 'none': + return None + else: + return int(s) + + +def thread_type(t): + if t.lower() == 'max': + return multiprocessing.cpu_count() + else: + return int(t) + + +def thread_factors(t): + for x in reversed(range(int(np.ceil(np.sqrt(t)))+1)): + if t%x == 0: + return (x, int(t/x)) + + +def centers(x): + return (x[:-1]+x[1:])*0.5 + + +def get_units(dimension): + if dimension == 3: return r' / \:{\rm GeV}' + if dimension == 4: return r'' + if dimension == 5: return r' / \:{\rm GeV}^{-1}' + if dimension == 6: return r' / \:{\rm GeV}^{-2}' + if dimension == 7: return r' / \:{\rm GeV}^{-3}' + if dimension == 8: return r' / \:{\rm GeV}^{-4}' + + +def calc_nbins(x): + n = (np.max(x) - np.min(x)) / (2 * len(x)**(-1./3) * (np.percentile(x, 75) - np.percentile(x, 25))) + return np.floor(n) + + +def calc_bins(x): + nbins = calc_nbins(x) + return np.linspace(np.min(x), np.max(x)+2, num=nbins+1) + + +def most_likely(arr): + """Return the densest region given a 1D array of data.""" + binning = calc_bins(arr) + harr = np.histogram(arr, binning)[0] + return centers(binning)[np.argmax(harr)] + + +def interval(arr, percentile=68.): + """Returns the *percentile* shortest interval around the mode.""" + center = most_likely(arr) + sarr = sorted(arr) + delta = np.abs(sarr - center) + curr_low = np.argmin(delta) + curr_up = curr_low + npoints = len(sarr) + while curr_up - curr_low < percentile/100.*npoints: + if curr_low == 0: + curr_up += 1 + elif curr_up == npoints-1: + curr_low -= 1 + elif sarr[curr_up]-sarr[curr_low-1] < sarr[curr_up+1]-sarr[curr_low]: + curr_low -= 1 + elif sarr[curr_up]-sarr[curr_low-1] > sarr[curr_up+1]-sarr[curr_low]: + curr_up += 1 + elif (curr_up - curr_low) % 2: + # they are equal so step half of the time up and down + curr_low -= 1 + else: + curr_up += 1 + return sarr[curr_low], center, sarr[curr_up] + + +def flat_angles_to_u(x): + """Convert from angles to mixing elements.""" + return abs(angles_to_u(x)).astype(np.float32).flatten().tolist() + + +def myround(x, base=2, up=False, down=False): + if up == down and up is True: assert 0 + if up: return int(base * np.round(float(x)/base-0.5)) + elif down: return int(base * np.round(float(x)/base+0.5)) + else: int(base * np.round(float(x)/base)) + + diff --git a/golemflavor/mn.py b/golemflavor/mn.py new file mode 100644 index 0000000..e3a4a09 --- /dev/null +++ b/golemflavor/mn.py @@ -0,0 +1,106 @@ +# author : S. 
Mandalia +# s.p.mandalia@qmul.ac.uk +# +# date : April 19, 2018 + +""" +Useful functions to use MultiNest for the BSM flavour ratio analysis +""" + +from __future__ import absolute_import, division + +from functools import partial + +import numpy as np + +from pymultinest import analyse, run + +from utils import llh as llh_utils +from utils.misc import gen_identifier, make_dir, parse_bool, solve_ratio + + +def CubePrior(cube, ndim, n_params): + pass + + +def lnProb(cube, ndim, n_params, mn_paramset, llh_paramset, asimov_paramset, + args): + if ndim != len(mn_paramset): + raise AssertionError( + 'Length of MultiNest scan paramset is not the same as the input ' + 'params\ncube={0}\nmn_paramset]{1}'.format(cube, mn_paramset) + ) + pranges = mn_paramset.ranges + for i in xrange(ndim): + mn_paramset[i].value = (pranges[i][1]-pranges[i][0])*cube[i] + pranges[i][0] + for pm in mn_paramset.names: + llh_paramset[pm].value = mn_paramset[pm].value + theta = llh_paramset.values + llh = llh_utils.ln_prob( + theta=theta, + args=args, + asimov_paramset=asimov_paramset, + llh_paramset=llh_paramset, + ) + return llh + + +def mn_argparse(parser): + parser.add_argument( + '--mn-live-points', type=int, default=3000, + help='Number of live points for MultiNest evaluations' + ) + parser.add_argument( + '--mn-tolerance', type=float, default=0.01, + help='Tolerance for MultiNest' + ) + parser.add_argument( + '--mn-efficiency', type=float, default=0.3, + help='Sampling efficiency for MultiNest' + ) + parser.add_argument( + '--mn-output', type=str, default='./mnrun/', + help='Folder to store MultiNest evaluations' + ) + parser.add_argument( + '--run-mn', type=parse_bool, default='True', + help='Run MultiNest' + ) + + +def mn_evidence(mn_paramset, llh_paramset, asimov_paramset, args, prefix='mn'): + """Run the MultiNest algorithm to calculate the evidence.""" + n_params = len(mn_paramset) + + for n in mn_paramset.names: + llh_paramset[n].value = mn_paramset[n].value + + lnProbEval = partial( + lnProb, + mn_paramset = mn_paramset, + llh_paramset = llh_paramset, + asimov_paramset = asimov_paramset, + args = args, + ) + + if args.run_mn: + make_dir(prefix) + print 'Running evidence calculation for {0}'.format(prefix) + run( + LogLikelihood = lnProbEval, + Prior = CubePrior, + n_dims = n_params, + n_live_points = args.mn_live_points, + evidence_tolerance = args.mn_tolerance, + sampling_efficiency = args.mn_efficiency, + outputfiles_basename = prefix, + importance_nested_sampling = True, + # resume = False, + resume = True, + verbose = True + ) + + analyser = analyse.Analyzer( + outputfiles_basename=prefix, n_params=n_params + ) + return analyser.get_stats()['global evidence'] diff --git a/golemflavor/param.py b/golemflavor/param.py new file mode 100644 index 0000000..2378758 --- /dev/null +++ b/golemflavor/param.py @@ -0,0 +1,213 @@ +# author : S. 
Mandalia +# s.p.mandalia@qmul.ac.uk +# +# date : April 19, 2018 + +""" +Param class and functions for the BSM flavour ratio analysis +""" + +from __future__ import absolute_import, division + +import sys + +from collections import Sequence +from copy import deepcopy + +import numpy as np + +from utils.fr import fr_to_angles +from utils.enums import DataType, Likelihood, ParamTag, PriorsCateg + + +class Param(object): + """Parameter class to store parameters.""" + def __init__(self, name, value, ranges, prior=None, seed=None, std=None, + tex=None, tag=None): + self._prior = None + self._seed = None + self._ranges = None + self._tex = None + self._tag = None + + self.name = name + self.value = value + self.nominal_value = deepcopy(value) + self.prior = prior + self.ranges = ranges + self.seed = seed + self.std = std + self.tex = tex + self.tag = tag + + @property + def ranges(self): + return tuple(self._ranges) + + @ranges.setter + def ranges(self, values): + self._ranges = [val for val in values] + + @property + def prior(self): + return self._prior + + @prior.setter + def prior(self, value): + if value is None: + self._prior = PriorsCateg.UNIFORM + else: + assert value in PriorsCateg + self._prior = value + + @property + def seed(self): + if self._seed is None: return self.ranges + return tuple(self._seed) + + @seed.setter + def seed(self, values): + if values is None: return + self._seed = [val for val in values] + + @property + def tex(self): + return r'{0}'.format(self._tex) + + @tex.setter + def tex(self, t): + self._tex = t if t is not None else r'{\rm %s}' % self.name + + @property + def tag(self): + return self._tag + + @tag.setter + def tag(self, t): + if t is None: self._tag = ParamTag.NONE + else: + assert t in ParamTag + self._tag = t + + +class ParamSet(Sequence): + """Container class for a set of parameters.""" + def __init__(self, *args): + param_sequence = [] + for arg in args: + try: + param_sequence.extend(arg) + except TypeError: + param_sequence.append(arg) + + if len(param_sequence) != 0: + # Disallow duplicated params + all_names = [p.name for p in param_sequence] + unique_names = set(all_names) + if len(unique_names) != len(all_names): + duplicates = set([x for x in all_names if all_names.count(x) > 1]) + raise ValueError('Duplicate definitions found for param(s): ' + + ', '.join(str(e) for e in duplicates)) + + # Elements of list must be Param type + assert all([isinstance(x, Param) for x in param_sequence]), \ + 'All params must be of type "Param"' + + self._params = param_sequence + + def __len__(self): + return len(self._params) + + def __getitem__(self, i): + if isinstance(i, int): + return self._params[i] + elif isinstance(i, basestring): + return self._by_name[i] + + def __getattr__(self, attr): + return super(ParamSet, self).__getattribute__(attr) + + def __iter__(self): + return iter(self._params) + + def __str__(self): + o = '\n' + for obj in self._params: + o += '== {0:<15} = {1:<15}, tag={2:<15}\n'.format( + obj.name, obj.value, obj.tag + ) + return o + + @property + def _by_name(self): + return {obj.name: obj for obj in self._params} + + @property + def names(self): + return tuple([obj.name for obj in self._params]) + + @property + def labels(self): + return tuple([obj.tex for obj in self._params]) + + @property + def values(self): + return tuple([obj.value for obj in self._params]) + + @property + def nominal_values(self): + return tuple([obj.nominal_value for obj in self._params]) + + @property + def seeds(self): + return tuple([obj.seed for obj in 
self._params]) + + @property + def ranges(self): + return tuple([obj.ranges for obj in self._params]) + + @property + def stds(self): + return tuple([obj.std for obj in self._params]) + + @property + def tags(self): + return tuple([obj.tag for obj in self._params]) + + @property + def params(self): + return self._params + + def to_dict(self): + return {obj.name: obj.value for obj in self._params} + + def from_tag(self, tag, values=False, index=False, invert=False): + if values and index: assert 0 + tag = np.atleast_1d(tag) + if not invert: + ps = [(idx, obj) for idx, obj in enumerate(self._params) + if obj.tag in tag] + else: + ps = [(idx, obj) for idx, obj in enumerate(self._params) + if obj.tag not in tag] + if values: + return tuple([io[1].value for io in ps]) + elif index: + return tuple([io[0] for io in ps]) + else: + return ParamSet([io[1] for io in ps]) + + def remove_params(self, params): + rm_paramset = [] + for parm in self.params: + if parm.name not in params.names: + rm_paramset.append(parm) + return ParamSet(rm_paramset) + + def extend(self, p): + param_sequence = self.params + if isinstance(p, Param): + param_sequence.append(p) + elif isinstance(p, ParamSet): + param_sequence.extend(p.params) + return ParamSet(param_sequence) diff --git a/golemflavor/plot.py b/golemflavor/plot.py new file mode 100644 index 0000000..d19a52e --- /dev/null +++ b/golemflavor/plot.py @@ -0,0 +1,1030 @@ +# author : S. Mandalia +# s.p.mandalia@qmul.ac.uk +# +# date : March 19, 2018 + +""" +Plotting functions for the BSM flavour ratio analysis +""" + +from __future__ import absolute_import, division + +import os +import socket +from copy import deepcopy + +import numpy as np +import numpy.ma as ma +from scipy.interpolate import splev, splprep +from scipy.ndimage.filters import gaussian_filter + +import matplotlib as mpl +import matplotlib.patches as patches +import matplotlib.gridspec as gridspec +mpl.use('Agg') + +from matplotlib import rc +from matplotlib import pyplot as plt +from matplotlib.offsetbox import AnchoredText +from matplotlib.lines import Line2D +from matplotlib.patches import Patch +from matplotlib.patches import Arrow + +tRed = list(np.array([226,101,95]) / 255.) +tBlue = list(np.array([96,149,201]) / 255.) +tGreen = list(np.array([170,196,109]) / 255.) + +import getdist +from getdist import plots, mcsamples + +import ternary +from ternary.heatmapping import polygon_generator + +import shapely.geometry as geometry + +from shapely.ops import cascaded_union, polygonize +from scipy.spatial import Delaunay + +from utils.enums import DataType, str_enum +from utils.enums import Likelihood, ParamTag, StatCateg, Texture +from utils.misc import get_units, make_dir, solve_ratio, interval +from utils.fr import angles_to_u, angles_to_fr, SCALE_BOUNDARIES + + +BAYES_K = 1. # Strong degree of belief. +# BAYES_K = 3/2. # Very strong degree of belief. +# BAYES_K = 2. 
# Decisive degree of belief + + +LV_ATMO_90PC_LIMITS = { + 3: (2E-24, 1E-1), + 4: (2.7E-28, 3.16E-25), + 5: (1.5E-32, 1.12E-27), + 6: (9.1E-37, 2.82E-30), + 7: (3.6E-41, 1.77E-32), + 8: (1.4E-45, 1.00E-34) +} + + +PS = 8.203E-20 # GeV^{-1} +PLANCK_SCALE = { + 5: PS, + 6: PS**2, + 7: PS**3, + 8: PS**4 +} + + +if os.path.isfile('./plot_llh/paper.mplstyle'): + plt.style.use('./plot_llh/paper.mplstyle') +elif os.environ.get('GOLEMSOURCEPATH') is not None: + plt.style.use(os.environ['GOLEMSOURCEPATH']+'/GolemFit/scripts/paper/paper.mplstyle') +if 'submitter' in socket.gethostname(): + rc('text', usetex=False) + +mpl.rcParams['text.latex.preamble'] = [ + r'\usepackage{xcolor}', + r'\usepackage{amsmath}', + r'\usepackage{amssymb}'] +mpl.rcParams['text.latex.unicode'] = True + + +def gen_figtext(args): + """Generate the figure text.""" + t = r'$' + if args.data is DataType.REAL: + t += r'\textbf{IceCube\:Preliminary}' + '$\n$' + elif args.data in [DataType.ASIMOV, DataType.REALISATION]: + t += r'{\rm\bf IceCube\:Simulation}' + '$\n$' + t += r'\rm{Injected\:composition}'+r'\:=\:({0})_\oplus'.format( + solve_ratio(args.injected_ratio).replace('_', ':') + ) + '$\n$' + t += r'{\rm Source\:composition}'+r'\:=\:({0})'.format( + solve_ratio(args.source_ratio).replace('_', ':') + ) + r'_\text{S}' + t += '$\n$' + r'{\rm Dimension}'+r' = {0}$'.format(args.dimension) + return t + + +def texture_label(x, dim): + cpt = r'c' if dim % 2 == 0 else r'a' + if x == Texture.OEU: + # return r'$\mathcal{O}_{e\mu}$' + return r'$\mathring{'+cpt+r'}_{e\mu}^{('+str(int(dim))+r')}$' + elif x == Texture.OET: + # return r'$\mathcal{O}_{e\tau}$' + return r'$\mathring{'+cpt+r'}_{\tau e}^{('+str(int(dim))+r')}$' + elif x == Texture.OUT: + # return r'$\mathcal{O}_{\mu\tau}$' + return r'$\mathring{'+cpt+r'}_{\mu\tau}^{('+str(int(dim))+r')}$' + else: + raise AssertionError + + +def cmap_discretize(cmap, N): + colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.))) + colors_rgba = cmap(colors_i) + indices = np.linspace(0, 1., N+1) + cdict = {} + for ki,key in enumerate(('red','green','blue')): + cdict[key] = [ (indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in xrange(N+1) ] + # Return colormap object. + return mpl.colors.LinearSegmentedColormap(cmap.name + "_%d"%N, cdict, 1024) + + +def get_limit(scales, statistic, args, mask_initial=False, return_interp=False): + max_st = np.max(statistic) + print 'scales, stat', zip(scales, statistic) + if args.stat_method is StatCateg.BAYESIAN: + if (statistic[0] - max_st) > np.log(10**(BAYES_K)): + raise AssertionError('Discovered LV!') + + try: + tck, u = splprep([scales, statistic], s=0) + except: + print 'Failed to spline' + # return None + raise + sc, st = splev(np.linspace(0, 1, 1000), tck) + + if mask_initial: + scales_rm = sc[sc >= scales[1]] + statistic_rm = st[sc >= scales[1]] + else: + scales_rm = sc + statistic_rm = st + + min_idx = np.argmin(scales) + null = statistic[min_idx] + # if np.abs(statistic_rm[0] - null) > 0.8: + # print 'Warning, null incompatible with smallest scanned scale! 
For ' \ + # 'DIM {0} [{1}, {2}, {3}]!'.format( + # args.dimension, *args.source_ratio + # ) + # null = statistic_rm[0] + if args.stat_method is StatCateg.BAYESIAN: + reduced_ev = -(statistic_rm - null) + print '[reduced_ev > np.log(10**(BAYES_K))]', np.sum([reduced_ev > np.log(10**(BAYES_K))]) + al = scales_rm[reduced_ev > np.log(10**(BAYES_K))] + else: + assert 0 + if len(al) == 0: + print 'No points for DIM {0} [{1}, {2}, {3}]!'.format( + args.dimension, *args.source_ratio + ) + return None + re = -(statistic-null)[scales > al[0]] + if np.sum(re < np.log(10**(BAYES_K)) - 0.1) >= 2: + print 'Warning, peaked contour does not exclude large scales! For ' \ + 'DIM {0} [{1}, {2}, {3}]!'.format( + args.dimension, *args.source_ratio + ) + return None + if np.sum(re >= np.log(10**(BAYES_K)) + 0.0) < 2: + print 'Warning, only single point above threshold! For ' \ + 'DIM {0} [{1}, {2}, {3}]!'.format( + args.dimension, *args.source_ratio + ) + return None + + if return_interp: + return (scales_rm, reduced_ev) + + # Divide by 2 to convert to standard SME coefficient + lim = al[0] - np.log10(2.) + # lim = al[0] + print 'limit = {0}'.format(lim) + return lim + + +def heatmap(data, scale, vmin=None, vmax=None, style='triangular'): + for k, v in data.items(): + data[k] = np.array(v) + style = style.lower()[0] + if style not in ["t", "h", 'd']: + raise ValueError("Heatmap style must be 'triangular', 'dual-triangular', or 'hexagonal'") + + vertices_values = polygon_generator(data, scale, style) + + vertices = [] + for v, value in vertices_values: + vertices.append(v) + return vertices + + +def get_tax(ax, scale, ax_labels, rot_ax_labels=False, fontsize=23): + ax.set_aspect('equal') + + # Boundary and Gridlines + fig, tax = ternary.figure(ax=ax, scale=scale) + + # Draw Boundary and Gridlines + tax.boundary(linewidth=2.0) + tax.gridlines(color='grey', multiple=scale/5., linewidth=0.5, alpha=0.7, ls='--') + # tax.gridlines(color='grey', multiple=scale/10., linewidth=0.2, alpha=1, ls=':') + + # Set Axis labels and Title + if rot_ax_labels: roty, rotz = (-60, 60) + else: roty, rotz = (0, 0) + tax.bottom_axis_label( + ax_labels[0], fontsize=fontsize+4, + position=(0.55, -0.10/2, 0.5), rotation=0 + ) + tax.right_axis_label( + ax_labels[1], fontsize=fontsize+4, + position=(2./5+0.1, 3./5+0.06, 0), rotation=roty + ) + tax.left_axis_label( + ax_labels[2], fontsize=fontsize+4, + position=(-0.15, 3./5+0.1, 2./5), rotation=rotz + ) + + # Remove default Matplotlib axis + tax.get_axes().axis('off') + tax.clear_matplotlib_ticks() + + # Set ticks + ticks = np.linspace(0, 1, 6) + tax.ticks(ticks=ticks, locations=ticks*scale, axis='lr', linewidth=1, + offset=0.03, fontsize=fontsize, tick_formats='%.1f') + tax.ticks(ticks=ticks, locations=ticks*scale, axis='b', linewidth=1, + offset=0.02, fontsize=fontsize, tick_formats='%.1f') + # tax.ticks() + + tax._redraw_labels() + + return tax + + +def project(p): + """Convert from flavour to cartesian.""" + a, b, c = p + x = a + b/2. + y = b * np.sqrt(3)/2. + return [x, y] + + +def project_toflavour(p, nbins): + """Convert from cartesian to flavour space.""" + x, y = p + b = y / (np.sqrt(3)/2.) + a = x - b/2. + return [a, b, nbins-a-b] + + +def tax_fill(ax, points, nbins, **kwargs): + pol = np.array(map(project, points)) + ax.fill(pol.T[0]*nbins, pol.T[1]*nbins, **kwargs) + + +def alpha_shape(points, alpha): + """ + Compute the alpha shape (concave hull) of a set + of points. + @param points: Iterable container of points. 
+ @param alpha: alpha value to influence the + gooeyness of the border. Smaller numbers + don't fall inward as much as larger numbers. + Too large, and you lose everything! + """ + if len(points) < 4: + # When you have a triangle, there is no sense + # in computing an alpha shape. + return geometry.MultiPoint(list(points)).convex_hull + def add_edge(edges, edge_points, coords, i, j): + """ + Add a line between the i-th and j-th points, + if not in the list already + """ + if (i, j) in edges or (j, i) in edges: + # already added + return + edges.add( (i, j) ) + edge_points.append(coords[ [i, j] ]) + coords = np.array([point.coords[0] + for point in points]) + tri = Delaunay(coords) + edges = set() + edge_points = [] + # loop over triangles: + # ia, ib, ic = indices of corner points of the + # triangle + for ia, ib, ic in tri.vertices: + pa = coords[ia] + pb = coords[ib] + pc = coords[ic] + # Lengths of sides of triangle + a = np.sqrt((pa[0]-pb[0])**2 + (pa[1]-pb[1])**2) + b = np.sqrt((pb[0]-pc[0])**2 + (pb[1]-pc[1])**2) + c = np.sqrt((pc[0]-pa[0])**2 + (pc[1]-pa[1])**2) + # Semiperimeter of triangle + s = (a + b + c)/2.0 + # Area of triangle by Heron's formula + area = np.sqrt(s*(s-a)*(s-b)*(s-c)) + circum_r = a*b*c/(4.0*area) + # Here's the radius filter. + #print circum_r + if circum_r < 1.0/alpha: + add_edge(edges, edge_points, coords, ia, ib) + add_edge(edges, edge_points, coords, ib, ic) + add_edge(edges, edge_points, coords, ic, ia) + m = geometry.MultiLineString(edge_points) + triangles = list(polygonize(m)) + return cascaded_union(triangles), edge_points + + +def flavour_contour(frs, nbins, coverage, ax=None, smoothing=0.4, + hist_smooth=0.05, plot=True, fill=False, oversample=1., + delaunay=False, d_alpha=1.5, d_gauss=0.08, debug=False, + **kwargs): + """Plot the flavour contour for a specified coverage.""" + # Histogram in flavour space + os_nbins = nbins * oversample + H, b = np.histogramdd( + (frs[:,0], frs[:,1], frs[:,2]), + bins=(os_nbins+1, os_nbins+1, os_nbins+1), + range=((0, 1), (0, 1), (0, 1)) + ) + H = H / np.sum(H) + + # 3D smoothing + H_s = gaussian_filter(H, sigma=hist_smooth) + + # Finding coverage + H_r = np.ravel(H_s) + H_rs = np.argsort(H_r)[::-1] + H_crs = np.cumsum(H_r[H_rs]) + thres = np.searchsorted(H_crs, coverage/100.) 
+ mask_r = np.zeros(H_r.shape) + mask_r[H_rs[:thres]] = 1 + mask = mask_r.reshape(H_s.shape) + + # Get vertices inside covered region + binx = np.linspace(0, 1, os_nbins+1) + interp_dict = {} + for i in xrange(len(binx)): + for j in xrange(len(binx)): + for k in xrange(len(binx)): + if mask[i][j][k] == 1: + interp_dict[(i, j, k)] = H_s[i, j, k] + vertices = np.array(heatmap(interp_dict, os_nbins)) + points = vertices.reshape((len(vertices)*3, 2)) + if debug: + ax.scatter(*(points/float(oversample)).T, marker='o', s=3, alpha=1.0, color=kwargs['color'], zorder=9) + + pc = geometry.MultiPoint(points) + if not delaunay: + # Convex full to find points forming exterior bound + polygon = pc.convex_hull + ex_cor = np.array(list(polygon.exterior.coords)) + else: + # Delaunay + concave_hull, edge_points = alpha_shape(pc, alpha=d_alpha) + polygon = geometry.Polygon(concave_hull.buffer(1)) + if d_gauss == 0.: + ex_cor = np.array(list(polygon.exterior.coords)) + else: + ex_cor = gaussian_filter( + np.array(list(polygon.exterior.coords)), sigma=d_gauss + ) + + # Join points with a spline + tck, u = splprep([ex_cor.T[0], ex_cor.T[1]], s=0., per=1, k=1) + xi, yi = map(np.array, splev(np.linspace(0, 1, 300), tck)) + + # Spline again to smooth + if smoothing != 0: + tck, u = splprep([xi, yi], s=smoothing, per=1, k=3) + xi, yi = map(np.array, splev(np.linspace(0, 1, 600), tck)) + + xi /= float(oversample) + yi /= float(oversample) + ev_polygon = np.dstack((xi, yi))[0] + + # Remove points interpolated outside flavour triangle + f_ev_polygon = np.array(map(lambda x: project_toflavour(x, nbins), ev_polygon)) + + xf, yf, zf = f_ev_polygon.T + mask = np.array((xf < 0) | (yf < 0) | (zf < 0) | (xf > nbins) | + (yf > nbins) | (zf > nbins)) + ev_polygon = np.dstack((xi[~mask], yi[~mask]))[0] + + # Plot + if plot: + if fill: + ax.fill( + ev_polygon.T[0], ev_polygon.T[1], + label=r'{0}\%'.format(int(coverage)), **kwargs + ) + else: + ax.plot( + ev_polygon.T[0], ev_polygon.T[1], + label=r'{0}\%'.format(int(coverage)), **kwargs + ) + else: + return ev_polygon + +def plot_Tchain(Tchain, axes_labels, ranges): + """Plot the Tchain using getdist.""" + Tsample = mcsamples.MCSamples( + samples=Tchain, labels=axes_labels, ranges=ranges + ) + + Tsample.updateSettings({'contours': [0.90, 0.99]}) + Tsample.num_bins_2D=10 + Tsample.fine_bins_2D=50 + Tsample.smooth_scale_2D=0.05 + + g = plots.getSubplotPlotter() + g.settings.num_plot_contours = 2 + g.settings.axes_fontsize = 10 + g.settings.figure_legend_frame = False + g.settings.lab_fontsize = 20 + g.triangle_plot( + [Tsample], filled=True,# contour_colors=['green', 'lightgreen'] + ) + return g + + +def chainer_plot(infile, outfile, outformat, args, llh_paramset, fig_text=None, + labels=None, ranges=None): + """Make the triangle plot.""" + if hasattr(args, 'plot_elements'): + if not args.plot_angles and not args.plot_elements: + return + elif not args.plot_angles: + return + + if not isinstance(infile, np.ndarray): + raw = np.load(infile) + else: + raw = infile + print 'raw.shape', raw.shape + print 'raw', raw + + make_dir(outfile), make_dir + if fig_text is None: + fig_text = gen_figtext(args) + + if labels is None: axes_labels = llh_paramset.labels + else: axes_labels = labels + if ranges is None: ranges = llh_paramset.ranges + + if args.plot_angles: + print "Making triangle plots" + Tchain = raw + g = plot_Tchain(Tchain, axes_labels, ranges) + + mpl.pyplot.figtext(0.6, 0.7, fig_text, fontsize=20) + + # for i_ax_1, ax_1 in enumerate(g.subplots): + # for i_ax_2, ax_2 in 
enumerate(ax_1): + # if i_ax_1 == i_ax_2: + # itv = interval(Tchain[:,i_ax_1], percentile=90.) + # for l in itv: + # ax_2.axvline(l, color='gray', ls='--') + # ax_2.set_title(r'${0:.2f}_{{{1:.2f}}}^{{+{2:.2f}}}$'.format( + # itv[1], itv[0]-itv[1], itv[2]-itv[1] + # ), fontsize=10) + + # if not args.fix_mixing: + # sc_index = llh_paramset.from_tag(ParamTag.SCALE, index=True) + # itv = interval(Tchain[:,sc_index], percentile=90.) + # mpl.pyplot.figtext( + # 0.5, 0.3, 'Scale 90% Interval = [1E{0}, 1E{1}], Center = ' + # '1E{2}'.format(itv[0], itv[2], itv[1]) + # ) + + for of in outformat: + print 'Saving', outfile+'_angles.'+of + g.export(outfile+'_angles.'+of) + + if not hasattr(args, 'plot_elements'): + return + + if args.plot_elements: + print "Making triangle plots" + if args.fix_mixing_almost: + raise NotImplementedError + nu_index = llh_paramset.from_tag(ParamTag.NUISANCE, index=True) + fr_index = llh_paramset.from_tag(ParamTag.MMANGLES, index=True) + sc_index = llh_paramset.from_tag(ParamTag.SCALE, index=True) + if not args.fix_source_ratio: + sr_index = llh_paramset.from_tag(ParamTag.SRCANGLES, index=True) + + nu_elements = raw[:,nu_index] + fr_elements = np.array(map(flat_angles_to_u, raw[:,fr_index])) + sc_elements = raw[:,sc_index] + if not args.fix_source_ratio: + sr_elements = np.array(map(angles_to_fr, raw[:,sr_index])) + if args.fix_source_ratio: + Tchain = np.column_stack( + [nu_elements, fr_elements, sc_elements] + ) + else: + Tchain = np.column_stack( + [nu_elements, fr_elements, sc_elements, sr_elements] + ) + + trns_ranges = np.array(ranges)[nu_index,].tolist() + trns_axes_labels = np.array(axes_labels)[nu_index,].tolist() + if args.fix_mixing is not MixingScenario.NONE: + trns_axes_labels += \ + [r'\mid \tilde{U}_{e1} \mid' , r'\mid \tilde{U}_{e2} \mid' , r'\mid \tilde{U}_{e3} \mid' , \ + r'\mid \tilde{U}_{\mu1} \mid' , r'\mid \tilde{U}_{\mu2} \mid' , r'\mid \tilde{U}_{\mu3} \mid' , \ + r'\mid \tilde{U}_{\tau1} \mid' , r'\mid \tilde{U}_{\tau2} \mid' , r'\mid \tilde{U}_{\tau3} \mid'] + trns_ranges += [(0, 1)] * 9 + if not args.fix_scale: + trns_axes_labels += [np.array(axes_labels)[sc_index].tolist()] + trns_ranges += [np.array(ranges)[sc_index].tolist()] + if not args.fix_source_ratio: + trns_axes_labels += [r'\phi_e', r'\phi_\mu', r'\phi_\tau'] + trns_ranges += [(0, 1)] * 3 + + g = plot_Tchain(Tchain, trns_axes_labels, trns_ranges) + + if args.data is DataType.REAL: + plt.text(0.8, 0.7, 'IceCube Preliminary', color='red', fontsize=15, + ha='center', va='center') + elif args.data in [DataType.ASIMOV, DataType.REALISATION]: + plt.text(0.8, 0.7, 'IceCube Simulation', color='red', fontsize=15, + ha='center', va='center') + + mpl.pyplot.figtext(0.5, 0.7, fig_text, fontsize=15) + for of in outformat: + print 'Saving', outfile+'_elements'+of + g.export(outfile+'_elements.'+of) + + +def plot_statistic(data, outfile, outformat, args, scale_param, label=None): + """Make MultiNest factor or LLH value plot.""" + print 'Making Statistic plot' + fig_text = gen_figtext(args) + if label is not None: fig_text += '\n' + label + + print 'data', data + print 'data.shape', data.shape + print 'outfile', outfile + try: + scales, statistic = ma.compress_rows(data).T + lim = get_limit(deepcopy(scales), deepcopy(statistic), args, mask_initial=True) + tck, u = splprep([scales, statistic], s=0) + except: + return + sc, st = splev(np.linspace(0, 1, 1000), tck) + scales_rm = sc[sc >= scales[1]] + statistic_rm = st[sc >= scales[1]] + + min_idx = np.argmin(scales) + null = statistic[min_idx] + # 
+def plot_statistic(data, outfile, outformat, args, scale_param, label=None):
+    """Make MultiNest factor or LLH value plot."""
+    print 'Making Statistic plot'
+    fig_text = gen_figtext(args)
+    if label is not None: fig_text += '\n' + label
+
+    print 'data', data
+    print 'data.shape', data.shape
+    print 'outfile', outfile
+    try:
+        scales, statistic = ma.compress_rows(data).T
+        lim = get_limit(deepcopy(scales), deepcopy(statistic), args,
+                        mask_initial=True)
+        tck, u = splprep([scales, statistic], s=0)
+    except:
+        return
+    sc, st = splev(np.linspace(0, 1, 1000), tck)
+    scales_rm = sc[sc >= scales[1]]
+    statistic_rm = st[sc >= scales[1]]
+
+    min_idx = np.argmin(scales)
+    null = statistic[min_idx]
+    # fig_text += '\nnull lnZ = {0:.2f}'.format(null)
+
+    if args.stat_method is StatCateg.BAYESIAN:
+        reduced_ev = -(statistic_rm - null)
+    elif args.stat_method is StatCateg.FREQUENTIST:
+        reduced_ev = -2*(statistic_rm - null)
+
+    fig = plt.figure(figsize=(7, 5))
+    ax = fig.add_subplot(111)
+
+    xlims = SCALE_BOUNDARIES[args.dimension]
+    ax.set_xlim(xlims)
+    ax.set_xlabel(r'${\rm log}_{10}\left[\Lambda^{-1}_{'+ \
+                  r'{0}'.format(args.dimension)+r'}'+ \
+                  get_units(args.dimension)+r'\right]$', fontsize=16)
+
+    if args.stat_method is StatCateg.BAYESIAN:
+        ax.set_ylabel(r'$\text{Bayes\:Factor}\:\left[\text{ln}\left(B_{0/1}\right)\right]$')
+    elif args.stat_method is StatCateg.FREQUENTIST:
+        ax.set_ylabel(r'$-2\Delta {\rm LLH}$')
+
+    # ymin = np.round(np.min(reduced_ev) - 1.5)
+    # ymax = np.round(np.max(reduced_ev) + 1.5)
+    # ax.set_ylim((ymin, ymax))
+
+    ax.scatter(scales[1:], -(statistic[1:]-null), color='r')
+    ax.plot(scales_rm, reduced_ev, color='k', linewidth=1, alpha=1, ls='-')
+
+    ax.axhline(y=np.log(10**(BAYES_K)), color='red', alpha=1., linewidth=1.2, ls='--')
+    ax.axvline(x=lim, color='red', alpha=1., linewidth=1.2, ls='--')
+
+    at = AnchoredText(
+        fig_text, prop=dict(size=10), frameon=True, loc=4
+    )
+    at.patch.set_boxstyle("round,pad=0.1,rounding_size=0.5")
+    ax.add_artist(at)
+    make_dir(outfile)
+    for of in outformat:
+        print 'Saving as {0}'.format(outfile+'.'+of)
+        fig.savefig(outfile+'.'+of, bbox_inches='tight', dpi=150)
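`get_limit` (imported from elsewhere in the package) supplies the vertical exclusion line drawn above, and the surrounding code fixes the convention: the statistic is a ln-evidence, the null is taken at the smallest scanned scale, and the threshold line sits at ln(10**BAYES_K). A sketch of a crossing finder consistent with those conventions — an illustration only, not the package's `get_limit`:

import numpy as np

def crossing_limit_sketch(scales, statistic, bayes_k=1.):
    # ln Bayes factor ln(B_{0/1}) relative to the null (smallest scale).
    null = statistic[np.argmin(scales)]
    reduced_ev = -(statistic - null)
    threshold = np.log(10**bayes_k)
    above = np.where(reduced_ev > threshold)[0]
    if len(above) == 0 or above[0] == 0:
        return None  # no clean crossing within the scanned range
    # Linearly interpolate between the bracketing scan points.
    i = above[0]
    x0, x1 = scales[i-1], scales[i]
    y0, y1 = reduced_ev[i-1], reduced_ev[i]
    return x0 + (threshold - y0) * (x1 - x0) / (y1 - y0)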
+def plot_table_sens(data, outfile, outformat, args, show_lvatmo=True):
+    """Make the table sensitivity plot."""
+    print 'Making TABLE sensitivity plot'
+    argsc = deepcopy(args)
+
+    dims = args.dimensions
+    srcs = args.source_ratios
+    if args.texture is Texture.NONE:
+        textures = [Texture.OET, Texture.OUT]
+    else:
+        textures = [args.texture]
+
+    if len(srcs) > 3:
+        raise NotImplementedError
+
+    xlims = (-60, -20)
+    ylims = (0.5, 1.5)
+
+    colour = {2:'red', 1:'blue', 0:'green'}
+    rgb_co = {2:[1,0,0], 1:[0,0,1], 0:[0.0, 0.5019607843137255, 0.0]}
+
+    fig = plt.figure(figsize=(8, 6))
+    gs = gridspec.GridSpec(len(dims), 1)
+    gs.update(hspace=0.15)
+
+    first_ax = None
+    legend_log = []
+    legend_elements = []
+
+    for idim, dim in enumerate(dims):
+        print '|||| DIM = {0}'.format(dim)
+        argsc.dimension = dim
+        gs0 = gridspec.GridSpecFromSubplotSpec(
+            len(textures), 1, subplot_spec=gs[idim], hspace=0
+        )
+
+        for itex, tex in enumerate(textures):
+            argsc.texture = tex
+            ylabel = texture_label(tex, dim)
+            # if angles == 2 and ian == 0: continue
+            ax = fig.add_subplot(gs0[itex])
+
+            if first_ax is None:
+                first_ax = ax
+
+            ax.set_xlim(xlims)
+            ax.set_ylim(ylims)
+            ax.set_yticks([], minor=True)
+            ax.set_yticks([1.], minor=False)
+            ax.set_yticklabels([ylabel], fontsize=13)
+            ax.yaxis.tick_right()
+            for xmaj in ax.xaxis.get_majorticklocs():
+                ax.axvline(x=xmaj, ls=':', color='gray', alpha=0.2, linewidth=1)
+            ax.get_xaxis().set_visible(False)
+            if itex == len(textures) - 2:
+                ax.spines['bottom'].set_alpha(0.6)
+            elif itex == len(textures) - 1:
+                ax.text(
+                    -0.04, 1.0, '$d = {0}$'.format(dim), fontsize=16,
+                    rotation='90', verticalalignment='center',
+                    transform=ax.transAxes
+                )
+                # dim_label_flag = False
+            ax.spines['top'].set_alpha(0.6)
+            ax.spines['bottom'].set_alpha(0.6)
+
+            for isrc, src in enumerate(srcs):
+                print '== src', src
+                argsc.source_ratio = src
+
+                if dim in PLANCK_SCALE.iterkeys():
+                    ps = np.log10(PLANCK_SCALE[dim])
+                    if ps < xlims[0]:
+                        ax.annotate(
+                            s='', xy=(xlims[0], 1), xytext=(xlims[0]+1, 1),
+                            arrowprops={'arrowstyle': '->, head_length=0.2',
+                                        'lw': 1, 'color':'purple'}
+                        )
+                    elif ps > xlims[1]:
+                        ax.annotate(
+                            s='', xy=(xlims[1]-1, 1), xytext=(xlims[1], 1),
+                            arrowprops={'arrowstyle': '<-, head_length=0.2',
+                                        'lw': 1, 'color':'purple'}
+                        )
+                    else:
+                        ax.axvline(x=ps, color='purple', alpha=1., linewidth=1.5)
+
+                try:
+                    scales, statistic = ma.compress_rows(data[idim][isrc][itex]).T
+                except: continue
+                lim = get_limit(deepcopy(scales), deepcopy(statistic), argsc,
+                                mask_initial=True)
+                if lim is None: continue
+
+                ax.axvline(x=lim, color=colour[isrc], alpha=1., linewidth=1.5)
+                ax.add_patch(patches.Rectangle(
+                    (lim, ylims[0]), 100, np.diff(ylims), fill=True,
+                    facecolor=colour[isrc], alpha=0.3, linewidth=0
+                ))
+
+                if isrc not in legend_log:
+                    legend_log.append(isrc)
+                    label = r'$\left('+r'{0}'.format(solve_ratio(src)).replace('_',':')+ \
+                            r'\right)_{\text{S}}\:\text{at\:source}$'
+                    legend_elements.append(
+                        Patch(facecolor=rgb_co[isrc]+[0.3],
+                              edgecolor=rgb_co[isrc]+[1], label=label)
+                    )
+
+            if itex == len(textures)-1 and show_lvatmo:
+                LV_lim = np.log10(LV_ATMO_90PC_LIMITS[dim])
+                ax.add_patch(patches.Rectangle(
+                    (LV_lim[1], ylims[0]), LV_lim[0]-LV_lim[1], np.diff(ylims),
+                    fill=False, hatch='\\\\'
+                ))
+
+    ax.get_xaxis().set_visible(True)
+    ax.set_xlabel(r'${\rm New\:Physics\:Scale}\:[\:{\rm log}_{10} (\Lambda^{-1}_{d}\:/\:{\rm GeV}^{-d+4})\: ]$',
+                  labelpad=5, fontsize=19)
+    ax.tick_params(axis='x', labelsize=16)
+
+    purple = [0.5019607843137255, 0.0, 0.5019607843137255]
+    if show_lvatmo:
+        legend_elements.append(
+            Patch(facecolor='none', hatch='\\\\', edgecolor='k',
+                  label='IceCube, Nature Phys. 14, 961 (2018)')
+        )
+    legend_elements.append(
+        Patch(facecolor=purple+[0.7], edgecolor=purple+[1],
+              label='Planck Scale Expectation')
+    )
+    legend = first_ax.legend(
+        handles=legend_elements, prop=dict(size=11), loc='upper left',
+        title='Excluded regions', framealpha=1., edgecolor='black',
+        frameon=True
+    )
+    first_ax.set_zorder(10)
+    plt.setp(legend.get_title(), fontsize='11')
+    legend.get_frame().set_linestyle('-')
+
+    ybound = 0.595
+    if args.data is DataType.REAL:
+        # fig.text(0.295, 0.684, 'IceCube Preliminary', color='red', fontsize=13,
+        fig.text(0.278, ybound, r'\bf IceCube Preliminary', color='red',
+                 fontsize=13, ha='center', va='center', zorder=11)
+    elif args.data is DataType.REALISATION:
+        fig.text(0.278, ybound-0.05, r'\bf IceCube Simulation', color='red',
+                 fontsize=13, ha='center', va='center', zorder=11)
+    else:
+        fig.text(0.278, ybound, r'\bf IceCube Simulation', color='red',
+                 fontsize=13, ha='center', va='center', zorder=11)
+
+    make_dir(outfile)
+    for of in outformat:
+        print 'Saving plot as {0}'.format(outfile+'.'+of)
+        fig.savefig(outfile+'.'+of, bbox_inches='tight', dpi=150)
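The indexing `data[idim][isrc][itex]` above implies a five-axis layout: (dimension, source ratio, texture, scan point, 2), with each innermost row holding (log10 scale, statistic) and NaN rows marking failed scan points — hence the `ma.compress_rows` calls. A toy construction of such an array, with shapes and values invented purely for illustration:

import numpy as np
import numpy.ma as ma

n_scan = 10
data = np.full((2, 3, 2, n_scan, 2), np.nan)  # 2 dims, 3 sources, 2 textures
for idim in range(2):
    for isrc in range(3):
        for itex in range(2):
            scales = np.linspace(-40, -24, n_scan)
            stats = -np.random.rand(n_scan)  # placeholder ln-evidences
            data[idim][isrc][itex] = np.column_stack([scales, stats])
data = ma.masked_invalid(data)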
+def plot_x(data, outfile, outformat, args, normalise=False):
+    """Limit plot as a function of the source flavour ratio for each operator
+    texture."""
+    print 'Making X sensitivity plot'
+    dim = args.dimension
+    if dim < 5: normalise = False
+    srcs = args.source_ratios
+    x_arr = np.array([i[0] for i in srcs])
+    if args.texture is Texture.NONE:
+        textures = [Texture.OEU, Texture.OET, Texture.OUT]
+    else:
+        textures = [args.texture]
+
+    # Rearrange data structure
+    r_data = np.full((
+        data.shape[1], data.shape[0], data.shape[2], data.shape[3]
+    ), np.nan)
+
+    for isrc in xrange(data.shape[0]):
+        for itex in xrange(data.shape[1]):
+            r_data[itex][isrc] = data[isrc][itex]
+    r_data = ma.masked_invalid(r_data)
+    print 'r_data.shape', r_data.shape
+
+    fig = plt.figure(figsize=(7, 6))
+    ax = fig.add_subplot(111)
+
+    ylims = SCALE_BOUNDARIES[dim]
+    if normalise:
+        if dim == 5: ylims = (-24, -8)
+        if dim == 6: ylims = (-12, 8)
+        if dim == 7: ylims = (0, 20)
+        if dim == 8: ylims = (12, 36)
+    else:
+        if dim == 3: ylims = (-28, -22)
+        if dim == 4: ylims = (-35, -26)
+        if dim == 5: ylims = SCALE_BOUNDARIES[5]
+    xlims = (0, 1)
+
+    colour = {0:'red', 2:'blue', 1:'green'}
+    rgb_co = {0:[1,0,0], 2:[0,0,1], 1:[0.0, 0.5019607843137255, 0.0]}
+
+    legend_log = []
+    legend_elements = []
+    labelsize = 13
+    largesize = 17
+
+    ax.set_xlim(xlims)
+    ax.set_ylim(ylims)
+    xticks = [0, 1/3., 0.5, 2/3., 1]
+    # xlabels = [r'$0$', r'$\frac{1}{3}$', r'$\frac{1}{2}$', r'$\frac{2}{3}$', r'$1$']
+    xlabels = [r'$0$', r'$1/3$', r'$1/2$', r'$2/3$', r'$1$']
+    ax.set_xticks([], minor=True)
+    ax.set_xticks(xticks, minor=False)
+    ax.set_xticklabels(xlabels, fontsize=largesize)
+    if dim in (3, 4):
+        yticks = range(ylims[0], ylims[1], 1) + [ylims[1]]
+    else:
+        yticks = range(ylims[0], ylims[1], 2) + [ylims[1]]
+    ax.set_yticks(yticks, minor=False)
+    # for ymaj in ax.yaxis.get_majorticklocs():
+    #     ax.axhline(y=ymaj, ls=':', color='gray', alpha=0.2, linewidth=1)
+    for xmaj in xticks:
+        if xmaj == 1/3.:
+            ax.axvline(x=xmaj, ls='--', color='gray', alpha=0.5, linewidth=0.3)
+        # else:
+        #     ax.axvline(x=xmaj, ls=':', color='gray', alpha=0.2, linewidth=1)
+
+    ax.text(
+        (1/3.)+0.01, 0.01, r'$(0.33:0.66:0)_\text{S}$', fontsize=labelsize,
+        transform=ax.transAxes, rotation='vertical', va='bottom'
+    )
+    ax.text(
+        0.96, 0.01, r'$(1:0:0)_\text{S}$', fontsize=labelsize,
+        transform=ax.transAxes, rotation='vertical', va='bottom', ha='left'
+    )
+    ax.text(
+        0.01, 0.01, r'$(0:1:0)_\text{S}$', fontsize=labelsize,
+        transform=ax.transAxes, rotation='vertical', va='bottom'
+    )
+    yl = 0.55
+    if dim == 3: yl = 0.65
+    ax.text(
+        0.03, yl, r'${\rm \bf Excluded}$', fontsize=largesize,
+        transform=ax.transAxes, color='g', rotation='vertical', zorder=10
+    )
+    ax.text(
+        0.95, 0.55, r'${\rm \bf Excluded}$', fontsize=largesize,
+        transform=ax.transAxes, color='b', rotation='vertical', zorder=10
+    )
+
+    for itex, tex in enumerate(textures):
+        print '|||| TEX = {0}'.format(tex)
+        lims = np.full(len(srcs), np.nan)
+
+        for isrc, src in enumerate(srcs):
+            x = src[0]
+            print '|||| X = {0}'.format(x)
+            args.source_ratio = src
+            d = r_data[itex][isrc]
+            if np.sum(d.mask) > 2: continue
+            scales, statistic = ma.compress_rows(d).T
+            lim = get_limit(deepcopy(scales), deepcopy(statistic), args,
+                            mask_initial=True)
+            if lim is None: continue
+            if normalise:
+                lim -= np.log10(PLANCK_SCALE[dim])
+            lims[isrc] = lim
+
+        lims = ma.masked_invalid(lims)
+        size = np.sum(~lims.mask)
+        if size == 0: continue
+
+        print 'x_arr, lims', zip(x_arr, lims)
+        if normalise:
+            zeropoint = 100
+        else:
+            zeropoint = 0
+        lims[lims.mask] = zeropoint
+
+        l0 = np.argwhere(lims == zeropoint)[0]
+        h0 = len(lims) - np.argwhere(np.flip(lims, 0) == zeropoint)[0]
+        lims[int(l0):int(h0)] = zeropoint
+
+        x_arr_a = [x_arr[0]-0.1] + list(x_arr)
+        x_arr_a = list(x_arr_a) + [x_arr_a[-1]+0.1]
+        lims = [lims[0]] + list(lims)
+        lims = list(lims) + [lims[-1]]
+
+        s = 0.2
+        g = 2
+        if dim == 3 and tex == Texture.OUT:
+            s = 0.4
+            g = 4
+        if dim in (4,5) and tex == Texture.OUT:
+            s = 0.5
+            g = 5
+        if dim == 7 and tex == Texture.OET:
+            s = 1.6
+            g = 2
+        if dim == 7 and tex == Texture.OUT:
+            s = 2.0
+            g = 20
+        if dim == 8 and tex == Texture.OET:
+            s = 0.8
+            g = 6
+        if dim == 8 and tex == Texture.OUT:
+            s = 1.7
+            g = 8
+
+        # ax.scatter(x_arr_a, lims, color='black', s=1)
+        tck, u = splprep([x_arr_a, lims], s=0, k=1)
+        x, y = splev(np.linspace(0, 1, 200), tck)
+        tck, u = splprep([x, y], s=s)
+        x, y = splev(np.linspace(0, 1, 400), tck)
+        y = gaussian_filter(y, sigma=g)
+        ax.fill_between(x, y, zeropoint, color=rgb_co[itex]+[0.3])
+        # ax.scatter(x, y, color='black', s=1)
+        # ax.scatter(x_arr_a, lims, color=rgb_co[itex], s=8)
+
+        if itex not in legend_log:
+            legend_log.append(itex)
+            # label = texture_label(tex, dim)[:-1] + r'\:{\rm\:texture}$'
+            label = texture_label(tex, dim)[:-1] + r'\:({\rm this\:work})$'
+            legend_elements.append(
+                Patch(facecolor=rgb_co[itex]+[0.3],
+                      edgecolor=rgb_co[itex]+[1], label=label)
+            )
+
+    LV_lim = np.log10(LV_ATMO_90PC_LIMITS[dim])
+    if normalise:
+        LV_lim -= np.log10(PLANCK_SCALE[dim])
+    ax.add_patch(patches.Rectangle(
+        (xlims[0], LV_lim[1]), np.diff(xlims), LV_lim[0]-LV_lim[1],
+        fill=False, hatch='\\\\'
+    ))
+
+    if dim in PLANCK_SCALE:
+        ps = np.log10(PLANCK_SCALE[dim])
+        if normalise and dim == 6:
+            ps -= np.log10(PLANCK_SCALE[dim])
+            ax.add_patch(Arrow(
+                0.24, -0.009, 0, -5, width=0.12, capstyle='butt',
+                facecolor='purple', fill=True, alpha=0.8,
+                edgecolor='darkmagenta'
+            ))
+            ax.add_patch(Arrow(
+                0.78, -0.009, 0, -5, width=0.12, capstyle='butt',
+                facecolor='purple', fill=True, alpha=0.8,
+                edgecolor='darkmagenta'
+            ))
+
+        ax.text(
+            0.26, 0.5, r'${\rm \bf Quantum\:Gravity\:Frontier}$',
+            fontsize=largesize-2, transform=ax.transAxes, va='top',
+            ha='left', color='purple'
+        )
+        if dim > 5:
+            ax.axhline(y=ps, color='purple', alpha=1., linewidth=1.5)
+
+    cpt = r'c' if dim % 2 == 0 else r'a'
+    if normalise:
+        ft = r'${\rm New\:Physics\:Scale}\:[\:{\rm log}_{10} \left (\mathring{'+cpt+r'}^{(' + \
+             r'{0}'.format(args.dimension)+r')}\cdot{\rm E}_{\:\rm P}'
+        if dim > 5: ft += r'^{\:'+ r'{0}'.format(args.dimension-4)+ r'}'
+        ft += r'\right )\: ]$'
+        fig.text(
+            0.01, 0.5, ft, ha='left',
+            va='center', rotation='vertical', fontsize=largesize
+        )
+    else:
+        fig.text(
+            0.01, 0.5,
+            r'${\rm New\:Physics\:Scale}\:[\:{\rm log}_{10} \left (\mathring{'+cpt+r'}^{(' +
+            r'{0}'.format(args.dimension)+r')}\:' + get_units(args.dimension) +
+            r'\right )\: ]$', ha='left',
+            va='center', rotation='vertical', fontsize=largesize
+        )
+
+    ax.set_xlabel(
+        r'${\rm Source\:Composition}\:[\:\left (\:x:1-x:0\:\right )_\text{S}\:]$',
+        labelpad=10, fontsize=largesize
+    )
+    ax.tick_params(axis='x', labelsize=largesize-1)
+
+    purple = [0.5019607843137255, 0.0, 0.5019607843137255]
+    # legend_elements.append(
+    #     Patch(facecolor=purple+[0.7], edgecolor=purple+[1], label='Planck Scale Expectation')
+    # )
+    legend_elements.append(
+        Patch(facecolor='none', hatch='\\\\', edgecolor='k', label='IceCube [TODO]')
+    )
+    legend = ax.legend(
+        handles=legend_elements, prop=dict(size=labelsize-2),
+        loc='upper center', title='Excluded regions', framealpha=1.,
+        edgecolor='black', frameon=True, bbox_to_anchor=(0.5, 1)
+    )
+    plt.setp(legend.get_title(), fontsize=labelsize)
+    legend.get_frame().set_linestyle('-')
+
+    # ybound = 0.14
+    # if args.data is DataType.REAL:
+    #     fig.text(0.7, ybound, r'\bf IceCube Preliminary', color='red', fontsize=13,
+    #              ha='center', va='center', zorder=11)
+    # elif args.data is DataType.REALISATION:
+    #     fig.text(0.7, ybound-0.05, r'\bf IceCube Simulation', color='red', fontsize=13,
+    #              ha='center', va='center', zorder=11)
+    # else:
+    #     fig.text(0.7, ybound, r'\bf IceCube Simulation', color='red', fontsize=13,
+    #              ha='center', va='center', zorder=11)
+
+    make_dir(outfile)
+    for of in outformat:
+        print 'Saving plot as {0}'.format(outfile + '.' + of)
+        fig.savefig(outfile + '.' + of, bbox_inches='tight', dpi=150)
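For orientation, a hypothetical driver for the routines in this module. The Namespace attributes and the file path are assumptions standing in for the package's real argument parser and scan outputs, and `gen_figtext`/`get_limit` may read further attributes than shown:

import numpy as np
import numpy.ma as ma
from argparse import Namespace

from golemflavor.enums import DataType, StatCateg, Texture
from golemflavor import plot

# Hypothetical settings mirroring attributes the functions above read.
args = Namespace(
    data=DataType.ASIMOV, stat_method=StatCateg.BAYESIAN,
    dimension=6, texture=Texture.OET, source_ratio=(1, 2, 0),
)

# Scan output: masked rows of (log10 scale, statistic).
scan = ma.masked_invalid(np.load('scan_d6.npy'))  # illustrative path
plot.plot_statistic(scan, 'plots/stat_d6', ['png'], args, scale_param=None)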
