author    tlatorre <tlatorre@uchicago.edu>  2020-11-16 08:17:36 -0600
committer tlatorre <tlatorre@uchicago.edu>  2020-11-16 08:17:36 -0600
commit    f96f2a8663e393801d6ab7c2967d6ab7fcee6a3d
tree      aff0dbfd35dcabb8023de53b519f481aeffe242a
parent    85926df6680ee9aa2a33a49ff816a90e93099cad
DM_ENERGIES -> DM_MASSES
-rwxr-xr-x  utils/dm-search | 29 ++++++++++++++++++-----------
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/utils/dm-search b/utils/dm-search
index c6d2d3a..cb0fa72 100755
--- a/utils/dm-search
+++ b/utils/dm-search
@@ -35,6 +35,7 @@ from sddm import printoptions
 from sddm.utils import fast_cdf, correct_energy_bias
 from scipy.integrate import quad
 from sddm.dm import *
+from sddm import SNOMAN_MASS
 
 # Likelihood Fit Parameters
 # 0 - Atmospheric Neutrino Flux Scale
@@ -50,7 +51,8 @@ from sddm.dm import *
 # the fit takes.
 DM_SAMPLES = 10000
 
-DM_ENERGIES = np.logspace(np.log10(20),np.log10(10e3),10)
+DM_MASSES = {2020: np.logspace(np.log10(22),np.log10(10e3 - 500),101),
+             2222: np.logspace(np.log10(318),np.log10(10e3 - 500),101)}
 
 DISCOVERY_P_VALUE = 0.05
 
@@ -390,7 +392,7 @@ def integrate_mc_hist(mc, muons, id, x, a, b, atmo_scale_factor, muon_scale_fact
     # are in the same bin, etc.
     return quad(total_hist,a,b,points=bins[(bins >= a) & (bins <= b)])[0]
 
-def get_limits(dm_energies,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,steps,print_nll,walkers,thin):
+def get_limits(dm_masses,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,steps,print_nll,walkers,thin):
     limits = {}
     best_fit = {}
     discovery_array = {}
@@ -398,8 +400,12 @@ def get_limits(dm_energies,data,muon,data_mc,atmo_scale_factor,muon_scale_factor
         limits[dm_particle_id] = []
         best_fit[dm_particle_id] = []
         discovery_array[dm_particle_id] = []
-        for dm_energy in dm_energies:
-            dm_mass = dm_energy
+        for dm_mass in dm_masses[dm_particle_id]:
+            id1 = dm_particle_id//100
+            id2 = dm_particle_id % 100
+            m1 = SNOMAN_MASS[id1]
+            m2 = SNOMAN_MASS[id2]
+            dm_energy = dm_mass
             xopt, samples = do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,steps,print_nll,walkers,thin)
 
             limit = np.percentile(samples[:,6],90)
@@ -421,8 +427,9 @@ def get_limits(dm_energies,data,muon,data_mc,atmo_scale_factor,muon_scale_factor
             # mean n. Therefore s should have the same distribution but offset
             # by n. Therefore, to determine the threshold, we simply look for
            # the threshold we expect in n and then subtract n.
-            a = dm_energy - 2*dm_energy*DM_ENERGY_RESOLUTION
-            b = dm_energy + 2*dm_energy*DM_ENERGY_RESOLUTION
+            dm_kinetic_energy = dm_energy - m1 - m2
+            a = dm_kinetic_energy - 2*dm_kinetic_energy*DM_ENERGY_RESOLUTION
+            b = dm_kinetic_energy + 2*dm_kinetic_energy*DM_ENERGY_RESOLUTION
             n = integrate_mc_hist(data_mc, muon, dm_particle_id, xopt, a, b, atmo_scale_factor, muon_scale_factor)
             # Set our discovery threshold to the p-value we want divided by the
             # number of bins. The idea here is that the number of bins is
@@ -666,7 +673,7 @@ if __name__ == '__main__':
         data_atm.loc[data_atm.id2 == 22,'energy2'] *= (1+xtrue[3]+np.random.randn(np.count_nonzero(data_atm.id2 == 22))*xtrue[4])
         data_atm['ke'] = data_atm['energy1'].fillna(0) + data_atm['energy2'].fillna(0) + data_atm['energy3'].fillna(0)
 
-        limits, best_fit, discovery_array = get_limits(DM_ENERGIES,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,args.steps,args.print_nll,args.walkers,args.thin)
+        limits, best_fit, discovery_array = get_limits(DM_MASSES,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,args.steps,args.print_nll,args.walkers,args.thin)
 
         for id in (2020,2222):
             if (best_fit[id] > discovery_array[id]).any():
@@ -677,11 +684,11 @@ if __name__ == '__main__':
 
         sys.exit(0)
 
-    limits, best_fit, discovery_array = get_limits(DM_ENERGIES,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,args.steps,args.print_nll,args.walkers,args.thin)
+    limits, best_fit, discovery_array = get_limits(DM_MASSES,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,args.steps,args.print_nll,args.walkers,args.thin)
 
     fig = plt.figure()
     for color, dm_particle_id in zip(('C0','C1'),(2020,2222)):
-        plt.plot(DM_ENERGIES,limits[dm_particle_id],color=color,label='$' + ''.join([particle_id[int(''.join(x))] for x in grouper(str(dm_particle_id),2)]) + '$')
+        plt.plot(DM_MASSES[dm_particle_id],limits[dm_particle_id],color=color,label='$' + ''.join([particle_id[int(''.join(x))] for x in grouper(str(dm_particle_id),2)]) + '$')
     plt.gca().set_xscale("log")
     despine(fig,trim=True)
     plt.xlabel("Energy (MeV)")
@@ -697,8 +704,8 @@ if __name__ == '__main__':
 
     fig = plt.figure()
     for color, dm_particle_id in zip(('C0','C1'),(2020,2222)):
-        plt.plot(DM_ENERGIES,best_fit[dm_particle_id],color=color,label='$' + ''.join([particle_id[int(''.join(x))] for x in grouper(str(dm_particle_id),2)]) + '$')
-        plt.plot(DM_ENERGIES,discovery_array[dm_particle_id],color=color,ls='--')
+        plt.plot(DM_MASSES[dm_particle_id],best_fit[dm_particle_id],color=color,label='$' + ''.join([particle_id[int(''.join(x))] for x in grouper(str(dm_particle_id),2)]) + '$')
+        plt.plot(DM_MASSES[dm_particle_id],discovery_array[dm_particle_id],color=color,ls='--')
     plt.gca().set_xscale("log")
     despine(fig,trim=True)
     plt.xlabel("Energy (MeV)")
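For context, the substance of this commit is the bookkeeping between the dark matter candidate's mass, its total energy, and the visible kinetic energy of its decay products. Below is a minimal sketch of that logic pulled out of the patched get_limits(): the composite particle ID packs two SNOMAN particle IDs (2222 -> 22 and 22), and the MC histogram is integrated over a window of plus or minus two resolution widths around the expected kinetic energy. The SNOMAN_MASS values, the particle-ID interpretation, the DM_ENERGY_RESOLUTION value, and the function name integration_window are illustrative assumptions here, not part of the patch; the real mass table lives in the sddm package and the real resolution constant elsewhere in utils/dm-search.

# Illustrative stand-ins; the real table is sddm.SNOMAN_MASS and the real
# resolution constant is defined elsewhere in utils/dm-search.
SNOMAN_MASS = {20: 0.511, 22: 105.658}  # MeV (assumed: 20 = e-/e+, 22 = mu-/mu+)
DM_ENERGY_RESOLUTION = 0.1              # assumed fractional energy resolution

def integration_window(dm_particle_id, dm_mass):
    """Reproduce the [a, b] kinetic-energy window that the patched
    get_limits() integrates the MC histogram over."""
    # The composite ID packs the two decay-product IDs: 2222 -> (22, 22).
    id1 = dm_particle_id // 100
    id2 = dm_particle_id % 100
    m1 = SNOMAN_MASS[id1]
    m2 = SNOMAN_MASS[id2]
    # The candidate's total energy equals its mass (dm_energy = dm_mass in
    # the patch); the visible kinetic energy subtracts the two rest masses.
    dm_energy = dm_mass
    dm_kinetic_energy = dm_energy - m1 - m2
    # Window of +/- 2 resolution widths around the expected kinetic energy.
    a = dm_kinetic_energy - 2*dm_kinetic_energy*DM_ENERGY_RESOLUTION
    b = dm_kinetic_energy + 2*dm_kinetic_energy*DM_ENERGY_RESOLUTION
    return a, b

# Example: a 1 GeV candidate decaying to mu+ mu-.
print(integration_window(2222, 1000.0))

Note that the new per-ID lower edges of the mass grid (22 MeV for 2020, 318 MeV for 2222) sit above the summed rest masses (about 1.02 MeV and 211.3 MeV respectively), keeping dm_kinetic_energy positive, which appears to be why the single shared grid starting at 20 MeV had to become a per-particle-ID dictionary.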