| author | tlatorre <tlatorre@uchicago.edu> | 2020-12-21 00:02:04 -0600 |
|---|---|---|
| committer | tlatorre <tlatorre@uchicago.edu> | 2020-12-21 00:02:04 -0600 |
| commit | cf2d8db90bbb7757b0336cab7801a03ef7043f31 (patch) | |
| tree | d918df4826af0985907ab042bf1ccb5d389475e4 /utils/dm-search | |
| parent | 834df6cfc5737db2b2f4c0a1432c97960ed65e76 (diff) | |
| download | sddm-cf2d8db90bbb7757b0336cab7801a03ef7043f31.tar.gz, sddm-cf2d8db90bbb7757b0336cab7801a03ef7043f31.tar.bz2, sddm-cf2d8db90bbb7757b0336cab7801a03ef7043f31.zip | |
update bins for muon histograms
This commit updates the bins for the muon histograms so that they don't extend
far above 2 GeV. The reason is that most muons at these energies exit the
detector, and my energy reconstruction doesn't work well for them.
I also updated chi2 and dm-search to use the flux_weight when sampling
from the MC.
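As a quick sanity check on the 2 GeV figure (this arithmetic is mine, not part of the commit): the full binning in the diff below is 21 log-spaced edges from 20 MeV to 10 GeV, and the channels containing a muon drop the last 5 edges, which caps them at roughly 2.1 GeV.

```python
import numpy as np

# Full binning from the diff below: 21 log-spaced edges from 20 MeV to 10 GeV.
full_bins = np.logspace(np.log10(20), np.log10(10e3), 21)

# Channels containing a muon (ids 22, 2022, 2222) drop the last 5 edges,
# so their highest edge is 20 * 500**(15/20) ~= 2115 MeV, just above 2 GeV.
muon_bins = full_bins[:-5]

print(full_bins[-1])  # 10000.0
print(muon_bins[-1])  # ~2115.4
```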
Diffstat (limited to 'utils/dm-search')
-rwxr-xr-x | utils/dm-search | 37 |
1 file changed, 24 insertions(+), 13 deletions(-)
```diff
diff --git a/utils/dm-search b/utils/dm-search
index dd4946b..12d2a4b 100755
--- a/utils/dm-search
+++ b/utils/dm-search
@@ -139,14 +139,17 @@ def plot_hist2(hists, bins, color=None):
         elif id == 2222:
             plt.subplot(2,3,6)
 
-        bincenters = (bins[1:] + bins[:-1])/2
-        plt.hist(bincenters, bins=bins, histtype='step', weights=hists[id],color=color)
+        bincenters = (bins[id][1:] + bins[id][:-1])/2
+        plt.hist(bincenters, bins=bins[id], histtype='step', weights=hists[id],color=color)
         plt.gca().set_xscale("log")
         major = np.array([10,100,1000,10000])
         minor = np.unique(list(chain(*list(range(i,i*10,i) for i in major[:-1]))))
         minor = np.setdiff1d(minor,major)
+        major = major[major <= bins[id][-1]]
+        minor = minor[minor <= bins[id][-1]]
         plt.gca().set_xticks(major)
         plt.gca().set_xticks(minor,minor=True)
+        plt.gca().set_xlim(10,10000)
         plt.xlabel("Energy (MeV)")
         plt.title('$' + ''.join([particle_id[int(''.join(x))] for x in grouper(str(id),2)]) + '$')
@@ -205,9 +208,9 @@ def get_mc_hists_fast(df_dict,x,bins,scale=1.0,reweight=False):
         resolution = np.sqrt((df.energy1.values*max(EPSILON,x[2]))**2 + (df.energy2.values*max(EPSILON,x[4]))**2)
         if reweight:
-            cdf = fast_cdf(bins[:,np.newaxis],ke,resolution)*df.weight.values
+            cdf = fast_cdf(bins[id][:,np.newaxis],ke,resolution)*df.weight.values
         else:
-            cdf = fast_cdf(bins[:,np.newaxis],ke,resolution)
+            cdf = fast_cdf(bins[id][:,np.newaxis],ke,resolution)
         if 'flux_weight' in df.columns:
             cdf *= df.flux_weight.values
@@ -222,7 +225,7 @@ def get_data_hists(data,bins,scale=1.0):
     """
     data_hists = {}
     for id in (20,22,2020,2022,2222):
-        data_hists[id] = np.histogram(data[data.id == id].ke.values,bins=bins)[0]*scale
+        data_hists[id] = np.histogram(data[data.id == id].ke.values,bins=bins[id])[0]*scale
     return data_hists
 
 def make_nll(dm_particle_id, dm_mass, dm_energy, data, muons, mc, atmo_scale_factor, muon_scale_factor, bins, reweight=False, print_nll=False, dm_sample=None):
@@ -559,17 +562,18 @@ if __name__ == '__main__':
     import matplotlib.pyplot as plt
 
+    if args.run_list is not None:
+        run_list = np.genfromtxt(args.run_list)
+
     # Loop over runs to prevent using too much memory
     evs = []
     rhdr = pd.concat([read_hdf(filename, "rhdr").assign(filename=filename) for filename in args.filenames],ignore_index=True)
     for run, df in rhdr.groupby('run'):
+        if args.run_list and run not in run_list:
+            continue
         evs.append(get_events(df.filename.values, merge_fits=True, nhit_thresh=args.nhit_thresh))
     ev = pd.concat(evs).reset_index()
 
-    if args.run_list is not None:
-        runs = np.genfromtxt(args.run_list)
-        ev = ev[ev.run.isin(runs)]
-
     livetime = 0.0
     livetime_pulse_gt = 0.0
     for _ev in evs:
@@ -600,7 +604,7 @@ if __name__ == '__main__':
     ev_mcs = []
     for filename in args.mc:
         ev_mcs.append(get_events([filename], merge_fits=True, nhit_thresh=args.nhit_thresh, mc=True))
-    ev_mc = pd.concat(ev_mcs)
+    ev_mc = pd.concat(ev_mcs).reset_index()
     muon_mc = get_events(args.muon_mc, merge_fits=True, nhit_thresh=args.nhit_thresh, mc=True)
     weights = pd.concat([read_hdf(filename, "weights") for filename in args.weights],ignore_index=True)
@@ -697,7 +701,11 @@ if __name__ == '__main__':
     data_mc = ev_mc[ev_mc.signal & ev_mc.prompt & ~ev_mc.atm]
     data_atm_mc = ev_mc[ev_mc.signal & ev_mc.prompt & ev_mc.atm]
 
-    bins = np.logspace(np.log10(20),np.log10(10e3),21)
+    bins = {20:np.logspace(np.log10(20),np.log10(10e3),21),
+            22:np.logspace(np.log10(20),np.log10(10e3),21)[:-5],
+            2020:np.logspace(np.log10(20),np.log10(10e3),21),
+            2022:np.logspace(np.log10(20),np.log10(10e3),21)[:-5],
+            2222:np.logspace(np.log10(20),np.log10(10e3),21)[:-5]}
 
     atmo_scale_factor = 100.0
     muon_scale_factor = len(muon) + len(muon_atm)
@@ -730,8 +738,8 @@ if __name__ == '__main__':
         dm_energy = dm_mass
 
         # Sample data from Monte Carlo
-        data = pd.concat((data_mc.sample(n=n,replace=True), muon.sample(n=n_muon,replace=True),get_dm_sample(n_dm,dm_particle_id,dm_mass,dm_energy)))
-        data_atm = pd.concat((data_atm_mc.sample(n=n_atm,replace=True), muon_atm.sample(n=n_muon_atm,replace=True)))
+        data = pd.concat((data_mc.sample(n=n,weights='flux_weight',replace=True), muon.sample(n=n_muon,replace=True)))
+        data_atm = pd.concat((data_atm_mc.sample(n=n_atm,weights='flux_weight',replace=True), muon_atm.sample(n=n_muon_atm,replace=True)))
 
         # Smear the energies by the additional energy resolution
         data.loc[data.id1 == 20,'energy1'] *= (1+xtrue[1]+np.random.randn(np.count_nonzero(data.id1 == 20))*xtrue[2])
@@ -786,6 +794,9 @@ if __name__ == '__main__':
 
     discoveries = 0
 
+    data_mc_with_weights.weight *= data_mc_with_weights.flux_weight
+    data_atm_mc_with_weights.weight *= data_atm_mc_with_weights.flux_weight
+
     for i in range(args.test):
         xtrue = sample_priors()
```
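The flux_weight change leans on pandas' built-in weighted row sampling. Here is a minimal sketch of that behavior using a toy DataFrame rather than the script's actual MC (the column names ke and flux_weight match the script; the values are made up):

```python
import pandas as pd

# Toy stand-in for the MC events table; the values are invented.
mc = pd.DataFrame({
    'ke': [100.0, 500.0, 2000.0],
    'flux_weight': [3.0, 1.0, 0.1],
})

# DataFrame.sample normalizes the weights column, so each row is drawn
# with probability flux_weight / flux_weight.sum(); with replace=True
# this is the resampling the commit switches to.
fake = mc.sample(n=10000, weights='flux_weight', replace=True, random_state=0)

# The 100 MeV row should appear about 3/4.1, i.e. ~73% of the time.
print(fake.ke.value_counts(normalize=True))
```

Weighting the draw itself, rather than reweighting histograms after the fact, keeps the energy spectrum of the fake datasets consistent with the flux-weighted expectation that make_nll fits against.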