Diffstat (limited to 'utils/chi2')
-rwxr-xr-x  utils/chi2  21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/utils/chi2 b/utils/chi2
index e30a058..6888158 100755
--- a/utils/chi2
+++ b/utils/chi2
@@ -409,7 +409,7 @@ def do_fit(data,muon,data_mc,weights,atmo_scale_factor,muon_scale_factor,bins,st
nlls = []
for universe in range(nuniverses):
- data_mc_with_weights = pd.merge(data_mc,weights_dict[universe],how='left',on=['run','evn'])
+ data_mc_with_weights = pd.merge(data_mc,weights_dict[universe],how='left',on=['run','unique_id'])
data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)
nll = make_nll(data,muon,data_mc_with_weights,atmo_scale_factor,muon_scale_factor,bins,reweight=True,print_nll=print_nll)
@@ -418,7 +418,7 @@ def do_fit(data,muon,data_mc,weights,atmo_scale_factor,muon_scale_factor,bins,st
universe = np.argmin(nlls)
if refit:
- data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == universe],how='left',on=['run','evn'])
+ data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == universe],how='left',on=['run','unique_id'])
data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)
# Create a new negative log likelihood function with the weighted Monte Carlo.
@@ -530,6 +530,11 @@ if __name__ == '__main__':
mcpl = load_mcpl_files(args.mcpl)
ev_mc = renormalize_data(ev_mc.reset_index(),mcpl)
+ # Merge the weights with the MCPL dataframe to pick up the unique_id
+ # column, since that is the key we use to merge the weights with the
+ # Monte Carlo.
+ weights = pd.merge(weights,mcpl[['run','evn','unique_id']],on=['run','evn'],how='left')
+
# There are a handful of weights which turn out to be slightly negative for
# some reason. For example:
#
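The two-step merge introduced in the hunk above can be sketched in isolation. A minimal, standalone illustration with made-up toy rows (the run/evn/unique_id values below are hypothetical; only the column names come from the script):

import pandas as pd

# Hypothetical MCPL lookup: maps (run, evn) to the Monte Carlo unique_id.
mcpl = pd.DataFrame({'run': [100, 100], 'evn': [1, 2], 'unique_id': [11, 12]})

# Hypothetical systematic weights, originally keyed on (run, evn) only.
weights = pd.DataFrame({'run': [100, 100], 'evn': [1, 2],
                        'universe': [0, 0], 'weight': [0.9, 1.1]})

# Carry unique_id from the MCPL dataframe into the weights dataframe so
# that later merges with the Monte Carlo can key on (run, unique_id).
weights = pd.merge(weights, mcpl[['run', 'evn', 'unique_id']],
                   on=['run', 'evn'], how='left')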
@@ -625,8 +630,8 @@ if __name__ == '__main__':
xtrue = truncnorm_scaled(PRIORS_LOW,PRIORS_HIGH,PRIORS,PRIOR_UNCERTAINTIES)
- data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == 0],how='left',on=['run','evn'])
- data_atm_mc_with_weights = pd.merge(data_atm_mc,weights[weights.universe == 0],how='left',on=['run','evn'])
+ data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == 0],how='left',on=['run','unique_id'])
+ data_atm_mc_with_weights = pd.merge(data_atm_mc,weights[weights.universe == 0],how='left',on=['run','unique_id'])
for i in range(args.coverage):
# Calculate expected number of events
@@ -660,10 +665,10 @@ if __name__ == '__main__':
xopt, universe, samples = do_fit(data,muon,data_mc,weights,atmo_scale_factor,muon_scale_factor,bins,args.steps,args.print_nll,args.walkers,args.thin)
- data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == universe],how='left',on=['run','evn'])
+ data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == universe],how='left',on=['run','unique_id'])
data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)
- data_atm_mc_with_weights = pd.merge(data_atm_mc,weights[weights.universe == universe],how='left',on=['run','evn'])
+ data_atm_mc_with_weights = pd.merge(data_atm_mc,weights[weights.universe == universe],how='left',on=['run','unique_id'])
data_atm_mc_with_weights.weight = data_atm_mc_with_weights.weight.fillna(1.0)
prob = get_prob(data,muon,data_mc_with_weights,atmo_scale_factor,muon_scale_factor,samples,bins,size=args.multinomial_prob_size)
@@ -792,10 +797,10 @@ if __name__ == '__main__':
xopt, universe, samples = do_fit(data,muon,data_mc,weights,atmo_scale_factor,muon_scale_factor,bins,args.steps,args.print_nll,args.walkers,args.thin)
- data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == universe],how='left',on=['run','evn'])
+ data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == universe],how='left',on=['run','unique_id'])
data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)
- data_atm_mc_with_weights = pd.merge(data_atm_mc,weights[weights.universe == universe],how='left',on=['run','evn'])
+ data_atm_mc_with_weights = pd.merge(data_atm_mc,weights[weights.universe == universe],how='left',on=['run','unique_id'])
data_atm_mc_with_weights.weight = data_atm_mc_with_weights.weight.fillna(1.0)
prob = get_prob(data,muon,data_mc_with_weights,atmo_scale_factor,muon_scale_factor,samples,bins,size=args.multinomial_prob_size)
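The hunks above all repeat one pattern: select a single universe's weights, left-merge them onto the Monte Carlo keyed on (run, unique_id), and default events without a weight to 1.0. A minimal sketch of that pattern with hypothetical toy data (the values are made up; the column names and the merge/fillna calls mirror the diff):

import pandas as pd

# Hypothetical Monte Carlo events and per-universe weights.
data_mc = pd.DataFrame({'run': [100, 100, 101], 'unique_id': [11, 12, 13]})
weights = pd.DataFrame({'run': [100, 100], 'unique_id': [11, 12],
                        'universe': [0, 0], 'weight': [0.9, 1.1]})

# In the script the universe comes from np.argmin(nlls) inside do_fit().
universe = 0

# Left merge keyed on (run, unique_id); events without a weight in this
# universe come out with weight = NaN.
data_mc_with_weights = pd.merge(data_mc, weights[weights.universe == universe],
                                how='left', on=['run', 'unique_id'])

# Missing weights default to 1.0, exactly as in the diff.
data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)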