#!/usr/bin/env python3
import ROOT
import numpy as np
# on retina screens, the default plots are way too small
# by using Qt5 and setting QT_AUTO_SCREEN_SCALE_FACTOR=1
# Qt5 will scale everything using the dpi in ~/.Xresources
import matplotlib
matplotlib.use("Qt5Agg")

def tick_formatter(x, pos):
    if x < 1:
        return '%.1f' % x
    else:
        return '%.0f' % x

# FIXME: What is this for the salt phase?
NEUTRON_DETECTION_EFFICIENCY = 0.5

# FIXME: What is the index for D2O?
INDEX_HEAVY_WATER = 1.3

# fractional energy resolution
# from Richie's thesis page 134
ENERGY_RESOLUTION = 0.05

def pdg_code_to_string(pdg):
    A = int(("%010i" % pdg)[-4:-1])
    Z = int(("%010i" % pdg)[-7:-4])
    if Z == 1:
        atom = 'H'
    elif Z == 8:
        atom = 'O'
    elif Z == 6:
        atom = 'C'
    else:
        raise NotImplementedError("unknown atom %i" % Z)
    return '%i%s' % (A,atom)

def get_reaction(event):
    reactants = []
    products = []

    if event.neu == 12:
        reactants.append('ve')
    elif event.neu == -12:
        reactants.append('vebar')
    elif event.neu == 14:
        reactants.append('vu')
    elif event.neu == -14:
        reactants.append('vubar')
    elif event.neu == 16:
        reactants.append('vt')
    elif event.neu == -16:
        reactants.append('vtbar')

    if event.hitnuc == 2212:
        reactants.append('p')
    elif event.hitnuc == 2112:
        reactants.append('n')
    elif event.hitnuc == 0:
        reactants.append(pdg_code_to_string(event.tgt))
    else:
        print("unknown nucleon %i" % event.hitnuc)

    if event.cc:
        if event.neu == 12:
            products.append('e-')
        elif event.neu == -12:
            products.append('e+')
        elif event.neu == 14:
            products.append('u-')
        elif event.neu == -14:
            products.append('u+')
        elif event.neu == 16:
            products.append('t-')
        elif event.neu == -16:
            products.append('t+')
    elif event.nc:
        if event.neu == 12:
            products.append('ve')
        elif event.neu == -12:
            products.append('vebar')
        elif event.neu == 14:
            products.append('vu')
        elif event.neu == -14:
            products.append('vubar')
        elif event.neu == 16:
            products.append('vt')
        elif event.neu == -16:
            products.append('vtbar')
    else:
        products.append("???")

    for pdg in event.pdgf:
        if pdg == 2112:
            products.append('n')
        elif abs(pdg) == 11:
            # e- or e+
            if pdg == 11:
                products.append('e-')
            else:
                products.append('e+')
        elif pdg == 22:
            # gamma
            products.append('gamma')
        elif pdg == 111:
            # pi0
            products.append('pi0')
        elif abs(pdg) == 211:
            # pi+/-
            if pdg == 211:
                products.append('pi+')
            else:
                products.append('pi-')
        elif abs(pdg) == 311:
            if pdg == 311:
                products.append('K0')
            else:
                products.append('K0bar')
        elif abs(pdg) == 321:
            # K+/-
            if pdg == 321:
                products.append('K+')
            else:
                products.append('K-')
        elif abs(pdg) == 3222:
            products.append('Sigma+')
        elif abs(pdg) == 3112:
            products.append('Sigma-')
        elif abs(pdg) == 3122:
            products.append('Lambda')
        elif pdg == 2212:
            products.append('p')
        elif int(("%010i" % abs(pdg))[0]) == 1:
            products.append(pdg_code_to_string(pdg))
        else:
            print("unknown pdg code %i" % pdg)

    return ' + '.join(reactants) + ' -> ' + ' + '.join(products)

if __name__ == '__main__':
    import argparse
    import matplotlib.pyplot as plt
    from collections import Counter

    parser = argparse.ArgumentParser(description="script to analyze GENIE 'ntuple' ROOT files")
    parser.add_argument("filenames", nargs='+', help="GENIE ROOT files")
    args = parser.parse_args()

    bins = np.logspace(-1,2,100)

    El = []
    total_neutrons = []
    total_neutrons_detected = []
    E = []
    KE = []
    r = []
    total_nrings = []
    total_e_like_rings = []
    total_u_like_rings = []
    reactions = Counter()
    for filename in args.filenames:
        print("analyzing %s" % filename)
        f = ROOT.TFile(filename)
        T = f.Get("gst")
        for event in T:
            neutrons = 0
            nrings = 0
            e_like_rings = 0
            u_like_rings = 0
            ke = 0
            if event.cc:
                if abs(event.neu) == 12:
                    e_like_rings = 1
                else:
                    u_like_rings = 1
                nrings = 1
                ke += event.El
            elif event.nc:
                pass
            else:
                print("event is not cc or nc!")
                continue

            for i, pdg in enumerate(event.pdgf):
                if pdg == 2112:
                    neutrons += 1
                elif abs(pdg) == 11:
                    # e- or e+
                    if event.Ef[i] > 0.1:
                        # for now assume we only count rings from electrons
                        # with > 100 MeV
                        nrings += 1
                        e_like_rings += 1
                        ke += event.Ef[i]
                elif pdg == 22:
                    # gamma
                    if event.Ef[i] > 0.1:
                        # for now assume we only count rings from gammas with >
                        # 100 MeV
                        nrings += 1
                        e_like_rings += 1
                        ke += event.Ef[i]
                elif pdg == 111:
                    # pi0
                    nrings += 1
                    e_like_rings += 1
                    ke += event.Ef[i]
                elif abs(pdg) == 211:
                    # pi+/-
                    # momentum of ith particle in hadronic system
                    p = np.sqrt(event.pxf[i]**2 + event.pyf[i]**2 + event.pzf[i]**2)
                    # velocity of ith particle (in units of c)
                    # FIXME: is energy total energy or kinetic energy?
                    v = p/event.Ef[i]
                    if v > 1/INDEX_HEAVY_WATER:
                        # if the pion is above threshold, we assume that it
                        # produces 2 muon like rings
                        nrings += 2
                        u_like_rings += 2
                    else:
                        # if the pion is not above threshold, we assume that it
                        # produces 1 muon like ring
                        nrings += 1
                        u_like_rings += 1
                    # FIXME: should actually be a beta distribution
                    p = np.sqrt(event.pxf[i]**2 + event.pyf[i]**2 + event.pzf[i]**2)
                    m = np.sqrt(event.Ef[i]**2 - p**2)
                    ke += event.Ef[i] - m
                elif abs(pdg) in [2212,3222,311,321,3122,3112]:
                    # momentum of ith particle in hadronic system
                    p = np.sqrt(event.pxf[i]**2 + event.pyf[i]**2 + event.pzf[i]**2)
                    # velocity of ith particle (in units of c)
                    # FIXME: is energy total energy or kinetic energy?
                    v = p/event.Ef[i]
                    if v > 1/INDEX_HEAVY_WATER:
                        # above cerenkov threshold
                        nrings += 1
                        u_like_rings += 1
                        m = np.sqrt(event.Ef[i]**2 - p**2)
                        ke += event.Ef[i] - m
                elif int(("%010i" % abs(pdg))[0]) == 1:
                    # usually just excited 16O atom which won't produce a lot
                    # of light
                    pass
                else:
                    print("unknown pdg code %i" % pdg)

            total_neutrons.append(neutrons)
            total_neutrons_detected.append(np.random.binomial(neutrons,NEUTRON_DETECTION_EFFICIENCY))
            total_nrings.append(nrings)
            total_e_like_rings.append(e_like_rings)
            total_u_like_rings.append(u_like_rings)
            El.append(event.El)
            E.append(event.calresp0)
            KE.append(ke + np.random.randn()*ke*ENERGY_RESOLUTION)
            r.append(np.sqrt(event.vtxx**2 + event.vtxy**2 + event.vtxz**2))

            if total_neutrons_detected[-1] == 0 and nrings >= 2 and ((e_like_rings == 0) or (u_like_rings == 0)):
                reactions.update([get_reaction(event)])

    total = sum(reactions.values())
    for reaction, count in reactions.most_common(10):
        print("%.0f%% %s" % (count*100/total, reaction))

    El = np.array(El)
    total_neutrons = np.array(total_neutrons)
    total_neutrons_detected = np.array(total_neutrons_detected)
    E = np.array(E)
    KE = np.array(KE)
    r = np.array(r)
    total_nrings = np.array(total_nrings)
    total_e_like_rings = np.array(total_e_like_rings)
    total_u_like_rings = np.array(total_u_like_rings)

    cut1 = (total_neutrons_detected == 0)
    cut2 = (total_neutrons_detected == 0) & (total_nrings >= 2)
    cut3 = (total_neutrons_detected == 0) & (total_nrings >= 2) & ((total_e_like_rings == 0) | (total_u_like_rings == 0))

    El1 = El[cut1]
    El2 = El[cut2]
    El3 = El[cut3]
    E1 = E[cut1]
    E2 = E[cut2]
    E3 = E[cut3]
    KE1 = KE[cut1]
    KE2 = KE[cut2]
    KE3 = KE[cut3]

    plt.figure()
    bincenters = (bins[1:] + bins[:-1])/2
    x = np.repeat(bins,2)
    El_hist, _ = np.histogram(El, bins=bins)
    total_events = El_hist.sum()
    # FIXME: this is just a rough estimate of how many events we expect in 3
    # years based on Richie's thesis which says "Over the 306.4 live days of
    # the D2O phase we expect a total of 192.4 events within the acrylic vessel
    # and 504.5 events within the PSUP."
    El_hist = El_hist*230/total_events
    y = np.concatenate([[0],np.repeat(El_hist,2),[0]])
    El1_hist, _ = np.histogram(El1, bins=bins)
    El1_hist = El1_hist*230/total_events
    y1 = np.concatenate([[0],np.repeat(El1_hist,2),[0]])
    El2_hist, _ = np.histogram(El2, bins=bins)
    El2_hist = El2_hist*230/total_events
    y2 = np.concatenate([[0],np.repeat(El2_hist,2),[0]])
    El3_hist, _ = np.histogram(El3, bins=bins)
    El3_hist = El3_hist*230/total_events
    y3 = np.concatenate([[0],np.repeat(El3_hist,2),[0]])
    plt.plot(x, y, label="All events")
    plt.step(x, y1, where='mid', label="n")
    plt.step(x, y2, where='mid', label="n + nrings")
    plt.step(x, y3, where='mid', label="n + nrings + same flavor")
    plt.xlabel("Energy (GeV)")
    plt.ylabel("Events/year")
    plt.gca().set_xscale("log")
    plt.gca().xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(tick_formatter))
    plt.xlim((0.02,bins[-1]))
    plt.ylim((0,None))
    plt.legend()
    plt.title("Primary Lepton Energy")

    plt.figure()
    KE_hist, _ = np.histogram(KE, bins=bins)
    KE_signal, _ = np.histogram(np.random.randn(1000)*1.0*ENERGY_RESOLUTION + 1.0, bins=bins)
    total_events = KE_hist.sum()
    # FIXME: this is just a rough estimate of how many events we expect in 3
    # years based on Richie's thesis which says "Over the 306.4 live days of
    # the D2O phase we expect a total of 192.4 events within the acrylic vessel
    # and 504.5 events within the PSUP."
    KE_hist = KE_hist*230/total_events
    y = np.concatenate([[0],np.repeat(KE_hist,2),[0]])
    KE1_hist, _ = np.histogram(KE1, bins=bins)
    KE1_hist = KE1_hist*230/total_events
    y1 = np.concatenate([[0],np.repeat(KE1_hist,2),[0]])
    KE2_hist, _ = np.histogram(KE2, bins=bins)
    KE2_hist = KE2_hist*230/total_events
    y2 = np.concatenate([[0],np.repeat(KE2_hist,2),[0]])
    KE3_hist, _ = np.histogram(KE3, bins=bins)
    KE3_hist = KE3_hist*230/total_events
    y3 = np.concatenate([[0],np.repeat(KE3_hist,2),[0]])
    KE_signal = KE_signal*10/np.sum(KE_signal)
    y4 = np.concatenate([[0],np.repeat(KE_signal,2),[0]])
    plt.plot(x, y, label="All events")
    plt.plot(x, y1, label="n")
    plt.plot(x, y2, label="n + nrings")
    plt.plot(x, y3, label="n + nrings + same flavor")
    plt.plot(x, y4, label="1 GeV signal")
    plt.xlabel("Energy (GeV)")
    plt.ylabel(r"Expected Event Rate (year$^{-1}$)")
    plt.gca().set_xscale("log")
    plt.gca().xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(tick_formatter))
    plt.xlim((0.02,bins[-1]))
    plt.ylim((0,None))
    plt.legend()
    plt.title("Approximate Visible Energy")

    plt.figure()
    plt.hist(r, bins=np.linspace(0,8,100), histtype='step')
    plt.xlabel("R (m)")
    plt.title("Radius of Events")

    plt.figure()
    plt.hist(total_neutrons, bins=np.arange(11)-0.5, histtype='step')
    plt.xlabel("Number of Neutrons")
    plt.title("Number of Neutrons")

    plt.figure()
    plt.hist(total_nrings, bins=np.arange(11)-0.5, histtype='step')
    plt.xlabel("Number of Rings")
    plt.title("Number of Rings (approximate)")

    plt.show()
#!/usr/bin/env python
"""
Script to combine the low energy Battistoni atmospheric flux with the high
energy flux from Giles Barr and apply oscillations.

Note: There are a couple of potential issues with this:

1. The Battistoni fluxes give a total flux integrated over the zenith angle,
whereas we need to account for the zenith dependence. Therefore, we just use
the zenith dependence of the lowest energy bin in the Barr flux files.

2. I only have access to the Battistoni fluxes for electron neutrinos.
Therefore, I assume that the muon neutrino flux is twice as big, which seems
to be roughly correct based on the paper "The Atmospheric Neutrino Flux Below
100 MeV: The FLUKA Results".

To run it:

    $ ./apply-atmospheric-oscillations --flux-dir ~/atm-production/fluxes/irc01_plus_lowe \
                                       --osc-dir ~/atm-production/nucraft_osc
"""
from __future__ import print_function, division
import numpy as np

def pdg_code_to_string(pdg):
    if pdg == 12:
        return "nue"
    elif pdg == -12:
        return "nbe"
    elif pdg == 14:
        return "num"
    elif pdg == -14:
        return "nbm"
    elif pdg == 16:
        return "nut"
    elif pdg == -16:
        return "nbt"
    else:
        raise ValueError("Unknown pdg code %i" % pdg)

if __name__ == '__main__':
    import argparse
    import matplotlib.pyplot as plt
    from os.path import split, splitext, join
    from scipy.interpolate import RectBivariateSpline
    from scipy.integrate import dblquad, nquad
    import sys

    parser = argparse.ArgumentParser(description="combine low energy and high energy atmospheric fluxes and apply oscillations")
    parser.add_argument("--flux-dir",required=True,help="atmospheric production directory")
    parser.add_argument("--osc-dir",required=True,help="directory with atmospheric oscillations")
    args = parser.parse_args()

    # Energy and cos(zenith) bins for the fluxes from Barr
    barr_cos_theta_bins = np.linspace(-1,1,21)
    barr_energy_bins = np.logspace(-1,1,41)

    # Final energy bins
    # We go here from 10 MeV -> 10 GeV with 20 points per decade like the Barr
    # fluxes
    energy_bins = np.logspace(-2,1,61)
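    # (61 edges from 10^-2 to 10^1 GeV gives 60 bins, i.e. 20 per decade)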
    cos_theta_bins = barr_cos_theta_bins.copy()

    # Read in Battistoni fluxes
    nue_battistoni = np.genfromtxt(join(args.flux_dir,"lowe/nue.dat"))
    nbe_battistoni = np.genfromtxt(join(args.flux_dir,"lowe/nbe.dat"))

    # Convert Battistoni fluxes from MeV -> GeV
    nue_battistoni[:,0] /= 1000.0
    nue_battistoni[:,1] *= 1000.0
    nbe_battistoni[:,0] /= 1000.0
    nbe_battistoni[:,1] *= 1000.0

    # Convert Battistoni fluxes from cm^-2 -> m^-2
    nue_battistoni[:,1] *= 100.0**2
    nbe_battistoni[:,1] *= 100.0**2

    # Assume muon neutrino flux below 100 MeV is twice that of electron
    # neutrino flux
    num_battistoni = nue_battistoni.copy()
    num_battistoni[:,1] *= 2
    nbm_battistoni = nbe_battistoni.copy()
    nbm_battistoni[:,1] *= 2

    # Read in Barr fluxes
    nue_barr = np.genfromtxt(join(args.flux_dir,"irc01/fmax20_i0403z.sno_nue"))
    nbe_barr = np.genfromtxt(join(args.flux_dir,"irc01/fmax20_i0403z.sno_nbe"))
    num_barr = np.genfromtxt(join(args.flux_dir,"irc01/fmax20_i0403z.sno_num"))
    nbm_barr = np.genfromtxt(join(args.flux_dir,"irc01/fmax20_i0403z.sno_nbm"))

    # Read in oscillation probabilities
    nue_osc_prob = np.genfromtxt(join(args.osc_dir,"nue_osc_prob.txt"))
    nbe_osc_prob = np.genfromtxt(join(args.osc_dir,"nbe_osc_prob.txt"))
    num_osc_prob = np.genfromtxt(join(args.osc_dir,"num_osc_prob.txt"))
    nbm_osc_prob = np.genfromtxt(join(args.osc_dir,"nbm_osc_prob.txt"))

    # Reshape oscillation probability arrays. They should now look like:
    #
    #     [[[0.01,  -1,   P(nue), P(num), P(nut)],
    #       [0.01,  -0.8, P(nue), P(num), P(nut)],
    #                ...
    #       [0.01,  1.0,  P(nue), P(num), P(nut)]],
    #      [[0.02,  -1,   P(nue), P(num), P(nut)],
    #       [0.02,  -0.8, P(nue), P(num), P(nut)],
    #                ...
    #       [0.02,  1.0,  P(nue), P(num), P(nut)]],
    #                ...
    #      [[10.0,  -1,   P(nue), P(num), P(nut)],
    #       [10.0,  -0.8, P(nue), P(num), P(nut)],
    #                ...
    #       [10.0,  1.0,  P(nue), P(num), P(nut)]]]
    shape0 = len(np.unique(nue_osc_prob[:,0]))
    nue_osc_prob = nue_osc_prob.reshape((shape0,-1,5))
    nbe_osc_prob = nbe_osc_prob.reshape((shape0,-1,5))
    num_osc_prob = num_osc_prob.reshape((shape0,-1,5))
    nbm_osc_prob = nbm_osc_prob.reshape((shape0,-1,5))
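    # Optional sanity check (this assumes the files really do give P(nue),
    # P(num), and P(nut) for a single initial flavor, as sketched above): for
    # standard three-flavor oscillations each row should sum to roughly one.
    # assert np.allclose(nue_osc_prob[:,:,2:].sum(axis=-1), 1, atol=1e-2)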

    # convert dn/dlnE -> dn/dE (since dlnE = dE/E, divide the flux by E)
    nue_barr[:,2] /= nue_barr[:,0]
    nbe_barr[:,2] /= nbe_barr[:,0]
    num_barr[:,2] /= num_barr[:,0]
    nbm_barr[:,2] /= nbm_barr[:,0]

    # Reshape Barr flux arrays. They should now look like:
    #
    #     [[[0.1,  -1,   flux,...],
    #       [0.2,  -1,   flux,...],
    #               ...
    #       [10.0, -1,   flux,...]],
    # 
    #      [[0.1,  -0.9, flux,...],
    #       [0.2,  -0.9, flux,...],
    #              ...
    #       [10.0, -0.9, flux,...]],
    #              ...
    #              ...
    #      [[0.1,   1.0, flux,...],
    #       [0.2,   1.0, flux,...],
    #              ...
    #       [10.0,  1.0, flux,...]]]
    shape1 = len(np.unique(nue_barr[:,0]))
    nue_barr = nue_barr.reshape((-1,shape1,5))
    nbe_barr = nbe_barr.reshape((-1,shape1,5))
    num_barr = num_barr.reshape((-1,shape1,5))
    nbm_barr = nbm_barr.reshape((-1,shape1,5))

    nue_zenith_dist_x = nue_barr[:,0,1].copy()
    nue_zenith_dist_y = nue_barr[:,0,2].copy()
    nue_zenith_dist_y /= np.trapz(nue_zenith_dist_y,x=nue_zenith_dist_x)
    nbe_zenith_dist_x = nbe_barr[:,0,1].copy()
    nbe_zenith_dist_y = nbe_barr[:,0,2].copy()
    nbe_zenith_dist_y /= np.trapz(nbe_zenith_dist_y,x=nbe_zenith_dist_x)
    num_zenith_dist_x = num_barr[:,0,1].copy()
    num_zenith_dist_y = num_barr[:,0,2].copy()
    num_zenith_dist_y /= np.trapz(num_zenith_dist_y,x=num_zenith_dist_x)
    nbm_zenith_dist_x = nbm_barr[:,0,1].copy()
    nbm_zenith_dist_y = nbm_barr[:,0,2].copy()
    nbm_zenith_dist_y /= np.trapz(nbm_zenith_dist_y,x=nbm_zenith_dist_x)
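    # The *_zenith_dist arrays above are the zenith shape of the lowest energy
    # bin of each Barr flux, normalized to integrate to 1 over cos(zenith).
    # Per note 1 in the docstring, this shape is reused for the low energy
    # Battistoni fluxes below.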

    nue_battistoniz = np.empty((nue_barr.shape[0],nue_battistoni.shape[0],4))
    nue_battistoniz[:,:,0] = nue_battistoni[:,0]
    nue_battistoniz[:,:,1] = nue_zenith_dist_x[:,np.newaxis]
    nue_battistoniz[:,:,2] = nue_battistoni[:,1]*nue_zenith_dist_y[:,np.newaxis]/(2*np.pi)
    nue_battistoniz[:,:,3] = nue_battistoniz[:,:,2]*nue_battistoniz[:,:,0]

    nbe_battistoniz = np.empty((nbe_barr.shape[0],nbe_battistoni.shape[0],4))
    nbe_battistoniz[:,:,0] = nbe_battistoni[:,0]
    nbe_battistoniz[:,:,1] = nbe_zenith_dist_x[:,np.newaxis]
    nbe_battistoniz[:,:,2] = nbe_battistoni[:,1]*nbe_zenith_dist_y[:,np.newaxis]/(2*np.pi)
    nbe_battistoniz[:,:,3] = nbe_battistoniz[:,:,2]*nbe_battistoniz[:,:,0]

    num_battistoniz = np.empty((num_barr.shape[0],num_battistoni.shape[0],4))
    num_battistoniz[:,:,0] = num_battistoni[:,0]
    num_battistoniz[:,:,1] = num_zenith_dist_x[:,np.newaxis]
    num_battistoniz[:,:,2] = num_battistoni[:,1]*num_zenith_dist_y[:,np.newaxis]/(2*np.pi)
    num_battistoniz[:,:,3] = num_battistoniz[:,:,2]*num_battistoniz[:,:,0]

    nbm_battistoniz = np.empty((nbm_barr.shape[0],nbm_battistoni.shape[0],4))
    nbm_battistoniz[:,:,0] = nbm_battistoni[:,0]
    nbm_battistoniz[:,:,1] = nbm_zenith_dist_x[:,np.newaxis]
    nbm_battistoniz[:,:,2] = nbm_battistoni[:,1]*nbm_zenith_dist_y[:,np.newaxis]/(2*np.pi)
    nbm_battistoniz[:,:,3] = nbm_battistoniz[:,:,2]*nbm_battistoniz[:,:,0]
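    # Each *_battistoniz array now mimics a Barr flux block: the columns are
    # energy (GeV), cos(zenith), flux per steradian (the angle-integrated
    # Battistoni flux spread over zenith with the shape above and divided by
    # 2*pi for the azimuth), and dn/dlnE = E*dn/dE.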

    nue_total = np.empty((nue_barr.shape[0],nue_barr.shape[1]+nue_battistoniz.shape[1],4))
    nue_total[:,:nue_battistoniz.shape[1],:] = nue_battistoniz
    nue_total[:,nue_battistoniz.shape[1]:,:] = nue_barr[:,:,:4]

    nbe_total = np.empty((nbe_barr.shape[0],nbe_barr.shape[1]+nbe_battistoniz.shape[1],4))
    nbe_total[:,:nbe_battistoniz.shape[1],:] = nbe_battistoniz
    nbe_total[:,nbe_battistoniz.shape[1]:,:] = nbe_barr[:,:,:4]

    num_total = np.empty((num_barr.shape[0],num_barr.shape[1]+num_battistoniz.shape[1],4))
    num_total[:,:num_battistoniz.shape[1],:] = num_battistoniz
    num_total[:,num_battistoniz.shape[1]:,:] = num_barr[:,:,:4]

    nbm_total = np.empty((nbm_barr.shape[0],nbm_barr.shape[1]+nbm_battistoniz.shape[1],4))
    nbm_total[:,:nbm_battistoniz.shape[1],:] = nbm_battistoniz
    nbm_total[:,nbm_battistoniz.shape[1]:,:] = nbm_barr[:,:,:4]
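    # The combined *_total tables stack the low energy Battistoni flux below
    # the Barr flux along the energy axis for each cos(zenith) slice.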

    flux_arrays = {12: nue_total, -12: nbe_total,
                   14: num_total, -14: nbm_total}

    # Save unoscillated fluxes
    for p in [12,-12,14,-14]:
        np.savetxt("fmax20_i0403z_plus_lowe.sno_%s" % pdg_code_to_string(p),flux_arrays[p].reshape((flux_arrays[p].shape[0]*flux_arrays[p].shape[1],-1)),
                   fmt='%10.4f %10.3f %20.8g %20.8g',
                   header="Energy (GeV), Cos(zenith), Flux (m^-2 s^-1 Gev^-1 steradian^-1), Flux (dn/dlnE)")

    osc_prob = {12: nue_osc_prob, -12: nbe_osc_prob,
                14: num_osc_prob, -14: nbm_osc_prob}

    def get_flux_interp(nu_type):
        flux_array = flux_arrays[nu_type]
        flux = flux_array[:,:,2]
        e = flux_array[0,:,0]
        cos_theta = flux_array[:,0,1]

        # Use first order splines here since we need to enforce the fact that
        # the flux is not negative. Note that `flux` has shape
        # (n_cos_theta, n_energy), so we pass flux.T since RectBivariateSpline
        # expects an array of shape (len(e), len(cos_theta))
        return RectBivariateSpline(e,cos_theta,flux.T,kx=1,ky=1)

    def get_osc_interp(nu_type1,nu_type2):
        prob = osc_prob[nu_type1]
        e = prob[:,0,0]
        cos_theta = prob[0,:,1]

        if np.sign(nu_type1) != np.sign(nu_type2):
            raise ValueError("asking for oscillation probability from %s -> %s!" % tuple(map(pdg_code_to_string,(nu_type1,nu_type2))))

        # Use second order splines here since oscillation probabilities are
        # pretty smooth
        if abs(nu_type2) == 12:
            return RectBivariateSpline(e,cos_theta,prob[:,:,2],kx=2,ky=2)
        elif abs(nu_type2) == 14:
            return RectBivariateSpline(e,cos_theta,prob[:,:,3],kx=2,ky=2)
        elif abs(nu_type2) == 16:
            return RectBivariateSpline(e,cos_theta,prob[:,:,4],kx=2,ky=2)
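
    # e.g. get_osc_interp(14, 12)(1.0, -0.5) returns a 1x1 array with the
    # interpolated P(num -> nue) at E = 1 GeV and cos(zenith) = -0.5.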

    def get_oscillated_flux(nu_type1, nu_type2, elo, emid, ehi, zlo, zmid, zhi):
        """
        Returns the average flux of `nu_type2` neutrinos produced by
        `nu_type1` neutrinos oscillating, for the bin with energy between
        `elo` and `ehi` and cosine of the zenith angle between `zlo` and
        `zhi`.

        Note: We also pass the midpoint of the bin for both energy and
        cos(theta) as `emid` and `zmid`. The reason for this is that at least
        for the Barr fluxes, the midpoint of the bin was chosen as the midpoint
        of the log of the left and right edge instead of the actual middle of
        the bin. Therefore to be consistent when interpolating these fluxes we
        should use the same value.

        The returned flux has units of 1/(m^2 sec steradian GeV).
        """
        f_flux1 = get_flux_interp(nu_type1)
        f_osc1 = get_osc_interp(nu_type1,nu_type2)

        def f_flux(e,z):
            # Not sure exactly how scipy deals with boundary issues in
            # RectBivariateSpline so we just make sure everything is within the
            # range of the values given in the Barr and Battistoni fluxes
            if e < 0.0106:
                e = 0.0106
            if z < -0.95:
                z = -0.95
            if z > 0.95:
                z = 0.95
            return f_flux1(e,z)

        def f_osc(e,z):
            # FIXME: Should remove this
            if e < 0.02:
                e = 0.02
            return f_osc1(e,z)

        # Here we have two options for calculating the oscillated flux:
        #
        # 1. We interpolate the value of the flux and then multiply by an
        # average oscillation probability. This is more correct in the sense
        # that the original flux values are actually a histogram and so it
        # makes sense to just sample the flux at the middle of the bin.
        #
        # 2. We integrate the product of the flux and the oscillation
        # probability over the whole bin. This is technically more correct
        # since it will weight the oscillation probability by the flux, but it
        # may have issues near the edge of the first and last bin since we have
        # to extrapolate.
        #
        # For now, we do 1 since it is much faster. To do the second you can
        # just comment out the next line.
        return f_flux(emid,zmid)*f_osc1.integral(elo,ehi,zlo, zhi)/(ehi-elo)/(zhi-zlo)
        return nquad(lambda e, z: f_flux(e,z)*f_osc(e,z), [(elo,ehi), (zlo, zhi)], opts={'limit':1000,'epsrel':1e-3})[0]/(ehi-elo)/(zhi-zlo)
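
    # e.g. get_oscillated_flux(14, 16, elo, emid, ehi, zlo, zmid, zhi) gives
    # the bin-averaged flux of tau neutrinos produced by oscillating muon
    # neutrinos over the bin [elo, ehi] x [zlo, zhi], in 1/(m^2 s sr GeV).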

    data = {nu_type: np.zeros((cos_theta_bins.shape[0]-1,energy_bins.shape[0]-1,4)) for nu_type in [12,-12,14,-14,16,-16]}
    for nu_type1 in [12,-12,14,-14]:
        if nu_type1 > 0:
            nu_type2s = [12,14,16]
        else:
            nu_type2s = [-12,-14,-16]
        for nu_type2 in nu_type2s:
            print("Calculating neutrino flux for %s -> %s" % tuple(map(pdg_code_to_string,(nu_type1,nu_type2))))
            for i, (elo, ehi) in enumerate(zip(energy_bins[:-1],energy_bins[1:])):
                for j, (zlo, zhi) in enumerate(zip(cos_theta_bins[:-1],cos_theta_bins[1:])):
                    print("\r%i/%i" % (i*(len(cos_theta_bins)-1)+j+1,(len(cos_theta_bins)-1)*(len(energy_bins)-1)),end="")
                    sys.stdout.flush()
                    # We'll follow the Barr convention here and take the midpoint in log space
                    emid = np.exp((np.log(elo)+np.log(ehi))/2)
                    zmid = (zlo + zhi)/2
                    data[nu_type2][j,i,0] = emid
                    data[nu_type2][j,i,1] = zmid
                    data[nu_type2][j,i,2] += get_oscillated_flux(nu_type1,nu_type2,elo,emid,ehi,zlo,zmid,zhi)
            print()

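    # Fill in the dn/dlnE column for the oscillated fluxes: dn/dlnE = E*dn/dE.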
    for nu_type2 in data:
        for i, (elo, ehi) in enumerate(zip(energy_bins[:-1],energy_bins[1:])):
            for j, (zlo, zhi) in enumerate(zip(cos_theta_bins[:-1],cos_theta_bins[1:])):
                data[nu_type2][j,i,3] = data[nu_type2][j,i,2]*data[nu_type2][j,i,0]

    for p in [12,-12,14,-14,16,-16]:
        np.savetxt("fmax20_i0403z_plus_lowe.sno_%s.osc" % pdg_code_to_string(p),data[p].reshape((data[p].shape[0]*data[p].shape[1],-1)),
                   fmt='%10.4f %10.3f %20.8g %20.8g',
                   header="Energy (GeV), Cos(zenith), Flux (m^-2 s^-1 GeV^-1 steradian^-1), Flux (dn/dlnE)")