author     tlatorre <tlatorre@uchicago.edu>    2020-05-11 10:30:39 -0500
committer  tlatorre <tlatorre@uchicago.edu>    2020-05-11 10:30:39 -0500
commit     15fc972c89a4366a06755daeedaac52f91762ecd (patch)
tree       9a5dbea7787cef9946473787e9a3996f24cd2898 /utils/plot-energy
parent     651cbe5d261a6d29b4dec7c38b65c0eac5431363 (diff)
update utils/ folder to make a python package called sddm
This commit adds an sddm Python package to the utils/ folder, which lets me consolidate code used across the various scripts. The package is now installed by default to /home/tlatorre/local/lib/python2.7/site-packages, so before using the scripts installed to ~/local/bin you should add the following to your .bashrc file:

    export PYTHONPATH=$HOME/local/lib/python2.7/site-packages/:$PYTHONPATH
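As a quick sanity check of the new setup (a minimal sketch, not part of this commit: it assumes the sddm.plot and sddm.plot_energy modules that this change imports below, and the retrigger_cut helper that the script continues to call):

    # Verify that the .bashrc change put the package on the module search path
    # and that the helpers this script now imports from sddm resolve correctly.
    import sys
    print([p for p in sys.path if 'site-packages' in p])

    from sddm.plot import despine               # plotting helper now provided by the package
    from sddm.plot_energy import retrigger_cut  # event-selection helper now provided by the package
    print(despine, retrigger_cut)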
Diffstat (limited to 'utils/plot-energy')
-rwxr-xr-x  utils/plot-energy  675
1 file changed, 43 insertions, 632 deletions
diff --git a/utils/plot-energy b/utils/plot-energy
index a057302..5c33969 100755
--- a/utils/plot-energy
+++ b/utils/plot-energy
@@ -37,61 +37,8 @@ from scipy.stats import iqr, norm, beta
from scipy.special import spence
from itertools import izip_longest
-PSUP_RADIUS = 840.0
-
-# from https://stackoverflow.com/questions/287871/how-to-print-colored-text-in-terminal-in-python
-class bcolors:
- HEADER = '\033[95m'
- OKBLUE = '\033[94m'
- OKGREEN = '\033[92m'
- WARNING = '\033[93m'
- FAIL = '\033[91m'
- ENDC = '\033[0m'
- BOLD = '\033[1m'
- UNDERLINE = '\033[4m'
-
-# on retina screens, the default plots are way too small
-# by using Qt5 and setting QT_AUTO_SCREEN_SCALE_FACTOR=1
-# Qt5 will scale everything using the dpi in ~/.Xresources
-import matplotlib
-matplotlib.use("Qt5Agg")
-
-font = {'family':'serif', 'serif': ['computer modern roman']}
-matplotlib.rc('font',**font)
-
-matplotlib.rc('text', usetex=True)
-
-SNOMAN_MASS = {
- 20: 0.511,
- 21: 0.511,
- 22: 105.658,
- 23: 105.658
-}
-
-AV_RADIUS = 600.0
-
-# Data cleaning bitmasks.
-DC_MUON = 0x1
-DC_JUNK = 0x2
-DC_CRATE_ISOTROPY = 0x4
-DC_QVNHIT = 0x8
-DC_NECK = 0x10
-DC_FLASHER = 0x20
-DC_ESUM = 0x40
-DC_OWL = 0x80
-DC_OWL_TRIGGER = 0x100
-DC_FTS = 0x200
-DC_ITC = 0x400
-DC_BREAKDOWN = 0x800
-
particle_id = {20: 'e', 22: r'\mu'}
-def grouper(iterable, n, fillvalue=None):
- "Collect data into fixed-length chunks or blocks"
- # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
- args = [iter(iterable)] * n
- return izip_longest(fillvalue=fillvalue, *args)
-
def plot_hist2(df, muons=False):
for id, df_id in sorted(df.groupby('id')):
if id == 20:
@@ -148,550 +95,14 @@ def plot_hist(df, muons=False):
if len(df):
plt.tight_layout()
-def chunks(l, n):
- """Yield successive n-sized chunks from l."""
- for i in range(0, len(l), n):
- yield l[i:i + n]
-
-def print_warning(msg):
- print(bcolors.FAIL + msg + bcolors.ENDC,file=sys.stderr)
-
-def unwrap(p, delta, axis=-1):
- """
- A modified version of np.unwrap() useful for unwrapping the 50 MHz clock.
- It unwraps discontinuities bigger than delta/2 by delta.
-
- Example:
-
- >>> a = np.arange(10) % 5
- >>> a
- array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
- >>> unwrap(a,5)
- array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
-
- In the case of the 50 MHz clock delta should be 0x7ffffffffff*20.0.
- """
- p = np.asarray(p)
- nd = p.ndim
- dd = np.diff(p, axis=axis)
- slice1 = [slice(None, None)]*nd # full slices
- slice1[axis] = slice(1, None)
- slice1 = tuple(slice1)
- ddmod = np.mod(dd + delta/2, delta) - delta/2
- np.copyto(ddmod, delta/2, where=(ddmod == -delta/2) & (dd > 0))
- ph_correct = ddmod - dd
- np.copyto(ph_correct, 0, where=abs(dd) < delta/2)
- up = np.array(p, copy=True, dtype='d')
- up[slice1] = p[slice1] + ph_correct.cumsum(axis)
- return up
-
-def unwrap_50_mhz_clock(gtr):
- """
- Unwrap an array with 50 MHz clock times. These times should all be in
- nanoseconds and come from the KEV_GTR variable in the EV bank.
-
- Note: We assume here that the events are already ordered contiguously by
- GTID, so you shouldn't pass an array with multiple runs!
- """
- return unwrap(gtr,0x7ffffffffff*20.0)
-
-def retrigger_cut(ev):
- """
- Cuts all retrigger events.
- """
- return ev[ev.dt > 500]
-
-def breakdown_follower_cut(ev):
- """
- Cuts all events within 1 second of breakdown events.
- """
- breakdowns = ev[ev.dc & DC_BREAKDOWN != 0]
- return ev[~np.any((ev.gtr.values > breakdowns.gtr.values[:,np.newaxis]) & \
- (ev.gtr.values < breakdowns.gtr.values[:,np.newaxis] + 1e9),axis=0)]
-
-def flasher_follower_cut(ev):
- """
- Cuts all events within 200 microseconds of flasher events.
- """
- flashers = ev[ev.dc & DC_FLASHER != 0]
- return ev[~np.any((ev.gtr.values > flashers.gtr.values[:,np.newaxis]) & \
- (ev.gtr.values < flashers.gtr.values[:,np.newaxis] + 200e3),axis=0)]
-
-def muon_follower_cut(ev):
- """
- Cuts all events 200 microseconds after a muon.
- """
- muons = ev[ev.dc & DC_MUON != 0]
- return ev[~np.any((ev.gtr.values > muons.gtr.values[:,np.newaxis]) & \
- (ev.gtr.values < muons.gtr.values[:,np.newaxis] + 200e3),axis=0)]
-
-def michel_cut(ev):
- """
- Looks for Michel electrons after muons.
- """
- prompt_plus_muons = ev[ev.prompt | ((ev.dc & DC_MUON) != 0)]
-
- # Michel electrons and neutrons can be any event which is not a prompt
- # event
- follower = ev[~ev.prompt]
-
- # require Michel events to pass more of the SNO data cleaning cuts
- michel = follower[follower.dc & (DC_JUNK | DC_CRATE_ISOTROPY | DC_QVNHIT | DC_FLASHER | DC_NECK | DC_ESUM | DC_OWL | DC_OWL_TRIGGER | DC_FTS) == 0]
-
- michel = michel[michel.nhit >= 100]
-
- # Accept events which had a muon more than 800 nanoseconds but less than 20
- # microseconds before them. The 800 nanoseconds cut comes from Richie's
- # thesis. He also mentions that the In Time Channel Spread Cut is very
- # effective at cutting electron events caused by muons, so I should
- # implement that.
- #
- # Note: We currently don't look across run boundaries. This should be a
- # *very* small effect, and the logic to do so would be very complicated
- # since I would have to deal with 50 MHz clock rollovers, etc.
- if prompt_plus_muons.size and michel.size:
- mask = (michel.gtr.values > prompt_plus_muons.gtr.values[:,np.newaxis] + 800) & \
- (michel.gtr.values < prompt_plus_muons.gtr.values[:,np.newaxis] + 20e3)
- michel = michel.iloc[np.any(mask,axis=0)]
- michel['muon_gtid'] = pd.Series(prompt_plus_muons['gtid'].iloc[np.argmax(mask[:,np.any(mask,axis=0)],axis=0)].values,
- index=michel.index.values,
- dtype=np.int32)
- return michel
- else:
- # Return an empty slice since we need it to have the same datatype as
- # the other dataframes
- michel = ev[:0]
- michel['muon_gtid'] = -1
- return michel
-
-def atmospheric_events(ev):
- """
- Tags atmospheric events which have a neutron follower.
- """
- prompt = ev[ev.prompt]
-
- # Michel electrons and neutrons can be any event which is not a prompt
- # event
- follower = ev[~ev.prompt]
-
- ev['atm'] = np.zeros(len(ev),dtype=np.bool)
-
- if prompt.size and follower.size:
- # neutron followers have to obey stricter set of data cleaning cuts
- neutron = follower[follower.dc & (DC_JUNK | DC_CRATE_ISOTROPY | DC_QVNHIT | DC_FLASHER | DC_NECK | DC_ESUM | DC_OWL | DC_OWL_TRIGGER | DC_FTS) == 0]
- neutron = neutron[~np.isnan(neutron.ftp_x) & ~np.isnan(neutron.rsp_energy)]
- # FIXME: What should the radius cut be here? AV? (r/r_psup)^3 < 0.9?
- neutron = neutron[neutron.ftp_r < AV_RADIUS]
- neutron = neutron[neutron.rsp_energy > 4.0]
-
- # neutron events accepted after 20 microseconds and before 250 ms (50 ms during salt)
- ev.loc[ev.prompt,'atm'] = np.any((neutron.gtr.values > prompt.gtr.values[:,np.newaxis] + 20e3) & \
- (neutron.gtr.values < prompt.gtr.values[:,np.newaxis] + 250e6),axis=1)
-
- return ev
-
-def gtid_sort(ev, first_gtid):
- """
- Adds 0x1000000 to the gtid_sort column for all gtids before the first gtid
- in a run, which should be passed as a dictionary. This column can then be
- used to sort the events sequentially.
-
- This function should be passed to ev.groupby('run').apply(). We use this
- idiom instead of just looping over the groupby results since groupby()
- makes a copy of the dataframe, i.e.
-
- for run, ev_run in ev.groupby('run'):
- ev_run.loc[ev_run.gtid < first_gtid[run],'gtid_sort'] += 0x1000000
-
- would produce a SettingWithCopyWarning, so instead we use:
-
- ev = ev.groupby('run',as_index=False).apply(gtid_sort,first_gtid=first_gtid)
-
- which doesn't have this problem.
- """
- # see https://stackoverflow.com/questions/32460593/including-the-group-name-in-the-apply-function-pandas-python
- run = ev.name
-
- if run not in first_gtid:
- print_warning("No RHDR bank for run %i! Assuming first event is the first GTID." % run)
- first_gtid[run] = ev.gtid.iloc[0]
-
- ev.loc[ev.gtid < first_gtid[run],'gtid_sort'] += 0x1000000
-
- return ev
-
-def prompt_event(ev):
- ev['prompt'] = (ev.nhit >= 100)
- ev.loc[ev.prompt,'prompt'] &= np.concatenate(([True],np.diff(ev[ev.prompt].gtr.values) > 250e6))
- return ev
-
-# Taken from https://raw.githubusercontent.com/mwaskom/seaborn/c73055b2a9d9830c6fbbace07127c370389d04dd/seaborn/utils.py
-def despine(fig=None, ax=None, top=True, right=True, left=False,
- bottom=False, offset=None, trim=False):
- """Remove the top and right spines from plot(s).
-
- fig : matplotlib figure, optional
- Figure to despine all axes of, default uses current figure.
- ax : matplotlib axes, optional
- Specific axes object to despine.
- top, right, left, bottom : boolean, optional
- If True, remove that spine.
- offset : int or dict, optional
- Absolute distance, in points, spines should be moved away
- from the axes (negative values move spines inward). A single value
- applies to all spines; a dict can be used to set offset values per
- side.
- trim : bool, optional
- If True, limit spines to the smallest and largest major tick
- on each non-despined axis.
-
- Returns
- -------
- None
-
- """
- # Get references to the axes we want
- if fig is None and ax is None:
- axes = plt.gcf().axes
- elif fig is not None:
- axes = fig.axes
- elif ax is not None:
- axes = [ax]
-
- for ax_i in axes:
- for side in ["top", "right", "left", "bottom"]:
- # Toggle the spine objects
- is_visible = not locals()[side]
- ax_i.spines[side].set_visible(is_visible)
- if offset is not None and is_visible:
- try:
- val = offset.get(side, 0)
- except AttributeError:
- val = offset
- _set_spine_position(ax_i.spines[side], ('outward', val))
-
- # Potentially move the ticks
- if left and not right:
- maj_on = any(
- t.tick1line.get_visible()
- for t in ax_i.yaxis.majorTicks
- )
- min_on = any(
- t.tick1line.get_visible()
- for t in ax_i.yaxis.minorTicks
- )
- ax_i.yaxis.set_ticks_position("right")
- for t in ax_i.yaxis.majorTicks:
- t.tick2line.set_visible(maj_on)
- for t in ax_i.yaxis.minorTicks:
- t.tick2line.set_visible(min_on)
-
- if bottom and not top:
- maj_on = any(
- t.tick1line.get_visible()
- for t in ax_i.xaxis.majorTicks
- )
- min_on = any(
- t.tick1line.get_visible()
- for t in ax_i.xaxis.minorTicks
- )
- ax_i.xaxis.set_ticks_position("top")
- for t in ax_i.xaxis.majorTicks:
- t.tick2line.set_visible(maj_on)
- for t in ax_i.xaxis.minorTicks:
- t.tick2line.set_visible(min_on)
-
- if trim:
- # clip off the parts of the spines that extend past major ticks
- xticks = ax_i.get_xticks()
- if xticks.size:
- firsttick = np.compress(xticks >= min(ax_i.get_xlim()),
- xticks)[0]
- lasttick = np.compress(xticks <= max(ax_i.get_xlim()),
- xticks)[-1]
- ax_i.spines['bottom'].set_bounds(firsttick, lasttick)
- ax_i.spines['top'].set_bounds(firsttick, lasttick)
- newticks = xticks.compress(xticks <= lasttick)
- newticks = newticks.compress(newticks >= firsttick)
- ax_i.set_xticks(newticks)
-
- yticks = ax_i.get_yticks()
- if yticks.size:
- firsttick = np.compress(yticks >= min(ax_i.get_ylim()),
- yticks)[0]
- lasttick = np.compress(yticks <= max(ax_i.get_ylim()),
- yticks)[-1]
- ax_i.spines['left'].set_bounds(firsttick, lasttick)
- ax_i.spines['right'].set_bounds(firsttick, lasttick)
- newticks = yticks.compress(yticks <= lasttick)
- newticks = newticks.compress(newticks >= firsttick)
- ax_i.set_yticks(newticks)
-
-def plot_corner_plot(ev, title, save=None):
- variables = ['r_psup','psi','z','udotr']
- labels = [r'$(r/r_\mathrm{PSUP})^3$',r'$\psi$','z',r'$\vec{u}\cdot\vec{r}$']
- limits = [(0,1),(0,10),(-840,840),(-1,1)]
- cuts = [0.9,6,0,-0.5]
-
- ev = ev.dropna(subset=variables)
-
- fig = plt.figure(figsize=(FIGSIZE[0],FIGSIZE[0]))
- despine(fig,trim=True)
- for i in range(len(variables)):
- for j in range(len(variables)):
- if j > i:
- continue
- ax = plt.subplot(len(variables),len(variables),i*len(variables)+j+1)
- if i == j:
- plt.hist(ev[variables[i]],bins=np.linspace(limits[i][0],limits[i][1],100),histtype='step')
- plt.gca().set_xlim(limits[i])
- else:
- plt.scatter(ev[variables[j]],ev[variables[i]],s=0.5)
- plt.gca().set_xlim(limits[j])
- plt.gca().set_ylim(limits[i])
- n = len(ev)
- if n:
- p_i_lo = np.count_nonzero(ev[variables[i]] < cuts[i])/n
- p_j_lo = np.count_nonzero(ev[variables[j]] < cuts[j])/n
- p_lolo = p_i_lo*p_j_lo
- p_lohi = p_i_lo*(1-p_j_lo)
- p_hilo = (1-p_i_lo)*p_j_lo
- p_hihi = (1-p_i_lo)*(1-p_j_lo)
- n_lolo = np.count_nonzero((ev[variables[i]] < cuts[i]) & (ev[variables[j]] < cuts[j]))
- n_lohi = np.count_nonzero((ev[variables[i]] < cuts[i]) & (ev[variables[j]] >= cuts[j]))
- n_hilo = np.count_nonzero((ev[variables[i]] >= cuts[i]) & (ev[variables[j]] < cuts[j]))
- n_hihi = np.count_nonzero((ev[variables[i]] >= cuts[i]) & (ev[variables[j]] >= cuts[j]))
- observed = np.array([n_lolo,n_lohi,n_hilo,n_hihi])
- expected = n*np.array([p_lolo,p_lohi,p_hilo,p_hihi])
- psi = -poisson.logpmf(observed,expected).sum() + poisson.logpmf(observed,observed).sum()
- psi /= np.std(-poisson.logpmf(np.random.poisson(observed,size=(10000,4)),observed).sum(axis=1) + poisson.logpmf(observed,observed).sum())
- plt.title(r"$\psi = %.1f$" % psi)
- if i == len(variables) - 1:
- plt.xlabel(labels[j])
- else:
- plt.setp(ax.get_xticklabels(),visible=False)
- if j == 0:
- plt.ylabel(labels[i])
- else:
- plt.setp(ax.get_yticklabels(),visible=False)
- plt.axvline(cuts[j],color='k',ls='--',alpha=0.5)
- if i != j:
- plt.axhline(cuts[i],color='k',ls='--',alpha=0.5)
-
- plt.tight_layout()
-
- if save:
- plt.savefig(save + ".pdf")
- plt.savefig(save + ".eps")
-
- plt.suptitle(title)
-
-def intersect_sphere(pos, dir, R):
- """
- Compute the first intersection of a ray starting at `pos` with direction
- `dir` and a sphere centered at the origin with radius `R`. The distance to
- the intersection is returned.
-
- Example:
-
- pos = np.array([0,0,0])
- dir = np.array([1,0,0])
-
- l = intersect_sphere(pos,dir,PSUP_RADIUS):
- if l is not None:
- hit = pos + l*dir
- print("ray intersects sphere at %.2f %.2f %.2f", hit[0], hit[1], hit[2])
- else:
- print("ray didn't intersect sphere")
- """
-
- b = 2*np.dot(dir,pos)
- c = np.dot(pos,pos) - R*R
-
- if b*b - 4*c <= 0:
- # Ray doesn't intersect the sphere.
- return None
-
- # First, check the shorter solution.
- l = (-b - np.sqrt(b*b - 4*c))/2
-
- # If the shorter solution is less than 0, check the second solution.
- if l < 0:
- l = (-b + np.sqrt(b*b - 4*c))/2
-
- # If the distance is still negative, we didn't intersect the sphere.
- if l < 0:
- return None
-
- return l
-
-def get_dx(row):
- pos = np.array([row.x,row.y,row.z])
- dir = np.array([np.sin(row.theta1)*np.cos(row.phi1),
- np.sin(row.theta1)*np.sin(row.phi1),
- np.cos(row.theta1)])
- l = intersect_sphere(pos,-dir,PSUP_RADIUS)
- if l is not None:
- pos -= dir*l
- michel_pos = np.array([row.x_michel,row.y_michel,row.z_michel])
- return np.linalg.norm(michel_pos-pos)
- else:
- return 0
-
-def dx_to_energy(dx):
- lines = []
- with open("../src/muE_water_liquid.txt") as f:
- for i, line in enumerate(f):
- if i < 10:
- continue
- if 'Minimum ionization' in line:
- continue
- if 'Muon critical energy' in line:
- continue
- lines.append(line)
- data = np.genfromtxt(lines)
- return np.interp(dx,data[:,8],data[:,0])
-
-def iqr_std_err(x):
- """
- Returns the approximate standard deviation assuming the central part of the
- distribution is gaussian.
- """
- x = x.dropna()
- n = len(x)
- if n == 0:
- return np.nan
- # see https://stats.stackexchange.com/questions/110902/error-on-interquartile-range
- std = iqr(x.values)/1.3489795
- return 1.573*std/np.sqrt(n)
-
-def iqr_std(x):
- """
- Returns the approximate standard deviation assuming the central part of the
- distribution is gaussian.
- """
- x = x.dropna()
- n = len(x)
- if n == 0:
- return np.nan
- return iqr(x.values)/1.3489795
-
-def quantile_error(x,q):
- """
- Returns the standard error for the qth quantile of `x`. The error is
- computed using the Maritz-Jarrett method described here:
- https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/quantse.htm.
- """
- x = np.sort(x)
- n = len(x)
- m = int(q*n+0.5)
- A = m - 1
- B = n - m
- i = np.arange(1,len(x)+1)
- w = beta.cdf(i/n,A,B) - beta.cdf((i-1)/n,A,B)
- return np.sqrt(np.sum(w*x**2)-np.sum(w*x)**2)
-
-def q90_err(x):
- """
- Returns the error on the 90th percentile for all the non NaN values in a
- Series `x`.
- """
- x = x.dropna()
- n = len(x)
- if n == 0:
- return np.nan
- return quantile_error(x.values,0.9)
-
-def q90(x):
- """
- Returns the 90th percentile for all the non NaN values in a Series `x`.
- """
- x = x.dropna()
- n = len(x)
- if n == 0:
- return np.nan
- return np.percentile(x.values,90.0)
-
-def median(x):
- """
- Returns the median for all the non NaN values in a Series `x`.
- """
- x = x.dropna()
- n = len(x)
- if n == 0:
- return np.nan
- return np.median(x.values)
-
-def median_err(x):
- """
- Returns the approximate error on the median for all the non NaN values in a
- Series `x`. The error on the median is approximated assuming the central
- part of the distribution is gaussian.
- """
- x = x.dropna()
- n = len(x)
- if n == 0:
- return np.nan
- # First we estimate the standard deviation using the interquartile range.
- # Here we are essentially assuming the central part of the distribution is
- # gaussian.
- std = iqr(x.values)/1.3489795
- median = np.median(x.values)
- # Now we estimate the error on the median for a gaussian
- # See https://stats.stackexchange.com/questions/45124/central-limit-theorem-for-sample-medians.
- return 1/(2*np.sqrt(n)*norm.pdf(median,median,std))
-
-def std_err(x):
- x = x.dropna()
- mean = np.mean(x)
- std = np.std(x)
- n = len(x)
- if n == 0:
- return np.nan
- elif n == 1:
- return 0.0
- u4 = np.mean((x-mean)**4)
- error = np.sqrt((u4-(n-3)*std**4/(n-1))/n)/(2*std)
- return error
-
-# Fermi constant
-GF = 1.16637887e-5 # 1/MeV^2
-ELECTRON_MASS = 0.5109989461 # MeV
-MUON_MASS = 105.6583745 # MeV
-PROTON_MASS = 938.272081 # MeV
-FINE_STRUCTURE_CONSTANT = 7.297352566417e-3
-
-def f(x):
- y = (5/(3*x**2) + 16*x/3 + 4/x + (12-8*x)*np.log(1/x-1) - 8)*np.log(MUON_MASS/ELECTRON_MASS)
- y += (6-4*x)*(2*spence(x) - 2*np.log(x)**2 + np.log(x) + np.log(1-x)*(3*np.log(x)-1/x-1) - np.pi**2/3-2)
- y += (1-x)*(34*x**2+(5-34*x**2+17*x)*np.log(x) - 22*x)/(3*x**2)
- y += 6*(1-x)*np.log(x)
- return y
-
-def michel_spectrum(T):
- """
- Michel electron energy spectrum for a free muon. `T` should be the kinetic
- energy of the electron or positron in MeV.
-
- Note: The result is not normalized.
-
- From https://arxiv.org/abs/1406.3575.
- """
- E = T + ELECTRON_MASS
- x = 2*E/MUON_MASS
- mask = (x > 0) & (x < 1)
- y = np.zeros_like(x,dtype=np.double)
- y[mask] = GF**2*MUON_MASS**5*x[mask]**2*(6-4*x[mask]+FINE_STRUCTURE_CONSTANT*f(x[mask])/np.pi)/(192*np.pi**3)
- y *= 2*MUON_MASS
- return y
-
if __name__ == '__main__':
import argparse
- import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sys
import h5py
+ from sddm.plot_energy import *
+ from sddm.plot import despine
parser = argparse.ArgumentParser("plot fit results")
parser.add_argument("filenames", nargs='+', help="input files")
@@ -699,6 +110,47 @@ if __name__ == '__main__':
parser.add_argument("--save", action='store_true', default=False, help="save corner plots for backgrounds")
args = parser.parse_args()
+ if args.save:
+ # default \textwidth for a fullpage article in Latex is 16.50764 cm.
+ # You can figure this out by compiling the following TeX document:
+ #
+ # \documentclass{article}
+ # \usepackage{fullpage}
+ # \usepackage{layouts}
+ # \begin{document}
+ # textwidth in cm: \printinunitsof{cm}\prntlen{\textwidth}
+ # \end{document}
+
+ width = 16.50764
+ width /= 2.54 # cm -> inches
+ # According to this page:
+ # http://www-personal.umich.edu/~jpboyd/eng403_chap2_tuftegospel.pdf,
+ # Tufte suggests an aspect ratio of 1.5 - 1.6.
+ height = width/1.5
+ FIGSIZE = (width,height)
+
+ import matplotlib.pyplot as plt
+
+ font = {'family':'serif', 'serif': ['computer modern roman']}
+ plt.rc('font',**font)
+
+ plt.rc('text', usetex=True)
+ else:
+ # on retina screens, the default plots are way too small
+ # by using Qt5 and setting QT_AUTO_SCREEN_SCALE_FACTOR=1
+ # Qt5 will scale everything using the dpi in ~/.Xresources
+ import matplotlib
+ matplotlib.use("Qt5Agg")
+
+ import matplotlib.pyplot as plt
+
+ # Default figure size. Currently set to my monitor width and height so that
+ # things are properly formatted
+ FIGSIZE = (13.78,7.48)
+
+ # Make the default font bigger
+ plt.rc('font', size=22)
+
ev = pd.concat([pd.read_hdf(filename, "ev") for filename in args.filenames],ignore_index=True)
fits = pd.concat([pd.read_hdf(filename, "fits") for filename in args.filenames],ignore_index=True)
rhdr = pd.concat([pd.read_hdf(filename, "rhdr") for filename in args.filenames],ignore_index=True)
@@ -834,47 +286,6 @@ if __name__ == '__main__':
# retrigger cut
ev = ev.groupby('run',group_keys=False).apply(retrigger_cut)
- if args.save:
- # default \textwidth for a fullpage article in Latex is 16.50764 cm.
- # You can figure this out by compiling the following TeX document:
- #
- # \documentclass{article}
- # \usepackage{fullpage}
- # \usepackage{layouts}
- # \begin{document}
- # textwidth in cm: \printinunitsof{cm}\prntlen{\textwidth}
- # \end{document}
-
- width = 16.50764
- width /= 2.54 # cm -> inches
- # According to this page:
- # http://www-personal.umich.edu/~jpboyd/eng403_chap2_tuftegospel.pdf,
- # Tufte suggests an aspect ratio of 1.5 - 1.6.
- height = width/1.5
- FIGSIZE = (width,height)
-
- import matplotlib.pyplot as plt
-
- font = {'family':'serif', 'serif': ['computer modern roman']}
- plt.rc('font',**font)
-
- plt.rc('text', usetex=True)
- else:
- # on retina screens, the default plots are way too small
- # by using Qt5 and setting QT_AUTO_SCREEN_SCALE_FACTOR=1
- # Qt5 will scale everything using the dpi in ~/.Xresources
- import matplotlib
- matplotlib.use("Qt5Agg")
-
- import matplotlib.pyplot as plt
-
- # Default figure size. Currently set to my monitor width and height so that
- # things are properly formatted
- FIGSIZE = (13.78,7.48)
-
- # Make the default font bigger
- plt.rc('font', size=22)
-
if args.dc:
ev = ev[ev.prompt]