#!/usr/bin/env python
# Copyright (c) 2019, Anthony Latorre
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <https://www.gnu.org/licenses/>.
"""
Script to submit one grid (condor) fit job per event/particle-hypothesis
combination for a set of zdab files. For each input file it runs `zdab-cat`
to dump the events to a temporary HDF5 file, then submits a `fit` job for
every event above the nhit threshold. A state file (YAML list of processed
filenames) is kept so that re-running the script skips files already done.
"""
from __future__ import print_function, division
import yaml
try:
    from yaml import CLoader as Loader
except ImportError:
    from yaml.loader import SafeLoader as Loader
import string
from os.path import split, join, abspath
import uuid
from subprocess import check_call
import os
import sys

# Next two functions are a backport of the shutil.which() function from Python
# 3.3 from Lib/shutil.py in the CPython code. See
# https://github.com/python/cpython/blob/master/Lib/shutil.py.

def _access_check(fn, mode):
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `fn` is not a directory, as on Windows
    # directories pass the os.access check.
    return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn))

def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.

    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.
    """
    # If we're given a path with a directory part, look it up directly rather
    # than referring to PATH directories. This includes checking relative to
    # the current directory, e.g. ./script
    if os.path.dirname(cmd):
        if _access_check(cmd, mode):
            return cmd
        return None

    if path is None:
        path = os.environ.get("PATH", None)
    if path is None:
        try:
            path = os.confstr("CS_PATH")
        except (AttributeError, ValueError):
            # os.confstr() or CS_PATH is not available
            path = os.defpath
    # bpo-35755: Don't use os.defpath if the PATH environment variable is
    # set to an empty string.
    # PATH='' doesn't match, whereas PATH=':' looks in the current directory
    if not path:
        return None

    path = path.split(os.pathsep)

    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        curdir = os.curdir
        if curdir not in path:
            path.insert(0, curdir)

        # PATHEXT is necessary to check on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # See if the given file matches any of the expected path extensions.
        # This will allow us to short circuit when given "python.exe".
        # If it does match, only test that one, otherwise we have to try
        # others.
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]

    seen = set()
    for dir in path:
        normdir = os.path.normcase(dir)
        if not normdir in seen:
            seen.add(normdir)
            for thefile in files:
                name = os.path.join(dir, thefile)
                if _access_check(name, mode):
                    return name
    return None

# Condor submit file template; '@name' placeholders are filled in by
# MyTemplate.safe_substitute() in submit_job().
CONDOR_TEMPLATE = \
"""
# We need the job to run our executable script, with the
# input.txt filename as an argument, and to transfer the
# relevant input and output files:
executable = @executable
arguments = @args
transfer_input_files = @transfer_input_files
transfer_output_files = @transfer_output_files

error = @error
output = @output
log = @log

# The below are good base requirements for first testing jobs on OSG,
# if you don't have a good idea of memory and disk usage.
requirements = (HAS_MODULES =?= true) && (OSGVO_OS_STRING == "RHEL 7") && (OpSys == "LINUX")
request_cpus = 1
request_memory = 1 GB
request_disk = 1 GB

# Queue one job with the above specifications.
queue 1

+ProjectName = "SNOplus"
""".strip()

# all files required to run the fitter (except the DQXX files)
INPUT_FILES = ["muE_water_liquid.txt","pmt_response_qoca_d2o_20060216.dat","rsp_rayleigh.dat","e_water_liquid.txt","pmt_pcath_response.dat","pmt.txt","muE_deuterium_oxide_liquid.txt","pmt_response.dat","proton_water_liquid.txt"]

# generate a UUID to append to all the filenames so that if we run the same job
# twice we don't overwrite the first job
ID = uuid.uuid1()

class MyTemplate(string.Template):
    """string.Template that uses '@' instead of '$' as the placeholder delimiter."""
    delimiter = '@'

def splitext(path):
    """
    Like os.path.splitext() except it returns the full extension if the
    filename has multiple extensions, for example:

        splitext('foo.tar.gz') -> 'foo', '.tar.gz'
    """
    full_root, full_ext = os.path.splitext(path)
    while True:
        root, ext = os.path.splitext(full_root)
        if ext:
            full_ext = ext + full_ext
            full_root = root
        else:
            break

    return full_root, full_ext

def submit_job(filename, run, gtid, dir, dqxx_dir, min_nhit, max_particles, particle_combo=None):
    """
    Write a condor submit file for fitting event `gtid` in `filename` and
    submit it with condor_submit.

    `dir` is the directory holding INPUT_FILES, `dqxx_dir` the directory with
    the DQXX files for run `run`. `particle_combo`, if given, is the encoded
    particle hypothesis (see array_to_particle_combo()).
    """
    print("submitting job for %s gtid %i" % (filename, gtid))
    head, tail = split(filename)
    root, ext = splitext(tail)

    # all output files are prefixed with FILENAME_GTID_UUID
    if particle_combo:
        prefix = "%s_%08i_%i_%s" % (root,gtid,particle_combo,ID.hex)
    else:
        prefix = "%s_%08i_%s" % (root,gtid,ID.hex)

    # fit output filename
    output = "%s.hdf5" % prefix
    # condor submit filename
    condor_submit = "%s.submit" % prefix

    # set up the arguments for the template
    executable = which("fit")
    wrapper = which("fit-wrapper")

    if executable is None:
        print("couldn't find fit in path!",file=sys.stderr)
        sys.exit(1)

    if wrapper is None:
        print("couldn't find fit-wrapper in path!",file=sys.stderr)
        sys.exit(1)

    args = [tail,"-o",output,"--gtid",gtid,"--min-nhit",min_nhit,"--max-particles",max_particles]
    if particle_combo:
        args += ["-p",particle_combo]
    transfer_input_files = ",".join([executable,filename,join(dqxx_dir,"DQXX_%010i.dat" % run)] + [join(dir,filename) for filename in INPUT_FILES])
    transfer_output_files = ",".join([output])

    condor_error = "%s.error" % prefix
    condor_output = "%s.output" % prefix
    condor_log = "%s.log" % prefix

    template = MyTemplate(CONDOR_TEMPLATE)

    submit_string = template.safe_substitute(
            executable=wrapper,
            args=" ".join(map(str,args)),
            transfer_input_files=transfer_input_files,
            transfer_output_files=transfer_output_files,
            error=condor_error,
            output=condor_output,
            log=condor_log)

    # write out the formatted template
    with open(condor_submit, "w") as f:
        f.write(submit_string)

    # submit the job
    check_call(["condor_submit",condor_submit])

def array_to_particle_combo(combo):
    """
    Encode a sequence of particle ids as a single integer in base 100,
    e.g. [20, 22] -> 2022.
    """
    particle_combo = 0
    for i, id in enumerate(combo[::-1]):
        particle_combo += id*100**i
    return particle_combo

if __name__ == '__main__':
    import argparse
    from subprocess import check_call
    import os
    import tempfile
    import h5py
    from itertools import combinations_with_replacement

    parser = argparse.ArgumentParser("submit grid jobs")
    parser.add_argument("filenames", nargs='+', help="input files")
    parser.add_argument("--min-nhit", type=int, help="minimum nhit to fit an event", default=100)
    parser.add_argument("--max-particles", type=int, help="maximum number of particles to fit for", default=3)
    parser.add_argument("--skip-second-event", action='store_true', help="only fit the first event after a MAST bank", default=False)
    parser.add_argument("--state", type=str, help="state file", default=None)
    args = parser.parse_args()

    if args.state is None:
        home = os.path.expanduser("~")
        args.state = join(home,'state.txt')

    if 'SDDM_DATA' not in os.environ:
        print("Please set the SDDM_DATA environment variable to point to the fitter source code location", file=sys.stderr)
        sys.exit(1)

    dir = os.environ['SDDM_DATA']

    if 'DQXX_DIR' not in os.environ:
        print("Please set the DQXX_DIR environment variable to point to the directory with the DQXX files", file=sys.stderr)
        sys.exit(1)

    dqxx_dir = os.environ['DQXX_DIR']

    args.state = abspath(args.state)

    # get the current working directory
    home_dir = os.getcwd()

    # get absolute paths since we are going to create a new directory
    dir = abspath(dir)
    dqxx_dir = abspath(dqxx_dir)

    zdab_cat = which("zdab-cat")

    if zdab_cat is None:
        print("couldn't find zdab-cat in path!",file=sys.stderr)
        sys.exit(1)

    for filename in args.filenames:
        filename = abspath(filename)

        head, tail = split(filename)
        root, ext = splitext(tail)

        # Load the list of already-processed filenames; missing state file
        # just means nothing has been processed yet.
        try:
            with open(args.state) as f:
                state = yaml.load(f.read(),Loader=Loader)
        except IOError:
            state = None

        if state is None:
            state = []

        if tail in state:
            print("skipping %s" % filename)
            continue

        with open(os.devnull, 'w') as f:
            # Create a temporary file to store the events. Docs recommended
            # this method instead of mkstemp(), but I think they're the same.
            output = tempfile.NamedTemporaryFile(suffix='.hdf5',delete=False)
            output.close()

            if args.skip_second_event:
                check_call([zdab_cat,"--skip-second-event",filename,"-o",output.name],stderr=f)
            else:
                check_call([zdab_cat,filename,"-o",output.name],stderr=f)

        # Run the jobs from a fresh per-file directory so the submit/log
        # files from different inputs don't collide.
        new_dir = "%s_%s" % (root,ID.hex)

        os.mkdir(new_dir)
        os.chdir(new_dir)

        with h5py.File(output.name) as f:
            for ev in f['ev']:
                if ev['nhit'] >= args.min_nhit:
                    for i in range(1,args.max_particles+1):
                        for particle_combo in map(array_to_particle_combo,combinations_with_replacement([20,22],i)):
                            submit_job(filename, ev['run'], ev['gtid'], dir, dqxx_dir, args.min_nhit, args.max_particles, particle_combo)

        # Delete temporary HDF5 file
        os.unlink(output.name)

        state.append(tail)

        with open(args.state,"w") as f:
            f.write(yaml.dump(state,default_flow_style=False))

        os.chdir(home_dir)
#!/usr/bin/env python
# Copyright (c) 2019, Anthony Latorre <tlatorre at uchicago>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <https://www.gnu.org/licenses/>.
"""
Script to reprocess ZDAB files from SNO and write out orphans. Copied mostly
from zdab-reprocess, but I changed the command file template. To reprocess a
single file:

    $ ./zdab-reprocess-orphans FILENAME

and to batch reprocess:

    $ ./zdab-reprocess-orphans FILENAME FILENAME ...

By default, the reprocessed files will be stored in the current working
directory with _reprocessed appended to the name, but this can be changed by
passing a different suffix on the command line. For example:

    $ ./zdab-reprocess-orphans --suffix [suffix] SNOCR_00000100004_000_p2.xzdab

will produce a file named SNOCR_00000100004_000_p2_suffix.xzdab.
"""

from __future__ import print_function, division
import string
import os
import re

def GetSettingsFile(run):
    """
    Return the SNOMAN settings command file for a given run number.

    Taken from GetSettingsFile() in autosno/lib/AutoSNO_Module.pm.
    """
    # Upper run-number bounds (exclusive) for each detector phase, in
    # increasing order; runs past the last bound fall through to H2O.
    boundaries = (
        (19999, "load_d2o_settings.cmd"),
        (33907, "load_salt_settings.cmd"),
        (39999, "load_d2o_2_settings.cmd"),
        (67900, "load_ncd_settings.cmd"),
    )
    for limit, settings in boundaries:
        if run < limit:
            return settings
    return "load_h2o_settings.cmd"

# from autosno/run_lists/RunList_clock_force_no_fix.dat
RUN_LIST_CLOCK_FORCE_NO_FIX = [
    11371,
    11377,
    11399,
    11436,
    11575,
    11681,
    11701,
    11911,
    11976,
    12187,
    12233,
    13401,
    13423,
    13431,
    13874,
    14264,
    51456,
]

def get_clock_force_no_fix_string(run):
    """
    Return the SNOMAN command needed for runs on the clock-force-no-fix
    list, or an empty string for all other runs.
    """
    if run not in RUN_LIST_CLOCK_FORCE_NO_FIX:
        return ""
    return "set bank TUPK 1 word 3 to -1"

# This template comes from autosno/prod/Reconstruct.pm. I also added the lines
# which autosno adds in Run() of the Reconstruct module.
#
# The template is rendered by MyTemplate (delimiter '@'), so '@name' tokens
# are substituted at run time while the many literal '$...' SNOMAN tokens are
# left untouched. A doubled '@@' escapes to a literal '@' in the output:
# '@@@settings_file' renders as '@' followed by the settings filename, and
# '@@run_snodb' renders as the literal command '@run_snodb'.
SNOMAN_TEMPLATE = \
"""
* This file was automatically created by zdab-reprocess.
*
* Note: This command file was created by attempting to "reverse engineer" how
*       autosno fits neutrino runs.
*
* Author: Anthony LaTorre
* Created on August 26, 2019.
file inp 1 @input
file out 1 @output

titles @dqxx
titles @anxx_pca
titles @anxx_neutrino

@@@settings_file

$flt_set_prescale @prescale_setting
$define_test 1 $line_1 'nhits float_equals EV+$KEV_NPM:@lower_nhit @upper_nhit;'
$starting_seed @seed
$starting_seed_2 @seed2

@clock_force_no_fix

* We want to process the entire file.
$num_events 0

* Titles and database checks.
set bank TMTT 1 word 0 to 1

* HCA Control.
$hca_mode $perform_correction
$hca_correction $new_correction
$hca_consecutive $yes

* QRC Control.
* We need the cross talk cut.
$apply_xtalk_cut

*Settings for muon fitters.
* Set the charge threshold for multi photon timing correction
$set_mpca_threshold 1.5
titles mpca_data.dat
* Turn on the channel dependent gain calibration
set bank TCAL 1 word 4 to 54

*Load charge probability titles file
titles charge_photoelectrons.dat

* Low nhit prescale
$flt_set_prescale_2 0.0

* Primary NHIT Cut.
$enable_test 1
$define_test 1 $line_1 'nhits float_equals EV+$KEV_NPM:18.5 9999.;'

* Primary Fitter NHIT Cut.
$enable_test 3
$define_test 3 $line_1 'nhits float_equals EV+$KEV_NPM:9.5 999.;'

* NCD Event Tag.
$enable_test 4
$define_test 4 $line_1 'ncd float_equals EV+$KEV_NCD_STATUS:0.5 3.5;'

* Muon Candidate Tag.
$enable_test 5
$define_test 5 $line_1 'nhits float_equals EV+$KEV_NPM:249.5 9999.;'

* A do nothing filter that allows autosno to filter maintenece events.
$enable_test 9
$define_test 9 $line_1 'one equals 1.0;'

define event_loop
   call INP
   call UPK
   call CAL
   call FLT($PERM_BANK_CUT)
   if_not_ok goto WRITE_OUT

EVENT_REJECT:
   call FLT(9)            * User defined cut. Default passes all.
   if_not_ok quit_event
   call FLT($JUNK_CUT)    * Junk. We still want to keep NCD events.
   if_not_ok goto WRITE_OUT
   quit_event

WRITE_OUT:
   call PRU
   call OUT
   quit_event
end_def

*Pruning Control
$prune_pix $drop
$prune_px $drop
$prune_pf $drop
$prune_nesg $drop
$prune_nes $drop
$prune_nemg $drop

* Run with the Database
@@run_snodb

!endfile member=fit_neutrino
""".strip()

class MyTemplate(string.Template):
    """string.Template subclass using '@' as the placeholder delimiter.

    The SNOMAN command file template is full of literal '$' tokens, so the
    default '$' delimiter cannot be used.
    """
    delimiter = '@'

def splitext(path):
    """
    Like os.path.splitext() except it returns the full extension if the
    filename has multiple extensions, for example:

        splitext('foo.tar.gz') -> 'foo', '.tar.gz'
    """
    root, ext = os.path.splitext(path)
    # Keep peeling extensions off the root and prepending them to the
    # accumulated extension until none are left.
    head, tail = os.path.splitext(root)
    while tail:
        root = head
        ext = tail + ext
        head, tail = os.path.splitext(root)
    return root, ext

def GetRunRange(run, num=1000):
    """
    Returns the run range string for ANXX neutrino files.

    Example:

        GetRunRange(10000,1000) -> "10000-10999"

    Comes from GetRunRange() in autosno/lib/FileUtil.pm.
    """
    # Round down to the nearest multiple of `num` to get the range start.
    low = int(run//num)*num
    high = low + num - 1
    return "%i-%i" % (low, high)

def get_anxx_neutrino_filename(run, dir):
    """
    Returns the ANXX neutrino file with the highest pass number for a given
    run. The files should all be located in dir.

    Raises IndexError if no matching file exists in `dir`.

    FIXME: Is it OK to just get the file with the highest pass number?
    Technically this info should come from autosno, but it's not clear exactly
    how it's getting the pass number in the function GetPassNum() in
    AutoSNO_Module.pm.
    """
    # Raw string so '\d' is a regex digit class, not a (deprecated) string
    # escape.
    p = re.compile(r"anxx_nu_(\d+)_p(\d+).dat")

    filenames = []
    for filename in os.listdir(dir):
        match = p.match(filename)

        # Skip files that don't look like ANXX neutrino titles files at all.
        # Previously a stray file in `dir` caused an AttributeError here
        # (match is None); get_anxx_pca_next() already had this check.
        if match is None:
            continue

        if int(match.group(1)) != run:
            continue

        # Sort pass numbers numerically rather than as strings so that,
        # e.g., p10 ranks above p9.
        filenames.append((int(match.group(2)), filename))

    return sorted(filenames)[-1][1]

def get_anxx_pca_next(run,dir):
    """
    Returns the next ANXX PCA file after `run`.

    Raises IndexError if no PCA file at or after `run` exists in `dir`.

    See Get_ANxx_PCA_Next() in autosno/lib/GetTitles.pm.
    """
    # Raw string so '\d' is a regex digit class, not a (deprecated) string
    # escape.
    p = re.compile(r"anxx_pca_(\d+)_p(\d+).dat")

    filenames = []
    for filename in os.listdir(dir):
        match = p.match(filename)

        # Ignore files that aren't ANXX PCA titles files.
        if match is None:
            continue

        # Only consider PCA files at or after the requested run.
        if int(match.group(1)) < run:
            continue

        # Compare run/pass numbers numerically rather than as strings so
        # that unpadded numbers (e.g. 9 vs 10) sort correctly.
        filenames.append((int(match.group(1)), int(match.group(2)), filename))

    # NOTE(review): [-1] keeps the original behavior of picking the HIGHEST
    # qualifying run, which for "next after `run`" looks like it should be
    # [0] (the smallest run >= `run`) -- confirm against autosno's
    # Get_ANxx_PCA_Next() before changing.
    return sorted(filenames)[-1][2]


if __name__ == '__main__':
    import argparse
    import sys
    import os
    from subprocess import check_call

    parser = argparse.ArgumentParser("reprocess zdabs")
    parser.add_argument("filenames", nargs="+", help="filenames of zdabs")
    parser.add_argument("--dir", default=None, help="directory to store reprocessed zdab files")
    parser.add_argument("--lower-nhit", type=float, default=18.5, help="lower value for primary nhit cut")
    parser.add_argument("--upper-nhit", type=float, default=9999, help="upper value for primary nhit cut")
    parser.add_argument("--suffix", default="orphans", help="suffix appended to output filename")
    args = parser.parse_args()

    # The run number is the first digit group in names like
    # SNO0000100004_000_p2.xzdab or SNOCR_0000100004_000_p2.xzdab.
    # NOTE(review): pattern is not a raw string and the '.' before 'x?zdab'
    # is unescaped (matches any character); in practice the filenames always
    # have a literal '.' there, so this works.
    filename_re = "(?:SNOCR_|SNO)(\d+)_\w+.x?zdab"

    p = re.compile(filename_re)

    template = MyTemplate(SNOMAN_TEMPLATE)

    for filename in args.filenames:
        head, tail = os.path.split(filename)
        match = p.match(tail)

        # Bail out on the first filename we can't parse rather than
        # continuing with the rest.
        if not match:
            print("Unable to parse filename '%s'" % tail)
            sys.exit(1)

        try:
            run = int(match.group(1))
        except Exception as e:
            print("Unable to convert run number to int: %s" % str(e))
            sys.exit(1)

        root, ext = splitext(tail)

        # Insert the suffix before the (possibly multi-part) extension,
        # e.g. foo.xzdab -> foo_orphans.xzdab.
        output = "%s_%s%s" % (root,args.suffix,ext)

        if args.dir:
            output = os.path.join(args.dir,output)

        # SNOMAN command file, written to the current working directory.
        cmd_filename = "%s.cmd" % root

        # Titles files needed by SNOMAN: the DQXX file for this run, the ANXX
        # neutrino file with the highest pass number for this run, and the
        # ANXX PCA file selected by get_anxx_pca_next().
        dqxx_filename = "/sno/mcprod/dqxx/DQXX_%010i.dat" % run
        anxx_neutrino_directory = "/sno/mcprod/anxx/titles/neutrino/%s" % GetRunRange(run)
        anxx_neutrino_filename = get_anxx_neutrino_filename(run, anxx_neutrino_directory)
        anxx_neutrino_filename = os.path.join(anxx_neutrino_directory,anxx_neutrino_filename)
        anxx_pca_directory = "/sno/mcprod/anxx/titles/pca"
        anxx_pca_filename = get_anxx_pca_next(run, anxx_pca_directory)
        anxx_pca_filename = os.path.join(anxx_pca_directory,anxx_pca_filename)

        # Fill in the '@' placeholders of SNOMAN_TEMPLATE and write the
        # command file out.
        with open(cmd_filename, "w") as f:
            f.write(template.safe_substitute(input=filename,
                                             output=output,
                                             dqxx=dqxx_filename,
                                             anxx_neutrino=anxx_neutrino_filename,
                                             anxx_pca=anxx_pca_filename,
                                             settings_file=GetSettingsFile(run),
                                             prescale_setting=1.0, # no idea what this is
                                             lower_nhit = args.lower_nhit,
                                             upper_nhit = args.upper_nhit,
                                             seed=0,
                                             seed2=0,
                                             clock_force_no_fix=get_clock_force_no_fix_string(run)))

        # Run SNOMAN on the generated command file; check_call raises if
        # snoman.exe exits with a nonzero status.
        check_call(["snoman.exe","-c",cmd_filename])