#!/usr/bin/env python
# Copyright (c) 2019, Anthony Latorre <tlatorre at uchicago>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <https://www.gnu.org/licenses/>.
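"""
Script to submit one grid fitter job per event in a set of zdab files. For
each input file it runs `zdab-cat` to get the list of events, and then
submits a condor job for every event with at least --min-nhit hit PMTs.

Example usage (the paths and filename here are hypothetical):

    ./submit-grid-jobs --dir ~/fitter --dqxx-dir ~/dqxx run_00012345.zdab
"""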
from __future__ import print_function, division
import yaml
try:
    from yaml import CLoader as Loader
except ImportError:
    from yaml.loader import SafeLoader as Loader
import string
from os.path import split, splitext, join, abspath
import uuid
from subprocess import check_call
CONDOR_TEMPLATE = \
"""
# We need the job to run our executable script, with the
# input.txt filename as an argument, and to transfer the
# relevant input and output files:
executable = @executable
arguments = @args
transfer_input_files = @transfer_input_files
transfer_output_files = @transfer_output_files
error = @error
output = @output
log = @log
# The below are good base requirements for first testing jobs on OSG,
# if you don't have a good idea of memory and disk usage.
requirements = (OSGVO_OS_STRING == "RHEL 7") && (OpSys == "LINUX")
request_cpus = 1
request_memory = 1 GB
request_disk = 1 GB
# The +ProjectName attribute must come before the queue statement, since
# submit file commands after the final queue have no effect.
+ProjectName = "SNOplus"
# Queue one job with the above specifications.
queue 1
""".strip()
# all files required to run the fitter (except the DQXX files)
INPUT_FILES = ["muE_water_liquid.txt",
               "pmt_response_qoca_d2o_20060216.dat",
               "rsp_rayleigh.dat",
               "e_water_liquid.txt",
               "pmt_pcath_response.dat",
               "pmt.txt",
               "muE_deuterium_oxide_liquid.txt",
               "pmt_response.dat",
               "proton_water_liquid.txt"]
# generate a UUID to append to all the filenames so that if we run the same job
# twice we don't overwrite the first job
ID = uuid.uuid1()
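# string.Template subclass whose placeholders are prefixed with '@' instead of
# the default '$', likely so that template substitution can't collide with
# HTCondor's own $(...) macro syntax in the submit file.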
class MyTemplate(string.Template):
    delimiter = '@'
def submit_job(filename, run, gtid, dir, dqxx_dir, min_nhit, max_particles):
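    """
    Submit a single grid job to fit the event with trigger ID `gtid` from run
    `run` in `filename`. Writes a condor submit file named
    FILENAME_GTID_UUID.submit and then calls condor_submit on it.
    """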
    print("submitting job for %s gtid %i" % (filename, gtid))

    head, tail = split(filename)
    root, ext = splitext(tail)

    # all output files are prefixed with FILENAME_GTID_UUID
    prefix = "%s_%08i_%s" % (root,gtid,ID.hex)

    # fit output filename
    output = "%s.txt" % prefix
    # condor submit filename
    condor_submit = "%s.submit" % prefix

    # set up the arguments for the template
    executable = join(dir,"fit")
    args = [tail,"-o",output,"--gtid",gtid,"--min-nhit",min_nhit,"--max-particles",max_particles]
    # the loop variable is named `input_file` rather than `filename` to avoid
    # shadowing the function argument
    transfer_input_files = ",".join([filename,join(dqxx_dir,"DQXX_%010i.dat" % run)] + [join(dir,input_file) for input_file in INPUT_FILES])
    transfer_output_files = ",".join([output])

    condor_error = "%s.error" % prefix
    condor_output = "%s.output" % prefix
    condor_log = "%s.log" % prefix

    template = MyTemplate(CONDOR_TEMPLATE)

    submit_string = template.safe_substitute(
        executable=executable,
        args=" ".join(map(str,args)),
        transfer_input_files=transfer_input_files,
        transfer_output_files=transfer_output_files,
        error=condor_error,
        output=condor_output,
        log=condor_log)

    # write out the formatted template
    with open(condor_submit, "w") as f:
        f.write(submit_string)

    # submit the job
    check_call(["condor_submit",condor_submit])
if __name__ == '__main__':
    import argparse
    from subprocess import check_output
    import os
    parser = argparse.ArgumentParser(description="submit grid jobs")
parser.add_argument("filenames", nargs='+', help="input files")
parser.add_argument("--dir", type=str, help="fitter directory", required=True)
parser.add_argument("--dqxx-dir", type=str, help="dqxx directory", required=True)
parser.add_argument("--min-nhit", type=int, help="minimum nhit to fit an event", default=100)
parser.add_argument("--max-particles", type=int, help="maximum number of particles to fit for", default=3)
parser.add_argument("--skip-second-event", action='store_true', help="only fit the first event after a MAST bank", default=False)
parser.add_argument("--state", type=str, help="state file", default=None)
args = parser.parse_args()
if args.state is None:
home = os.path.expanduser("~")
args.state = join(home,'state.txt')
args.state = abspath(args.state)
# get the current working directory
home_dir = os.getcwd()
# get absolute paths since we are going to create a new directory
args.dir = abspath(args.dir)
args.dqxx_dir = abspath(args.dqxx_dir)
    for filename in args.filenames:
        filename = abspath(filename)

        head, tail = split(filename)
        root, ext = splitext(tail)
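        # reload the state file on each iteration; it holds the list of input
        # files whose jobs have already been submitted, so rerunning this
        # script skips them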
        try:
            with open(args.state) as f:
                state = yaml.load(f.read(),Loader=Loader)
        except IOError:
            state = None

        if state is None:
            state = []

        if tail in state:
            print("skipping %s" % filename)
            continue
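        # run zdab-cat to dump a YAML summary of the events in the file (run
        # number, GTID, and nhit for each triggered event)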
        if args.skip_second_event:
            output = check_output([join(args.dir,"zdab-cat"),"--skip-second-event",filename])
        else:
            output = check_output([join(args.dir,"zdab-cat"),filename])

        data = yaml.load(output,Loader=Loader)
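        # submit the jobs from a fresh per-file directory so the submit, log,
        # and fit output files for each input file stay together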
        new_dir = "%s_%s" % (root,ID.hex)
        os.mkdir(new_dir)
        os.chdir(new_dir)
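        # submit one job for every event in the file that passes the nhit cut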
        for i, event in enumerate(data['data']):
            for ev in event['ev']:
                run = ev['run']
                gtid = ev['gtid']
                nhit = ev['nhit']

                if nhit >= args.min_nhit:
                    submit_job(filename, run, gtid, args.dir, args.dqxx_dir, args.min_nhit, args.max_particles)
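        # record that all of the jobs for this file have been submitted and
        # update the state file on disk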
        state.append(tail)

        with open(args.state,"w") as f:
            f.write(yaml.dump(state,default_flow_style=False))

        os.chdir(home_dir)