path: root/utils/zdab-reprocess-orphans
blob: ac8585ceac2f2dedbcc5823f1cf9fe566cd48656 (plain)
#!/usr/bin/env python
# Copyright (c) 2019, Anthony Latorre <tlatorre at uchicago>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <https://www.gnu.org/licenses/>.
"""
Script to reprocess ZDAB files from SNO and write out orphans. Mostly copied
from zdab-reprocess, but with a different command file template. To reprocess a
single file:

    $ ./zdab-reprocess-orphans FILENAME

and to batch reprocess:

    $ ./zdab-reprocess-orphans FILENAME FILENAME ...

By default, the reprocessed files will be stored in the current working
directory with _orphans appended to the name, but this can be changed by
passing a different suffix on the command line. For example:

    $ ./zdab-reprocess-orphans --suffix [suffix] SNOCR_00000100004_000_p2.xzdab

will produce a file named SNOCR_00000100004_000_p2_suffix.xzdab.
"""

from __future__ import print_function, division
import string
import os
import re

def GetSettingsFile(run):
    """
    Returns the settings file for a given run number.

    Taken from GetSettingsFile() in autosno/lib/AutoSNO_Module.pm.
    """
    if   run < 19999: return "load_d2o_settings.cmd"
    elif run < 33907: return "load_salt_settings.cmd"
    elif run < 39999: return "load_d2o_2_settings.cmd"
    elif run < 67900: return "load_ncd_settings.cmd"
    else:             return "load_h2o_settings.cmd"

# from autosno/run_lists/RunList_clock_force_no_fix.dat
RUN_LIST_CLOCK_FORCE_NO_FIX = [
    11371,
    11377,
    11399,
    11436,
    11575,
    11681,
    11701,
    11911,
    11976,
    12187,
    12233,
    13401,
    13423,
    13431,
    13874,
    14264,
    51456,
]

def get_clock_force_no_fix_string(run):
    if run in RUN_LIST_CLOCK_FORCE_NO_FIX:
        return "set bank TUPK 1 word 3 to -1"
    else:
        return ""

# This template comes from autosno/prod/Reconstruct.pm. I also added the lines
# which autosno adds in Run() of the Reconstruct module.
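# The @-prefixed tokens in the template are placeholders filled in by
# MyTemplate.safe_substitute() near the bottom of this script. Since '@' is the
# template delimiter, '@@' escapes to a literal '@', so '@@@settings_file'
# expands to '@<settings file>' and '@@run_snodb' to '@run_snodb', leaving
# SNOMAN's own '@' syntax intact in the generated command file.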
SNOMAN_TEMPLATE = \
"""
* This file was automatically created by zdab-reprocess-orphans.
*
* Note: This command file was created by attempting to "reverse engineer" how
*       autosno fits neutrino runs.
*
* Author: Anthony LaTorre
* Created on August 26, 2019.
file inp 1 @input
file out 1 @output

titles @dqxx
titles @anxx_pca
titles @anxx_neutrino

@@@settings_file

$flt_set_prescale @prescale_setting
$define_test 1 $line_1 'nhits float_equals EV+$KEV_NPM:@lower_nhit @upper_nhit;'
$starting_seed @seed
$starting_seed_2 @seed2

@clock_force_no_fix

* We want to process the entire file.
$num_events 0

* Titles and database checks.
set bank TMTT 1 word 0 to 1

* HCA Control.
$hca_mode $perform_correction
$hca_correction $new_correction
$hca_consecutive $yes

* QRC Control.
* We need the cross talk cut.
$apply_xtalk_cut

*Settings for muon fitters.
* Set the charge threshold for multi photon timing correction
$set_mpca_threshold 1.5
titles mpca_data.dat
* Turn on the channel dependent gain calibration
set bank TCAL 1 word 4 to 54

*Load charge probability titles file
titles charge_photoelectrons.dat

* Low nhit prescale
$flt_set_prescale_2 0.0

* Primary NHIT Cut.
$enable_test 1
$define_test 1 $line_1 'nhits float_equals EV+$KEV_NPM:18.5 9999.;'

* Primary Fitter NHIT Cut.
$enable_test 3
$define_test 3 $line_1 'nhits float_equals EV+$KEV_NPM:9.5 999.;'

* NCD Event Tag.
$enable_test 4
$define_test 4 $line_1 'ncd float_equals EV+$KEV_NCD_STATUS:0.5 3.5;'

* Muon Candidate Tag.
$enable_test 5
$define_test 5 $line_1 'nhits float_equals EV+$KEV_NPM:249.5 9999.;'

* A do-nothing filter that allows autosno to filter maintenance events.
$enable_test 9
$define_test 9 $line_1 'one equals 1.0;'

define event_loop
   call INP
   call UPK
   call CAL
   call FLT($PERM_BANK_CUT)
   if_not_ok goto WRITE_OUT

EVENT_REJECT:
   call FLT(9)            * User defined cut. Default passes all.
   if_not_ok quit_event
   call FLT($JUNK_CUT)    * Junk. We still want to keep NCD events.
   if_not_ok goto WRITE_OUT
   quit_event

WRITE_OUT:
   call PRU
   call OUT
   quit_event
end_def

*Pruning Control
$prune_pix $drop
$prune_px $drop
$prune_pf $drop
$prune_nesg $drop
$prune_nes $drop
$prune_nemg $drop

* Run with the Database
@@run_snodb

!endfile member=fit_neutrino
""".strip()

class MyTemplate(string.Template):
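    """
    string.Template subclass that uses '@' instead of the default '$' as the
    placeholder delimiter, so the many '$' commands in SNOMAN_TEMPLATE pass
    through untouched while the @-tokens are substituted ('@@' escapes to '@').
    """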
    delimiter = '@'

def splitext(path):
    """
    Like os.path.splitext() except it returns the full extension if the
    filename has multiple extensions, for example:

        splitext('foo.tar.gz') -> 'foo', '.tar.gz'
    """
    full_root, full_ext = os.path.splitext(path)
    while True:
        root, ext = os.path.splitext(full_root)
        if ext:
            full_ext = ext + full_ext
            full_root = root
        else:
            break

    return full_root, full_ext

def GetRunRange(run, num=1000):
    """
    Returns the run range string for ANXX neutrino files.

    Example:

        GetRunRange(10000,1000) -> "10000-10999"

    Comes from GetRunRange() in autosno/lib/FileUtil.pm.
    """
    base_run = int(run//num)*num
    return "%i-%i" % (base_run, base_run + num - 1)

def get_anxx_neutrino_filename(run, dir):
    """
    Returns the ANXX neutrino file with the highest pass number for a given
    run. The files should all be located in dir.

    FIXME: Is it OK to just get the file with the highest pass number?
    Technically this info should come from autosno, but it's not clear exactly
    how it's getting the pass number in the function GetPassNum() in
    AutoSNO_Module.pm.
    """
    filename_re = "anxx_nu_(\d+)_p(\d+).dat"
    p = re.compile(filename_re)

    filenames = []
    for filename in os.listdir(dir):
        match = p.match(filename)

        if int(match.group(1)) != run:
            continue

        filenames.append((match.group(2),filename))

    return sorted(filenames)[-1][1]

def get_anxx_pca_next(run,dir):
    """
    Returns the next ANXX PCA file after `run`.

    See Get_ANxx_PCA_Next() in autosno/lib/GetTitles.pm.
    """
    filename_re = "anxx_pca_(\d+)_p(\d+).dat"
    p = re.compile(filename_re)

    filenames = []
    for filename in os.listdir(dir):
        match = p.match(filename)

        if match is None:
            continue

        if int(match.group(1)) < run:
            continue

        filenames.append((match.group(1),match.group(2),filename))

    # Sorted ascending by (run, pass), the "next" PCA file at or after `run`
    # should be the earliest qualifying run; take the highest pass number
    # available for that run.
    filenames.sort()
    next_run = filenames[0][0]
    return [f for f in filenames if f[0] == next_run][-1][2]


if __name__ == '__main__':
    import argparse
    import sys
    import os
    from subprocess import check_call

    parser = argparse.ArgumentParser(description="reprocess zdabs")
    parser.add_argument("filenames", nargs="+", help="filenames of zdabs")
    parser.add_argument("--dir", default=None, help="directory to store reprocessed zdab files")
    parser.add_argument("--lower-nhit", type=float, default=18.5, help="lower value for primary nhit cut")
    parser.add_argument("--upper-nhit", type=float, default=9999, help="upper value for primary nhit cut")
    parser.add_argument("--suffix", default="orphans", help="suffix appended to output filename")
    args = parser.parse_args()

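    # Extract the run number from filenames like SNOCR_00000100004_000_p2.xzdab
    # (see the docstring at the top); group(1) is the zero-padded run number.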
    filename_re = "(?:SNOCR_|SNO)(\d+)_\w+.x?zdab"

    p = re.compile(filename_re)

    template = MyTemplate(SNOMAN_TEMPLATE)

    for filename in args.filenames:
        head, tail = os.path.split(filename)
        match = p.match(tail)

        if not match:
            print("Unable to parse filename '%s'" % tail)
            sys.exit(1)

        try:
            run = int(match.group(1))
        except Exception as e:
            print("Unable to convert run number to int: %s" % str(e))
            sys.exit(1)

        root, ext = splitext(tail)

        output = "%s_%s%s" % (root,args.suffix,ext)

        if args.dir:
            output = os.path.join(args.dir,output)

        cmd_filename = "%s.cmd" % root

        dqxx_filename = "/sno/mcprod/dqxx/DQXX_%010i.dat" % run
        anxx_neutrino_directory = "/sno/mcprod/anxx/titles/neutrino/%s" % GetRunRange(run)
        anxx_neutrino_filename = get_anxx_neutrino_filename(run, anxx_neutrino_directory)
        anxx_neutrino_filename = os.path.join(anxx_neutrino_directory,anxx_neutrino_filename)
        anxx_pca_directory = "/sno/mcprod/anxx/titles/pca"
        anxx_pca_filename = get_anxx_pca_next(run, anxx_pca_directory)
        anxx_pca_filename = os.path.join(anxx_pca_directory,anxx_pca_filename)

        with open(cmd_filename, "w") as f:
            f.write(template.safe_substitute(input=filename,
                                             output=output,
                                             dqxx=dqxx_filename,
                                             anxx_neutrino=anxx_neutrino_filename,
                                             anxx_pca=anxx_pca_filename,
                                             settings_file=GetSettingsFile(run),
                                             prescale_setting=1.0, # no idea what this is
                                             lower_nhit = args.lower_nhit,
                                             upper_nhit = args.upper_nhit,
                                             seed=0,
                                             seed2=0,
                                             clock_force_no_fix=get_clock_force_no_fix_string(run)))

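        # Run SNOMAN on the generated command file; the reprocessed (orphan)
        # zdab is written to `output`.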
        check_call(["snoman.exe","-c",cmd_filename])