/usr/share/arc/scan-SLURM-job is in nordugrid-arc-arex 4.0.0-1.

This file is owned by root:root, with mode 0o755.

The actual contents of the file can be viewed below.

#!/bin/bash
#
# Periodically check state of grid jobs in SLURM, and put mark files
# for finished jobs.
#
# usage: scan-SLURM-job [--config conffile] control_dir ...

# ARC1 passes the config file first.
if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi

basedir=`dirname $0`
basedir=`cd $basedir > /dev/null && pwd` || exit $?

libexecdir="${ARC_LOCATION:-/usr}/lib/arc/"
pkgdatadir="$basedir"

. "${pkgdatadir}/configure-SLURM-env.sh" || exit $?

. "${pkgdatadir}/scan_common.sh" || exit $?

# Prevent multiple instances of scan-slurm-job from running concurrently
lockfile="${TMPDIR:-/tmp}/scan-slurm-job.lock"
# Check if the lockfile exists; if not, create it.
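# set -C enables the shell's noclobber option, so the redirection below fails
# if the lockfile already exists; this gives an atomic create-if-absent test.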
(set -C; : > "$lockfile") 2> /dev/null
if [ "$?" != "0" ]; then
    if ps -p $(< "$lockfile") 2>/dev/null;then
	echo "lockfile exists and PID $(< $lockfile) is running"
	exit 1
    fi
    echo "old lockfile found, was scan-slurm-job killed?"

    # Sleep, and if no other process has removed and recreated the lockfile, remove it.
    # Races are still possible, but this will have to do.
    sleep $((${RANDOM}%30+10))
    if ps -p $(< "$lockfile") &>/dev/null;then
        echo "lockfile exists and PID $(< "$lockfile") is running"
	exit 1
    else
	echo "still not running, removing lockfile"
	rm "$lockfile"
	exit 1
    fi
fi
echo "$$" > "$lockfile"
# Remove the lockfile when the script exits or is terminated
# (SIGKILL cannot be trapped, so KILL is not listed)
trap 'rm -f "$lockfile"' EXIT INT TERM
#Default sleep-time is 30 seconds
sleep ${CONFIG_slurm_wakeupperiod:-30}
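# CONFIG_slurm_wakeupperiod presumably comes from the parsed arc.conf
# (slurm_wakeupperiod option) and throttles how often this scan runs.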

### If GM sees the session dirs... copied from scan-pbs-jobs
RUNTIME_NODE_SEES_FRONTEND=$CONFIG_shared_filesystem
#default is NFS
if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then
  RUNTIME_NODE_SEES_FRONTEND=yes
fi
# locally empty means no
if [ "${RUNTIME_NODE_SEES_FRONTEND}" = 'no' ] ; then
  RUNTIME_NODE_SEES_FRONTEND=
fi
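# From this point on, an empty RUNTIME_NODE_SEES_FRONTEND means the compute
# nodes do not share the session directory filesystem with the frontend.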

my_id=`id -u`

#Validate control directories supplied on command-line
if [ -z "$1" ] ; then
    echo "no control_dir specified" 1>&2; exit 1
fi
for ctr_dir in "$@"; do
    if [ ! -d "$ctr_dir" ]; then
	echo "called with erronous control dir: $ctr_dir"
	exit 1
    fi
done

# List of SLURM jobids for grid-jobs with state INLRMS
declare -a localids
# Array with basenames of grid-job files in ctr_dir, indexed by localid
# example: /some/path/job.XXXXX /some/other/path/job.YYYYY
declare -a basenames
# Array with states of the jobs in SLURM, indexed by localid
declare -a jobstates
# Array of localids for jobs determined to have finished; passed to gm-kick
declare -a kicklist
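# Note: SLURM localids are numeric, so they can be used directly as sparse
# indices into the basenames and jobstates arrays above.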

# Find list of grid jobs with status INLRMS, store localid and
# basename for those jobs
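# The pipeline below maps each status file to the job's control-file basename:
# e.g. (hypothetical paths) <ctr_dir>/processing/job.123abc.status becomes
# <ctr_dir>/job.123abc, via the sed rewrite of the 'processing/job.<id>.status'
# suffix. Only jobs whose status file contains INLRMS or CANCELING are kept.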
for ctr_dir in "$@"; do
    for basename in $(find "$ctr_dir/processing" -name 'job.*.status' -print0 \
        | xargs -0 egrep -l "INLRMS|CANCELING" \
        | sed 's/processing\/job\.\([^\.]*\)\.status$/job.\1/')
    do
        localid=$(grep ^localid= "${basename}.local" | cut -d= -f2)

        verify_jobid "$localid" || continue

        localids[${#localids[@]}]="$localid"
        basenames[$localid]="$basename"
    done
done

# No need to continue further if no jobs have status INLRMS
if [ ${#localids[@]} -eq 0 ]; then
    exit 0
fi

# Get JobStates from SLURM
jobstate_squeue=$($squeue -a -h -o "%i:%T" -t all \
    -j $(echo "${localids[@]}" | tr ' ' ,))\
    || { echo "squeue failed" 1>&2; exit 1; }

for record in $jobstate_squeue; do
    localid=$(echo "$record"|cut -d: -f1)
    state=$(echo "$record"|cut -d: -f2)
    jobstates[$localid]=$state;
done
unset jobstate_squeue

handle_commentfile () {
    localid=$1
    sessiondir=`grep -h '^sessiondir=' "$jobfile" | sed 's/^sessiondir=\(.*\)/\1/'`
    if [ "$my_id" != '0' ] ; then
      # 'continue' is invalid outside a loop; bail out of the function instead
      if [ ! -O "$jobfile" ] ; then return ; fi
    fi
    uid=$(get_owner_uid "$jobfile")
    [ -z "$uid" ] && { log "Failed to stat $jobfile"; return; }
    save_commentfile "$uid" "${sessiondir}.comment" "${basenames[$localid]}.errors"
}

# Call scontrol to find the exit code of a job and write it, together with
# a message, to the .lrms_done file. This function is used in the loop below.
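# Example of the mapping implemented below: scontrol reports the code as
# "ExitCode=<exit>:<signal>"; "ExitCode=2:0" yields exitcode 2, while
# "ExitCode=0:15" (killed by signal 15) yields 15 + 256 = 271, so signal
# terminations stay distinguishable from ordinary exit codes.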
function handle_exitcode {
    localid="$1"
    tmpexitcode="$2"
    reason="$3"

    jobinfostring=$("$scontrol" -o show job $localid)

    exitcode1=$(echo $jobinfostring|sed -n 's/.*ExitCode=\([0-9]*\):\([0-9]*\).*/\1/p')
    exitcode2=$(echo $jobinfostring|sed -n 's/.*ExitCode=\([0-9]*\):\([0-9]*\).*/\2/p')

    if [ -z "$exitcode1" ] && [ -z "$exitcode2" ] ; then
	exitcode=$tmpexitcode
    elif [ $exitcode2 -ne 0 ]; then
	exitcode=$(( $exitcode2 + 256 ))
    elif [ $exitcode1 -ne 0 ]; then
	exitcode=$exitcode1
    else
	exitcode=0
    fi

    echo "$exitcode $reason" > "${basenames[$localid]}.lrms_done"
    kicklist=(${kicklist[@]} $localid)
}

# A special version of the function above, needed to force the exit code
# to non-zero if the job was cancelled, since CANCELLED jobs in SLURM can
# have a 0 exit code.
# This is a temporary workaround; it should later be replaced by a proper
# fix that determines the reason for the failure.
function handle_exitcode_cancelled {
    localid="$1"
    tmpexitcode="$2"
    reason="$3"
 
    jobinfostring=$("$scontrol" -o show job $localid)
 
    exitcode1=$(echo $jobinfostring|sed -n 's/.*ExitCode=\([0-9]*\):\([0-9]*\).*/\1/p')
    exitcode2=$(echo $jobinfostring|sed -n 's/.*ExitCode=\([0-9]*\):\([0-9]*\).*/\2/p')
 
    if [ -z "$exitcode1" ] && [ -z "$exitcode2" ] ; then
      exitcode=$tmpexitcode
    elif [ $exitcode2 -ne 0 ]; then
      exitcode=$(( $exitcode2 + 256 ))
    elif [ $exitcode1 -ne 0 ]; then
      exitcode=$exitcode1
    else
      exitcode=0
    fi
    if [ $exitcode -eq 0 ]; then
      exitcode=15
      reason="Job was cancelled by SLURM"
    fi
    echo "$exitcode $reason" > "${basenames[$localid]}.lrms_done"
    kicklist=(${kicklist[@]} $localid)
}


# This function filters out WallTime from the .diag file if present and
# replaces it with output from the LRMS. It also adds StartTime and
# EndTime for accounting.

function handle_diag_file {
    localid="$1"
    ctr_diag="$2"
    jobinfostring=$("$scontrol" -o show job $localid)

    job_read_diag

    #Slurm can report StartTime and EndTime in at least these two formats:
    #2010-02-15T15:30:29
    #02/15-15:25:15
    #For our code to be able to manage both, the first needs to keep its hyphens,
    #the second needs them removed
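    #The sed expressions below therefore turn, for example:
    #  2010-02-15T15:30:29  into  2010-02-15 15:30:29
    #  02/15-15:25:15       into  02/15 15:25:15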

    starttime=$(echo "$jobinfostring"|sed -n 's/.*StartTime=\([^ ]*\) .*/\1/p' | \
	sed 's,\([0-9]\+/[0-9]\+\)-\([0-9:]\+\),\1 \2,g' | sed 's/T/ /g')
    endtime=$(echo "$jobinfostring"|sed -n 's/.*EndTime=\([^ ]*\) .*/\1/p' | \
	sed 's,\([0-9]\+/[0-9]\+\)-\([0-9:]\+\),\1 \2,g' | sed 's/T/ /g')
    cpus=$(echo "$jobinfostring"|sed -n 's/.*NumCPUs=\([^ ]*\) .*/\1/p')

    date_to_utc_seconds "$starttime"
    starttime_seconds="$return_date_seconds"
    seconds_to_mds_date "$return_date_seconds"
    LRMSStartTime=$return_mds_date
    date_to_utc_seconds "$endtime"
    endtime_seconds="$return_date_seconds"
    seconds_to_mds_date "$return_date_seconds"
    LRMSEndTime=$return_mds_date
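    # LRMSStartTime/LRMSEndTime now hold the same timestamps in MDS date
    # format (presumably YYYYMMDDhhmmssZ, as produced by seconds_to_mds_date).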

    #TODO handle cputime, exitcode etc.
    walltime=$(( $endtime_seconds - $starttime_seconds))
    #cputime=$(( $walltime * $count))
    # Values to write to diag. These will override values already written.
    [ -n "$walltime" ] && WallTime=$walltime
    [ -n "$cpus" ] && Processors=$cpus
    #[ -n "$cputime" ] && UserTime=$cputime
    #[ -n "$cputime" ] && KernelTime=0

    job_write_diag
}

# Look at the list of job states and determine which jobs have finished.
# Write job.XXXX.lrms_done accordingly.
for localid in "${localids[@]}"; do
    # jobfile is used by handle_commentfile and in the case branches below
    jobfile="${basenames[$localid]}.local"
    case "${jobstates[$localid]}" in
 	"")
            # Job is missing (no state) from SLURM but still marked INLRMS.

            exitcode=''
            # get session directory of this job
            sessiondir=`grep -h '^sessiondir=' "$jobfile" | sed 's/^sessiondir=\(.*\)/\1/'`
            diagfile="${sessiondir}.diag"
	    commentfile="${sessiondir}.comment"
            if [ "$my_id" != '0' ] ; then
                if [ ! -O "$jobfile" ] ; then continue ; fi
            fi
            uid=$(get_owner_uid "$jobfile")
            [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; }

            if [ ! -z "$sessiondir" ] ; then
                # We have a chance to obtain the exit code
		if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then
		    # In case of a non-NFS setup it may take some time until
		    # the diagnostics file is delivered. Wait up to 2 minutes.
		    diag_tries=20
		    while [ "$diag_tries" -gt 0 ] ; do
			if [ -z "$uid" ] ; then
			    exitcode=`grep '^exitcode=' "$diagfile" 2>/dev/null | sed 's/^exitcode=//'`
			else
			    exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//')
			fi
			if [ ! -z "$exitcode" ] ; then break ; fi
			sleep 10
			diag_tries=$(( $diag_tries - 1 ))
		    done
		else
		    if [ -z "$uid" ] ; then
			exitcode=`grep '^exitcode=' "$diagfile" 2>/dev/null | sed 's/^exitcode=//'`
		    else
			exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//')
		    fi
		fi
	    fi

	    jobstatus="$exitcode Job missing from SLURM, exitcode recovered from session directory"
	    if [ -z "$exitcode" ]; then
		exitcode="-1"
		jobstatus="$exitcode Job missing from SLURM"
	    fi

	    save_commentfile "$uid" "$commentfile" "${basenames[$localid]}.errors"
	    echo  "$jobstatus" > "${basenames[$localid]}.lrms_done"
	    kicklist=(${kicklist[@]} $localid)

 	    ;;
  	PENDING|RUNNING|SUSPENDED|COMPLETING)
  	    # Job is still queued or running; nothing to do.
  	    ;;
  	CANCELLED)
	    handle_commentfile $localid
	    # handle_exitcode_cancelled writes .lrms_done and updates kicklist,
	    # so no separate echo is needed here
	    handle_exitcode_cancelled $localid "-1" "Job was cancelled"
	    handle_diag_file "$localid" "${basenames[$localid]}.diag"
	    ;;
  	COMPLETED)
	    handle_commentfile $localid
	    handle_exitcode $localid "0" ""
	    handle_diag_file "$localid" "${basenames[$localid]}.diag"
  	    ;;
  	FAILED)
	    handle_commentfile $localid
	    handle_exitcode $localid "-1" "Job failed"
	    handle_diag_file "$localid" "${basenames[$localid]}.diag"
  	    ;;
  	TIMEOUT)
	    handle_commentfile $localid
	    handle_exitcode $localid "-1" "Job timeout"
	    handle_diag_file "$localid" "${basenames[$localid]}.diag"
  	    ;;
  	NODE_FAIL)
	    handle_commentfile $localid
	    handle_exitcode_cancelled $localid "-1" "Node fail"
	    handle_diag_file "$localid" "${basenames[$localid]}.diag"
  	    ;;
    esac
done

# Kick the GM
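# gm-kick is expected to notify the grid manager (A-REX) about the listed
# job control files so that finished jobs are processed without waiting
# for the next polling cycle.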
if [ -n "${kicklist[*]}" ];then
    "${libexecdir}/gm-kick" \
	$(for localid in "${kicklist[@]}";do
	    echo "${basenames[$localid]}.local"
	    done | xargs)
fi

exit 0