This file is indexed.

/usr/share/arc/scan-ll-job is in nordugrid-arc-arex 5.3.0~rc1-1.

This file is owned by root:root, with mode 0o755.

The actual contents of the file can be viewed below.

#!/bin/bash

# Helper script to flag done LoadLeveler jobs.
# The script is called periodically by the grid-manager.
#

# This function retrieves the job status and id in one shot.
# It looks for jobs which have a known status but are not completed (status != C)
# and saves the localid of these jobs in the string variable $outLocalIdsString.
# The input variable is a space-separated list of localids to check.
# Example of usage:
# get_bunch_jobs_status "$inLocalIdsString"
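#
# With the raw (-r) format each element returned by llq has the form
# <status>!<step id>. A hypothetical output (illustrative only; the exact
# step id format depends on the LoadLeveler installation) could be:
#   R!node01.123.0
#   C!node01.124.0
# after which $outLocalIdsString would contain " node01.123.0" (the
# completed step node01.124.0 is skipped).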

outLocalIdsString=""
get_bunch_jobs_status() {

  # Get the status!id list of the jobs
  loop=`$LL_BIN_PATH/llq -r %st %id $1`
  if [ $? -eq 0 ]; then
    for elm in $loop
    do
      # Keep only elements with a known status that is not "C" (completed)
      if echo "$elm" | grep -q '^[A-Z]\{1,2\}!.\+$'; then
        if ! echo "$elm" | grep -q '^C!'; then
          outLocalIdsString=$outLocalIdsString" "`echo "$elm" | awk -F! '{ print $NF }'`
        fi
      fi
    done
  fi
}

##################

# ARC1 passes the config file first.
if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi

basedir=`dirname $0`
basedir=`cd $basedir > /dev/null && pwd` || exit $?

libexecdir="${ARC_LOCATION:-/usr}/lib/arc/"
pkgdatadir="$basedir"

# Assume that gm-kick is installed in the ARC libexec directory
GMKICK=${libexecdir}/gm-kick
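# gm-kick is invoked further down, right after a .lrms_done file is written,
# to notify the grid-manager that the job's control files have changed.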

# Does the control directory exist?
control_dir="$1"
test -d "$control_dir" || exit 1

# Get LoadLeveler environment
. "${pkgdatadir}/configure-ll-env.sh" || exit $?

. "${pkgdatadir}/scan_common.sh" || exit $?


# Log system performance
if [ ! -z "$perflogdir" ]; then
   perflog_common "$perflogdir" "$CONFIG_controldir"
fi

if [ ! -z "$perflogdir" ]; then
   start_ts=`date +%s.%N`
fi

my_id=`id -u`

# mergedlist: array where each element has the form jobid:localid,
# where jobid is the ARC job id and localid is the LRMS id
mergedlist=()
# inLocalIdsString: space-separated list of the localids retrieved
# from the ARC .local files
inLocalIdsString=""

findoutput=$(find "$control_dir/processing" -maxdepth 1 -type f -name 'job.*.status' | sed 's/processing\/job\.\([^\.]*\)\.status$/job.\1.local/') 
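# Illustrative example of the path rewrite above (the jobid is hypothetical):
#   <control_dir>/processing/job.ABC123.status -> <control_dir>/job.ABC123.local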

while read i
do

  # Skip entries that do not point to an existing file
  test -f "$i" || continue
  
  jobid=`basename $i .local|sed 's/^job.//'`
  donefile="${control_dir}/job.${jobid}.lrms_done"
  statusfile="${control_dir}/processing/job.${jobid}.status"

  # Skip the job if it is already flagged as done
  test -f "$donefile" && continue

  if [ ! -f "$statusfile" ] ; then continue ; fi
  gmstatus=`cat "$statusfile"`
  if [ "$gmstatus" != "INLRMS" ] && [ "$gmstatus" != "CANCELING" ] ; then continue ; fi

  # Get the local LRMS id of the job by evaluating the localid= line
  localid=`grep ^localid= $i|head -1`
  eval $localid
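  # Illustrative example (hypothetical id): if the .local file contains a line
  #   localid=node01.123.0
  # the eval above sets the shell variable $localid to that value.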

  # Did we get a local id?
  test "$localid" = "" && continue

  # HACK: save the localid to be queried in inLocalIdsString and
  # associate the localid with its jobid in mergedlist
  inLocalIdsString=$inLocalIdsString" "$localid
  mergedlist+=("$jobid:$localid")

done <<< "$findoutput"

if [ ! -z "$perflogdir" ]; then
   stop_ts=`date +%s.%N`
   t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"`
   echo "[`date +%Y-%m-%d\ %T`] scan-ll-job, ControldirTraversal: $t" >> $perflogfile
fi


if [ ! -z "$perflogdir" ]; then
   start_ts=`date +%s.%N`
fi

# Query LoadLeveler for the jobs and save the ones that are
# not yet completed into $outLocalIdsString.
# Call the function only if the string contains at least one localid.
if [[ $inLocalIdsString =~ [0-9a-zA-Z] ]]; then
  get_bunch_jobs_status "$inLocalIdsString"
fi

if [ ! -z "$perflogdir" ]; then
   stop_ts=`date +%s.%N`
   t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"`
   echo "[`date +%Y-%m-%d\ %T`] scan-ll-job, llq -r %st %id: $t" >> $perflogfile
fi

if [ ! -z "$perflogdir" ]; then
   start_ts=`date +%s.%N`
fi
numelem=0
# Loop over the elements of mergedlist
for element in "${mergedlist[@]}"
do

  # Split the element into jobid and localid
  jobid=`echo $element | awk '{split($0,a,":"); print a[1]}'`
  localid=`echo $element | awk '{split($0,a,":"); print a[2]}'`

  # Skip jobs that are not yet completed, i.e. those present in $outLocalIdsString
  if [[ $outLocalIdsString == *$localid*  ]]
  then 
      continue
  fi
  numelem=$((numelem+1))
  donefile="${control_dir}/job.${jobid}.lrms_done"
  statusfile="${control_dir}/processing/job.${jobid}.status"
  jobfile="${control_dir}/job.${jobid}.local"
  errorsfile="${control_dir}/job.${jobid}.errors"

  # Skip the job if it is already flagged as done
  test -f "$donefile" && continue

  if [ ! -f "$statusfile" ] ; then continue ; fi
  gmstatus=`cat "$statusfile"`

  exitcode=''

  # get session directory of this job
  sessiondir=`grep -h '^sessiondir=' "$control_dir/job.${jobid}.local" | sed 's/^sessiondir=\(.*\)/\1/'`
  diagfile="${sessiondir}.diag"
  commentfile="${sessiondir}.comment"

  if [ "$my_id" != '0' ] ; then
    if [ ! -O "$jobfile" ] ; then continue ; fi
  fi
  uid=$(get_owner_uid "$jobfile")
  [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; }

  if [ ! -z "$sessiondir" ] ; then
    # we have a chance to obtain the exit code
    exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//')
  else
    continue
  fi

  if [ ! -z "$exitcode" ] ; then
    if [ "$exitcode" = "152" -o $exitcode = "24" ] ; then
      exitcode="24"
      save_commentfile "$uid" "${sessiondir}.comment" "$errorsfile"
      echo "$exitcode Job exceeded time limit." > "$donefile"
      # If job exceeded time, then it will have been killed and no cputime/walltime has been written
      walltime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Wall Clk Hard Limit:.*(\([0-9]*\) seconds.*/\1/p'`
      usertime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Step Cpu Hard Limit:.*(\([0-9]*\) seconds.*/\1/p'`
      starttime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Dispatch Time: \(.*\)/\1/p'`
      endtime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Completion Date: \(.*\)/\1/p'`
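      # The sed expressions above expect llq -l output lines such as the
      # following (illustrative; the exact wording may differ between
      # LoadLeveler versions):
      #   Wall Clk Hard Limit: 1:00:00 (3600 seconds)
      #   Step Cpu Hard Limit: 1:00:00 (3600 seconds)
      #   Dispatch Time: Mon Apr  3 10:00:00 2017
      #   Completion Date: Mon Apr  3 11:00:00 2017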

      if [ -n "$starttime" ]; then
        date_to_utc_seconds "$starttime"
        seconds_to_mds_date "$return_date_seconds"
        starttime=$return_mds_date
      fi
      if [ -n "$endtime" ]; then
        date_to_utc_seconds "$endtime"
        seconds_to_mds_date "$return_date_seconds"
        endtime=$return_mds_date
      fi

      job_read_diag

      [ -n "$walltime" ] && WallTime=${walltime}
      [ -n "$usertime" ] && UserTime=${usertime}
      [ -n "$usertime" ] && KernelTime=0
      [ -n "$starttime" ] && LRMSStartTime=${starttime}
      [ -n "$endtime" ] && LRMSEndTime=${endtime}
      # This needs investigating; it might be the user program's exit code
      [ -n "$exitcode" ] && LRMSExitcode=$exitcode

      job_write_diag

      ${GMKICK} "$jobfile"
      continue
    fi
    # job finished and exit code is known
    save_commentfile "$uid" "${sessiondir}.comment" "$errorsfile"
    echo "$exitcode Executable finished with exit code $exitcode" >> "$donefile"
    ${GMKICK} "$jobfile"
    continue
  fi
  exitcode=-1
  save_commentfile "$uid" "${sessiondir}.comment" "$errorsfile"
  echo "$exitcode Job finished with unknown exit code" >> "$donefile"
  ${GMKICK} "$jobfile"
done

if [ ! -z "$perflogdir" ]; then
   stop_ts=`date +%s.%N`
   t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"`
   echo "[`date +%Y-%m-%d\ %T`] scan-ll-job, JobHandling, Handled= $numelem: $t" >> $perflogfile
fi

sleep 60
exit 0