#!/usr/bin/env python3
#******************************************************************************
#*                    X r d O s s A r c _ B k p U t i l s                    *
#******************************************************************************

import errno
import json
import os
import shutil
import pwd
import sys

from datetime import datetime
from datetime import timezone

from shutil import rmtree
from urllib.parse import urlparse

from rucio.client.didclient import DIDClient
from rucio.client.replicaclient import ReplicaClient

# Rucio changed the naming conventions for the metadata client with no
# indication of which release it occurred in, so we try both import paths.
try:
   from rucio.client.metaclient import MetaClient as MetaConventionClient
except:
   from rucio.client.metaconventionsclient import MetaConventionClient

from rucio.common.exception import RucioException

# Check for debugging. Any value of XRDOSSARC_DEBUG (even empty) enables it.
#
x = os.getenv("XRDOSSARC_DEBUG", None)
Debug = x is not None

# The following will be initialized in "main"
#
DIDclient = None

#******************************************************************************
#*                                  E m s g                                   *
#******************************************************************************
# Print to stderr a message
#
def Emsg(rc, txt):
   """Report *txt* on stderr, optionally printing usage and exiting.

   A negative rc additionally prints the command usage summary and is
   then negated. Any resulting non-zero rc terminates the program with
   that exit code; rc == 0 simply returns.
   """
   print('OssArc_BkupUtils:', txt, file=sys.stderr)
   if rc < 0:
      usage = ("Usage: addkey <key> [<key [...]]",
               "       finish <rse> <scope> <dsname> <dsndir> <finkey> <finval>",
               "       list   <key> <value> <scope> <eolval>",
               "       qkey   <key>[,...]   <scope> <dsname>",
               "       set    <key> <value> <scope> <dsname>",
               "       setup  <rse> <scope> <dsname> <arenadir> <mntpfn> [<manpfn>]",
               "       stat   <fmt> <scope> <did>",
               "              <fmt>: cgi",
               "       which  <arc> <fname> <scope> <dsname>")
      for line in usage:
         print(line)
      rc = -rc
   if rc:
      sys.exit(rc)

#******************************************************************************
#*                          g e t _ M a x I t e m s                           *
#******************************************************************************

def get_MaxItems():
   """Return the maximum number of items allowed in one bulk Rucio query.

   Defaults to 1000; may be overridden via the XRDOSSARC_MAXITEMS
   environment variable (clamped to a minimum of 1). A non-integer
   override is reported and the default is kept.
   """
   maxItems = 1000
   envval = os.getenv("XRDOSSARC_MAXITEMS", None)
   if envval is not None:
      try:
         maxItems = max(1, int(envval))
      except Exception as e:
         Emsg(0, "XRDOSSARC_MAXITEMS={} is not an integer ({})".format(envval, e))

   if Debug: Emsg(0, 'Rucio maxitems = {}'.format(maxItems))

   return maxItems
  
#******************************************************************************
#*                                A d d K e y                                 *
#******************************************************************************

def AddKey(keys):
   """Ensure each metadata key in *keys* is registered with Rucio.

   Missing keys are added with key_type 'COLLECTION'; existing ones are
   left alone. Failures exit via Emsg(8). Returns 0.
   """
   metaclient = MetaConventionClient()

   # Get all current keys
   #
   try:
      allkeys = metaclient.list_keys()
   except Exception as e:
      # Bug fix: the message previously formatted the key list instead
      # of the exception text.
      Emsg(8, 'Unable to list keys: {}'.format(e))

   # Add all missing keys
   #
   for key in keys:
      if key not in allkeys:
         try:
            metaclient.add_key(key=key, key_type='COLLECTION')
            if Debug: Emsg(0, "Metadata key '{}' added.".format(key))
         except Exception as e:
            Emsg(8, 'Unable to add key {}: {}'.format(key, e))
      elif Debug: Emsg(0, "Metadata key '{}' exists.".format(key))

   return 0

#******************************************************************************
#*                              a r c S p l i t                               *
#******************************************************************************
  
def arcSplit(didVec, optSZ, minSZ, maxSZ):
   """Partition didVec into archive groups sized near optSZ bytes.

   Each didVec entry's byte size is taken from index 2. Entries are
   consumed (popped) from didVec in order. Groups aim for at least
   optSZ bytes without exceeding maxSZ; a group smaller than minSZ
   (or a single entry larger than maxSZ) marks the split non-conforming.

   Returns (noGo, ordMD, vecSet, vecMD) where noGo flags a
   non-conforming split, ordMD is a space-separated list of cumulative
   file counts marking group boundaries ('' for a single group),
   vecSet is the list of groups, and vecMD holds [count, bytes] per
   group.
   """
   nonConform = False
   groups   = []   # Groups of didVec entries with conforming sizes
   groupMD  = []   # [count, bytes] describing each group
   accBytes = 0
   accVec   = []

   # Greedily accumulate entries until a group reaches at least optSZ
   # without exceeding maxSZ.
   #
   while didVec:
      entBytes = didVec[0][2]
      tryBytes = accBytes + entBytes
      if tryBytes < optSZ:
         accVec.append(didVec.pop(0))
         accBytes = tryBytes
      elif tryBytes <= maxSZ:
         accVec.append(didVec.pop(0))
         groups.append(accVec)
         groupMD.append([len(accVec), tryBytes])
         accVec, accBytes = [], 0
      elif not accVec:
         # A single entry larger than maxSZ gets its own group.
         nonConform = True
         groups.append([didVec.pop(0)])
         groupMD.append([1, tryBytes])
      else:
         # Close the current group without the pending entry; it will be
         # retried with a fresh accumulator on the next iteration.
         if accBytes < minSZ:
            nonConform = True
         groups.append(accVec)
         groupMD.append([len(accVec), accBytes])
         accVec, accBytes = [], 0

   # Handle any residual entries not yet placed. The residual may form
   # an additional group if conformable, otherwise it is merged into the
   # last group. Ideally we would rebalance to avoid violating maxSZ but
   # better too large than too small.
   #
   if accVec:
      if accBytes >= minSZ or not groups:
         groups.append(accVec)
         groupMD.append([len(accVec), accBytes])
      else:
         last = groupMD[-1]
         if last[1] + accBytes > maxSZ:
            nonConform = True
         groupMD[-1] = [last[0] + len(accVec), last[1] + accBytes]
         groups[-1].extend(accVec)

   # Compute the ordinal metadata: cumulative file counts at each group
   # boundary, used to locate which group holds a given file.
   #
   if len(groupMD) > 1:
      running = groupMD[0][0]
      bounds  = [running]
      for md in groupMD[1:-1]:
         running += md[0]
         bounds.append(running)
      ordMD = ' '.join(str(b) for b in bounds)
   else:
      ordMD = ''
   return nonConform, ordMD, groups, groupMD
       
#******************************************************************************
#*                            a r c S y m l i n k                             *
#******************************************************************************
  
def arcSymlink(lfn2pfn, dsnDir, subDir=""):
   """Create one symlink per (lfn, pfn) pair in lfn2pfn under the CWD.

   lfn2pfn: entries whose element 0 is the lfn (link name, may contain
            directory components) and element 1 is the pfn (link
            target); extra elements are ignored.
   dsnDir:  dataset arena directory name, used only in error messages.
   subDir:  optional subdirectory to create (if missing) and chdir into
            first; used when building multiple archives. On return the
            CWD is restored with chdir("..").

   Any failure terminates the program via Emsg.
   """

   # If we need to place the symlinks in a subdirectory, create it and
   # chdir to it; this is used when we are creating multiple archives.
   # NOTE(review): dsnDir is extended with subDir only in the create
   # branch, so error messages for a pre-existing subDir show the bare
   # dsnDir — presumably harmless, but confirm.
   #
   if subDir: 
      if not os.path.exists(subDir):
         dsnDir = "{}/{}".format(dsnDir, subDir)
         try:
            os.mkdir(subDir)
         except Exception as e:
            Emsg(8, "Unable to create path {}: {}".format(dsnDir, e))
      try:
         os.chdir(subDir)
      except Exception as e:
          Emsg(errno.ENOENT, "Unable to chdir to {}: {}".format(dsnDir, e))

   # Now create all the symlinks using the lfn pointing to the pfn.
   #
   for fnMap in lfn2pfn:
      lfn  = fnMap[0]
      pfn  = fnMap[1]
      ldir = os.path.dirname(lfn)
      # Recreate the lfn's directory structure before linking.
      # NOTE(review): the guard tests lfn != '/', not ldir != '/';
      # confirm that was the intent.
      if ldir and lfn != '/':
         try:
            os.makedirs(ldir, 0o775, exist_ok=True)
         except Exception as e:
            Emsg(8, "Unable to recreate path {}{}: {}".format(dsnDir, ldir, e))
      try:
         os.symlink(pfn, lfn)
      except Exception as e:
         Emsg(8, "Unable to create symlink {} -> {}: {}".format(lfn,  pfn, e))

   # If we changed the current working directory, set it back to what it was
   #
   if subDir:
      try:
         os.chdir("..")
      except Exception as e:
          Emsg(errno.ENOENT, "Unable to chdir .. from {}: {}".format(dsnDir,e))
  
#******************************************************************************
#*                              G e t _ l f n s                               *
#******************************************************************************
  
def Get_lfns(scope, dsn):
   """Return the sorted list of files in dataset scope:dsn.

   Each element is [scope, name, bytes] plus, when the XRDOSSARC_CKSUM
   environment variable names a checksum type, a trailing
   {cksum: value-or-None} dict. A warning is issued when some files
   lack the requested checksum. Query failures exit via Emsg(8).
   """
   if Debug: Emsg(0, 'list_files({}, {})'.format(scope,dsn))
   DIDclient = DIDClient()
   try:
      didVec = DIDclient.list_files(scope, dsn)
   except Exception as e:
      Emsg(8, "Error getting lfns: {}".format(e))

   # Get the checksum that should be included, if any
   #
   cksum = os.getenv("XRDOSSARC_CKSUM", None)

   # Convert an object list to a standard list so we can sort it.
   #
   lfnVec = []
   noCKS  = 0
   for file in didVec:
      lfnVec.append([file['scope'], file['name'], file['bytes']])
      if cksum:
         # A missing checksum is recorded as None and counted so we can
         # warn below. Only KeyError is expected here; anything else
         # should propagate rather than be silently swallowed (the
         # original bare except hid real errors).
         try:
            ckval = file[cksum]
         except KeyError:
            ckval = None
            noCKS += 1
         lfnVec[-1].append({cksum:ckval})

   # Check if we should produce a warning about missing checksums
   #
   if noCKS:
      Emsg(0, "Warning! {} of {} files have no {} checksum!".format(noCKS,
               len(lfnVec), cksum))

   # We must sort the lfns in order to be able to figure out which archive
   # the file was placed in should we need to spill over into multiple ones.
   # Also note that the sort key must include the scope due to collections.
   #
   lfnVec.sort(key=lambda x: x[0]+':'+x[1])

   # Return the vector
   #
   return lfnVec
  
#******************************************************************************
#*                           G e t _ l f n 2 p f n                            *
#******************************************************************************

def Get_lfn2pfn(rse, scope, dsn, pfnPFX):
   """Map each lfn in dataset scope:dsn to its pfn on the given RSE.

   Returns (totFiles, totBytes, lfn2pfn) where each lfn2pfn entry is
   [scope:name, pfn, bytes] plus a trailing checksum dict when
   XRDOSSARC_CKSUM is in effect (see Get_lfns). Failures exit via
   Emsg(8).
   """
   # Get the list of filenames in the wanted dataset (this will be sorted)
   #
   lfnVec = Get_lfns(scope, dsn)

   # Map the lfns to their pfns for the wanted RSE
   #
   qryVec = []
   totBytes = 0
   for file in lfnVec:
      qryVec.append({'scope':file[0], 'name':file[1]})
      totBytes += file[2]
   totFiles = len(qryVec)

   # Get the replicas for the specified data
   #
   lfn2pfn = []
   REPclient = ReplicaClient()

   # Get the maximum allowed items in a bulk query
   #
   maxitems = get_MaxItems()

   # NOTE(review): replicas are assumed to arrive in the same (sorted)
   # order as the request vector; confirm list_replicas preserves order.
   #
   while (len(qryVec) > 0):
      nDone = 0
      try:
         replicas = REPclient.list_replicas(qryVec[0:min(len(qryVec), maxitems)], rse_expression=rse)
         for replica in replicas:
            pfn = pfnPFX + urlparse(replica["rses"][rse][0]).path
            did = qryVec.pop(0)
            xtr = lfnVec.pop(0)
            # Bug fix: the checksum dict occupies slot 3 only when
            # XRDOSSARC_CKSUM is set; unconditionally reading xtr[3]
            # raised IndexError otherwise.
            entry = [did['scope'] + ':'+did['name'], pfn, xtr[2]]
            if len(xtr) > 3: entry.append(xtr[3])
            lfn2pfn.append(entry)
            nDone += 1
      except Exception as e:
         Emsg(8, "Error getting pfns: {}".format(e))
      # Bug fix: an empty reply used to spin forever because qryVec
      # never shrank; report it and quit instead.
      if nDone == 0:
         Emsg(8, "Error getting pfns: no replicas returned for {}:{}"
                 .format(scope, dsn))
  
   if Debug:
      Emsg(0, 'DSN: {}:{} Files: {} Bytes: {}'.format(scope, dsn, totFiles,
                                                      totBytes))

   return totFiles, totBytes, lfn2pfn
  
#******************************************************************************
#*                               L S _ B k u p                                *
#******************************************************************************

def LS_Bkup(argv):
   """List closed datasets in need of backup.

   argv = [key, kval, scope, eol]: select datasets in *scope* whose
   metadata *key* has value *kval*; print the name of each one that is
   closed (open datasets are skipped with a warning) and finally print
   *eol* as the end-of-list sentinel. Failures exit via Emsg. Returns 0.
   """

   # Make sure we have at least four arguments. These would correspond to:
   # <key> <kval> <scope> <eol> 
   #
   if len(argv) < 4: Emsg(-errno.EINVAL, "Too few arguments") 
   key      = argv[0] 
   kval     = argv[1]
   theScope = argv[2]
   eol      = argv[3]

   # NOTE(review): did_type is passed as the literal string
   # 'DIDType.DATASET'; confirm the client accepts this spelling rather
   # than the DIDType enum or 'dataset'.
   try:
      dids = DIDclient.list_dids(scope=theScope, did_type='DIDType.DATASET',
                                 filters=({key:kval}), long=False)
   except Exception as e:
      Emsg(8, 'Cannot get datasets to be backed up: {}'.format(e))

   # Convert the did names into bulk-query descriptors.
   dsVec = []
   for did in dids:
      dsVec.append({'scope':theScope, 'name':did})

   # Get the maximum items we can have in a bulk query
   #
   maxitems = get_MaxItems()

   # Get the metadata for all of the did's potentially needing backup
   #
   while (len(dsVec) > 0):
      try:
         mLen = min(len(dsVec), maxitems)
         mVec = DIDclient.get_metadata_bulk(dsVec[0:mLen]) 
      except Exception as e:
         Emsg(8, 'Cannot get dataset backup metadata: {}'.format(e))

      # Remove the queried elements from the list
      #
      del dsVec[:mLen]
   
      # Construct list of closed datasets as only they can be backed up
      #
      for meta in mVec:
         if not meta['is_open']:
            print(meta['name'])
         else:
            Emsg(0,"Dataset {}:{} is still open; backup skipped!"
                   .format(theScope, meta['name']))

   print(eol)
   return 0

#******************************************************************************
#*                                 Q r y _ K e y                              *
#******************************************************************************

def Qry_Key(argv, retval=False, mtype='JSON'):
   """Query metadata values for a DID.

   argv = [key, scope, dsn] where *key* is a list of metadata key names
   (Main's qkey handler and the Stat/Which callers all pass a list).
   mtype is handed to get_metadata as the metadata plugin selector.

   When retval is True, returns the list of values (meta.get per key),
   or None when the DID does not exist. Otherwise prints one value per
   line and returns 0. Other failures exit via Emsg.
   """

   # Make sure we have at least three arguments. These would correspond to:
   # <key> <scope> <dsn> 
   #
   if len(argv) < 3: Emsg(-errno.EINVAL, "Too few arguments") 
   key      = argv[0] 
   theScope = argv[1]
   didName  = argv[2]
   theDID   = theScope+':'+didName

   try:
       meta = DIDclient.get_metadata(theScope, didName, mtype)
   except RucioException as e:
      if type(e).__name__ == 'DataIdentifierNotFound':
         if retval: return None 
         Emsg(errno.ENOENT, "Unable to query {}: data identifier notfound"
                            .format(theDID))
      # NOTE(review): only key[0] is reported here even though key is a
      # list of names.
      Emsg(errno.EINVAL, "Unable to query {} for key {}: {}"
                         .format(theDID, key[0], e))
   except Exception as e:
      Emsg(errno.EINVAL, "Unable to query {} for key {}: {}"
                         .format(theDID, key[0], e))

   # Either return the values or print them, one per line.
   if retval:
      valV= []
      for kv in key: valV.append(meta.get(kv))
      return valV

   for kv in key: print(meta.get(kv))
   return 0

#******************************************************************************
#*                            S e t _ B a c k u p                             *
#******************************************************************************

def Set_Backup(argv):
   """Set a metadata key on a dataset: argv = [key, kval, scope, dsn].

   Exits via Emsg on argument or metadata-service errors; returns 0 on
   success.
   """
   # Make sure we have at least four arguments. These would correspond to:
   # <key> <kval> <scope> <dsn> 
   #
   if len(argv) < 4: Emsg(-errno.EINVAL, "Too few arguments") 
   key      = argv[0] 
   kval     = argv[1]
   theScope = argv[2]
   dsname   = argv[3]

   try:
      DIDclient.set_metadata(theScope, dsname, key, kval)
   except Exception as e:
      # Emsg(8, ...) terminates the program. (The original wrapped this
      # call in a spurious {...} set literal.)
      Emsg(8, "Unable to set {}:{} backup status to {}: {}".
               format(theScope, dsname, kval, e))

   return 0

#******************************************************************************
#*                               S e t _ K e y                                *
#******************************************************************************

def Set_Key(theScope, dsname, key, kval):
   """Set metadata key=kval on dataset theScope:dsname.

   Exits via Emsg(8) on failure; returns 0 otherwise. (The original
   had `return 0` nested inside the except block, so a successful call
   returned None; it is now at function level, matching Set_Backup.)
   """
   try:
      DIDclient.set_metadata(theScope, dsname, key, kval)
   except Exception as e:
      # Bug fix: message no longer ends with a stray trailing colon.
      Emsg(8, "Unable to set {}:{} metadata key {}: {}".format(theScope,
              dsname, key, e))
   return 0
  
#******************************************************************************
#*                               S e t T e s t                                *
#******************************************************************************

def SetTest(lfn2pfn, scope, dsn):
   """Verify each pfn in lfn2pfn exists and is a regular file.

   A message is issued for every missing or non-file target; if any
   were found the setup is failed via Emsg(32), which terminates the
   program.
   """
   complete = True
   for entry in lfn2pfn:
      lfn, pfn = entry[0], entry[1]
      if not (os.path.exists(pfn) and os.path.isfile(pfn)):
         Emsg(0, "DID {}:{}/{} target {} does not exist or is not a file!".
                 format(scope, dsn, lfn, pfn))
         complete = False

   # Fail the backup when any target was unusable
   #
   if not complete:
      Emsg(32,"WARNING! Dataset {}:{} is incomplete and connot be backed up!".
              format(scope, dsn))
  
#******************************************************************************
#*                                 S e t u p                                  *
#******************************************************************************
 
def Setup(argv):
   """Prepare the backup arena for a dataset.

   argv = [rse, scope, dsn, dsnDir, pfnPFX, [manpfn]]:
     rse    - the RSE holding the replicas to be archived.
     scope  - the dataset scope.
     dsn    - the dataset name.
     dsnDir - atomically unique arena directory for the symlink tree.
     pfnPFX - prefix prepended to each replica's pfn path.
     manpfn - optional absolute path of a manifest file to write.

   The arena directory is wiped and recreated, symlinks for every file
   in the dataset are laid out (optionally split into multiple archive
   subdirectories per XRDOSSARC_SIZE), and "<files> <bytes>" is printed
   to stdout. All failures exit via Emsg. Returns 0.
   """
   # Make sure we have at least five arguments.
   #
   if len(argv) < 5: Emsg(-errno.EINVAL, "Too few arguments") 
   
   # Assign names to the arguments. Note that the dsnDir is atomically unique.
   #
   theRSE   = argv[0]
   theScope = argv[1]
   theDSN   = argv[2]
   dsnDir   = argv[3]
   pfnPFX   = argv[4]

   # The optional manifest path is accepted only when absolute.
   #
   if len(argv) > 5 and argv[5][0:1] == '/': manpfn = argv[5]
   else: manpfn = None

   # Fix up pfn prefix and dsnDir as they must not end with a slash
   #
   while pfnPFX.endswith('/'): pfnPFX = pfnPFX[:-1]
   while dsnDir.endswith('/'): dsnDir = dsnDir[:-1]

   # Do some debugging
   #
   if Debug:
      Emsg(0, "RSE={} dsn={}:{} dsnDir={} pfnPFX={} manpfn={}"
              .format(theRSE, theScope, theDSN, dsnDir, pfnPFX, manpfn))
   
   # Set our working directory to the root of the dataset members
   #
   try:
      os.chdir(dsnDir)
   except Exception as e:
      Emsg(errno.ENOENT, "unable to chdir to {}: {}".format(dsnDir,e))

   # Verify that we can use the target directory
   #  
   if (not os.access(dsnDir, os.W_OK)):
      Emsg(errno.EACCES, "{} not writable (target directory)".format(dsnDir))

   # Clean this directory of anything that might be left over by removing
   # and recreating it with its original permissions.
   #
   bStat = os.stat(dsnDir)
   bMode = bStat.st_mode & 0o777
   try:
      os.chdir('..')
      rmtree(dsnDir)
   except Exception as e:
      Emsg(8, "Unable to clean arena directory {}: {}".format(dsnDir, e))
   try:
      os.mkdir(dsnDir, bMode)
   except Exception as e:
      Emsg(8, "Unable to recreate arena directory {}: {}".format(dsnDir, e))
   try:
      os.chdir(dsnDir)
   except Exception as e:
      Emsg(8, "Unable to cd to arena directory {}: {}".format(dsnDir, e))

   # Obtain lfn to pfn map for the dataset
   #
   totFiles, totBytes, lfn2pfn = Get_lfn2pfn(theRSE, theScope, theDSN, pfnPFX)
     
   # Make sure we have something here to work with
   #
   if totFiles != len(lfn2pfn) or not totFiles:
      Emsg(errno.ENODATA, "Dataset {}:{} is empty!".format(theScope, theDSN))

   # If we need to write out a manifest, do so now (the with statement
   # closes the file; the explicit close() was redundant).
   #
   if manpfn is not None:
      try:
         with open(manpfn, "w") as f:
            print(lfn2pfn, '# Source-RSE='+theRSE, file=f)
      except Exception as e:
         if isinstance(e, OSError) and e.errno != 0: rc = e.errno
         else: rc = 8
         Emsg(rc, "Unable to create {}: {}".format(manpfn, e))

   # Check if we want to possibly split this archive into segments. The
   # value is "<optimal> <min> <max> <skipflag>".
   #
   x = os.getenv("XRDOSSARC_SIZE", None)
   if x is not None:
      try:
         nparms = x.split()
         if len(nparms) < 4:
            # Bug fix: format() was applied to Emsg's None return value
            # instead of to the message string.
            Emsg(errno.EINVAL,"XRDOSSARC_SIZE={} has too few args!".format(x))
         wantSZ = int(nparms[0])
         aminSZ = int(nparms[1])
         amaxSZ = int(nparms[2])
         doSkip = int(nparms[3])
      except Exception as e:
         Emsg(errno.EINVAL,"XRDOSSARC_SIZE={} has non-integers ({})".format(x,e))
   else:
      amaxSZ = None
      doSkip = False

   if Debug: Emsg(0, 'XRDOSSARC_SIZE = {}'.format(x))

   # Verify that all of the files that make up this dataset are actually
   # accessible files. SetTest() fails the backup if that is not true.
   #
   SetTest(lfn2pfn, theScope, theDSN)

   # If split archives allowed, perform the split as requested
   #
   if amaxSZ:
      noGo, ordMD, arcSet, arcMD = arcSplit(lfn2pfn,wantSZ,aminSZ,amaxSZ)
      if noGo and doSkip:
         # Bug fix: Set_Backup() takes an argv-style list; use the
         # direct-argument helper Set_Key() instead. Emsg(16) exits.
         Set_Key(theScope, theDSN, "arcBackup", "Skip")
         Emsg(16,"Unable to split {}:{} into conforming archives; skipping"
                 .format(theScope, theDSN))
      Set_Key(theScope, theDSN, "arcIndex", ordMD)
      numArcs = len(arcMD)
      arcNum = 0
      for arcFiles in arcSet: 
          arcNum = arcNum + 1
          subDir = "~{}".format(arcNum)
          arcSymlink(arcFiles, dsnDir, subDir)
          arcEnt = arcMD.pop(0)
          if Debug:
             Emsg(0, "Created {} symlinks in {}/{} for {} bytes".
                     format(arcEnt[0], dsnDir, subDir, arcEnt[1]))
   else:
      numArcs = 1
      arcSymlink(lfn2pfn, dsnDir, '~1')

   # If a manifest file was generated, make sure it is included in the
   # first archive file of the set of archives
   #
   if manpfn: os.symlink(manpfn, dsnDir+'/~1/Manifest')

   # We are done, tell caller how big the setup is
   #
   Emsg(0, "Setup {} {}:{} for {} archive(s) totalling {} files and {} bytes".
           format(theRSE, theScope, theDSN, numArcs, totFiles, totBytes))

   print(totFiles, totBytes)
   return 0

#******************************************************************************
#*                                  S t a t                                   *
#******************************************************************************

def Stat(argv):
   """Print stat-like information for a DID: argv = [fmt, scope, did].

   Only the 'cgi' format is supported; the result is printed as a CGI
   query string (mode, uid, gid, size, atime, mtime, ctime). Returns 0
   on success, 2 when the DID does not exist; argument errors exit via
   Emsg.
   """
   # Make sure we have at least three arguments: 'cgi' <scope> <did>
   #
   if len(argv) < 3: Emsg(-errno.EINVAL, "Too few arguments") 

   # The first argument tells us how to print the response. Right now we
   # only support cgi format.
   #
   if argv[0] != 'cgi':
      Emsg(-errno.EINVAL, "{} format not supported".format(argv[0]))
   
   # Assign names to the arguments.
   #
   Scope = argv[1]
   Did   = argv[2]

   # Get the metadata for this did
   #
   qVec = ['created_at','updated_at','accessed_at','bytes','account','did_type']
   resp = Qry_Key([qVec, Scope, Did], retval=True, mtype='DID_COLUMN')

   # The did might not exist, check that out
   #
   if resp is None:
      print('!ENOENT')
      return 2

   # We will always have a create date but might not have an access or
   # update date, so we use the preceding timestamp as the fallback.
   #
   ctime = int(resp.pop(0).timestamp())
   mtime = resp.pop(0)
   if mtime is None: mtime = ctime
   else: mtime = int(mtime.timestamp())
   atime = resp.pop(0)
   if atime is None: atime = mtime
   else: atime = int(atime.timestamp())

   # Renamed from 'bytes' to avoid shadowing the builtin.
   size = resp.pop(0)
   user = resp.pop(0)

   # Establish type of DID
   #
   didT  = resp.pop(0)
   if didT != 'FILE': didT = 'DIR'

   # Get the uid and gid for this did; an unknown account raises
   # KeyError and a None account raises TypeError — both map to 0.
   #
   try:
      uInfo = pwd.getpwnam(user)
      uid = uInfo.pw_uid
      gid = uInfo.pw_gid
   except (KeyError, TypeError):
      uid = 0
      gid = 0

   # Return result
   #
   print("mode={}&uid={}&gid={}&size={}&atime={}&mtime={}&ctime={}"
         .format(didT, uid, gid, size, atime, mtime, ctime))
   return 0
  
#******************************************************************************
#*                                  W h a t                                   *
#******************************************************************************

def What(aName, aNum, aCnt):
    """Print the archive member name for archive aNum of aCnt.

    The "<num>-<cnt>" tag is inserted before the final extension of
    aName, or simply appended when aName has no extension. Returns 0.
    """
    base, dot, ext = aName.rpartition('.')
    if not dot:
       print("{}{}-{}".format(aName, aNum, aCnt))
    else:
       print("{}{}-{}.{}".format(base, aNum, aCnt, ext))
    return 0
  
#******************************************************************************
#*                            x e q _ F i n i s h                             *
#******************************************************************************

def xeq_Finish(argv):
   """Complete a backup: remove the arena tree and record final status.

   argv = [rse, scope, dsname, dsndir, finkey, finval]. When dsndir is
   non-empty the symlink tree rooted there is removed; either way the
   metadata key finkey is set to finval on scope:dsname. Returns 0.
   (rse is currently accepted but unused.)
   """
   # Make sure we have at least six arguments.
   #
   if len(argv) < 6: Emsg(-errno.EINVAL, "Too few arguments") 
   theRSE = argv[0]
   Scope  = argv[1]
   dsName = argv[2]
   dsnDir = argv[3] 
   finKey = argv[4]
   finVal = argv[5]

   # First step is to completely remove the backup tree we created starting
   # at the dsnDir which is a flat structure. If dsnDir is not given, then
   # we skip this step as the caller wants to keep the structure but still
   # set the metadata key to indicate how we completed the backup.
   #
   if dsnDir:
      if Debug:
         Emsg(0, "Removing directory tree '{}' from '{}'."
                 .format(dsnDir, os.path.basename(dsnDir)))
      # Bug fix: the removal was nested inside the Debug test above, so
      # the tree was only ever removed when debugging was enabled. It
      # must happen unconditionally, and the failure message now
      # includes the actual error.
      try:
         shutil.rmtree(dsnDir)
      except Exception as e:
         Emsg(8, "Unable to remove directory tree '{}': {}"
                 .format(dsnDir, e))

   # Last step is to indicate how the backup completed
   #
   if Debug:
      Emsg(0,"Setting backup ending status: {} = '{}'.".format(finKey,finVal))
   Set_Key(Scope, dsName, finKey, finVal)

   return 0
  
#******************************************************************************
#*                                 W h i c h                                  *
#******************************************************************************

def Which(argv):
   """Print the name of the archive member that holds a given file.

   argv = [arcname, fname, scope, dsname]. The dataset's arcBackup and
   arcIndex metadata locate the archive; the sorted file list from
   Get_lfns() gives the file's ordinal. Prints '!ENOENT'/'!ENOANO'
   sentinels on lookup failures (exiting via Emsg), otherwise delegates
   to What() and returns its value.
   """
   # Get the parameters
   #
   if len(argv) < 4:
      Emsg(-errno.EINVAL, "Insufficient number of arguments") 
      return errno.EINVAL

   aName = argv[0]
   fName = argv[1]
   Scope = argv[2]
   dName = argv[3]
   theDS = Scope+':'+dName

   # get the backup metadata for this dataset
   #
   mdVec = Qry_Key([["arcBackup", "arcIndex"], Scope, dName], retval=True)
   if mdVec is None:
      print('!ENOENT')
      sys.exit(2)

   # Verify that this dataset was actually backed up
   #
   bkpVal = mdVec.pop(0)
   if not bkpVal.endswith(":done"):
      print('!ENOANO')
      Emsg(errno.ENOANO, "Dataset {}:{} not backed up!".format(Scope, dName))

   # Get the ordinal index as without it we can do nothing
   #
   ordIndex = mdVec.pop(0)
   ordSVec  = ordIndex.split()
   ordIlen  = len(ordSVec)

   # Now get the files in the dataset
   #
   lfnVec = Get_lfns(Scope, dName)
   lfnVlen = len(lfnVec)

   # The returned list has [<scope>,<did>,<size>] plus an optional
   # checksum dict, so trim each row to [<scope>,<did>] for matching.
   # (Bug fix: deleting only index 2 left the checksum dict behind,
   # which made the index() lookup below always fail.)
   #
   for row in lfnVec: del row[2:]

   # If the file name has no scope, use the dataset scope
   #
   if ':' in fName: fWant = fName.split(':',1)
   else:
      fWant = [Scope, fName]
      fName = Scope+':'+fName

   # Do some debugging
   #
   if Debug: Emsg(0, "lfn {} cnt={} ord cnt={} index={}".format(fName,
                     lfnVlen, ordIlen, ordSVec))

   # Get the ordinal for the file in the list of lfn's
   #
   try:
      oNum = lfnVec.index(fWant)
   except ValueError:
      print("!ENOENT")
      Emsg(errno.ENOENT, "File {} not found in dataset {}".format(fName,theDS))

   # If the index is empty then there is only one archive
   #
   if not ordIlen: return What(aName, 1, 1)

   # Convert the ordinal index into a list of integers. (Bug fix: the
   # handler referenced undefined names 'dname' and 'e' and wrapped the
   # call in a spurious set literal.)
   #
   try:
      ordIVec = list(map(int, ordSVec))
   except Exception as e:
      Emsg(errno.EINVAL,"Dataset {} arcIndex has non-integers; {}".
           format(dName, e))
   
   # Compute the archive this file is in: each boundary is the count of
   # files in the archives before it, so the first boundary exceeding
   # oNum identifies the archive. (Bug fix: aNum was never incremented,
   # so every file past the first archive reported archive 1.)
   #
   aCnt = ordIlen + 1
   for aNum, ordX in enumerate(ordIVec, start=1):
      if oNum < ordX: return What(aName, aNum, aCnt)
   return What(aName, aCnt, aCnt)
  
#******************************************************************************
#*                                  M a i n                                   *
#******************************************************************************

# The actual guts of the script
#
def Main(argv):
   """Dispatch the requested subcommand and return its exit code.

   argv is sys.argv[1:]: the subcommand name followed by its own
   arguments. Argument-count and unknown-command errors go through
   Emsg, which prints the usage summary and exits.
   """
   # The command itself must be present.
   #
   if not argv:
      Emsg(-errno.EINVAL, "Command not specified") 
      return errno.EINVAL
   cmd  = argv[0]
   args = argv[1:]

   # There must be at least one argument left
   #
   if len(argv) < 2: Emsg(-errno.EINVAL, "Too few arguments") 

   if cmd == "addkey": return AddKey(args)

   # There must be at least two arguments left
   #
   if len(argv) < 3: Emsg(-errno.EINVAL, "Too few arguments") 

   # qkey turns its comma-separated key list into a real list first.
   #
   if cmd == "qkey":
      qArgs = args[1:]
      qArgs.insert(0, args[0].split(','))
      return Qry_Key(qArgs)

   # The remaining commands take their argument vector unchanged.
   #
   dispatch = {"finish": xeq_Finish,
               "list":   LS_Bkup,
               "set":    Set_Backup,
               "setup":  Setup,
               "stat":   Stat,
               "which":  Which}
   handler = dispatch.get(cmd)
   if handler is not None: return handler(args)

   # Unknown command
   #
   Emsg(-errno.EINVAL, "Unknown command, '{}'".format(cmd))
   return errno.EINVAL

if __name__ == "__main__":

   # Initialize the did client used throughout this module (declared as
   # a module global above). Failure to create it is fatal: Emsg(8)
   # exits with code 8.
   try:
      DIDclient = DIDClient()
   except Exception as e:
      Emsg(8, "Error creating DID client: {}".format(e))

   # Run the command dispatcher and exit with its return code.
   sys.exit(Main(sys.argv[1:]))
