# This script exists to wrap command for retrieving and parsing localization information related to bundle editing, e.g. scale info, potentially even radars in use, etc.

from lxml import etree as ET # Used for xml object parsing
import os, sys # Used to access files and directory info
from collections import OrderedDict # needed for preserving order of keys in certain dictionaries (e.g. localization)
import time # Used for time functions like sleep (to slow down feedback)

# Import my supporting python scripts
import bundleModify as bMod # [NEW MAY 2020] Using for bundle interrogation methods

# Try adding path for LFI
try:
    sys.path.insert(0, os.environ['LOCALAPPS_LIB_python'] + "/LocalFileInstaller")
except KeyError:
    # LOCALAPPS_LIB_python env var not set on this machine; the LFI import will likely fail later
    print('ALERT: Cannot add path to LocalFileInstaller, likely unable to load module')

# Set up a wrapper function to TEST import of NWS SCP LocalFileInstaller. Generic enough to be used for any module
def module_exists(module_name):
    """Return True if *module_name* can be imported, else print a notice and return False.

    To use this for LFI, call with 'LocalFileInstaller' as the module_name argument.
    """
    try:
        __import__(module_name)
    except ImportError:
        print('Module Import Error: Cannot load {}'.format(module_name))
        return False
    return True




# CLEVER FUNCTION FOR PRINTING FEEDBACK only if set a "verbose" flag. SOURCE: stackoverflow.com/questions/5980042
try:
    verbose # Check if variable was set
except NameError:  # narrowed from bare except: only an unset name should trigger the default
    verbose = False # Set value if global variable wasn't found. Set to true to show more debugging messages
# verbose = True # Use to brute-force override verbose in order to print extra debugging messages
# Now define function depending on status of variable (def instead of lambda per PEP 8)
if verbose:
    def verboseprint(*args):
        # Print each argument separately so caller doesn't need to stuff everything to be printed into a single string
        print(*args)
else:
    def verboseprint(*args):
        # do-nothing function when verbose output is disabled
        pass




# Make function which creates xml tree object out of file
def xmlObjFromFile(filePath):
    """Open the file at *filePath* and parse it into an lxml ElementTree object.

    Raises OSError if the file cannot be opened, and lxml parse errors on bad XML.
    """
    # Use a context manager so the handle is closed even if parsing raises
    # (the original left the file open when ET.parse threw)
    with open(filePath, 'rt') as f:
        parser = ET.XMLParser(ns_clean=True) # Only works if using LXML
        tree = ET.parse(f, parser) # Only works if using LXML
    return tree


# DEFINE Function which uses LFI to get site localization files (output as xml element object)
def LFIGet_asET(host,locType,locLevel,locName,filePrefix,fileName,verbose=True):
    """Retrieve a localization file via LocalFileInstaller (LFI) and parse it to an lxml element.

    host      - EDEX host for the thrift client (e.g. 'ev')
    locType   - localization type (cave_static, common_static, ...)
    locLevel  - localization level (base, site, ...)
    locName   - localization name for the level (e.g. WFO id); may be empty for base
    filePrefix/fileName - path under the loc level; concatenated for retrieval
    verbose   - when True print the source path up front
    Returns the parsed xml element, or None if retrieval failed.
    """
    # Import LFI here (also imported in Main.py); this resolves the reference below and
    # only runs once we already know LFI exists (checked from Main.py)
    import LocalFileInstaller # won't work on all machines
    # Now use LFI
    thriftClient = LocalFileInstaller.LocalFileInstaller(host)
    thriftClient.setType(locType)
    thriftClient.setLevel(locLevel)
    thriftClient.setName(locName) # What if base? Then locName is empty? Might need to wrap in 'try'

    # Build the source-path message once so the verbose/non-verbose prints stay in sync
    sourceMsg = '(Sourcing files with LFI from EDEX utility tree: {})'\
        .format('/'.join([locType, locLevel, locName, filePrefix, fileName]))
    if verbose:
        print(sourceMsg)

    # With thriftClient setup finished, retrieve the file contents (as string) with getFile method
    fullFileName = ''.join([filePrefix, fileName])

    try:
        xmlStr = thriftClient.getFile(fullFileName)
        # [NEW MAY 2020] Print the source path when successful EVEN IF NON-VERBOSE
        # (only when verbose is off, because it would have printed above already if on)
        if not verbose:
            print(sourceMsg)
    except Exception:  # narrowed from bare except; any thrift/retrieval failure -> None
        if verbose:
            print('ERROR retrieving {}, problems may result.'.format(fileName))
        return None

    # For LFI, we get a string return, which we have to use ET's "fromstring" method to parse
    tree = ET.fromstring(xmlStr, ET.XMLParser(ns_clean=True))
    return tree


# DEFINE Function which gets files from local LOCAL machine (ALTERNATE to LFI) (output as xml element object)
def LocalGet_asET(locType,locLevel,locName,filePrefix,fileName,locBasePath=None,verbose=True):
    """Retrieve a localization file from the local machine (alternate to LFI) as an lxml tree.

    locType/locLevel/locName - localization tree coordinates (e.g. cave_static/site/OUN)
    filePrefix/fileName      - path under the loc level (e.g. 'bundles/scales/' + 'WFO.xml')
    locBasePath              - optional base directory override; defaults to ~/caveData
    verbose                  - when True print the source path before retrieval
    Returns the parsed xml tree, or None on any retrieval/parse failure.
    """
    verboseprint("LocalGet_asET method args... locType:{}, locLevel:{}, locName:{}, locBasePath:{}".format(locType,locLevel,locName,locBasePath))
    # If the user provided a basePath in the main script arguments, use that. Else, default to caveData
    if not locBasePath:
        # To retrieve from caveData, we need information about the system and user
        homedir = os.environ['HOME']
        locBasePath = '{}/caveData'.format(homedir)
        # caveData uses 'etc' instead of cave_static (this translation applies to ALL loc levels for cave_static)
        locType = 'etc' if locType == 'cave_static' else locType

    # [MAY 2020] base cave_static files are NOT in the util tree... they live in /awips2/cave/etc/
    # --> This catch applies whether the user provided locBasePath or is using caveData
    # [SEP 2022] Also catch locType='etc' since that gets assigned in the prior "if" logic
    if locLevel == 'base' and (locType == 'cave_static' or locType == 'etc'):
        locBasePath = '/awips2/cave' # overrides any locBasePath; not found in any utility tree OR caveData
        locType = 'etc'
        # locLevel "base" is implied and DOES NOT APPEAR in the final path
        # --> e.g. we need /awips2/cave/etc/${filePrefix}/${fileName}
        locLevel = '' # Reset to empty (from initial "base")

    verboseprint("LocalGet_asET: Using locBasePath:{} and locType:{} and locLevel:{}".format(locBasePath,locType,locLevel))

    # Now construct the expected path to the file
    fullFileName = ''.join([filePrefix, fileName])
    fullFilePath = '/'.join([locBasePath, locType, locLevel, locName, fullFileName])
    if verbose:
        print('(Sourcing files locally from: {})'.format(fullFilePath))

    # With full path specified, retrieve the xml object using "xmlObjFromFile"
    try:
        tree = xmlObjFromFile(fullFilePath)
        # [NEW MAY 2020] Print the source path when successful EVEN IF NON-VERBOSE
        # (only when verbose is off, because it would have printed above already if on)
        if not verbose:
            print('(Sourcing files locally from: {})'.format(fullFilePath))
        return tree
    except Exception:  # narrowed from bare except; missing/bad file -> best-effort None
        if verbose:
            print('ERROR retrieving {}, problems may result.'.format(fileName))
        return None


# DEFINE wrapper function for getting scale info from files. Picks method of file retrieval,
def fileGetWrapper(sessionData,locType,locLevel,locName,filePrefix,fileName,verbose=True):
    """Retrieve a localization file by the best available mechanism (LFI or local path).

    Chooses LFI retrieval when it is available, no local base path was supplied, and
    the request is not a forced-local case; otherwise falls back to local retrieval.
    Returns the parsed xml tree (or None, per the underlying retrieval functions).
    """
    # A 'locFiles' path argument to the main script overrides LFI sourcing (None if absent)
    locBasePath = sessionData['runConfig'].get('locFiles')

    # [FEB 2023] Conditions where we must avoid LFI and force local retrieval
    # cave_static/base: LFI cannot source from /awips2/cave/etc for these arguments
    forceLocal = locLevel == 'base' and locType in ('cave_static', 'etc')
    if forceLocal:
        verboseprint('Forcing local (non-LFI) retrieval for cave_static/base search')

    if sessionData['runConfig']['LFIavailable'] and not locBasePath and not forceLocal:
        # LFI path: talk to the EDEX cluster
        host='ev' # DEFAULT FIELD VALUE FOR EDEX CLUSTER. KEEP unless want to add override logic
        #host='localhost' # TURN OFF FOR DEPLOYMENT! This is only used for testing on standalones
        return LFIGet_asET(host, locType, locLevel, locName, filePrefix, fileName, verbose)

    # Otherwise use local alternates, such as caveData (locBasePath override honored)
    return LocalGet_asET(locType, locLevel, locName, filePrefix, fileName, locBasePath, verbose)




# Use this to specify xml find object command. Will use this to handle searches
def xmlFind(treeObj,query):
    """Run findall(*query*) on *treeObj* and return the first match.

    Prints a diagnostic and returns None when nothing matches; prints a
    warning (but still returns the first hit) when multiple elements match.
    """
    matches = treeObj.findall(query)
    if not matches:
        print("XML FIND ERROR: No matches")
        return None
    if len(matches) > 1:
        print("XML FIND ERROR: Multiple found, using FIRST")
    # First (possibly only) result
    return matches[0]


# DEFINE (NEW 2019) FUNCTION for reading scaleInfo.xml. This is needed as another layer of determining the scale files to reference for our scale mapping
def loadScalesInfo(scalesInfoXMLObj):
    """Return a dict mapping each mapScale displayName to its fileName.

    scalesInfoXMLObj: parsed scalesInfo.xml element/tree.
    Stand-alone loader: collects ALL scale-to-file mappings available in
    scalesInfo, for reference if needed.
    """
    return {
        scale.get('displayName'): scale.get('fileName')
        for scale in scalesInfoXMLObj.findall('.//mapScale[@displayName]')
    }



# (NEW JUNE 2019) DEFINE FUNCTION for looking up appropriate Scale Files for remapping bundle displays to new WFO
def findScaleFiles(sessionData):
    """Locate scalesInfo.xml in the localization tree and return a scale -> file map.

    sessionData - session dictionary (passed through to multiLevelLocSearch).
    Returns scaleFilesMap: dict of scale displayName -> scale file name. Falls back
    to the hard-coded defaults (Regional/State(s)/WFO) when no scalesInfo is found.
    """
    # DEFAULT VALUES used as the fallback when no scalesInfo file is found.
    # In the most dynamic approach we would consult the bundle for scales needing mapping,
    # but for now the scales WDTD is known to use (Regional, State(s), WFO) are hard-coded.
    defaultScalesToMap = {
        'Regional': 'Regional.xml',
        'State(s)': 'States.xml',
        'WFO': 'WFO.xml',
        }

    # Set basic parameters for WHERE we look for scale information.
    # Generally in cave_static/.../bundles/scales (depends on localization level)
    locType = 'cave_static'
    filePrefix = 'bundles/scales/' # subdirectories in the localization and before the file in the path
    # scalesInfo.xml maps CAVE scale options to the files holding geometry info
    fileName = 'scalesInfo.xml'

    # Search over ALL localization levels for the file ('*' wildcard scope).
    # (Previously a ['base','site'] list was assigned then immediately overwritten -- removed.)
    locLevelsToSearch = '*'

    # Now actually look for the file. RETURN will be scaleInfo xml obj (if found) and matchLevel (if found)
    scaleInfoXML, matchLevel = multiLevelLocSearch(sessionData, locType=locType, searchScope=locLevelsToSearch, filePrefix=filePrefix, fileName=fileName)
    if scaleInfoXML is not None:
        print('Found scalesInfo file at {} level... reading for scales mapping...'.format(matchLevel))
        # Call function for loading full scale map
        scaleFilesMap = loadScalesInfo(scaleInfoXML)
        print(scaleFilesMap)
    else:
        print('Failed to find scale info... attempting to use default locations')
        # Could NOT find scalesInfo: default to the expected place for scale files
        scaleFilesMap = defaultScalesToMap

    # AT END, return scaleFilesMap
    return scaleFilesMap


# (NEW JULY 2019)  DEFINE a helper function for scale file matching selection which uses a simple prompt to request info
def matchScaleFileByPrompt(scaleToMap, scaleFilesMap):
    """Prompt the user to map an unknown scale to one of the known scales.

    scaleToMap    - the scale which requires mapping by prompt (e.g. WFO, State, etc)
    scaleFilesMap - dict of known scale names -> files; its keys form the menu choices
    Returns the scaleFilesMap key the user selected.
    """
    # Loop (instead of the previous unbounded recursion, which could hit the
    # recursion limit on repeated bad entries) until a valid choice is made
    while True:
        print("Please select the scale to which '{}' should be mapped from the below choices by entering the number, then ENTER...".format(scaleToMap))
        # Use the keys of the scaleFilesMap list to pose question
        scaleOptions = list(scaleFilesMap.keys())
        print('\n'.join('{:2d}: {}'.format(*k) for k in enumerate(scaleOptions)))
        choice = input('[EDIT/ENTER]:')
        # HANDLE INPUT. Must be a number within range of the listed options
        if choice.isdigit() and int(choice) < len(scaleOptions):
            scaleMatch = scaleOptions[int(choice)]
            print('You entered: {} ({})'.format(choice, scaleMatch))
            return scaleMatch
        print('INVALID CHOICE: {}'.format(choice))

# [NEW MAY 2020] Finally define function that can gather all the scales used in the bundle, to prepare for ultimately mapping them to those available to a particular site
# --> Previously, was defining a static list of scales presumed to be used (Regional, States, WFO) but want to expand to be more accommodating of bundles with other scales (e.g. CONUS, etc)
# --> An accurate (and comprehensive) set of bundle scales is needed in sessionData so that translation from original scale parameters to new scale params can be managed
def getBundleScales(sessionData):
    """Read the bundle file and return the unique list of map scales it uses.

    sessionData - must contain runConfig['bundlePath'] pointing at the bundle xml.
    Returns a list of unique scale names found in the bundle's displays, or the
    default tuple ('Regional', 'State(s)', 'WFO') if the bundle cannot be read.
    """
    bFile = sessionData['runConfig']['bundlePath'] # bundle file path stored in sessionData
    print('Determining unique set of Map Scales used in bundle by reading: "{}"...'.format(bFile))

    # Default scales to assume the bundle has if for some reason we can't read it.
    # This is the set most commonly used by RAC.
    defaultBundleScales = ('Regional', 'State(s)', 'WFO')

    # Read the bundle into an xml object. Slightly redundant with Main.py's later read,
    # but the object below is scoped to this function only and not used elsewhere.
    try:
        procXmlObj = xmlObjFromFile(bFile)
    except Exception:  # narrowed from bare except; any read/parse failure -> defaults
        print('\t-->CAUTION: Failed to read bundle file to determine scales used. Assuming default set of scales (printed below).')
        print(defaultBundleScales)  # FIX: was 'defuaultBundleScales' (NameError on this path)
        return defaultBundleScales

    # Build a UNIQUE list of the scales the bundle contains.
    # Use the BUNDLEMODIFY module ('bMod') rather than recreating its display/scale helpers.
    uniqueBundleScales = []
    displayMatches = bMod.getBundleDisplays(procXmlObj)
    for d in displayMatches:
        dScale = bMod.getDisplayScale(d)
        if dScale not in uniqueBundleScales: # If we're not already aware of it...
            uniqueBundleScales.append(dScale) # ... then add to tracked list of unique scales
    print('\t-->Found {} unique scales in bundle: {}'.format(len(uniqueBundleScales), uniqueBundleScales))
    # Finally, return the list of unique bundle scales... used to continue building the scaleDict
    return uniqueBundleScales

# DEFINE UPDATED FUNCTION for getting scales info, which uses restricted scale files, and can accommodate different ways of getting scales
def buildScaleDict(sessionData):
    """Build and return scaleDict: per-scale map info needed for remapping the bundle.

    sessionData - session dictionary; must contain 'loc' and the runConfig entries
                  used by the downstream retrieval functions.
    Returns a dict keyed by the bundle's scale displayName; each value holds:
        'scaleName'   - scale key actually used (may differ if the user remapped via prompt)
        'mapCenter'   - mapCenter attribute from the scale file's displays element
        'geomElement' - gridGeometry xml element with map projection info
    Scales whose files cannot be retrieved or parsed are skipped with an error message.
    """
    print('Importing map scale info for {}...'.format(sessionData['loc']))

    # (JUNE 2019) Determine where to look for scale files: returns a "scaleFilesMap"
    # dictionary mapping possible scales to their corresponding files
    scaleFilesMap = findScaleFiles(sessionData)

    # WHERE we look for scale information in the localization tree
    locType = 'cave_static'
    locLevelsToSearch = '*' # [MAY 2020] search ANY loc level (most granular, e.g. user/site, prioritized before region/base)
    filePrefix = 'bundles/scales/' # subdirectories in the localization and before the file in the path

    # [MAY 2020] Dynamically retrieve the scales actually USED by the bundle
    # (previously a static Regional/State(s)/WFO tuple was assumed)
    allScales = getBundleScales(sessionData)

    # Initialize dictionary to store scale-specific elements
    scaleDict = {}

    # Now iterate through each scale and request its data.
    # NOTE: scaleDict keys use the bundle's displayName 's' rather than the file's
    # "scale" attribute -- some sites have non-matching attributes that clobber keys.
    for s in allScales:
        # Match the scale to a file; if absent from scaleFilesMap, prompt the user (JULY 2019)
        if s in scaleFilesMap:
            fName = scaleFilesMap[s] # query scaleFilesMap for the file to use
            scaleVar = s # s is the scaleDict key AND the scale actually used
        else:
            time.sleep(1) # For pacing messages
            print('\nSCALE MAPPING ERROR FOR "{}" ... This bundle uses a scale which is not found in your scalesInfo configuration.'.format(s))
            # Rectify with a manual prompt for the user to select a matching known scale
            new_s = matchScaleFileByPrompt(s, scaleFilesMap)
            fName = scaleFilesMap[new_s]
            scaleVar = new_s # user-selected replacement becomes the actual scale used

        verboseprint('Matched "{}" scale to "{}" file.'.format(s, fName))

        # [MAY 2020] multiLevelLocSearch retrieves the xml for each scale across ALL loc levels
        # (important for non-site-specific scales like CONUS that may live at base or region)
        sXml, matchLevel = multiLevelLocSearch(sessionData, locType=locType, searchScope=locLevelsToSearch, filePrefix=filePrefix, fileName=fName)
        if sXml is None:
            # multiLevelLocSearch suppresses most feedback, so report the failure here
            print('ERROR RETRIEVING "{}" SCALE INFO! Some remapping may fail.'.format(s))
            continue # proceed and hope scale mapping works even without this scale's info

        # Grab the first displays element with a mapCenter attribute, and the gridGeometry
        # element with the map projection info
        displayElement = xmlFind(sXml, './/displays[@mapCenter]')
        geomElement = xmlFind(sXml, './/gridGeometry')
        # FIX: xmlFind returns None on no match; guard before .get()/len() below
        if displayElement is None or geomElement is None:
            print('ERROR RETRIEVING "{}" SCALE INFO! Some remapping may fail.'.format(s))
            continue

        mapCenter = displayElement.get('mapCenter')
        verboseprint('Map Center({}): {}'.format(s, mapCenter))

        # Check element child counts look sane, and if so add to dictionary keyed by scale
        if len(displayElement) == 1 and len(geomElement) == 1:
            scaleDict[s] = {'scaleName': scaleVar, 'mapCenter': mapCenter, 'geomElement': geomElement}
            print('\t--> Succesfully retrieved "{}" scale info (from "{}" level)'.format(s, matchLevel))
        else:
            print('ERROR RETRIEVING "{}" SCALE INFO! Some remapping may fail.'.format(s))

    if len(scaleDict) > 0:
        print('\tScale Dictionary FINISHED (Loaded {} items)'.format(len(scaleDict)))

    print('')  # EMPTY LINE
    return scaleDict

# DEFINE Function for getting warning sites from the utility tree, if possible
def getWarningSites(sessionData):
    """Look up the configured warning sites list for the session's WFO.

    Reads cave_static/site/{WFO}/menus/warnings/index.xml and returns the
    'sites' substitution value string, or None if the file or value is
    unavailable (the user can manually enter sites when prompted later).
    """
    print('Attempting to import warning sites info for {}...'.format(sessionData['loc']))

    # WHERE we look: cave_static/site/{WFO}/menus/warnings/index.xml
    locType = 'cave_static'
    locLevel = 'site'
    locName = sessionData['loc']  # get WFO value
    filePrefix = 'menus/warnings/'  # subdirectories in the localization and before the file in the path
    fName = 'index.xml' # looking for THIS file to get configuration for warning menu

    # Make call to fileGetWrapper to retrieve xml data for this file
    fXml = fileGetWrapper(sessionData, locType, locLevel, locName, filePrefix, fName)

    if fXml is None:
        # SKIP if xml was not loaded successfully (error already printed by file get)
        return None

    sitesList = None # Initialize potential sitesList variable
    # Grab the substitute tag with attribute key="sites".
    # FIX: explicit None check instead of a bare except around find()/get()
    subElement = fXml.find('.//substitute[@key="sites"]')
    if subElement is not None:
        sitesList = subElement.get('value')
        print('\t--> Succesfully retrieved warning sites: "{}" '.format(sitesList))
    else:
        print('ERROR loading warning sites list from localization... Skipping! ' \
              '(User should manually enter when prompted)')
    print('') # EMPTY LINE
    return sitesList


# SATELLITE: There is NO WAY to get satellite sectors from a localization file. This is auto-determined by CAVE when loading, so it has to be asked


# RADAR: DEFINE Function for pre-loading provided radars for substitution, if possible
# THIS IS NOT STRICTLY a localization-utilizing function, but fits with the template of preparing input values
def getRadarSubs(sessionData,radarArgs=None):
    """Assign radars (from main function arguments) to four-panel substitution keys.

    sessionData - currently unused; kept for template consistency with other loaders
    radarArgs   - sequence of radar ids in argument order (None/empty returns {})
    Returns a dict mapping panel position keys ('topLeft', ...) to radar ids.

    FOR NOW, skipping any attempt at radar retrieval from localization. This
    function ONLY parses radars provided in the main function arguments.
    """
    # Guard: no radars provided means no substitutions
    # (original default of None previously raised TypeError in enumerate)
    if not radarArgs:
        return {}

    # For a four-panel display, the substitution keys for panels 1-4, in order
    panelPositionKeys = ['topLeft', 'topRight', 'botLeft', 'botRight']

    # Specify (hard-code for now) the order of assigning radars from arguments to panels
    panelOrderOptions = {'NUMERICAL': [1, 2, 3, 4], 'CLOCKWISE': [1, 2, 4, 3]}
    panelOrder = 'CLOCKWISE'

    radarSubTEMP = {}
    # Only four panels exist; ignore any extra radar arguments beyond the fourth
    # (original raised IndexError with more than four radars)
    for i, radar in enumerate(radarArgs[:4]):
        # Display number for the i'th radar using the specified panel order
        dNum = panelOrderOptions[panelOrder][i]
        # Use the matching panel position key as the substitution key
        radarSubTEMP[panelPositionKeys[dNum - 1]] = radar

    # RETURN the radar substitution dict; logical assignment of keys belongs here
    return radarSubTEMP




# ----------------------------------------------------------------------------------------------------------------
# (June2019) Improve and generalize on below function as a utility for looking for a file at ALL localization levels
# This will need to accept certain basic arguments directing the loc search, but will enable a broad search thereafter.
# Also, should prioritize discovered files according to proper localization override level (e.g. site > base)
#def multiLevelLocSearch(sessionData, locType=None, searchScope=None, filePrefix=None, fileName=None):
# [MAY 2020] Added verbose OPTION which can help with overriding default additional message display behavior
def multiLevelLocSearch(sessionData, locType=None, searchScope=None, filePrefix=None, fileName=None, verbose=False):
    """Search for a localization file across multiple localization levels.

    Arguments:
    sessionData -- required. we need this to access information about site name for localization
    locType -- required. what part of loc (cave_static, common_static, edex_static) we're looking in
    searchScope -- optional. list of levels to search, or the string 'all'/'*' for every level.
                   If none/invalid provided, the default scope ['base','site'] is used.
    filePrefix -- subdirectory path beyond the localization root (e.g. 'bundles/')
    fileName -- the actual file name to look for
    verbose -- optional. Mainly passed to allow downstream fileGetWrapper to print out where it's sourcing files from

    Returns (match, matchLevel) where match is the parsed file object from
    fileGetWrapper and matchLevel is the level it was found at, or (None, None).
    """
    # Prepopulate each allowed localization level, in conventional base->user order, with
    # a zero-argument callable producing the "localization name" needed to use that level
    # (e.g. WFO name for site/configured, region abbrev for region, user name for user).
    # Callables DEFER side effects (getRegion may interactively prompt; getUser touches the
    # environment) until a level is actually searched, so e.g. the default ['base','site']
    # scope never triggers a region prompt.
    # TO PREVENT ANY OF THESE LOCALIZATION LEVELS FROM BEING USED, SIMPLY COMMENT THAT LINE OUT
    fullOrderedLocDict=OrderedDict()
    fullOrderedLocDict['base'] = {'locName': lambda: ''} # base has no locName
    fullOrderedLocDict['configured'] = {'locName': lambda: sessionData['loc']} # configured uses wfo name
    fullOrderedLocDict['region'] = {'locName': lambda: getRegion(sessionData)} # region uses region name, via a special retrieval function
    fullOrderedLocDict['site'] = {'locName': lambda: sessionData['loc']} # site again uses wfo name
    fullOrderedLocDict['user'] = {'locName': lambda: getUser(sessionData)} # user uses user name, via a special retrieval function
    # 'workstation' intentionally omitted for now

    defaultScope=['base','site']

    # Sub-function that filters the full set of possible locs based on what is provided. This approach:
    #   1) bases the final search scope on the actual, allowable localization names
    #   2) keeps the ordering of localizations intact, which is needed for prioritizing discovered files
    def filteredLocSet(requested):
        # Normalize requested entries for a case-insensitive comparison
        requestedLower=[r.lower() for r in requested if isinstance(r,str)]
        outputList=[l for l in fullOrderedLocDict if l in requestedLower]
        # Return None if no valid levels were included
        return outputList if outputList else None

    # Determine what the actual searchScope will be. Rules:
    #   --> If none/invalid provided, use defaultScope
    #   --> if "all"/"*" provided, use all possible levels
    if isinstance(searchScope,str) and searchScope.lower() in ('all','*'):
        # Request for ALL scopes: use the full ordered list of level keys
        finalSearchScope=list(fullOrderedLocDict.keys())
    elif isinstance(searchScope,list) and len(searchScope)>0:
        # Valid, non-empty list: keep only the recognized levels, in canonical order
        finalSearchScope=filteredLocSet(searchScope)
    else:
        # Missing or unusable argument
        finalSearchScope=None

    verboseprint(finalSearchScope)

    # Fall back to the default scope if nothing usable was decided above
    if not finalSearchScope:
        verboseprint('Missing or invalid searchScope, using default')
        finalSearchScope=defaultScope

    # Feedback to developer about what the final search scope looks like
    verboseprint('Search scope to use for {} file is: {}'.format(fileName,finalSearchScope))

    # Use the finalSearchScope to search the corresponding localization levels. Note:
    #  ... searches in REVERSE ORDER so the highest-granularity (most specific) level wins
    #  ... list members are used as keys into fullOrderedLocDict for level-specific info
    for locLevel in reversed(finalSearchScope):
        verboseprint(locLevel)
        # Resolve the localization name for this level now (may prompt/inspect environment)
        locName = fullOrderedLocDict[locLevel]['locName']()
        # Handy string for messaging where the search is looking
        searchLoc='/'.join([locType, locLevel, locName,filePrefix,fileName])
        # verbose passed through so caller controls whether fileGetWrapper prints sourcing info
        match = fileGetWrapper(sessionData, locType, locLevel, locName, filePrefix, fileName, verbose=verbose)
        if match is not None:
            verboseprint('FOUND {} FILE at {} level ({})'.format(fileName,locLevel,searchLoc))
            return match, locLevel # Returns both match (an xml object) and "matchLevel" at which it was found
        verboseprint('Could not find {} file at {} level ({})'.format(fileName,locLevel,searchLoc))
    # Loop completed without a match
    return None, None # Returns None for both expected "match" and "matchLevel" returns
    


# DEFINE a utility function which can be used to get the region information (if needed)
# DEFINE a utility function which can be used to get the region information (if needed)
def getRegion(sessionData):
    """Return the NWS region abbreviation for this session's site.

    Short-circuits to the value already cached on sessionData['region'] when
    it is a recognized region; otherwise falls back to an interactive prompt.
    The determined value (possibly None) is stored back on sessionData.

    Note: region could conceivably come from a db query (e.g. cwa maps table),
    but the prompt approach is used for now; swap the call below to change it.
    """
    allowedRegions=['ER','WR','AR','CR','PR','SR']

    # Fast path: reuse a previously-determined region if one is cached and valid
    cached = sessionData.get('region')
    if cached in allowedRegions:
        return cached

    # Otherwise ask the user to pick one
    chosen = getRegionByPrompt(sessionData, allowedRegions)

    # Alert if, for some reason, region could not be determined
    if not chosen:
        print(('ERROR: Unable to determine region for {}... some localization info may not be accessible'.format(sessionData['loc'])))

    # Cache the result on sessionData, then hand it back
    sessionData['region']=chosen
    return chosen

# DEFINE a helper function for region selection which uses a simple prompt to request info
# DEFINE a helper function for region selection which uses a simple prompt to request info
def getRegionByPrompt(sessionData, allowedRegions):
    """Interactively ask which region the session's site belongs to.

    Prints a numbered menu built from allowedRegions and reads a selection
    from stdin, re-presenting the menu until a valid number is entered.
    Returns the chosen region abbreviation.
    """
    # Loop until a valid numeric choice is made (replaces recursion; same visible behavior)
    while True:
        print(("Please select the region to which '{}' belongs from the below choices by entering the number, then ENTER...".format(sessionData['loc'])))
        # Pose the question using the allowedRegions list as a numbered menu
        print(('\n'.join('{:2d}: {}'.format(*k) for k in enumerate(allowedRegions))))
        choice = input('[EDIT/ENTER]:')
        # HANDLE INPUT: must be a number within the menu's index range
        if choice.isdigit() and int(choice)< (len(allowedRegions)):
            selection = allowedRegions[int(choice)]
            print(('You entered: {} ({})'.format(choice, selection)))
            return selection
        # Bad entry: report it and go around again
        print('INVALID CHOICE: {}'.format(choice))
 

# DEFINE a utility function which can be used to get the user information (if needed)
# DEFINE a utility function which can be used to get the user information (if needed)
def getUser(sessionData):
    """Return the current user name, caching it on sessionData['user'].

    Short-circuits to sessionData['user'] if a string is already cached
    there; otherwise derives the name from the HOME environment variable
    (os-based approach, hard-coded for now rather than a secondary function
    call). Returns None (after printing an alert) if that fails.
    """
    # FIRST of all, simply return the known user info if it's already saved to sessionData
    if isinstance(sessionData.get('user'), str):
        return sessionData['user']

    # Derive the user name from the environment (HERE WOULD CALL ANOTHER FUNCTION IF PARSED OUT)
    try:
        user = os.path.basename(os.environ['HOME'])
    except KeyError:  # HOME not set; only catch the specific failure rather than everything
        user = None # fail to get user
        print('ERROR: unable to determine user name... some localization info may not be accessible')

    # In the end, cache the result (possibly None) on sessionData, then return it
    sessionData['user'] = user
    return user


# DEFINE a wrapper function which can handle searching for a particular file IN ALL LEVELS of the utility tree.
# This will be used, for example, to identify whether colormaps used in bundle are present in util tree


# DEFINE a function which wraps for searches of colormaps
# DEFINE a function which wraps for searches of colormaps
def searchColorMaps(sessionData,cmapInfo):
    """Search the localization tree for a colormap file referenced by a bundle.

    Arguments:
    sessionData -- session dictionary; sessionData['loc'] supplies the site name
    cmapInfo -- colormap reference, optionally with a subdirectory (e.g. 'Sat/IR')

    Searches base, site, and (when determinable) user levels of common_static,
    returning the first fileGetWrapper match found, or None if absent everywhere.
    """
    # Parse cmapInfo into prefix path (if any) and name
    cmapSubdir = os.path.dirname(cmapInfo)
    filePrefix = '/'.join(['colormaps',cmapSubdir])+'/' if len(cmapSubdir)>0 else 'colormaps/'
    cmapName = os.path.basename(cmapInfo) + '.cmap'

    # Colormaps should live in common_static, but could be at any level under that
    locType = 'common_static'
    # Grab current user for search in user portion of utility tree
    try:
        user = os.path.basename(os.environ['HOME'])
    except KeyError:  # HOME not set; skip the user-level search below
        user=None # fail to get user

    # Localization addresses to try, as an ordered list so base is checked first
    attempts = [
        {'locType': locType, 'locLevel': 'base', 'locName': ''},
        {'locType': locType, 'locLevel': 'site', 'locName': sessionData['loc']},
    ]
    # Include a user level search if possible
    if user:
        attempts.append({'locType': locType, 'locLevel': 'user', 'locName': user})

    # For each possibility, call fileGetWrapper and return the first hit
    for a in attempts:
        match = fileGetWrapper(sessionData, a['locType'], a['locLevel'], a['locName'], filePrefix, cmapName, verbose=False)
        if match:
            return match
    # Nothing found at any level
    return None

