Commit fbd9bd7a authored by Thibault VINCENT's avatar Thibault VINCENT
Browse files

fix: bunch of corrections

 * tags not working properly
 * log messages
 * typos, indent, comments
 * etc...
parent b3c2e34d
Loading
Loading
Loading
Loading
+108 −92
Original line number Diff line number Diff line
# -*- coding: utf-8 -*-

from logging import debug, warning, info
from logging import debug, error, warning, info
from fnmatch import fnmatchcase
from time import sleep
from sjrpc.core import RpcError
@@ -82,6 +82,7 @@ class NodeHandler(RpcHandler):
            'hbios'     : self._tag_map_direct('get_hw_bios', 24*3600),
            'hvver'     : self._tag_map_direct('get_hv_version', 24*3600),
            'platform'  : self._tag_map_direct('get_platform', 24*3600),
            'os'        : self._tag_map_direct('get_system', 24*3600),
            'uname'     : self._tag_map_direct('get_uname', 24*3600),
            'cpufreq'   : self._tag_map_direct('get_cpu_frequency', 24*3600),
            'mem'       : self._tag_map_direct('get_mem', 24*3600),
@@ -195,7 +196,7 @@ class NodeHandler(RpcHandler):
        if len(disks):
            result['disk'] = ' '.join(disks.keys())
        for name, size in disks.iteritems():
            if size:
            if size is not None:
                result['disk%s_size' % name] = str(size)
        if not result:
            result = None
@@ -212,13 +213,13 @@ class NodeHandler(RpcHandler):
            for pool_name in pools:
                pool = hv.storage().pool_get(pool_name)
                capa = pool.get_space_capacity()
                if capa:
                if capa is not None:
                    result['sto%s_size' % pool_name] = str(capa)
                free = pool.get_space_free()
                if free:
                if free is not None:
                    result['sto%s_free' % pool_name] = str(free)
                used = pool.get_space_used()
                if used:
                if used is not None:
                    result['sto%s_used' % pool_name] = str(used)
                vol = pool.volume_list()
                if vol:
@@ -245,7 +246,7 @@ class NodeHandler(RpcHandler):
            if path:
                result['disk%i_path' % vol_id] = str(path)
            capa = vol.get_space_capacity()
            if capa:
            if capa is not None:
                result['disk%i_size' % vol_id] = str(capa)
        if not result:
            result = None
@@ -275,7 +276,7 @@ class NodeHandler(RpcHandler):
    def scheduler_run(self):
        '''
        '''
        # call handler scheduler
        # call host scheduler
        if hasattr(self._host_handle, 'scheduler_run'):
            self._host_handle.scheduler_run()
        # (un)register sub nodes if this host has the capability
@@ -319,7 +320,7 @@ class NodeHandler(RpcHandler):
        '''
        '''
        result = {}
        info('get_tags: server requested tags=`%s` noresolve_tags=`%s`', tags,
        info('server requested tags=`%s` noresolve_tags=`%s`', tags,
                                                                noresolve_tags)
        # build a single dict of tags, boolean means "resolve"
        mytags = {}
@@ -340,51 +341,50 @@ class NodeHandler(RpcHandler):
                try:
                    # helper is available on the current host
                    if handler[0](self._host_handle):
                        debug('get_tags: host implements `%s`', pattern)
                        debug('host implements `%s`', pattern)
                        # get tags from helper
                        htags = handler[0](self._host_handle, resolve=False)
                        # append all tags
                        for t in htags:
                            mytags[t] = True
                except Exception as err:
                    warning('get_tags: `%s` -> `%s`', repr(err), err)
            debug('get_tags: no tag specified, expanded list to `%s`', 
                                                                mytags.keys())
                    debug('error adding globbing tags `%r`:`%s`', err, err)
            debug('no tag specified, expanded list to `%s`', mytags.keys())
        # add mandatory tags if missing in the list, or set noresolve
        else:
            for t in self.HV_TAG_MANDATORY:
                if t not in mytags or not mytags[t]:
                    debug('get_tags: add/correct mandatory tag `%s`', t)
                    debug('add/correct mandatory tag `%s`', t)
                    mytags[t] = True
        # query host
        info('get_tags: query host with tag list `%s`', mytags.keys())
        info('query host with tag list `%s`', mytags.keys())
        for tag, resolve in mytags.iteritems():
            # first, test tag name against list of plain names
            if tag in self.HV_TAG_MAP:
                debug('get_tags: plain mapping found for tag `%s`', tag)
                debug('A1: plain mapping found for tag `%s`', tag)
                try:
                    if self.HV_TAG_MAP[tag][0](self._host_handle):
                        debug('get_tags: tag `%s` is available on host', tag)
                        debug('B1: tag `%s` is available on host', tag)
                        result[tag] = {}
                        result[tag]['ttl'] = self.HV_TAG_MAP[tag][2]
                        if resolve:
                            debug('get_tags: resolving now tag `%s`', tag)
                            debug('C1: now resolving tag `%s`', tag)
                            # fetch tag data
                            q = self.HV_TAG_MAP[tag][1](self._host_handle, tag)
                            debug('get_tags: host returned `%s`', q)
                            debug('D1: host returned `%s`=`%s`', tag, q)
                            if q is not None:
                                # append tag data
                                result[tag]['value'] = str(q)
                            else:
                                debug('get_tags: I wont return `%s`=`None`',tag)
                                debug('E1: I wont return `%s`=`None`', tag)
                    else:
                        debug('get_tags: tag `%s` is NOT implemented', tag)
                        debug('tag `%s` is not implemented !!!', tag)
                except Exception as err:
                    warning('get_tags: `%s` -> `%s`', repr(err), err)
                    debug('error with tag mapping `%r`:`%s`', err, err)
            # if no direct tag mapping exists, test name against globbing
            # a list
            else:
                debug('get_tags: searching for `%s` in globbing tags', tag)
                debug('A2: searching for `%s` in globbing tags', tag)
                # iter on globbing patterns, and get helper references
                # process the first globbing tag that matches then exit because
                # there should not exist two globbing patterns matching
@@ -393,26 +393,26 @@ class NodeHandler(RpcHandler):
                    try:
                        # helper is available on the current host
                        if handler[0](self._host_handle, tag):
                            debug('get_tags: testing pattern `%s`', pattern)
                            debug('B2: testing pattern `%s`', pattern)
                            if fnmatchcase(tag, pattern):
                                debug('get_tags: processing tag `%s` with '
                                                'pattern `%s`', tag, pattern)
                                debug('C2: processing tag `%s` with pattern `%s`',
                                                                tag, pattern)
                                # get tags from helper
                                htags = handler[1](self._host_handle, tag)
                                # FIXME instead of extracting one tag, try not
                                # to build the whole list. Maybe it's too
                                # difficult and not worth implementing
                                if tag in htags:
                                    debug('get_tags: found tag in helper result'
                                                ' with value `%s`', htags[tag])
                                    debug('D2: found tag in helper result with'
                                                    ' value `%s`', htags[tag])
                                    result[tag] = {}
                                    result[tag]['ttl'] = handler[2]
                                    if resolve:
                                    if resolve and htags[tag] is not None:
                                        result[tag]['value'] = str(htags[tag])
                                    
                                break
                    except Exception as err:
                        warning('get_tags: `%s` -> `%s`', repr(err), err)
                        debug('error with globbing tag `%r`:`%s`', err, err)
        return result
    
    def _sub_tag_list(self, sub_obj):
@@ -426,32 +426,32 @@ class NodeHandler(RpcHandler):
            try:
                # helper is available on the current host
                if handler[0](sub_obj):
                    debug('sub_tags: sub node implements `%s`' % pattern)
                    debug('sub node implements `%s`' % pattern)
                    # get tags from helper
                    htags = handler[0](sub_obj, resolve=False)
                    debug('sub_tags: handler provides `%s`' % htags)
                    debug('handler provides `%s`' % htags)
                    # append all tags
                    for t in htags.keys():
                        result.append(t)
            except Exception as err:
                warning('_sub_tag_list: `%s` -> `%s`', repr(err), err)
                debug('error while listing sub node tags `%r`:`%s`', err, err)
        return result
    
    @pure
    def sub_tags(self, sub_id, tags=None, noresolve_tags=None):
        '''
        '''
        info('sub_tags: server requested tags for `%s`', sub_id)
        info('server requested tags for `%s` with tags=`%s` noresolve_tags=`%s`'
                                                , sub_id, tags, noresolve_tags)
        if sub_id not in self._host_handle.vm_list():
            warning('sub_tags: sub node `%s` is unknown !', sub_id)
            debug('sub node `%s` is unknown !', sub_id)
            raise HypervisorError('sub node `%s` is unknown' % sub_id)
        else:
            # open a wrapper to the VM
            debug('sub_tags: fetching vm data for `%s`', sub_id)
            debug('fetching vm data for `%s`', sub_id)
            sub = self._host_handle.vm_get(sub_id)
        # build a single list of tags
        info('sub_tags: server requested tags `%s` + `%s`', tags,
                                                                noresolve_tags)
        debug('server requested tags `%s` + `%s`', tags, noresolve_tags)
        available_tags = self._sub_tag_list(sub)
        mytags = {}
        # return all resolved tags case
@@ -474,11 +474,11 @@ class NodeHandler(RpcHandler):
                mytags[t] = False
            for t in tags:
                mytags[t] = True
        debug('sub_tags: expanded list to `%s`', mytags.keys())
        debug('expanded list to `%s`', mytags.keys())
        # add mandatory tags if missing in the list, or set noresolve
        for t in self.VM_TAG_MANDATORY:
            if t not in mytags or not mytags[t]:
                debug('sub_tags: add/correct mandatory tag `%s`' % t)
                debug('add/correct mandatory tag `%s`' % t)
                mytags[t] = True
        # query the subnode
        result = {}
@@ -487,28 +487,27 @@ class NodeHandler(RpcHandler):
            for tag, resolve in mytags.iteritems():
                # first, search tag in plain mappings
                if tag in self.VM_TAG_MAP:
                    info('sub_tags: plain mapping found for tag `%s`', tag)
                    debug('A1: plain mapping found for tag `%s`', tag)
                    # proceed if tag can be resolved on this VM
                    try:
                        if self.VM_TAG_MAP[tag][0](sub):
                            result[tag] = {}
                            result[tag]['ttl'] = self.VM_TAG_MAP[tag][2]
                            if resolve:
                                debug('sub_tags: resolving tag `%s`', tag)
                                debug('B1: resolving tag `%s`', tag)
                                # call the wrapper mapping lambda
                                q = self.VM_TAG_MAP[tag][1](sub, tag)
                                info('sub_tags: tag query returned `%s`', q)
                                debug('C1: tag query returned `%s`=`%s`', tag, q)
                                if q is not None:
                                    if resolve:
                                        result[tag]['value'] = str(q)
                                else:
                                    warning('sub_tags: I wont return `%s`=`None`'
                                                                        , tag)
                                    warning('D1: I wont return `%s`=`None`', tag)
                    except Exception as err:
                        warning('sub_tags: plain: `%s` -> `%s`', repr(err), err)
                        debug('error with plain mapping `%r`:`%s`', err, err)
                # no tag mapping exist, test name against the globbing list
                else:
                    info('sub_tags: searching for `%s` in globbing tags', tag)
                    debug('A2: searching for `%s` in globbing tags', tag)
                    # iter on globbing patterns, and get helper references
                    # process the first globbing tag that matches then exit
                    # because there should not exist two globbing patterns
@@ -518,16 +517,16 @@ class NodeHandler(RpcHandler):
                            # helper is available on the current VM
                            if handler[0](sub, tag):
                                if fnmatchcase(tag, pattern):
                                    debug('sub_tags: processing tag `%s` with '
                                                'pattern `%s`' % (tag, pattern))
                                    debug('B2: processing tag `%s` with pattern'
                                                    ' `%s`' % (tag, pattern))
                                    # get tags from helper
                                    htags = handler[1](sub, tag)
                                    # FIXME instead of extracting one tag, try
                                    # not to build the whole list. Maybe it's
                                    # too difficult and not worth implementing
                                    if tag in htags:
                                        info('sub_tags: found tag in helper '
                                          'result with value `%s`' % htags[tag])
                                        debug('C2: found tag in helper result with'
                                                    ' value `%s`' % htags[tag])
                                        result[tag] = {}
                                        result[tag]['ttl'] = handler[2]
                                        if resolve:
@@ -535,10 +534,9 @@ class NodeHandler(RpcHandler):
                                                                    htags[tag])
                                    break
                        except Exception as err:
                            warning('sub_tags: glob: `%s` -> `%s`', repr(err),
                                                                            err)
                            debug('error with globbing tag `%r`:`%s`', err, err)
        except Exception as err:
            warning('sub_tags: global: `%s` -> `%s`' % (repr(err), err))
            debug('global error `%r`:`%s`', err, err)
        return result
    
    ##################################
@@ -549,27 +547,27 @@ class NodeHandler(RpcHandler):
    def node_shutdown(self, reboot=True, gracefull=True):
        '''
        '''
        warning('node_shutdown: server requested shutdown of local host')
        info('node_shutdown: reboot=%s gracefull=%s', reboot, gracefull)
        info('server requested shutdown of local host with options '
                                'reboot=`%s` gracefull=`%s`', reboot, gracefull)
        if reboot:
            method = 'power_reboot' if gracefull else 'power_force_reboot'
        else:
            method = 'power_shutdown' if gracefull else 'power_off'
        if hasattr(self._host_handle, method):
            result = getattr(self._host_handle, method)()
            info('node_shutdown: in progress ... action returned `%s`', result)
            info('in progress... action returned `%s`', result)
            return result
        else:
            error('node_shutdown: unable to proceed, this feature is not available')
            info('unable to proceed, this feature is not available')
            raise NotImplementedError('host handler has no method `%s`' %method)
    
    @pure
    def execute_command(self, command):
        '''
        '''
        error('execute_command: starting execution of `%s`' % command)
        info('starting execution of `%s`' % command)
        output = self._host_handle.execute(command)
        info('execute_command: finished execution of `%s`' % command)
        info('finished execution of `%s`' % command)
        return output
    
    ##################################
@@ -580,11 +578,11 @@ class NodeHandler(RpcHandler):
    def vm_define(self, data, format='xml'):
        '''
        '''
        warning('vm_define: server requested creation of a new VM')
        debug('vm_define: with description data `%s`' % data)
        info('server requested creation of a new VM')
        debug('description data is `%s`', data)
        if hasattr(self._host_handle, 'vm_define'):
            name = self._host_handle.vm_define(data)
            debug('vm_define: new VM has name `%s`' % name)
            info('new VM has name `%s`', name)
            return name
        else:
            raise NotImplementedError('host do not support VM creation')
@@ -593,7 +591,7 @@ class NodeHandler(RpcHandler):
    def vm_undefine(self, name):
        '''
        '''
        warning('vm_undefine: server requested deletion of `%s`' % name)
        info('server requested deletion of `%s`', name)
        if hasattr(self._host_handle, 'vm_get'):
            vm = self._host_handle.vm_get(name)
            if hasattr(vm, 'undefine'):
@@ -607,7 +605,7 @@ class NodeHandler(RpcHandler):
    def vm_export(self, name, format='xml'):
        '''
        '''
        warning('vm_export: server requested configuration of `%s`' % name)
        info('server requested configuration of `%s`', name)
        if hasattr(self._host_handle, 'vm_get'):
            vm = self._host_handle.vm_get(name)
            if hasattr(vm, 'get_config'):
@@ -621,20 +619,19 @@ class NodeHandler(RpcHandler):
    def vm_stop(self, vm_names=None, force=False):
        '''
        '''
        warning('vm_stop: server requested stop of `%s`' % vm_names)
        debug('vm_stop: force stop is `%s`' % force)
        info('server requested stop of `%s`, force is `%s`', vm_names, force)
        if vm_names is None:
            vm_names = self._host_handle.vm_list_running()
            debug('vm_stop: no vm specified, expanded list to `%s`' % vm_names)
            debug('no vm specified, expanded list to `%s`', vm_names)
        for vm_name in vm_names:
            try:
                debug('vm_stop: fetching vm data for `%s`' % vm_name)
                debug('fetching vm data for `%s`', vm_name)
                vm = self._host_handle.vm_get(vm_name)
                if force:
                    debug('vm_stop: powering off `%s`' % vm_name)
                    debug('powering off `%s`', vm_name)
                    vm.power_off()
                else:
                    info('vm_stop: shutdown requested for `%s`' % vm_name)
                    debug('shutdown requested for `%s`', vm_name)
                    vm.power_shutdown()
            except:
                pass
@@ -643,15 +640,15 @@ class NodeHandler(RpcHandler):
    def vm_start(self, vm_names=None):
        '''
        '''
        warning('vm_start: server requested start of `%s`' % vm_names)
        info('server requested start of `%s`', vm_names)
        if vm_names is None:
            vm_names = self._host_handle.vm_list_stopped()
            debug('vm_start: no vm specified, expanded list to `%s`' % vm_names)
            debug('no vm specified, expanded list to `%s`', vm_names)
        for vm_name in vm_names:
            try:
                debug('vm_start: fetching vm data for `%s`' % vm_name)
                debug('fetching vm data for `%s`', vm_name)
                vm = self._host_handle.vm_get(vm_name)
                info('vm_start: powering on `%s`' % vm_name)
                debug('powering on `%s`', vm_name)
                vm.power_on()
            except:
                pass
@@ -660,16 +657,15 @@ class NodeHandler(RpcHandler):
    def vm_suspend(self, vm_names=None):
        '''
        '''
        warning('vm_suspend: server requested suspend of `%s`' % vm_names)
        info('server requested suspend of `%s`', vm_names)
        if vm_names is None:
            vm_names = self._host_handle.vm_list_running()
            debug('vm_suspend: no vm specified, expanded list to `%s`'
                                                                    % vm_names)
            debug('no vm specified, expanded list to `%s`',vm_names)
        for vm_name in vm_names:
            try:
                debug('vm_suspend: fetching vm data for `%s`' % vm_name)
                debug('fetching vm data for `%s`', vm_name)
                vm = self._host_handle.vm_get(vm_name)
                info('vm_suspend: pause execution of `%s`' % vm_name)
                debug('pause execution of `%s`', vm_name)
                vm.power_suspend()
            except:
                pass
@@ -678,15 +674,15 @@ class NodeHandler(RpcHandler):
    def vm_resume(self, vm_names=None):
        '''
        '''
        warning('vm_resume: server requested resume of `%s`' % vm_names)
        info('server requested resume of `%s`', vm_names)
        if vm_names is None:
            vm_names = self._host_handle.vm_list_running()
            debug('vm_resume: no vm specified, expanded list to `%s`'% vm_names)
            vm_names = self._host_handle.vm_list_paused()
            debug('no vm specified, expanded list to `%s`', vm_names)
        for vm_name in vm_names:
            try:
                debug('vm_resume: fetching vm data for `%s`' % vm_name)
                debug('fetching vm data for `%s`', vm_name)
                vm = self._host_handle.vm_get(vm_name)
                info('vm_resume: resume execution of `%s`' % vm_name)
                debug('resume execution of `%s`', vm_name)
                vm.power_resume()
            except:
                pass
@@ -697,6 +693,8 @@ class NodeHandler(RpcHandler):
    
    @pure
    def vol_create(self, pool, name, size):
        '''
        '''
        size = int(size)
        if hasattr(self._host_handle, 'storage'):
            pool = self._host_handle.storage().pool_get(pool)
@@ -706,6 +704,8 @@ class NodeHandler(RpcHandler):
    
    @pure
    def vol_delete(self, pool, name, wipe=False):
        '''
        '''
        if hasattr(self._host_handle, 'storage'):
            pool = self._host_handle.storage().pool_get(pool)
            vol = pool.volume_get(name)
@@ -717,13 +717,16 @@ class NodeHandler(RpcHandler):
    
    @pure
    def vol_export(self, pool, name, raddr, rport):
        '''
        '''
        rport = int(rport)
        jmgr = self._host_handle.jobmgr
        if hasattr(self._host_handle, 'storage'):
            # get device path info
            sto = self._host_handle.storage()
            vol_path = sto.pool_get(pool).volume_get(name).get_path()
            # create job
            job = SendFileJob(self._host_handle.jobmgr, vol_path, raddr, rport)
            job = SendFileJob(jmgr, vol_path, raddr, rport)
            job.start_now()
            jid = job.get_id()
            # wait for job completion
@@ -733,21 +736,23 @@ class NodeHandler(RpcHandler):
            res = {}
            res['id'] = jid
            res['log'] = job.get_log()
            if jmgr.is_job_finished(jid):
                res['checksum'] = job.get_checksum()
            return res
        else:
            raise NotImplementedError('host handler has no storage support')
    
    @pure
    def vol_import(self, pool, name, timeout=30):
        timeout = int(timeout)
    def vol_import(self, pool, name):
        '''
        '''
        if hasattr(self._host_handle, 'storage'):
            # get device path info
            sto = self._host_handle.storage()
            vol_path = sto.pool_get(pool).volume_get(name).get_path()
            # create job
            # FIXME timeout
            job = ReceiveFileJob(self._host_handle.jobmgr, vol_path)
            # prepare job now, so we can fetch the port number below
            job.prepare()
            job.start_now()
            # return job info
@@ -764,7 +769,7 @@ class NodeHandler(RpcHandler):
        '''
        jid = int(jid)
        jmgr = self._host_handle.jobmgr
        # wait for the job to be finished
        # wait for job completion
        while jmgr.is_job_running(jid):
            sleep(2)
        # get the job
@@ -773,14 +778,25 @@ class NodeHandler(RpcHandler):
        res = {}
        res['id'] = job.get_id()
        res['log'] = job.get_log()
        if jmgr.is_job_finished(job.get_id()):
            res['checksum'] = job.get_checksum()
        return res
    
    @pure
    def vol_import_list(self):
        '''
        '''
        raise NotImplementedError()
    
    @pure
    def vol_import_cancel(self, jid):
        '''
        '''
        jid = int(jid)
        raise NotImplementedError()
 No newline at end of file
        jmgr = self._host_handle.jobmgr
        jmgr.cancel(jid)




+2 −8

File changed.

Preview size limit exceeded, changes collapsed.

+35 −46

File changed.

Preview size limit exceeded, changes collapsed.