From 651c5d8ca543b23f100b2bcfee7b647ad9dda7bf Mon Sep 17 00:00:00 2001
From: Antoine Millet
Date: Mon, 8 Jun 2015 17:44:04 +0200
Subject: [PATCH] PEP8

---
 cloudcontrol/node/config.py                  |  1 -
 cloudcontrol/node/host/jobs.py               |  2 ++
 cloudcontrol/node/host/tags.py               |  1 +
 cloudcontrol/node/hypervisor/__init__.py     |  5 +--
 .../node/hypervisor/domains/__init__.py      |  3 +-
 .../node/hypervisor/domains/vm_tags.py       |  2 +-
 cloudcontrol/node/hypervisor/jobs.py         | 33 +++++++++----------
 cloudcontrol/node/hypervisor/kvm.py          |  3 +-
 cloudcontrol/node/hypervisor/lib.py          |  2 --
 cloudcontrol/node/hypervisor/tags.py         |  2 +-
 cloudcontrol/node/jobs.py                    |  1 +
 11 files changed, 24 insertions(+), 31 deletions(-)

diff --git a/cloudcontrol/node/config.py b/cloudcontrol/node/config.py
index dff2918..4f029f7 100644
--- a/cloudcontrol/node/config.py
+++ b/cloudcontrol/node/config.py
@@ -93,7 +93,6 @@ class NodeConfigParser(object):
                 'Configuration attribute "verbosity"'
                 ' is invalid (section "node")')
 
-
         self.debug = config.getboolean('node', 'debug', False)
         self.logging_output = 'console' if self.debug else 'syslog'
 
diff --git a/cloudcontrol/node/host/jobs.py b/cloudcontrol/node/host/jobs.py
index 7c9d06e..abe1141 100644
--- a/cloudcontrol/node/host/jobs.py
+++ b/cloudcontrol/node/host/jobs.py
@@ -23,6 +23,7 @@ from cloudcontrol.common.jobs import JobsManagerInterface, Job, JobCancelError
 
 from cloudcontrol.common.tql.db.helpers import taggify
 from cloudcontrol.common.client.tags import TagDB, Tag
 
+
 class NodeJobsManagerInterface(JobsManagerInterface):
     TAG_ATTRIBUTES = ('title', 'status', 'state', 'owner', 'created', 'ended',
@@ -42,6 +43,7 @@ class NodeJobsManagerInterface(JobsManagerInterface):
                 for tag in self.TAG_ATTRIBUTES),
             'job',
         )
+
         def tag_duration(job):
             if job.ended is None:
                 ended = datetime.fromtimestamp(self.handler.main.evloop.now())
diff --git a/cloudcontrol/node/host/tags.py b/cloudcontrol/node/host/tags.py
index d977231..8879407 100644
--- a/cloudcontrol/node/host/tags.py
+++ b/cloudcontrol/node/host/tags.py
@@ -188,5 +188,6 @@ def load():
             pass
     return load_
 
+
 def plugins(handler):
     return ' '.join(handler.plugins) or None
diff --git a/cloudcontrol/node/hypervisor/__init__.py b/cloudcontrol/node/hypervisor/__init__.py
index 2845dab..b82f39c 100644
--- a/cloudcontrol/node/hypervisor/__init__.py
+++ b/cloudcontrol/node/hypervisor/__init__.py
@@ -267,8 +267,7 @@ class Handler(HostHandler):
         except libvirt.libvirtError as exc:
             # Libvirt raises exception 'domain is not running' even if domain
             # is running, might be a bug in libvirt
-            if 'domain is not running' not in str(exc) or (
-                    self.hypervisor.domains[name].state != 'running'):
+            if 'domain is not running' not in str(exc) or (self.hypervisor.domains[name].state != 'running'):
                 logger.exception('Error while destroying VM %s', name)
                 raise
         except KeyError:
@@ -449,7 +448,6 @@ class Handler(HostHandler):
         # update autostart value now instead of 10 seconds lag
         vm.tag_db['__main__']['autostart'].update_value()
 
-
     @libvirt_handler
     def tag_add(self, name, tag, value):
         """Add a static tag on specified VM.
@@ -483,7 +481,6 @@ class Handler(HostHandler):
         vm = self.hypervisor.domains[name]
         return vm.tags
 
-
    @libvirt_handler
     def vol_create(self, pool, name, size):
         logger.debug('Volume create %s, pool %s, size %s', name, pool, size)
diff --git a/cloudcontrol/node/hypervisor/domains/__init__.py b/cloudcontrol/node/hypervisor/domains/__init__.py
index 1c2e225..4264f51 100644
--- a/cloudcontrol/node/hypervisor/domains/__init__.py
+++ b/cloudcontrol/node/hypervisor/domains/__init__.py
@@ -84,8 +84,7 @@ class VirtualMachine(object):
                 Tag('disk%s_path' % i, v.path, 10),
                 Tag('disk%s_pool' % i, v.storage, 10),  # FIXME: change
                 Tag('disk%s_vol' % i, v.name, 10),
-                Tag('disk%s_cache' %i,
-                    lambda: self.cache_behaviour.get(v.path), 10)
+                Tag('disk%s_cache' % i, lambda: self.cache_behaviour.get(v.path), 10)
             ):
                 self.tag_db.add_tag(t)
 
diff --git a/cloudcontrol/node/hypervisor/domains/vm_tags.py b/cloudcontrol/node/hypervisor/domains/vm_tags.py
index 518c847..3a52b6c 100644
--- a/cloudcontrol/node/hypervisor/domains/vm_tags.py
+++ b/cloudcontrol/node/hypervisor/domains/vm_tags.py
@@ -60,7 +60,7 @@ def status(dom):
 
 
 def hv(dom):
-    #FIXME: what shoud be the value of this tag ?
+    # FIXME: what shoud be the value of this tag ?
     return dom.hypervisor.name
 
 
diff --git a/cloudcontrol/node/hypervisor/jobs.py b/cloudcontrol/node/hypervisor/jobs.py
index 2014361..6598daf 100644
--- a/cloudcontrol/node/hypervisor/jobs.py
+++ b/cloudcontrol/node/hypervisor/jobs.py
@@ -268,7 +268,6 @@ class ExportVolume(BaseIOJob):
                 self.clean_fds()
                 break
 
-
         self.checksum = checksum.hexdigest()
         self.clean_fds()
 
@@ -643,8 +642,8 @@ class DRBDAllocator(object):
             # is calling some drbd helpers that returns non 0 value and make the
             # synchronisation halt.
             self.subproc_call([self.MODPROBE, 'drbd',
-                              'minor_count=%d' % self.MINOR_MAX,
-                              'usermode_helper=/bin/true'])
+                               'minor_count=%d' % self.MINOR_MAX,
+                               'usermode_helper=/bin/true'])
         except CalledProcessError:
             logger.error('Cannot load drbd kernel module')
 
@@ -783,8 +782,7 @@ class DRBD(object):
         logger.debug('Create a copy DM of the LV')
         # get LV table
         try:
-            self.dm_table = self.subproc_call([self.DMSETUP, 'table',
-                                              '--showkeys', self.volume.path])
+            self.dm_table = self.subproc_call([self.DMSETUP, 'table', '--showkeys', self.volume.path])
         except CalledProcessError:
             logger.error('Cannot get DM table of VM LV')
             raise DRBDError('Cannot get DM table of VM LV')
@@ -804,20 +802,20 @@ class DRBD(object):
         # wipe drbd metadata (just in case)
         try:
             self.subproc_call([self.DRBDMETA, '--force', self.drbd_path,
-                              'v08', self.meta_volume.path, '0', 'wipe-md'])
+                               'v08', self.meta_volume.path, '0', 'wipe-md'])
         except CalledProcessError:
             pass
         try:
             self.subproc_call([self.DRBDMETA, '--force', self.drbd_path,
-                              'v08', self.meta_volume.path, '0', 'create-md'])
+                               'v08', self.meta_volume.path, '0', 'create-md'])
         except CalledProcessError:
             logger.error('Cannot create DRBD external metadata on device')
             raise DRBDError('Cannot create DRBD metadata')
         try:
             self.subproc_call([self.DRBDSETUP, self.drbd_path, 'disk',
-                              '/dev/mapper/%s' % self.dm_copy,
-                              self.meta_volume.path,
-                              '0', '--create-device'])
+                               '/dev/mapper/%s' % self.dm_copy,
+                               self.meta_volume.path,
+                               '0', '--create-device'])
         except CalledProcessError:
             logger.error('Error while creating DRBD device')
             raise DRBDError('Cannot create DRBD device')
@@ -834,9 +832,9 @@ class DRBD(object):
         # connect to remote node
         try:
             self.subproc_call([self.DRBDSETUP, self.drbd_path, 'net',
-                              '0.0.0.0:%d' % self.drbd_port,
-                              '%s:%d' % (remote_addr, remote_port),
-                              'C', '-m', '-S', '10000000'])
+                               '0.0.0.0:%d' % self.drbd_port,
+                               '%s:%d' % (remote_addr, remote_port),
+                               'C', '-m', '-S', '10000000'])
         except CalledProcessError:
             logger.error('Error while setting up network facility for DRBD')
             raise DRBDError('Cannot set up network for DRBD')
@@ -844,8 +842,7 @@ class DRBD(object):
         sleep(.5)  # FIXME
         logger.debug('Set up bandwidth limit')
         try:
-            self.subproc_call([self.DRBDSETUP, self.drbd_path, 'syncer', '-r',
-                              self.DRBD_RATE])
+            self.subproc_call([self.DRBDSETUP, self.drbd_path, 'syncer', '-r', self.DRBD_RATE])
         except CalledProcessError:
             logger.error('Cannot set bandwidth rate limit on DRBD')
             raise DRBDError('Error while setting bandwidth limit')
@@ -857,9 +854,9 @@ class DRBD(object):
         sleep(.5)  # FIXME
         try:
             self.subproc_call([self.DRBDSETUP, self.drbd_path, 'wait-connect',
-                              '-t', self.DRBD_TIMEOUT,
-                              '-d', self.DRBD_TIMEOUT,
-                              '-o', self.DRBD_TIMEOUT])
+                               '-t', self.DRBD_TIMEOUT,
+                               '-d', self.DRBD_TIMEOUT,
+                               '-o', self.DRBD_TIMEOUT])
         except CalledProcessError:
             logger.error('Error while waiting for remote DRBD to connect,'
                          ' timeout = %s', self.DRBD_TIMEOUT)
diff --git a/cloudcontrol/node/hypervisor/kvm.py b/cloudcontrol/node/hypervisor/kvm.py
index 002a22a..a20e3dd 100644
--- a/cloudcontrol/node/hypervisor/kvm.py
+++ b/cloudcontrol/node/hypervisor/kvm.py
@@ -157,8 +157,7 @@ class KVM(object):
             else:
                 raise
         else:
-            logger.info('Domain change state from %s to %s', vm.state,
-                        state)
+            logger.info('Domain change state from %s to %s', vm.state, state)
             if event == 'Stopped' and vm.redefine_on_stop:
                 # if the vm was changed while it was running, then we
                 # need to recreate it now as stated above
diff --git a/cloudcontrol/node/hypervisor/lib.py b/cloudcontrol/node/hypervisor/lib.py
index abc441f..032a075 100644
--- a/cloudcontrol/node/hypervisor/lib.py
+++ b/cloudcontrol/node/hypervisor/lib.py
@@ -314,7 +314,6 @@ class StorageIndex(object):
         self.paths = None
         self.update_path_index()
 
-
     def update(self):
         """Update storage pools and volumes."""
         # go through all storage pools and check if it is already in the index
@@ -466,7 +465,6 @@ class Storage(object):
             StringIO(self.lv_storage.XMLDesc(0))).get('type')
 
 
-
 class Volume(object):
     """Volume abstraction."""
     def __init__(self, lv_volume):
diff --git a/cloudcontrol/node/hypervisor/tags.py b/cloudcontrol/node/hypervisor/tags.py
index 125be1d..a764b59 100644
--- a/cloudcontrol/node/hypervisor/tags.py
+++ b/cloudcontrol/node/hypervisor/tags.py
@@ -90,7 +90,7 @@ def hvm():
     return result[and_(
         set(
             'vmx',  # Intel VT
-            'svm', # AMD
+            'svm',  # AMD
         ) & set(
             l.split(': ')[-1].split()
         ) for l in open('/proc/cpuinfo').readline() if l.startswith('Tags')
diff --git a/cloudcontrol/node/jobs.py b/cloudcontrol/node/jobs.py
index 179865b..fb2bf26 100644
--- a/cloudcontrol/node/jobs.py
+++ b/cloudcontrol/node/jobs.py
@@ -75,6 +75,7 @@ class JobManager(object):
 
     def start(self):
         pass
+
     def stop(self):
         logger.debug('Stopping all currently running jobs')
         for job in self.jobs.itervalues():
--
GitLab
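
Note: the patch above is a mechanical PEP 8 cleanup (blank-line counts around definitions, spacing around "%" and before inline comments, continuation-line indentation, joined over-split lines). Fixes of this kind can be re-checked automatically; the sketch below assumes the pycodestyle package (the reference PEP 8 checker, formerly named pep8) is available, which is an assumption — the project may use a different linter or configuration.

import pycodestyle

# A StyleGuide with default settings flags the same families of issues this
# patch addresses (E2xx whitespace, E3xx blank lines, E1xx indentation).
# NOTE: pycodestyle is an assumed tool here, not necessarily what the
# cloudcontrol project itself uses.
style = pycodestyle.StyleGuide()

# check_files() prints each offence as "path:line:col: code message" and
# returns a report object whose total_errors attribute is the overall count.
report = style.check_files([
    'cloudcontrol/node/config.py',
    'cloudcontrol/node/hypervisor/jobs.py',
])
print('%d style issue(s) remaining' % report.total_errors)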