Loading cloudcontrol/node/hypervisor/__init__.py +3 −3 Original line number Diff line number Diff line Loading @@ -21,7 +21,7 @@ from xml.etree import cElementTree as et import libvirt from sjrpc.utils import threadless, pass_connection from cloudcontrol.common.client.tags import Tag, tag_inspector from cloudcontrol.common.client.tags import Tag, tag_inspector, ParentWrapper from cloudcontrol.common.client.plugins import ( rpc_handler, rpc_handler_decorator_factory, Loading Loading @@ -121,7 +121,7 @@ class Handler(HostHandler): # register domains for dom in self.hypervisor.domains.itervalues(): self.tag_db.add_sub_object(dom.name, dom.tags.itervalues(), 'vm') dom.tag_db.set_parent(ParentWrapper(dom.name, 'vm', self.tag_db)) # we must refresh those tags only when domains tags are registered to # have the calculated values Loading Loading @@ -416,7 +416,7 @@ class Handler(HostHandler): vm = self.hypervisor.domains[name] vm.lv_dom.setAutostart(int(bool(autostart))) # update autostart value now instead of 10 seconds lag vm.tags['autostart'].update_value() vm.tag_db['__main__']['autostart'].update_value() @libvirt_handler def vol_create(self, pool, name, size): Loading cloudcontrol/node/hypervisor/domains/__init__.py +6 −7 Original line number Diff line number Diff line Loading @@ -25,7 +25,7 @@ from itertools import izip, count import pyev import libvirt from cloudcontrol.common.client.tags import Tag, tag_inspector from cloudcontrol.common.client.tags import Tag, tag_inspector, TagDB, ParentWrapper from cloudcontrol.node.hypervisor import lib as _libvirt from cloudcontrol.node.hypervisor.lib import DOMAIN_STATES as STATE Loading Loading @@ -59,8 +59,7 @@ class VirtualMachine(object): #: state of VM: started, stopped, paused self._state = STATE[dom.info()[0]] #: tags for this VM # FIXME use a tag db instance self.tags = dict((t.name, t) for t in tag_inspector(vm_tags, self)) self.tag_db = TagDB(tags=tag_inspector(vm_tags, self)) #: Driver cache behavior for each VM 
storage, see #: http://libvirt.org/formatdomain.html#elementsDisks self.cache_behaviour = dict() Loading @@ -74,7 +73,7 @@ class VirtualMachine(object): Tag('disk%s_cache' %i, lambda: self.cache_behaviour.get(v.path), 10) ): self.tags[t.name] = t self.tag_db.add_tag(t) for i, nic in izip(count(), self.iter_nics()): for t in ( Loading @@ -82,7 +81,7 @@ class VirtualMachine(object): Tag('nic%s_source' % i, nic.source), Tag('nic%s_model' % i, nic.model), ): self.tags[t.name] = t self.tag_db.add_tag(t) #: keep record of CPU stats (libev timestamp, cpu time) self.cpu_stats = (hypervisor.handler.main.evloop.now(), dom.info()[4]) Loading @@ -105,8 +104,8 @@ class VirtualMachine(object): @state.setter def state(self, value): self._state = value self.tags['status'].update_value() self.tags['vncport'].update_value() self.tag_db['__main__']['status'].update_value() self.tag_db['__main__']['vncport'].update_value() @property def lv_dom(self): Loading cloudcontrol/node/hypervisor/kvm.py +4 −3 Original line number Diff line number Diff line Loading @@ -27,6 +27,7 @@ import traceback import libvirt from cloudcontrol.common.client.utils import main_thread from cloudcontrol.common.client.tags import ParentWrapper from cloudcontrol.node.hypervisor.lib import ( DOMAIN_STATES, EVENTS, Loading Loading @@ -128,7 +129,7 @@ class KVM(object): # this can be the result of a change in the domain configuration # we first remove the old domain vm = self.domains.pop(dom.name()) self.handler.tag_db.remove_sub_object(vm.name) vm.tag_db.set_parent(None) if vm.state not in ('stopped', 'crashed'): # if the vm was updated while it was "on", then the # modifications will not be reflected since we construct the Loading Loading @@ -177,7 +178,7 @@ class KVM(object): logger.info('Created domain %s', vm.name) vm.redefine_on_stop = redefine_on_stop self.domains[vm.name] = vm self.handler.tag_db.add_sub_object(vm.name, vm.tags.itervalues(), 'vm') vm.tag_db.set_parent(ParentWrapper(vm.name, 'vm', 
self.handler.tag_db)) self.update_domain_count() def vm_unregister(self, name): Loading @@ -190,7 +191,7 @@ class KVM(object): # we still update storage and tag attributes pass else: self.handler.tag_db.remove_sub_object(vm.name) vm.tag_db.set_parent(None) # update Storage pools in case VM had volumes that were deleted self.storage.update() self.update_domain_count() Loading cloudcontrol/node/hypervisor/tags.py +8 −8 Original line number Diff line number Diff line Loading @@ -150,28 +150,28 @@ def vmstopped(handl): @_check_virt_connected def cpurunning(handl): """CPU total used by running VMs on the hypervisor.""" return sum(int(vm.tags['cpu'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tags['cpu'].value and return sum(int(vm.tag_db['__main__']['cpu'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tag_db['__main__']['cpu'].value and vm.state == 'running') @_check_virt_connected def cpualloc(handl): """CPU total used by all VMs on the hypervisor.""" return sum(int(vm.tags['cpu'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tags['cpu'].value) return sum(int(vm.tag_db['__main__']['cpu'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tag_db['__main__']['cpu'].value) @_check_virt_connected def memrunning(handl): """Memory used by running VMs on the hypervisor.""" return sum(int(vm.tags['mem'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tags['mem'].value and return sum(int(vm.tag_db['__main__']['mem'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tag_db['__main__']['mem'].value and vm.state == 'running') @_check_virt_connected def memalloc(handl): """Memory used by all VMs on the hypervisor.""" return sum(int(vm.tags['mem'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tags['mem'].value) return sum(int(vm.tag_db['__main__']['mem'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tag_db['__main__']['mem'].value) Loading
cloudcontrol/node/hypervisor/__init__.py +3 −3 Original line number Diff line number Diff line Loading @@ -21,7 +21,7 @@ from xml.etree import cElementTree as et import libvirt from sjrpc.utils import threadless, pass_connection from cloudcontrol.common.client.tags import Tag, tag_inspector from cloudcontrol.common.client.tags import Tag, tag_inspector, ParentWrapper from cloudcontrol.common.client.plugins import ( rpc_handler, rpc_handler_decorator_factory, Loading Loading @@ -121,7 +121,7 @@ class Handler(HostHandler): # register domains for dom in self.hypervisor.domains.itervalues(): self.tag_db.add_sub_object(dom.name, dom.tags.itervalues(), 'vm') dom.tag_db.set_parent(ParentWrapper(dom.name, 'vm', self.tag_db)) # we must refresh those tags only when domains tags are registered to # have the calculated values Loading Loading @@ -416,7 +416,7 @@ class Handler(HostHandler): vm = self.hypervisor.domains[name] vm.lv_dom.setAutostart(int(bool(autostart))) # update autostart value now instead of 10 seconds lag vm.tags['autostart'].update_value() vm.tag_db['__main__']['autostart'].update_value() @libvirt_handler def vol_create(self, pool, name, size): Loading
cloudcontrol/node/hypervisor/domains/__init__.py +6 −7 Original line number Diff line number Diff line Loading @@ -25,7 +25,7 @@ from itertools import izip, count import pyev import libvirt from cloudcontrol.common.client.tags import Tag, tag_inspector from cloudcontrol.common.client.tags import Tag, tag_inspector, TagDB, ParentWrapper from cloudcontrol.node.hypervisor import lib as _libvirt from cloudcontrol.node.hypervisor.lib import DOMAIN_STATES as STATE Loading Loading @@ -59,8 +59,7 @@ class VirtualMachine(object): #: state of VM: started, stopped, paused self._state = STATE[dom.info()[0]] #: tags for this VM # FIXME use a tag db instance self.tags = dict((t.name, t) for t in tag_inspector(vm_tags, self)) self.tag_db = TagDB(tags=tag_inspector(vm_tags, self)) #: Driver cache behavior for each VM storage, see #: http://libvirt.org/formatdomain.html#elementsDisks self.cache_behaviour = dict() Loading @@ -74,7 +73,7 @@ class VirtualMachine(object): Tag('disk%s_cache' %i, lambda: self.cache_behaviour.get(v.path), 10) ): self.tags[t.name] = t self.tag_db.add_tag(t) for i, nic in izip(count(), self.iter_nics()): for t in ( Loading @@ -82,7 +81,7 @@ class VirtualMachine(object): Tag('nic%s_source' % i, nic.source), Tag('nic%s_model' % i, nic.model), ): self.tags[t.name] = t self.tag_db.add_tag(t) #: keep record of CPU stats (libev timestamp, cpu time) self.cpu_stats = (hypervisor.handler.main.evloop.now(), dom.info()[4]) Loading @@ -105,8 +104,8 @@ class VirtualMachine(object): @state.setter def state(self, value): self._state = value self.tags['status'].update_value() self.tags['vncport'].update_value() self.tag_db['__main__']['status'].update_value() self.tag_db['__main__']['vncport'].update_value() @property def lv_dom(self): Loading
cloudcontrol/node/hypervisor/kvm.py +4 −3 Original line number Diff line number Diff line Loading @@ -27,6 +27,7 @@ import traceback import libvirt from cloudcontrol.common.client.utils import main_thread from cloudcontrol.common.client.tags import ParentWrapper from cloudcontrol.node.hypervisor.lib import ( DOMAIN_STATES, EVENTS, Loading Loading @@ -128,7 +129,7 @@ class KVM(object): # this can be the result of a change in the domain configuration # we first remove the old domain vm = self.domains.pop(dom.name()) self.handler.tag_db.remove_sub_object(vm.name) vm.tag_db.set_parent(None) if vm.state not in ('stopped', 'crashed'): # if the vm was updated while it was "on", then the # modifications will not be reflected since we construct the Loading Loading @@ -177,7 +178,7 @@ class KVM(object): logger.info('Created domain %s', vm.name) vm.redefine_on_stop = redefine_on_stop self.domains[vm.name] = vm self.handler.tag_db.add_sub_object(vm.name, vm.tags.itervalues(), 'vm') vm.tag_db.set_parent(ParentWrapper(vm.name, 'vm', self.handler.tag_db)) self.update_domain_count() def vm_unregister(self, name): Loading @@ -190,7 +191,7 @@ class KVM(object): # we still update storage and tag attributes pass else: self.handler.tag_db.remove_sub_object(vm.name) vm.tag_db.set_parent(None) # update Storage pools in case VM had volumes that were deleted self.storage.update() self.update_domain_count() Loading
cloudcontrol/node/hypervisor/tags.py +8 −8 Original line number Diff line number Diff line Loading @@ -150,28 +150,28 @@ def vmstopped(handl): @_check_virt_connected def cpurunning(handl): """CPU total used by running VMs on the hypervisor.""" return sum(int(vm.tags['cpu'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tags['cpu'].value and return sum(int(vm.tag_db['__main__']['cpu'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tag_db['__main__']['cpu'].value and vm.state == 'running') @_check_virt_connected def cpualloc(handl): """CPU total used by all VMs on the hypervisor.""" return sum(int(vm.tags['cpu'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tags['cpu'].value) return sum(int(vm.tag_db['__main__']['cpu'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tag_db['__main__']['cpu'].value) @_check_virt_connected def memrunning(handl): """Memory used by running VMs on the hypervisor.""" return sum(int(vm.tags['mem'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tags['mem'].value and return sum(int(vm.tag_db['__main__']['mem'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tag_db['__main__']['mem'].value and vm.state == 'running') @_check_virt_connected def memalloc(handl): """Memory used by all VMs on the hypervisor.""" return sum(int(vm.tags['mem'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tags['mem'].value) return sum(int(vm.tag_db['__main__']['mem'].value) for vm in handl.hypervisor.domains.itervalues() if vm.tag_db['__main__']['mem'].value)