import logging
import os
import sys
import signal
import socket
import time
import traceback
from StringIO import StringIO
from xml.etree import cElementTree as et

import libvirt
from sjrpc.utils import threadless, pass_connection

from cloudcontrol.node.host import Handler as HostHandler
from cloudcontrol.node.tags import Tag, tag_inspector, get_tags
from cloudcontrol.node.hypervisor import tags
from cloudcontrol.node.hypervisor.kvm import KVM
from cloudcontrol.node.exc import (
    UndefinedDomain,
    DRBDError,
    VMMigrationError,
)
from cloudcontrol.node.hypervisor.jobs import (
    ImportVolume,
    ExportVolume,
    TCPTunnel,
    DRBD,
)
from cloudcontrol.node.utils import close_fds, set_signal_map


logger = logging.getLogger(__name__)


# FIXME find a way to refactor Handler and Hypervisor class
class Handler(HostHandler):
    def __init__(self, *args, **kwargs):
        """
        :param loop: MainLoop instance
        :param hypervisor_name: hypervisor name
        """
        self.hypervisor_name = kwargs.pop('hypervisor_name')
        HostHandler.__init__(self, *args, **kwargs)

        #: keep index of asynchronous calls
        self.async_calls = dict()

        self.timer = self.main.evloop.timer(.0, 5., self.virt_connect_cb)
        self.hypervisor = None
        self._virt_connected = False

        # register tags
        self.tag_db.add_tags(tag_inspector(tags, self))

    @property
    def virt_connected(self):
        return self._virt_connected

    @virt_connected.setter
    def virt_connected(self, value):
        self._virt_connected = value
        # update tags
        for tag in ('vir_status', 'sto', 'nvm', 'vmpaused', 'vmstarted',
                    'vmstopped', 'hvver', 'libvirtver', 'hv'):
            self.tag_db['__main__'][tag].update_value()

    def start(self):
        self.timer.start()
        HostHandler.start(self)

    def stop(self):
        self.timer.stop()
        if self.hypervisor is not None:
            self.hypervisor.stop()
        HostHandler.stop(self)

    def virt_connect_cb(self, *args):
        # initialize hypervisor instance
        try:
            self.hypervisor = KVM(
                name=self.hypervisor_name,
                handler=self,
            )
        except libvirt.libvirtError:
            logger.exception('Error while connecting to libvirt')
            return

        self.virt_connected = True

        # register hypervisor storage tags
        # bind each storage through a default argument, otherwise every lambda
        # would late-bind to the last storage of the loop
        for name, storage in self.hypervisor.storage.storages.iteritems():
            self.tag_db.add_tags((
                Tag('sto%s_state' % name,
                    lambda storage=storage: storage.state, 5, 5),
                Tag('sto%s_size' % name,
                    lambda storage=storage: storage.capacity, 5, 5),
                Tag('sto%s_free' % name,
                    lambda storage=storage: storage.available, 5, 5),
                Tag('sto%s_used' % name,
                    lambda storage=storage: storage.capacity - storage.available,
                    5, 5),
                Tag('sto%s_type' % name,
                    lambda storage=storage: storage.type, 5, 5),
            ))

        # register domains
        for dom in self.hypervisor.domains.itervalues():
            self.tag_db.add_sub_object(dom.name, dom.tags.itervalues())

        # we must refresh those tags only when domain tags are registered to
        # have the calculated values
        for tag in ('cpualloc', 'cpurunning', 'memalloc', 'memrunning'):
            self.tag_db['__main__'][tag].update_value()

        self.rpc_handler.update(dict(
            vm_define=self.vm_define,
            vm_undefine=self.vm_undefine,
            vm_export=self.vm_export,
            vm_stop=self.vm_stop,
            vm_start=self.vm_start,
            vm_suspend=self.vm_suspend,
            vm_resume=self.vm_resume,
        ))

        self.main.reset_handler('vm_define', self.vm_define)
        self.main.reset_handler('vm_undefine', self.vm_undefine)
        self.main.reset_handler('vm_export', self.vm_export)
        self.main.reset_handler('vm_stop', self.vm_stop)
        self.main.reset_handler('vm_destroy', self.vm_destroy)
        self.main.reset_handler('vm_start', self.vm_start)
        self.main.reset_handler('vm_suspend', self.vm_suspend)
        self.main.reset_handler('vm_resume', self.vm_resume)
        self.main.reset_handler('vm_migrate_tunneled', self.vm_migrate_tunneled)
        self.main.reset_handler('vol_create', self.vol_create)
        self.main.reset_handler('vol_delete', self.vol_delete)
        self.main.reset_handler('vol_import', self.vol_import)
        self.main.reset_handler('vol_import_wait', self.vol_import_wait)
        self.main.reset_handler('vol_export', self.vol_export)
        self.main.reset_handler('tun_setup', self.tun_setup)
        self.main.reset_handler('tun_connect', self.tun_connect)
        self.main.reset_handler('tun_connect_hv', self.tun_connect_hv)
        self.main.reset_handler('tun_destroy', self.tun_destroy)
        self.main.reset_handler('drbd_setup', self.drbd_setup)
        self.main.reset_handler('drbd_connect', self.drbd_connect)
        self.main.reset_handler('drbd_role', self.drbd_role)
        self.main.reset_handler('drbd_takeover', self.drbd_takeover)
        self.main.reset_handler('drbd_sync_status', self.drbd_sync_status)
        self.main.reset_handler('drbd_shutdown', self.drbd_shutdown)
        self.main.reset_handler('vm_open_console', self.vm_open_console)
        self.main.reset_handler('vm_disable_virtio_cache',
                                self.vm_disable_virtio_cache)
        self.main.reset_handler('vm_set_autostart', self.vm_set_autostart)

        # if everything went fine, unregister the timer
        self.timer.stop()
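    # Note on the per-storage Tag callables registered in virt_connect_cb
    # above: the ``storage`` object is bound through a lambda default
    # argument.  A plain closure would late-bind the loop variable and every
    # tag would end up reporting the last storage pool iterated.  Minimal
    # illustration of the pitfall (not part of the handler):
    #
    #   callbacks = [lambda: i for i in range(3)]
    #   print [f() for f in callbacks]        # -> [2, 2, 2]
    #   callbacks = [lambda i=i: i for i in range(3)]
    #   print [f() for f in callbacks]        # -> [0, 1, 2]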
    def virt_connect_restart(self):
        """Restart libvirt connection.

        This method might be called when the libvirt connection is lost.

        """
        if not self.virt_connected:
            return

        logger.error('Connection to libvirt lost, trying to restart')

        # update connection state
        self.virt_connected = False

        # refresh those tags
        for tag in ('cpualloc', 'cpurunning', 'memalloc', 'memrunning'):
            self.tag_db['__main__'][tag].update_value()

        # unregister tags that will be re-registered later
        for storage in self.hypervisor.storage.storages:
            self.tag_db.remove_tags((
                'sto%s_state' % storage,
                'sto%s_size' % storage,
                'sto%s_free' % storage,
                'sto%s_used' % storage,
                'sto%s_type' % storage,
            ))

        # unregister sub objects (for the same reason)
        for sub_id in self.tag_db.keys():
            if sub_id == '__main__':
                continue
            self.tag_db.remove_sub_object(sub_id)

        # stop and delete hypervisor instance
        self.hypervisor.stop()
        self.hypervisor = None

        # remove handlers related to libvirt
        self.main.remove_handler('vm_define')
        self.main.remove_handler('vm_undefine')
        self.main.remove_handler('vm_export')
        self.main.remove_handler('vm_stop')
        self.main.remove_handler('vm_destroy')
        self.main.remove_handler('vm_start')
        self.main.remove_handler('vm_suspend')
        self.main.remove_handler('vm_resume')
        self.main.remove_handler('vm_migrate_tunneled')
        self.main.remove_handler('vol_create')
        self.main.remove_handler('vol_delete')
        self.main.remove_handler('vol_import')
        self.main.remove_handler('vol_import_wait')
        self.main.remove_handler('vol_export')
        self.main.remove_handler('tun_setup')
        self.main.remove_handler('tun_connect')
        self.main.remove_handler('tun_connect_hv')
        self.main.remove_handler('tun_destroy')
        self.main.remove_handler('drbd_setup')
        self.main.remove_handler('drbd_connect')
        self.main.remove_handler('drbd_role')
        self.main.remove_handler('drbd_takeover')
        self.main.remove_handler('drbd_sync_status')
        self.main.remove_handler('drbd_shutdown')
        self.main.remove_handler('vm_open_console')
        self.main.remove_handler('vm_disable_virtio_cache')
        self.main.remove_handler('vm_set_autostart')

        # launch connection timer
        self.timer.start()

    def iter_vms(self, vm_names):
        """Utility method to iterate over VM objects using their names."""
        if vm_names is None:
            return
        get_domain = self.hypervisor.domains.get
        for name in vm_names:
            dom = get_domain(name)
            if dom is not None:
                yield dom
    def vm_define(self, data, format='xml'):
        logger.debug('VM define')
        if format != 'xml':
            raise NotImplementedError('Format not supported')
        return self.hypervisor.vm_define(data)

    def vm_undefine(self, name):
        logger.debug('VM undefine %s', name)
        vm = self.hypervisor.domains.get(name)
        if vm is not None:
            vm.undefine()

    def vm_export(self, name, format='xml'):
        logger.debug('VM export %s', name)
        if format != 'xml':
            raise NotImplementedError('Format not supported')
        vm = self.hypervisor.domains.get(name)
        if vm is None:
            return
        return vm.lv_dom.XMLDesc(0)

    def vm_stop(self, name):
        logger.debug('VM stop %s', name)
        try:
            self.hypervisor.domains[name].stop()
        except libvirt.libvirtError:
            logger.exception('Error while stopping VM %s', name)
            raise
        except KeyError:
            msg = 'Cannot stop VM %s because it is not defined' % name
            logger.error(msg)
            raise UndefinedDomain(msg)

    def vm_destroy(self, name):
        logger.debug('VM destroy %s', name)
        try:
            self.hypervisor.domains[name].destroy()
        except libvirt.libvirtError as exc:
            # libvirt sometimes raises 'domain is not running' even though the
            # domain is running, which might be a bug in libvirt
            if 'domain is not running' not in str(exc) or (
                    self.hypervisor.domains[name].state != 'running'):
                logger.exception('Error while destroying VM %s', name)
                raise
        except KeyError:
            msg = 'Cannot destroy VM %s because it is not defined' % name
            logger.error(msg)
            raise UndefinedDomain(msg)

    def vm_start(self, name):
        logger.debug('VM start %s', name)
        try:
            self.hypervisor.domains[name].start()
        except libvirt.libvirtError:
            logger.exception('Error while starting VM %s', name)
            raise
        except KeyError:
            msg = 'Cannot start VM %s because it is not defined' % name
            logger.error(msg)
            raise UndefinedDomain(msg)

    def vm_suspend(self, name):
        logger.debug('VM suspend %s', name)
        try:
            self.hypervisor.domains[name].suspend()
        except libvirt.libvirtError:
            logger.exception('Error while suspending VM %s', name)
            raise
        except KeyError:
            msg = 'Cannot suspend VM %s because it is not defined' % name
            logger.error(msg)
            raise UndefinedDomain(msg)

    def vm_resume(self, name):
        logger.debug('VM resume %s', name)
        try:
            self.hypervisor.domains[name].resume()
        except libvirt.libvirtError:
            logger.exception('Error while resuming VM %s', name)
            raise
        except KeyError:
            msg = 'Cannot resume VM %s because it is not defined' % name
            logger.error(msg)
            raise UndefinedDomain(msg)
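    # Tunneled live migration relies on two TCP tunnels set up beforehand by
    # the cc-server through the tun_* handlers below (the exact orchestration
    # is an assumption here; the port numbers come from the tun_setup
    # results):
    #   - tun_res: lets the forked child open its own client connection to
    #     the destination libvirtd (qemu+tcp://127.0.0.1:<port>/system)
    #   - migtun_res: lets the local libvirtd reach the destination libvirtd
    #     for the PEER2PEER/TUNNELLED data stream
    #     (see http://libvirt.org/migration.html)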
    def vm_migrate_tunneled(self, name, tun_res, migtun_res, unsafe=False,
                            timeout=60.):
        """Live migrate VM through a TCP tunnel.

        :param name: VM name to migrate
        :param tun_res: result of the tun_setup handler
        :param migtun_res: result of the tunnel setup handler
        :param unsafe: for libvirt >= 0.9.11, see
            http://libvirt.org/html/libvirt-libvirt.html#virDomainMigrateFlags
        :param float timeout: timeout for the libvirt migration (prevents
            libvirt from trying to acquire the domain lock forever)

        """
        logger.debug('VM live migrate %s', name)

        try:
            # this is the port used by our libvirt in the cc-node (client
            # libvirt) to connect to the remote libvirtd
            remote_virt_port = tun_res['port']
        except KeyError:
            logger.error('Badly formatted argument tun_res for live'
                         ' migration')
            raise

        try:
            # this is the port used by the local libvirtd to connect to the
            # remote libvirtd (see http://libvirt.org/migration.html)
            remote_virt_port2 = migtun_res['port']
        except KeyError:
            logger.error('Badly formatted argument migtun_res for live'
                         ' migration')
            raise

        try:
            vm = self.hypervisor.domains[name]
        except KeyError:
            logger.exception('Cannot find domain %s on hypervisor for live'
                             ' migration', name)
            raise

        # we open a new connection to libvirt and fork because the libvirt
        # python binding, while doing an operation, sometimes does not seem
        # to release CPython's GIL, therefore all node operations are blocked
        # the only solution we have found so far is to use a dedicated
        # libvirt connection for the migration and fork: the migration itself
        # is handled by the child while the other threads can be scheduled
        try:
            pid = os.fork()
        except OSError:
            logger.error('Cannot fork before running live migration')
            raise

        if pid == 0:
            # child
            sys.stderr.write('Hello from child!\n')
            sys.stderr.write('Debug is %s\n' % self.main.config.debug)
            try:
                close_fds(debug=self.main.config.debug)
                set_signal_map({
                    signal.SIGTERM: lambda *args: os._exit(1),
                    signal.SIGUSR1: signal.SIG_IGN,
                    signal.SIGINT: signal.SIG_IGN,
                    # FIXME need more signals?
                })
            except:
                sys.stderr.write('Error while performing post fork work\n')
                traceback.print_exc(file=sys.stderr)

            # create a new libvirt connection dedicated to the migration
            sys.stderr.write('Open new connection to libvirt\n')
            try:
                new_con = libvirt.open('qemu:///system')
                domain = new_con.lookupByUUIDString(vm.uuid)
            except libvirt.libvirtError:
                sys.stderr.write('Cannot connect to libvirt\n')
                os._exit(4)
            except:
                # error
                traceback.print_exc(file=sys.stderr)
                os._exit(2)

            sys.stderr.write('Open destination libvirt connection\n')
            try:
                dest_virt_con = libvirt.open(
                    'qemu+tcp://127.0.0.1:%d/system' % remote_virt_port)
            except libvirt.libvirtError:
                sys.stderr.write('Cannot connect to remote libvirt for live'
                                 ' migrating vm %s\n' % name)
                os._exit(5)
            except:
                # error
                traceback.print_exc(file=sys.stderr)
                os._exit(2)

            try:
                if unsafe:
                    # VIR_MIGRATE_UNSAFE is not defined for libvirt < 0.9.11
                    append_flags = getattr(libvirt, 'VIR_MIGRATE_UNSAFE', 0)
                else:
                    append_flags = 0

                sys.stderr.write('Do migrate\n')
                domain.migrate(
                    dest_virt_con,
                    libvirt.VIR_MIGRATE_LIVE |
                    libvirt.VIR_MIGRATE_PEER2PEER |
                    libvirt.VIR_MIGRATE_TUNNELLED |
                    libvirt.VIR_MIGRATE_PERSIST_DEST |
                    libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                    append_flags,
                    None,
                    'qemu+tcp://127.0.0.1:%d/system' % remote_virt_port2,
                    0,
                )
            except libvirt.libvirtError:
                sys.stderr.write('libvirt error during migration\n')
                traceback.print_exc(file=sys.stderr)
                os._exit(1)
            except:
                # whatever the matter is, we MUST NOT return to libev or sjRPC
                sys.stderr.write('error during migration\n')
                traceback.print_exc(file=sys.stderr)
                os._exit(2)
            else:
                os._exit(0)
            finally:
                new_con.close()
                dest_virt_con.close()
        else:
            # watch for migration status every second
            started_migration = time.time()
            while True:
                # wait timeout
                time.sleep(1.)
                # waitpid with no delay
                try:
                    rpid, status = os.waitpid(pid, os.WNOHANG)
                except OSError as exc:
                    logger.error('Error while waiting for child to'
                                 ' terminate: %s', os.strerror(exc.errno))
                    raise

                # convert status to return status
                status >>= 8
                logger.debug('Status: %s', status)

                if rpid == status == 0:
                    if time.time() - started_migration < timeout:
                        continue

                    # waitpid returned immediately, thus the migration is
                    # still running after the timeout fired, we need to kill
                    # the child (term would have no effect)
                    os.kill(pid, signal.SIGKILL)
                    try:
                        rpid, status = os.waitpid(pid, 0)
                    except OSError as exc:
                        logger.error('Error while waiting for child after'
                                     ' killing it: %s',
                                     os.strerror(exc.errno))
                        raise
                    assert rpid == pid, 'PID returned by waitpid is not valid'

                    logger.error('Migration timeout for vm %s', name)
                    raise VMMigrationError('Timeout')
                else:
                    if status == 4:
                        raise VMMigrationError('Cannot open new connection to'
                                               ' libvirt')
                    elif status == 5:
                        raise VMMigrationError('Cannot open connection to'
                                               ' remote libvirt')
                    elif status != 0:
                        # error
                        logger.error('Libvirt error while live migrating'
                                     ' vm %s', name)
                        logger.debug('Exit status %s', status)
                        raise VMMigrationError('Migration failed')
                    else:
                        logger.info('Successfully live migrated vm %s', name)
                    break
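    # Note on the parent/child handshake above: the child reports failures
    # through os._exit() codes and the parent recovers them from the
    # os.waitpid() status word, where the exit code sits in the high byte.
    # Minimal sketch of that convention (illustrative only, not part of the
    # handler):
    #
    #   pid = os.fork()
    #   if pid == 0:
    #       os._exit(4)                     # child: report error code 4
    #   rpid, status = os.waitpid(pid, 0)
    #   print status >> 8                   # parent: prints 4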
    @threadless
    @pass_connection
    def vm_open_console(self, conn, name):
        """
        :param conn: sjRPC connection instance
        :param name: VM name
        """
        vm = self.hypervisor.domains[name]
        # create connection to the VM console
        try:
            endpoint = vm.open_console()
        except socket.error:
            # cannot create socketpair
            logger.error('Cannot create connection to VM console')
            raise

        def on_close(tun):
            """Method of Tunnel protocol close callback."""
            tun.endpoint.close()
            vm.close_console()

        # connect as tunnel endpoint
        proto = conn.create_tunnel(endpoint=endpoint, on_close=on_close)
        return proto.label
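    # vm_disable_virtio_cache below rewrites the domain XML so that every
    # virtio disk is defined with cache='none'.  Illustrative before/after of
    # the <disk> element it touches (values other than the cache attribute
    # are examples, not taken from an actual domain):
    #
    #   <disk type='block' device='disk'>
    #     <driver name='qemu' type='raw'/>               <!-- before -->
    #     <driver name='qemu' type='raw' cache='none'/>  <!-- after -->
    #     <target dev='vda' bus='virtio'/>
    #   </disk>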
    def vm_disable_virtio_cache(self, name):
        """Set virtio cache to none on VM disks.

        :param name: VM name

        """
        vm = self.hypervisor.domains[name]
        # get VM XML
        try:
            xml = vm.lv_dom.XMLDesc(0)
        except libvirt.libvirtError:
            logger.exception('Error while getting domain XML from libvirt,'
                             ' %s', vm.name)
            raise

        xml_tree = et.ElementTree()
        xml_tree.parse(StringIO(xml))

        for disk in xml_tree.findall('devices/disk'):
            # check that disk is virtio
            target = disk.find('target')
            if target is None or target.get('bus') != 'virtio':
                continue

            # modify cache attribute
            driver = disk.find('driver')
            assert driver is not None
            driver.set('cache', 'none')
            logger.debug('Set cache attribute for disk %s of VM %s',
                         target.get('dev'), name)

        # write back the XML tree
        out = StringIO()
        xml_tree.write(out)

        # check encoding is fine
        try:
            self.hypervisor.vir_con.defineXML(out.getvalue())
        except libvirt.libvirtError:
            logger.exception('Cannot update XML file for domain %s', name)
            raise

    def vm_set_autostart(self, name, autostart=True):
        """Set autostart on VM.

        :param name: VM name
        :param bool autostart: autostart value to set

        """
        vm = self.hypervisor.domains[name]
        vm.lv_dom.setAutostart(int(bool(autostart)))
        # update the autostart value now instead of with a 10 second lag
        vm.tags['autostart'].update_value()

    def vol_create(self, pool, name, size):
        logger.debug('Volume create %s, pool %s, size %s', name, pool, size)
        try:
            self.hypervisor.storage.create_volume(pool, name, size)
        except Exception:
            logger.exception('Error while creating volume')
            raise

    def vol_delete(self, pool, name):
        logger.debug('Volume delete %s, pool %s', name, pool)
        try:
            self.hypervisor.storage.delete_volume(pool, name)
        except Exception:
            logger.exception('Error while deleting volume')
            raise

    def vol_import(self, pool, name):
        """
        :param pool: pool name where the volume is
        :param name: name of the volume
        """
        logger.debug('Volume import pool = %s, volume = %s', pool, name)
        try:
            pool = self.hypervisor.storage.get_storage(pool)
            if pool is None:
                raise Exception('Pool storage does not exist')  # TODO exc
            volume = pool.volumes.get(name)
            if volume is None:
                raise Exception('Volume does not exist')
            # create the job
            job = self.main.job_manager.create(ImportVolume, volume)
            job.start()
        except Exception:
            logger.exception('Error while starting import job')
            raise

        return dict(id=job.id, port=job.port)

    def vol_import_wait(self, job_id):
        """Block until completion of the given job id."""
        job = self.main.job_manager.get(job_id)
        logger.debug('Waiting for import job to terminate')
        job.wait()
        logger.debug('Import job terminated')
        return dict(id=job.id, log='', checksum=job.checksum)

    def vol_import_cancel(self, job_id):
        """Cancel import job."""
        logger.debug('Cancel import job')
        job = self.main.job_manager.get(job_id)
        self.main.job_manager.cancel(job_id)
        # wait for the job to end; we don't call wait as it is already called
        # in the vol_import_wait handler
        job.join()

    def vol_export(self, pool, name, raddr, rport):
        """
        :param pool: pool name where the volume is
        :param name: name of the volume
        :param raddr: IP address of the destination to send the volume to
        :param rport: TCP port of the destination
        """
        pool = self.hypervisor.storage.get_storage(pool)
        if pool is None:
            raise Exception('Pool storage does not exist')
        volume = pool.volumes.get(name)
        if volume is None:
            raise Exception('Volume does not exist')

        try:
            job = self.main.job_manager.create(ExportVolume, volume, raddr,
                                               rport)
            job.start()
            job.wait()
        except Exception:
            logger.exception('Error while exporting volume')
            raise

        logger.debug('Export volume successful')
        return dict(id=job.id, log='', checksum=job.checksum)
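    # The tunnel and DRBD handlers below are driven remotely by the
    # cc-server; the exact orchestration lives there, but judging from the
    # result dicts passed between them, a plausible sequence for mirroring a
    # volume between two nodes looks roughly like this (illustrative
    # assumption, not the actual cc-server code):
    #
    #   res_a = node_a.tun_setup();   res_b = node_b.tun_setup(local=False)
    #   node_a.tun_connect(res_a, res_b, ip_b)
    #   drbd_a = node_a.drbd_setup(pool, vol); drbd_b = node_b.drbd_setup(...)
    #   node_a.drbd_connect(drbd_a, drbd_b, ip_b)   # and symmetrically on b
    #   node_a.drbd_role(drbd_a, primary=True)
    #   ... poll drbd_sync_status() until done ...
    #   node_b.drbd_takeover(drbd_b, True); node_a.drbd_shutdown(drbd_a)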
    @threadless
    def tun_setup(self, local=True):
        """Set up local tunnel and listen on a random port.

        :param local: indicate if we should listen on localhost or on all
            interfaces

        """
        logger.debug('Tunnel setup: local = %s', local)
        # create job
        job = self.main.job_manager.create(TCPTunnel)
        job.setup_listen('127.0.0.1' if local else '0.0.0.0')

        return dict(
            jid=job.id,
            key='FIXME',
            port=job.port,
        )

    @threadless
    def tun_connect(self, res, remote_res, remote_ip):
        """Connect tunnel to the other end.

        :param res: previous result of the `tun_setup` handler
        :param remote_res: other end result of the `tun_setup` handler
        :param remote_ip: where to connect

        """
        logger.debug('Tunnel connect %s %s', res['jid'], remote_ip)
        job = self.main.job_manager.get(res['jid'])
        job.setup_connect((remote_ip, remote_res['port']))
        job.start()

    @threadless
    def tun_connect_hv(self, res, migration=False):
        """Connect tunnel to the local libvirt Unix socket.

        :param res: previous result of the `tun_setup` handler

        """
        logger.debug('Tunnel connect hypervisor %s', res['jid'])
        job = self.main.job_manager.get(res['jid'])
        job.setup_connect('/var/run/libvirt/libvirt-sock')
        job.start()

    @threadless
    def tun_destroy(self, res):
        """Close the given tunnel.

        :param res: previous result as given by the `tun_setup` handler

        """
        logger.debug('Tunnel destroy %s', res['jid'])
        job = self.main.job_manager.get(res['jid'])
        job.wait()

    def drbd_setup(self, pool, name):
        """Create DRBD volumes.

        :param pool: storage pool
        :param name: storage volume name

        """
        pool = self.hypervisor.storage.get_storage(pool)
        if pool is None:
            raise DRBDError('Cannot setup DRBD: pool storage does not exist')
        elif pool.type != 'logical':
            raise DRBDError('Cannot setup DRBD: pool storage is not LVM')

        volume = pool.volumes.get(name)
        if volume is None:
            raise DRBDError('Cannot setup DRBD: volume does not exist')

        try:
            job = self.main.job_manager.create(DRBD, self.main.evloop,
                                               self.hypervisor.storage, pool,
                                               volume)
        except Exception:
            logger.exception('Error while creating DRBD job')
            raise

        job.setup()
        logger.debug('DRBD setup successful')

        return dict(
            jid=job.id,
            port=job.drbd_port,
        )

    def drbd_connect(self, res, remote_res, remote_ip):
        """Set up DRBD in connect mode (wait for connection and try to
        connect to the remote peer).

        :param res: previous result of the `drbd_setup` handler
        :param remote_res: result of the remote `drbd_setup` handler
        :param remote_ip: IP of the remote peer

        """
        job = self.main.job_manager.get(res['jid'])
        job.connect(remote_ip, remote_res['port'])
        job.wait_connection()

    def drbd_role(self, res, primary):
        """Set up DRBD role.

        :param res: previous result of the `drbd_setup` handler
        :param bool primary: if True, set up in primary mode, else secondary

        """
        job = self.main.job_manager.get(res['jid'])
        if primary:
            job.switch_primary()
        else:
            job.switch_secondary()

    def drbd_takeover(self, res, state):
        """Set up the DRBD device as the VM disk. FIXME

        :param res: previous result of the `drbd_setup` handler
        :param state: FIXME

        """
        job = self.main.job_manager.get(res['jid'])
        job.takeover()

    def drbd_sync_status(self, res):
        """Return the synchronization status of the current DRBD job.

        :param res: previous result of the `drbd_setup` handler

        """
        status = self.main.job_manager.get(res['jid']).status()
        result = dict(
            done=status['disk'] == 'UpToDate',
            completion=status['percent'],
        )
        logger.debug('DRBD status %s', result)

        return result
    def drbd_shutdown(self, res):
        """Destroy DRBD related block devices.

        :param res: previous result of the `drbd_setup` handler

        """
        logger.debug('DRBD shutdown')
        job = self.main.job_manager.get(res['jid'])
        job.cleanup()
        # remove job from job_manager list
        self.main.job_manager.notify(job)
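
# Rough lifecycle sketch of this handler (illustrative; the actual wiring
# lives in the cc-node entry point and the names used here are assumptions):
#
#   handler = Handler(main_loop, hypervisor_name='kvm01')
#   handler.start()     # arms the timer that (re)connects to libvirt
#   ...
#   handler.stop()      # stops the timer and the KVM hypervisor wrapper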