import logging
import os
import signal
import socket
import sys
import time
import weakref
import traceback
from StringIO import StringIO
from itertools import chain, imap
from xml.etree import cElementTree as et

import libvirt

from sjrpc.utils import threadless, pass_connection
from cloudcontrol.node.host import Handler as HostHandler
from cloudcontrol.node.tags import Tag, tag_inspector, get_tags
from cloudcontrol.node.hypervisor import tags
from cloudcontrol.node.hypervisor.lib import (
    DOMAIN_STATES, EVENTS, STORAGE_STATES,
    EventLoop as VirEventLoop,
)
from cloudcontrol.node.hypervisor.domains import VirtualMachine
from cloudcontrol.node.exc import (
    UndefinedDomain, PoolStorageError, DRBDError, VMMigrationError,
)
from cloudcontrol.node.hypervisor.jobs import (
    ImportVolume, ExportVolume, TCPTunnel, DRBD,
)
from cloudcontrol.node.utils import close_fds, set_signal_map


logger = logging.getLogger(__name__)


# FIXME find a way to refactor Handler and Hypervisor class
class Handler(HostHandler):
    def __init__(self, *args, **kwargs):
        """
Anael Beutot's avatar
Anael Beutot committed
        :param loop: MainLoop instance
Anael Beutot's avatar
Anael Beutot committed
        :param hypervisor_name: hypervisor name
        self.hypervisor_name = kwargs.pop('hypervisor_name')
        HostHandler.__init__(self, *args, **kwargs)

        #: keep index of asynchronous calls
        self.async_calls = dict()
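        # attempt the libvirt connection immediately, then retry every 5
        # seconds until virt_connect_cb succeeds and stops this timer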
        self.timer = self.main.evloop.timer(.0, 5., self.virt_connect_cb)
        self.hypervisor = None
        self._virt_connected = False

        # register tags
        self.tag_db.add_tags(tag_inspector(tags, self))

    @property
    def virt_connected(self):
        return self._virt_connected

    @virt_connected.setter
    def virt_connected(self, value):
        self._virt_connected = value
        # update tags
        for tag in ('vir_status', 'sto', 'nvm', 'vmpaused', 'vmstarted',
                    'vmstopped', 'hvver', 'libvirtver', 'hv'):
            self.tag_db['__main__'][tag].update_value()

    def start(self):
        self.timer.start()
        HostHandler.start(self)

    def stop(self):
        self.timer.stop()
        if self.hypervisor is not None:
            self.hypervisor.stop()
        HostHandler.stop(self)

    def virt_connect_cb(self, *args):
        # initialize hypervisor instance
        try:
            self.hypervisor = Hypervisor(
                name=self.hypervisor_name,
                handler=self,
            )
        except libvirt.libvirtError:
            logger.exception('Error while connecting to libvirt')
            return

        self.virt_connected = True

        # register hypervisor storage tags
        # NOTE: bind the pool as a lambda default argument so each tag reads
        # its own storage instead of the last one iterated
        for name, storage in self.hypervisor.storage.storages.iteritems():
            self.tag_db.add_tags((
                Tag('sto%s_state' % name, lambda s=storage: s.state, 5, 5),
                Tag('sto%s_size' % name, lambda s=storage: s.capacity, 5, 5),
                Tag('sto%s_free' % name, lambda s=storage: s.available, 5, 5),
                Tag('sto%s_used' % name,
                    lambda s=storage: s.capacity - s.available, 5, 5),
                Tag('sto%s_type' % name, lambda s=storage: s.type, 5, 5),
            ))
        # register domains
        for dom in self.hypervisor.domains.itervalues():
            self.tag_db.add_sub_object(dom.name, dom.tags.itervalues())
        # we must refresh these tags only once the domain tags are registered
        # so that the calculated values are correct
        for tag in ('cpualloc', 'cpurunning', 'memalloc', 'memrunning'):
            self.tag_db['__main__'][tag].update_value()

        self.rpc_handler.update(dict(
            vm_define=self.vm_define,
            vm_undefine=self.vm_undefine,
            vm_export=self.vm_export,
            vm_stop=self.vm_stop,
            vm_start=self.vm_start,
            vm_suspend=self.vm_suspend,
            vm_resume=self.vm_resume,
        ))
        self.main.reset_handler('vm_define', self.vm_define)
        self.main.reset_handler('vm_undefine', self.vm_undefine)
        self.main.reset_handler('vm_export', self.vm_export)
        self.main.reset_handler('vm_stop', self.vm_stop)
        self.main.reset_handler('vm_destroy', self.vm_destroy)
        self.main.reset_handler('vm_start', self.vm_start)
        self.main.reset_handler('vm_suspend', self.vm_suspend)
        self.main.reset_handler('vm_resume', self.vm_resume)
        self.main.reset_handler('vm_migrate_tunneled', self.vm_migrate_tunneled)
        self.main.reset_handler('vol_create', self.vol_create)
        self.main.reset_handler('vol_delete', self.vol_delete)
        self.main.reset_handler('vol_import', self.vol_import)
        self.main.reset_handler('vol_import_wait', self.vol_import_wait)
        self.main.reset_handler('vol_export', self.vol_export)
        self.main.reset_handler('tun_setup', self.tun_setup)
        self.main.reset_handler('tun_connect', self.tun_connect)
        self.main.reset_handler('tun_connect_hv', self.tun_connect_hv)
        self.main.reset_handler('tun_destroy', self.tun_destroy)
        self.main.reset_handler('drbd_setup', self.drbd_setup)
        self.main.reset_handler('drbd_connect', self.drbd_connect)
        self.main.reset_handler('drbd_role', self.drbd_role)
        self.main.reset_handler('drbd_takeover', self.drbd_takeover)
        self.main.reset_handler('drbd_sync_status', self.drbd_sync_status)
        self.main.reset_handler('drbd_shutdown', self.drbd_shutdown)
        self.main.reset_handler('vm_open_console', self.vm_open_console)
        self.main.reset_handler('vm_disable_virtio_cache',
                                self.vm_disable_virtio_cache)
        self.main.reset_handler('vm_set_autostart', self.vm_set_autostart)

        # if everything went fine, unregister the timer
        self.timer.stop()

    def virt_connect_restart(self):
        """Restart libvirt connection.

        This method might be called when the libvirt connection is lost.
        """
        if not self.virt_connected:
            return

        logger.error('Connection to libvirt lost, trying to restart')
        # update connection state
        self.virt_connected = False
        # refresh those tags
        for tag in ('cpualloc', 'cpurunning', 'memalloc', 'memrunning'):
            self.tag_db['__main__'][tag].update_value()

        # unregister tags that will be re-registered later
        for storage in self.hypervisor.storage.storages:
            self.tag_db.remove_tags((
                'sto%s_state' % storage,
                'sto%s_size' % storage,
                'sto%s_free' % storage,
                'sto%s_used' % storage,
                'sto%s_type' % storage,
            ))
        # unregister sub objects (for the same reason)
        for sub_id in self.tag_db.keys():
            if sub_id == '__main__':
                continue
            self.tag_db.remove_sub_object(sub_id)
        # stop and delete hypervisor instance
        self.hypervisor.stop()
        self.hypervisor = None

        # remove handlers related to libvirt
        self.main.remove_handler('vm_define')
        self.main.remove_handler('vm_undefine')
        self.main.remove_handler('vm_export')
        self.main.remove_handler('vm_stop')
        self.main.remove_handler('vm_destroy')
        self.main.remove_handler('vm_start')
        self.main.remove_handler('vm_suspend')
        self.main.remove_handler('vm_resume')
        self.main.remove_handler('vm_migrate_tunneled')
        self.main.remove_handler('vol_create')
        self.main.remove_handler('vol_delete')
        self.main.remove_handler('vol_import')
        self.main.remove_handler('vol_import_wait')
        self.main.remove_handler('vol_export')
        self.main.remove_handler('tun_setup')
        self.main.remove_handler('tun_connect')
        self.main.remove_handler('tun_connect_hv')
        self.main.remove_handler('tun_destroy')
        self.main.remove_handler('drbd_setup')
        self.main.remove_handler('drbd_connect')
        self.main.remove_handler('drbd_role')
        self.main.remove_handler('drbd_takeover')
        self.main.remove_handler('drbd_sync_status')
        self.main.remove_handler('drbd_shutdown')
        self.main.remove_handler('vm_open_console')
        self.main.remove_handler('vm_disable_virtio_cache')
        self.main.remove_handler('vm_set_autostart')
        # launch connection timer
        self.timer.start()

    def iter_vms(self, vm_names):
        """Utility function to iterate over VM objects using their names."""
        if vm_names is None:
            return
        get_domain = self.hypervisor.domains.get
        for name in vm_names:
            dom = get_domain(name)
            if dom is not None:
                yield dom

    def vm_define(self, data, format='xml'):
        logger.debug('VM define')
        if format != 'xml':
            raise NotImplementedError('Format not supported')

        return self.hypervisor.vm_define(data)

    def vm_undefine(self, name):
        logger.debug('VM undefine %s', name)
        vm = self.hypervisor.domains.get(name)
        if vm is not None:
            vm.undefine()

    def vm_export(self, name, format='xml'):
        logger.debug('VM export %s', name)
        if format != 'xml':
            raise NotImplementedError('Format not supported')

        vm = self.hypervisor.domains.get(name)

        if vm is None:
            return

        return vm.lv_dom.XMLDesc(0)

    def vm_stop(self, name):
        logger.debug('VM stop %s', name)
        try:
            self.hypervisor.domains[name].stop()
        except libvirt.libvirtError:
            logger.exception('Error while stopping VM %s', name)
            raise
        except KeyError:
            msg = 'Cannot stop VM %s because it is not defined' % name
            logger.error(msg)
            raise UndefinedDomain(msg)

    def vm_destroy(self, name):
        logger.debug('VM destroy %s', name)
        try:
            self.hypervisor.domains[name].destroy()
        except libvirt.libvirtError as exc:
            # Libvirt sometimes raises 'domain is not running' even when the
            # domain is running; might be a bug in libvirt
            if 'domain is not running' not in str(exc) or (
                self.hypervisor.domains[name].state != 'running'):
                logger.exception('Error while destroying VM %s', name)
                raise
        except KeyError:
            msg = 'Cannot destroy VM %s because it is not defined' % name
            logger.error(msg)
            raise UndefinedDomain(msg)

    def vm_start(self, name):
        logger.debug('VM start %s', name)
        try:
            self.hypervisor.domains[name].start()
        except libvirt.libvirtError:
            logger.exception('Error while starting VM %s', name)
            raise
        except KeyError:
            msg = 'Cannot start VM %s because it is not defined' % name
            logger.error(msg)
            raise UndefinedDomain(msg)

    def vm_suspend(self, name):
        logger.debug('VM suspend %s', name)
        try:
            self.hypervisor.domains[name].suspend()
        except libvirt.libvirtError:
            logger.exception('Error while suspending VM %s', name)
            raise
        except KeyError:
            msg = 'Cannot suspend VM %s because it is not defined' % name
            logger.error(msg)
            raise UndefinedDomain(msg)

    def vm_resume(self, name):
        logger.debug('VM resume %s', name)
        try:
            self.hypervisor.domains[name].resume()
        except libvirt.libvirtError:
            logger.exception('Error while resuming VM %s', name)
            raise
        except KeyError:
            msg = 'Cannot resume VM %s because it is not defined' % name
            logger.error(msg)
            raise UndefinedDomain(msg)
    def vm_migrate_tunneled(self, name, tun_res, migtun_res, unsafe=False,
                            timeout=60.):
        """Live migrate VM through TCP tunnel.

        :param name: VM name to migrate
        :param tun_res: result of `tun_setup` handler (tunnel used by the
            client libvirt connection)
        :param migtun_res: result of `tun_setup` handler (tunnel used for the
            libvirtd to libvirtd migration stream)
        :param unsafe: for Libvirt >= 0.9.11, see
            http://libvirt.org/html/libvirt-libvirt.html#virDomainMigrateFlags
        :param float timeout: timeout for libvirt migration (prevents libvirt
            from trying to acquire domain lock forever)
        """
        logger.debug('VM live migrate %s', name)

        try:
            # this is the port used by our libvirt in the cc-node (client
            # libvirt) to connect to the remote libvirtd
            remote_virt_port = tun_res['port']
        except KeyError:
            logger.error('Invalid format for argument tun_res for live'
                         ' migration')
            raise
        try:
            # this is the port used by local libvirtd to connect to the remote
            # libvirtd (see http://libvirt.org/migration.html)
            remote_virt_port2 = migtun_res['port']
        except KeyError:
            logger.error('Invalid format for argument migtun_res for live'
                         ' migration')
            raise
        try:
            vm = self.hypervisor.domains[name]
        except KeyError:
            logger.exception('Cannot find domain %s on hypervisor for live'
                             ' migration', name)
            raise

        # we open a new connection to libvirt and fork because the libvirt
        # python binding sometimes does not release CPython's GIL while
        # performing an operation, which blocks all node operations.
        # The only solution we have found so far is to use a dedicated
        # libvirt connection for the migration and to fork: the migration
        # itself is handled by the child while other threads can still be
        # scheduled in the parent

        try:
            pid = os.fork()
        except OSError:
            logger.error('Cannot fork before running live migration')
            raise

        if pid == 0:
            # child
            sys.stderr.write('Hello from child !\n')
            sys.stderr.write('Debug is %s\n' % self.main.config.debug)
            try:
                close_fds(debug=self.main.config.debug)
                set_signal_map({
                    signal.SIGTERM: lambda *args: os._exit(1),
                    signal.SIGUSR1: signal.SIG_IGN,
                    signal.SIGINT: signal.SIG_IGN,
                    # FIXME need more signal ?
                })
            except:
                sys.stderr.write('Error while performing post fork work\n')
                traceback.print_exc(file=sys.stderr)
            # create a new libvirt connection dedicated to migration
            sys.stderr.write('Open new connection to libvirt\n')
            try:
                new_con = libvirt.open('qemu:///system')
                domain = new_con.lookupByUUIDString(vm.uuid)
            except libvirt.libvirtError:
                sys.stderr.write('Cannot connect to libvirt\n')
                os._exit(4)
            except:
                # unexpected error: report it and bail out, we must never
                # fall through with `domain` undefined
                traceback.print_exc(file=sys.stderr)
                os._exit(3)
            sys.stderr.write('Open destination libvirt connection\n')
            try:
                dest_virt_con = libvirt.open(
                    'qemu+tcp://127.0.0.1:%d/system' % remote_virt_port)
            except libvirt.libvirtError:
                sys.stderr.write('Cannot connect to remote libvirt for live'
                                 ' migrating vm %s\n' % name)
                os._exit(5)
            except:
                # unexpected error: report it and bail out, we must never
                # fall through with `dest_virt_con` undefined
                traceback.print_exc(file=sys.stderr)
                os._exit(3)
            try:
                if unsafe:
                    # VIR_MIGRATE_UNSAFE is not defined for libvirt < 0.9.11
                    append_flags = getattr(libvirt, 'VIR_MIGRATE_UNSAFE', 0)
                else:
                    append_flags = 0
                sys.stderr.write('Do migrate\n')
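                # flags: VIR_MIGRATE_LIVE keeps the guest running during the
                # migration, VIR_MIGRATE_PEER2PEER lets the source libvirtd
                # manage the connection to the destination,
                # VIR_MIGRATE_TUNNELLED tunnels the migration data through
                # that connection, VIR_MIGRATE_PERSIST_DEST persists the
                # config on the destination and VIR_MIGRATE_UNDEFINE_SOURCE
                # removes it from the source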
                domain.migrate(
                    dest_virt_con,
                    libvirt.VIR_MIGRATE_LIVE | libvirt.VIR_MIGRATE_PEER2PEER |
                    libvirt.VIR_MIGRATE_TUNNELLED |
                    libvirt.VIR_MIGRATE_PERSIST_DEST |
                    libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                    append_flags,
                    None,
                    'qemu+tcp://127.0.0.1:%d/system' % remote_virt_port2,
                    0,
                )
            except libvirt.libvirtError:
                sys.stderr.write('libvirt error during migration\n')
                traceback.print_exc(file=sys.stderr)
                os._exit(1)
            except:
                # whatever the matter is we MUST NOT return to libev or sjRPC
                sys.stderr.write('error during migration\n')
                traceback.print_exc(file=sys.stderr)
                os._exit(2)
            else:
                os._exit(0)
            finally:
                dest_virt_con.close()
        else:
            # watch for migration status every second
            started_migration = time.time()
            while True:
                # wait timeout
                time.sleep(1.)

                # waitpid with no delay
                try:
                    rpid, status = os.waitpid(pid, os.WNOHANG)
                except OSError as exc:
                    logger.error('Error while waiting for child to terminate: %s',
                                 os.strerror(exc.errno))
                    raise

                # extract the exit code from the 16-bit wait status returned
                # by waitpid (the exit code lives in the high byte)
                status >>= 8
                logger.debug('Status: %s', status)
                if rpid == status == 0:
                    if time.time() - started_migration < timeout:
                        continue

                    # waitpid returned immediately, thus migration still running
                    # after timeout fired, we need to kill the child (term would
                    # have no effect)
                    os.kill(pid, signal.SIGKILL)

                    try:
                        rpid, status = os.waitpid(pid, 0)
                    except OSError as exc:
                        logger.error('Error while waiting for child after killing'
                                     ' it: %s', os.strerror(exc.errno))
                        raise

                    assert rpid == pid, 'PID returned by waitpid is not valid'

                    logger.error('Migration timeout for vm %s', name)
                    raise VMMigrationError('Timeout')
                else:
                    if status == 4:
                        raise VMMigrationError('Cannot open new connection to'
                                               ' libvirt')
                    elif status == 5:
                        raise VMMigrationError('Cannot open connection to'
                                               ' remote libvirt')
                    elif status != 0:
                        # error
                        logger.error('Libvirt error while live migrating vm %s',
                                     name)
                        logger.debug('Exit status %s', status)
                        raise VMMigrationError('Migration failed')
                    else:
                        logger.info('Successfully live migrated vm %s', name)
                        break
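    # threadless (from sjrpc.utils): the handler runs in the RPC event loop
    # rather than in a worker thread, so it must not block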
    @threadless
    @pass_connection
    def vm_open_console(self, conn, name):
        """
        :param conn: sjRPC connection instance
        :param name: VM name
        """
        vm = self.hypervisor.domains[name]

        # create connection to the VM console
        try:
            endpoint = vm.open_console()
        except socket.error:
            # cannot create socketpair
            logger.error('Cannot create connection to VM console')
            raise

        def on_close(tun):
            """Method of Tunnel protocol close callback."""
            tun.endpoint.close()
            vm.close_console()

        # connect as tunnel endpoint
        proto = conn.create_tunnel(endpoint=endpoint, on_close=on_close)
        return proto.label

    def vm_disable_virtio_cache(self, name):
        """Set virtio cache to none on VM disks.

        :param name: VM name
        """
        vm = self.hypervisor.domains[name]

        # get VM XML
        try:
            xml = vm.lv_dom.XMLDesc(0)
        except libvirt.libvirtError:
            logger.exception('Error while getting domain XML from libvirt, %s',
                             vm.name)
            raise

        xml_tree = et.ElementTree()
        xml_tree.parse(StringIO(xml))
        for disk in xml_tree.findall('devices/disk'):
            # check that disk is virtio
            target = disk.find('target')
            if target is None or target.get('bus') != 'virtio':
                continue
            # modify cache attr
            driver = disk.find('driver')
            assert driver is not None
            driver.set('cache', 'none')
            logger.debug('Set cache attribute for disk %s of VM %s',
                         target.get('dev'), name)

        # write back the XML tree
        out = StringIO()
        xml_tree.write(out)  # check encoding is fine
        try:
            self.hypervisor.vir_con.defineXML(out.getvalue())
        except libvirt.libvirtError:
            logger.exception('Cannot update XML file for domain %s', name)
            raise

    def vm_set_autostart(self, name, autostart=True):
        """Set autostart on VM.

        :param name: VM name
        :param bool autostart: autostart value to set
        """
        vm = self.hypervisor.domains[name]
        vm.lv_dom.setAutostart(int(bool(autostart)))
        # update the autostart tag now rather than waiting up to 10 seconds
        # for the next refresh
        vm.tags['autostart'].update_value()

    def vol_create(self, pool, name, size):
        logger.debug('Volume create %s, pool %s, size %s', name, pool, size)
        try:
            self.hypervisor.storage.create_volume(pool, name, size)
        except Exception:
            logger.exception('Error while creating volume')
            raise

    def vol_delete(self, pool, name):
        logger.debug('Volume delete %s, pool %s', name, pool)
        try:
            self.hypervisor.storage.delete_volume(pool, name)
        except Exception:
            logger.exception('Error while deleting volume')
            raise

    def vol_import(self, pool, name):
        """
        :param pool: pool name where the volume is
        :param name: name of the volume
        """
        logger.debug('Volume import pool = %s, volume = %s', pool, name)
        try:
            pool = self.hypervisor.storage.get_storage(pool)
            if pool is None:
                raise Exception('Pool storage does not exist')  # TODO exc

            volume = pool.volumes.get(name)
            if volume is None:
                raise Exception('Volume does not exist')

            # create the job
            job = self.main.job_manager.create(ImportVolume, volume)
            job.start()
        except Exception:
            logger.exception('Error while starting import job')
            raise

        return dict(id=job.id, port=job.port)

    def vol_import_wait(self, job_id):
        """Block until completion of the given job id."""
        job = self.main.job_manager.get(job_id)
        logger.debug('Waiting for import job to terminate')
        job.wait()
        logger.debug('Import job terminated')

        return dict(id=job.id, log='', checksum=job.checksum)

    def vol_import_cancel(self, job_id):
        """Cancel import job."""
        logger.debug('Cancel import job')
        job = self.main.job_manager.get(job_id)
        self.main.job_manager.cancel(job_id)
        # wait for job to end
        job.join()  # we don't call wait as it is already called in
                    # vol_import_wait handler

    def vol_export(self, pool, name, raddr, rport):
        """
        :param pool: pool name where the volume is
        :param name: name of the volume
        :param raddr: IP address of the destination to send the volume to
        :param rport: TCP port of the destination
        """
        pool = self.hypervisor.storage.get_storage(pool)

        if pool is None:
            raise Exception('Pool storage does not exist')

        volume = pool.volumes.get(name)

        if volume is None:
            raise Exception('Volume does not exist')

        try:
            job = self.main.job_manager.create(ExportVolume, volume, raddr, rport)
            job.start()
            job.wait()
        except Exception:
            logger.exception('Error while exporting volume')
            raise

        logger.debug('Export volume successful')
        return dict(id=job.id, log='', checksum=job.checksum)

    @threadless
    def tun_setup(self, local=True):
        """Set up local tunnel and listen on a random port.

        :param local: indicate if we should listen on localhost or all
            interfaces
        """
        logger.debug('Tunnel setup: local = %s', local)
        # create job
        job = self.main.job_manager.create(TCPTunnel)
        job.setup_listen('127.0.0.1' if local else '0.0.0.0')
        return dict(
            jid=job.id,
            key='FIXME',
            port=job.port,
        )

    @threadless
    def tun_connect(self, res, remote_res, remote_ip):
        """Connect tunnel to the other end.

        :param res: previous result of `tun_setup` handler
        :param remote_res: other end result of `tun_setup` handler
        :param remote_ip: where to connect
        """
        logger.debug('Tunnel connect %s %s', res['jid'], remote_ip)
        job = self.main.job_manager.get(res['jid'])
        job.setup_connect((remote_ip, remote_res['port']))
        job.start()

    @threadless
    def tun_connect_hv(self, res, migration=False):
        """Connect tunnel to local libvirt Unix socket.

        :param res: previous result of `tun_setup` handler
        """
        logger.debug('Tunnel connect hypervisor %s', res['jid'])
        job = self.main.job_manager.get(res['jid'])
        job.setup_connect('/var/run/libvirt/libvirt-sock')
        job.start()

    @threadless
    def tun_destroy(self, res):
        """Close given tunnel.

        :param res: previous result as given by `tun_setup` handler
        """
        logger.debug('Tunnel destroy %s', res['jid'])
        job = self.main.job_manager.get(res['jid'])
        job.wait()
    def drbd_setup(self, pool, name):
        """Create DRBD volumes.

        :param pool: storage pool
        :param name: storage volume name
        """
        pool = self.hypervisor.storage.get_storage(pool)
        if pool is None:
            raise DRBDError('Cannot setup DRBD: pool storage does not exist')
        elif pool.type != 'logical':
            raise DRBDError('Cannot setup DRBD: pool storage is not LVM')

        volume = pool.volumes.get(name)
        if volume is None:
            raise DRBDError('Cannot setup DRBD: volume does not exist')

        try:
            job = self.main.job_manager.create(DRBD, self.main.evloop,
                                               self.hypervisor.storage,
                                               pool, volume)
        except Exception:
            logger.exception('Error while creating DRBD job')
            raise

        job.setup()

        logger.debug('DRBD setup successful')
        return dict(
            jid=job.id,
            port=job.drbd_port,
        )

    def drbd_connect(self, res, remote_res, remote_ip):
        """Set up DRBD in connect mode. (Wait for connection and try to connect
        to the remote peer.

        :param res: previous result of `drbd_setup` handler
        :param remote_res: result of remote `drbd_setup` handler
        :param remote_ip: IP of remote peer
        """
        job = self.main.job_manager.get(res['jid'])
        job.connect(remote_ip, remote_res['port'])
        job.wait_connection()

    def drbd_role(self, res, primary):
        """Set up DRBD role.

        :param res: previous result of `drbd_setup` handler
        :param bool primary: if True, set up in primary mode else secondary
        """
        job = self.main.job_manager.get(res['jid'])
        if primary:
            job.switch_primary()
        else:
            job.switch_secondary()

    def drbd_takeover(self, res, state):
        """Set up DRBD device as the VM disk. FIXME

        :param res: previous result of `drbd_setup` handler
        :param state: FIXME
        """
        job = self.main.job_manager.get(res['jid'])
        job.takeover()

    def drbd_sync_status(self, res):
        """Return synchronization status of a current DRBD job.

        :param res: previous result of `drbd_setup` handler
        """
        status = self.main.job_manager.get(res['jid']).status()
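        # DRBD reports the disk state as 'UpToDate' once the initial
        # synchronization has completed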
        result = dict(
            done=status['disk'] == 'UpToDate',
            completion=status['percent'],
        )
        logger.debug('DRBD status %s', result)
        return result

    def drbd_shutdown(self, res):
        """Destroy DRBD related block devices.

        :param res: previous result of `drbd_setup` handler
        """
        logger.debug('DRBD shutdown')
        job = self.main.job_manager.get(res['jid'])
        job.cleanup()

        # remove job from job_manager list
        self.main.job_manager.notify(job)


class Hypervisor(object):
    """Container for all hypervisor related state."""
    def __init__(self, name, handler):
        """
        :param str name: name of hypervisor instance
        :param Handler handler: hypervisor handler
        self.handler = weakref.proxy(handler)
        #: hv attributes
        self.name = name
        self.type = u'kvm'

        # register libvirt error handler
        libvirt.registerErrorHandler(self.vir_error_cb, None)
        # libvirt event loop abstraction
        self.vir_event_loop = VirEventLoop(self.handler.main.evloop)
        self.vir_con = libvirt.open('qemu:///system')  # currently only supports KVM
        # discover storage pools
        self.storage = StorageIndex(handler, self.vir_con)

        logger.debug('Storages: %s', self.storage.paths)

        #: domains: vms, containers...
        self.domains = dict()
        # find defined domains
        for dom_name in self.vir_con.listDefinedDomains():
            dom = self.vir_con.lookupByName(dom_name)
            self.domains[dom.name()] = VirtualMachine(dom, self)
        # find started domains
        for dom_id in self.vir_con.listDomainsID():
            dom = self.vir_con.lookupByID(dom_id)
            self.domains[dom.name()] = VirtualMachine(dom, self)

        logger.debug('Domains: %s', self.domains)
        self.vir_con.domainEventRegister(self.vir_cb, None)  # TODO find out args

    def stop(self):
        self.vir_event_loop.stop()
        # unregister callback
        try:
            self.vir_con.domainEventDeregister(self.vir_cb)
        except libvirt.libvirtError:
            # deregistering raises if the libvirt connection is already broken
            pass
        ret = self.vir_con.close()
        logger.debug('Libvirt still handling %s ref connections', ret)
        # TODO delete objects
    def vir_error_cb(self, ctxt, err):
        """Libvirt error callback.

        See http://libvirt.org/errors.html for more informations.

        :param ctxt: arbitrary context data (not needed because context is
            given by self)
        :param err: libvirt error code
        """
        logger.error('Libvirt error %s', err)

    def vir_cb(self, conn, dom, event, detail, opaque):
        """Callback for libvirt event loop."""
        logger.debug('Received event %s on domain %s, detail %s', event,
                     dom.name(), detail)

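        # EVENTS maps libvirt's numeric domain event codes to readable names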
        event = EVENTS[event]

        if event == 'Added':
            # update Storage pools in case VM has volumes that were created
            self.storage.update()
            if dom.name() in self.domains:
                # sometimes libvirt sends us the same event multiple times,
                # e.g. after a change in the domain configuration, so we
                # first remove the old domain
                vm = self.domains.pop(dom.name())
                self.handler.tag_db.remove_sub_object(vm.name)
                logger.debug('Domain %s recreated', dom.name())
            vm = VirtualMachine(dom, self)
            logger.info('Created domain %s', vm.name)
            self.domains[vm.name] = vm
            self.handler.tag_db.add_sub_object(vm.name, vm.tags.itervalues())
            self.update_domain_count()
        elif event == 'Removed':
            vm_name = dom.name()
            self.vm_unregister(vm_name)
            logger.info('Removed domain %s', vm_name)
        elif event in ('Started', 'Suspended', 'Resumed', 'Stopped', 'Saved',
                       'Restored'):
            vm = self.domains.get(dom.name())
            # sometimes libvirt sends a start event before the created event,
            # so be careful
            if vm is not None:
                try:
                    state = DOMAIN_STATES[dom.info()[0]]
                except libvirt.libvirtError as exc:
                    # checks that domain was not previously removed
                    # seems to happen only in libvirt 0.8.8
                    if 'Domain not found' in str(exc):
                        self.vm_unregister(dom.name())
                    else:
                        raise
                else:
                    logger.info('Domain changed state from %s to %s',
                                vm.state, state)
                    vm.state = state
                    self.update_domain_count()

    def vm_unregister(self, name):
        """Unregister a VM from the cc-server and remove it from the index."""
        try:
            vm = self.domains.pop(name)
        except KeyError:
            # domain already removed, see hypervisor/domains/vm_tags.py
            # sometimes libvirt sends us the remove event too late
            # we still update storage and tag attributes
            pass
        else:
            self.handler.tag_db.remove_sub_object(vm.name)
            # update Storage pools in case VM had volumes that were deleted
            self.storage.update()
            self.update_domain_count()

    def update_domain_count(self):
        """Update domain state count tags."""
        for tag in ('nvm', 'vmpaused', 'vmstarted', 'vmstopped', 'cpualloc',
                    'cpurunning', 'memalloc', 'memrunning'):
            self.handler.tag_db['__main__'][tag].update_value()

    def vm_define(self, xml_desc):
        """Create a VM on the Hypervisor
        :param str xml_desc: XML description in libvirt format
        :return: VM name created
        """
        try:
            return self.vir_con.defineXML(xml_desc).name()
        except libvirt.libvirtError:
            logger.exception('Error while creating domain')
            # reraise exception for the cc-server
            raise

    def _count_domain(self, filter=lambda d: True):
        count = 0

        for dom in self.domains.itervalues():
            if filter(dom):
                count += 1

        return count

    @property
    def vm_started(self):
        """Number of VMs started."""
        return self._count_domain(lambda d: d.state == 'running')

    @property
    def vm_stopped(self):
        """Number of VMs stopped."""
        return self._count_domain(lambda d: d.state == 'stopped')

    @property
    def vm_paused(self):
        """Number of VMs paused."""
        return self._count_domain(lambda d: d.state == 'paused')

    @property
    def vm_total(self):
        """Total number of VMs on the hypervisor."""
        return self._count_domain()


class StorageIndex(object):
    """Keep an index of all storage volume paths."""
    def __init__(self, handler, lv_con):
        """
        :param handler: Hypervisor handler instance
        :param lv_con: Libvirt connection
        """
        self.handler = handler
        self.lv_con = lv_con
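        # build a name -> Storage map covering both inactive (defined) and
        # active storage pools known to libvirt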
        self.storages = dict(
            (s.name, s) for s in imap(
                Storage,
                imap(
                    lv_con.storagePoolLookupByName,
                    chain(
                        lv_con.listDefinedStoragePools(),
                        lv_con.listStoragePools(),
                    ),
                ),
            ),
        )

        self.paths = None
        self.update_path_index()
    def update(self):
        """Update storage pools and volumes."""
        # go through all storage pools and check whether each one is already
        # in the index
        for lv_storage in imap(
            self.lv_con.storagePoolLookupByName,