from logging import debug, error, warning, info
from lvm import LVM
from sjrpc.utils import RpcHandler
from sjrpc.utils import pure
from errors import HostError, HypervisorError
from jobs import ReceiveFileJob, SendFileJob, DrbdCopyJob, TCPTunnelJob
#FIXME should not be needed
import traceback, sys
from libvirt import libvirtError
#FIXME clean this up
# record which hypervisor bindings can be imported on this host
_MOD_KVM = True
try:
    import kvm
except ImportError:
    _MOD_KVM = False

_MOD_XEN = True
try:
    import xen
except ImportError:
    _MOD_XEN = False

class NodeHandler(RpcHandler):
    '''
    Main node handler that exports the host capabilities to the server.
    '''
    def __init__(self, connection, detect_hv=True, safe_mode=True,
                 force_xen=False):
        super(NodeHandler, self).__init__()
        self._connection = connection
        self._safe_mode = safe_mode
        self._host_handle = None
        if detect_hv:
            debug('Hypervisor detection in progress')
            if _MOD_KVM and not force_xen:
                debug('Initializing connection to the local KVM hypervisor')
                self._host_handle = kvm.KvmHypervisor()
            elif _MOD_XEN:
                debug('Initializing connection to the local Xen hypervisor')
                self._host_handle = xen.XenHypervisor()
            if self._host_handle is None:
                debug('Hypervisor detection failed')
        if not detect_hv or self._host_handle is None:
            debug('Hypervisor detection disabled, running as regular node')
            self._host_handle = LocalHost()
        # methods that execute administrative commands, to be banned when
        # running in safe mode
        self.UNSAFE_METHODS = ['execute_command', 'shutdown']
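
        # Each tag map entry below associates a tag name with a 3-tuple:
        #   (availability check called with the handled object,
        #    value callable called with the object and the tag name,
        #    TTL of the value in seconds; -1 apparently marks static
        #    values that never expire)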
        self.HV_TAG_MANDATORY = ['h']
        self.HV_TAG_MAP = {
            'version'   : ( lambda o: True,
                            lambda o,t: str(__version__),
                            -1),
            'libvirtver': self._tag_map_direct('get_libvirt_version', -1),
            'htype'     : self._tag_map_direct('get_hv_type', -1),
            'hserial'   : self._tag_map_direct('get_hw_serial', -1),
            'hvendor'   : self._tag_map_direct('get_hw_vendor', -1),
            'arch'      : self._tag_map_direct('get_arch', -1),
            'hvm'       : self._tag_map_direct('get_hvm_available', -1),
            'cpu'       : self._tag_map_direct('get_cpu', -1),
            'cpulogical': self._tag_map_direct('get_cpu_thread', -1),
            'chaserial' : self._tag_map_direct('get_chassis_serial', -1),
            'chaasset'  : self._tag_map_direct('get_chassis_asset', -1),
            # one day
            'hbios'     : self._tag_map_direct('get_hw_bios', 24*3600),
            'hvver'     : self._tag_map_direct('get_hv_version', 24*3600),
            'platform'  : self._tag_map_direct('get_platform', 24*3600),
            'os'        : self._tag_map_direct('get_system', 24*3600),
            'uname'     : self._tag_map_direct('get_uname', 24*3600),
            'cpufreq'   : self._tag_map_direct('get_cpu_frequency', 24*3600),
            'mem'       : self._tag_map_direct('get_mem', 24*3600),
            'disk'      : self._tag_map_keys('get_disks', 24*3600),
            'h'         : self._tag_map_direct('get_name', 24*3600),
            # one hour
            # one minute
            'memfree'   : self._tag_map_direct('get_mem_free', 60),
            'memused'   : self._tag_map_direct('get_mem_used', 60),
            'sto'       : ( lambda o: hasattr(o, 'storage'),
                            lambda o,t: ' '.join(
                                getattr(o, 'storage')().pool_list()),
                            60),
            # 5 seconds
            'uptime'    : self._tag_map_direct('get_uptime', 5),
            'cpuuse'    : self._tag_map_direct('get_cpu_usage', 5),
            'load'      : self._tag_map_direct('get_loadavg', 5),
            'nvm'       : self._tag_map_counter('vm_list', 5),
            'vmstarted' : self._tag_map_counter('vm_list_running', 5),
            'vmstopped' : self._tag_map_counter('vm_list_stopped', 5),
            'vmpaused'  : self._tag_map_counter('vm_list_paused', 5),
            'rjobs'     : ( lambda o: True,
                            lambda o,t: str(len(
                                self._host_handle.jobmgr.list()['running'])),
                            5),
        }
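        # Glob tags ('disk*', 'sto*') are presumably expanded by the
        # _helper_hv_* methods below into one tag per disk or storage pool.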
        self.HV_TAG_GLOB = {
            'disk*'     : self._tag_map_helper(self._helper_hv_disk, 24*3600),
            'sto*'      : self._tag_map_helper(self._helper_hv_sto, 60),
        }
        self.VM_TAG_MANDATORY = ['hv', 'h']
        self.VM_TAG_MAP = {
            'hv'        : ( lambda o: hasattr(o, 'hypervisor'),
                            lambda o,t: o.hypervisor().get_name(),
                            -1),
            'htype'     : ( lambda o: hasattr(o, 'hypervisor'),
                            lambda o,t: o.hypervisor().get_hv_type(),
                            -1),
            'arch'      : self._tag_map_direct('get_arch', -1),
            'h'         : self._tag_map_direct('get_name', -1),
            # one day
            # one hour
            'cpu'       : self._tag_map_direct('get_cpu_core', 3600),
            'mem'       : self._tag_map_direct('get_mem', 3600),
            'memmax'    : self._tag_map_direct('get_mem_max', 3600),
            'vncport'   : self._tag_map_direct('get_vnc_port', 3600),
            'status'    : ( lambda o: True,
                            lambda o,t: 'running' if o.is_active()
                                        else 'paused' if o.is_paused()
                                        else 'stopped',
                            5), # FIXME crappy tag implementation
            #'cpuuse'   : self._tag_map_direct('get_cpu_usage'),
        }
        self.VM_TAG_GLOB = {
            'disk*'     : self._tag_map_helper(self._helper_vm_disk, 3600),
            'nic*'      : self._tag_map_helper(self._helper_vm_nic, 3600),
        }
        self._register_vm = []
    def __getitem__(self, name):
        '''
        Return the handler method named `name`, hiding private members and
        administrative methods disabled by the safe mode.
        '''
        # filter the private members access
        if name.startswith('_'):
            raise KeyError('Remote name `%s` is private' % repr(name))
        # filter command execution methods when running in safe mode
        elif self._safe_mode and name in self.UNSAFE_METHODS:
            raise KeyError('Remote name `%s` is disabled by configuration'
                           % repr(name))
        else:
            debug('Called %s.%s' % (self.__class__.__name__, name))
            return super(NodeHandler, self).__getitem__(name)
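
    # The _tag_map_* factories build the (check, fetch, ttl) tuples used in
    # the tag maps above: `check` tests that the handled object exposes the
    # needed method, `fetch` computes the tag value on demand.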
    def _tag_map_direct(self, method, ttl):
        '''
        Map a tag to the value returned by `method` on the handled object.
        '''
        return ( lambda o: hasattr(o, method),
                 lambda o,t: str(getattr(o, method)()),
                 ttl)

    def _tag_map_counter(self, method, ttl):
        '''
        Map a tag to the number of items returned by `method`.
        '''
        return ( lambda o: hasattr(o, method),
                 lambda o,t: str(len(getattr(o, method)())),
                 ttl)

    def _tag_map_keys(self, method, ttl):
        '''
        Map a tag to the space-joined keys of the dict returned by `method`.
        '''
        return ( lambda o: hasattr(o, method),
                 lambda o,t: ' '.join(getattr(o, method)().keys()),
                 ttl)

    def _tag_map_helper(self, helper, ttl):
        '''
        Map a glob tag to a helper that lists and resolves matching tags.
        '''
        return ( lambda o, resolve=False: helper(o, resolve=resolve),
                 lambda o, tag_name=None, resolve=False:
                     helper(o, tag_name=tag_name, resolve=resolve),
                 ttl)

    def _helper_hv_disk(self, hv, tag_name=None, resolve=True):