# blob: b45526aa6af6e9b7dbb0f43ccbd5c8e30237dd0a
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Sample configuration file for CloudStack agent
# The GUID to identify the agent with, this is mandatory!
# Generate with "uuidgen"
guid=
#resource= the Java class which the agent loads to execute
resource=com.cloud.hypervisor.kvm.resource.LibvirtComputingResource
#workers= number of threads running in agent
workers=5
#host= The IP address of management server
host=localhost
# The time interval in seconds after which agent will check if connected host
# is the preferred host (the first host in the comma-separated list is the
# one) and will attempt to reconnect to the preferred host when it's connected
# to one of the secondary/backup hosts. The timer task is scheduled after agent
# connects to a management server. On connection, it receives admin configured
# cluster-level 'indirect.agent.lb.check.interval' setting that will be used by
# the agent as the preferred host check interval however the following setting
# if defined overrides the received value. The value 0 and lb algorithm 'shuffle'
# disables this background task.
#host.lb.check.interval=0
#port = The port the management server is listening on, default is 8250
port=8250
#cluster= The cluster which the agent belongs to
cluster=default
#pod= The pod which the agent belongs to
pod=default
#zone= The zone which the agent belongs to
zone=default
#public.network.device= the public nic device
# if this is commented, it is autodetected on service startup
# public.network.device=cloudbr0
#private.network.device= the private nic device
# if this is commented, it is autodetected on service startup
# private.network.device=cloudbr1
#guest.network.device= the guest nic device
# if this is commented, the private nic device will be used
# local storage path, by default, it's /var/lib/libvirt/images/
#local.storage.path=/var/lib/libvirt/images/
# Qemu socket path, directory where Qemu sockets are placed.
# These sockets are for the Qemu Guest Agent and SSVM provisioning
# Make sure that AppArmor or SELinux allow libvirt to write there
#qemu.sockets.path=/var/lib/libvirt/qemu
# The UUID for the local storage pool, this is mandatory!
# Generate with "uuidgen"
local.storage.uuid=
# Location for KVM scripts
domr.scripts.dir=scripts/network/domr/kvm
# the timeout for time-consuming operations, such as create/copy snapshot
#cmds.timeout=7200
# set the vm migrate speed, by default, it will try to guess the speed of the guest network
# In MegaBytes per second
#vm.migrate.speed=0
# set target downtime at end of livemigration, the 'hiccup' for final copy. Higher numbers
# make livemigration easier, lower numbers may cause migration to never complete. Less than 1
# means hypervisor default (20ms).
#vm.migrate.downtime=0
# Busy VMs may never finish migrating, depending on environment. When it's available, we will
# want to add support for autoconvergence migration flag which should fix this. Set an upper
# limit in milliseconds for how long live migration should wait, at which point VM is paused and
# migration will finish quickly. Less than 1 means disabled.
#vm.migrate.pauseafter=0
# set the type of bridge used on the hypervisor, this defines what commands the resource
# will use to setup networking. Currently supported NATIVE, OPENVSWITCH
#network.bridge.type=native
# set the driver used to plug and unplug nics from the bridges
# a sensible default will be selected based on the network.bridge.type but can
# be overridden here.
# native = com.cloud.hypervisor.kvm.resource.BridgeVifDriver
# openvswitch = com.cloud.hypervisor.kvm.resource.OvsVifDriver
#libvirt.vif.driver=com.cloud.hypervisor.kvm.resource.BridgeVifDriver
# Set DPDK Support on OpenVswitch
#openvswitch.dpdk.enabled=true
#openvswitch.dpdk.ovs.path=/var/run/openvswitch
# set the hypervisor type, values are: kvm, lxc
hypervisor.type=kvm
# set the hypervisor URI. Usually there is no need for changing this
# For KVM: qemu:///system
# For LXC: lxc:///
# hypervisor.uri=qemu:///system
# settings to enable direct networking in libvirt, should not be used
# on hosts that run system vms, values for mode are: private, bridge, vepa
# libvirt.vif.driver=com.cloud.hypervisor.kvm.resource.DirectVifDriver
# network.direct.source.mode=private
# network.direct.device=eth0
# setting to enable the cpu model to kvm guest globally.
# three options: custom, host-model and host-passthrough.
# custom - use the custom CPU model specified by guest.cpu.model.
# host-model - identify the named CPU model which most closely matches the host,
# and then request additional CPU flags to complete the match. This should give
# close to maximum functionality/performance, while maintaining good
# reliability/compatibility if the guest is migrated to another host with slightly different host CPUs.
# host-passthrough - tell KVM to passthrough the host CPU with no modifications.
# The difference to host-model is that instead of just matching feature flags,
# every last detail of the host CPU is matched. This gives absolutely best performance,
# and can be important to some apps which check low level CPU details,
# but it comes at a cost wrt migration. The guest can only be migrated to
# an exactly matching host CPU.
#
# guest.cpu.mode=custom|host-model|host-passthrough
# This param is only valid if guest.cpu.mode=custom,
# for example: "Conroe", "Penryn", "Nehalem", "Westmere", "pentiumpro" and so
# on; run "virsh capabilities" for more details.
# guest.cpu.model=
#
# This param will require CPU features on the <cpu> section
# guest.cpu.features=vmx vme
#
# vm.memballoon.disable=true
# Disable memory ballooning on vm guests for overcommit, by default overcommit
# feature enables balloon and sets currentMemory to a minimum value.
#
# vm.diskactivity.checkenabled=false
# Set to true to check disk activity on VM's disks before starting a VM. This only applies
# to QCOW2 files, and ensures that there is no other running instance accessing
# the file before starting. It works by checking the modify time against the current time,
# so care must be taken to ensure the cluster has time synced, otherwise VMs may fail to start.
#
# vm.diskactivity.checktimeout_s=120
# Timeout for giving up on waiting for VM's disk files to become inactive. Hitting
# this timeout will result in failure to start VM.
#
# vm.diskactivity.inactivetime_ms=30000
# This is the length of time that the disk needs to be inactive in order to pass the check.
# This means current time minus mtime of disk file needs to be greater than this number.
# It also has the side effect of setting the minimum threshold between a stop and start of
# a given VM.
#
# kvmclock.disable=false
# Some newer linux kernels are incapable of reliably migrating vms with kvmclock
# This is a workaround for the bug, admin can set this to true per-host
#
# vm.rng.enable=false
# This enables the VirtIO Random Number Generator device for guests.
#
# vm.rng.model=random
# The model of VirtIO Random Number Generator (RNG) to present to the Guest.
# Currently only 'random' is supported.
#
# vm.rng.path=/dev/random
# Local Random Number Device Generator to use for VirtIO RNG for Guests.
# This is usually /dev/random, but per platform this might be different
#
# vm.rng.rate.bytes=2048
# The amount of bytes the Guest may request/obtain from the RNG in the period
# specified below.
#
# vm.rng.rate.period=1000
# The number of milliseconds in which the guest is allowed to obtain the bytes
# specified above.
#
# router.aggregation.command.each.timeout=600
# timeout value for aggregation commands sent to the virtual router
#
# host.overcommit.mem.mb = 0
# allows increasing the amount of RAM available on the host virtually to utilize Zswap, KSM features
# and modern fast SSD/3D XPoint devices. Specified amount of MBs is added to the memory agent
# reports to the Management Server
# Default: 0
#
# host.reserved.mem.mb = 0
# How much host memory to reserve for non-allocation.
# A useful parameter if a node uses some other software that requires memory,
# or in case that OOM Killer kicks in sometimes.
# If this parameter is used, host.overcommit.mem.mb must be set to 0.
# Default value: 0
#
# vm.watchdog.model=i6300esb
# The model of Watchdog timer to present to the Guest
# For all models refer to the libvirt documentation.
# Recommended value is: i6300esb
#
# vm.watchdog.action=none
# Action to take when the Guest/Instance is no longer notifying the Watchdog
# timer.
# For all actions refer to the libvirt documentation.
# Recommended values are: none, reset and poweroff.