router / dnsmasq
Install LXD
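A minimal install sketch via snap (assumes a snap-capable Ubuntu host; lxd init --auto just accepts the defaults):

sudo snap install lxd
sudo lxd init --auto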
Create Ubuntu minimal image container
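A possible sketch using the ubuntu-minimal: image remote (available in recent LXD releases); the container name "router" and the dnsmasq install are assumptions matching the router / dnsmasq role above:

lxc launch ubuntu-minimal:22.04 router
lxc exec router -- apt-get update
lxc exec router -- apt-get install -y dnsmasq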
Define node variables
NODE=com4-dev NODE_BMC_HOST=com4-dev.ipmi.dev.i.example.com NODE_MAC_NIC1=00:11:22:33:44:55
Define env variables
NODE_BMC_USER=ADMIN NODE_BMC_PASS=ADMIN
Create new node with Redfish (PXE device boot broken)
https://docs.openstack.org/ironic/latest/admin/drivers/redfish.html
openstack baremetal node create \
    --name ${NODE} \
    --driver redfish \
    --driver-info redfish_address="https://${NODE_BMC_HOST}" \
    --driver-info redfish_username=${NODE_BMC_USER} \
    --driver-info redfish_password=${NODE_BMC_PASS} \
    --driver-info redfish_verify_ca=false \
    --driver-info redfish_system_id=/redfish/v1/Systems/1
Create new node with IPMI
https://docs.openstack.org/ironic/latest/admin/drivers/ipmitool.html
openstack baremetal node create \
    --name ${NODE} \
    --driver ipmi \
    --driver-info ipmi_address=${NODE_BMC_HOST} \
    --driver-info ipmi_username=${NODE_BMC_USER} \
    --driver-info ipmi_password=${NODE_BMC_PASS}
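Node create itself does not use NODE_MAC_NIC1 from the variables above; a port with that MAC is normally registered separately. A sketch (assumes ${NODE} resolves to the node created above):

openstack baremetal port create ${NODE_MAC_NIC1} \
    --node $(openstack baremetal node show ${NODE} -c uuid -f value)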
iPXE
https://docs.openstack.org/ironic/latest/admin/interfaces/boot.html#pxe-boot
# Intel Ethernet Connections Boot Utility, Preboot Images and EFI Drivers
wget https://downloadmirror.intel.com/29137/eng/Preboot.tar.gz
tar xzf Preboot.tar.gz -C /tmp
cd /tmp/APPS/BootUtil/Linux_x64
chmod +x bootutil64e
./bootutil64e
Ansible galaxy
ansible-galaxy install <REPOSITORY>
ansible-galaxy role install --roles-path /tmp https://github.com/avanov/ansible-galaxy-pyenv/archive/refs/tags/1.2.0.tar.gz
mv /tmp/1.2.0 ~/.ansible/roles/avanov.pyenv
https://galaxy.ansible.com/bennojoy/network_interface/ - Network configuration
https://github.com/Oefenweb/ansible-postfix
https://galaxy.ansible.com/geerlingguy/gitlab/
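To pull several of the roles linked above in one step, a requirements file can be used; the role names below are assumptions derived from the linked repositories, so verify them on Galaxy first:

cat > requirements.yml <<EOF
- src: bennojoy.network_interface
- src: Oefenweb.postfix
- src: geerlingguy.gitlab
EOF
ansible-galaxy install -r requirements.yml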
Linux software RAID (mdadm)
https://galaxy.ansible.com/mrlesmithjr/mdadm
ansible-galaxy install mrlesmithjr.mdadm
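A minimal playbook that applies the role; the inventory group "storage" and the file name are placeholders, and the RAID arrays themselves are defined through the role's variables as documented in its README:

# mdadm.yml
- hosts: storage
  become: true
  roles:
    - mrlesmithjr.mdadm

ansible-playbook -i inventory mdadm.yml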
# install multipass
sudo snap install multipass --edge --classic
snap info multipass
multipass version

# create VM
multipass launch --name vm1
multipass launch --name vm5 --disk 4G --mem 256M core18

# work with a VM
multipass exec vm3 -- lsb_release -a
multipass mount /tmp/mp vm3
multipass transfer /etc/fstab vm3:/tmp/y
multipass exec vm3 -- sudo apt update && sudo apt dist-upgrade -y
SSH login to VM
sudo ssh ubuntu@<VM_IP> -i /var/snap/multipass/common/data/multipassd/ssh-keys/id_rsa
multipass delete vm2
multipass purge
sudo multipass set local.driver=libvirt
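A VM can also be provisioned with a cloud-init file at launch time, which helps to make VMs reproducible; the VM name and the file name vm-init.yaml are placeholders:

multipass launch --name vm6 --cloud-init vm-init.yaml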
Links
https://multipass.run/
https://multipass.run/docs/launch-command
#!/bin/bash

export OS_ENV="@globals.environment@"

if [ "${OS_ENV}" == "dev" ]; then
    export PYENV_ROOT="$HOME/.pyenv"
    export PATH="$PYENV_ROOT/bin:$PATH"
    eval "$(pyenv init -)"
fi

source /etc/kolla/admin-openrc.sh

EXIT_CODE=0

# search for broken ovs entry in DB
for NODE in $(openstack compute service list -c Host -f value | sort -u); do
    OUTPUT=$(ssh ${NODE} docker exec openvswitch_vswitchd ovsdb-client dump | grep qvo | egrep -v "tag|mac" | cut -d "\"" -f2)
    for PORT in ${OUTPUT}; do
        printf "%-20s %s\n" "${NODE}" "${PORT}"
        EXIT_CODE=1
    done
done

exit ${EXIT_CODE}
#!/bin/bash

source /etc/kolla/admin-openrc.sh

EXIT_CODE=0

# search for server with status ERROR
OUTPUT="$(openstack server list --all --status=ERROR -c ID -c Name -c Status -f value)"
#openstack server show ${SERVER_ID} -c fault -f value
if [ -n "${OUTPUT}" ]; then
    echo "${OUTPUT}"
    EXIT_CODE=1
fi

# search for server with status VERIFY_RESIZE
OUTPUT="$(openstack server list --all --status=VERIFY_RESIZE -c ID -c Name -c Status -f value)"
if [ -n "${OUTPUT}" ]; then
    echo "${OUTPUT}"
    EXIT_CODE=1
fi

# search for server processes on wrong compute node
for COMPUTE_NODE in $(openstack compute service list --service nova-compute -c Host -f value); do
    for SERVER_ID in $(ssh ${COMPUTE_NODE} pgrep qemu -a | grep -o -P '(?<=-uuid ).*(?= -smbios)'); do
Install CLI client
https://docs.openstack.org/releasenotes/osc-placement/
# the placement CLI is provided by the osc-placement plugin for python-openstackclient
pip install osc-placement
echo 3.5.6 > .python-version
resource provider allocation
RESOURCE_PROVIDER_ID=$(openstack resource provider list --name ${NODE_NAME}.$(hostname -d) -c uuid -f value)
openstack resource provider show --allocations ${RESOURCE_PROVIDER_ID} -c allocations -f json

# list hypervisors
openstack resource provider list --sort-column name

# show allocations of a server
openstack resource provider allocation show ${SERVER_ID}

NODE_NAME=com1-prod.example.com
RESOURCE_PROVIDER_UUID=$(openstack resource provider list --name ${NODE_NAME} -c uuid -f value)
openstack resource provider inventory list ${RESOURCE_PROVIDER_UUID}
openstack resource provider usage show ${RESOURCE_PROVIDER_UUID}

openstack allocation candidate list --resource VCPU=32 --resource DISK_GB=120 --resource MEMORY_MB=4096 --member-of 3f0d0e40-6cf4-422d-a245-ceaffb0ac037
Add resource provider to aggregate
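A sketch with osc-placement; the aggregate UUID below reuses the --member-of example above and is a placeholder. The set command replaces the provider's full aggregate list, and depending on the placement API version a --generation argument may also be required:

RESOURCE_PROVIDER_UUID=$(openstack resource provider list --name ${NODE_NAME} -c uuid -f value)
openstack resource provider aggregate set --aggregate 3f0d0e40-6cf4-422d-a245-ceaffb0ac037 ${RESOURCE_PROVIDER_UUID}
openstack resource provider aggregate list ${RESOURCE_PROVIDER_UUID}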
OpenStack admin
openstack image set --property hw_qemu_guest_agent=yes ${IMAGE_ID}
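To confirm that the property is set on the image:

openstack image show ${IMAGE_ID} -c properties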
Within the VM
# check whether hw_qemu_guest_agent is enabled
ls -l /dev/virtio-ports/org.qemu.guest_agent.0

# install qemu-guest-agent
sudo apt-get install -y qemu-guest-agent
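After installation the agent usually has to be running; on systemd-based guests:

sudo systemctl enable --now qemu-guest-agent
sudo systemctl status qemu-guest-agent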
supported_commands
docker exec -ti nova_libvirt virsh qemu-agent-command instance-000069d9 '{"execute":"guest-info"}'
Execute command
openstack server show d82ca1de-1fcd-4ca6-84db-84891ec37796 -c OS-EXT-SRV-ATTR:hypervisor_hostname -c OS-EXT-SRV-ATTR:instance_name
docker exec -ti nova_libvirt virsh qemu-agent-command instance-000069d9 '{"execute":"guest-network-get-interfaces"}'
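Beyond the network query, the agent can run commands inside the guest via guest-exec / guest-exec-status, provided those commands appear in supported_commands; the pid below is an example value taken from the first response:

docker exec -ti nova_libvirt virsh qemu-agent-command instance-000069d9 '{"execute":"guest-exec","arguments":{"path":"/usr/bin/uname","arg":["-a"],"capture-output":true}}'
# the response contains {"return":{"pid":1234}}; fetch the result with that pid
docker exec -ti nova_libvirt virsh qemu-agent-command instance-000069d9 '{"execute":"guest-exec-status","arguments":{"pid":1234}}'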
Links
https://www.sebastien-han.fr/blog/2015/02/09/openstack-perform-consistent-snapshots-with-qemu-guest-agent/
https://www.ovh.com/blog/create-and-use-openstack-snapshots/
http://wiki.stoney-cloud.org/wiki/Qemu_Guest_Agent_Integration