Ubuntu multipass

# install multipass
sudo snap install multipass --edge --classic
 
snap info multipass
multipass version
 
# create VM
multipass launch --name vm1
 
multipass launch --name vm5 --disk 4G --mem 256M core18
 
multipass exec vm3 -- lsb_release -a
 
multipass mount /tmp/mp vm3
 
multipass transfer /etc/fstab vm3:/tmp/y
 
# run both commands inside the VM (otherwise everything after && would run on the host)
multipass exec vm3 -- sudo bash -c "apt update && apt dist-upgrade -y"

SSH login to VM
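
To get the VM's IP for the command below, multipass itself can report it (a quick sketch; assumes the instance is named vm1):

multipass info vm1 | awk '/IPv4/ {print $2}'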

sudo ssh ubuntu@<VM_IP> -i /var/snap/multipass/common/data/multipassd/ssh-keys/id_rsa
multipass delete vm2
multipass purge 
 
sudo multipass set local.driver=libvirt
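
# verify the active driver setting
multipass get local.driver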

Links
https://multipass.run/
https://multipass.run/docs/launch-command

Check Open vSwitch

#!/bin/bash
 
export OS_ENV="@globals.environment@"
 
 
if [ "${OS_ENV}" == "dev" ]; then
    export PYENV_ROOT="$HOME/.pyenv"
    export PATH="$PYENV_ROOT/bin:$PATH"
    eval "$(pyenv init -)"
fi
 
source /etc/kolla/admin-openrc.sh
 
EXIT_CODE=0
 
# search for broken ovs entry in DB
for NODE in $(openstack compute service list -c Host -f value | sort -u); do
    OUTPUT=$(ssh ${NODE} docker exec openvswitch_vswitchd ovsdb-client dump | grep qvo | egrep -v "tag|mac" | cut -d "\"" -f2)
    for PORT in ${OUTPUT}; do
        printf "%-20s %s\n" "${NODE}" "${PORT}"

        EXIT_CODE=1
    done
done

exit ${EXIT_CODE}
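
If a stale qvo port is confirmed to have no owning VM, it can usually be removed from the integration bridge; a hedged one-liner (double-check the port before deleting):

ssh ${NODE} docker exec openvswitch_vswitchd ovs-vsctl --if-exists del-port br-int ${PORT}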

Check server

#!/bin/bash
 
source /etc/kolla/admin-openrc.sh
 
EXIT_CODE=0
 
# search for server with status ERROR
OUTPUT="$(openstack server list --all --status=ERROR -c ID -c Name -c Status -f value)"
#openstack server show ${SERVER_ID} -c fault -f value
if [ -n "${OUTPUT}" ]; then
    echo "${OUTPUT}"
 
    EXIT_CODE=1
fi
 
# search for server with status VERIFY_RESIZE
OUTPUT="$(openstack server list --all --status=VERIFY_RESIZE -c ID -c Name -c Status -f value)"
if [ -n "${OUTPUT}" ]; then
    echo "${OUTPUT}"
 
    EXIT_CODE=1
fi
 
# search for server processes on wrong compute node
for COMPUTE_NODE in $(openstack compute service list --service nova-compute -c Host -f value); do
    for SERVER_ID in $(ssh ${COMPUTE_NODE} pgrep qemu -a | grep -o -P '(?<=-uuid ).*(?= -smbios)'); do
        # compare the host nova knows about with the node the process actually runs on
        NOVA_HOST=$(openstack server show ${SERVER_ID} -c OS-EXT-SRV-ATTR:host -f value)
        if [ "${NOVA_HOST}" != "${COMPUTE_NODE}" ]; then
            printf "%-20s %s\n" "${COMPUTE_NODE}" "${SERVER_ID}"

            EXIT_CODE=1
        fi
    done
done

exit ${EXIT_CODE}

OpenStack: placement (resource provider)

Install CLI client
https://docs.openstack.org/releasenotes/osc-placement/

pip install osc-placement
 
echo 3.5.6 > .python-version

resource provider allocation

# list resource providers (one per hypervisor)
openstack resource provider list --sort-column name
 
# show
openstack resource provider allocation show ${SERVER_ID}
 
# delete
openstack resource provider allocation delete ${SERVER_ID}

Add resource provider to aggregate

# add compute node to AZ aggregate
openstack aggregate add host az1-aggregate ewos1-comX-prod
 
# get resource provider ID
RESOURCE_PROVIDER_ID=$(openstack --os-placement-api-version=1.2 resource provider show XXXXXX-XXXX-XXXX-XXXX-XXXXXX -c uuid -f value)
echo ${RESOURCE_PROVIDER_ID}
 
# get aggregate ID
AGGREGATE_ID=$(openstack --os-compute-api-version=2.53 aggregate show XXX -c uuid -f value)
echo ${AGGREGATE_ID}
 
# Add resource provider to that aggregate
openstack --os-placement-api-version=1.2 resource provider aggregate set --aggregate ${AGGREGATE_ID} ${RESOURCE_PROVIDER_ID}

Check multiple / broken resource provider allocation
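
A minimal sketch of such a check (assumes the osc-placement CLI plugin is installed, admin credentials are sourced, and the resource provider name equals the nova-compute host name): for every server, the allocation should reference exactly one compute resource provider, and it should match the hypervisor the server runs on.

#!/bin/bash

source /etc/kolla/admin-openrc.sh

for SERVER_ID in $(openstack server list --all -c ID -f value); do
    # hypervisor the server is currently running on
    HOST=$(openstack server show ${SERVER_ID} -c OS-EXT-SRV-ATTR:host -f value)
    # resource provider of that hypervisor (assumption: RP name == host name)
    RP_UUID=$(openstack resource provider list --name ${HOST} -c uuid -f value)
    # resource providers referenced by the server's allocation
    ALLOC_RPS=$(openstack resource provider allocation show ${SERVER_ID} -c resource_provider -f value)

    if [ "$(echo ${ALLOC_RPS} | wc -w)" -ne 1 ] || [ "${ALLOC_RPS}" != "${RP_UUID}" ]; then
        echo "broken allocation: server=${SERVER_ID} host=${HOST} allocation(s)=${ALLOC_RPS}"
    fi
done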

qemu guest agent

OpenStack admin

openstack image set --property hw_qemu_guest_agent=yes ${IMAGE_ID}

Within the VM

# check whether hw_qemu_guest_agent is enabled
ls -l /dev/virtio-ports/org.qemu.guest_agent.0
 
# install qemu-guest-agent
sudo apt-get install -y qemu-guest-agent
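
# on systemd-based guests, also make sure the agent is running
sudo systemctl enable --now qemu-guest-agent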

List supported commands

docker exec -ti nova_libvirt virsh qemu-agent-command instance-000069d9 '{"execute":"guest-info"}'

Execute command

openstack server show d82ca1de-1fcd-4ca6-84db-84891ec37796 -c OS-EXT-SRV-ATTR:hypervisor_hostname -c OS-EXT-SRV-ATTR:instance_name
docker exec -ti nova_libvirt virsh qemu-agent-command instance-000069d9 '{"execute":"guest-network-get-interfaces"}'
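
# quick liveness check: guest-ping returns an empty result when the agent responds
docker exec -ti nova_libvirt virsh qemu-agent-command instance-000069d9 '{"execute":"guest-ping"}'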

Links
https://www.sebastien-han.fr/blog/2015/02/09/openstack-perform-consistent-snapshots-with-qemu-guest-agent/
https://www.ovh.com/blog/create-and-use-openstack-snapshots/
http://wiki.stoney-cloud.org/wiki/Qemu_Guest_Agent_Integration

Create neutron probe

Install crudini

docker exec -ti -u root neutron_l3_agent apt update
docker exec -ti -u root neutron_l3_agent apt install -y crudini

Create configuration

docker exec -ti neutron_l3_agent bash
umask 077
cat /etc/neutron/neutron.conf > /etc/neutron/debug.ini
crudini --merge /etc/neutron/debug.ini < /etc/neutron/l3_agent.ini

Export credentials

unset HISTFILE
# cat /etc/kolla/admin-openrc.sh
# paste export OS_XXX

Get network ID

SERVER_ID=074e2a72-9bd7-488f-af3d-f45f3bc0b6e7
 
PORT_ID=$(openstack port list --server ${SERVER_ID} -c id -f value)
NETWORK_ID=$(openstack port show ${PORT_ID} -c network_id -f value)
echo ${NETWORK_ID}

Create probe

neutron-debug --config-file /etc/neutron/debug.ini probe-create ${NETWORK_ID}

Get probe port ID
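
One way to find it (a sketch; neutron-debug also provides a probe-list command):

# list all probes created by neutron-debug
neutron-debug --config-file /etc/neutron/debug.ini probe-list

# alternatively, list the ports on the network and look at the Device Owner column
openstack port list --network ${NETWORK_ID} --long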

Barbican (Secret)

# list all secrets
openstack secret list
 
# download
openstack secret get https://barbican.service.example.com/v1/secrets/d5794ec0-a86f-420f-8d03-b1b11b4251bd \
  --payload_content_type application/octet-stream \
  --file /tmp/file1.out
 
# Create secret / certificate
openstack secret store --name=cert1 -t "application/octet-stream" -e base64 --payload="$(base64 < cert1.p12)"

ACL
https://docs.openstack.org/python-barbicanclient/latest/cli/cli_usage.html

# list allowed users
openstack acl get https://barbican.service.example.com/v1/secrets/1111111-2222-3333-4444-5555555555555
 
# allow access for user to secret
openstack acl user add -u ${USER_ID} https://barbican.service.example.com/v1/secrets/1111111-2222-3333-4444-5555555555555
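
# remove access for user to secret
openstack acl user remove -u ${USER_ID} https://barbican.service.example.com/v1/secrets/1111111-2222-3333-4444-5555555555555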

Test
https://docs.citrix.com/en-us/citrix-application-delivery-management-software/13/orchestration/integrate-with-openstack-platform/openstack-configuring-lbaasv2-using-command-line.html

OpenStack: Debug VM

#!/bin/bash

export SERVER_ID=@option.vm@

# search for VM by name
if [ ${#SERVER_ID} -ne 36 ]; then
    RESULT="$(openstack server list --all --name ${SERVER_ID})"
    if [ $(echo "${RESULT}" | wc -l) -eq 5 ]; then
        SERVER_ID=$(echo "${RESULT}" | tail -2 | head -1 | cut -d " " -f2)
    else
        echo "Found several VMs, please choose one from:"
        echo "${RESULT}"
        exit 0
    fi
fi

echo -e "VM:"
openstack server show -c name -c id -c addresses -c OS-EXT-SRV-ATTR:host -c status ${SERVER_ID}

echo -e "\nProject:"
PROJECT_ID=$(openstack server show -c project_id -f value ${SERVER_ID})
openstack project show -c id -c name -c description ${PROJECT_ID}

echo -e "\nDomain:"
DOMAIN_ID=$(openstack project show -c domain_id -f value ${PROJECT_ID})
openstack domain show -c id -c name -c description ${DOMAIN_ID}

echo -e "\nServer:"
openstack server show ${SERVER_ID}

echo -e "\nConsole:"
openstack console url show ${SERVER_ID}

echo -e "\nMigration(s):"
nova migration-list --instance-uuid ${SERVER_ID}

echo -e "\nVM Port(s):"
#nova interface-list ${SERVER_ID}
openstack port list --server ${SERVER_ID} --long
PORT_IDS=$(openstack port list --server ${SERVER_ID} -c id -f value)

for PORT_ID in ${PORT_IDS}; do
    NETWORK_ID=$(openstack port show ${PORT_ID} -c network_id -f value)
    NETWORK_NAME=$(openstack network show ${NETWORK_ID} -c name -f value)

    echo -e "\n+++++ Start network ${NETWORK_NAME} +++++"
    
    echo -e "\nNetwork:"
    openstack network show ${NETWORK_ID}
    
    echo -e "\nSubnet:"
    SUBNET_ID=$(openstack subnet list --network ${NETWORK_ID} -c ID -f value)
    openstack subnet show ${SUBNET_ID}
    
    echo -e "\nNetwork ports:"
    openstack port list --network ${NETWORK_ID}
    
    echo -e "\nSecurity group(s):"
    #SECURITY_GROUP_IDS="$(openstack port show ${PORT_ID} -c security_group_ids -f json | jq -r .security_group_ids[])"
    # workaround for old OSC
    SECURITY_GROUP_IDS="$(openstack port show ${PORT_ID} -c security_group_ids -f json | jq -r .security_group_ids | tr ',' '\n')"
    for SECURITY_GROUP_ID in ${SECURITY_GROUP_IDS}; do
        openstack security group show ${SECURITY_GROUP_ID}
    done
    
    echo -e "\nRouter:"
    ROUTER_DEVICE_ID=$(openstack port list --network ${NETWORK_ID} --device-owner network:ha_router_replicated_interface -c device_id -f value)
    if [ -n "${ROUTER_DEVICE_ID}" ]; then
        ROUTER_HOSTS=$(openstack port list --device-id ${ROUTER_DEVICE_ID} --device-owner network:router_ha_interface -c binding_host_id -f value --sort-column binding_host_id)
        for ROUTER_HOST in ${ROUTER_HOSTS}; do
            echo "ssh -t ${ROUTER_HOST} ip netns exec qrouter-${ROUTER_DEVICE_ID} bash"
            ssh ${ROUTER_HOST} ip netns exec qrouter-${ROUTER_DEVICE_ID} ip a | sed -n '/BROADCAST/,$p' | egrep -v "inet6|valid_lft"
            echo
        done
    fi

    echo -e "\nDHCP/DNS:"
    DHCP_HOSTS=$(openstack port list --network ${NETWORK_ID} --device-owner network:dhcp -c binding_host_id -f value --sort-column binding_host_id)
    for DHCP_HOST in ${DHCP_HOSTS}; do
        echo "ssh -t ${DHCP_HOST} ip netns exec qdhcp-${NETWORK_ID} bash"
        ssh ${DHCP_HOST} ip netns exec qdhcp-${NETWORK_ID} ip a | sed -n '/BROADCAST/,$p' | egrep -v "inet6|valid_lft"
        echo
    done
        
    echo "+++++ END network ${NETWORK_NAME} +++++"
done

Allow (temporary) incoming ping to VM (ICMP traffic)

SERVER_ID=51f8bbe2-4a89-4065-a24f-4a6fa47fadd0
 
PORT_ID=$(openstack port list --server ${SERVER_ID} -c id -f value)
echo ${PORT_ID}
 
#DEP: SECURITY_GROUP_ID=$(openstack port show ${PORT_ID} -c security_group_ids -f json | jq -r .security_group_ids |  tr ',' '\n' | head -1)
SECURITY_GROUP_ID=$(openstack port show ${PORT_ID} -c security_group_ids -f json | jq -r 'first(.security_group_ids[])')
echo ${SECURITY_GROUP_ID}
# openstack security group show ${SECURITY_GROUP_ID}
 
# create security group rule to allow incoming ICMP traffic
SECURITY_GROUP_RULE_ID=$(openstack security group rule create --protocol icmp ${SECURITY_GROUP_ID} -c id -f value)
echo ${SECURITY_GROUP_RULE_ID}
openstack security group rule show ${SECURITY_GROUP_RULE_ID}
 
# remove rule
openstack security group rule delete ${SECURITY_GROUP_RULE_ID}

RabbitMQ

Connect to control node(s)

# show container state
docker ps -a | grep rabbitmq
 
# Connect into RabbitMQ Docker container
docker exec -it rabbitmq bash

RabbitMQ state

# Show status
rabbitmqctl status
 
# Show cluster status
rabbitmqctl cluster_status
 
# Show list_queues
rabbitmqctl list_queues
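
# show only queues that still have messages waiting (sketch)
rabbitmqctl list_queues name messages | awk '$2 > 0'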

CLI

# Start the App
rabbitmqctl start_app 
 
# Stop the App
rabbitmqctl stop_app
 
# Reset the node (the app must be stopped first)
rabbitmqctl reset
 
# Force reset
rabbitmqctl force_reset
 
# Join the cluster
rabbitmqctl join_cluster rabbit@ctl2-stage
 
# Start the App
rabbitmqctl start_app
 
# forget node
rabbitmqctl forget_cluster_node rabbit@nodeX
rabbitmqctl join_cluster rabbit@nodeX
cd /var/lib/rabbitmq/mnesia/rabbit@foo*-dev