
OpenStack: Debug / cleanup DHCP

Restart DHCP namespaces

openstack subnet set --no-dhcp ${SUBNET_ID}
openstack subnet set --dhcp ${SUBNET_ID}
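 
Toggling DHCP this way should tear down and recreate the qdhcp namespace for the subnet's network. A quick check (sketch; NETWORK_ID is the subnet's network, and the ip netns command runs on the node hosting the DHCP agent):
 
openstack network agent list --network ${NETWORK_ID} --agent-type dhcp
ip netns | grep qdhcp-${NETWORK_ID}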

Find unnecessary DHCP namespaces

# expected number of DHCP ports per network (= DHCP agents scheduled per network)
MAX_DHCP_NS=3
SUBNET_IDS=$(openstack subnet list --dhcp -c ID -f value)
for SUBNET_ID in ${SUBNET_IDS}; do
    NETWORK_ID=$(openstack subnet show ${SUBNET_ID} -c network_id -f value)
    DHCP_PORTS="$(openstack port list --device-owner network:dhcp --network ${NETWORK_ID} -c ID -c binding_host_id -c fixed_ips -c status -f value)"

    # report networks whose DHCP port count differs from the expected value
    if [ $(echo "${DHCP_PORTS}" | wc -l) -ne ${MAX_DHCP_NS} ]; then
        echo "NETWORK_ID: ${NETWORK_ID}"
        echo "${DHCP_PORTS}"
        echo
    fi
done

Add / remove DHCP ports
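 
One way to add or remove DHCP ports is to (un)schedule the network on a DHCP agent; the agent then creates or deletes its port. A sketch (AGENT_ID and NETWORK_ID are placeholders):
 
openstack network agent list --agent-type dhcp
openstack network agent add network --dhcp ${AGENT_ID} ${NETWORK_ID}
openstack network agent remove network --dhcp ${AGENT_ID} ${NETWORK_ID}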

OpenStack: RBAC shared network

# share network foo-net-01 with project foo-project1 via an RBAC policy
openstack network rbac create --target-project foo-project1 --action access_as_shared --type network foo-net-01
 
# show rbac quota
neutron quota-show --tenant_id <PROJECT_ID> | grep rbac_policy
 
# set rbac quota to unlimited
openstack quota set --rbac-policies -1 <PROJECT_ID>
 
openstack network rbac list
 
openstack network rbac show ${RBAC_ID}
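 
To revoke the share again, delete the RBAC policy; the network then disappears from the target project's network list (assuming no ports of that project still use it):
 
openstack network rbac delete ${RBAC_ID}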

Links
https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/network-rbac.html
https://docs.openstack.org/mitaka/networking-guide/config-rbac.html
https://docs.openstack.org/python-openstackclient/pike/cli/command-objects/quota.html
https://docs.openstack.org/ocata/admin-guide/cli-networking-advanced-quotas.html

DevStack

DevStack XENA

sudo apt -y install git jq vim
sudo apt purge -y python3-distro-info
 
git clone --branch "stable/xena" https://opendev.org/openstack/devstack
 
# host IP of the primary interface (adjust "ens3" to the local interface name)
HOST_IP=$(ip -4 -j a | jq -r '.[] | select(.ifname == "ens3") | .addr_info[].local')
echo ${HOST_IP}
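 
A minimal local.conf to go with it, then run stack.sh (a sketch; the passwords are placeholders):
 
cat > devstack/local.conf <<EOF
[[local|localrc]]
HOST_IP=${HOST_IP}
ADMIN_PASSWORD=secret
DATABASE_PASSWORD=secret
RABBIT_PASSWORD=secret
SERVICE_PASSWORD=secret
EOF
 
cd devstack
./stack.sh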

Links
https://openstack.goffinet.org/03-02-openstack-lab-devstack.html
http://lia.deis.unibo.it/Courses/CompNetworksM/1718/slides/NetworksM_Cloud180518_v1.pdf
https://opnfvblog.wordpress.com/2016/10/27/devstack-localconf/
https://01.org/sites/default/files/page/accelerating_openstack_networking_with_intel_architecture_rev008.pdf

gnocchi

openstack metric resource show ${SERVER_ID}
openstack metric measures show ${METRICS_ID}
openstack metric archive-policy list
 
gnocchi resource list | grep ${PROJECT_ID} | grep disk
gnocchi resource show 592a1d1f-696d-4f58-ba5e-9e1367ffef62
gnocchi measures show 031ec7f2-2d14-4adf-a632-ec765c36bad6
gnocchi resource list | grep ${SERVER_ID}
gnocchi archive-policy show evw_min_max_mean
gnocchi measures show 7f6f6744-2f7a-47c5-9740-dbeca6eee5a2 --granularity 3600
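 
Measures can also be fetched by metric name on a resource (sketch; the metric name depends on the ceilometer version, e.g. cpu vs. cpu_util, and the granularity must exist in the archive policy):
 
openstack metric measures show --resource-id ${SERVER_ID} --aggregation mean --granularity 300 cpu_util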

Site-to-site VPN connection between OpenStack (VPNaaS) and an AVM Fritz!Box

FRITZBOX_WAN_IP=111.1.2.3  # curl ipinfo.io/ip
FRITZBOX_CIDR=192.168.178.0/24
OS_USER=foo
PROJECT_ID=xxxxxxxxxxxxxx
PSK='PASS1234'   # apg -m 32 -a1
 
openstack vpn ike policy create ${OS_USER}-ike-aes256-sha512 \
  --encryption-algorithm aes-256 \
  --auth-algorithm sha512 \
  --pfs group2
 
openstack vpn ipsec policy create ${OS_USER}-ipsec-aes256-sha512 \
  --encryption-algorithm aes-256 \
  --auth-algorithm sha512 \
  --pfs group2
 
# openstack router list
# ROUTER_ID=$(openstack router list -c ID -f value)
openstack vpn service create ${OS_USER}-vpn-service1 \
  --router ${ROUTER_ID} \
  -c external_v4_ip \
  -f value
 
# openstack subnet list
# SUBNET_ID=$(openstack subnet list -c ID -f value --name ${OS_USER}-subnet)
openstack vpn endpoint group create ${OS_USER}-vpn-endpoint-local \
  --type subnet \
  --value ${SUBNET_ID}
 
openstack vpn endpoint group create ${OS_USER}-vpn-endpoint-peer \
  --type cidr \
  --value ${FRITZBOX_CIDR}
 
openstack vpn ipsec site connection create ${OS_USER}-vpn-conn1 \
  --vpnservice ${OS_USER}-vpn-service1 \
  --ikepolicy ${OS_USER}-ike-aes256-sha512 \
  --ipsecpolicy ${OS_USER}-ipsec-aes256-sha512 \
  --peer-address ${FRITZBOX_WAN_IP} \
  --peer-id ${FRITZBOX_WAN_IP} \
  --psk ${PSK} \
  --local-endpoint-group ${OS_USER}-vpn-endpoint-local \
  --peer-endpoint-group ${OS_USER}-vpn-endpoint-peer
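 
# verify: once the Fritz!Box side is configured, the connection should go ACTIVE
openstack vpn ipsec site connection list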
 

Cleanup / delete OpenStack objects

PROJECT_ID=f0f745a9c79c47fdbbdd187d728f9e41
 
# Delete VMs
openstack server list --project ${PROJECT_ID}
openstack server delete ${SERVER_ID}
 
# Delete volumes
openstack volume list --project ${PROJECT_ID}
openstack volume delete ${VOLUME_ID}
 
# Delete images
openstack image list --private --long | grep ${PROJECT_ID}
openstack image delete ${IMAGE_ID}
 
# Delete loadbalancer
openstack loadbalancer list --project ${PROJECT_ID}
openstack loadbalancer delete --cascade ${LOADBALANCER_ID}
 
# Delete secrets
openstack secret list
openstack secret delete ${SECRET_URL}
 
# Delete VPNs
openstack vpn ipsec site connection list --long | grep ${PROJECT_ID}
openstack vpn ipsec site connection delete ${IPSEC_SITE_CONNECTION_ID}
openstack vpn endpoint group list --long | grep ${PROJECT_ID}
openstack vpn endpoint group delete ${VPN_LOCAL_ENDPOINT_GROUP_ID} ${VPN_PEER_ENDPOINT_GROUP_ID}
openstack vpn service list --long | grep ${PROJECT_ID}
openstack vpn service delete ${VPN_SERVICE_ID}
openstack vpn ipsec policy list --long | grep ${PROJECT_ID}
openstack vpn ipsec policy delete ${VPN_IPSEC_POLICY_ID}
openstack vpn ike policy list --long | grep ${PROJECT_ID}
openstack vpn ike policy delete ${VPN_IKE_POLICY_ID}
 
# Delete k8s
openstack coe cluster list 
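# delete a cluster (Magnum); CLUSTER_ID is a placeholder
openstack coe cluster delete ${CLUSTER_ID}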
 
# Delete floating ip
openstack floating ip list --project ${PROJECT_ID}
openstack floating ip delete ${FLOATING_IP}
 
# Delete router
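# detach subnets and the external gateway first, then delete (IDs are placeholders)
openstack router list --project ${PROJECT_ID}
openstack router remove subnet ${ROUTER_ID} ${SUBNET_ID}
openstack router unset --external-gateway ${ROUTER_ID}
openstack router delete ${ROUTER_ID}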

OpenStack: Debug VPN connection

Find the VPN service and the relevant router UUID

# get VPN connection ID
openstack vpn ipsec site connection list | grep foo
openstack vpn ipsec site connection list --long | grep <project_id>
 
VPN_CONNECTION_ID=142dc25f-13bb-4fda-b093-edf13df98ed8
openstack vpn ipsec site connection show ${VPN_CONNECTION_ID}
 
VPN_SERVICE_ID=$(openstack vpn ipsec site connection show ${VPN_CONNECTION_ID} -c 'VPN Service' -f value)
openstack vpn service show ${VPN_SERVICE_ID}
 
# get router ID
ROUTER_ID=$(openstack vpn service show ${VPN_SERVICE_ID} -c Router -f value)
echo "ROUTER_ID=${ROUTER_ID}"

Find the control node where the active router is running

ROUTER_PORT_ID=$(openstack port list --device-owner network:router_gateway -f value -c ID --router ${ROUTER_ID})
CONTROL_NODE=$(openstack port show ${ROUTER_PORT_ID} -c binding_host_id -f value)
echo "CONTROL_NODE: ${CONTROL_NODE}"
 
echo "ssh ${CONTROL_NODE} sudo ip netns exec qrouter-${ROUTER_ID} ip a s"

Connect to that control node and "jump" into its neutron-l3-agent docker container
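 
A sketch for a containerized deployment (e.g. kolla-ansible, where the container is typically named neutron_l3_agent; adjust to your installer). The ip xfrm commands show the kernel IPsec state inside the router namespace:
 
ssh ${CONTROL_NODE}
sudo docker ps | grep l3_agent
sudo docker exec -it neutron_l3_agent bash
 
ip netns exec qrouter-${ROUTER_ID} ip xfrm state
ip netns exec qrouter-${ROUTER_ID} ip xfrm policy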