I. OpenStack Deployment
1. Environment Preparation

Hostname     IP                                    Disk                  CPU   Memory
controller   NIC1: 10.0.0.10, NIC2: unconfigured   sda: 100G             2C    6G
compute01    NIC1: 10.0.0.11, NIC2: unconfigured   sda: 100G, sdb: 20G   2C    4G
compute02    NIC1: 10.0.0.12, NIC2: unconfigured   sda: 100G, sdb: 20G   2C    4G

Operating system: Ubuntu 22.04    Virtualization tool: VMware 15
2. Configure the Offline Environment

# Extract the offline package
tar -zxvf openstackyoga.tar.gz -C /opt/
# Back up the sources file
cp /etc/apt/sources.list{,.bak}
# Configure the offline repository
cat > /etc/apt/sources.list << EOF
deb [trusted=yes] file:// /opt/openstackyoga/debs/
EOF
# Clear the apt cache
apt clean all
# Reload the package index
apt update
3. Environment Preparation
3.1 Configure the Network
# controller node (10.0.0.10)
cat > /etc/netplan/00-installer-config.yaml << EOF
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      dhcp4: false
      addresses: [10.0.0.10/24]
      routes:
        - to: default
          via: 10.0.0.254
      nameservers:
        addresses: [114.114.114.114]
    ens38:
      dhcp4: false
  version: 2
EOF
# Apply the network configuration
netplan apply
# compute01 node (10.0.0.11)
cat > /etc/netplan/00-installer-config.yaml << EOF
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      dhcp4: false
      addresses: [10.0.0.11/24]
      routes:
        - to: default
          via: 10.0.0.254
      nameservers:
        addresses: [114.114.114.114]
    ens38:
      dhcp4: false
  version: 2
EOF
# Apply the network configuration
netplan apply
# compute02 node (10.0.0.12)
cat > /etc/netplan/00-installer-config.yaml << EOF
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      dhcp4: false
      addresses: [10.0.0.12/24]
      routes:
        - to: default
          via: 10.0.0.254
      nameservers:
        addresses: [114.114.114.114]
    ens38:
      dhcp4: false
  version: 2
EOF
# Apply the network configuration
netplan apply
3.2 Configure Hostnames and Name Resolution

# controller node
hostnamectl set-hostname controller.mxq001
# Open a new shell so the new hostname takes effect
bash
# compute01 node
hostnamectl set-hostname compute01.mxq001
# Open a new shell so the new hostname takes effect
bash
# compute02 node
hostnamectl set-hostname compute02.mxq001
# Open a new shell so the new hostname takes effect
bash
cat >> /etc/hosts << EOF
10.0.0.10 controller
10.0.0.11 compute01
10.0.0.12 compute02
EOF
3.3 Time Configuration

# Enable NTP
timedatectl set-ntp true
# Set the timezone to Shanghai
timedatectl set-timezone Asia/Shanghai
# Sync the system clock to the hardware clock
hwclock --systohc
# controller node
# Install the service
apt install -y chrony
# Edit the configuration file
vim /etc/chrony/chrony.conf
20 server controller iburst maxsources 2
61 allow all
63 local stratum 10
# Restart the service
systemctl restart chronyd
# compute01 and compute02 nodes
# Install the service
apt install -y chrony
# Edit the configuration file
vim /etc/chrony/chrony.conf
20 pool controller iburst maxsources 4
# Restart the service
systemctl restart chronyd
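To confirm synchronization after the restart, chrony's own client can be queried (a minimal check; on the compute nodes the controller should show up as the selected source):

# List configured time sources and their reachability
chronyc sources -v
# Show current synchronization status
chronyc tracking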
3.4 Install the OpenStack Client
apt install -y python3-openstackclient
3.5 Install and Configure MariaDB
apt install -y mariadb-server python3-pymysql
cat > /etc/mysql/mariadb.conf.d/99-openstack.cnf << EOF
[mysqld]
bind-address = 0.0.0.0
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF
service mysql restart
mysql_secure_installation
# Enter current password for root: press Enter
# Switch to unix_socket authentication: n
# Change the root password: n
# Remove anonymous users: y
# Disallow root login remotely: n
# Remove the test database and access to it: y
# Reload privilege tables now: y
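A quick sanity check that the service is up and reachable (assuming root still authenticates via the local unix socket, as configured above):

# Connect locally and list databases; the OpenStack databases created later will appear here
mysql -e "SHOW DATABASES;"
# Confirm MariaDB is listening on all interfaces (port 3306)
ss -tnl | grep 3306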
3.6 Install and Configure RabbitMQ
apt install -y rabbitmq-server
Create the openstack user (username: openstack, password: 1qaz@WSX3edc)
rabbitmqctl add_user openstack 1qaz@WSX3edc
Allow the openstack user to configure, write, and read all resources
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
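The user and its permissions can be verified with the standard rabbitmqctl admin commands:

# The openstack user should be listed
rabbitmqctl list_users
# The user should have ".*" for configure, write, and read on the default vhost
rabbitmqctl list_permissions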
3.7 Install and Configure Memcached (used to cache tokens)
apt install -y memcached python3-memcache
vim /etc/memcached.conf
35 -l 0.0.0.0
service memcached restart
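To confirm memcached is now listening on all interfaces rather than only localhost (a minimal check):

# Port 11211 should be bound to 0.0.0.0
ss -tnlp | grep 11211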
4. Deploy and Configure Keystone
Performed on the controller node.
Create the database and user for keystone.
# Create the database
CREATE DATABASE keystone;
# Create the user
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystoneang';
apt install -y keystone
# Back up the configuration file
cp /etc/keystone/keystone.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf
vim /etc/keystone/keystone.conf
[DEFAULT]
log_dir = /var/log/keystone
[application_credential]
[assignment]
[auth]
[cache]
[catalog]
[cors]
[credential]
[database]
connection = mysql+pymysql://keystone:keystoneang@www.controller.mxq001/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[extra_headers]
Distribution = Ubuntu
[federation]
[fernet_receipts]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[jwt_tokens]
[ldap]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[policy]
[profiler]
[receipt]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[token]
provider = fernet
[tokenless_auth]
[totp]
[trust]
[unified_limit]
[wsgi]
su -s /bin/sh -c "keystone-manage db_sync" keystone
Initialize the Fernet key repositories.
These options allow the key repositories to be run under a different operating system user/group.
# Fernet keys (token signing)
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
# Credential keys
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
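Both commands create key repositories under /etc/keystone; a quick way to confirm they exist and are owned by the keystone user:

ls -l /etc/keystone/fernet-keys/ /etc/keystone/credential-keys/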
Before the Queens release, keystone had to run on two separate ports to accommodate the Identity v2 API, which typically ran a separate admin-only service on port 35357. With the v2 API removed, keystone can run on a single port, 5000, on all interfaces.
keystone-manage bootstrap --bootstrap-password 1qaz@WSX3edc --bootstrap-admin-url http://www.controller.mxq001:5000/v3/ --bootstrap-internal-url http://www.controller.mxq001:5000/v3/ --bootstrap-public-url http://www.controller.mxq001:5000/v3/ --bootstrap-region-id RegionOne
Edit the /etc/apache2/apache2.conf file and configure the ServerName option to reference the controller node.
echo "ServerName www.controller.mxq001" >> /etc/apache2/apache2.conf
service apache2 restart
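Once Apache is back up, the Identity endpoint should answer on port 5000 (a minimal check; it returns a small JSON version document):

curl -s http://www.controller.mxq001:5000/v3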
cat > /etc/keystone/admin-openrc.sh << EOF
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=1qaz@WSX3edc
export OS_AUTH_URL=http://www.controller.mxq001:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
source /etc/keystone/admin-openrc.sh
openstack project create --domain default --description "Service Project" service
openstack token issue
5. Deploy and Configure Glance (Image Service)
Performed on the controller node.
Create the database and user for glance.
# Create the database
CREATE DATABASE glance;
# Create the user
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glanceang';
openstack user create --domain default --password glance glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://www.controller.mxq001:9292
openstack endpoint create --region RegionOne image internal http://www.controller.mxq001:9292
openstack endpoint create --region RegionOne image admin http://www.controller.mxq001:9292
apt install -y glance
# Back up the configuration file
cp /etc/glance/glance-api.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/glance/glance-api.conf.bak > /etc/glance/glance-api.conf
# Configuration
vim /etc/glance/glance-api.conf
[DEFAULT]
[barbican]
[barbican_service_user]
[cinder]
[cors]
[database]
connection = mysql+pymysql://glance:glanceang@www.controller.mxq001/glance
[file]
[glance.store.http.store]
[glance.store.rbd.store]
[glance.store.s3.store]
[glance.store.swift.store]
[glance.store.vmware_datastore.store]
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[healthcheck]
[image_format]
disk_formats = ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,ploop.root-tar
[key_manager]
[keystone_authtoken]
www_authenticate_uri = http://www.controller.mxq001:5000
auth_url = http://www.controller.mxq001:5000
memcached_servers = www.controller.mxq001:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
[vault]
[wsgi]
su -s /bin/sh -c "glance-manage db_sync" glance
service glance-api restart
# Download the image
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
# Upload the image
glance image-create --name "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility=public
# Check the image status
root@controller:~# openstack image list
+--------------------------------------+--------+--------+
| ID                                   | Name   | Status |
+--------------------------------------+--------+--------+
| 12a404ea-5751-41c6-a319-8f63de543cd8 | cirros | active |
+--------------------------------------+--------+--------+
6. Deploy and Configure Placement
Purpose: the placement service tracks each resource provider's inventory and usage. For example, creating an instance on a compute node consumes CPU and memory from that compute node's resource provider, disk from an external shared-storage-pool resource provider, and IP addresses from an external IP resource provider.
Create the database and user for placement.
# Create the database
CREATE DATABASE placement;
# Create the user
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placementang';
openstack user create --domain default --password placement placement
Add the placement user to the service project with the admin role.
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://www.controller.mxq001:8778
openstack endpoint create --region RegionOne placement internal http://www.controller.mxq001:8778
openstack endpoint create --region RegionOne placement admin http://www.controller.mxq001:8778
apt install -y placement-api
# Back up the configuration file
cp /etc/placement/placement.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/placement/placement.conf.bak > /etc/placement/placement.conf
# Configuration
vim /etc/placement/placement.conf
[DEFAULT]
[api]
auth_strategy = keystone
[cors]
[keystone_authtoken]
auth_url = http://www.controller.mxq001:5000/v3
memcached_servers = www.controller.mxq001:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = placement
[oslo_middleware]
[oslo_policy]
[placement]
[placement_database]
connection = mysql+pymysql://placement:placementang@www.controller.mxq001/placement
[profiler]
su -s /bin/sh -c "placement-manage db sync" placement
service apache2 restart
root@controller:~# placement-status upgrade check
+-------------------------------------------+
| Upgrade Check Results                     |
+-------------------------------------------+
| Check: Missing Root Provider IDs          |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+
| Check: Incomplete Consumers               |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+
| Check: Policy File JSON to YAML Migration |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+
7. Deploy and Configure Nova (Compute Service)
7.1 Controller Node Configuration

# Database for Nova API interaction data
CREATE DATABASE nova_api;
# Database for Nova resource data
CREATE DATABASE nova;
# Database for Nova cell0 metadata
CREATE DATABASE nova_cell0;
# Create the user that manages the nova_api database
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'novaang';
# Create the user that manages the nova database
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'novaang';
# Create the user that manages the nova_cell0 database
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'novaang';
openstack user create --domain default --password nova nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://www.controller.mxq001:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://www.controller.mxq001:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://www.controller.mxq001:8774/v2.1
apt install -y nova-api nova-conductor nova-novncproxy nova-scheduler
# Back up the configuration file
cp /etc/nova/nova.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf
# Resulting configuration
vim /etc/nova/nova.conf
[DEFAULT]
log_dir = /var/log/nova
lock_path = /var/lock/nova
state_path = /var/lib/nova
transport_url = rabbit://openstack:1qaz@WSX3edc@www.controller.mxq001:5672/
my_ip = 10.0.0.10
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:novaang@www.controller.mxq001/nova_api
[barbican]
[barbican_service_user]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
connection = mysql+pymysql://nova:novaang@www.controller.mxq001/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://www.controller.mxq001:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://www.controller.mxq001:5000/
auth_url = http://www.controller.mxq001:5000/
memcached_servers = www.controller.mxq001:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
[metrics]
[mks]
[neutron]
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://www.controller.mxq001:5000/v3
username = placement
password = placement
[powervm]
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[zvm]
[cells]
enable = False
[os_region_name]
openstack =
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
# Restart the API service
service nova-api restart
# Restart the scheduler service
service nova-scheduler restart
# Restart the conductor (database proxy) service
service nova-conductor restart
# Restart the VNC remote console proxy service
service nova-novncproxy restart
7.2 Compute Node Configuration
compute01 node
Install the nova-compute service.
apt install -y nova-compute
# Back up the configuration file
cp /etc/nova/nova.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf
# Full configuration
vim /etc/nova/nova.conf
[DEFAULT]
log_dir = /var/log/nova
lock_path = /var/lock/nova
state_path = /var/lib/nova
transport_url = rabbit://openstack:1qaz@WSX3edc@www.controller.mxq001
my_ip = 10.0.0.11
[api]
auth_strategy = keystone
[api_database]
[barbican]
[barbican_service_user]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://www.controller.mxq001:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://www.controller.mxq001:5000/
auth_url = http://www.controller.mxq001:5000/
memcached_servers = www.controller.mxq001:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
[metrics]
[mks]
[neutron]
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://www.controller.mxq001:5000/v3
username = placement
password = placement
[powervm]
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://10.0.0.10:6080/vnc_auto.html
[workarounds]
[wsgi]
[zvm]
[cells]
enable = False
[os_region_name]
openstack =
# Check whether the compute node supports hardware acceleration for virtual machines
egrep -c '(vmx|svm)' /proc/cpuinfo
# If the command returns "0", configure the following
vim /etc/nova/nova-compute.conf
[libvirt]
virt_type = qemu
service nova-compute restart
compute02 node
Install the nova-compute service.
apt install -y nova-compute
# Back up the configuration file
cp /etc/nova/nova.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf
# Full configuration
vim /etc/nova/nova.conf
[DEFAULT]
log_dir = /var/log/nova
lock_path = /var/lock/nova
state_path = /var/lib/nova
transport_url = rabbit://openstack:1qaz@WSX3edc@controller
my_ip = 10.0.0.12
[api]
auth_strategy = keystone
[api_database]
[barbican]
[barbican_service_user]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
[metrics]
[mks]
[neutron]
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
[powervm]
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://10.0.0.10:6080/vnc_auto.html
[workarounds]
[wsgi]
[zvm]
[cells]
enable = False
[os_region_name]
openstack =
# Check whether the compute node supports hardware acceleration for virtual machines
egrep -c '(vmx|svm)' /proc/cpuinfo
# If the command returns "0", configure the following
vim /etc/nova/nova-compute.conf
[libvirt]
virt_type = qemu
service nova-compute restart
7.3 Configure Host Discovery
openstack compute service list --service nova-compute
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
vim /etc/nova/nova.conf
'''
[scheduler]
discover_hosts_in_cells_interval = 300
'''
service nova-api restart
root@controller:~# openstack compute service list
+--------------------------------------+----------------+------------+----------+---------+-------+---------------------+
| ID                                   | Binary         | Host       | Zone     | Status  | State | Updated At          |
+--------------------------------------+----------------+------------+----------+---------+-------+---------------------+
| 68178099-13c5-4464-9a55-71ea0dd30bf5 | nova-scheduler | controller | internal | enabled | up    | 2022-09-29T05:45:49 |
| bd2a33be-1457-41c1-8ce8-3d4a8cb43551 | nova-conductor | controller | internal | enabled | up    | 2022-09-29T05:45:49 |
| 98b4584d-f9bf-4c10-9fd8-331899ebf70b | nova-compute   | compute01  | nova     | enabled | up    | 2022-09-29T05:45:53 |
| f809da57-8999-4ba4-8a32-5b60991f8838 | nova-compute   | compute02  | nova     | enabled | up    | 2022-09-29T05:45:56 |
+--------------------------------------+----------------+------------+----------+---------+-------+---------------------+
8. Configure the OVS-based Neutron Networking Service
8.1 Controller Node Configuration

# Create the database
CREATE DATABASE neutron;
# Create the user
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutronang';
openstack user create --domain default --password neutron neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://www.controller.mxq001:9696
openstack endpoint create --region RegionOne network internal http://www.controller.mxq001:9696
openstack endpoint create --region RegionOne network admin http://www.controller.mxq001:9696
cat >> /etc/sysctl.conf << EOF
# Disable reverse-path source address validation
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
# Allow bridged (layer-2) traffic to pass through iptables
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF
modprobe br_netfilter
sysctl -p
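To confirm the kernel settings took effect (the bridge sysctls require the br_netfilter module to be loaded first):

# The module should be listed
lsmod | grep br_netfilter
# Should print 0 and 1 respectively
sysctl net.ipv4.conf.all.rp_filter net.bridge.bridge-nf-call-iptables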
apt install -y neutron-server neutron-plugin-ml2 neutron-l3-agent neutron-dhcp-agent neutron-metadata-agent neutron-openvswitch-agent
# Back up the configuration file
cp /etc/neutron/neutron.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf
# Full configuration
vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
auth_strategy = keystone
state_path = /var/lib/neutron
dhcp_agent_notification = true
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
transport_url = rabbit://openstack:1qaz@WSX3edc@www.controller.mxq001
[agent]
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
[cache]
[cors]
[database]
connection = mysql+pymysql://neutron:neutronang@www.controller.mxq001/neutron
[healthcheck]
[ironic]
[keystone_authtoken]
www_authenticate_uri = http://www.controller.mxq001:5000
auth_url = http://www.controller.mxq001:5000
memcached_servers = www.controller.mxq001:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[nova]
auth_url = http://www.controller.mxq001:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[placement]
[privsep]
[quotas]
[ssl]
# Back up the configuration file
cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/neutron/plugins/ml2/ml2_conf.ini.bak > /etc/neutron/plugins/ml2/ml2_conf.ini
# Full configuration
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[ml2]
type_drivers = flat,vlan,vxlan,gre
tenant_network_types = vxlan
mechanism_drivers = openvswitch,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = physnet1
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
vni_ranges = 1:1000
[ovs_driver]
[securitygroup]
enable_ipset = true
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[sriov_driver]
Configure the openvswitch_agent.ini file.
# Back up the file
cp /etc/neutron/plugins/ml2/openvswitch_agent.ini{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/neutron/plugins/ml2/openvswitch_agent.ini.bak > /etc/neutron/plugins/ml2/openvswitch_agent.ini
# Full configuration
vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
[DEFAULT]
[agent]
l2_population = True
tunnel_types = vxlan
prevent_arp_spoofing = True
[dhcp]
[network_log]
[ovs]
local_ip = 10.0.0.10
bridge_mappings = physnet1:br-ens34
[securitygroup]
# Back up the file
cp /etc/neutron/l3_agent.ini{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/neutron/l3_agent.ini.bak > /etc/neutron/l3_agent.ini
# Full configuration
vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
external_network_bridge =
[agent]
[network_log]
[ovs]
# Back up the file
cp /etc/neutron/dhcp_agent.ini{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/neutron/dhcp_agent.ini.bak > /etc/neutron/dhcp_agent.ini
# Full configuration
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
[agent]
[ovs]
Configure the metadata_agent.ini file.
This provides the metadata service.
What is metadata?
Metadata supports functions such as indicating storage locations, historical data, resource lookup, and file records. It is essentially an electronic catalogue: to build that catalogue, the content and characteristics of the data must be described and recorded, which in turn makes the data easier to retrieve.
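In practice, an instance consumes this service by querying the well-known metadata address; the neutron-metadata-agent proxies the request to Nova using the shared secret configured below. A minimal check, run from inside a guest once the cloud is up (not on the hosts themselves):

# Returns the instance ID of the guest making the request
curl http://169.254.169.254/latest/meta-data/instance-id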
# Back up the file
cp /etc/neutron/metadata_agent.ini{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/neutron/metadata_agent.ini.bak > /etc/neutron/metadata_agent.ini
# Full configuration
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = www.controller.mxq001
metadata_proxy_shared_secret = ws
[agent]
[cache]
vim /etc/nova/nova.conf
'''
[DEFAULT]
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
[neutron]
auth_url = http://www.controller.mxq001:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = ws
'''
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
service nova-api restart
ovs-vsctl add-br br-ens34
ovs-vsctl add-port br-ens34 ens34
# Neutron API server
service neutron-server restart
# OVS agent
service neutron-openvswitch-agent restart
# DHCP agent
service neutron-dhcp-agent restart
# Metadata agent
service neutron-metadata-agent restart
# L3 (routing) agent
service neutron-l3-agent restart
8.2 Compute Node Configuration
compute01 node
cat >> /etc/sysctl.conf << EOF
# Disable reverse-path source address validation
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
# Allow bridged (layer-2) traffic to pass through iptables
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF
modprobe br_netfilter
sysctl -p
apt install -y neutron-openvswitch-agent
# Back up the file
cp /etc/neutron/neutron.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf
# Full configuration
vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
auth_strategy = keystone
state_path = /var/lib/neutron
allow_overlapping_ips = true
transport_url = rabbit://openstack:1qaz@WSX3edc@www.controller.mxq001
[agent]
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
[cache]
[cors]
[database]
[healthcheck]
[ironic]
[keystone_authtoken]
www_authenticate_uri = http://www.controller.mxq001:5000
auth_url = http://www.controller.mxq001:5000
memcached_servers = www.controller.mxq001:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[placement]
[privsep]
[quotas]
[ssl]
Configure the openvswitch_agent.ini file.
# Back up the file
cp /etc/neutron/plugins/ml2/openvswitch_agent.ini{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/neutron/plugins/ml2/openvswitch_agent.ini.bak > /etc/neutron/plugins/ml2/openvswitch_agent.ini
# Full configuration
vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
[DEFAULT]
[agent]
l2_population = True
tunnel_types = vxlan
prevent_arp_spoofing = True
[dhcp]
[network_log]
[ovs]
local_ip = 10.0.0.11
bridge_mappings = physnet1:br-ens34
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
vim /etc/nova/nova.conf
'''
[DEFAULT]
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
vif_plugging_is_fatal = true
vif_plugging_timeout = 300
[neutron]
auth_url = http://www.controller.mxq001:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
'''
service nova-compute restart
ovs-vsctl add-br br-ens34
ovs-vsctl add-port br-ens34 ens34
service neutron-openvswitch-agent restart
compute02 node
cat >> /etc/sysctl.conf << EOF
# Disable reverse-path source address validation
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
# Allow bridged (layer-2) traffic to pass through iptables
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF
modprobe br_netfilter
sysctl -p
apt install -y neutron-openvswitch-agent
# Back up the file
cp /etc/neutron/neutron.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf
# Full configuration
vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
auth_strategy = keystone
state_path = /var/lib/neutron
allow_overlapping_ips = true
transport_url = rabbit://openstack:1qaz@WSX3edc@controller
[agent]
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
[cache]
[cors]
[database]
[healthcheck]
[ironic]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[placement]
[privsep]
[quotas]
[ssl]
Configure the openvswitch_agent.ini file.
# Back up the file
cp /etc/neutron/plugins/ml2/openvswitch_agent.ini{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/neutron/plugins/ml2/openvswitch_agent.ini.bak > /etc/neutron/plugins/ml2/openvswitch_agent.ini
# Full configuration
vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
[DEFAULT]
[agent]
l2_population = True
tunnel_types = vxlan
prevent_arp_spoofing = True
[dhcp]
[network_log]
[ovs]
local_ip = 10.0.0.12
bridge_mappings = physnet1:br-ens34
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
vim /etc/nova/nova.conf
'''
[DEFAULT]
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
vif_plugging_is_fatal = true
vif_plugging_timeout = 300
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
'''
service nova-compute restart
ovs-vsctl add-br br-ens38
ovs-vsctl add-port br-ens38 ens38
service neutron-openvswitch-agent restart
8.3 Verify Neutron
root@controller:~# openstack network agent list
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 5695085f-b03f-4ff2-b13f-a8e59036ca15 | Open vSwitch agent | controller | None              | :-)   | UP    | neutron-openvswitch-agent |
| 77f6b5e6-a761-49c6-8694-de4d3d52509f | Metadata agent     | controller | None              | :-)   | UP    | neutron-metadata-agent    |
| 87139cbc-27ee-4885-807e-96800816adca | Open vSwitch agent | compute01  | None              | :-)   | UP    | neutron-openvswitch-agent |
| 891696fa-01af-4fd9-87f0-ad3d432f05d0 | L3 agent           | controller | nova              | :-)   | UP    | neutron-l3-agent          |
| 91959f9b-db89-4021-b55e-888f71edb0b3 | DHCP agent         | controller | nova              | :-)   | UP    | neutron-dhcp-agent        |
| e5598aa0-e71c-4a74-a11f-b415a2e4fdbb | Open vSwitch agent | compute02  | None              | :-)   | UP    | neutron-openvswitch-agent |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
9. Configure the Dashboard (Horizon) Service
apt install -y openstack-dashboard
vim /etc/openstack-dashboard/local_settings.py
'''
# Configure the dashboard to use the OpenStack services on the controller node
OPENSTACK_HOST = "www.controller.mxq001"
# Allow all hosts to access the dashboard
ALLOWED_HOSTS = ["*"]
# Configure the memcached session storage service
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'www.controller.mxq001:11211',
    }
}
# Enable Identity API version 3
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
# Enable support for domains
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
# Configure API versions
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}
# Make Default the default domain for users created via the dashboard
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
# Make user the default role for users created via the dashboard
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
# Enable volume backups
OPENSTACK_CINDER_FEATURES = {
    'enable_backup': True,
}
# Configure the time zone
TIME_ZONE = "Asia/Shanghai"
'''
systemctl reload apache2
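A quick reachability test from the controller (the Ubuntu package serves Horizon under /horizon by default; this path is an assumption worth checking against the installed Apache configuration):

# Should return HTTP 200 or a redirect to the login page
curl -I http://10.0.0.10/horizon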
10. Deploy and Configure Cinder (Block Storage)
10.1 Controller Node Configuration

# Create the cinder database
CREATE DATABASE cinder;
# Create the cinder user
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinderang';
openstack user create --domain default --password cinder cinder
openstack role add --project service --user cinder admin
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev3 public http://www.controller.mxq001:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://www.controller.mxq001:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://www.controller.mxq001:8776/v3/%\(project_id\)s
apt install -y cinder-api cinder-scheduler
# Back up the file
cp /etc/cinder/cinder.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
# Full configuration
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:1qaz@WSX3edc@www.controller.mxq001
auth_strategy = keystone
my_ip = 10.0.0.10
[database]
connection = mysql+pymysql://cinder:cinderang@www.controller.mxq001/cinder
[keystone_authtoken]
www_authenticate_uri = http://www.controller.mxq001:5000
auth_url = http://www.controller.mxq001:5000
memcached_servers = www.controller.mxq001:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
su -s /bin/sh -c "cinder-manage db sync" cinder
vim /etc/nova/nova.conf
'''
[cinder]
os_region_name = RegionOne
'''
service nova-api restart
service cinder-scheduler restart
service apache2 reload
10.2 Compute Node Configuration
compute01 node
apt install -y lvm2 thin-provisioning-tools
pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb
Modify the lvm.conf file.
Purpose: add a filter that accepts the /dev/sdb device and rejects all other devices.

vim /etc/lvm/lvm.conf
devices {
        ...
        filter = [ "a/sdb/", "r/.*/" ]
}
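After creating the PV and VG above, LVM itself can confirm the backing store that cinder-volume will use:

# The cinder-volumes volume group should be listed, backed by /dev/sdb
pvs
vgs cinder-volumes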
apt install -y cinder-volume tgt
# Back up the configuration file
cp /etc/cinder/cinder.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
# Full configuration
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:1qaz@WSX3edc@www.controller.mxq001
auth_strategy = keystone
my_ip = 10.0.0.11
enabled_backends = lvm
glance_api_servers = http://www.controller.mxq001:9292
[database]
connection = mysql+pymysql://cinder:cinderang@www.controller.mxq001/cinder
[keystone_authtoken]
www_authenticate_uri = http://www.controller.mxq001:5000
auth_url = http://www.controller.mxq001:5000
memcached_servers = www.controller.mxq001:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = tgtadm
volume_backend_name = lvm
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
vim /etc/tgt/conf.d/tgt.conf
include /var/lib/cinder/volumes/*
service tgt restart
service cinder-volume restart
compute02 node
apt install -y lvm2 thin-provisioning-tools
pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb
Modify the lvm.conf file.
Purpose: add a filter that accepts the /dev/sdb device and rejects all other devices.

vim /etc/lvm/lvm.conf
devices {
        ...
        filter = [ "a/sdb/", "r/.*/" ]
}
apt install -y cinder-volume tgt
# Back up the configuration file
cp /etc/cinder/cinder.conf{,.bak}
# Strip comments and blank lines into a clean file
grep -Ev "^$|#" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
# Full configuration
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:1qaz@WSX3edc@controller
auth_strategy = keystone
my_ip = 10.0.0.12
enabled_backends = lvm
glance_api_servers = http://controller:9292
[database]
connection = mysql+pymysql://cinder:cinderang@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = tgtadm
volume_backend_name = lvm
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
vim /etc/tgt/conf.d/tgt.conf
include /var/lib/cinder/volumes/*
service tgt restart
service cinder-volume restart
10.3 Verify Cinder
root@controller:~# openstack volume service list
+------------------+---------------+------+---------+-------+---------------------+
| Binary           | Host          | Zone | Status  | State | Updated At          |
+------------------+---------------+------+---------+-------+---------------------+
| cinder-scheduler | controller    | nova | enabled | up    | 2022-09-29T07:58:33 |
| cinder-volume    | compute01@lvm | nova | enabled | up    | 2022-09-29T07:58:29 |
| cinder-volume    | compute02@lvm | nova | enabled | up    | 2022-09-29T07:58:34 |
+------------------+---------------+------+---------+-------+---------------------+
11. Operations Walkthrough
11.1 Load the OpenStack environment variables
source /etc/keystone/admin-openrc.sh
11.2 Create a router
openstack router create Ext-Router
11.3 Create a VXLAN network
openstack network create --provider-network-type vxlan Intnal
openstack subnet create Intsubnal --network Intnal --subnet-range 166.66.66.0/24 --gateway 166.66.66.1 --dns-nameserver 114.114.114.114
11.4 Add the internal network to the router
openstack router add subnet Ext-Router Intsubnal
11.5 Create a flat network
openstack network create --provider-physical-network physnet1 --provider-network-type flat --external Extnal
openstack subnet create Extsubnal --network Extnal --subnet-range 192.168.3.0/24 --allocation-pool start=192.168.3.100,end=192.168.3.200 --gateway 192.168.3.1 --dns-nameserver 114.114.114.114 --no-dhcp
11.6 Set the router's external gateway
openstack router set Ext-Router --external-gateway Extnal
11.7 Open the security group

# Allow ICMP
openstack security group rule create --proto icmp default
# Allow TCP port 22 (SSH)
openstack security group rule create --proto tcp --dst-port 22:22 default
# List the security group rules
openstack security group rule list
11.8 Upload an image
openstack image create cirros04 --disk-format qcow2 --file cirros-0.4.0-x86_64-disk.img
11.9 Create an instance
ssh-keygen -N ""
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
openstack flavor create --vcpus 1 --ram 512 --disk 1 C1-512MB-1G
openstack server create --flavor C1-512MB-1G --image cirros --security-group default --nic net-id=4a9567c0-f2bb-4b11-a9db-5a33eb7d3192 --key-name mykey vm01
openstack floating ip create Extnal
openstack server add floating ip vm01 192.168.3.118
openstack console url show vm01
11.10 Create a volume type
openstack volume type create lvm
11.11 Add metadata to the volume type
cinder --os-username admin --os-tenant-name admin type-key lvm set volume_backend_name=lvm
11.12 List volume types
openstack volume type list
11.13 Create a volume
openstack volume create lvm01 --type lvm --size 1
11.14 Attach the volume to an instance
nova volume-attach vm01 2ebc30ed-7380-4ffa-a2fc-33beb32a8592
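After attaching, the volume's state should change from available to in-use (the ID above is the one passed to volume-attach):

# The lvm01 volume should show "in-use" and be attached to vm01
openstack volume list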
II. Ceph Cluster Deployment
1. Environment Preparation

Hostname   IP          Disk                            CPU   Memory
node1      10.0.0.18   sda: 100G, sdb: 50G, sdc: 50G   2C    4G
node2      10.0.0.19   sda: 100G, sdb: 50G, sdc: 50G   2C    4G
node3      10.0.0.20   sda: 100G, sdb: 50G, sdc: 50G   2C    4G

Operating system: Ubuntu 22.04    Virtualization tool: VMware 15
1.1 Configure Addresses
# node1
cat > /etc/netplan/00-installer-config.yaml << EOF
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      dhcp4: false
      addresses: [10.0.0.18/24]
      gateway4: 10.0.0.254
  version: 2
EOF
# Apply the network configuration
netplan apply
# node2
cat > /etc/netplan/00-installer-config.yaml << EOF
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      dhcp4: false
      addresses: [10.0.0.19/24]
      gateway4: 10.0.0.254
  version: 2
EOF
# Apply the network configuration
netplan apply
# node3
cat > /etc/netplan/00-installer-config.yaml << EOF
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      dhcp4: false
      addresses: [10.0.0.20/24]
      gateway4: 10.0.0.254
  version: 2
EOF
# Apply the network configuration
netplan apply
1.2 Change the Hostnames
# node1
hostnamectl set-hostname storage01.mxq001
# Open a new shell so the new hostname takes effect
bash
# node2
hostnamectl set-hostname storage02.mxq001
# Open a new shell so the new hostname takes effect
bash
# node3
hostnamectl set-hostname storage03.mxq001
# Open a new shell so the new hostname takes effect
bash
2. Configure hosts Resolution (all nodes)
cat >> /etc/hosts << EOF
10.0.0.13 storage01
10.0.0.14 storage02
10.0.0.15 storage03
EOF
3. Set Up the Offline Repository (all nodes)
tar zxvf ceph_quincy.tar.gz -C /opt/
cp /etc/apt/sources.list{,.bak}
cat > /etc/apt/sources.list << EOF
deb [trusted=yes] file:// /opt/ceph_quincy/debs/
EOF
apt-get clean all
apt-get update
4. Configure Time Synchronization

# Enable NTP
timedatectl set-ntp true
# Set the timezone to Shanghai
timedatectl set-timezone Asia/Shanghai
# Sync the system clock to the hardware clock
hwclock --systohc
# Install the service
apt install -y chrony
# Edit the configuration file
vim /etc/chrony/chrony.conf
20 server controller iburst maxsources 2
61 allow all
63 local stratum 10
# Restart the service
systemctl restart chronyd
# Install the service
apt install -y chrony
# Edit the configuration file
vim /etc/chrony/chrony.conf
20 pool controller iburst maxsources 4
# Restart the service
systemctl restart chronyd
5. Install Docker (all nodes)
apt -y install docker-ce
6. Install cephadm (node1)
apt install -y cephadm
7. Import the Ceph Images (all nodes)
docker load -i cephadm_images_v17.tar
7.1 Set Up a Local Image Registry (node1)

# Load the registry image
docker load -i registry.tar
# Start the registry container
docker run -d --name registry -p 5000:5000 --restart always 3a0f7b0a13ef
cat >> /etc/docker/daemon.json << EOF
{
"insecure-registries":["www.storage01.mxq001:5000"]
}
EOF
systemctl daemon-reload
systemctl restart docker
docker tag 0912465dcea5 www.storage01.mxq001:5000/ceph:v17
docker push www.storage01.mxq001:5000/ceph:v17
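The registry's standard v2 API can confirm the image was pushed (plain HTTP, since the registry was declared insecure above):

# Should return {"repositories":["ceph"]}
curl http://www.storage01.mxq001:5000/v2/_catalog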
7.2 Configure the Private Registry (node2 and node3)
cat >> /etc/docker/daemon.json << EOF
{
"insecure-registries":["www.storage01.mxq001:5000"]
}
EOF
systemctl daemon-reload
systemctl restart docker
8. Bootstrap the Cluster (node1)

mkdir -p /etc/ceph
cephadm --image www.storage01.mxq001:5000/ceph:v17 bootstrap --mon-ip 10.0.0.13 --initial-dashboard-user admin --initial-dashboard-password 1qaz@WSX3edc --skip-pull

PS:
# To deploy additional monitors
ceph orch apply mon "test01,test02,test03"
# To remove the cluster
cephadm rm-cluster --fsid d92b85c0-3ecd-11ed-a617-3f7cf3e2d6d8 --force
9. Install the ceph-common Tools (node1)
apt install -y ceph-common
10. Add Hosts to the Cluster (node1)

ssh-copy-id -f -i /etc/ceph/ceph.pub storage02
ssh-copy-id -f -i /etc/ceph/ceph.pub storage03

ceph orch host add storage02
ceph orch host add storage03
11. Deploy OSDs

# List the available disk devices
ceph orch device ls
# Add them to the Ceph cluster: automatically create OSDs on all unused devices
ceph orch apply osd --all-available-devices
PS:
# To create an OSD from a specific device on a specific host:
ceph orch daemon add osd node1:/dev/sdb
ceph orch daemon add osd node2:/dev/sdb
ceph orch daemon add osd node3:/dev/sdb
# Check the OSDs and cluster capacity
ceph -s
ceph df
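Another useful view once the OSDs come up is the CRUSH tree, which lists each OSD under its host:

# Every spare device on the three nodes should appear as an "up" OSD
ceph osd tree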
12. Access the Dashboard to Check Status
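The bootstrap step enables the manager dashboard (by default served over HTTPS on port 8443 of the bootstrap node); its URL can be looked up from the cluster itself, and the credentials are the ones passed to cephadm bootstrap above:

# Prints the dashboard (and prometheus) URLs served by the active mgr
ceph mgr services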
III. Integrating OpenStack with Ceph
1. Create the Storage Pools Needed by the Backends
1.1 Pool for Cinder volumes
ceph osd pool create volumes 32
1.2 Pool for Glance images
ceph osd pool create images 16
1.3 Pool for backups
ceph osd pool create backups 16
1.4 Pool for instances
ceph osd pool create vms 16
2. Create the Backend Users
2.1 Create the Keys
cd /etc/ceph/
On the Ceph cluster, create keys for the cinder, glance, cinder-backup, and nova users, allowing them to access the Ceph storage pools.
2.1.1 Create the client.cinder user
Grant rwx on the volumes pool, rwx on the vms pool, and rx on the images pool.

ceph auth get-or-create client.cinder mon "allow r" osd "allow class-read object_prefix rbd_children,allow rwx pool=volumes,allow rwx pool=vms,allow rx pool=images"
# class-read: a subset of x; grants the ability to call class read methods
# object_prefix: restricts access by object-name prefix; here access is limited to objects whose names start with rbd_children, in any pool
2.1.2 Create the client.glance user
ceph auth get-or-create client.glance mon "allow r" osd "allow class-read object_prefix rbd_children,allow rwx pool=images"
2.1.3 Create the client.cinder-backup user

ceph auth get-or-create client.cinder-backup mon "profile rbd" osd "profile rbd pool=backups"
# The rbd profile defines the access rights for the new cinder-backup account; client applications then use this account for block-level access to Ceph storage backed by RADOS Block Devices.
2.2 Create the Target Directories

# controller node
mkdir /etc/ceph/
# compute01 node
mkdir /etc/ceph/
# compute02 node
mkdir /etc/ceph/
2.3 Export the Keys
ceph auth get client.glance -o ceph.client.glance.keyring
ceph auth get client.cinder -o ceph.client.cinder.keyring
ceph auth get client.cinder-backup -o ceph.client.cinder-backup.keyring
2.4 Copy the Keys
2.4.1 Controller node
scp ceph.client.glance.keyring root@www.controller.mxq001:/etc/ceph/
scp ceph.client.cinder.keyring root@www.controller.mxq001:/etc/ceph/
scp ceph.conf root@www.controller.mxq001:/etc/ceph/
2.4.2 Compute nodes

scp ceph.client.cinder.keyring root@www.compute01.mxq001:/etc/ceph/
scp ceph.client.cinder.keyring root@www.compute02.mxq001:/etc/ceph/

Copy the cinder-backup key (to the nodes running the backup service):

scp ceph.client.cinder-backup.keyring root@www.compute01.mxq001:/etc/ceph/
scp ceph.client.cinder-backup.keyring root@www.compute02.mxq001:/etc/ceph/

scp ceph.conf root@www.compute01.mxq001:/etc/ceph/
scp ceph.conf root@www.compute02.mxq001:/etc/ceph/
3. Add the libvirt Secret on the Compute Nodes
3.1 Add the secret on compute01
Generate the secret (note: if there are multiple compute nodes, they must all use the same UUID).

cd /etc/ceph/
UUID=$(uuidgen)
cat >> secret.xml << EOF
<secret ephemeral='no' private='no'>
  <uuid>$UUID</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF
[root@compute01 ~]# virsh secret-define --file secret.xml
Secret bf168fa8-8d5b-4991-ba4c-12ae622a98b1 created

# Copy the key value out of the keyring
[root@compute01 ~]# cat ceph.client.cinder.keyring
AQALyS1jHz4dDRAAEmt+c8JlXWyzxmCx5vobZg==
[root@compute01 ~]# virsh secret-set-value --secret ${UUID} --base64 $(cat ceph.client.cinder.keyring | grep key | awk -F ' ' '{print $3}')
virsh secret-list
3.2 Add the secret on compute02
Generate the secret (note: if there are multiple compute nodes, they must all use the same UUID).

cd /etc/ceph/
UUID=bf168fa8-8d5b-4991-ba4c-12ae622a98b1
cat >> secret.xml << EOF
<secret ephemeral='no' private='no'>
  <uuid>$UUID</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF

[root@compute02 ~]# virsh secret-define --file secret.xml
Secret bf168fa8-8d5b-4991-ba4c-12ae622a98b1 created

# Copy the key value out of the keyring
[root@compute02 ~]# cat ceph.client.cinder.keyring
AQALyS1jHz4dDRAAEmt+c8JlXWyzxmCx5vobZg==
[root@compute02 ~]# virsh secret-set-value --secret ${UUID} --base64 $(cat ceph.client.cinder.keyring | grep key | awk -F ' ' '{print $3}')
# Any error message here can be ignored
virsh secret-list
4. Install the Ceph Client
This allows OpenStack to access Ceph resources.

# controller node
apt install -y ceph-common
# compute01 node
apt install -y ceph-common
# compute02 node
apt install -y ceph-common
5. Configure the Glance Backend Storage
chown glance.glance /etc/ceph/ceph.client.glance.keyring
vim /etc/glance/glance-api.conf
[glance_store]
#stores = file,http
#default_store = file
#filesystem_store_datadir = /var/lib/glance/images/
stores = rbd,file,http
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
apt install -y python3-boto3
service glance-api restart
openstack image create cirros04_v1 --disk-format qcow2 --file cirros-0.4.0-x86_64-disk.img
rbd ls images
6. Configure the Cinder Backend Storage
Change the ownership of the cinder keyring (controller, compute01, and compute02 nodes).
chown cinder.cinder /etc/ceph/ceph.client.cinder.keyring
# controller node
vim /etc/cinder/cinder.conf
[DEFAULT]
# Set the default volume type; otherwise newly created volumes get the type __DEFAULT__
default_volume_type = ceph
# Restart the service to apply the change
service cinder-scheduler restart

Modify the configuration file (compute01 and compute02 storage nodes):

vim /etc/cinder/cinder.conf
[DEFAULT]
enabled_backends = ceph,lvm
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = e2a779f1-d257-440d-8ff1-b8795dd40957
volume_backend_name = ceph
# Restart the service to apply the change
service cinder-volume restart
openstack volume type create ceph
cinder --os-username admin --os-tenant-name admin type-key ceph set volume_backend_name=ceph
openstack volume type list
openstack volume create ceph01 --type ceph --size 1
rbd ls volumes
7. Configure Volume Backups
compute01 and compute02 nodes.
Install the service.
apt install cinder-backup -y
chown cinder.cinder /etc/ceph/ceph.client.cinder-backup.keyring
vim /etc/cinder/cinder.conf
[DEFAULT]
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 4194304
backup_ceph_pool = backups
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
service cinder-backup restart
openstack volume backup create --name ceph_backup ceph01
rbd ls backups
8. Configure Nova Integration with Ceph
compute01 and compute02 nodes.
Modify the configuration file.

vim /etc/nova/nova.conf
[DEFAULT]
live_migration_flag = "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE"
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = e2a779f1-d257-440d-8ff1-b8795dd40957
apt install -y qemu-block-extra
service nova-compute restart
# Template (replace <vxlan-network-id> with the ID of the VXLAN network)
openstack server create --flavor C1-512MB-1G --image cirros04_v1 --security-group default --nic net-id=<vxlan-network-id> --key-name mykey vm02
# Example
openstack server create --flavor C1-512MB-1G --image cirros04_v1 --security-group default --nic net-id=4a9567c0-f2bb-4b11-a9db-5a33eb7d3192 --key-name mykey vm02
# The security group corresponds to the admin project ID
rbd ls vms
8.1 Live Migration Configuration
compute01 and compute02 nodes.
Configure the listen address.

vim /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
tcp_port = "16509"
listen_addr = "10.0.0.12"    # use this host's own address
auth_tcp = "none"

vim /etc/default/libvirtd
LIBVIRTD_ARGS="--listen"
systemctl mask libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tls.socket libvirtd-tcp.socket
service libvirtd restart
service nova-compute restart
Test that the compute nodes can connect to each other.
From compute01, connect to compute02:
virsh -c qemu+tcp://www.compute02.mxq001/system
From compute02, connect to compute01:
virsh -c qemu+tcp://www.compute01.mxq001/system
openstack server list
openstack server show 1f6dd9b8-7700-43a7-bd1f-0695e0de4a04
# Live-migrate the instance to compute02, then check its host again
nova live-migration 1f6dd9b8-7700-43a7-bd1f-0695e0de4a04 www.compute02.mxq001
openstack server show 1f6dd9b8-7700-43a7-bd1f-0695e0de4a04