Manual OpenStack Train Deployment

小裴 · 2023-04-04

I. Two CentOS 7.9 virtual machines, each with two NICs
The first NIC connects to the external network.
The second NIC is dedicated to the Neutron service.
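The guide refers to the nodes by the hostnames controller and compute throughout, so both nodes need name resolution. A minimal sketch (the IPs are the ones used later in the nova.conf examples; adjust them to your environment):

hostnamectl set-hostname controller      # on the controller node
hostnamectl set-hostname compute         # on the compute node
# /etc/hosts on both nodes
192.168.4.213 controller
192.168.3.97  compute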
II. Initialization

Disable SELinux and the firewall

sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config; systemctl stop firewalld; systemctl disable firewalld
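The sed change only takes effect after a reboot; if you are not rebooting, SELinux can also be switched to permissive for the current boot (standard commands, supplementing the original step):

setenforce 0     # permissive immediately, without a reboot
getenforce       # should print Permissive (or Disabled after a reboot)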

Configure the time server (chrony)

yum -y install chrony
# controller
vi /etc/chrony.conf
server ntp3.aliyun.com iburst    # keep only this server entry; remove the default ones
allow all
local stratum 10
# compute
vi /etc/chrony.conf
server controller iburst
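After editing chrony.conf on either node, enable and restart chronyd and confirm time sync is actually working (standard chrony tooling, not part of the original steps):

systemctl enable chronyd; systemctl restart chronyd
chronyc sources    # controller should list ntp3.aliyun.com; compute should list controller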

# Configure yum repositories

rm -rf /etc/yum.repos.d/*
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
# Install the OpenStack Train release repository
yum install centos-release-openstack-train -y
# Install the OpenStack client and SELinux policies (pulls in the required dependencies)
yum install python-openstackclient openstack-selinux -y

Install the database service (controller)

yum install mariadb mariadb-server python2-PyMySQL
vi /etc/my.cnf.d/openstack.cnf 
[mysqld]
bind-address = <controller node IP>

default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

systemctl enable mariadb.service; systemctl start mariadb.service

Initialize the database (secure installation)

mysql_secure_installation
# Press Enter (no current root password)
y    # set the root password to 123, confirm 123
y    # remove anonymous users
n    # do not disallow remote root login (keep it allowed)
y    # remove the test database
y    # reload privilege tables
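A quick login test confirms the root password took effect (assuming the password 123 set above):

mysql -u root -p123 -e "SHOW DATABASES;"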

Message queue (RabbitMQ)

yum install rabbitmq-server -y
# Enable on boot and start
systemctl enable rabbitmq-server.service; systemctl start rabbitmq-server.service
rabbitmqctl add_user openstack openstack123
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
# List available plugins
rabbitmq-plugins list
# Enable the management web UI
rabbitmq-plugins enable rabbitmq_management rabbitmq_management_agent
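With the management plugin enabled, RabbitMQ serves a web UI on port 15672. A quick sanity check of the user and its permissions (standard rabbitmqctl commands; the administrator tag is only needed if you want to log in to the web UI as openstack):

rabbitmqctl list_users                               # should include the openstack user
rabbitmqctl list_permissions                         # openstack should have ".*" ".*" ".*"
rabbitmqctl set_user_tags openstack administrator    # optional: allow web UI login at http://controller:15672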

Caching service (memcached)

yum install memcached python-memcached -y

vi /etc/sysconfig/memcached    # edit OPTIONS so memcached also listens on the controller hostname
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="1024"
OPTIONS="-l 127.0.0.1,::1,controller"

systemctl enable memcached.service;  systemctl start memcached.service
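To confirm memcached is listening on the controller address (nc comes from the nmap-ncat package and may need to be installed first):

systemctl status memcached
printf 'stats\nquit\n' | nc controller 11211 | head -5    # should print STAT lines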

keystone

# Create the database and grant privileges (run in the MySQL shell: mysql -u root -p123)
CREATE DATABASE keystone;

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone123';

yum install openstack-keystone httpd mod_wsgi -y

vim /etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:keystone123@controller/keystone

[token]
provider = fernet

# Sync the database
su -s /bin/sh -c "keystone-manage db_sync" keystone

# Initialize the Fernet key repositories (token signing keys)
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone

keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

keystone-manage bootstrap --bootstrap-password admin --bootstrap-admin-url http://controller:5000/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne

vim /etc/httpd/conf/httpd.conf
ServerName controller

ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

systemctl enable httpd.service; systemctl start httpd.service

vi admin.sh 
#!/bin/bash
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3

source admin.sh 

openstack domain create --description "An Example Domain" example

openstack project create --domain default --description "Service Project" service

openstack project create --domain default --description "Demo Project" myproject

# Prompts for the new user's password; set it to myuser for consistency
openstack user create --domain default --password-prompt myuser

openstack role create myrole

openstack role add --project myproject --user myuser myrole

# Verification
unset OS_AUTH_URL OS_PASSWORD

# Prompts for the admin password (admin)
openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue

# Prompts for the myuser password (myuser)
openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name myproject --os-username myuser token issue

vi myuser.sh 
#!/bin/bash
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=myuser
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

# Final verification
source admin.sh
openstack token issue

glance

# Create the database and grant privileges
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance123';

# Prompts for a password; set it to glance for consistency
openstack user create --domain default --password-prompt glance

openstack role add --project service --user glance admin

openstack service create --name glance --description "OpenStack Image" image

openstack endpoint create --region RegionOne image public http://controller:9292

openstack endpoint create --region RegionOne image internal http://controller:9292

openstack endpoint create --region RegionOne image admin http://controller:9292

# Install the service
yum install openstack-glance -y

# Configure glance (OpenStack config files must not contain Chinese characters, not even in comments)
vim /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:glance123@controller/glance

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance

[paste_deploy]
flavor = keystone

[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

# Sync the database
su -s /bin/sh -c "glance-manage db_sync" glance

# Enable and start the service
systemctl enable openstack-glance-api.service; systemctl start openstack-glance-api.service
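The usual way to verify Glance (following the upstream install guide pattern) is to upload a small CirrOS test image; the download URL and version here are an example and may have moved:

wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
openstack image create "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --public
openstack image list    # the image should show as active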

placement

# Create the database and grant privileges
CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placement123';

# Create the service user (prompts for a password; set it to placement)
openstack user create --domain default --password-prompt placement

openstack role add --project service --user placement admin

openstack service create --name placement --description "Placement API" placement

openstack endpoint create --region RegionOne placement public http://controller:8778

openstack endpoint create --region RegionOne placement internal http://controller:8778

openstack endpoint create --region RegionOne placement admin http://controller:8778

# Install the service
yum install openstack-placement-api -y

# Configure placement
vim /etc/placement/placement.conf
[placement_database]
connection = mysql+pymysql://placement:placement123@controller/placement

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = placement

# Sync the database
su -s /bin/sh -c "placement-manage db sync" placement

# Fix a known packaging bug: append the following block to the end of the file
vi /etc/httpd/conf.d/00-placement-api.conf
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>

# Restart httpd
systemctl restart httpd

# Verify
placement-status upgrade check

nova

# Create the databases and grant privileges
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova123';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova123';

GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova123';


# Create the compute service credentials (prompts for a password; set it to nova)
openstack user create --domain default --password-prompt nova

openstack role add --project service --user nova admin

openstack service create --name nova --description "OpenStack Compute" compute

openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1

openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1

openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1


# Install the services
yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y

# openstack-nova-conductor   handles database access on behalf of compute nodes
# openstack-nova-novncproxy  provides the VNC console proxy for instances
# openstack-nova-scheduler   schedules instances onto compute nodes


vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata

[api_database]
connection = mysql+pymysql://nova:nova123@controller/nova_api

[database]
connection = mysql+pymysql://nova:nova123@controller/nova

[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller:5672/

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova

[DEFAULT]
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[DEFAULT]
my_ip = 192.168.4.213    # controller node IP

[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement

# Sync the databases
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova

# Verify that cell0 and cell1 are registered correctly
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova

# Enable and start the services
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service; systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

nova (compute)

# Install the service
yum install openstack-nova-compute -y

vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata

[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova

[DEFAULT]
my_ip = 192.168.3.97    # compute node IP

[DEFAULT]
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver


[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://192.168.4.213:6080/vnc_auto.html    # controller node IP

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement


# Check whether the CPU supports hardware virtualization
egrep -c '(vmx|svm)' /proc/cpuinfo
# If the result is 0, fall back to QEMU software virtualization:
vim /etc/nova/nova.conf
[libvirt]
virt_type = qemu

# Enable and start the services
systemctl enable libvirtd.service openstack-nova-compute.service; systemctl start libvirtd.service openstack-nova-compute.service

nova (verification)

# Verify on the controller node
openstack compute service list --service nova-compute
# Host discovery
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

neutron (controller) *** the tricky part


# Create the database and grant privileges
CREATE DATABASE neutron;

GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron123';

# Create the service credentials (prompts for a password; set it to neutron)
openstack user create --domain default --password-prompt neutron

openstack role add --project service --user neutron admin

openstack service create --name neutron --description "OpenStack Networking" network

openstack endpoint create --region RegionOne network public http://controller:9696

openstack endpoint create --region RegionOne network internal http://controller:9696

openstack endpoint create --region RegionOne network admin http://controller:9696


# Install the services
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y


# Configure neutron
vim /etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:neutron123@controller/neutron

[DEFAULT]
core_plugin = ml2
service_plugins = router

[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller

[DEFAULT]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[DEFAULT]
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

vi /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security

[ml2_type_flat]
flat_networks = provider

[ml2_type_vlan]
network_vlan_ranges = provider:1:1024

[ml2_type_vxlan]
vni_ranges = 1:1024

[securitygroup]
enable_ipset = true

vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[linux_bridge]
physical_interface_mappings = provider:eth1    # name of the second NIC


[vxlan]
enable_vxlan = true
local_ip = 192.168.4.213    # controller node IP
l2_population = true


[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

# Configure kernel parameters
vi /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
modprobe br_netfilter
sysctl -p
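modprobe only loads br_netfilter for the current boot; to make it persistent across reboots, a modules-load entry can be added (systemd convention, not part of the original steps):

echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
lsmod | grep br_netfilter    # confirm the module is loaded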
 
vi  /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

vi /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = xier123

vi /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = xier123
 
# Create the plugin symlink
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

# Sync the database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

systemctl restart openstack-nova-api.service

systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service; systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

neutron (compute) *** the tricky part

yum install openstack-neutron-linuxbridge ebtables ipset -y

# Configuration files
vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller

[DEFAULT]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:eth1    # name of the second NIC


[vxlan]
enable_vxlan = true
local_ip = 192.168.3.97    # compute node IP
l2_population = true


[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

vi /etc/nova/nova.conf 
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron

systemctl restart openstack-nova-compute.service

systemctl enable neutron-linuxbridge-agent.service; systemctl start neutron-linuxbridge-agent.service

# Verify on the controller node
openstack network agent list

dashboard

# Install the service
yum install openstack-dashboard -y
# Uncomment the following settings if they exist; add them if they do not
vim /etc/openstack-dashboard/local_settings

WEBROOT = '/dashboard/'
OPENSTACK_HOST = "controller"
ALLOWED_HOSTS = ['*']

SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'controller:11211',
    }
}


OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}

OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"

OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

TIME_ZONE = "Asia/Shanghai"

vi   /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}

systemctl restart httpd.service memcached.service
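After the restart, Horizon should be reachable under the WEBROOT configured above; log in with the Default domain and the admin credentials from admin.sh:

# In a browser
http://<controller IP>/dashboard/
# Domain: Default    User: admin    Password: admin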

If instance creation fails, check here

# Check which virtualization/machine types the host supports
virsh capabilities

vim /etc/nova/nova.conf
[libvirt]
hw_machine_type = x86_64=pc-i440fx-rhel7.2.0    # change the default machine type
cpu_mode = host-passthrough                     # pass the host CPU straight through to the guest
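Changes to /etc/nova/nova.conf on the compute node only take effect after the compute service is restarted:

systemctl restart libvirtd.service openstack-nova-compute.service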
