OpenStack Cluster Deployment: Basic Services


MariaDB cluster: deploying the MariaDB service

Note: perform the following steps on both servers.
[root@controller01 ~]# yum install libaio galera rsync lsof -y
[root@controller01 ~]# useradd mysql -s /sbin/nologin -M
[root@controller01 ~]# cd /usr/local/src/
[root@controller01 src]# tar xf mariadb-10.2.29-linux-x86_64.tar.gz -C /usr/local/
[root@controller01 src]# ln -s /usr/local/mariadb-10.2.29-linux-x86_64/ /usr/local/mysql
[root@controller01 src]# cd /usr/local/mysql/
[root@controller01 mysql]# cp support-files/my-medium.cnf my.cnf
[root@controller01 mysql]# chown -R mysql.mysql /usr/local/mariadb-10.2.29-linux-x86_64
[root@controller01 mysql]# mkdir -pv /DB/data/mysql
[root@controller01 mysql]# chown -R mysql.mysql /DB/data/mysql

Edit the system files

[root@controller01 mysql]# cp /usr/local/mysql/support-files/mysql.server  /etc/init.d/mysqld
[root@controller01 ~]# vim /etc/init.d/mysqld
basedir=/usr/local/mysql
datadir=/DB/data/mysql

[root@controller01 ~]# vim /etc/profile
export MYSQL_HOME=/usr/local/mysql
export PATH="$MYSQL_HOME/bin:$PATH"
[root@controller01 ~]# source /etc/profile
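
A quick sanity check that the PATH change took effect (the paths assume the tarball layout above):

[root@controller01 ~]# which mysql mysqld_safe
[root@controller01 ~]# mysql --version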

Edit the controller01 configuration file

[root@controller01 mysql]# cat /usr/local/mysql/my.cnf| grep -v "#" | grep -v "^$"
[client]
port            = 3306
socket          = /tmp/mysql.sock
[mysqld]
port            = 3306
socket          = /tmp/mysql.sock
basedir = /usr/local/mysql
datadir = /DB/data/mysql
skip-external-locking
key_buffer_size = 16M
max_allowed_packet = 1M
table_open_cache = 64
sort_buffer_size = 512K
net_buffer_length = 8K
read_buffer_size = 256K
read_rnd_buffer_size = 512K
myisam_sort_buffer_size = 8M
[galera]
wsrep_on=ON
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_address="gcomm://192.168.182.131,192.168.182.132"
wsrep_node_name= controller01
wsrep_node_address=192.168.182.131
binlog_format=row
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2
wsrep_slave_threads=1
innodb_flush_log_at_trx_commit=0
innodb_buffer_pool_size=120M
wsrep_sst_method=rsync
wsrep_causal_reads=ON
log-bin=mysql-bin
server-id       = 1
[mysqldump]
quick
max_allowed_packet = 16M
[mysql]
no-auto-rehash
[myisamchk]
key_buffer_size = 20M
sort_buffer_size = 20M
read_buffer = 2M
write_buffer = 2M
[mysqlhotcopy]
interactive-timeout

Edit the controller02 configuration file

[root@controller02 ~]# cat /usr/local/mysql/my.cnf| grep -v "#" | grep -v "^$"
[client]
port            = 3306
socket          = /tmp/mysql.sock
[mysqld]
port            = 3306
socket          = /tmp/mysql.sock
basedir = /usr/local/mysql
datadir = /DB/data/mysql
skip-external-locking
key_buffer_size = 16M
max_allowed_packet = 1M
table_open_cache = 64
sort_buffer_size = 512K
net_buffer_length = 8K
read_buffer_size = 256K
read_rnd_buffer_size = 512K
myisam_sort_buffer_size = 8M
[galera]
wsrep_on=ON
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_address="gcomm://192.168.182.131,192.168.182.132"
wsrep_node_name= controller02
wsrep_node_address=192.168.182.132
binlog_format=row
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2
wsrep_slave_threads=1
innodb_flush_log_at_trx_commit=0
innodb_buffer_pool_size=120M
wsrep_sst_method=rsync
wsrep_causal_reads=ON
log-bin=mysql-bin
server-id       = 1
[mysqldump]
quick
max_allowed_packet = 16M
[mysql]
no-auto-rehash
[myisamchk]
key_buffer_size = 20M
sort_buffer_size = 20M
read_buffer = 2M
write_buffer = 2M
[mysqlhotcopy]
interactive-timeout
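
The two files should be identical except for the node identity. A quick cross-check (a sketch, assuming root ssh access between the nodes) should report only the wsrep_node_name and wsrep_node_address lines:

[root@controller01 ~]# ssh 192.168.182.132 cat /usr/local/mysql/my.cnf | diff /usr/local/mysql/my.cnf -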

Configuring the MariaDB Galera Cluster. 1. Initialize the first MariaDB node (192.168.182.131):

[root@controller01 ~]# /usr/local/mysql/scripts/mysql_install_db --user=mysql --basedir=/usr/local/mysql --datadir=/DB/data/mysql --defaults-file=/usr/local/mysql/my.cnf

2. Bootstrap the cluster on node 192.168.182.131 (the very first start must use --wsrep-new-cluster; later starts must omit it):

[root@controller01 ~]# mysqld_safe --defaults-file=/usr/local/mysql/my.cnf  --user=mysql --wsrep-new-cluster &
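
A note for later restarts: if the whole cluster is ever shut down, bootstrap again from the node with the most advanced state. On recent Galera releases grastate.dat records this as safe_to_bootstrap; a quick check, assuming the datadir above:

[root@controller01 ~]# grep -E 'seqno|safe_to_bootstrap' /DB/data/mysql/grastate.dat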

3. On node 192.168.182.131, set the root password and apply the security settings:

[root@controller01 ~]# mysql_secure_installation
Enter current password for root (enter for none): (press Enter)
Set root password? [Y/n] y
Remove anonymous users? [Y/n] y
Disallow root login remotely? [Y/n] n
Remove test database and access to it? [Y/n] y
Reload privilege tables now? [Y/n] y

4. Start MariaDB on the other node (192.168.182.132):

[root@controller02 ~]# mysqld_safe --defaults-file=/usr/local/mysql/my.cnf --user=mysql  &
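
The new node first pulls a state snapshot (rsync SST, per wsrep_sst_method) and then catches up. Its progress can be watched from the first node; the state should move through Joining/Joined to Synced:

[root@controller01 ~]# mysql -uroot -p -e "SHOW STATUS LIKE 'wsrep_local_state_comment';"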

5. Check the services:

[root@controller01 ~]# netstat  -lntup|grep mysqld
tcp        0      0 0.0.0.0:4567            0.0.0.0:*               LISTEN      28589/mysqld
tcp6       0      0 :::3306                 :::*                    LISTEN      28589/mysqld
[root@controller02 ~]# netstat  -lntup|grep mysqld
tcp        0      0 0.0.0.0:4567            0.0.0.0:*               LISTEN      15798/mysqld
tcp6       0      0 :::3306                 :::*                    LISTEN      15798/mysqld
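
Besides 4567 (group communication) and 3306 shown above, Galera also uses 4568 (IST) and 4444 (SST). If firewalld is enabled, these ports need to be open on both nodes; a sketch (skip it if the firewall is off, as it appears to be here):

[root@controller01 ~]# firewall-cmd --permanent --add-port={3306,4567,4568,4444}/tcp
[root@controller01 ~]# firewall-cmd --reload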

Verifying the MariaDB Galera Cluster. Log in to the database and check:

[root@controller01 ~]# mysql -p
Enter password:
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 23
Server version: 10.2.24-MariaDB-log MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> SHOW STATUS LIKE 'wsrep_cluster_size';
+--------------------+-------+
| Variable_name      | Value |
+--------------------+-------+
| wsrep_cluster_size | 2     |
+--------------------+-------+
1 row in set (0.00 sec)

MariaDB [(none)]> show global status like 'ws%';
+-------------------------------+-------------------------------------------+
| Variable_name                 | Value                                     |
+-------------------------------+-------------------------------------------+
| wsrep_applier_thread_count    | 1                                         |
| wsrep_apply_oooe              | 0.000000                                  |
| wsrep_apply_oool              | 0.000000                                  |
| wsrep_apply_window            | 1.000000                                  |
| wsrep_causal_reads            | 1                                         |
| wsrep_cert_deps_distance      | 1.000000                                  |
| wsrep_cert_index_size         | 2                                         |
| wsrep_cert_interval           | 0.000000                                  |
| wsrep_cluster_conf_id         | 2                                         |
| wsrep_cluster_size            | 2                                         |
| wsrep_cluster_state_uuid      | 34db925e-0dd1-11ea-899d-de432d301557      |
| wsrep_cluster_status          | Primary                                   |
| wsrep_commit_oooe             | 0.000000                                  |
| wsrep_commit_oool             | 0.000000                                  |
| wsrep_commit_window           | 1.000000                                  |
| wsrep_connected               | ON                                        |
| wsrep_desync_count            | 0                                         |
| wsrep_evs_delayed             |                                           |
| wsrep_evs_evict_list          |                                           |
| wsrep_evs_repl_latency        | 0/0/0/0/0                                 |
| wsrep_evs_state               | OPERATIONAL                               |
| wsrep_flow_control_paused     | 0.000000                                  |
| wsrep_flow_control_paused_ns  | 0                                         |
| wsrep_flow_control_recv       | 0                                         |
| wsrep_flow_control_sent       | 0                                         |
| wsrep_gcomm_uuid              | 34da0224-0dd1-11ea-abba-97637e178a34      |
| wsrep_incoming_addresses      | 192.168.182.131:3306,192.168.182.132:3306 |
| wsrep_last_committed          | 3                                         |
| wsrep_local_bf_aborts         | 0                                         |
| wsrep_local_cached_downto     | 1                                         |
| wsrep_local_cert_failures     | 0                                         |
| wsrep_local_commits           | 0                                         |
| wsrep_local_index             | 0                                         |
| wsrep_local_recv_queue        | 0                                         |
| wsrep_local_recv_queue_avg    | 0.166667                                  |
| wsrep_local_recv_queue_max    | 2                                         |
| wsrep_local_recv_queue_min    | 0                                         |
| wsrep_local_replays           | 0                                         |
| wsrep_local_send_queue        | 0                                         |
| wsrep_local_send_queue_avg    | 0.000000                                  |
| wsrep_local_send_queue_max    | 1                                         |
| wsrep_local_send_queue_min    | 0                                         |
| wsrep_local_state             | 4                                         |
| wsrep_local_state_comment     | Synced                                    |
| wsrep_local_state_uuid        | 34db925e-0dd1-11ea-899d-de432d301557      |
| wsrep_protocol_version        | 7                                         |
| wsrep_provider_name           | Galera                                    |
| wsrep_provider_vendor         | Codership Oy <info@codership.com>         |
| wsrep_provider_version        | 3.16(r5c765eb)                            |
| wsrep_ready                   | ON                                        |
| wsrep_received                | 6                                         |
| wsrep_received_bytes          | 440                                       |
| wsrep_repl_data_bytes         | 1077                                      |
| wsrep_repl_keys               | 3                                         |
| wsrep_repl_keys_bytes         | 93                                        |
| wsrep_repl_other_bytes        | 0                                         |
| wsrep_replicated              | 3                                         |
| wsrep_replicated_bytes        | 1362                                      |
| wsrep_rollbacker_thread_count | 1                                         |
| wsrep_thread_count            | 2                                         |
+-------------------------------+-------------------------------------------+
60 rows in set (0.00 sec)

MariaDB [(none)]>
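
For routine checks the full 60-row listing is rarely needed; a compact one-liner covering the key indicators:

[root@controller01 ~]# mysql -uroot -p -e "SHOW GLOBAL STATUS LIKE 'wsrep%';" | grep -E 'wsrep_cluster_size|wsrep_local_state_comment|wsrep_ready'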

RabbitMQ cluster: installing RabbitMQ

# On all control nodes, use the aliyun EPEL mirror; controller01 is used as the example
[root@controller01 ~]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
[root@controller01 ~]# yum install erlang rabbitmq-server -y

# Enable start on boot
[root@controller01 ~]# systemctl enable rabbitmq-server.service

Building the RabbitMQ cluster

# Start the rabbitmq service on one control node first; controller01 is chosen here
[root@controller01 ~]# systemctl start rabbitmq-server.service
[root@controller01 ~]# rabbitmqctl cluster_status

# Distribute .erlang.cookie
[root@controller01 ~]# scp /var/lib/rabbitmq/.erlang.cookie root@192.168.182.132:/var/lib/rabbitmq/

# Fix the owner/group of the .erlang.cookie file on controller02 (shown here as the example)
[root@controller02 ~]# chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie

# Also check the .erlang.cookie permissions on all control nodes; the default is already 400, so no change is normally needed
[root@controller02 ~]# ll /var/lib/rabbitmq/.erlang.cookie

# Start the rabbitmq service on controller02
[root@controller02 ~]# systemctl start rabbitmq-server

# Build the cluster: controller02 joins as a ram node
[root@controller02 mysql]# rabbitmqctl stop_app
Stopping rabbit application on node rabbit@controller02
[root@controller02 mysql]# rabbitmqctl join_cluster --ram rabbit@controller01
Clustering node rabbit@controller02 with rabbit@controller01
[root@controller02 mysql]# rabbitmqctl start_app
Starting node rabbit@controller02
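
A ram node keeps its metadata in memory only, which is fine while a disc node (controller01 here) remains. Should you later want controller02 to persist metadata as well, the node type can be changed in place; a sketch:

[root@controller02 ~]# rabbitmqctl stop_app
[root@controller02 ~]# rabbitmqctl change_cluster_node_type disc
[root@controller02 ~]# rabbitmqctl start_app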

RabbitMQ account

# Create the account and set its password on any one node; controller01 is used as the example
[root@controller01 ~]# rabbitmqctl add_user openstack rabbitmq

# Set the new account's tags
[root@controller01 ~]# rabbitmqctl set_user_tags openstack administrator

# Grant the new account permissions
[root@controller01 ~]# rabbitmqctl set_permissions -p "/" openstack ".*" ".*" ".*"

# List accounts
[root@controller01 ~]# rabbitmqctl list_users

Mirrored-queue HA

# Enable high availability for mirrored queues
[root@controller01 ~]# rabbitmqctl set_policy ha-all "^" '{"ha-mode":"all"}'

# View the mirrored-queue policy
[root@controller01 ~]# rabbitmqctl list_policies 

# Install the web management plugin on all control nodes; controller01 is used as the example
[root@controller01 ~]# rabbitmq-plugins enable rabbitmq_management

# Access:
http://192.168.182.131:15672
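
The management plugin also exposes an HTTP API on the same port, which is handy for scripted health checks; a sketch using the account created above:

[root@controller01 ~]# curl -s -u openstack:rabbitmq http://192.168.182.131:15672/api/overview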

Memcached cluster

Memcached is stateless; deploy it independently on each control node and have the OpenStack service modules reference all of the control nodes' memcached instances.

# Install on all control nodes
[root@controller01 ~]#  yum install memcached python-memcached -y
[root@controller01 ~]# sed -i 's|127.0.0.1,::1|0.0.0.0|g' /etc/sysconfig/memcached 
[root@controller01 ~]# systemctl enable memcached.service
Created symlink from /etc/systemd/system/multi-user.target.wants/memcached.service to /usr/lib/systemd/system/memcached.service.
[root@controller01 ~]# systemctl start memcached.service
[root@controller01 ~]# systemctl status memcached.service
● memcached.service - memcached daemon
   Loaded: loaded (/usr/lib/systemd/system/memcached.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2019-11-25 10:09:55 CST; 12s ago
 Main PID: 3653 (memcached)
   CGroup: /system.slice/memcached.service
           └─3653 /usr/bin/memcached -p 11211 -u memcached -m 64 -c 1024 -l 0.0.0.0

Nov 25 10:09:55 controller01 systemd[1]: Started memcached daemon.
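
A quick cross-node functional check; memcached-tool ships with the memcached package (a sketch, run from either node):

[root@controller01 ~]# memcached-tool 192.168.182.132:11211 stats | head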

Keepalived and HAProxy cluster: a plain yum install is sufficient; the configuration follows. First, the keepalived configuration (keepalived.conf):

global_defs {
   router_id LVS_openstack
}


vrrp_instance VI_1 {
    state MASTER
    interface em3 # local NIC name
    virtual_router_id 61
    priority 120 # priority; must differ on each node
    advert_int 1
    mcast_src_ip 192.168.182.131 # local IP
    nopreempt # note: nopreempt only takes effect with state BACKUP
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        192.168.182.132
    }
    virtual_ipaddress {
        192.168.182.130/24 # VIP
    }
}
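
After keepalived is installed on both nodes with the configuration above (mirrored on controller02 with its own IP and a lower priority), start it and confirm the VIP landed on the MASTER; a sketch:

[root@controller01 ~]# systemctl enable keepalived && systemctl start keepalived
[root@controller01 ~]# ip addr show em3 | grep 192.168.182.130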

The HAProxy configuration (haproxy.cfg):

global
  chroot  /var/lib/haproxy
  daemon
  group  haproxy
  user  haproxy
  maxconn  4000
  pidfile  /var/run/haproxy.pid

defaults
  log  global
  maxconn  4000
  option  redispatch
  retries  3
  timeout  http-request 10s
  timeout  queue 1m
  timeout  connect 10s
  timeout  client 1m
  timeout  server 1m
  timeout  check 10s
# MySQL service;
#listen mysql_cluster
#  bind 192.168.182.130:3306
#  balance  source
#  mode    tcp
#  server controller01 192.168.182.131:3306 check inter 2000 rise 2 fall 5
#  server controller02 192.168.182.132:3306 backup check inter 2000 rise 2 fall 5
# HAProxy stats page
listen stats
  bind 0.0.0.0:1080
  mode http
  stats enable
  stats uri /
  stats realm OpenStack\ Haproxy
  stats auth admin:admin
  stats  refresh 30s
  stats  show-node
  stats  show-legends
  stats  hide-version

# Horizon service
 listen dashboard_cluster
  bind 192.168.182.130:80
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller01 192.168.182.131:80 check inter 2000 rise 2 fall 5
  server controller02 192.168.182.132:80 check inter 2000 rise 2 fall 5


# Provide an HA cluster access port to rabbitmq for the OpenStack services;
# if the services connect to the rabbitmq cluster nodes directly, this rabbitmq load balancing can be skipped
 listen rabbitmq_cluster
   bind 192.168.182.130:5673
   mode tcp
   option tcpka
   balance roundrobin
   timeout client  3h
   timeout server  3h
   option  clitcpka
   server controller01 192.168.182.131:5672 check inter 10s rise 2 fall 5
   server controller02 192.168.182.132:5672 check inter 10s rise 2 fall 5

# glance_api service
 listen glance_api_cluster
  bind 192.168.182.130:9292
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller01 192.168.182.131:9292 check inter 2000 rise 2 fall 5
  server controller02 192.168.182.132:9292 check inter 2000 rise 2 fall 5


# keystone_public_api service
 listen keystone_public_cluster
  bind 192.168.182.130:5000
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller01 192.168.182.131:5000 check inter 2000 rise 2 fall 5
  server controller02 192.168.182.132:5000 check inter 2000 rise 2 fall 5

# AWS EC2 API compatibility
# listen nova_ec2_api_cluster
#  bind 192.168.182.130:8773
#  balance  source
#  option  tcpka
#  option  tcplog
#  server controller01 192.168.182.131:8773 check inter 2000 rise 2 fall 5
#  server controller02 192.168.182.132:8773 check inter 2000 rise 2 fall 5

 listen nova_compute_api_cluster
  bind 192.168.182.130:8774
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller01 192.168.182.131:8774 check inter 2000 rise 2 fall 5
  server controller02 192.168.182.132:8774 check inter 2000 rise 2 fall 5

 listen nova_placement_cluster
  bind 192.168.182.130:8778
  balance  source
  option  tcpka
  option  tcplog
  server controller01 192.168.182.131:8778 check inter 2000 rise 2 fall 5
  server controller02 192.168.182.132:8778 check inter 2000 rise 2 fall 5

 listen nova_metadata_api_cluster
  bind 192.168.182.130:8775
  balance  source
  option  tcpka
  option  tcplog
  server controller01 192.168.182.131:8775 check inter 2000 rise 2 fall 5
  server controller02 192.168.182.132:8775 check inter 2000 rise 2 fall 5

 listen nova_vncproxy_cluster
  bind 192.168.182.130:6080
  balance  source
  option  tcpka
  option  tcplog
  server controller01 192.168.182.131:6080 check inter 2000 rise 2 fall 5
  server controller02 192.168.182.132:6080 check inter 2000 rise 2 fall 5

 listen neutron_api_cluster
  bind 192.168.182.130:9696
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller01 192.168.182.131:9696 check inter 2000 rise 2 fall 5
  server controller02 192.168.182.132:9696 check inter 2000 rise 2 fall 5

 listen cinder_api_cluster
  bind 192.168.182.130:8776
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller01 192.168.182.131:8776 check inter 2000 rise 2 fall 5
  server controller02 192.168.182.132:8776 check inter 2000 rise 2 fall 5
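
Before starting HAProxy, validate the syntax; -c only checks the file and does not start the daemon. A sketch, assuming the configuration above was saved as /etc/haproxy/haproxy.cfg on both nodes:

[root@controller01 ~]# haproxy -c -f /etc/haproxy/haproxy.cfg
[root@controller01 ~]# systemctl enable haproxy && systemctl start haproxy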

The lab environment later in this series does not use the VIP; when you follow along yourself, just substitute the VIP in manually and the cluster setup will work.
