GBase GCDW Installation and Configuration

Physical Machine Edition: Installation, Usage, and Maintenance Steps

Author: GBASE涂俊兵
Published: 2024-07-23 11:39:02

Cluster build version: GC-NoLicense-9.8.0.6.17_patch.1
Cluster character set: utf8mb4

# Cluster basic information
# Cluster management entry
GBASEDATA01    192.168.195.101
# GC IP list
GBASEDATA01    192.168.195.101
GBASEDATA02    192.168.195.103
GBASEDATA03    192.168.195.105
# FDB IP list
GBASEDATA01    192.168.195.101
GBASEDATA02    192.168.195.103
GBASEDATA03    192.168.195.105
# MinIO IP list
GBASEDATA01    192.168.195.101

Software package upload:
1) Upload the expect RPM package to /opt/gbase_workspace/tools
2) Upload the passwordless-SSH setup script autossh_par.sh to /opt/gbase_workspace/scripts/autossh
3) Upload the GC installation package for the matching version to /opt/gbase_workspace/setup
4) Upload the foundationdb-clients and foundationdb-server RPM packages to /opt/gbase_workspace/setup/soft
5) Upload the mcli and minio RPM packages to /opt/gbase_workspace/setup/soft
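
If the workspace directories do not already exist, they can be created up front; a minimal sketch using the paths from the list above:

[root@GBASEDATA01 ~]# mkdir -p /opt/gbase_workspace/tools /opt/gbase_workspace/scripts/autossh /opt/gbase_workspace/setup/soft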

Installation steps:

1. Preparation for a New Cluster Installation
1.1 Unpack the GC software package and verify its version

[root@GBASEDATA01 ~]# cd /opt/gbase_workspace/setup/; ll
[root@GBASEDATA01 setup]# tar -xvf GC-NoLicense-9.8.0.6.17_patch.1-redhat7-x86_64.tar.bz2
[root@GBASEDATA01 setup]# cat /opt/gbase_workspace/setup/gcinstall/BUILDINFO

1.2 Passwordless SSH Configuration for the root User

[root@GBASEDATA01 ~]# rpm -ivh --force /opt/gbase_workspace/tools/rpm/expect*.rpm
[root@GBASEDATA01 ~]# cd /opt/gbase_workspace/scripts/autossh
[root@GBASEDATA01 autossh]# echo -e "192.168.195.101\n192.168.195.103\n192.168.195.105" > ip.list
[root@GBASEDATA01 autossh]# cat ip.list
[root@GBASEDATA01 autossh]# ssh-keygen
[root@GBASEDATA01 autossh]# expect autossh_par.sh ip.list root gbase
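
The trust setup can be verified by confirming that every IP in ip.list is reachable over ssh without a password prompt; a quick check:

[root@GBASEDATA01 autossh]# for ip in $(cat ip.list); do ssh -o BatchMode=yes $ip hostname; done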

1.3 Configure /etc/hosts

[root@GBASEDATA01 autossh]# hostname
[root@GBASEDATA01 autossh]# cat /etc/hosts
[root@GBASEDATA01 autossh]# sed -i '/^127/s/$/ '`hostname`'/' /etc/hosts
[root@GBASEDATA01 autossh]# cat /etc/hosts
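
The sed above only appends the local hostname to the loopback line. Each node's /etc/hosts should also resolve the other cluster hostnames; an example of the expected entries, assuming the hostname-to-IP mapping from the cluster information list at the top:

192.168.195.101    GBASEDATA01
192.168.195.103    GBASEDATA02
192.168.195.105    GBASEDATA03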

1.4 C3 Tool Configuration (run as root)

[root@GBASEDATA01 autossh]# rpm -ivh --force /opt/gbase_workspace/tools/c3-5.1.2-1.noarch.rpm
[root@GBASEDATA01 autossh]# vim /etc/c3.conf
cluster coor {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
}
cluster wh1 {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
    192.168.195.103
}
cluster all {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
    192.168.195.103
    192.168.195.105
}
cluster fdb {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
    192.168.195.103
    192.168.195.105
}
[root@GBASEDATA01 autossh]# chmod 666 /etc/c3.conf
[root@GBASEDATA01 autossh]# cexec -p all: 'hostname'

1.5 Create the gbase User

[root@GBASEDATA01 autossh]# cexec all: 'useradd gbase'
[root@GBASEDATA01 autossh]# cexec all: 'echo gbase:gbase|chpasswd'
[root@GBASEDATA01 autossh]# cexec all: 'mkdir -p /opt'
[root@GBASEDATA01 autossh]# cexec all: 'chown -R gbase:gbase /opt'
[root@GBASEDATA01 autossh]# cexec all: 'chage -M 99999 gbase'
[root@GBASEDATA01 autossh]# chown -R gbase:gbase /opt/gbase_workspace

1.6 Environment Checks (run as root)

[root@GBASEDATA01 autossh]# cexec all: 'cat /etc/redhat-release'
[root@GBASEDATA01 autossh]# cexec all: 'nkvers'
[root@GBASEDATA01 autossh]# cexec all: 'cat /proc/version'
[root@GBASEDATA01 autossh]# cexec all: 'cat /etc/system-release'
[root@GBASEDATA01 autossh]# cexec all: 'df -h'
[root@GBASEDATA01 autossh]# cexec all: 'ip a|grep inet'
[root@GBASEDATA01 autossh]# cexec -p all: 'cat /proc/cpuinfo |grep "model name" | head -n1'
[root@GBASEDATA01 autossh]# cexec -p all: 'grep "model name" /proc/cpuinfo|wc -l'
[root@GBASEDATA01 autossh]# cexec -p all: 'grep "physical id" /proc/cpuinfo|sort|uniq|wc -l'
[root@GBASEDATA01 autossh]# cexec -p all: 'grep "cpu cores" /proc/cpuinfo|sort|uniq'
[root@GBASEDATA01 autossh]# cexec -p all: 'free -g'
[root@GBASEDATA01 autossh]# cexec -p all: 'hostname'

1.7 Passwordless SSH Configuration for the gbase User (run as gbase)

[root@GBASEDATA01 ~]# su - gbase
[gbase@GBASEDATA01 ~]$ cd /opt/gbase_workspace/scripts/autossh
[gbase@GBASEDATA01 autossh]$ cat ip.list
[gbase@GBASEDATA01 autossh]$ expect autossh_par.sh ip.list gbase gbase

1.8 Environment Initialization (run as root)

[root@GBASEDATA01 ~]# cd /opt/gbase_workspace/setup/gcinstall
[root@GBASEDATA01 gcinstall]# cpush all: SetSysEnv.py /tmp
[root@GBASEDATA01 gcinstall]# cexec all: 'python /tmp/SetSysEnv.py --installPrefix=/opt --dbaUser=gbase'


2. FoundationDB Software Installation and Configuration
2.1 Upload the FDB RPM Packages

[root@GBASEDATA01 ~]# cd /opt/gbase_workspace/setup/soft; ls | grep foundationdb
foundationdb-clients-6.3.13-1.el7.x86_64.rpm
foundationdb-server-6.3.13-1.el7.x86_64.rpm

2.2 Distribute the FDB RPM Packages

[root@GBASEDATA01 soft]# scp -r * root@192.168.195.103:/opt/
foundationdb-server-6.3.13-1.el7.x86_64.rpm 100%   17MB   8.4MB/s   00:02
foundationdb-clients-6.3.13-1.el7.x86_64.rpm 100%   19MB   6.2MB/s   00:03
[root@GBASEDATA01 soft]# scp -r * root@192.168.195.105:/opt/
minio-20240704142545.0.0-1.x86_64.rpm 100%   37MB   7.3MB/s   00:05
mcli-20240703201725.0.0-1.x86_64.rpm 100%   10MB   9.8MB/s   00:01

2.3 Install the FDB RPM Packages

[root@GBASEDATA01 soft]# cexec all: 'rpm -ivh /opt/foundationdb*.rpm'
************************* all *************************
--------- 192.168.195.101---------
Preparing...                          ########################################
Updating / installing...
foundationdb-clients-6.3.13-1         ########################################
foundationdb-server-6.3.13-1          ########################################
--------- 192.168.195.103---------
Preparing...                          ########################################
Updating / installing...
foundationdb-clients-6.3.13-1         ########################################
foundationdb-server-6.3.13-1          ########################################
--------- 192.168.195.105---------
Preparing...                          ########################################
Updating / installing...
foundationdb-clients-6.3.13-1         ########################################
foundationdb-server-6.3.13-1          ########################################

2.4 Verify the FDB Processes

[root@GBASEDATA01 soft]# cexec all: 'ps -ef|grep foundation | grep -v grep'
************************* all *************************
--------- 192.168.195.101---------
root      41856      1  0 00:10 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
foundat+  41858  41856  7 00:10 ?        00:00:15 /usr/sbin/fdbserver --cluster_file /etc/foundationdb/fdb.cluster --datadir /var/lib/foundationdb/data/4500 --listen_address public --logdir /var/log/foundationdb --public_address auto:4500
root      41895  41856  0 00:13 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
--------- 192.168.195.103---------
root      13211      1  0 00:10 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
foundat+  13213  13211  7 00:10 ?        00:00:15 /usr/sbin/fdbserver --cluster_file /etc/foundationdb/fdb.cluster --datadir /var/lib/foundationdb/data/4500 --listen_address public --logdir /var/log/foundationdb --public_address auto:4500
root      13297  13211  0 00:13 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
--------- 192.168.195.105---------
root      14005      1  0 00:10 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
foundat+  14007  14005  5 00:10 ?        00:00:13 /usr/sbin/fdbserver --cluster_file /etc/foundationdb/fdb.cluster --datadir /var/lib/foundationdb/data/4500 --listen_address public --logdir /var/log/foundationdb --public_address auto:4500
root      14094  14005  0 00:13 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize

2.5 Configure the FDB Unified Entry Point (public address)

[root@GBASEDATA01 data]# cd /usr/lib/foundationdb/
[root@GBASEDATA01 foundationdb]# ll
total 16580
-rwxr-xr-x. 1 root root 15962352 Jun  3  2021 fdbbackup
-rwxr-xr-x. 1 root root  1002136 Jun  3  2021 fdbmonitor
-rw-r--r--. 1 root root     4117 Jul 11 00:26 make_public.py
[root@GBASEDATA01 foundationdb]# cat /etc/foundationdb/fdb.cluster
42QHG5zi:I5shyNar@127.0.0.1:4500
[root@GBASEDATA01 foundationdb]# python /usr/lib/foundationdb/make_public.py -a 192.168.195.101
Redirecting to /bin/systemctl restart  foundationdb.service
/etc/foundationdb/fdb.cluster is now using address 192.168.195.101
[root@GBASEDATA01 foundationdb]# cat /etc/foundationdb/fdb.cluster
42QHG5zi:I5shyNar@192.168.195.101:4500
# Stop the cluster service on all nodes
[root@GBASEDATA01 foundationdb]# cexec all: 'systemctl stop foundationdb.service'

2.6 Create the FDB Cluster Directories

[root@GBASEDATA01 opt]# cexec all: 'mkdir -p /opt/foundationdb/data'
[root@GBASEDATA01 opt]# cexec all: 'mkdir -p /opt/foundationdb/log'
[root@GBASEDATA01 opt]# cexec all: 'chown -R foundationdb:foundationdb /etc/foundationdb'
[root@GBASEDATA01 opt]# cexec all: 'chown -R foundationdb:foundationdb /opt/foundationdb'
[root@GBASEDATA01 systemd]# cd /etc/foundationdb; ll
total 8
-rw-rw-r--. 1 foundationdb foundationdb   39 Jul 11 00:31 fdb.cluster
-rw-r--r--. 1 foundationdb foundationdb 1222 Apr 21  2021 foundationdb.conf

2.7 Adjust datadir in the FDB Configuration

[root@GBASEDATA01 foundationdb]# cat foundationdb.conf | grep datadir
datadir = /var/lib/foundationdb/data/$ID
[root@GBASEDATA01 foundationdb]# sed -i 's#datadir = /var/lib/foundationdb/data#datadir = /opt/foundationdb/data#g' foundationdb.conf; cat foundationdb.conf | grep datadir
datadir = /opt/foundationdb/data/$ID

2.8 Adjust logdir in the FDB Configuration

[root@GBASEDATA01 foundationdb]# cat foundationdb.conf | grep logdir
logdir = /var/log/foundationdb
logdir = /var/log/foundationdb
[root@GBASEDATA01 foundationdb]# sed -i 's#logdir = /var/log/foundationdb#logdir = /opt/foundationdb/log#g' foundationdb.conf; cat foundationdb.conf | grep logdir
logdir = /opt/foundationdb/log
logdir = /opt/foundationdb/log

2.9 Comment Out the backup_agent Sections

[root@GBASEDATA01 foundationdb]# cat foundationdb.conf | grep backup
[backup_agent]
command = /usr/lib/foundationdb/backup_agent/backup_agent
[backup_agent.1]
[root@GBASEDATA01 foundationdb]# sed -i '/backup/ s/^\(.*\)$/#\1/g' foundationdb.conf; cat foundationdb.conf | grep backup
#[backup_agent]
#command = /usr/lib/foundationdb/backup_agent/backup_agent
#[backup_agent.1]

2.10 Push the Configuration to the Other Nodes

[root@GBASEDATA01 foundationdb]# cpush all: /etc/foundationdb/* /etc/foundationdb/
[root@GBASEDATA01 foundationdb]# cexec all: 'cat /etc/foundationdb/fdb.cluster'
************************* all *************************
--------- 192.168.195.101---------
42QHG5zi:I5shyNar@192.168.195.101:4500
--------- 192.168.195.103---------
42QHG5zi:I5shyNar@192.168.195.101:4500
--------- 192.168.195.105---------
42QHG5zi:I5shyNar@192.168.195.101:4500
[root@GBASEDATA01 foundationdb]# cexec all: 'cat /etc/foundationdb/foundationdb.conf | grep dir'
************************* all *************************
--------- 192.168.195.101---------
datadir = /opt/foundationdb/data/$ID
logdir = /opt/foundationdb/log
logdir = /opt/foundationdb/log
--------- 192.168.195.103---------
datadir = /opt/foundationdb/data/$ID
logdir = /opt/foundationdb/log
logdir = /opt/foundationdb/log
--------- 192.168.195.105---------
datadir = /opt/foundationdb/data/$ID
logdir = /opt/foundationdb/log
logdir = /opt/foundationdb/log

2.11 Configure the FDB systemd Service

[root@GBASEDATA01 foundationdb]# cat /usr/lib/systemd/system/foundationdb.service | grep -C1 KillMode
ExecStart=/usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
KillMode=process
[root@GBASEDATA01 foundationdb]#
[root@GBASEDATA01 foundationdb]# sed -i '/KillMode/a\Restart=on-failure' /usr/lib/systemd/system/foundationdb.service
[root@GBASEDATA01 foundationdb]#
[root@GBASEDATA01 foundationdb]# cat /usr/lib/systemd/system/foundationdb.service | grep -C1 KillMode
ExecStart=/usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
KillMode=process
Restart=on-failure
[root@GBASEDATA01 foundationdb]# cpush all: /usr/lib/systemd/system/foundationdb.service /usr/lib/systemd/system/
[root@GBASEDATA01 foundationdb]# cexec all: 'cat /usr/lib/systemd/system/foundationdb.service | grep -C1 KillMode'
************************* all *************************
--------- 192.168.195.101---------
ExecStart=/usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
KillMode=process
Restart=on-failure
--------- 192.168.195.103---------
ExecStart=/usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
KillMode=process
Restart=on-failure
--------- 192.168.195.105---------
ExecStart=/usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
KillMode=process
Restart=on-failure

2.12 Start the FDB Service

[root@GBASEDATA01 foundationdb]# cexec all: 'systemctl enable foundationdb.service'
[root@GBASEDATA01 foundationdb]# cexec all: 'systemctl start foundationdb.service'
************************* all *************************
--------- 192.168.195.101---------
Warning: foundationdb.service changed on disk. Run 'systemctl daemon-reload' to reload units.
--------- 192.168.195.103---------
Warning: foundationdb.service changed on disk. Run 'systemctl daemon-reload' to reload units.
--------- 192.168.195.105---------
Warning: foundationdb.service changed on disk. Run 'systemctl daemon-reload' to reload units.
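
The warning above appears because the unit file was modified after systemd loaded it; reloading the daemon on all nodes clears it:

[root@GBASEDATA01 foundationdb]# cexec all: 'systemctl daemon-reload'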
[root@GBASEDATA01 foundationdb]# cexec all: 'ps -ef|grep foundation | grep -v grep'
************************* all *************************
--------- 192.168.195.101---------
root      42947      1  0 01:01 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
foundat+  42948  42947  1 01:01 ?        00:00:00 /usr/sbin/fdbserver --cluster_file /etc/foundationdb/fdb.cluster --datadir /opt/foundationdb/data/4500 --listen_address public --logdir /opt/foundationdb/log --public_address auto:4500
--------- 192.168.195.103---------
root      14942      1  0 01:01 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
foundat+  14943  14942  1 01:01 ?        00:00:00 /usr/sbin/fdbserver --cluster_file /etc/foundationdb/fdb.cluster --datadir /opt/foundationdb/data/4500 --listen_address public --logdir /opt/foundationdb/log --public_address auto:4500
--------- 192.168.195.105---------
root      16236      1  0 01:01 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
foundat+  16237  16236  1 01:01 ?        00:00:00 /usr/sbin/fdbserver --cluster_file /etc/foundationdb/fdb.cluster --datadir /opt/foundationdb/data/4500 --listen_address public --logdir /opt/foundationdb/log --public_address auto:4500

2.13 Configure the FDB Database

[root@GBASEDATA01 foundationdb]# fdbcli
Using cluster file `fdb.cluster'.
The database is unavailable; type `status' for more information.
Welcome to the fdbcli. For help, type `help'.
# Check the cluster status
fdb> status details
Using cluster file `fdb.cluster'.
The coordinator(s) have no record of this database. Either the coordinator
addresses are incorrect, the coordination state on those machines is missing, or
no database has been created.
 192.168.195.101:4500  (reachable)
Unable to locate the data distributor worker.
Unable to locate the ratekeeper worker.
# Configure the storage engine: ssd or memory
fdb> configure new single ssd
Database created
# Configure coordinators for high availability
fdb> coordinators 192.168.195.101:4500 192.168.195.103:4500 192.168.195.105:4500
Coordination state changed
# Configure the redundancy mode
fdb> configure double
Configuration changed
# Configure cluster process counts
fdb> configure proxies=8
Configuration changed
fdb> configure resolvers=1
Configuration changed
fdb> configure logs=30
Configuration changed
fdb> exit
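
For a quick non-interactive sanity check that the configuration took effect, fdbcli accepts a one-off command via --exec; a minimal sketch:

[root@GBASEDATA01 foundationdb]# fdbcli --exec 'status minimal'   # should report: The database is available.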

2.14 View FDB Cluster Information

[root@GBASEDATA01 foundationdb]# fdbcli
Using cluster file `fdb.cluster'.
The database is available.
Welcome to the fdbcli. For help, type `help'.
fdb> status details
Using cluster file `fdb.cluster'.
Configuration:
 Redundancy mode        - double
 Storage engine         - ssd-2
 Coordinators           - 3
 Desired Proxies        - 8
 Desired Resolvers      - 1
 Desired Logs           - 30
 Usable Regions         - 1
Cluster:
 FoundationDB processes - 3
 Zones                  - 3
 Machines               - 3
 Memory availability    - 2.3 GB per process on machine with least available
                          >>>>> (WARNING: 4.0 GB recommended) <<<<<
 Fault Tolerance        - 0 machines (1 without data loss)
 Server time            - 07/11/24 01:09:01
Data:
 Replication health     - Healthy
 Moving data            - 0.000 GB
 Sum of key-value sizes - 0 MB
 Disk space used        - 629 MB
Operating space:
 Storage server         - 13.4 GB free on most full server
 Log server             - 13.4 GB free on most full server
Workload:
 Read rate              - 7 Hz
 Write rate             - 0 Hz
 Transactions started   - 2 Hz
 Transactions committed - 0 Hz
 Conflict rate          - 0 Hz
Backup and DR:
 Running backups        - 0
 Running DRs            - 0
Process performance details:
 192.168.195.101:4500   ( 11% cpu; 10% machine; 0.000 Gbps;  0% disk IO; 0.4 GB / 2.4 GB RAM  )
 192.168.195.103:4500   ( 11% cpu;  9% machine; 0.000 Gbps;  0% disk IO; 0.4 GB / 2.6 GB RAM  )
 192.168.195.105:4500   ( 17% cpu; 14% machine; 0.000 Gbps;  0% disk IO; 0.4 GB / 2.3 GB RAM  )
Coordination servers:
 192.168.195.101:4500  (reachable)
 192.168.195.103:4500  (reachable)
 192.168.195.105:4500  (reachable)
Client time: 07/11/24 01:09:00
fdb> exit
[root@GBASEDATA01 foundationdb]#

2.15 Start the FDB Backup Agent

[root@GBASEDATA02 opt]# cd foundationdb
[root@GBASEDATA02 foundationdb]# ll
total 4
drwxr-xr-x. 3 foundationdb foundationdb   17 Jul 11 01:01 data
drwxr-xr-x. 2 foundationdb foundationdb 4096 Jul 11 01:21 log
[root@GBASEDATA02 foundationdb]# mkdir -p /opt/foundationdb/backup_data
[root@GBASEDATA02 foundationdb]# mkdir -p /opt/foundationdb/backup_log
[root@GBASEDATA02 foundationdb]# chown -R foundationdb:foundationdb /opt/foundationdb/backup_data/
[root@GBASEDATA02 foundationdb]# chown -R foundationdb:foundationdb /opt/foundationdb/backup_log/
[root@GBASEDATA02 foundationdb]# ll
total 4
drwxr-xr-x. 2 foundationdb foundationdb    6 Jul 11 01:23 backup_data
drwxr-xr-x. 2 foundationdb foundationdb    6 Jul 11 01:23 backup_log
drwxr-xr-x. 3 foundationdb foundationdb   17 Jul 11 01:01 data
drwxr-xr-x. 2 foundationdb foundationdb 4096 Jul 11 01:21 log
[root@GBASEDATA02 foundationdb]# cd /etc/foundationdb/
[root@GBASEDATA02 foundationdb]# cat foundationdb.conf | grep backup
#[backup_agent]
#command = /usr/lib/foundationdb/backup_agent/backup_agent
#[backup_agent.1]
[root@GBASEDATA02 foundationdb]# sed -i "/^#.*backup.*/s/^#//" foundationdb.conf; cat foundationdb.conf | grep backup
[backup_agent]
command = /usr/lib/foundationdb/backup_agent/backup_agent
[backup_agent.1]
[root@GBASEDATA02 foundationdb]# vim foundationdb.conf
[root@GBASEDATA02 foundationdb]# cat foundationdb.conf | grep backup
[backup_agent]
command = /usr/lib/foundationdb/backup_agent/backup_agent
logdir = /opt/foundationdb/backup_log
[backup_agent.1]
[root@GBASEDATA02 foundationdb]# cd /opt/foundationdb/backup_log
[root@GBASEDATA02 backup_log]# systemctl start foundationdb.service
Warning: foundationdb.service changed on disk. Run 'systemctl daemon-reload' to reload units.
[root@GBASEDATA02 backup_log]# ps -ef|grep foundation | grep -v grep
root      16044      1  0 01:53 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
foundat+  16046  16044 15 01:53 ?        00:00:07 /usr/sbin/fdbserver --cluster_file /etc/foundationdb/fdb.cluster --datadir /opt/foundationdb/data/4500 --listen_address public --logdir /opt/foundationdb/log --public_address auto:4500
root      16049  16044  0 01:53 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
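
The steps above enable the backup agent on GBASEDATA02 only. If an agent is wanted on every node, the same directories and the edited foundationdb.conf can be replicated with the C3 tools; a hedged sketch, assuming the edited file has first been copied back to GBASEDATA01 (where c3 runs):

[root@GBASEDATA01 ~]# cexec all: 'mkdir -p /opt/foundationdb/backup_data /opt/foundationdb/backup_log'
[root@GBASEDATA01 ~]# cexec all: 'chown -R foundationdb:foundationdb /opt/foundationdb'
[root@GBASEDATA01 ~]# cpush all: /etc/foundationdb/foundationdb.conf /etc/foundationdb/
[root@GBASEDATA01 ~]# cexec all: 'systemctl daemon-reload; systemctl restart foundationdb.service'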

 

3. MinIO Software Installation and Configuration (single node)
3.1 Verify the MinIO RPM Packages

[root@GBASEDATA01 ~]# cd /opt/gbase_workspace/setup/soft; ls | grep -E "mcli|minio"
mcli-20240703201725.0.0-1.x86_64.rpm
minio-20240704142545.0.0-1.x86_64.rpm

3.2 Install the MinIO RPM Packages

[root@GBASEDATA01 soft]# rpm -ivh *.rpm
Preparing...                          ################################# [100%]
Updating / installing...
  1:minio-20240704142545.0.0-1       ################################# [ 50%]
  2:mcli-20240703201725.0.0-1        ################################# [100%]
[root@GBASEDATA01 soft]# rpm -qa|grep -E "minio|mcli"
mcli-20240703201725.0.0-1.x86_64
minio-20240704142545.0.0-1.x86_64
[root@GBASEDATA01 soft]# mkdir -p /opt/minio/data
[root@GBASEDATA01 soft]# cd /opt/minio

3.3 Start the Service

[root@GBASEDATA01 minio]# minio server data &
[1] 44170
[root@GBASEDATA01 minio]# Formatting 1st pool, 1 set(s), 1 drives per set.
WARNING: Host local has more than 0 drives of set. A host failure will result in data becoming unavailable.
MinIO Object Storage Server
Copyright: 2015-2024 MinIO, Inc.
License: GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html
Version: RELEASE.2024-07-04T14-25-45Z (go1.22.5 linux/amd64)
API: http://192.168.195.102:9000  http://192.168.195.101:9000  http://192.168.122.1:9000  http://127.0.0.1:9000
  RootUser: minioadmin
  RootPass: minioadmin
WebUI: http://192.168.195.102:49260 http://192.168.195.101:49260 http://192.168.122.1:49260 http://127.0.0.1:49260
  RootUser: minioadmin
  RootPass: minioadmin
CLI: https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
  $ mc alias set 'myminio' 'http://192.168.195.102:9000' 'minioadmin' 'minioadmin'
Docs: https://min.io/docs/minio/linux/index.html
Status:         1 Online, 0 Offline.
STARTUP WARNINGS:
- Detected Linux kernel version older than 4.0.0 release, there are some known potential performance problems with this kernel version. MinIO recommends a minimum of 4.x.x linux kernel version for best performance
- Detected default credentials 'minioadmin:minioadmin', we recommend that you change these values with 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD' environment variables
- The standard parity is set to 0. This can lead to data loss.
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ You are running an older version of MinIO released 6 days before the latest release ┃
┃ Update: Run `mc admin update ALIAS`                                                 ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛

[root@GBASEDATA01 minio]# ps -ef | grep minio
root      44170   4308  7 02:27 pts/1    00:00:04 minio server data
root      44186   4308  0 02:27 pts/1    00:00:00 grep --color=auto minio
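
Note that starting the server with a trailing & ties it to the login shell, so it exits when the session closes. Until the rc.local autostart in 3.8 is in place, a more survivable alternative is a nohup launch (the log path here is illustrative):

[root@GBASEDATA01 minio]# nohup /usr/local/bin/minio server /opt/minio/data > /opt/minio/minio.log 2>&1 &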

3.4 View Configuration with mcli

[root@GBASEDATA01 minio]# mcli config host ls
mcli: Configuration written to `/root/.mcli/config.json`. Please update your access credentials.
mcli: Successfully created `/root/.mcli/share`.
mcli: Initialized share uploads `/root/.mcli/share/uploads.json` file.
mcli: Initialized share downloads `/root/.mcli/share/downloads.json` file.
gcs
 URL       : https://storage.googleapis.com
 AccessKey : YOUR-ACCESS-KEY-HERE
 SecretKey : YOUR-SECRET-KEY-HERE
 API       : S3v2
 Path      : dns
local
 URL       : http://localhost:9000
 AccessKey :
 SecretKey :
 API       :
 Path      : auto
play
 URL       : https://play.min.io
 AccessKey : Q3AM3UQ867SPQQA43P2F
 SecretKey : zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
 API       : S3v4
 Path      : auto
s3
 URL       : https://s3.amazonaws.com
 AccessKey : YOUR-ACCESS-KEY-HERE
 SecretKey : YOUR-SECRET-KEY-HERE
 API       : S3v4
 Path      : dns

3.5 Create a Data Bucket with mcli

[root@GBASEDATA01 minio]# mcli mb data/test
Bucket created successfully `data/test`.

3.6 Configure the Access Key / Secret Key Alias with mcli

[root@GBASEDATA01 minio]# mcli alias set 'tujunbingminio' 'http://192.168.195.101:9000' 'minioadmin' 'minioadmin'
Added `tujunbingminio` successfully.
[root@GBASEDATA01 minio]# mcli config host ls
gcs
 URL       : https://storage.googleapis.com
 AccessKey : YOUR-ACCESS-KEY-HERE
 SecretKey : YOUR-SECRET-KEY-HERE
 API       : S3v2
 Path      : dns
local
 URL       : http://localhost:9000
 AccessKey :
 SecretKey :
 API       :
 Path      : auto
play
 URL       : https://play.min.io
 AccessKey : Q3AM3UQ867SPQQA43P2F
 SecretKey : zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
 API       : S3v4
 Path      : auto
s3
 URL       : https://s3.amazonaws.com
 AccessKey : YOUR-ACCESS-KEY-HERE
 SecretKey : YOUR-SECRET-KEY-HERE
 API       : S3v4
 Path      : dns
tujunbingminio
 URL       : http://192.168.195.101:9000
 AccessKey : minioadmin
 SecretKey : minioadmin
 API       : s3v4
 Path      : auto

3.7 List Buckets and Change Permissions

[root@GBASEDATA01 minio]# mcli ls tujunbingminio
[0000-12-31 16:07:02 LMT]     0B test/
[root@GBASEDATA01 minio]# mcli anonymous set public tujunbingminio/test
Access permission for `tujunbingminio/test` is set to `public`
[root@GBASEDATA01 minio]# mcli anonymous get tujunbingminio/test
Access permission for `tujunbingminio/test` is `public`
[root@GBASEDATA01 minio]# mcli mb data/loaddata
Bucket created successfully `data/loaddata`.
[root@GBASEDATA01 minio]# mcli mb data/backup
Bucket created successfully `data/backup`.
[root@GBASEDATA01 data]# mcli ls tujunbingminio
[0000-12-31 16:07:02 LMT]     0B backup/
[0000-12-31 16:07:02 LMT]     0B loaddata/
[0000-12-31 16:07:02 LMT]     0B test/
[root@GBASEDATA01 data]# mcli anonymous set public tujunbingminio/loaddata
Access permission for `tujunbingminio/loaddata` is set to `public`
[root@GBASEDATA01 minio]# mcli anonymous set public tujunbingminio/backup
Access permission for `tujunbingminio/backup` is set to `public`

3.8 Configure Autostart on Boot

[root@GBASEDATA01 minio]# echo "/usr/local/bin/minio server /opt/minio/data &" >> /etc/rc.d/rc.local
[root@GBASEDATA01 minio]# cat /etc/rc.d/rc.local | grep minio
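
On RHEL 7, /etc/rc.d/rc.local is only executed at boot when it carries the execute bit, so set it here as well (section 4.6 does the same before adding the gbase entries):

[root@GBASEDATA01 minio]# chmod +x /etc/rc.d/rc.local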

 

4. GC Software Installation and Configuration
4.1 Configure the demo.options File

[root@GBASEDATA01 ~]# su - gbase
[gbase@GBASEDATA01 ~]$ cd /opt/gbase_workspace/setup/gcinstall
[gbase@GBASEDATA01 gcinstall]$ cat demo.options
installPrefix= /opt
coordinateHost = 192.168.195.101
coordinateHostNodeID = 101
dataHost = 192.168.195.101,192.168.195.103,192.168.195.105
#existCoordinateHost =
#existDataHost =
#existGcwareHost=
gcwareHost = 192.168.195.101
gcwareHostNodeID = 101
dbaUser = gbase
dbaGroup = gbase
dbaPwd = 'gbase'
gcluster_instance_name=gc1
instance_root_name=u1
instance_root_password=gbase20110531
rootPwd = 'gbase'
account_admin_password=Gc@2024
#rootPwdFile = rootPwd.json
characterSet = utf8mb4
#dbPort = 5258
#sshPort = 22
# 's3' or 'hdfs'
GC_STORAGE_STYLE = s3
GC_S3_BUCKET=test
GC_S3_ENDPOINT=192.168.195.101:9000
GC_S3_ACCESS_KEY_ID=minioadmin
GC_S3_SECRET_KEY=minioadmin
GC_S3_REGION=''
#GC_HDFS_NAMENODES=192.168.151.100:50070,192.168.151.101:50070
#GC_HDFS_URI=hdp://gbase@192.168.151.100:50070/
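
Before running the installer, it is worth confirming that the S3 endpoint and bucket named in demo.options are reachable; a minimal check against MinIO's liveness endpoint, assuming the MinIO service from section 3 is running:

[gbase@GBASEDATA01 gcinstall]$ curl -s -o /dev/null -w "%{http_code}\n" http://192.168.195.101:9000/minio/health/live   # expect 200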

4.2 Install the GC Software

[gbase@GBASEDATA01 gcinstall]$ python gcinstall.py --silent=demo.options
[gbase@GBASEDATA01 gcinstall]$ exit
[root@GBASEDATA01 ~]# cexec all: 'python /tmp/SetSysEnv.py --installPrefix=/opt --dbaUser=gbase'

4.3 Check the GC Cluster Status

[root@GBASEDATA01 ~]# su - gbase
[gbase@GBASEDATA01 ~]$ gcadmin
CLUSTER STATE:         ACTIVE

=======================================
|  GBASE GCWARE CLUSTER INFORMATION   |
=======================================
| NodeName |    IpAddress    | gcware |
---------------------------------------
| gcware1  | 192.168.195.101 |  OPEN  |
---------------------------------------
=========================================================
|         GBASE COORDINATOR CLUSTER INFORMATION         |
=========================================================
|   NodeName   |    IpAddress    | gcluster | DataState |
---------------------------------------------------------
| coordinator1 | 192.168.195.101 |   OPEN   |     0     |
---------------------------------------------------------
===================================================
|    GBASE CLUSTER FREE DATA NODE INFORMATION     |
===================================================
| NodeName  |    IpAddress    | gnode | DataState |
---------------------------------------------------
| FreeNode1 | 192.168.195.103 | OPEN  |     0     |
---------------------------------------------------
| FreeNode2 | 192.168.195.101 | OPEN  |     0     |
---------------------------------------------------
| FreeNode3 | 192.168.195.105 | OPEN  |     0     |
---------------------------------------------------

0 warehouse
1 coordinator node
3 free data node
[gbase@GBASEDATA01 ~]$ account --show
account:

*************************** 1. row ***************************
accountname         :gc1
company             :
mobile              :
email               :
nickname            :
disabled            :N
coor_addr           :192.168.195.101
storage_style       :s3
s3_accessKey        :minioadmin
s3_secretKey        :minioadmin
s3_endPoint         :192.168.195.101:9000
s3_region           :
s3_bucket           :test
comment             :
hdfs_namenodes      :None
hdfs_uri            :None
schedule_type       :
1 rows in set
SUCCESS

4.4 Create a Warehouse

[gbase@GBASEDATA01 ~]$ cd /opt/gbase_workspace/setup/gcinstall
[gbase@GBASEDATA01 gcinstall]$ gcadmin createwh e wh1.xml
[gbase@GBASEDATA01 gcinstall]$ vim wh1.xml
<?xml version='1.0' encoding="utf-8"?>
<servers>
   <rack>
        <node ip="192.168.195.101"/>
        <node ip="192.168.195.103"/>
   </rack>
   <wh_name name="wh1"/>
   <comment message="wh1"/>
</servers>
[gbase@GBASEDATA01 gcinstall]$ gcadmin createwh wh1.xml
[gbase@GBASEDATA01 gcinstall]$ gcadmin
CLUSTER STATE:         ACTIVE

=======================================
|  GBASE GCWARE CLUSTER INFORMATION   |
=======================================
| NodeName |    IpAddress    | gcware |
---------------------------------------
| gcware1  | 192.168.195.101 |  OPEN  |
---------------------------------------
=========================================================
|         GBASE COORDINATOR CLUSTER INFORMATION         |
=========================================================
|   NodeName   |    IpAddress    | gcluster | DataState |
---------------------------------------------------------
| coordinator1 | 192.168.195.101 |   OPEN   |     0     |
---------------------------------------------------------

============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|     wh1      | wh1       |
----------------------------
===================================================
|    GBASE CLUSTER FREE DATA NODE INFORMATION     |
===================================================
| NodeName  |    IpAddress    | gnode | DataState |

---------------------------------------------------
| FreeNode3 | 192.168.195.105 | OPEN  |     0     |
---------------------------------------------------

1 warehouse
1 coordinator node
1 free data node
[gbase@GBASEDATA01 gcinstall]$ gcadmin showcluster wh wh1
CLUSTER STATE:         ACTIVE

============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|     wh1      | wh1       |
----------------------------
===============================================================
|               WAREHOUSE DATA NODE INFORMATION               |
===============================================================
| NodeName |                IpAddress                 | gnode |
---------------------------------------------------------------
|  node1   |             192.168.195.101              | OPEN  |
---------------------------------------------------------------
|  node2   |             192.168.195.103              | OPEN  |
---------------------------------------------------------------

2 data node

4.5 Restart the Cluster (run as gbase)

[gbase@GBASEDATA01 config]$ cexec all: 'gcluster_services all stop'
[gbase@GBASEDATA01 config]$ cexec all: 'gcluster_services all start'
[gbase@GBASEDATA01 config]$ gcadmin showdistribution
[gbase@GBASEDATA01 config]$ gcadmin showcluster

4.6 Configure Autostart on Boot (run as root)

[gbase@GBASEDATA01 config]$ exit
[root@GBASEDATA01 config]# cexec all: "python /tmp/SetSysEnv.py -c --installPrefix=/opt --dbaUser=gbase"
[root@GBASEDATA01 config]# cexec all: 'cat /etc/rc.d/rc.local | grep gbase'
[root@GBASEDATA01 config]# cexec all: 'chmod +x /etc/rc.d/rc.local'
[root@GBASEDATA01 config]# cexec all: "sed -i '/gbase/d' /etc/rc.d/rc.local"
[root@GBASEDATA01 config]# cexec coor: 'echo "su - gbase -c \"gcware_services all start\"" >> /etc/rc.d/rc.local'
[root@GBASEDATA01 config]# cexec all: 'echo "su - gbase -c \"gcluster_services all start\"" >> /etc/rc.d/rc.local'

4.7 Create Databases and Tables

[gbase@GBASEDATA01 gcinstall]$ gccli -uu1 -pgbase20110531
gbase> set password for u1=password('X@gbase2024');
gbase> use warehouse wh1;
gbase> show databases;
gbase> create database tdb;
# Create a user (equivalently: create user u2 identified by 'gbase';)
gbase> create user u2;
gbase> set password for u2=password('gbase');
# Create a role
gbase> create role test_role;                     # create the role
gbase> grant role test_role to role systemadmin;  # attach the role to the privilege tree
# Grant privileges to the role
gbase> grant all on tdb.* to role test_role;                # grant the role privileges on the database object
gbase> grant operate_warehouse on *.*.* to role test_role;  # grant the role warehouse (compute) privileges
# Grant the role to a user
gbase> grant role test_role to user u2;           # grant the role's privileges to the user
gbase> alter user u2 default_role = 'test_role';  # set the default role; the user's subsequent operations run with test_role privileges
# Create a business table
gbase> create table tdb.tb1 (id int, name varchar(10));
gbase> insert into tdb.tb1 values(1,'tujunbing');
gbase> select * from tdb.tb1;
Role hierarchy: accountadmin > systemadmin > custom roles > publicadmin
Example: create user u3 identified by 'gbase' default_role='test_role' default_warehouse='wh1';

4.8 Create Roles and Users, and Grant Privileges

[gbase@GBASEDATA01 gcinstall]$ gccli -uu1 -pX@gbase2024
GBase client 9.8.0.6.17_patch.19971bae. Copyright (c) 2004-2024, GBase.  All Rights Reserved.
gbase> create role role_exam;
gbase> grant role role_exam to role systemadmin;
gbase> grant CREATE,RELOAD,PROCESS,CREATE_FILE_FORMAT,FILE_FORMAT_USAGE,CREATE_STAGE,STAGE_WRITE,STAGE_READ,SHOW DATABASES,OPERATE_WAREHOUSE,USAGE on *.*.* to role role_exam;
gbase> select * from cloud.role where rolename = 'role_exam'\G;
gbase> 
gbase> create user tujunbing identified by 'X@gbase2024' default_role='role_exam' default_warehouse='wh_exam';
gbase> select * from cloud.user where user='tujunbing'\G;
gbase> select * from cloud.user_check where user='tujunbing'\G;

4.9 Adjust the Password Policy

# Add the following password-policy parameters to gbase_8a_gcluster.cnf on the management (coordinator) nodes and gbase_8a_gbase.cnf on the data nodes; a hedged distribution sketch follows the parameter list
password_format_option=15
password_min_length=8
password_reuse_max=3
password_life_time=90
login_attempt_max=10
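# A hedged sketch of distributing the edited files with the C3 tools before the restart below;
# the config paths assume installPrefix=/opt (gcluster on coordinator nodes, gnode on data nodes)
# and that c3 is usable by the gbase user; verify both on your system
[gbase@GBASEDATA01 gcinstall]$ cpush coor: /opt/gcluster/config/gbase_8a_gcluster.cnf /opt/gcluster/config/
[gbase@GBASEDATA01 gcinstall]$ cpush all: /opt/gnode/config/gbase_8a_gbase.cnf /opt/gnode/config/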
# Restart the cluster services
[gbase@GBASEDATA01 gcinstall]$ cexec all: 'gcluster_services all restart'
[gbase@GBASEDATA01 gcinstall]$ gccli -utujunbing -pX@gbase2024 -e "show variables like '%password_format_option%'"
[gbase@GBASEDATA01 gcinstall]$ gccli -utujunbing -pX@gbase2024 -e "show variables like '%password_min_length%'"
[gbase@GBASEDATA01 gcinstall]$ gccli -utujunbing -pX@gbase2024 -e "show variables like '%password_reuse_max%'"
[gbase@GBASEDATA01 gcinstall]$ gccli -utujunbing -pX@gbase2024 -e "show variables like '%password_life_time%'"
[gbase@GBASEDATA01 gcinstall]$ gccli -utujunbing -pX@gbase2024 -e "show variables like '%login_attempt_max%'"

4.10 Create the ssbm Database and Tables


[gbase@GBASEDATA01 ssbm]$ pwd
/opt/ssbm
[gbase@GBASEDATA01 ssbm]$ ls
create_ssbm.sql  customer.tbl  dbgen  dists.dss  dwdate.tbl  lineorder.tbl  part.tbl  supplier.tbl
[gbase@GBASEDATA01 ssbm]$ gccli -utujunbing -pX@gbase2024
GBase client 9.8.0.6.17_patch.19971bae. Copyright (c) 2004-2024, GBase.  All Rights Reserved.
gbase> use warehouse wh_exam;
Query OK, 0 rows affected (Elapsed: 00:00:00.03)
gbase> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| performance_schema |
| cloud              |
| gclusterdb         |
| gctmpdb            |
| tdb                |
+--------------------+
6 rows in set (Elapsed: 00:00:00.21)
gbase> source /opt/ssbm/create_ssbm.sql;
gbase> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| performance_schema |
| cloud              |
| gclusterdb         |
| gctmpdb            |
| tdb                |
| ssbm               |
+--------------------+
7 rows in set (Elapsed: 00:00:00.51)
gbase> use ssbm;
Query OK, 0 rows affected (Elapsed: 00:00:00.11)
gbase> show tables;
+----------------+
| Tables_in_ssbm |
+----------------+
| customer       |
| dates          |
| lineorder      |
| part           |
| supplier       |
+----------------+
5 rows in set (Elapsed: 00:00:00.09)

4.11 Export and Load Data

[gbase@GBASEDATA01 ssbm]$ gccli -utujunbing -pX@gbase2024
gbase> use warehouse wh_exam;

# Load from S3:
gbase> create file format ssbm.ssbm_file CHARACTER SET utf8 DATA_FORMAT 3 NULL_VALUE 'NULL' fields terminated by '|';
gbase> show file formats in ssbm\G;
gbase> create stage ssbm.ssbm_stage type='s3n' options='s3n://minioadmin:minioadmin@192.168.195.101:9000/minio/loaddata/' comment='ssbm state from minio s3 type';
gbase> show stages in ssbm\G;
# Note: in the output of show file formats in db / show stages in db, the create_time column is a raw timestamp and should be formatted as a standard date-time
gbase> LOAD DATA INFILE 'customer.tbl' INTO TABLE ssbm.customer FILE FORMAT ssbm.ssbm_file stage ssbm.ssbm_stage;
gbase> LOAD DATA INFILE 'part.tbl' INTO TABLE ssbm.part FILE FORMAT ssbm.ssbm_file stage ssbm.ssbm_stage;

# Export to FTP
gbase> select * from ssbm.customer into outfile 'ftp://hadoop:gbase@192.168.195.20/customer' FIELDS TERMINATED BY '|' writemode by overwrites;
gbase> select * from ssbm.part into outfile 'ftp://hadoop:gbase@192.168.195.20/part' FIELDS TERMINATED BY '|' writemode by overwrites;

# Load from FTP
gbase> load data infile 'ftp://hadoop:gbase@192.168.195.20/part/part' into table ssbm.part FIELDS TERMINATED BY '|';

# Export to HDFS
gbase> select * from tdb.tb1 into outfile 'hdp://hadoop@192.168.195.20/test/part' FIELDS TERMINATED BY '|' writemode by overwrites;

# Load from HDFS
gbase> load data infile 'hdp://hadoop@192.168.195.20/test/part/*' into table ssbm.part FIELDS TERMINATED BY '|';

4.12 Back Up and Restore Data

[gbase@GBASEDATA01 ssbm]$ gccli -uu1 -pX@gbase2024
# Create the backup connection
gbase> select * from cloud.backup_conn_info;
gbase> backup connection cleanup;
gbase> backup add connection 'backcon' route 'backup' s3_accesskey 'minioadmin' s3_secretkey 'minioadmin' s3_endpoint '192.168.195.101:9000' s3_region '' s3_bucket 'backup' storage_style 's3';
gbase> select * from cloud.backup_conn_info;
# Back up data
# Batch table backups do not require clearing the backup status system table in advance
gbase> use warehouse wh_exam;
gbase> show vcs;
gbase> select * from cloud.backup_status_info;
gbase> backup status cleanup;
gbase> backup add tables sysvc.ssbm.customer;
gbase> backup add tables sysvc.ssbm.dates;
gbase> backup add tables sysvc.ssbm.lineorder;
gbase> backup add tables sysvc.ssbm.part;
gbase> backup add tables sysvc.ssbm.supplier;
gbase> select * from cloud.backup_status_info;
gbase> backup level 0 tables with connection 'backcon';
gbase> backup level 0 table sysvc.tdb.tb1 with connection 'backcon';
gbase> select * from cloud.backup_status_info;
gbase> backup show with connection 'backcon'\G;
gbase> backup level 1 tables with connection 'backcon';
gbase> select * from cloud.backup_status_info;
gbase> backup show with connection 'backcon'\G;
# Restore data
gbase> backup recover cycle 'cycleId' point 'pointId' with connection 'backcon';
gbase> backup show with connection 'backcon'\G;
# Drop a backup cycle
gbase> backup drop cycle 'cycleId' with connection 'backcon';
gbase> backup show with connection 'backcon'\G;
# Drop a backup point
gbase> backup drop cycle 'cycleId' point 'pointId' with connection 'backcon';
gbase> backup show with connection 'backcon'\G;

4.13 Create External Tables

[gbase@GBASEDATA01 ssbm]$ gccli -uu1 -pX@gbase2024
gbase> create database ssbm_external;
gbase> use ssbm_external;
# Read-only external table
gbase> create external table if not exists ssbm_external.customer1(C_CUSTKEY INTEGER, C_NAME VARCHAR(25) NOT NULL, C_ADDRESS VARCHAR(40) NOT NULL, C_CITY VARCHAR(10) NOT NULL, C_NATION VARCHAR(15) NOT NULL, C_REGION VARCHAR(12) NOT NULL, C_PHONE VARCHAR(15) NOT NULL, C_MKTSEGMENT VARCHAR(10) NOT NULL) location 'ftp://hadoop:gbase@192.168.195.20/customer/' INFORMAT DATA_FORMAT 3 NULL_VALUE 'NULL' fields terminated by '|' OUTFORMAT;
gbase> select * from customer1 limit 10;
# Created with the load read option; reads require a fileid
gbase> create load read external table if not exists ssbm_external.customer2(C_CUSTKEY INTEGER, C_NAME VARCHAR(25) NOT NULL, C_ADDRESS VARCHAR(40) NOT NULL, C_CITY VARCHAR(10) NOT NULL, C_NATION VARCHAR(15) NOT NULL, C_REGION VARCHAR(12) NOT NULL, C_PHONE VARCHAR(15) NOT NULL, C_MKTSEGMENT VARCHAR(10) NOT NULL) location 'ftp://hadoop:gbase@192.168.195.20/customer/' INFORMAT DATA_FORMAT 3 NULL_VALUE 'NULL' fields terminated by '|' OUTFORMAT;
gbase> select * from customer2 limit 10;
# Created without the load option; reads do not require a fileid
gbase> create read external table if not exists ssbm_external.customer3(C_CUSTKEY INTEGER, C_NAME VARCHAR(25) NOT NULL, C_ADDRESS VARCHAR(40) NOT NULL, C_CITY VARCHAR(10) NOT NULL, C_NATION VARCHAR(15) NOT NULL, C_REGION VARCHAR(12) NOT NULL, C_PHONE VARCHAR(15) NOT NULL, C_MKTSEGMENT VARCHAR(10) NOT NULL) location 'ftp://hadoop:gbase@192.168.195.20/customer/' INFORMAT DATA_FORMAT 3 NULL_VALUE 'NULL' fields terminated by '|' OUTFORMAT;
gbase> select * from customer3 limit 10;
# Read-write external table
gbase> create write external table if not exists ssbm_external.customer4(C_CUSTKEY INTEGER, C_NAME VARCHAR(25) NOT NULL, C_ADDRESS VARCHAR(40) NOT NULL, C_CITY VARCHAR(10) NOT NULL, C_NATION VARCHAR(15) NOT NULL, C_REGION VARCHAR(12) NOT NULL, C_PHONE VARCHAR(15) NOT NULL, C_MKTSEGMENT VARCHAR(10) NOT NULL) location 'ftp://hadoop:gbase@192.168.195.20/customer/' INFORMAT DATA_FORMAT 3 NULL_VALUE 'NULL' fields terminated by '|' OUTFORMAT;
gbase> select * from customer4 limit 10;
gbase> show tables;
gbase> show full tables;
gbase> show create table customer1;
gbase> show create table customer2;
gbase> show create table customer3;
gbase> show create table customer4;

4.14 Configure the Audit Log

gbase>
gbase> set global audit_log=1;
gbase> set global log_output='table';
gbase> set global long_query_time=0;
gbase> create audit policy default_audit(enable='Y');
gbase> drop event if exists audit_event;
gbase> DELIMITER // 
create event if not exists audit_event on schedule every 60 minute do begin use warehouse wh1; flush audit logs force; end // 
DELIMITER ;
gbase>
gbase> select * from information_schema.events where event_name='audit_event'\G;
*************************** 1. row ***************************
      EVENT_CATALOG: NULL
           EVENT_VC: sysvc
       EVENT_SCHEMA: gclusterdb
         EVENT_NAME: audit_event
            DEFINER: gbase@accountadmin
          TIME_ZONE: +08:00
         EVENT_BODY: SQL
   EVENT_DEFINITION: begin use warehouse wh1; flush audit logs force; end
         EVENT_TYPE: RECURRING
         EXECUTE_AT: NULL
     INTERVAL_VALUE: 60
     INTERVAL_FIELD: MINUTE
           SQL_MODE: PIPES_AS_CONCAT,ANSI_QUOTES,IGNORE_SPACE,ONLY_FULL_GROUP_BY,NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION,PAD_CHAR_TO_FULL_LENGTH
             STARTS: 2024-07-11 23:22:25
               ENDS: NULL
             STATUS: ENABLED
      ON_COMPLETION: NOT PRESERVE
            CREATED: 2024-07-12 14:22:25
       LAST_ALTERED: 2024-07-12 14:22:25
      LAST_EXECUTED: NULL
      EVENT_COMMENT:
       EXECUTE_MODE: 1
CHARACTER_SET_CLIENT: utf8mb4
COLLATION_CONNECTION: utf8mb4_general_ci
 DATABASE_COLLATION: utf8_general_ci
              OWNER: accountadmin
1 row in set (Elapsed: 00:00:00.29)
ERROR:
No query specified
gbase> select * from gclusterdb.audit_log order by start_time desc limit 1;
gbase> show variables like 'audit_log';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| audit_log     | ON    |
+---------------+-------+
1 row in set (Elapsed: 00:00:00.01)
gbase> show variables like 'log_output';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| log_output    | TABLE |
+---------------+-------+
1 row in set (Elapsed: 00:00:00.01)
gbase> show variables like 'long_query_time';
+-----------------+----------+
| Variable_name   | Value    |
+-----------------+----------+
| long_query_time | 0.000000 |
+-----------------+----------+
1 row in set (Elapsed: 00:00:00.01)
gbase> select * from cloud.audit_policy;
+---------------+--------+-------+------+----+----------+--------+--------------+-----------------+--------+
| name          | enable | hosts | user | db | obj_type | object | sql_commands | Long_query_time | status |
+---------------+--------+-------+------+----+----------+--------+--------------+-----------------+--------+
| default_audit | Y      |       |      |    |          |        |              |                 |        |
+---------------+--------+-------+------+----+----------+--------+--------------+-----------------+--------+
1 row in set (Elapsed: 00:00:00.03)

 

5. FDB Scale-Out and Scale-In

Description: add node 192.168.195.20 to the FDB cluster as a data node

5.1 Passwordless SSH Configuration for the root User

[root@GBASEDATA01 ~]# cd /opt/gbase_workspace/scripts/autossh
[root@GBASEDATA01 autossh]# echo -e "192.168.195.20" > ip.list
[root@GBASEDATA01 autossh]# cat ip.list
[root@GBASEDATA01 autossh]# expect autossh_par.sh ip.list root gbase

5.2 C3 Tool Configuration (run as root)

[root@GBASEDATA01 autossh]# vim /etc/c3.conf
cluster coor {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
}
cluster wh1 {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
    192.168.195.103
}
cluster all {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
    192.168.195.103
    192.168.195.105
}
cluster fdb {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
    192.168.195.103
    192.168.195.105
}
cluster newfdb {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.20
}
[root@GBASEDATA01 autossh]# cexec -p newfdb: 'hostname'

5.3 Upload the FDB RPM Packages

[root@GBASEDATA01 ~]# cexec newfdb: 'mkdir -p /opt'
[root@GBASEDATA01 ~]# cpush newfdb: /opt/gbase_workspace/setup/soft/*.rpm /opt/
[root@GBASEDATA01 ~]# cexec newfdb: 'ls /opt/*.rpm'
************************* newfdb *************************
--------- 192.168.195.20---------
foundationdb-clients-6.3.13-1.el7.x86_64.rpm
foundationdb-server-6.3.13-1.el7.x86_64.rpm

5.4 Install the FDB RPM Packages

[root@GBASEDATA01 ~]# cexec newfdb: 'rpm -ivh /opt/foundationdb*.rpm'
************************* newfdb *************************
--------- 192.168.195.20---------
Preparing...                          ########################################
Updating / installing...
foundationdb-clients-6.3.13-1         ########################################
foundationdb-server-6.3.13-1          ########################################

5.5 Verify the FDB Processes

[root@GBASEDATA01 ~]# cexec newfdb: 'ps -ef|grep foundation | grep -v grep'
************************* newfdb *************************
--------- 192.168.195.20---------
root      41856      1  0 00:10 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
foundat+  41858  41856  7 00:10 ?        00:00:15 /usr/sbin/fdbserver --cluster_file /etc/foundationdb/fdb.cluster --datadir /var/lib/foundationdb/data/4500 --listen_address public --logdir /var/log/foundationdb --public_address auto:4500
root      41895  41856  0 00:13 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize

5.6 Adjust the FDB Configuration Files

[root@GBASEDATA01 ~]# cexec newfdb: 'mkdir -p /opt/foundationdb/data'
[root@GBASEDATA01 ~]# cexec newfdb: 'mkdir -p /opt/foundationdb/log'
[root@GBASEDATA01 ~]# cexec newfdb: 'chown -R foundationdb:foundationdb /etc/foundationdb'
[root@GBASEDATA01 ~]# cexec newfdb: 'chown -R foundationdb:foundationdb /opt/foundationdb'
[root@GBASEDATA01 ~]# cpush newfdb: /etc/foundationdb/* /etc/foundationdb/
[root@GBASEDATA01 ~]# cexec newfdb: 'cat /etc/foundationdb/fdb.cluster'
[root@GBASEDATA01 ~]# cexec newfdb: 'cat /etc/foundationdb/foundationdb.conf | grep -E "logdir|datadir"'

5.7 Configure the FDB systemd Service

[root@GBASEDATA01 ~]# cpush newfdb: /usr/lib/systemd/system/foundationdb.service /usr/lib/systemd/system/
[root@GBASEDATA01 ~]# cexec newfdb: 'cat /usr/lib/systemd/system/foundationdb.service | grep -C1 KillMode'
************************* newfdb *************************
--------- 192.168.195.20---------
ExecStart=/usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
KillMode=process
Restart=on-failure

5.8 Restart the FDB Service

[root@GBASEDATA01 ~]# cexec newfdb: 'systemctl enable foundationdb.service'
[root@GBASEDATA01 ~]# cexec newfdb: 'systemctl restart foundationdb.service'
[root@GBASEDATA01 ~]# cexec newfdb: 'ps -ef|grep foundation | grep -v grep'
************************* newfdb *************************
--------- 192.168.195.20---------
root      23623      1  0 18:55 ?        00:00:00 /usr/lib/foundationdb/fdbmonitor --conffile /etc/foundationdb/foundationdb.conf --lockfile /var/run/fdbmonitor.pid --daemonize
foundat+  23624  23623  9 18:55 ?        00:00:00 /usr/sbin/fdbserver --cluster_file /etc/foundationdb/fdb.cluster --datadir /opt/foundationdb/data/4500 --listen_address public --logdir /opt/foundationdb/log --public_address auto:4500

5.9 Check FDB Cluster Information

[root@GBASEDATA01 foundationdb]# fdbcli
Using cluster file `fdb.cluster'.
The database is unavailable; type `status' for more information.
Welcome to the fdbcli. For help, type `help'.
# Check the cluster status
fdb> status details
Configuration:
 Redundancy mode        - triple
 Storage engine         - ssd-2
 Coordinators           - 3
 Desired Proxies        - 8
 Desired Resolvers      - 1
 Desired Logs           - 30
 Usable Regions         - 1
Cluster:
 FoundationDB processes - 4
 Zones                  - 4
 Machines               - 4
 Memory availability    - 0.5 GB per process on machine with least available
                          >>>>> (WARNING: 4.0 GB recommended) <<<<<
 Fault Tolerance        - 1 machines
 Server time            - 07/17/24 18:56:50
Data:
 Replication health     - Healthy
 Moving data            - 0.000 GB
 Sum of key-value sizes - 1 MB
 Disk space used        - 840 MB
Operating space:
 Storage server         - 9.5 GB free on most full server
 Log server             - 9.5 GB free on most full server
Workload:
 Read rate              - 11 Hz
 Write rate             - 0 Hz
 Transactions started   - 4 Hz
 Transactions committed - 0 Hz
 Conflict rate          - 0 Hz
Backup and DR:
 Running backups        - 0
 Running DRs            - 0
Process performance details:
 192.168.195.20:4500    (  5% cpu; 17% machine; 0.000 Gbps;  0% disk IO; 0.3 GB / 0.5 GB RAM  )
 192.168.195.101:4500   (  7% cpu; 22% machine; 0.000 Gbps;  4% disk IO; 0.4 GB / 1.5 GB RAM  )
 192.168.195.103:4500   ( 11% cpu; 17% machine; 0.000 Gbps;  0% disk IO; 0.4 GB / 1.7 GB RAM  )
 192.168.195.105:4500   ( 12% cpu; 17% machine; 0.000 Gbps;  0% disk IO; 0.4 GB / 1.8 GB RAM  )
Coordination servers:
 192.168.195.101:4500  (reachable)
 192.168.195.103:4500  (reachable)
 192.168.195.105:4500  (reachable)
Client time: 07/17/24 18:56:50

5.10 FDB Scale-In

[root@GBASEDATA01 foundationdb]# fdbcli
Using cluster file `fdb.cluster'.
The database is unavailable; type `status' for more information.
Welcome to the fdbcli. For help, type `help'
fdb> exclude 192.168.195.20
Waiting for state to be removed from all excluded servers. This may take a while.
(Interrupting this wait with CTRL+C will not cancel the data movement.)
 192.168.195.20(Whole machine)  ---- Successfully excluded. It is now safe to remove this machine from the cluster.
fdb> exit
[root@GBASEDATA01 ~]# cexec newfdb: 'systemctl stop foundationdb.service'
************************* newfdb *************************
--------- 192.168.195.20---------
[root@GBASEDATA01 autossh]# fdbcli
Using cluster file `/etc/foundationdb/fdb.cluster'.
The database is available, but has issues (type 'status' for more information).
Welcome to the fdbcli. For help, type `help'.
fdb> status details
Using cluster file `/etc/foundationdb/fdb.cluster'.
Configuration:
 Redundancy mode        - triple
 Storage engine         - ssd-2
 Coordinators           - 3
 Exclusions             - 1 (type `exclude' for details)
 Desired Proxies        - 8
 Desired Resolvers      - 1
 Desired Logs           - 30
 Usable Regions         - 1
Cluster:
 FoundationDB processes - 3
 Zones                  - 3
 Machines               - 3
 Memory availability    - 1.6 GB per process on machine with least available
                          >>>>> (WARNING: 4.0 GB recommended) <<<<<
 Retransmissions rate   - 0 Hz
 Fault Tolerance        - 0 machines (1 without data loss)
 Server time            - 07/17/24 19:10:31
Data:
 Replication health     - Healthy
 Moving data            - 0.000 GB
 Sum of key-value sizes - 1 MB
 Disk space used        - 630 MB
Operating space:
 Storage server         - 9.4 GB free on most full server
 Log server             - 9.4 GB free on most full server
Workload:
 Read rate              - 7 Hz
 Write rate             - 0 Hz
 Transactions started   - 4 Hz
 Transactions committed - 0 Hz
 Conflict rate          - 0 Hz
Backup and DR:
 Running backups        - 0
 Running DRs            - 0
Process performance details:
 192.168.195.101:4500   ( 11% cpu; 25% machine; 0.000 Gbps;  0% disk IO; 0.4 GB / 1.6 GB RAM  )
 192.168.195.103:4500   ( 12% cpu; 19% machine; 0.000 Gbps;  0% disk IO; 0.4 GB / 1.7 GB RAM  )
 192.168.195.105:4500   ( 16% cpu; 23% machine; 0.000 Gbps;  0% disk IO; 0.4 GB / 1.8 GB RAM  )
Coordination servers:
 192.168.195.101:4500  (reachable)
 192.168.195.103:4500  (reachable)
 192.168.195.105:4500  (reachable)
Client time: 07/17/24 19:10:31

 

6. GC Scale-Out and Scale-In

Description: add nodes 192.168.195.104 and 192.168.195.106 to the GC cluster as data nodes

6.1 Passwordless SSH Configuration for the root User

[root@GBASEDATA01 ~]# rpm -ivh --force /opt/gbase_workspace/tools/rpm/expect*.rpm
[root@GBASEDATA01 ~]# cd /opt/gbase_workspace/scripts/autossh
[root@GBASEDATA01 autossh]# echo -e "192.168.195.104\n192.168.195.106" > ip.list
[root@GBASEDATA01 autossh]# cat ip.list
[root@GBASEDATA01 autossh]# expect autossh_par.sh ip.list root gbase

6.2 C3 Tool Configuration (run as root)

[root@GBASEDATA01 autossh]# vim /etc/c3.conf
cluster coor {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
}
cluster wh1 {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
    192.168.195.103
}
cluster wh2 {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.104
    192.168.195.106
}
cluster all {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
    192.168.195.103
    192.168.195.105
    192.168.195.104
    192.168.195.106
}
cluster fdb {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.101
    192.168.195.103
    192.168.195.105
}
cluster new {
    192.168.195.101:127.0.0.1
    dead remove-index-00
    192.168.195.104
    192.168.195.106
}
[root@GBASEDATA01 autossh]# chmod 666 /etc/c3.conf
[root@GBASEDATA01 autossh]# cexec -p new: 'hostname'

6.3 Create the gbase User

[root@GBASEDATA01 autossh]# cexec new: 'useradd gbase'
[root@GBASEDATA01 autossh]# cexec new: 'echo gbase:gbase|chpasswd'
[root@GBASEDATA01 autossh]# cexec new: 'mkdir -p /opt'
[root@GBASEDATA01 autossh]# cexec new: 'chown -R gbase:gbase /opt'
[root@GBASEDATA01 autossh]# cexec new: 'chage -M 99999 gbase'

6.4 Environment Checks (run as root)

[root@GBASEDATA01 autossh]# cexec new: 'cat /etc/redhat-release'
[root@GBASEDATA01 autossh]# cexec new: 'nkvers'
[root@GBASEDATA01 autossh]# cexec new: 'cat /proc/version'
[root@GBASEDATA01 autossh]# cexec new: 'cat /etc/system-release'
[root@GBASEDATA01 autossh]# cexec new: 'df -h'
[root@GBASEDATA01 autossh]# cexec new: 'ip a|grep inet'
[root@GBASEDATA01 autossh]# cexec -p new: 'cat /proc/cpuinfo |grep "model name" | head -n1'
[root@GBASEDATA01 autossh]# cexec -p new: 'grep "model name" /proc/cpuinfo|wc -l'
[root@GBASEDATA01 autossh]# cexec -p new: 'grep "physical id" /proc/cpuinfo|sort|uniq|wc -l'
[root@GBASEDATA01 autossh]# cexec -p new: 'grep "cpu cores" /proc/cpuinfo|sort|uniq'
[root@GBASEDATA01 autossh]# cexec -p new: 'free -g'
[root@GBASEDATA01 autossh]# cexec -p new: 'hostname'

6.5 Passwordless SSH Configuration for the gbase User (run as gbase)

[root@GBASEDATA01 ~]# su - gbase
[gbase@GBASEDATA01 ~]$ cd /opt/gbase_workspace/scripts/autossh
[gbase@GBASEDATA01 autossh]$ cat ip.list
[gbase@GBASEDATA01 autossh]$ expect autossh_par.sh ip.list gbase gbase

6.6 Environment Initialization (run as root)

[root@GBASEDATA01 ~]# cd /opt/gbase_workspace/setup/gcinstall
[root@GBASEDATA01 gcinstall]# cpush new: SetSysEnv.py /tmp
[root@GBASEDATA01 gcinstall]# cexec new: 'python /tmp/SetSysEnv.py --installPrefix=/opt --dbaUser=gbase'

6.7 Configure the demo.options File

[root@GBASEDATA01 ~]# su - gbase
[gbase@GBASEDATA01 ~]$ cd /opt/gbase_workspace/setup/gcinstall
[gbase@GBASEDATA01 gcinstall]$ cat demo.options
installPrefix= /opt
#coordinateHost = 192.168.195.101
#coordinateHostNodeID = 101
dataHost = 192.168.195.104,192.168.195.106
existCoordinateHost = 192.168.195.101
existDataHost = 192.168.195.101,192.168.195.103,192.168.195.105
existGcwareHost=192.168.195.101
#gcwareHost = 192.168.195.101
#gcwareHostNodeID = 101
dbaUser = gbase
dbaGroup = gbase
dbaPwd = 'gbase'
gcluster_instance_name=gc1
instance_root_name=u1
instance_root_password=gbase20110531
rootPwd = 'gbase'
account_admin_password=Gc@2024
#rootPwdFile = rootPwd.json
characterSet = utf8mb4
#dbPort = 5258
#sshPort = 22
# 's3' or 'hdfs'
GC_STORAGE_STYLE = s3
GC_S3_BUCKET=test
GC_S3_ENDPOINT=192.168.195.101:9000
GC_S3_ACCESS_KEY_ID=minioadmin
GC_S3_SECRET_KEY=minioadmin
GC_S3_REGION=''
#GC_HDFS_NAMENODES=192.168.151.100:50070,192.168.151.101:50070
#GC_HDFS_URI=hdp://gbase@192.168.151.100:50070/
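
Because the new data nodes will read and write the same MinIO bucket, it can save a failed install to verify first that the S3 endpoint and credentials in demo.options are reachable. A sketch using the mcli client from the uploaded RPMs ("local" is an arbitrary alias name chosen here):

[gbase@GBASEDATA01 gcinstall]$ mcli alias set local http://192.168.195.101:9000 minioadmin minioadmin
[gbase@GBASEDATA01 gcinstall]$ mcli ls local/test

If both commands succeed, the endpoint, access key, secret key, and bucket in demo.options are consistent with each other.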

6.8 Install the GC software

[gbase@GBASEDATA01 gcinstall]$ python gcinstall.py --silent=demo.options
[gbase@GBASEDATA01 gcinstall]$ exit
[root@GBASEDATA01 ~]# cexec all: 'python /tmp/SetSysEnv.py --installPrefix=/opt --dbaUser=gbase'
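
Before inspecting the topology with gcadmin, a quick look at whether the database processes came up on the new nodes can help localize problems. This assumes the gbased process name used by classic GBase 8a data nodes, which may differ by version:

[root@GBASEDATA01 ~]# cexec new: 'ps -ef | grep gbased | grep -v grep'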

6.9 Check the GC cluster status

[root@GBASEDATA01 ~]# su - gbase
[gbase@GBASEDATA01 ~]$ gcadmin
CLUSTER STATE:         ACTIVE
=======================================
|  GBASE GCWARE CLUSTER INFORMATION   |
=======================================
| NodeName |    IpAddress    | gcware |
---------------------------------------
| gcware1  | 192.168.195.101 |  OPEN  |
---------------------------------------
=========================================================
|         GBASE COORDINATOR CLUSTER INFORMATION         |
=========================================================
|   NodeName   |    IpAddress    | gcluster | DataState |
---------------------------------------------------------
| coordinator1 | 192.168.195.101 |   OPEN   |     0     |
---------------------------------------------------------
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|     wh1      | wh1       |
----------------------------
===================================================
|    GBASE CLUSTER FREE DATA NODE INFORMATION     |
===================================================
| NodeName  |    IpAddress    | gnode | DataState |
---------------------------------------------------
| FreeNode1 | 192.168.195.105 | OPEN  |     0     |
---------------------------------------------------
| FreeNode2 | 192.168.195.106 | OPEN  |     0     |
---------------------------------------------------
| FreeNode3 | 192.168.195.104 | OPEN  |     0     |
---------------------------------------------------
1 warehouse: wh1
1 coordinator node
3 free data node

[gbase@GBASEDATA01 ~]$ account --show
account:

*************************** 1. row ***************************
accountname         :gc1
company             :
mobile              :
email               :
nickname            :
disabled            :N
coor_addr           :192.168.195.101
storage_style       :s3
s3_accessKey        :minioadmin
s3_secretKey        :minioadmin
s3_endPoint         :192.168.195.101:9000
s3_region           :
s3_bucket           :test
comment             :
hdfs_namenodes      :None
hdfs_uri            :None
schedule_type       :
1 rows in set
SUCCESS

6.10 Create a warehouse

Here createwh e wh2.xml first generates an example XML template; it is then edited to list the new nodes and passed back to createwh:

[gbase@GBASEDATA01 ~]$ cd /opt/gbase_workspace/setup/gcinstall
[gbase@GBASEDATA01 gcinstall]$ gcadmin createwh e wh2.xml
[gbase@GBASEDATA01 gcinstall]$ vim wh2.xml
<?xml version='1.0' encoding="utf-8"?>
<servers>
   <rack>
        <node ip="192.168.195.104"/>
        <node ip="192.168.195.106"/>
   </rack>
   <wh_name name="wh2"/>
   <comment message="wh2"/>
</servers>
[gbase@GBASEDATA01 gcinstall]$ gcadmin createwh wh2.xml
[gbase@GBASEDATA01 gcinstall]$ gcadmin
CLUSTER STATE:         ACTIVE
=======================================
|  GBASE GCWARE CLUSTER INFORMATION   |
=======================================
| NodeName |    IpAddress    | gcware |
---------------------------------------
| gcware1  | 192.168.195.101 |  OPEN  |
---------------------------------------
=========================================================
|         GBASE COORDINATOR CLUSTER INFORMATION         |
=========================================================
|   NodeName   |    IpAddress    | gcluster | DataState |
---------------------------------------------------------
| coordinator1 | 192.168.195.101 |   OPEN   |     0     |
---------------------------------------------------------
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|     wh1      | wh1       |
----------------------------
|     wh2      | wh2       |
----------------------------
===================================================
|    GBASE CLUSTER FREE DATA NODE INFORMATION     |
===================================================
| NodeName  |    IpAddress    | gnode | DataState |
---------------------------------------------------
| FreeNode1 | 192.168.195.105 | OPEN  |     0     |
---------------------------------------------------
2 warehouse: wh1, wh2
1 coordinator node
1 free data node
[gbase@GBASEDATA01 gcinstall]$ gcadmin showcluster wh wh1
CLUSTER STATE:         ACTIVE
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|     wh1      | wh1       |
----------------------------
===============================================================
|               WAREHOUSE DATA NODE INFORMATION               |
===============================================================
| NodeName |                IpAddress                 | gnode |
---------------------------------------------------------------
|  node1   |             192.168.195.101              | OPEN  |
---------------------------------------------------------------
|  node2   |             192.168.195.103              | OPEN  |
---------------------------------------------------------------
2 data node
[gbase@GBASEDATA01 gcinstall]$ gcadmin showcluster wh wh2
CLUSTER STATE:         ACTIVE
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|     wh2      | wh2       |
----------------------------
===============================================================
|               WAREHOUSE DATA NODE INFORMATION               |
===============================================================
| NodeName |                IpAddress                 | gnode |
---------------------------------------------------------------
|  node1   |             192.168.195.104              | OPEN  |
---------------------------------------------------------------
|  node2   |             192.168.195.106              | OPEN  |
---------------------------------------------------------------
2 data node

6.11 Delete a warehouse

[gbase@GBASEDATA01 gcinstall]$ gcadmin rmwh wh2
[gbase@GBASEDATA01 gcinstall]$ gcadmin
CLUSTER STATE:         ACTIVE
=======================================
|  GBASE GCWARE CLUSTER INFORMATION   |
=======================================
| NodeName |    IpAddress    | gcware |
---------------------------------------
| gcware1  | 192.168.195.101 |  OPEN  |
---------------------------------------
=========================================================
|         GBASE COORDINATOR CLUSTER INFORMATION         |
=========================================================
|   NodeName   |    IpAddress    | gcluster | DataState |
---------------------------------------------------------
| coordinator1 | 192.168.195.101 |   OPEN   |     0     |
---------------------------------------------------------
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|     wh1      | wh1       |
----------------------------
===================================================
|    GBASE CLUSTER FREE DATA NODE INFORMATION     |
===================================================
| NodeName  |    IpAddress    | gnode | DataState |
---------------------------------------------------
| FreeNode1 | 192.168.195.105 | OPEN  |     0     |
---------------------------------------------------
| FreeNode2 | 192.168.195.106 | OPEN  |     0     |
---------------------------------------------------
| FreeNode3 | 192.168.195.104 | OPEN  |     0     |
---------------------------------------------------
1 warehouse: wh1
1 coordinator node
3 free data node

6.12 Rename a warehouse

[gbase@GBASEDATA01 gcinstall]$ gcadmin renamewh wh1 wh_exam
gcadmin rename wh [wh1] to [wh_exam] successful
[gbase@GBASEDATA01 gcinstall]$ gcadmin
CLUSTER STATE:         ACTIVE
=======================================
|  GBASE GCWARE CLUSTER INFORMATION   |
=======================================
| NodeName |    IpAddress    | gcware |
---------------------------------------
| gcware1  | 192.168.195.101 |  OPEN  |
---------------------------------------
=========================================================
|         GBASE COORDINATOR CLUSTER INFORMATION         |
=========================================================
|   NodeName   |    IpAddress    | gcluster | DataState |
---------------------------------------------------------
| coordinator1 | 192.168.195.101 |   OPEN   |     0     |
---------------------------------------------------------
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|     wh_exam  | wh1       |
----------------------------
===================================================
|    GBASE CLUSTER FREE DATA NODE INFORMATION     |
===================================================
| NodeName  |    IpAddress    | gnode | DataState |
---------------------------------------------------
| FreeNode1 | 192.168.195.105 | OPEN  |     0     |
---------------------------------------------------
| FreeNode2 | 192.168.195.106 | OPEN  |     0     |
---------------------------------------------------
| FreeNode3 | 192.168.195.104 | OPEN  |     0     |
---------------------------------------------------
1 warehouse: wh_exam
1 coordinator node
3 free data node

6.13 Suspend a warehouse

The two trailing arguments to stopwh (and to startwh below) match dbaUser/dbaPwd from demo.options, i.e. presumably the DBA credentials.

[gbase@GBASEDATA01 gcinstall]$ gcadmin stopwh wh_exam gbase gbase
[gbase@GBASEDATA01 gcinstall]$ gcadmin showcluster wh wh_exam
CLUSTER STATE:         ACTIVE
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|   wh_exam    | wh1       |
----------------------------
===============================================================
|               WAREHOUSE DATA NODE INFORMATION               |
===============================================================
| NodeName |                IpAddress                 | gnode |
---------------------------------------------------------------
|  node1   |             192.168.195.101              | CLOSE |
---------------------------------------------------------------
|  node2   |             192.168.195.103              | CLOSE |
---------------------------------------------------------------
2 data node

6.14 Resume a warehouse

[gbase@GBASEDATA01 gcinstall]$ gcadmin startwh wh_exam gbase gbase
[gbase@GBASEDATA01 gcinstall]$ gcadmin showcluster wh wh_exam
CLUSTER STATE:         ACTIVE
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|   wh_exam    | wh1       |
----------------------------
===============================================================
|               WAREHOUSE DATA NODE INFORMATION               |
===============================================================
| NodeName |                IpAddress                 | gnode |
---------------------------------------------------------------
|  node1   |             192.168.195.101              | OPEN  |
---------------------------------------------------------------
|  node2   |             192.168.195.103              | OPEN  |
---------------------------------------------------------------
2 data node

6.15 Scale out a warehouse

[gbase@GBASEDATA01 gcinstall]$ cat gcChangeinfo_wh1.xml
<?xml version='1.0' encoding="utf-8"?>
<servers>
   <rack>
        <node ip="192.168.195.105"/>
   </rack>
</servers>
[gbase@GBASEDATA01 gcinstall]$ gcadmin addnodes gcChangeinfo_wh1.xml wh_exam
[gbase@GBASEDATA01 gcinstall]$ gcadmin
CLUSTER STATE:         ACTIVE
=======================================
|  GBASE GCWARE CLUSTER INFORMATION   |
=======================================
| NodeName |    IpAddress    | gcware |
---------------------------------------
| gcware1  | 192.168.195.101 |  OPEN  |
---------------------------------------
=========================================================
|         GBASE COORDINATOR CLUSTER INFORMATION         |
=========================================================
|   NodeName   |    IpAddress    | gcluster | DataState |
---------------------------------------------------------
| coordinator1 | 192.168.195.101 |   OPEN   |     0     |
---------------------------------------------------------
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|   wh_exam    | wh1       |
----------------------------
===================================================
|    GBASE CLUSTER FREE DATA NODE INFORMATION     |
===================================================
| NodeName  |    IpAddress    | gnode | DataState |
---------------------------------------------------
| FreeNode2 | 192.168.195.106 | OPEN  |     0     |
---------------------------------------------------
| FreeNode3 | 192.168.195.104 | OPEN  |     0     |
---------------------------------------------------
1 warehouse: wh_exam
1 coordinator node
2 free data node

[gbase@GBASEDATA01 gcinstall]$ gcadmin showcluster wh wh_exam
CLUSTER STATE:         ACTIVE
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|   wh_exam    | wh1       |
----------------------------
===============================================================
|               WAREHOUSE DATA NODE INFORMATION               |
===============================================================
| NodeName |                IpAddress                 | gnode |
---------------------------------------------------------------
|  node1   |             192.168.195.101              | OPEN  |
---------------------------------------------------------------
|  node2   |             192.168.195.103              | OPEN  |
---------------------------------------------------------------
|  node3   |             192.168.195.105              | OPEN  |
---------------------------------------------------------------
3 data node

6.16 Scale in a warehouse

[gbase@GBASEDATA01 gcinstall]$ cat gcChangeinfo_wh1.xml
<?xml version='1.0' encoding="utf-8"?>
<servers>
   <rack>
        <node ip="192.168.195.105"/>
   </rack>
</servers>
[gbase@GBASEDATA01 gcinstall]$ gcadmin rmnodes gcChangeinfo_wh1.xml wh_exam
[gbase@GBASEDATA01 gcinstall]$ gcadmin showcluster wh wh_exam
CLUSTER STATE:         ACTIVE
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|   wh_exam    | wh1       |
----------------------------
===============================================================
|               WAREHOUSE DATA NODE INFORMATION               |
===============================================================
| NodeName |                IpAddress                 | gnode |
---------------------------------------------------------------
|  node1   |             192.168.195.101              | OPEN  |
---------------------------------------------------------------
|  node2   |             192.168.195.103              | OPEN  |
---------------------------------------------------------------
2 data node
[gbase@GBASEDATA01 gcinstall]$ gcadmin
CLUSTER STATE:         ACTIVE
=======================================
|  GBASE GCWARE CLUSTER INFORMATION   |
=======================================
| NodeName |    IpAddress    | gcware |
---------------------------------------
| gcware1  | 192.168.195.101 |  OPEN  |
---------------------------------------
=========================================================
|         GBASE COORDINATOR CLUSTER INFORMATION         |
=========================================================
|   NodeName   |    IpAddress    | gcluster | DataState |
---------------------------------------------------------
| coordinator1 | 192.168.195.101 |   OPEN   |     0     |
---------------------------------------------------------
============================
| GBASE WAREHOUSE INFORMAT |
============================
|    WhName    |  comment  |
----------------------------
|   wh_exam    | wh1       |
----------------------------
===================================================
|    GBASE CLUSTER FREE DATA NODE INFORMATION     |
===================================================
| NodeName  |    IpAddress    | gnode | DataState |
---------------------------------------------------
| FreeNode1 | 192.168.195.105 | OPEN  |     0     |
---------------------------------------------------
| FreeNode2 | 192.168.195.106 | OPEN  |     0     |
---------------------------------------------------
| FreeNode3 | 192.168.195.104 | OPEN  |     0     |
---------------------------------------------------
1 warehouse: wh_exam
1 coordinator node
3 free data node
