Installing a Greenplum Cluster on RHEL 7.5

Copyright notice: this is an original post by the author and may not be reproduced without permission. https://blog.csdn.net/Dream19881003/article/details/88178760

1. Environment

more /etc/redhat-release 

Red Hat Enterprise Linux Server release 7.5 (Maipo)

The operating system was installed with the minimal installation option.

greenplum-db-5.16.0-rhel7-x86_64.zip

Download URL:

https://network.pivotal.io/products/pivotal-gpdb/#/releases/280281/file_groups/1355

more /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.15.201  rhmdw

192.168.15.202  rhsdw1

192.168.15.203  rhsdw2

2. System tuning

 systemctl stop firewalld.service

systemctl disable firewalld.service

 yum -y install psmisc-22.20-15.el7.x86_64 unzip

 sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux 
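The sed change only takes effect after a reboot; to turn SELinux off for the current session as well (a small addition, not part of the original transcript):

setenforce 0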

pvcreate /dev/sda3

  Physical volume "/dev/sda3" successfully created.

vgextend rhel /dev/sda3

  Volume group "rhel" successfully extended

lvcreate -n gplum -L +10000M rhel

  Logical volume "gplum" created.

mkfs.xfs /dev/rhel/gplum

meta-data=/dev/rhel/gplum        isize=512    agcount=4, agsize=640000 blks

         =                       sectsz=512   attr=2, projid32bit=1

         =                       crc=1        finobt=0, sparse=0

data     =                       bsize=4096   blocks=2560000, imaxpct=25

         =                       sunit=0      swidth=0 blks

naming   =version 2              bsize=4096   ascii-ci=0 ftype=1

log      =internal log           bsize=4096   blocks=2560, version=2

         =                       sectsz=512   sunit=0 blks, lazy-count=1

realtime =none                   extsz=4096   blocks=0, rtextents=0

echo "/dev/mapper/rhel-gplum   /gp                       xfs     defaults        0 0" >> /etc/fstab

mount -a

df -hT

Filesystem             Type      Size  Used Avail Use% Mounted on

/dev/mapper/rhel-root  xfs       5.0G  1.2G  3.9G   24% /

devtmpfs               devtmpfs  1.2G     0  1.2G    0% /dev

tmpfs                  tmpfs     1.2G     0  1.2G    0% /dev/shm

tmpfs                  tmpfs     1.2G  8.6M  1.2G    1% /run

tmpfs                  tmpfs     1.2G     0  1.2G    0% /sys/fs/cgroup

/dev/sda1              xfs       197M  120M   77M   61% /boot

tmpfs                  tmpfs     229M     0  229M    0% /run/user/0

/dev/sr0               iso9660   4.4G  4.4G     0  100% /mnt

/dev/mapper/rhel-gplum xfs       9.8G   33M  9.8G    1% /gp

3. Pre-installation preparation (run on all nodes)

3.1 Add the user and groups

groupadd supergroup

groupadd gpadmin

useradd -g gpadmin -G supergroup gpadmin

chown -R gpadmin:gpadmin /gp
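The gpssh-exkeys and gpseginstall steps below prompt for the gpadmin password, so set one on every node (a small addition, not in the original transcript; the password gpadmin matches the -p option used in section 5.1):

echo "gpadmin" | passwd --stdin gpadmin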

3.2 Shared library path, resource limits, and kernel parameters

echo "/usr/local/lib" >>  /etc/ld.so.conf


cat <<eof>> /etc/security/limits.conf 

> * soft nofile 65536

> * hard nofile 65536

> * soft nproc 131072

> * hard nproc 131072

> eof

cat <<eof>> /etc/sysctl.conf 

> kernel.shmmax = 500000000

> kernel.shmmni = 4096

> kernel.shmall = 4000000000

> kernel.sem = 500 1024000 200 4096

> kernel.sysrq = 1

> kernel.core_uses_pid = 1

> kernel.msgmnb = 65536

> kernel.msgmax = 65536

> kernel.msgmni = 2048

> net.ipv4.tcp_syncookies = 1

> net.ipv4.ip_forward = 0

> net.ipv4.conf.default.accept_source_route = 0

> net.ipv4.tcp_tw_recycle = 1

> net.ipv4.tcp_max_syn_backlog = 4096

> net.ipv4.conf.all.arp_filter = 1

> net.ipv4.ip_local_port_range = 1025 65535

> net.core.netdev_max_backlog = 10000

> net.core.rmem_max = 2097152

> net.core.wmem_max = 2097152

> vm.overcommit_memory = 2

> eof

sysctl -p

3.3 Install CMake 3.x

[root@rhmdw soft]# sh cmake-3.13.4-Linux-x86_64.sh 

CMake Installer Version: 3.13.4, Copyright (c) Kitware

......

Do you accept the license? [yN]: 

y

By default the CMake will be installed in:

  "/soft/cmake-3.13.4-Linux-x86_64"

Do you want to include the subdirectory cmake-3.13.4-Linux-x86_64?

Saying no will install in: "/soft" [Yn]: 

y

......

[root@rhmdw soft]# mv cmake-3.13.4-Linux-x86_64 /usr/bin/

[root@rhmdw soft]# ln -sf /usr/bin/cmake-3.13.4-Linux-x86_64/bin/cmake /usr/bin/cmake
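A quick check that the symlink works (not in the original transcript):

cmake --version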

3.4 Configure time synchronization
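The original gives no commands for this step. A minimal sketch using chrony, the RHEL 7 default (ntp.example.com is a placeholder; replace it with your actual time source), run on all nodes:

yum -y install chrony
echo "server ntp.example.com iburst" >> /etc/chrony.conf
systemctl enable chronyd
systemctl restart chronyd
chronyc sources -v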

3.5 Set the disk I/O scheduler to deadline (already the default on RHEL 7, so no change was needed here)
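To confirm the active scheduler (a small addition; sda is the disk used above):

cat /sys/block/sda/queue/scheduler
# the scheduler shown in brackets is the active one; to switch for the current session:
echo deadline > /sys/block/sda/queue/scheduler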

3.6 Set the disk read-ahead value

blockdev --setra 65535 /dev/sda

echo "blockdev --setra 65535 /dev/sda" >> /etc/rc.d/rc.local

Note: 192.168.15.203 (rhsdw2) has not had this applied yet; confirm whether the change can be made permanent.
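On RHEL 7, /etc/rc.d/rc.local is not executable by default, so the line appended above will not run at boot until the script is made executable (a small addition, not in the original):

chmod +x /etc/rc.d/rc.local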

4. Install Greenplum on the master (rhmdw)

4.1 Unpack and run the installer

[gpadmin@rhmdw soft]$ unzip greenplum-db-5.16.0-rhel7-x86_64.zip 

Archive:  greenplum-db-5.16.0-rhel7-x86_64.zip

  inflating: greenplum-db-5.16.0-rhel7-x86_64.bin  

  inflating: greenplum-db-5.16.0-rhel7-x86_64.bin.sha256  

[gpadmin@rhmdw soft]$ ls -lrt

total 545520

-rwxr-xr-x. 1 gpadmin gpadmin 280341880 Jan 16 10:47 greenplum-db-5.16.0-rhel7-x86_64.bin

-rw-r--r--. 1 gpadmin gpadmin       111 Jan 16 10:47 greenplum-db-5.16.0-rhel7-x86_64.bin.sha256

-rw-r--r--. 1 gpadmin gpadmin 278265370 Mar  5 11:18 greenplum-db-5.16.0-rhel7-x86_64.zip

[gpadmin@rhmdw soft]$ sh greenplum-db-5.16.0-rhel7-x86_64.bin

********************************************************************************

Do you accept the Pivotal Database license agreement? [yes|no]

********************************************************************************

yes

********************************************************************************

Provide the installation path for Greenplum Database or press ENTER to 

accept the default installation path: /usr/local/greenplum-db-5.16.0

********************************************************************************

/gp/app

********************************************************************************

Install Greenplum Database into /gp/app? [yes|no]

********************************************************************************

yes

[gpadmin@rhmdw soft]$ ls -lrt /gp/app/

total 32

drwxr-xr-x. 4 gpadmin gpadmin   41 May 29  2018 share

drwxr-xr-x. 7 gpadmin gpadmin 8192 May 29  2018 lib

drwxr-xr-x. 7 gpadmin gpadmin   93 Jan 16 03:07 pxf

drwxr-xr-x. 4 gpadmin gpadmin   37 Jan 16 10:35 docs

drwxr-xr-x. 3 gpadmin gpadmin   20 Jan 16 10:36 ext

drwxr-xr-x. 6 gpadmin gpadmin 4096 Jan 16 10:36 include

drwxr-xr-x. 2 gpadmin gpadmin   98 Jan 16 10:36 etc

drwxr-xr-x. 2 gpadmin gpadmin 4096 Jan 16 10:40 sbin

drwxr-xr-x. 7 gpadmin gpadmin 4096 Jan 16 10:46 bin

-rw-rw-r--. 1 gpadmin gpadmin  714 Mar  5 11:20 greenplum_path.sh

Directory and file descriptions:

GPDB-LICENSE.txt: the Greenplum license agreement.

greenplum_path.sh: contains the environment variables for Greenplum Database; see "Setting Greenplum Environment Variables".

bin: Greenplum Database management utilities; this directory also contains the PostgreSQL client and server programs, most of which are also used by Greenplum Database.

sbin: supporting/internal scripts and programs.

demo: Greenplum demo programs.

docs: Greenplum Database documentation (PDF files).

etc: sample configuration files for OpenSSL.

ext: bundled programs (such as Python) used by some Greenplum Database utilities.

include: C header files for Greenplum Database.

lib: Greenplum Database and PostgreSQL library files.

share: shared files for Greenplum Database.

[gpadmin@rhmdw soft]$ echo "source /gp/app/greenplum_path.sh" >> ~/.bash_profile 

[gpadmin@rhmdw soft]$ cd /gp/app/

[gpadmin@rhmdw app]$ mkdir config

[gpadmin@rhmdw app]$ cd config/
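Create the two host files used by the cluster utilities (a sketch; the original only shows their contents via more below). hostlist lists every node, seg_host lists only the segment hosts:

cat > hostlist <<EOF
rhmdw
rhsdw1
rhsdw2
EOF

cat > seg_host <<EOF
rhsdw1
rhsdw2
EOF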

[gpadmin@rhmdw config]$ more *

::::::::::::::

hostlist

::::::::::::::

rhmdw

rhsdw1

rhsdw2

::::::::::::::

seg_host

::::::::::::::

rhsdw1

rhsdw2

4.2 Set up passwordless SSH between the nodes

[gpadmin@rhmdw config]$ gpssh-exkeys -f hostlist 

[STEP 1 of 5] create local ID and authorize on local host

  ... /home/gpadmin/.ssh/id_rsa file exists ... key generation skipped

[STEP 2 of 5] keyscan all hosts and update known_hosts file

[STEP 3 of 5] authorize current user on remote hosts

  ... send to rhsdw1

  ***

  *** Enter password for rhsdw1: 

  ... send to rhsdw2

[STEP 4 of 5] determine common authentication file content

[STEP 5 of 5] copy authentication files to all remote hosts

  ... finished key exchange with rhsdw1

  ... finished key exchange with rhsdw2

[INFO] completed successfully

4.3 Verify passwordless SSH

[gpadmin@rhmdw config]$ gpssh -f /gp/app/config/hostlist -e 'pwd'

[ rhmdw] pwd

[ rhmdw] /home/gpadmin

[rhsdw1] pwd

[rhsdw1] /home/gpadmin

[rhsdw2] pwd

[rhsdw2] /home/gpadmin

5. Install the software on the segment hosts from the master

Note: with this method the manual installation step on rhmdw can be skipped. (VM snapshot taken at this point.)

5.1 Run gpseginstall

[gpadmin@rhmdw config]$ gpseginstall -f hostlist -u gpadmin -p gpadmin

......

20190305:11:34:14:014689 gpseginstall:rhmdw:gpadmin-[INFO]:-SUCCESS -- Requested commands completed

5.2 Verify

[gpadmin@rhmdw config]$ gpssh -f /gp/app/config/hostlist -e 'ls -lrt /gp/app'

5.3 Configure the environment variables on the segment hosts

[gpadmin@rhmdw config]$ scp ~/.bash_profile  rhsdw1:/home/gpadmin/

.bash_profile                                                                           100%  228   260.7KB/s   00:00    

[gpadmin@rhmdw config]$ scp ~/.bash_profile  rhsdw2:/home/gpadmin/

.bash_profile  

5.4 Create the data directories on the relevant hosts

[gpadmin@rhmdw config]$ cd /gp/

[gpadmin@rhmdw gp]$ mkdir -p  gpdata/master

[gpadmin@rhmdw gp]$ gpssh -f /gp/app/config/seg_host -e 'mkdir -p /gp/gpdata/primary'

[rhsdw1] mkdir -p /gp/gpdata/primary

[rhsdw2] mkdir -p /gp/gpdata/primary

[gpadmin@rhmdw gp]$ gpssh -f /gp/app/config/seg_host -e 'mkdir -p /gp/gpdata/mirror'

[rhsdw2] mkdir -p /gp/gpdata/mirror

[rhsdw1] mkdir -p /gp/gpdata/mirror

6. System checks

6.1 Pre-initialization parameter check from the master (the errors reported below can be skipped)

[gpadmin@rhmdw gp]$ gpcheck -f /gp/app/config/hostlist -m rhmdw

20190305:11:39:29:015430 gpcheck:rhmdw:gpadmin-[INFO]:-dedupe hostnames

20190305:11:39:29:015430 gpcheck:rhmdw:gpadmin-[INFO]:-Detected platform: Generic Linux Cluster

20190305:11:39:29:015430 gpcheck:rhmdw:gpadmin-[INFO]:-generate data on servers

20190305:11:39:30:015430 gpcheck:rhmdw:gpadmin-[INFO]:-copy data files from servers

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[INFO]:-delete remote tmp files

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[INFO]:-Using gpcheck config file: /gp/greenplum-db/./etc/gpcheck.cnf

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(None): utility will not check all settings when run as non-root user

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhmdw): XFS filesystem on device /dev/mapper/rhel-root has 6 XFS mount options and 4 are expected

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhmdw): XFS filesystem on device /dev/mapper/rhel-root is missing the recommended mount option 'allocsize=16m'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhmdw): XFS filesystem on device /dev/mapper/rhel-root is missing the recommended mount option 'noatime'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhmdw): XFS filesystem on device /dev/mapper/rhel-gplum has 6 XFS mount options and 4 are expected

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhmdw): XFS filesystem on device /dev/mapper/rhel-gplum is missing the recommended mount option 'allocsize=16m'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhmdw): XFS filesystem on device /dev/mapper/rhel-gplum is missing the recommended mount option 'noatime'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhmdw): XFS filesystem on device /dev/sda1 has 6 XFS mount options and 4 are expected

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhmdw): XFS filesystem on device /dev/sda1 is missing the recommended mount option 'allocsize=16m'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhmdw): XFS filesystem on device /dev/sda1 is missing the recommended mount option 'noatime'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw2): XFS filesystem on device /dev/mapper/rhel-root has 6 XFS mount options and 4 are expected

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw2): XFS filesystem on device /dev/mapper/rhel-root is missing the recommended mount option 'allocsize=16m'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw2): XFS filesystem on device /dev/mapper/rhel-root is missing the recommended mount option 'noatime'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw2): XFS filesystem on device /dev/mapper/rhel-gplum has 6 XFS mount options and 4 are expected

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw2): XFS filesystem on device /dev/mapper/rhel-gplum is missing the recommended mount option 'allocsize=16m'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw2): XFS filesystem on device /dev/mapper/rhel-gplum is missing the recommended mount option 'noatime'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw2): XFS filesystem on device /dev/sda1 has 6 XFS mount options and 4 are expected

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw2): XFS filesystem on device /dev/sda1 is missing the recommended mount option 'allocsize=16m'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw2): XFS filesystem on device /dev/sda1 is missing the recommended mount option 'noatime'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw1): XFS filesystem on device /dev/mapper/rhel-root has 6 XFS mount options and 4 are expected

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw1): XFS filesystem on device /dev/mapper/rhel-root is missing the recommended mount option 'allocsize=16m'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw1): XFS filesystem on device /dev/mapper/rhel-root is missing the recommended mount option 'noatime'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw1): XFS filesystem on device /dev/mapper/rhel-gplum has 6 XFS mount options and 4 are expected

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw1): XFS filesystem on device /dev/mapper/rhel-gplum is missing the recommended mount option 'allocsize=16m'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw1): XFS filesystem on device /dev/mapper/rhel-gplum is missing the recommended mount option 'noatime'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw1): XFS filesystem on device /dev/sda1 has 6 XFS mount options and 4 are expected

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw1): XFS filesystem on device /dev/sda1 is missing the recommended mount option 'allocsize=16m'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[ERROR]:-GPCHECK_ERROR host(rhsdw1): XFS filesystem on device /dev/sda1 is missing the recommended mount option 'noatime'

20190305:11:39:31:015430 gpcheck:rhmdw:gpadmin-[INFO]:-gpcheck completing...

Re-adjust the mount options for /gp to:

nodev,noatime,inode64,allocsize=16m
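For example, the /etc/fstab entry added in section 2 can be changed as follows and the filesystem re-mounted (a sketch matching the options above; make sure nothing is using /gp before unmounting):

/dev/mapper/rhel-gplum   /gp                       xfs     nodev,noatime,inode64,allocsize=16m        0 0

umount /gp
mount /gp
mount | grep /gp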

Parameter check (-m names the master host and -s an optional standby master; in this setup the master is rhmdw and no standby is configured):

gpcheck -f /gp/app/config/hostlist -m mdw -s smdw

Review and adjust the corresponding system parameters as needed.

6.2 Network performance check

gpcheckperf -f /gp/app/config/hostlist -r N -d /tmp > checknetwork.out

6.3 Disk performance check

gpcheckperf -f /gp/app/config/hostlist -r ds -D -d /gp/gpdata/primary -d /gp/gpdata/mirror > checkio.out

The target directories for the disk I/O check should be the database data directories; the test writes an amount of data equal to twice the system memory.

[gpadmin@rhmdw ~]$ gpcheckperf -f /gp/app/config/hostlist -r -N -d /tmp

/gp/greenplum-db/./bin/gpcheckperf -f /gp/app/config/hostlist -r -N -d /tmp

-------------------

--  NETPERF TEST

-------------------

====================

==  RESULT

====================

Netperf bisection bandwidth test

rhmdw -> rhsdw1 = 274.460000

rhsdw2 -> rhmdw = 389.820000

rhsdw1 -> rhmdw = 402.760000

rhmdw -> rhsdw2 = 278.710000

Summary:

sum = 1345.75 MB/sec

min = 274.46 MB/sec

max = 402.76 MB/sec

avg = 336.44 MB/sec

median = 389.82 MB/sec

[Warning] connection between rhmdw and rhsdw1 is no good

[Warning] connection between rhmdw and rhsdw2 is no good

7. Create the Greenplum Database configuration file (master node only)

cp /gp/app/docs/cli_help/gpconfigs/gpinitsystem_config /gp/app/config/

[gpadmin@rhmdw config]$ more /gp/app/config/gpinitsystem_config   | egrep -v "(^#|^$)" 

ARRAY_NAME="Greenplum Data Platform"

SEG_PREFIX=gpseg

PORT_BASE=6000

declare -a DATA_DIRECTORY=(/gp/gpdata/primary)

MASTER_HOSTNAME=rhmdw

MASTER_DIRECTORY=/gp/gpdata/master

MASTER_PORT=5432

TRUSTED_SHELL=ssh

CHECK_POINT_SEGMENTS=8

ENCODING=UNICODE

ARRAY_NAME: name of the array; the default is "EMC Greenplum DW".

SEG_PREFIX: prefix for the segment instances; the default is gpseg.

PORT_BASE: starting port for the segments; port numbers are assigned upward from this value. The default starts at 40000.

DATA_DIRECTORY: data directories for the primary segments; list one directory for each primary segment you want on a host.

MASTER_HOSTNAME: hostname of the master.

MASTER_DIRECTORY: data directory of the master.

MASTER_PORT: port of the master; the default is 5432.

TRUSTED_SHELL: shell used for trust between the nodes; the default is ssh.

CHECK_POINT_SEGMENTS: number of write-ahead log (WAL) segments, 8 by default, which means 1088 MB of WAL space is allocated for each Segment or Master instance on a host.

ENCODING=UNICODE: initial character set; the default is UNICODE (UTF-8).
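Note that although mirror directories were created in 5.4, this configuration does not enable mirroring (the gpinitsystem output below reports "Mirroring config = OFF"). To initialize with mirrors you would also set the mirror-related parameters in the template, roughly along these lines (a sketch, not part of the original setup):

MIRROR_PORT_BASE=7000
REPLICATION_PORT_BASE=8000
MIRROR_REPLICATION_PORT_BASE=9000
declare -a MIRROR_DATA_DIRECTORY=(/gp/gpdata/mirror)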

8. Initialize the database from the master

[gpadmin@rhmdw config]$ gpinitsystem -c /gp/app/config/gpinitsystem_config -h seg_host

-c: specifies the initialization configuration file.

-h: specifies the segment host file.

-s: specifies a standby master host and creates the standby (not used here).

20190305:13:16:11:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Checking configuration parameters, please wait...

20190305:13:16:11:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Reading Greenplum configuration file /gp/app/config/gpinitsystem_config

20190305:13:16:11:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Locale has not been set in /gp/app/config/gpinitsystem_config, will set to default value

20190305:13:16:11:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Locale set to en_US.utf8

20190305:13:16:11:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-No DATABASE_NAME set, will exit following template1 updates

20190305:13:16:11:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-MASTER_MAX_CONNECT not set, will set to default value 250

20190305:13:16:11:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Checking configuration parameters, Completed

20190305:13:16:11:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Commencing multi-home checks, please wait...

..

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Checking new segment hosts, Completed

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Greenplum Database Creation Parameters

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:---------------------------------------

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master Configuration

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:---------------------------------------

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master instance name       = Greenplum Data Platform

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master hostname            = rhmdw

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master port                = 5432

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master instance dir        = /gp/gpdata/master/gpseg-1

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master LOCALE              = en_US.utf8

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Greenplum segment prefix   = gpseg

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master Database            = 

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master connections         = 250

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master buffers             = 128000kB

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Segment connections        = 750

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Segment buffers            = 128000kB

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Checkpoint segments        = 8

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Encoding                   = UNICODE

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Postgres param file        = Off

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Initdb to be used          = /gp/greenplum-db/./bin/initdb

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-GP_LIBRARY_PATH is         = /gp/greenplum-db/./lib

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-HEAP_CHECKSUM is           = on

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-HBA_HOSTNAMES is           = 0

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Ulimit check               = Passed

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Array host connect type    = Single hostname per node

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master IP address [1]      = ::1

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master IP address [2]      = 10.0.2.15

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master IP address [3]      = 192.168.15.201

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master IP address [4]      = fe80::a00:27ff:fe2f:f586

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Master IP address [5]      = fe80::a00:27ff:fee0:d824

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Standby Master             = Not Configured

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Primary segment #          = 1

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Total Database segments    = 2

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Trusted shell              = ssh

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Number segment hosts       = 2

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Mirroring config           = OFF

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:----------------------------------------

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Greenplum Primary Segment Configuration

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:----------------------------------------

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-rhsdw1      /gp/gpdata/primary/gpseg0       6000    2       0

20190305:13:16:21:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-rhsdw2      /gp/gpdata/primary/gpseg1       6000    3       1

Continue with Greenplum creation Yy|Nn (default=N):

> y

20190305:13:17:45:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-The Master /gp/gpdata/master/gpseg-1/pg_hba.conf post gpinitsystem

20190305:13:17:45:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-has been configured to allow all hosts within this new

20190305:13:17:45:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-array to intercommunicate. Any hosts external to this

20190305:13:17:45:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-new array must be explicitly added to this file

20190305:13:17:45:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-Refer to the Greenplum Admin support guide which is

20190305:13:17:45:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-located in the /gp/greenplum-db/./docs directory

20190305:13:17:45:023530 gpinitsystem:rhmdw:gpadmin-[INFO]:-------------------------------------------------------

If the gpinitsystem utility fails and leaves the system in a partially installed state, it creates the following backout script:

~/gpAdminLogs/backout_gpinitsystem_<user>_<timestamp>
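If that happens, run the backout script as gpadmin to clean up before re-running gpinitsystem, for example (a sketch; the actual timestamped filename is whatever was created):

sh ~/gpAdminLogs/backout_gpinitsystem_gpadmin_<timestamp>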

9. Post-installation checks

[gpadmin@rhmdw app]$ gpssh -f /gp/app/config/hostlist -e 'netstat -nltp | grep postgres'

[ rhmdw] netstat -nltp | grep postgres

[ rhmdw] (Not all processes could be identified, non-owned process info

[ rhmdw]  will not be shown, you would have to be root to see it all.)

[ rhmdw] tcp        0      0 0.0.0.0:5432            0.0.0.0:*               LISTEN      25910/postgres      

[ rhmdw] tcp6       0      0 :::46371                :::*                    LISTEN      25918/postgres:  54 

[ rhmdw] tcp6       0      0 :::5432                 :::*                    LISTEN      25910/postgres      

[rhsdw1] netstat -nltp | grep postgres

[rhsdw1] (Not all processes could be identified, non-owned process info

[rhsdw1]  will not be shown, you would have to be root to see it all.)

[rhsdw1] tcp        0      0 0.0.0.0:6000            0.0.0.0:*               LISTEN      12803/postgres      

[rhsdw1] tcp6       0      0 :::6000                 :::*                    LISTEN      12803/postgres      

[rhsdw2] netstat -nltp | grep postgres

[rhsdw2] (Not all processes could be identified, non-owned process info

[rhsdw2]  will not be shown, you would have to be root to see it all.)

[rhsdw2] tcp        0      0 0.0.0.0:6000            0.0.0.0:*               LISTEN      12864/postgres      

[rhsdw2] tcp6       0      0 :::6000                 :::*                    LISTEN      12864/postgres  

[root@rhmdw ~]# netstat -alntp | grep postgres

tcp        0      0 0.0.0.0:5432            0.0.0.0:*               LISTEN      25910/postgres      

tcp6       0      0 :::46371                :::*                    LISTEN      25918/postgres:  54 

tcp6       0      0 :::5432                 :::*                    LISTEN      25910/postgres      

[root@rhsdw1 soft]# netstat -anltp |grep postgres

tcp        0      0 0.0.0.0:6000            0.0.0.0:*               LISTEN      12803/postgres      

tcp6       0      0 :::6000                 :::*                    LISTEN      12803/postgres 

[root@rhsdw2 gp]# netstat -anltp | grep postgres

tcp        0      0 0.0.0.0:6000            0.0.0.0:*               LISTEN      12864/postgres      

tcp6       0      0 :::6000                 :::*                    LISTEN      12864/postgres  
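Beyond the port checks, the cluster can also be verified from the master with gpstate and psql (a small addition, not in the original transcript; the MASTER_DATA_DIRECTORY below matches the directory created in section 8):

export MASTER_DATA_DIRECTORY=/gp/gpdata/master/gpseg-1
gpstate -s
psql -d template1 -c 'select * from gp_segment_configuration;'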

References:

https://gp-docs-cn.github.io/docs/admin_guide/managing/maintain.html

https://github.com/greenplum-db/gpdb

https://www.cnblogs.com/lottu/p/8717400.html

http://www.ywnds.com/?p=9484
