安装12cR1对内存的要求比较高, 本次测试安装,两个节点分配了4800M的内存。但是安装的时候,发现内存占用比较厉害。
安装GI的过程比较慢,而且发现内存几乎被耗光,安装Oracle的时候,还是比较快的。
安装过程如下:
/etc/hosts 设置
127.0.0.1 localhost.localdomain localhost # Public 192.168.2.201 host01 192.168.2.202 host02 # Private 10.10.10.1 host01-priv 10.10.10.2 host02-priv # Virtual 192.168.2.101 host01-vip 192.168.2.102 host02-vip # SCAN 192.168.2.111 scan
创建组和用户
groupadd --gid 54321 oinstall groupadd --gid 54322 dba groupadd --gid 54323 asmdba groupadd --gid 54324 asmoper groupadd --gid 54325 asmadmin groupadd --gid 54326 oper groupadd --gid 54327 backupdba groupadd --gid 54328 dgdba groupadd --gid 54329 kmdba useradd --uid 54321 --gid oinstall --groups dba,oper,asmdba,asmoper,backupdba,dgdba,kmdba oracle # passwd oracle useradd --uid 54322 --gid oinstall --groups dba,asmadmin,asmdba,asmoper grid # passwd grid
ntp 设置
[root@host01 etc]# mv ntp.conf ntp.conf_bak [root@host02 etc]# mv ntp.conf ntp.conf_bak
安装包,12cR1需要以下安装包
binutils compat-libcap1 compat-libstdc++-33 gcc gcc-c++ glibc glibc-devel ksh libgcc libstdc++ libstdc++-devel libaio libaio-devel libXext libXtst libX11 libXau libxcb libXi make sysstat libXmu libXt libXv libXxf86dga libXxf86misc libXxf86vm xorg-x11-utils xorg-x11-xauth nfs-utils
配置YUM
[root@host02 etc]# cd /etc/yum.repos.d/ [root@host02 yum.repos.d]# rm -rf *.repo [root@host02 yum.repos.d]#
vi rhel-info.repo
[Server] name=Server baseurl=file:///mnt/Server enabled=1 gpgcheck=1 gpgkey=file:///mnt/RPM-GPG-KEY-redhat-release
yum clean all; yum update
把需要安装的包,写在一个临时文件中,然后使用awk和yum组合命令来安装
vi /tmp/req-rpm.txt yum install `awk '{print $1}' /tmp/req-rpm.txt`
OS的配置
vi /etc/selinux/config SELINUX=disabled setenforce 0 （安装 Oracle GI 要求关闭 SELinux 或将其设为 permissive）
配置 shm 等 /etc/sysctl.conf
kernel.shmmax = 4398046511104 kernel.shmall = 1073741824 kernel.shmmni = 4096 kernel.sem = 250 32000 100 128 net.ipv4.ip_local_port_range = 9000 65500 net.core.rmem_default = 262144 net.core.rmem_max = 4194304 net.core.wmem_default = 262144 net.core.wmem_max = 1048576 fs.aio-max-nr = 1048576 fs.file-max = 6815744 kernel.panic_on_oops = 1
配置Limits.conf
-- vi /etc/security/limits.conf oracle soft nproc 16384 oracle hard nproc 16384 oracle soft nofile 1024 oracle hard nofile 65536 oracle soft stack 10240 oracle hard stack 32768 grid soft nproc 16384 grid hard nproc 16384 grid soft nofile 1024 grid hard nofile 65536 grid soft stack 10240 grid hard stack 32768
vi /etc/pam.d/login
session required pam_limits.so
vi /etc/profile
if [ $USER = "oracle" ]; then if [ $SHELL = "/bin/ksh" ]; then ulimit -u 16384 ulimit -n 65536 else ulimit -u 16384 -n 65536 fi fi if [ $USER = "grid" ]; then if [ $SHELL = "/bin/ksh" ]; then ulimit -u 16384 ulimit -n 65536 else ulimit -u 16384 -n 65536 fi fi
创建文件夹
mkdir --parents /u01/app/grid chown --recursive grid.oinstall /u01/ mkdir /u01/app/oracle chown --recursive oracle.oinstall /u01/app/oracle
添加共享磁盘
VBoxManage createhd --filename D:\rac12c_asm\asmocr1.vdi --size 4096 --format VDI --variant Fixed VBoxManage createhd --filename D:\rac12c_asm\asmocr2.vdi --size 4096 --format VDI --variant Fixed VBoxManage createhd --filename D:\rac12c_asm\asmocr3.vdi --size 4096 --format VDI --variant Fixed VBoxManage createhd --filename D:\rac12c_asm\asmdata1.vdi --size 102400 --format VDI --variant Fixed
将共享磁盘挂在节点1和节点2上
VBoxManage storageattach host01 --storagectl "SATA" --port 1 --device 0 --type hdd --medium D:\rac12c_asm\asmocr1.vdi --mtype shareable VBoxManage storageattach host01 --storagectl "SATA" --port 2 --device 0 --type hdd --medium D:\rac12c_asm\asmocr2.vdi --mtype shareable VBoxManage storageattach host01 --storagectl "SATA" --port 3 --device 0 --type hdd --medium D:\rac12c_asm\asmocr3.vdi --mtype shareable VBoxManage storageattach host01 --storagectl "SATA" --port 4 --device 0 --type hdd --medium D:\rac12c_asm\asmdata1.vdi --mtype shareable
VBoxManage storageattach host02 --storagectl "SATA" --port 1 --device 0 --type hdd --medium D:\rac12c_asm\asmocr1.vdi --mtype shareable VBoxManage storageattach host02 --storagectl "SATA" --port 2 --device 0 --type hdd --medium D:\rac12c_asm\asmocr2.vdi --mtype shareable VBoxManage storageattach host02 --storagectl "SATA" --port 3 --device 0 --type hdd --medium D:\rac12c_asm\asmocr3.vdi --mtype shareable VBoxManage storageattach host02 --storagectl "SATA" --port 4 --device 0 --type hdd --medium D:\rac12c_asm\asmdata1.vdi --mtype shareable
VBoxManage modifyhd D:\rac12c_asm\asmocr1.vdi --type shareable VBoxManage modifyhd D:\rac12c_asm\asmocr2.vdi --type shareable VBoxManage modifyhd D:\rac12c_asm\asmocr3.vdi --type shareable VBoxManage modifyhd D:\rac12c_asm\asmdata1.vdi --type shareable
配置UDEV
/sbin/scsi_id -g -u -d /dev/sdb  （RHEL/OL 6 下 scsi_id 的输出格式如下） 1ATA_VBOX_HARDDISK_VB8bd65052-55e13b1a 1ATA_VBOX_HARDDISK_VB891c85f2-51b4d83d 1ATA_VBOX_HARDDISK_VB1c5b1a12-47d3ffce 1ATA_VBOX_HARDDISK_VB37769cee-7ecf5571
KERNEL=="sd?1", BUS=="scsi", PROGRAM=="/sbin/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VB8bd65052-55e13b1a", NAME="asm-ocr1", OWNER="grid", GROUP="asmadmin", MODE="0660" KERNEL=="sd?1", BUS=="scsi", PROGRAM=="/sbin/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VB891c85f2-51b4d83d", NAME="asm-ocr2", OWNER="grid", GROUP="asmadmin", MODE="0660" KERNEL=="sd?1", BUS=="scsi", PROGRAM=="/sbin/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VB1c5b1a12-47d3ffce", NAME="asm-ocr3", OWNER="grid", GROUP="asmadmin", MODE="0660" KERNEL=="sd?1", BUS=="scsi", PROGRAM=="/sbin/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VB37769cee-7ecf5571", NAME="asm-data1", OWNER="grid", GROUP="asmadmin", MODE="0660"
查看ownership
[root@host01 dev]# ls -lrt asm* brw-rw----. 1 grid asmadmin 8, 17 Apr 22 17:08 asm-ocr1 brw-rw----. 1 grid asmadmin 8, 33 Apr 22 17:08 asm-ocr2 brw-rw----. 1 grid asmadmin 8, 49 Apr 22 17:08 asm-ocr3 brw-rw----. 1 grid asmadmin 8, 65 Apr 22 17:08 asm-data1 [root@host01 dev]#
开始安装,过程略
查看grid的状态
[grid@host01 bin]$ ./crsctl status resource -t -------------------------------------------------------------------------------- Name Target State Server State details -------------------------------------------------------------------------------- Local Resources -------------------------------------------------------------------------------- ora.LISTENER.lsnr ONLINE ONLINE host01 STABLE ONLINE ONLINE host02 STABLE ora.OCRVOTE.dg ONLINE ONLINE host01 STABLE ONLINE ONLINE host02 STABLE ora.asm ONLINE ONLINE host01 Started,STABLE ONLINE ONLINE host02 Started,STABLE ora.net1.network ONLINE ONLINE host01 STABLE ONLINE ONLINE host02 STABLE ora.ons ONLINE ONLINE host01 STABLE ONLINE ONLINE host02 STABLE -------------------------------------------------------------------------------- Cluster Resources -------------------------------------------------------------------------------- ora.LISTENER_SCAN1.lsnr 1 ONLINE ONLINE host02 STABLE ora.MGMTLSNR 1 ONLINE ONLINE host02 169.254.48.111 10.10 .10.2,STABLE ora.cvu 1 ONLINE ONLINE host02 STABLE ora.host01.vip 1 ONLINE ONLINE host01 STABLE ora.host02.vip 1 ONLINE ONLINE host02 STABLE ora.mgmtdb 1 ONLINE ONLINE host02 Open,STABLE ora.oc4j 1 ONLINE ONLINE host02 STABLE ora.scan1.vip 1 ONLINE ONLINE host02 STABLE -------------------------------------------------------------------------------- [grid@host01 bin]$
END