1. Environmental preparation
1. The purpose of the experiment
(Single machine) Migrate the 11.2.0.4 single-instance database from the file system to the ASM single-instance disk group, and register it to the cluster management.
2. Environmental preparation
Item | Source database |
operating system | RHEL6.8 |
Host name | s11g |
IP address | 192.168.56.20 |
Database and version | ORACLE11.2.0.4 |
Database character set | AL32UTF8 |
ORACLE SID | orcl |
Data file directory | /oradata/orcl |
3. ASM disk group planning
Disk group name | size | Redundant mode |
OCR | 5G | EXTERNAL |
DATA | 10G | EXTERNAL |
4. Data file location record
/oradata/orcl
--Data files
SYS@orcl>select name from v$datafile;
NAME
------------------------------------------------------------
/oradata/orcl/system01.dbf
/oradata/orcl/sysaux01.dbf
/oradata/orcl/undotbs01.dbf
/oradata/orcl/users01.dbf
/oradata/orcl/example01.dbf
/oradata/orcl/ogg01.dbf
6 rows selected.
--Control files
SYS@orcl>select name from v$controlfile;
NAME
------------------------------------------------------------
/oradata/orcl/control01.ctl
/u01/app/oracle/fast_recovery_area/orcl/control02.ctl
--Temp files
SYS@orcl>select name from v$tempfile;
NAME
------------------------------------------------------------
/oradata/orcl/temp01.dbf
--Redo log files
SYS@orcl>select member from v$logfile;
MEMBER
------------------------------------------------------------
/oradata/orcl/redo03.log
/oradata/orcl/redo02.log
/oradata/orcl/redo01.log
--Parameter file
SYS@orcl>show parameter pfile
NAME TYPE VALUE
-----------------------------------------------------------------------------
spfile string /u01/app/oracle/product/11.2.0/db/dbs/spfileorcl.ora
2. Grid software installation
1. Create grid users and groups
# Create the OS groups used by ASM administration, with fixed GIDs so they
# stay consistent if this host is ever joined to a cluster.
while read -r gid grp; do
  /usr/sbin/groupadd -g "$gid" "$grp"
done <<'GROUPS_EOF'
1120 asmadmin
1121 asmdba
1122 asmoper
GROUPS_EOF
# grid owner: primary group oinstall, secondary groups for ASM and DB roles.
/usr/sbin/useradd -u 1200 -g oinstall -G asmadmin,asmdba,asmoper,dba,oper grid
# Let the existing oracle user access ASM devices through asmdba.
/usr/sbin/usermod -a -G asmdba oracle
# NOTE(review): plaintext password set non-interactively; change in production.
echo "oracle" |passwd grid --stdin
2. Create a grid file directory
# Directory layout for Grid Infrastructure: software home and ORACLE_BASE.
mkdir -p /u01/app/11.2.0/grid /u01/app/grid
# Hand the grid tree and the central inventory over to the grid user.
chown -R grid:oinstall /u01/app/grid
chown -R grid:oinstall /u01/app/11.2.0/grid
chown -R grid:oinstall /u01/app/oraInventory
3. Configure grid environment variables
vi /home/grid/.bash_profile
# Environment for the grid user (owner of the +ASM instance).
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export ORACLE_BASE=/u01/app/grid
# NOTE(review): the trailing slash on ORACLE_HOME can confuse some Oracle
# tools; if removed, keep it consistent with ORACLE_HOME in grid.rsp.
export ORACLE_HOME=/u01/app/11.2.0/grid/
export ORACLE_SID=+ASM
LD_LIBRARY_PATH=$ORACLE_HOME/lib:$ORACLE_HOME/rdbms/lib:/lib:/usr/lib:.
export LD_LIBRARY_PATH
SHLIB_PATH=$ORACLE_HOME/lib32:$ORACLE_HOME/rdbms/lib32:.
export SHLIB_PATH
PATH=$PATH:$HOME/.local/bin:$HOME/bin:$ORACLE_HOME/bin:/usr/local/bin:/usr/bin:/usr/sbin:.
export PATH
CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib:$ORACLE_HOME/network/jlib
export CLASSPATH
export TMP=/tmp
export TMPDIR=$TMP
# Used by the installer for remote operations (harmless on a single node).
export ORACLE_SRVM_REMOTESHELL=/usr/bin/ssh
export ORACLE_SRVM_REMOTECOPY=/usr/bin/scp
PS1=$LOGNAME@$(hostname):'$PWD''$ '
umask 022   # the original listed umask 022 twice; once is enough
4. udev+multipath bind disk
--Multipath binding
# Install device-mapper-multipath and enable the multipath daemon.
yum install -y device-mapper*
mpathconf --enable --with_multipathd y
# Keep a backup of any pre-existing configuration.
mv /etc/multipath.conf /etc/multipath.conf.bak >/dev/null 2>&1
## Write multipath.conf: friendly mpathX names; blacklist the local sda disk.
cat <<EOF >/etc/multipath.conf
defaults {
user_friendly_names yes
}
blacklist {
devnode "^sda"
}
EOF
# Flush stale maps, rebuild the device maps, then show the result.
multipath -F >/dev/null 2>&1
multipath -v2 >/dev/null 2>&1
multipath -ll
--Bind udev rules
## Generate udev rules that map each multipath device's DM_UUID to a stable
## /dev/asm_<name> symlink owned by grid:asmadmin with mode 0660.
cd /dev/mapper
rules=/etc/udev/rules.d/99-oracle-asmdevices.rules
# Start from an empty rules file so re-running never duplicates entries
# (the original also left a udev_info scratch file behind in /dev/mapper).
: > "$rules"
for dev in mpath*; do
  # The DM_UUID line looks like "E: DM_UUID=mpath-3600..."; keep the value after '='.
  dm_uuid=$(udevadm info --query=all --name="/dev/mapper/$dev" | grep -i dm_uuid | awk -F'=' '{print $2}')
  echo "KERNEL==\"dm-*\",ENV{DM_UUID}==\"${dm_uuid}\",SYMLINK+=\"asm_${dev}\",OWNER=\"grid\",GROUP=\"asmadmin\",MODE=\"0660\"" >> "$rules"
done
udevadm control --reload-rules
udevadm trigger
ls -l /dev/asm*
5. User resource configuration
# Raise process/open-file/stack limits for the grid user (Oracle prerequisites).
# NOTE(review): Oracle's 11.2 prerequisite docs also list a hard stack limit
# (e.g. "grid hard stack 32768") -- confirm whether it should be added here.
cat <<EOF >> /etc/security/limits.conf
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
EOF
6. Install the CVU software package
--Upload the grid installation package and unzip it
# Unzip installer part 3 of 7 (the Grid Infrastructure software) under /soft.
cd /soft
unzip -q /soft/p13390677_112040_Linux-x86-64_3of7.zip
--Install the cvuqdisk package
# Install smartmontools first (NOTE(review): presumably a cvuqdisk/CVU
# prerequisite on this release -- confirm), then the cvuqdisk rpm shipped
# inside the grid media.
yum install -y smartmontools
rpm -ivh /soft/grid/rpm/cvuqdisk-1.0.9-1.rpm
7. Install Grid software
1. Install the grid software silently
Installation mode: HA_CONFIG — configure Grid Infrastructure for a standalone server
--编辑grid.rsp静默安装响应文件
## Response file for the silent Grid install (HA_CONFIG = Oracle Restart /
## standalone server). Written with ">" (truncate) instead of the original
## ">>" so that re-running this step cannot append a duplicate copy and
## corrupt the response file.
cat <<EOF > /soft/grid.rsp
oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v11_2_0
ORACLE_HOSTNAME=s11g
INVENTORY_LOCATION=/u01/app/oraInventory
SELECTED_LANGUAGES=en
oracle.install.option=HA_CONFIG
ORACLE_BASE=/u01/app/grid
ORACLE_HOME=/u01/app/11.2.0/grid/
oracle.install.asm.OSDBA=asmdba
oracle.install.asm.OSOPER=asmoper
oracle.install.asm.OSASM=asmadmin
oracle.install.crs.config.gpnp.scanName=
oracle.install.crs.config.gpnp.scanPort=
oracle.install.crs.config.clusterName=
oracle.install.crs.config.gpnp.configureGNS=false
oracle.install.crs.config.gpnp.gnsSubDomain=
oracle.install.crs.config.gpnp.gnsVIPAddress=
oracle.install.crs.config.autoConfigureClusterNodeVIP=false
oracle.install.crs.config.clusterNodes=
oracle.install.crs.config.networkInterfaceList=
oracle.install.crs.config.storageOption=
oracle.install.crs.config.sharedFileSystemStorage.diskDriveMapping=
oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations=
oracle.install.crs.config.sharedFileSystemStorage.votingDiskRedundancy=NORMAL
oracle.install.crs.config.sharedFileSystemStorage.ocrLocations=
oracle.install.crs.config.sharedFileSystemStorage.ocrRedundancy=NORMAL
oracle.install.crs.config.useIPMI=false
oracle.install.crs.config.ipmi.bmcUsername=
oracle.install.crs.config.ipmi.bmcPassword=
oracle.install.asm.SYSASMPassword=oracle
oracle.install.asm.diskGroup.name=OCR
oracle.install.asm.diskGroup.redundancy=EXTERNAL
oracle.install.asm.diskGroup.AUSize=1
oracle.install.asm.diskGroup.disks=/dev/asm_mpathb
oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm*
oracle.install.asm.monitorPassword=oracle
oracle.install.crs.upgrade.clusterNodes=
oracle.install.asm.upgradeASM=false
oracle.installer.autoupdates.option=SKIP_UPDATES
oracle.installer.autoupdates.downloadUpdatesLoc=
AUTOUPDATES_MYORACLESUPPORT_USERNAME=
AUTOUPDATES_MYORACLESUPPORT_PASSWORD=
PROXY_HOST=
PROXY_PORT=0
PROXY_USER=
PROXY_PWD=
PROXY_REALM=
EOF
--Silent installation
# openssh-clients supplies the ssh/scp binaries the installer expects.
yum install -y openssh-clients
chown -R grid:oinstall /soft/grid/
# Clear any inventory left over from the earlier database-software install.
rm -rf /u01/app/oraInventory/*
# Run the installer as grid with the response file written above.
su - grid -c "/soft/grid/runInstaller -silent -showProgress -ignoreSysPrereqs -ignorePrereq -responseFile /soft/grid.rsp"
--执行root.sh脚本
# Run root.sh as root after the installer finishes.
/u01/app/11.2.0/grid/root.sh
--Create a file that contain all the asm password cfgrsp.properties
su - grid
cat <<EOF >> /home/grid/cfgrsp.properties
oracle.assistants.asm|S_ASMPASSWORD=oracle
oracle.assistants.asm|S_ASMMONITORPASSWORD=oracle
EOF
--执行cfgrsp.properties脚本
# Run the post-install configuration assistants with those passwords.
/u01/app/11.2.0/grid/cfgtoollogs/configToolAllCommands RESPONSE_FILE=/home/grid/cfgrsp.properties
2. Graphical installation
(omitted)
8. Create ASM disk group DATA
su - grid
sqlplus / as sysasm
SQL> create diskgroup DATA external redundancy disk '/dev/asm_mpathc';
Diskgroup created.
SQL> SELECT NAME,state FROM v$asm_diskgroup;
NAME STATE
-----------------------------------------------------------------------------------
OCR MOUNTED
DATA MOUNTED
select NAME,TOTAL_MB/1024 "TOTAL/G",FREE_MB/1024 "FREE/G",round(FREE_MB/TOTAL_MB*100)||'%' Per_Free,state from v$asm_diskgroup;
NAME TOTAL/G FREE/G PER_FREE STATE
-------------------- ---------- ---------- -------------------- -------------------
OCR 10 9.94238281 99% MOUNTED
DATA 5 4.95117188 99% MOUNTED
ASMCMD [+] > lsdg
State Type Rebal Sector Block AU Total_MB Free_MB Req_mir_free_MB Usable_file_MB Offline_disks Voting_files Name
MOUNTED EXTERN N 512 4096 1048576 5120 5070 0 5070 0 N DATA/
MOUNTED EXTERN N 512 4096 1048576 10240 10181 0 10181 0 N OCR/
9. Check the ASM instance
grid@s11g:/home/grid$ crsctl stat res -t
--------------------------------------------------------------------------------
NAME TARGET STATE SERVER STATE_DETAILS
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.DATA.dg
ONLINE ONLINE s11g
ora.LISTENER.lsnr
ONLINE ONLINE s11g
ora.OCR.dg
ONLINE ONLINE s11g
ora.asm
ONLINE ONLINE s11g Started
ora.ons
OFFLINE OFFLINE s11g
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.cssd
1 ONLINE ONLINE s11g
ora.diskmon
1 OFFLINE OFFLINE
ora.evmd
1 ONLINE ONLINE s11g
3. Single-instance data file migration to ASM disk group (RMAN)
1. Edit the parameter file to specify the new control file path
SYS@orcl>create pfile='/home/oracle/pfile.ora' from spfile;
File created.
--Edit the parameter file
*.control_files='+DATA/orcl/controlfile/control01.ctl','+DATA/orcl/controlfile/control02.ctl'
*.db_recovery_file_dest='+DATA'
*.db_create_file_dest='+DATA'
*.log_archive_dest_1='location=+DATA'
2. Start the database to nomount mode
SYS@orcl>shutdown immediate
Database closed.
Database dismounted.
ORACLE instance shut down.
SYS@orcl>startup mount
ORACLE instance started.
Total System Global Area 1336176640 bytes
Fixed Size 2253024 bytes
Variable Size 838864672 bytes
Database Buffers 486539264 bytes
Redo Buffers 8519680 bytes
Database mounted.
SYS@orcl>alter database archivelog;
Database altered.
SYS@orcl>shutdown immediate
Database closed.
Database dismounted.
ORACLE instance shut down.
SYS@orcl>
SYS@orcl>startup nomount pfile='/home/oracle/pfile.ora';
ORACLE instance started.
Total System Global Area 839282688 bytes
Fixed Size 2257880 bytes
Variable Size 281021480 bytes
Database Buffers 553648128 bytes
Redo Buffers 2355200 bytes
3. RMAN recovery control file
Key points:
1. Need to execute setasmgidwrap -o $ORACLE_HOME/bin/oracle
2. After performing the previous step, you need to restart the database instance to nomount
3. Due to the use of RMAN, the archive mode needs to be turned on in advance
RMAN> restore controlfile from '/oradata/orcl/control01.ctl';
Starting restore at 28-MAR-21
using target database control file instead of recovery catalog
allocated channel: ORA_DISK_1
channel ORA_DISK_1: SID=13 device type=DISK
RMAN-00571: ===========================================================
RMAN-00569: =============== ERROR MESSAGE STACK FOLLOWS ===============
RMAN-00571: ===========================================================
RMAN-03002: failure of restore command at 03/28/2021 05:20:18
ORA-19504: failed to create file "+DATA/control01.ctl"
ORA-17502: ksfdcre:3 Failed to create file +DATA/control01.ctl
ORA-15001: diskgroup "DATA" does not exist or is not mounted
ORA-15040: diskgroup is incomplete
ORA-19600: input file is control file (/oradata/orcl/control01.ctl)
ORA-19601: output file is control file (+DATA/control01.ctl)
--Because the oracle software was installed before grid, the permissions on the oracle binary are wrong
[oracle@s11g ~]$ ll /u01/app/oracle/product/11.2.0/db/bin/oracle
-rwxrwxr-x. 1 oracle oinstall 239626683 Mar 26 12:51 /u01/app/oracle/product/11.2.0/db/bin/oracle
--Therefore setasmgidwrap must be run first against $ORACLE_HOME/bin/oracle
grid@s11g:/u01/app/11.2.0/grid/bin$ setasmgidwrap -o /u01/app/oracle/product/11.2.0/db/bin/oracle
[root@s11g ~]# ll /u01/app/oracle/product/11.2.0/db/bin/oracle
-rwsr-s--x 1 oracle asmadmin 239626641 Mar 26 14:52 /u01/app/oracle/product/11.2.0/db/bin/oracle
--After running this, the database instance must be restarted; this step is important, otherwise the following error occurs
[oracle@s11g ~]$ rman target /
Recovery Manager: Release 11.2.0.4.0 - Production on Sun Mar 28 05:23:01 2021
Copyright (c) 1982, 2011, Oracle and/or its affiliates. All rights reserved.
RMAN-00571: ===========================================================
RMAN-00569: =============== ERROR MESSAGE STACK FOLLOWS ===============
RMAN-00571: ===========================================================
RMAN-00554: initialization of internal recovery manager package failed
RMAN-04005: error from target database:
ORA-27140: attach to post/wait facility failed
ORA-27300: OS system dependent operation:invalid_egid failed with status: 1
ORA-27301: OS failure message: Operation not permitted
ORA-27302: failure occurred at: skgpwinit6
ORA-27303: additional information: startup egid = 54321 (oinstall), current egid = 1120 (asmadmin)
--Restore the control file
RMAN> restore controlfile from '/oradata/orcl/control01.ctl';
Starting restore at 26-MAR-21
using target database control file instead of recovery catalog
allocated channel: ORA_DISK_1
channel ORA_DISK_1: SID=30 device type=DISK
channel ORA_DISK_1: copied control file copy
output file name=+DATA/orcl/controlfile/control01.ctl
output file name=+DATA/orcl/controlfile/control02.ctl
Finished restore at 26-MAR-21
4. Start the database to mount mode
RMAN> alter database mount;
database mounted
released channel: ORA_DISK_1
5. RMAN Copy data file
RMAN> backup as copy database format '+DATA';
6. RMAN Switch data file
RMAN> switch database to copy;
7. RMAN switch temporary files, and open the database
# Point tempfile 1 at the +DATA disk group, then update the control file
# entries so the database re-creates the tempfile there on open.
run {
set newname for tempfile 1 to '+DATA';
switch tempfile all;
}
executing command: SET NEWNAME
renamed tempfile 1 to +DATA in control file
RMAN> alter database open;
database opened
8. Migrate redo log files
Key: the old log file members must be dropped from the last group back to the first; dropping in the wrong order reports:
ERROR at line 1: ORA-00362: member is required to form a valid logfile in group 2 ORA-01517: log member: '/oradata/orcl/redo02.log'
--Log files before migration
SYS@orcl>set line 222
col member for a60
SYS@orcl>SELECT a.group#, b.member, a.status FROM v$log a, v$logfile b WHERE a.group#=b.group#;
GROUP# MEMBER STATUS
---------- ------------------------------------------------------------ -----------
3 /oradata/orcl/redo03.log INACTIVE
2 /oradata/orcl/redo02.log CURRENT
1 /oradata/orcl/redo01.log INACTIVE
--Migration: add a member in +DATA to every group, then drop the old
--file-system members (switch logs as needed; a group must be INACTIVE
--before its member can be dropped).
ALTER DATABASE ADD LOGFILE MEMBER '+DATA' TO GROUP 1;
ALTER DATABASE ADD LOGFILE MEMBER '+DATA' TO GROUP 2;
ALTER DATABASE ADD LOGFILE MEMBER '+DATA' TO GROUP 3;
ALTER SYSTEM SWITCH LOGFILE;
ALTER DATABASE DROP LOGFILE MEMBER '/oradata/orcl/redo03.log';
ALTER SYSTEM SWITCH LOGFILE;
ALTER DATABASE DROP LOGFILE MEMBER '/oradata/orcl/redo02.log';
ALTER SYSTEM SWITCH LOGFILE;
ALTER DATABASE DROP LOGFILE MEMBER '/oradata/orcl/redo01.log';
-- Force a checkpoint so the dropped members are no longer needed for recovery.
ALTER SYSTEM CHECKPOINT;
--Log files after migration
SYS@orcl>SELECT a.group#, b.member, a.status FROM v$log a, v$logfile b WHERE a.group#=b.group#;
GROUP# MEMBER STATUS
---------- ------------------------------------------------------------ ----------------
1 +DATA/orcl/onlinelog/group_1.280.1068223535 INACTIVE
2 +DATA/orcl/onlinelog/group_2.281.1068223535 INACTIVE
3 +DATA/orcl/onlinelog/group_3.282.1068223535 CURRENT
9. Server parameter file and restart the database
--Check whether the spfile has been created
SYS@orcl>show parameter pfile
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
spfile string
--Create the spfile
SYS@orcl>create spfile='+DATA/orcl/spfileorcl.ora' from pfile='/home/oracle/pfile.ora';
File created.
--View the generated spfile
ASMCMD> ls
ARCHIVELOG/
CONTROLFILE/
DATAFILE/
ONLINELOG/
PARAMETERFILE/
TEMPFILE/
spfileorcl.ora
--Update the spfile pointer under $ORACLE_HOME/dbs
# Point the instance's default parameter file at the spfile stored in ASM.
cd "$ORACLE_HOME/dbs"
# Remove the stale local spfile (plain file: -f is enough, -r is not needed)
# so startup falls through to initorcl.ora.
rm -f spfileorcl.ora
# ">" (not the original ">>") so re-running never leaves duplicate
# spfile= lines in initorcl.ora.
cat <<EOF > initorcl.ora
spfile=+DATA/orcl/spfileorcl.ora
EOF
--Restart the database
shutdown immediate
startup
10. Verify the storage location of each file (all have been switched to the ASM disk group DATA)
select name from v$datafile
union all
select name from v$tempfile
union all
select member from v$logfile
union all
select name from v$controlfile;
NAME
--------------------------------------------------------------------------------
+DATA/orcl/datafile/system.273.1068222147
+DATA/orcl/datafile/sysaux.274.1068222153
+DATA/orcl/datafile/undotbs1.276.1068222159
+DATA/orcl/datafile/users.278.1068222161
+DATA/orcl/datafile/example.275.1068222157
+DATA/orcl/tempfile/temp.279.1068223335
+DATA/orcl/onlinelog/group_1.280.1068223535
+DATA/orcl/onlinelog/group_2.281.1068223535
+DATA/orcl/onlinelog/group_3.282.1068223535
+DATA/orcl/controlfile/control01.ctl
+DATA/orcl/controlfile/control02.ctl
11 rows selected.
SYS@orcl>show parameter pfile
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
spfile string +DATA/orcl/spfileorcl.ora
4. Register to the Grid cluster
[oracle@s11g ~]$ srvctl add database -d orcl -o /u01/app/oracle/product/11.2.0/db -p +DATA/orcl/spfileorcl.ora -n orcl -a DATA
[oracle@s11g ~]$ srvctl config database -d orcl
Database unique name: orcl
Database name: orcl
Oracle home: /u01/app/oracle/product/11.2.0/db
Oracle user: oracle
Spfile: +DATA/orcl/spfileorcl.ora
Domain:
Start options: open
Stop options: immediate
Database role: PRIMARY
Management policy: AUTOMATIC
Database instance: orcl
Disk Groups: DATA
Services:
grid@s11g:/home/grid$ crs_stat -v -t
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora.DATA.dg ora....up.type 0/5 0/ ONLINE ONLINE s11g
ora....ER.lsnr ora....er.type 0/5 0/ ONLINE ONLINE s11g
ora.OCR.dg ora....up.type 0/5 0/ ONLINE ONLINE s11g
ora.asm ora.asm.type 0/5 0/ ONLINE ONLINE s11g
ora.cssd ora.cssd.type 0/5 0/5 ONLINE ONLINE s11g
ora.diskmon ora....on.type 0/10 0/5 OFFLINE OFFLINE
ora.evmd ora.evm.type 0/10 0/5 ONLINE ONLINE s11g
ora.ons ora.ons.type 0/3 0/ OFFLINE OFFLINE
ora.orcl.db ora....se.type 0/2 0/1 OFFLINE OFFLINE
grid@s11g:/home/grid$ srvctl start database -d orcl
grid@s11g:/home/grid$ crs_stat -v -t
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora.DATA.dg ora....up.type 0/5 0/ ONLINE ONLINE s11g
ora....ER.lsnr ora....er.type 0/5 0/ ONLINE ONLINE s11g
ora.OCR.dg ora....up.type 0/5 0/ ONLINE ONLINE s11g
ora.asm ora.asm.type 0/5 0/ ONLINE ONLINE s11g
ora.cssd ora.cssd.type 0/5 0/5 ONLINE ONLINE s11g
ora.diskmon ora....on.type 0/10 0/5 OFFLINE OFFLINE
ora.evmd ora.evm.type 0/10 0/5 ONLINE ONLINE s11g
ora.ons ora.ons.type 0/3 0/ OFFLINE OFFLINE
ora.orcl.db ora....se.type 0/2 0/1 ONLINE ONLINE s11g