linux系统下oracle数据库静默安装指南



确定如下信息


操作系统版本
ORACLE版本
oracle PSU




安装数据库请提供如下信息:
网络信息: 公网,私网,虚网,SCAN 7个IP地址
磁盘信息: 提供3个2GB磁盘,其他磁盘依据业务数据大小提供,是否开启归档,归档磁盘大小
主机信息: 主机名,密码(要求复杂度)
实例信息: 数据库实例名,端口号,字符集,sys、system,sysasm 密码(要求复杂度)


请详细提供上面信息,信息不全无法安装数据库,谢谢配合!!






在已安装11G的集群用GRID运行 gpnptool find ,确保不要和现有的主机名和集群名重复。


# hostname infa-node6
# vi /etc/sysconfig/network




vi /etc/hosts
#public ip:
10.208.119.95 infp-dbcl05




#rpm -q gcc compat-gcc-34 elfutils-libelf-devel compat-libstdc++-33 elfutils-libelf gcc-c++ glibc-headers libaio libaio-devel libgcc libstdc++ libstdc++-devel make ksh sysstat unixODBC compat-libcap1 libXp


iso 挂载:


mount /dev/cdrom /mnt
or
mount -o loop /iso/rh68.iso /mnt


#head -n 1 /etc/issue
Red Hat Enterprise Linux Server release 5.8 (Tikanga)


#vi /etc/yum.repos.d/rhel-source.repo
[rhel-oracle-lib]
name=oracle
baseurl=file:///mnt
enabled=1
gpgcheck=0


#yum clean all 
#yum list |head


#yum install -y  gcc compat-gcc-34 elfutils-libelf-devel compat-libstdc++-33 elfutils-libelf gcc-c++ glibc-headers libaio libaio-devel libgcc libstdc++ libstdc++-devel make ksh sysstat unixODBC compat-libcap1 libXp


w
#export DISPLAY=IP:0.0(本机ip)
#xclock


/sbin/iptables -F; /sbin/ip6tables -F
/sbin/chkconfig iptables off; /sbin/chkconfig ip6tables off
/bin/sed -i s/SELINUX=enforcing/SELINUX=disabled/ /etc/selinux/config
/usr/sbin/setenforce 0


cat >> /etc/sysctl.conf <<EOF
#for oracle
# 注意: kernel.shmmax 与 kernel.shmall 需按物理内存大小另行设置(参见Oracle官方安装前提条件)
kernel.shmmni = 4096 
kernel.sem = 1010 129280 1010 128 
fs.aio-max-nr = 1048576
fs.file-max = 6815744
net.ipv4.ip_local_port_range = 9000 65500 
net.core.rmem_default = 262144 
net.core.rmem_max = 4194304 
net.core.wmem_default = 262144 
net.core.wmem_max = 1048576 
EOF




cat >> /etc/security/limits.conf <<EOF 
#for oracle
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
oracle hard stack 10240
EOF


cat >> /etc/pam.d/login <<EOF
#for oracle
session    required     pam_limits.so
EOF


cat >> /etc/profile <<EOF
#for oracle
if [ \$USER = "oracle" ] || [ \$USER = "grid" ]; then
if [ \$SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
fi
EOF


sysctl -p




/usr/sbin/groupadd -g 1001 oinstall                                      
/usr/sbin/groupadd -g 1002 dba                                           
/usr/sbin/groupadd -g 1003 asmadmin                                      
/usr/sbin/groupadd -g 1004 asmdba                                        
/usr/sbin/groupadd -g 1005 asmoper                                       
/usr/sbin/useradd -u 1001 -g oinstall -G asmadmin,asmdba,asmoper grid
/bin/echo "grid" |/usr/bin/passwd --stdin grid
/usr/sbin/useradd -u 1002 -g oinstall -G dba,asmdba oracle
/bin/echo "oracle" |/usr/bin/passwd --stdin oracle


+++++++++++++++依据系统实际情况建立目录++++++++++++++++++++++++++++
df -h


/bin/mkdir -p /u01/app/oracle
/bin/chown -R oracle:oinstall /u01
/bin/chmod -R 775 /u01




oracle:
cat >> /home/oracle/.bash_profile <<EOF
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=\$ORACLE_BASE/product/11.2.0/db_1
export ORACLE_SID=test
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export LD_LIBRARY_PATH=\$ORACLE_HOME/lib
export PATH=\$ORACLE_HOME/OPatch:\$ORACLE_HOME/bin:\$PATH
umask 022
EOF
. ~/.bash_profile


chown -R oracle:oinstall /soft/
chmod -R 775 /soft
export DISPLAY=10.200.224.120:0.0(本机ip)










rpm -ivh gcc-4.4.7-4.el6.x86_64.rpm gcc-c++-4.4.7-4.el6.x86_64.rpm --nodeps
rpm -ivh libaio-0.3.107-10.el6.x86_64.rpm libaio-devel-0.3.107-10.el6.x86_64.rpm --nodeps
rpm -ivh compat-libstdc++-33-3.2.3-69.el6.x86_64.rpm --nodeps
rpm -ivh elfutils-libelf-devel-0.152-1.el6.x86_64.rpm --nodeps
rpm -ivh libstdc++-4.4.7-4.el6.x86_64.rpm --nodeps
rpm -ivh libstdc++-devel-4.4.7-4.el6.x86_64.rpm --nodeps
rpm -ivh libaio-devel-0.3.107-10.el6.x86_64.rpm --nodeps
rpm -ivh cpp-4.4.7-4.el6.x86_64.rpm --nodeps
rpm -ivh ppl-0.10.2-11.el6.x86_64.rpm --nodeps
rpm -ivh cloog-ppl-0.15.7-1.2.el6.x86_64.rpm --nodeps
rpm -ivh mpfr-2.4.1-6.el6.x86_64.rpm mpfr-devel-2.4.1-6.el6.x86_64.rpm --nodeps
































配置grid及oracle用户各节点的SSH互信并测试。
注:node1、node2换成实际的各节点hostname。


方案1(手工配置):
在各节点执行:
ssh-keygen -t dsa;ssh-keygen -t rsa
在node1执行:
cat ~/.ssh/id_*sa.pub >> ~/.ssh/authorized_keys           
ssh node2 cat ~/.ssh/id_*sa.pub >> ~/.ssh/authorized_keys
scp ~/.ssh/* node2:~/.ssh/




方案2(使用oracle提供的脚本配置):
在11g的grid和database安装包里均有一个sshUserSetup.sh脚本,可以直接运用来配置互信,在shell脚本里有用法语法:
./sshUserSetup.sh -user grid -hosts "tmsdbrac1 tmsdbrac2" -advanced -noPromptPassphrase
./sshUserSetup.sh -user oracle -hosts "tmsdbrac1 tmsdbrac2" -advanced -noPromptPassphrase
方案3(使用OUI图形界面配置)


验证SSH:
ssh -o StrictHostKeyChecking=no node1 date;ssh -o StrictHostKeyChecking=no node1-priv date;ssh -o StrictHostKeyChecking=no node2 date;ssh -o StrictHostKeyChecking=no node2-priv date


NTP 配置:


1.检查时区:
检查/etc/sysconfig/clock设置:
ZONE="Asia/Shanghai"(Linux会拷贝/usr/share/zoneinfo/Asia/Shanghai 到/etc/locatime作为本地时区,如果我们需要改变时区,那么只需要修改/etc/sysconfig/clock,并且将对应的/usr/share/zoneinfo/Asia的时区文件拷贝成/etc/localtime即可)


UTC=false
ARC=false


2.安装NTP包:
rpm -ivh ntp-xxxxxx.rpm
或者:
yum install ntp


3.1 如果有专门的NTP服务器修改如下:
编辑/etc/ntp.conf文件:
/*:
server 10.200.160.7 prefer iburst burst minpoll 4 maxpoll 4
server 10.200.160.38 iburst burst minpoll 4 maxpoll 4
*/
server 10.200.160.7 prefer
server 10.200.160.38
3.2 没有专门的NTP服务器:
服务端:
vi /etc/ntp.conf
server 127.127.1.0 #local clock
fudge 127.127.1.0 stratum 10
driftfile /var/lib/ntp/drift


客户端:
vi /etc/ntp.conf
server 10.208.103.33 #node1 ip
driftfile /var/lib/ntp/drift


4:修改参数:
vi /etc/sysconfig/ntpd
OPTIONS="-x -u ntp:ntp -p /var/run/ntpd.pid -g"
SYNC_HWCLOCK=yes




4.重启ntp服务:
service ntpd restart


5.等待5分钟,测试客户端更新时间: 


6.加入自启动
chkconfig ntpd on 




ntpq -p    --检查




cluvfy comp clocksync -n all -verbose
service ntpd stop
ntpd -qg
service ntpd start


关于NTPD详细解释看 man ntpd








ASM磁盘规划
 
 分配共享三个2GB磁盘作为OCR及Voting Disk,ASM做normal方式作为冗余(normal冗余的表决盘需要3个故障组,与前文要求提供的3个2GB磁盘一致)。
 分配其他磁盘为数据盘。


1:裸设备
多路径软件安装:
device-mapper-multipath-0.4.9-72.el6.x86_64.rpm
device-mapper-multipath-libs-0.4.9-72.el6.x86_64.rpm


配置多路径:
方法1:手工修改配置/etc/multipath.conf 
(从安装路径复制,如/usr/share/doc/device-mapper-multipath-0.4.9/multipath.conf)
方法2:multipath enable(未测试)


启动multipathd服务:
service multipathd start
设置开机启动服务:
chkconfig multipathd on 
查看磁盘挂载情况:
multipath -ll


将下面内容记录到/etc/rc.local中,
raw /dev/raw/raw1 /dev/mapper/mpatha
raw /dev/raw/raw2 /dev/mapper/mpathb
raw /dev/raw/raw3 /dev/mapper/mpathc
raw /dev/raw/raw4 /dev/mapper/mpathd
raw /dev/raw/raw5 /dev/mapper/mpathe


sleep 5


/bin/chown -R grid:oinstall /dev/mapper/mpatha
/bin/chown -R grid:oinstall /dev/mapper/mpathb
/bin/chown -R grid:oinstall /dev/mapper/mpathc
/bin/chown -R grid:oinstall /dev/mapper/mpathd
/bin/chown -R grid:oinstall /dev/mapper/mpathe




chmod -R 660 /dev/raw/raw1
chmod -R 660 /dev/raw/raw2
chmod -R 660 /dev/raw/raw3
chmod -R 660 /dev/raw/raw4
chmod -R 660 /dev/raw/raw5


/bin/chown -R grid:oinstall /dev/raw/raw1
/bin/chown -R grid:oinstall /dev/raw/raw2
/bin/chown -R grid:oinstall /dev/raw/raw3
/bin/chown -R grid:oinstall /dev/raw/raw4
/bin/chown -R grid:oinstall /dev/raw/raw5






2:使用UDEV:
映射udev,配置ASM所需磁盘
RHEL5:
1)在节点1执行以下命令,然后拷贝99-oracle-asmdevices.rules到其他节点与节点1相同的目录下:
for i in b c; do 
echo "KERNEL==\"sd*\",BUS==\"scsi\",PROGRAM==\"/sbin/scsi_id -g -u -s %p\", RESULT==\"`/sbin/scsi_id -g -u -s /block/sd$i`\",NAME=\"oracleasm/disks/asm_disk$i\",OWNER=\"grid\",GROUP=\"asmadmin\",MODE=\"0660\""
done >> /etc/udev/rules.d/99-oracle-asmdevices.rules


2)重载并启用udev
udevcontrol reload_rules && start_udev


RHEL6:
1)在节点1执行以下命令,然后拷贝99-oracle-asmdevices.rules到其他节点与节点1相同的目录下:
for i in 1 2; do
echo "KERNEL==\"dm-*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/dm-$i`\", NAME=\"oracleasm/disks/asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\""      
done >>/etc/udev/rules.d/99-oracle-asmdevices.rules




例子:
for i in mpathc mpathb; do
echo KERNEL==\"dm-*\",ENV{DM_UUID}==\"`printf "$(udevadm info --query=all --name=/dev/mapper/$i | grep -i dm_uuid |awk -F'=' '{print $2}')"`\",SYMLINK+=\"oracleasm/disks/$i\",OWNER=\"grid\",GROUP=\"asmadmin\",MODE=\"0660\";
done >/etc/udev/rules.d/99-oracle-asmdevices.rules


KERNEL=="dm-*",ENV{DM_UUID}=="mpath-3600b342db561ed6d77aad9f11d0000dd",SYMLINK+="oracleasm/disks/mpathc",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-3600b342f431e0c7d7788d2737d0000dd",SYMLINK+="oracleasm/disks/mpathb",OWNER="grid",GROUP="asmadmin",MODE="0660"




for i in sdc sdd sde sdb; do
echo KERNEL==\"sd*[!0-9]\",ENV{ID_SERIAL}==\"`printf "$(udevadm info --query=all --name=$i | grep -i ID_SERIAL= |awk -F'=' '{print $2}')"`\",SYMLINK+=\"oracleasm/disks/$i\",OWNER=\"grid\",GROUP=\"asmadmin\",MODE=\"0660\";
done >/etc/udev/rules.d/99-oracle-asmdevices.rules




KERNEL=="sd*[!0-9]",ENV{ID_SERIAL}=="36000c29a02dd990eb983ca6ce623594b",SYMLINK+="oracleasm/disks/ocr01",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="sd*[!0-9]",ENV{ID_SERIAL}=="36000c29df849d2bd00abaa90d3f075ad",SYMLINK+="oracleasm/disks/ocr02",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="sd*[!0-9]",ENV{ID_SERIAL}=="36000c298601481217da5f26898cd5600",SYMLINK+="oracleasm/disks/ocr03",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="sd*[!0-9]",ENV{ID_SERIAL}=="36000c2936015c7ec8aeb71dc80272b54",SYMLINK+="oracleasm/disks/data01",OWNER="grid",GROUP="asmadmin",MODE="0660"










2)重载并启用udev
udevadm control --reload-rules && start_udev


3)udev add new disk
vi /etc/udev/rules.d/99-oracle-asmdevices.rules
find dev_path:
udevadm info --query=all --name=sdc
simulate a udev event run:
udevadm test /devices/pci0000:00/.../sdc


--安装前验证:
在grid安装包里,有cvu工具的rpm包,安装cvu工具并校验当前配置是否通过集群验证。


在各节点安装cvu工具:
export CVUQDISK_GRP=oinstall
rpm -ivh cvuqdisk-1.0.9-1.rpm


cvu工具校验:
./runcluvfy.sh stage -pre crsinst -n aqjxdb1,aqjxdb2 -verbose
root 执行脚本时,需要每一个脚本都在各节点执行完,再执行下一个脚本


安装结束执行
./runcluvfy.sh stage -post crsinst -n aqjxdb1,aqjxdb2 -verbose


cluvfy stage -post crsinst -n ywpjdb1,ywpjdb2 -verbose




--补丁:
opatch auto简要步骤
(1).解压opatch,PSU
unzip p6880880_112000_Linux-x86-64.zip
unzip p22191577_112040_Linux-x86-64.zip
chown -R grid:oinstall /oracle/soft/22191577


(2).替换opatch(root):
mv $GI_HOME/OPatch $GI_HOME/OPatch.bak
mv $ORACLE_HOME/OPatch $ORACLE_HOME/OPatch.bak


cp -r /oracle/soft/OPatch $GI_HOME
cp -r /oracle/soft/OPatch $ORACLE_HOME


chown -R grid:oinstall $GI_HOME/OPatch
chown -R oracle:oinstall $ORACLE_HOME/OPatch


(3).收集补丁集信息(grid && oracle):
opatch lsinventory -detail -oh $ORACLE_HOME


(4).创建ocm文件并打补丁(root/grid/oracle):
$GI_HOME/OPatch/ocm/bin/emocmrsp  -no_banner -output /tmp/ocm.rsp


(5).打补丁(root)
export LANG=C
$GI_HOME/OPatch/opatch auto /oracle/soft/22191577 -ocmrf /tmp/ocm.rsp


(6).仅在集群的一个实例执行catbundle.sql(oracle):
SQL> @?/rdbms/admin/catbundle.sql psu apply


(7).检查是否有失效对象(oracle):
sqlplus / as sysdba
set linesize 140
column owner format a20
column object_type format a30
select owner, object_type, count(1) from dba_objects where status = 'INVALID' group by owner, object_type order by 3 desc;


(8).确认补丁信息:
su - grid 
opatch lsinventory


su - oracle
opatch lsinventory


sqlplus / as sysdba
set lines 200 pages 200
col action_time for a30
col action for a15
col namespace for a15
col version for a15
col comments for a20
select * from dba_registry_history;




附:


1:参数调整


重要参数调整:必须要改!!!
alter system set "_px_use_large_pool"=true scope=spfile sid ='*';
alter system set parallel_force_local=true  scope=spfile sid='*';


alter system set "_gc_policy_time"=0  scope=spfile sid='*';
alter system set "_gc_undo_affinity"=false  scope=spfile sid='*';


alter system set deferred_segment_creation=false  scope=spfile sid='*';
alter system set event='28401 trace name context forever,level 1'  scope=spfile sid='*';
alter system set "_use_adaptive_log_file_sync"=false sid='*';
alter system set "_clusterwide_global_transactions"=false scope=spfile sid='*'; 




重要参数说明:


调整Px操作使用shared pool
alter system set "_px_use_large_pool"=true scope=spfile sid ='*';
备注:11g数据库中,并行会话默认使用的是shared pool用于并行执行时的消息缓冲区,并行过多时容易造成shared pool不足,
使数据库报ORA-4031错误。将这个参数设置为true,使并行会话改为使用large pool。


禁用跨节点并行
alter system set parallel_force_local=true  scope=spfile sid='*';
备注:该11g的新增参数,用于将并行的slave进程限制在发起并行SQL的会话所在的节点,即避免跨节点并行产生大量的节点间数据交换
和引起性能问题。该参数取代11g之前的instance_groups和parallel_instance_group两个参数。


禁用Oracle DRM
alter system set "_gc_policy_time"=0  scope=spfile sid='*';
alter system set "_gc_undo_affinity"=false  scope=spfile sid='*';


或者动态修改:
alter system set "_gc_affinity_limit"=250 SCOPE=BOTH sid='*';
alter system set "_gc_affinity_minimum"=10485760 SCOPE=BOTH  sid='*';




备注:这个参数用于关闭RAC的DRM(dynamic remastering)特性,drm每次进行资源的remaster时,会进行资源的短暂冻结,会导致冻结
的资源都无法访问。对于高并发的环境影响比较严重,另外由于DRM的bug本身很多,drm触发时容易导致系统hang住。关闭这两个参数同
时也会禁用Read-mostly Locking新特性。11g引入的Read mostly 特性目前存在大量的bug,建议屏蔽。


note:老的机制是以数据块为单位在集群内存间传递,DRM是以对象为单位remaster,以表为对象进行内存块的传递,遇到大的表那么后果
是悲剧的。


关闭deferred segment特性
alter system set deferred_segment_creation=false  scope=spfile sid='*';
备注:这个参数用于关闭11g的段延迟创建特性,避免出现这个新特性引起的BUG,比如数据导入导出BUG、表空间删除后对应的表对象还
在数据字典里面等。


禁用延迟用户验证特性
alter system set event='28401 trace name context forever,level 1'  scope=spfile sid='*';
备注:28401事件用于关闭11g数据库中用户持续输入错误密码时的延迟用户验证特性,避免用户持续输入错误密码时产生大量的
row cache lock或library cache lock等待,严重时使数据库完全不能登录。


关闭undo自动调节功能
alter system set "_undo_autotune"=false SCOPE=BOTH sid='*';
备注:关闭UNDO表空间的自动调整功能,避免出现UNDO表空间利用率过高或者是UNDO段争用的问题。也可以打开undo数据文件的自动扩展,同时指定maxsize属性即可。


禁用审计功能
alter system set audit_trail=none scope=spfile sid='*' ;
备注: 11g默认打开数据库审计,为了避免审计带来的SYSTEM表空间的过多占用,可以关闭审计,同时避免产生大量的trace文件。


禁用 adaptive Log 
alter system set "_use_adaptive_log_file_sync"=false sid='*';
备注: 11g默认打开log file Polling 模式,系统会在Polling模式和老模式下切换lgwr trace可以看到,该模式下,待commit进程通知
lgwr后进入sleep环节,timeout后去看是否log buffer中的内容写入磁盘,lgwr不再通知待commit进程,该机制下解放了一部分lgwr工作,
但是在高事物的系统中会导致待commit进程长期处于log file sync等待,同时引起大量的gc * 等待事件,
注意log file sync等待隶属于gc *等待的一部分。


禁用全局范围事物
alter system set "_clusterwide_global_transactions"=false scope=spfile sid='*'; 
#11g新特性,缺省是true,可能会导致DBLINK HANG死、UNDO坏块,同时OGG的解析也会出问题




其他参数调整参考:
alter system set job_queue_processes=100 scope=spfile sid='*';
alter system set DB_FILES=2000 scope=spfile sid='*';
alter system set open_cursors=3000 scope=spfile sid='*';
alter system set open_links_per_instance=48 scope=spfile sid='*';
alter system set open_links=100 scope=spfile sid='*';
alter system set parallel_max_servers=20 scope=spfile sid='*'; #限制最大并行数
alter system set session_cached_cursors=200 scope=spfile sid='*'; #增加session cached cursor
alter system set undo_retention=10800 scope=spfile sid='*'; #增加undo retention时长
alter system set result_cache_max_size=0 scope=spfile sid='*';
alter system set db_cache_advice=off scope=both sid='*';
alter system set resource_manager_plan='FORCE:' scope=spfile sid='*'; #关闭资源计划
alter system set enable_ddl_logging=true scope=spfile sid='*'; #开启alert记录DDL操作功能
alter system set event='28401 trace name context forever, level 1' scope=spfile sid='*'; #关闭logon delay,防止大量密码错误尝试导致的library cache lock/pin
alter system set "_undo_autotune"=false scope=spfile sid='*'; #关闭undo自动调整
alter system set "_highthreshold_undoretention"=50000 scope=spfile sid='*'; #减少US争用
alter system set "_partition_large_extents"=false scope=spfile sid='*'; 
alter system set "_index_partition_large_extents"=false scope=spfile sid='*';
#关闭large partition extents, 如果为true,新建分区分初始化较大的extent,在分区较多的分区表中,会占用较多空间。
alter system set "_use_adaptive_log_file_sync"=false scope=spfile sid='*';
#减少10g升级到11g后,自适应造成log file sync等待严重
alter system set "_optimizer_use_feedback"=false scope=spfile sid='*'; #关闭feedback特性
alter system set "_cleanup_rollback_entries"=2000 scope=spfile sid='*';
alter system set "_gc_policy_time"=0 scope=spfile sid='*'; #禁用RAC DRM特性,避免节点频繁remaster
alter system set "_library_cache_advice"=false scope=both sid='*';
alter system set "_optimizer_null_aware_antijoin"=FALSE scope=spfile sid='*';
alter system set "_px_use_large_pool"=true scope=spfile sid='*'; #并行操作使用large pool,减少shared pool出现ORA-4031的可能
alter system set "_resource_manager_always_on"=FALSE scope=both sid='*';
可以考虑的参数
alter system set sec_case_sensitive_logon=FALSE scope=spfile sid='*'; #关闭密码大小写敏感
alter system set audit_trail=none scope=spfile sid='*'; #关闭审计功能
alter system set control_file_record_keep_time=15 scope=spfile sid='*'; #增大控制文件记录时间
*._bloom_filter_enabled # bloom算法,可以disable
*._bloom_pruning_enabled
*._datafile_write_errors_crash_instance=false # 可以考虑 
*._high_priority_processes='LMS*|LGWR|PMON'         # 提高进程的优先级


2. 调整profile
alter profile "DEFAULT" limit PASSWORD_GRACE_TIME UNLIMITED;
alter profile "DEFAULT" limit PASSWORD_LIFE_TIME UNLIMITED;
alter profile "DEFAULT" limit PASSWORD_LOCK_TIME UNLIMITED;
alter profile "DEFAULT" limit FAILED_LOGIN_ATTEMPTS UNLIMITED;


3. 调整CRS参数
crsctl set css misscount 150
crsctl set css disktimeout 200
crsctl get css misscount
crsctl get css disktimeout


4. 修改监听端口,根据实际需求调整
srvctl modify scan_listener -p "TCP:1621"  
srvctl stop scan_listener 
srvctl start scan_listener
srvctl modify listener -l LISTENER -p "TCP:1621"
srvctl stop listener
srvctl start listener
alter system set local_listener='(ADDRESS = (PROTOCOL = TCP)(HOST = 132.228.235.163)(PORT = 1621))' scope=both sid='stephen1';
alter system set local_listener='(ADDRESS = (PROTOCOL = TCP)(HOST = 132.228.235.164)(PORT = 1621))' scope=both sid='stephen2';


5. 修改sqlnet.ora及listener.ora
在sqlnet.ora中添加:
DIAG_ADR_ENABLED = OFF
SQLNET.INBOUND_CONNECT_TIMEOUT = 0
动态修改listener.ora:
LSNRCTL> set inbound_connect_timeout 0 
LSNRCTL> save_config


6. 关闭自动执行的JOB(可选)
SQL> select client_name,status from DBA_AUTOTASK_CLIENT;


CLIENT_NAME                                                      STATUS
---------------------------------------------------------------- --------
auto optimizer stats collection                                  ENABLED
auto space advisor                                               ENABLED
sql tuning advisor                                               ENABLED
begin
  DBMS_AUTO_TASK_ADMIN.DISABLE(
    client_name => 'auto optimizer stats collection',
    operation => NULL,
    window_name => NULL);
end;
/


7. 设置AWR保存时间
SQL> set lines 200 pages 100
SQL> col SNAP_INTERVAL for a20
SQL> col RETENTION for a20 
SQL> select * from DBA_HIST_WR_CONTROL;


      DBID SNAP_INTERVAL        RETENTION            TOPNSQL
---------- -------------------- -------------------- ----------
 569792660 +00000 01:00:00.0    +00008 00:00:00.0    DEFAULT


根据需求设置AWR保存时间及快照间隔时间,如保存30天,间隔30分钟:
BEGIN 
  DBMS_WORKLOAD_REPOSITORY.modify_snapshot_settings( 
retention => 30*24*60,
interval => 30);
END; 
/
SQL> select * from DBA_HIST_WR_CONTROL;


      DBID SNAP_INTERVAL        RETENTION            TOPNSQL
---------- -------------------- -------------------- ----------
 569792660 +00000 00:30:00.0    +00030 00:00:00.0    DEFAULT


8. RMAN配置
根据实际备份策略调整。


9.调整redo
1) 增加redo组
alter database add logfile thread 1 group 5 '+DATA' size 1024M;


alter database add logfile thread 1 group 5 '+ORADATA' size 1024M;
alter database add logfile member '+ORADATA' to group 5;


alter database add logfile thread 2 group 6 '+ORADATA' size 1024M;
alter database add logfile member '+ORADATA' to group 6;


2) 增加redo组成员
alter database add logfile member '+DATA' to group 1;


3) 删除redo组
alter database drop logfile group 5;


4) 删除redo组成员
alter database drop logfile member '+DATA/stephen/onlinelog/group_1.269.879681389';


10.表空间
注:创建、删除表空间时请特别注意,避免误操作。




创建表空间举例:
create tablespace tbs1 datafile '/oradata/tbs01.dbf' size 20G autoextend off;
create tablespace tbs1 datafile '/oradata/tbs01.dbf' size 10G reuse autoextend off;
create tablespace tbs1 datafile '/oradata/tbs01.dbf' size 10G autoextend on next 1M maxsize 20G;
create tablespace tbs1 datafile '/oradata/tbs01.dbf' size 10G autoextend on maxsize 20M uniform size 10M;
create tablespace tbs1 datafile '+DATA' size 20G autoextend off;


向表空间添加数据文件:
alter tablespace tbs1 add datafile '/oradata/tbs02.dbf' size 10G autoextend off;
alter tablespace tbs1 add datafile '+DATA' size 10G autoextend off;


删除表空间:
drop tablespace tbs1;


删除表空间及数据文件:
drop tablespace tbs1 including contents and datafiles;


11.用户
注:创建、删除用户时请特别注意,避免误操作。


创建用户:
create user u1 identified by "oracle" default tablespace tbs1 temporary tablespace temp profile default;


删除用户:
drop user u1 cascade;


12: 磁盘配置


1.SCSI_ID方式
RHEL5:
for i in b c d;
do
echo "KERNEL==\"sd*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id -g -u -s %p\", RESULT==\"`scsi_id -g -u -s /block/sd$i`\", NAME=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\""
done >> /etc/udev/rules.d/99-oracle-asmdevices.rules


重载&&启动udev:
udevcontrol reload_rules && start_udev


RHEL6:
for i in b c d;
do
echo "KERNEL==\"sd*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", NAME=\"oracleasm/disks/asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\""      
done >>/etc/udev/rules.d/99-oracle-asmdevices.rules


重载&&启动udev:
RHEL6:
udevadm control --reload-rules && start_udev




2.Multipath + SCSI_ID方式
# rpm -q device-mapper-multipath
# yum install device-mapper-multipath


# for i in `cat /proc/partitions | awk '{print $4}' |grep sd | grep [a-z]$`; do echo "$i: `/lib/udev/scsi_id --whitelisted --device=/dev/$i`"; done


  sda: 36b82a720d76bab001b7378c604059784
  sdb: 360a98000572d4435522f394534495530
  sdc: 360a98000572d4435522f3945344a372d
  sdd: 360a98000572d4435522f394534495530
  sde: 360a98000572d4435522f3945344a372d


  <==sdb,sdd 的scsi_id相同;sdc,sde 的scsi_id相同


# cat /etc/multipath.conf
defaults {
       udev_dir                /dev
       polling_interval        5
       path_grouping_policy    failover
       getuid_callout          "/lib/udev/scsi_id --whitelisted --device=/dev/%n"
       prio                    const
       path_checker            directio
       rr_min_io               1000
       rr_weight               uniform
       failback                manual
       no_path_retry           fail
       user_friendly_names     yes
}
blacklist {
        devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
        devnode "^hd[a-z]"
        devnode "^cciss!c[0-9]d[0-9]*"
}
multipaths {
   multipath {
      wwid 360a98000572d4435522f394534495530
      alias ocrvote
      path_grouping_policy failover
   }
   multipath {
      wwid 360a98000572d4435522f3945344a372d
      alias data
      path_grouping_policy failover
   }
}


# service multipathd start
Starting multipathd daemon: [  OK  ]


# chkconfig multipathd on
# chkconfig --list |grep multi
multipathd      0:off   1:off   2:on    3:on    4:on   5:on    6:off


# multipath -ll
ocrvote (360a98000572d4435522f394534495530) dm-0 NETAPP,LUN
size=2.0G features='3 pg_init_retries 50 retain_attached_hw_handler' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=4 status=active
| `- 7:0:1:0 sdd 8:48 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
  `- 7:0:0:0 sdb 8:16 active ready running
data (360a98000572d4435522f3945344a372d) dm-1 NETAPP,LUN
size=500G features='3 pg_init_retries 50 retain_attached_hw_handler' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=4 status=active
| `- 7:0:1:1 sde 8:64 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
  `- 7:0:0:1 sdc 8:32 active ready running


--# touch /etc/udev/rules.d/99-oracle-asmdevices.rules


# for i in ocrvote data; do
echo KERNEL==\"dm-*\",ENV{DM_UUID}==\"`printf "$(udevadm info --query=all --name=/dev/mapper/$i | grep -i dm_uuid |awk -F'=' '{print $2}')"`\",SYMLINK+=\"oracleasm/disks/$i\",OWNER=\"grid\",GROUP=\"asmadmin\",MODE=\"0660\";
done > /etc/udev/rules.d/99-oracle-asmdevices.rules


# cat /etc/udev/rules.d/99-oracle-asmdevices.rules                             
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-360a98000572d4435522f394534495530",SYMLINK+="oracleasm/disks/ocrvote",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-360a98000572d4435522f3945344a372d",SYMLINK+="oracleasm/disks/data",OWNER="grid",GROUP="asmadmin",MODE="0660"






# start_udev
正在启动 udev:[确定]


# ls -l /dev/dm*
brw-rw---- 1 grid asmadmin 253, 0 11月 20 15:21 /dev/dm-0
brw-rw---- 1 grid asmadmin 253, 1 11月 20 15:21 /dev/dm-1




scp /etc/multipath.conf node2:/etc


scp /etc/udev/rules.d/99-oracle-asmdevices.rules node2:/etc/udev/rules.d/



猜你喜欢

转载自blog.csdn.net/vincentlhh/article/details/80227178