Oracle 19c rac的搭建

  1. hostnamectl set-hostname raclhr-19c-n1

  2. hostnamectl set-hostname raclhr-19c-n2

  3. #Public IP

  4. 192.168.59.52            raclhr-19c-n1

  5. 192.168.59.53            raclhr-19c-n2

  6. #Private IP

  7. 192.168.2.52             raclhr-19c-n1-priv

  8. 192.168.2.53             raclhr-19c-n2-priv

  9. #Virtual IP

  10. 192.168.59.54            raclhr-19c-n1-vip

  11. 192.168.59.55            raclhr-19c-n2-vip

  12. #Scan IP

  13. 192.168.59.56            raclhr-19c-scan

  14. 192.168.59.57            raclhr-19c-scan

  15. 192.168.59.58            raclhr-19c-scan

  16. mkdir -p /u01/app/19.3.0/grid

  17. mkdir -p /u01/app/grid

  18. mkdir -p /u01/app/oracle

  19. mkdir -p /u01/app/oracle/product/19.3.0/dbhome_1

  20. chown -R grid:oinstall /u01

  21. chown -R oracle:oinstall /u01/app/oracle

  22. chmod -R 775 /u01/

  23. mkdir -p /u01/app/oraInventory

  24. chown -R grid:oinstall /u01/app/oraInventory

  25. chmod -R 775 /u01/app/oraInventory

  26. export ORACLE_BASE=/u01/app/grid

  27. export ORACLE_HOME=/u01/app/19.3.0/grid

  28. export GRID_BASE=$ORACLE_BASE

  29. export GRID_HOME=$ORACLE_HOME

  30. export PATH=$PATH:$ORACLE_HOME/bin

  31. yum install bc gcc gcc-c++  binutils  make gdb cmake  glibc ksh \

  32. elfutils-libelf elfutils-libelf-devel fontconfig-devel glibc-devel  \

  33. libaio libaio-devel libXrender libXrender-devel libX11 libXau sysstat \

  34. libXi libXtst libgcc librdmacm-devel libstdc++ libstdc++-devel libxcb \

  35. net-tools nfs-utils compat-libcap1 compat-libstdc++  smartmontools  targetcli \

  36. python python-configshell python-rtslib python-six  unixODBC unixODBC-devel

  37. grid用户:

  38. unzip /soft/LINUX.X64_193000_grid_home.zip  -d /u01/app/19.3.0/grid

  39. oracle用户:

  40. unzip /soft/LINUX.X64_193000_db_home.zip -d /u01/app/oracle/product/19.3.0/dbhome_1

  41. $ORACLE_HOME/runcluvfy.sh  stage -pre crsinst -n "raclhr-19c-n1,raclhr-19c-n2"  -verbose

  42.  

  43. /u01/app/19.3.0/grid/oui/prov/resources/scripts/sshUserSetup.sh -user grid  -hosts "raclhr-19c-n1 raclhr-19c-n2" -advanced exverify -confirm

  44. /u01/app/19.3.0/grid/oui/prov/resources/scripts/sshUserSetup.sh -user oracle  -hosts "raclhr-19c-n1 raclhr-19c-n2" -advanced exverify -confirm

  45. --19c的mgmtdb可以选择不安装

  46. $GRID_HOME/gridSetup.sh -silent  -force -noconfig -waitforcompletion -ignorePrereq \

  47. INVENTORY_LOCATION=/u01/app/oraInventory \

  48. oracle.install.option=CRS_CONFIG \

  49. ORACLE_BASE=/u01/app/grid \

  50. oracle.install.asm.OSDBA=asmdba \

  51. oracle.install.asm.OSOPER=asmoper \

  52. oracle.install.asm.OSASM=asmadmin \

  53. oracle.install.crs.config.scanType=LOCAL_SCAN \

  54. oracle.install.crs.config.gpnp.scanName=raclhr-19c-scan \

  55. oracle.install.crs.config.gpnp.scanPort=1521 \

  56. oracle.install.crs.config.ClusterConfiguration=STANDALONE \

  57. oracle.install.crs.config.configureAsExtendedCluster=false \

  58. oracle.install.crs.config.clusterName=raclhr-cluster \

  59. oracle.install.crs.config.gpnp.configureGNS=false \

  60. oracle.install.crs.config.autoConfigureClusterNodeVIP=false \

  61. oracle.install.crs.config.clusterNodes=raclhr-19c-n1:raclhr-19c-n1-vip:HUB,raclhr-19c-n2:raclhr-19c-n2-vip:HUB \

  62. oracle.install.crs.config.networkInterfaceList=ens33:192.168.59.0:1,ens37:192.168.2.0:5,virbr0:192.168.122.0:3 \

  63. oracle.install.asm.configureGIMRDataDG=true \

  64. oracle.install.crs.config.useIPMI=false \

  65. oracle.install.asm.storageOption=ASM \

  66. oracle.install.asmOnNAS.configureGIMRDataDG=false \

  67. oracle.install.asm.SYSASMPassword=lhr \

  68. oracle.install.asm.diskGroup.name=OCR \

  69. oracle.install.asm.diskGroup.redundancy=EXTERNAL \

  70. oracle.install.asm.diskGroup.AUSize=4 \

  71. oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-diskc,,/dev/asm-diskd,,/dev/asm-diske, \

  72. oracle.install.asm.diskGroup.disks=/dev/asm-diskc,/dev/asm-diskd,/dev/asm-diske \

  73. oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm-* \

  74. oracle.install.asm.configureAFD=false \

  75. oracle.install.crs.configureRHPS=false \

  76. oracle.install.crs.config.ignoreDownNodes=false \

  77. oracle.install.config.managementOption=NONE \

  78. oracle.install.crs.rootconfig.executeRootScript=false

  79. /u01/app/19.3.0/grid/gridSetup.sh -executeConfigTools -silent -responseFile /u01/app/19.3.0/grid/install/response/grid_2019-08-23_03-14-44PM.rsp

  80. $GRID_HOME/bin/kfod disks=asm  st=true ds=true cluster=true

  81. $ORACLE_HOME/runcluvfy.sh  stage -post  crsinst -n "raclhr-19c-n1,raclhr-19c-n2"  -verbose 

  82. $ORACLE_HOME/runInstaller -silent  -force -noconfig  -ignorePrereq \

  83. oracle.install.option=INSTALL_DB_SWONLY \

  84. UNIX_GROUP_NAME=oinstall \

  85. INVENTORY_LOCATION=/u01/app/oraInventory \

  86. ORACLE_BASE=/u01/app/oracle \

  87. ORACLE_HOME=/u01/app/oracle/product/19.3.0/dbhome_1 \

  88. oracle.install.db.InstallEdition=EE \

  89. oracle.install.db.OSDBA_GROUP=dba \

  90. oracle.install.db.OSOPER_GROUP=oper \

  91. oracle.install.db.OSBACKUPDBA_GROUP=backupdba \

  92. oracle.install.db.OSDGDBA_GROUP=dgdba \

  93. oracle.install.db.OSKMDBA_GROUP=kmdba \

  94. oracle.install.db.OSRACDBA_GROUP=racdba \

  95. oracle.install.db.CLUSTER_NODES=raclhr-19c-n1,raclhr-19c-n2 \

  96. oracle.install.db.config.starterdb.type=GENERAL_PURPOSE

  97. create diskgroup DATA external redundancy disk '/dev/asm-diski','/dev/asm-diskj','/dev/asm-diskk'  ATTRIBUTE 'compatible.rdbms' = '19.0', 'compatible.asm' = '19.0';

  98. create diskgroup FRA external redundancy disk '/dev/asm-diskl','/dev/asm-diskm','/dev/asm-diskn'  ATTRIBUTE 'compatible.rdbms' = '19.0', 'compatible.asm' = '19.0';

  99. srvctl start diskgroup -diskgroup data

  100. srvctl start diskgroup -diskgroup fra

  101. alter diskgroup all mount;

  102. dbca -silent -ignorePreReqs  -ignorePrereqFailure  -createDatabase -templateName General_Purpose.dbc -responseFile NO_VALUE \

  103. -gdbname rac19c  -sid rac19c \

  104. -createAsContainerDatabase TRUE \

  105. -sysPassword lhr -systemPassword lhr -pdbAdminPassword lhr -dbsnmpPassword lhr \

  106. -datafileDestination '+DATA' -recoveryAreaDestination '+FRA' \

  107. -storageType ASM \

  108. -characterset AL32UTF8 \

  109. -sampleSchema true \

  110. -totalMemory 1024 \

  111. -databaseType MULTIPURPOSE \

  112. -emConfiguration none \

  113. -nodeinfo raclhr-19c-n1,raclhr-19c-n2

  114. -------------------- 19c的MGMTDB是可选的

  115. 1.完成CDB的创建

  116. create diskgroup MGMT external redundancy disk '/dev/asm-diskf','/dev/asm-diskg','/dev/asm-diskh'  ATTRIBUTE 'compatible.rdbms' = '19.0', 'compatible.asm' = '19.0';

  117. /u01/app/19.3.0/grid/bin/dbca -silent -createDatabase -createAsContainerDatabase true -templateName MGMTSeed_Database.dbc -sid -MGMTDB -gdbName _mgmtdb -storageType ASM -diskGroupName MGMT -datafileJarLocation /u01/app/19.3.0/grid/assistants/dbca/templates -characterset AL32UTF8 -autoGeneratePasswords -skipUserTemplateCheck

  118. 2.完成pdb的创建

  119. mgmtca -local

其它请参考: https://blog.csdn.net/luis_ora/article/details/90054248

Linux平台 Oracle 19c RAC安装

一、 实施前期准备工作

二、 安装前期准备工作

Linux平台 Oracle 19c RAC安装指导:
Part1: Linux平台 Oracle 19c RAC安装Part1:准备工作
Part2: Linux平台 Oracle 19c RAC安装Part2:GI配置
Part3: Linux平台 Oracle 19c RAC安装Part3:DB配置

本文安装环境: OEL 7.6 + Oracle 19.3 GI & RAC

一、实施前期准备工作

1.1 服务器安装操作系统

配置完全相同的两台服务器,安装相同版本的Linux操作系统。留存系统光盘或者镜像文件。
我这里是OEL7.6,系统目录大小均一致。对应OEL7.6的系统镜像文件放在服务器上,供后面配置本地yum使用。

1.2 Oracle安装介质

Oracle 19.3 版本2个zip包(总大小6G+,注意空间):
LINUX.X64_193000_grid_home.zip MD5:
LINUX.X64_193000_db_home.zip MD5:
这个自己去Oracle官网下载,然后只需要上传到节点1即可。

1.3 共享存储规划

从存储中划分出两台主机可以同时看到的共享LUN,3个1G的盘用作OCR和Voting Disk,其余分了3个12G的盘规划做用做数据盘和FRA。
注:19c安装GI时,可以选择是否配置GIMR,且默认不配置,我这里选择不配置,所以无需再给GIMR分配对应空间。

--OEL7使用udev需要给磁盘创建分区,这里我使用fdisk 将对应盘创建一个主分区,分区号为2(这里只是为了区分):
sdb  sdc  sdd  sde  sdf  sdg
sdb2 sdc2 sdd2 sde2 sdf2 sdg2
1G   1G   1G   12G  12G  12G
--OEL7中udev需绑定对应磁盘的分区
for i in b c d e f g; do
  echo "KERNEL==\"sd?2\", SUBSYSTEM==\"block\", PROGRAM==\"/usr/lib/udev/scsi_id -g -u -d /dev/\$parent\", RESULT==\"`/usr/lib/udev/scsi_id -g -u -d /dev/sd$i`\", SYMLINK+=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\""
done
--vi /etc/udev/rules.d/99-oracle-asmdevices.rules
KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VB208b8d32-df9af9d6", SYMLINK+="asm-ocr1", OWNER="grid", GROUP="asmadmin", MODE="0660"KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VBe51f4d0a-1b73c589", SYMLINK+="asm-ocr2", OWNER="grid", GROUP="asmadmin", MODE="0660"KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VBc63b1aa3-1e290288", SYMLINK+="asm-ocr3", OWNER="grid", GROUP="asmadmin", MODE="0660"KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VB53ee20b6-40c4b9a3", SYMLINK+="asm-data01", OWNER="grid", GROUP="asmadmin", MODE="0660"KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VB3822afbf-62d8a84d", SYMLINK+="asm-data02", OWNER="grid", GROUP="asmadmin", MODE="0660"KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VBfbb7943a-5ec216e5", SYMLINK+="asm-data03", OWNER="grid", GROUP="asmadmin", MODE="0660"--udevadm配置重载生效:
[root@db193 rules.d]# udevadm control --reload
[root@db193 rules.d]# udevadm trigger
--确认udev已绑定成功:
[root@db193 rules.d]# ls -l /dev/asm*
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-data01 -> sde2
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-data02 -> sdf2
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-data03 -> sdg2
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-ocr1 -> sdb2
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-ocr2 -> sdc2
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-ocr3 -> sdd2
--第二个节点db195最开始直接使用udevadm操作发现不行,此时需先partprobe,再udevadm触发即可成功
--使用partprobe将磁盘分区表变化信息通知内核,请求操作系统重新加载分区表
[root@db195 ~]# partprobe /dev/sdb
[root@db195 ~]# partprobe /dev/sdc
[root@db195 ~]# partprobe /dev/sdd
[root@db195 ~]# partprobe /dev/sde
[root@db195 ~]# partprobe /dev/sdf
[root@db195 ~]# partprobe /dev/sdg
--udevadm配置重载生效:
[root@db195 ~]# udevadm control --reload
[root@db195 ~]# udevadm trigger
--确认udev已绑定成功:
[root@db195 ~]# ls -l /dev/asm*
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-data01 -> sde2
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-data02 -> sdf2
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-data03 -> sdg2
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-ocr1 -> sdb2
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-ocr2 -> sdc2
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-ocr3 -> sdd2

我这次搭建的实验环境为了精简,没有再去模拟多路径的环境,如果想尽可能的模拟生产环境,可以参考之前18c的配置:

  • Linux平台 Oracle 18c RAC安装Part1:准备工作

1.4 网络规范分配

公有网络 以及 私有网络。
公有网络:这里实验环境是enp0s3是public IP,enp0s8是ASM & Private IP,实际生产需根据实际情况调整规划,一般public是有OS层绑定(bonding),private是使用HAIP。

二、安装前期准备工作

2.1 各节点系统时间校对

各节点系统时间校对:

--检验时间和时区确认正确
date
--关闭chrony服务,移除chrony配置文件(后续使用ctss)
systemctl list-unit-files|grep chronyd
systemctl status chronyd
systemctl disable chronyd
systemctl stop chronyd
mv /etc/chrony.conf /etc/chrony.conf_bak

这里实验环境,选择不使用NTP和chrony,这样Oracle会自动使用自己的ctss服务。

2.2 各节点关闭防火墙和SELinux

各节点关闭防火墙:

systemctl list-unit-files|grep firewalld
systemctl status firewalld
systemctl disable firewalld
systemctl stop firewalld

各节点关闭SELinux:

getenforce
cat /etc/selinux/config
手工修改/etc/selinux/config 将 SELINUX=disabled,或使用下面命令:
sed -i '/^SELINUX=.*/ s//SELINUX=disabled/' /etc/selinux/config
setenforce 0

最后核实各节点已经关闭SELinux即可。

2.3 各节点检查系统依赖包安装情况

yum install -y oracle-database-preinstall-18c-1.0-1.el7.x86_64

在OEL7.6中还是oracle-database-preinstall-18c的名字,并没有对应19c的,但实际测试,在依赖包方面基本没区别。
如果选用的是其他Linux,比如常用的RHEL,那就需要yum安装官方文档要求的依赖包了。

2.4 各节点配置/etc/hosts

编辑/etc/hosts文件:

#public ip
192.168.1.193  db193
192.168.1.195  db195
#virtual ip
192.168.1.194  db193-vip
192.168.1.196  db195-vip
#scan ip
192.168.1.197  db19c-scan
#private ip
10.10.1.193    db193-priv
10.10.1.195    db195-priv

修改主机名(建议由SA调整):

--例如:修改主机名为db193:
hostnamectl status
hostnamectl set-hostname db193
hostnamectl status

2.5 各节点创建需要的用户和组

创建group & user,给oracle、grid设置密码:

groupadd -g 54321 oinstall
groupadd -g 54322 dba
groupadd -g 54323 oper
groupadd -g 54324 backupdba
groupadd -g 54325 dgdba
groupadd -g 54326 kmdba
groupadd -g 54327 asmdba
groupadd -g 54328 asmoper
groupadd -g 54329 asmadmin
groupadd -g 54330 racdba
useradd -u 54321 -g oinstall -G dba,asmdba,backupdba,dgdba,kmdba,racdba,oper oracle
useradd -u 54322 -g oinstall -G asmadmin,asmdba,asmoper,dba grid
echo oracle | passwd --stdin oracle
echo oracle | passwd --stdin grid

我这里测试环境设置密码都是oracle,实际生产环境建议设置符合规范的复杂密码。

2.6 各节点创建安装目录

各节点创建安装目录(root用户):

mkdir -p /u01/app/19.3.0/grid
mkdir -p /u01/app/grid
mkdir -p /u01/app/oracle
chown -R grid:oinstall /u01
chown oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/

2.7 各节点系统配置文件修改

内核参数修改:vi /etc/sysctl.conf
实际上OEL在安装依赖包的时候也同时修改了这些值,以下参数主要是核对或是对RHEL版本作为参考:

# vi /etc/sysctl.conf  增加如下内容:
vm.swappiness = 1
vm.dirty_background_ratio = 3
vm.dirty_ratio = 80
vm.dirty_expire_centisecs = 500
vm.dirty_writeback_centisecs = 100
kernel.shmmni = 4096
kernel.shmall = 1073741824
kernel.shmmax = 4398046511104
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.panic_on_oops = 1
net.ipv4.conf.enp0s8.rp_filter = 2
net.ipv4.conf.enp0s9.rp_filter = 2
net.ipv4.conf.enp0s10.rp_filter = 2

修改生效:

#sysctl -p /etc/sysctl.conf

注:enp0s9和enp0s10是IPSAN专用的网卡,跟私网一样设置loose mode。(我这里因为没有使用IPSAN,所以没有这两张网卡)

#sysctl -p /etc/sysctl.d/98-oracle.conf
net.ipv4.conf.enp0s8.rp_filter = 2
net.ipv4.conf.enp0s9.rp_filter = 2
net.ipv4.conf.enp0s10.rp_filter = 2

用户shell的限制:vi /etc/security/limits.d/99-grid-oracle-limits.conf

oracle soft nproc 16384
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
oracle hard stack 32768
grid soft nproc 16384
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
grid hard stack 32768

这里需要注意OEL自动配置的 /etc/security/limits.d/oracle-database-server-12cR2-preinstall.conf 并不包含grid用户的,可以手工加上。

vi /etc/profile.d/oracle-grid.sh

#Setting the appropriate ulimits for oracle and grid user
if [ $USER = "oracle" ]; then
  if [ $SHELL = "/bin/ksh" ]; then
    ulimit -u 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi
fi
if [ $USER = "grid" ]; then
  if [ $SHELL = "/bin/ksh" ]; then
    ulimit -u 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi
fi

这个OEL中也没有自动配置,需要手工配置。

2.8 各节点设置用户的环境变量

第1个节点grid用户:

export ORACLE_SID=+ASM1;export ORACLE_BASE=/u01/app/grid;export ORACLE_HOME=/u01/app/19.3.0/grid;export PATH=$ORACLE_HOME/bin:$PATH;export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib;

第2个节点grid用户:

export ORACLE_SID=+ASM2;export ORACLE_BASE=/u01/app/grid;export ORACLE_HOME=/u01/app/19.3.0/grid;export PATH=$ORACLE_HOME/bin:$PATH;export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib;

第1个节点oracle用户:

export ORACLE_SID=jydb1;export ORACLE_BASE=/u01/app/oracle;export ORACLE_HOME=/u01/app/oracle/product/19.3.0/db_1;export PATH=$ORACLE_HOME/bin:$PATH;export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib;

第2个节点oracle用户:

export ORACLE_SID=jydb2;export ORACLE_BASE=/u01/app/oracle;export ORACLE_HOME=/u01/app/oracle/product/19.3.0/db_1;export PATH=$ORACLE_HOME/bin:$PATH;export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib;

三、 GI(Grid Infrastructure)安装

  • 3.1 解压GI的安装包
  • 3.2 安装配置Xmanager软件
  • 3.3 共享存储LUN的赋权
  • 3.4 使用Xmanager图形化界面配置GI
  • 3.5 验证crsctl的状态
  • 3.6 测试集群的FAILED OVER功能

Linux平台 Oracle 19c RAC安装指导:
Part1: Linux平台 Oracle 19c RAC安装Part1:准备工作
Part2: Linux平台 Oracle 19c RAC安装Part2:GI配置
Part3: Linux平台 Oracle 19c RAC安装Part3:DB配置

本文安装环境: OEL 7.6 + Oracle 19.3 GI & RAC

三、GI(Grid Infrastructure)安装

3.1 解压GI的安装包

su - grid
解压 GRID 到 GRID用户的$ORACLE_HOME下

[grid@db193 grid]$ pwd
/u01/app/19.3.0/grid
[grid@db193 grid]$ unzip /u01/media/LINUX.X64_193000_grid_home.zip

3.2 安装配置Xmanager软件

在自己的Windows系统上成功安装Xmanager Enterprise之后,运行Xstart.exe可执行程序,
配置如下

Session:db193
Host:192.168.1.193
Protocol:SSH
User Name:grid
Execution Command:/usr/bin/xterm -ls -display $DISPLAY
点击RUN,输入grid用户的密码可以正常弹出命令窗口界面,即配置成功。
当然也可以通过开启Xmanager - Passive,直接在SecureCRT连接的会话窗口中临时配置DISPLAY变量直接调用图形:
export DISPLAY=192.168.1.31:0.0

3.3 共享存储LUN的赋权

在《 Linux平台 Oracle 19c RAC安装Part1:准备工作 -> 1.3 共享存储规划》中已完成绑定和权限,这里不需要再次操作。

--只需确认两个节点链接文件(/dev/asm*)对应的sd[b-g]2的权限正确即可:
# ls -l /dev/sd?2brw-rw---- 1 root disk     8,  2 Jul 31 09:13 /dev/sda2brw-rw---- 1 grid asmadmin 8, 18 Jul 31 18:06 /dev/sdb2brw-rw---- 1 grid asmadmin 8, 34 Jul 31 18:06 /dev/sdc2brw-rw---- 1 grid asmadmin 8, 50 Jul 31 18:06 /dev/sdd2brw-rw---- 1 grid asmadmin 8, 66 Jul 31 17:10 /dev/sde2brw-rw---- 1 grid asmadmin 8, 82 Jul 31 17:10 /dev/sdf2brw-rw---- 1 grid asmadmin 8, 98 Jul 31 17:10 /dev/sdg2

3.4 使用Xmanager图形化界面配置GI

Xmanager通过grid用户登录,进入$ORACLE_HOME目录,运行gridSetup配置GI

$ cd $ORACLE_HOME
$ ./gridSetup.sh

其实从12cR2开始,GI的配置就跟之前有一些变化,19c也一样,下面来看下GI配置的整个图形化安装的过程截图:





注:这里Public网卡这里用的enp0s3,ASM&Private这里用的enp0s8。


注:这里有一个新的存储GIMR的,之前12c、18c版本都是选择是外部冗余的一个40G大小的盘(当初给很多刚接触安装12cRAC的DBA造成了不适),而在19c安装中可以看到默认就是不选择的,恢复了11g时代的清爽感,这点我个人觉得很赞。

注:这里跟之前区别不大,我依然是选择3块1G的盘,Normal冗余作为OCR和voting disk。








注:这里检查出来的问题都需要认真核对,确认确实可以忽略才可以点击“Ignore All”,如果这里检测出缺少某些RPM包,需要使用yum安装好。我这里是自己的测试环境,分的配置较低,所以有内存和swap检测不通过,实际生产环境不应出现。


注:执行root脚本,确保先在一节点执行完毕后,再在其他节点执行。

第一个节点root执行脚本:

[root@db193 ~]# /u01/app/oraInventory/orainstRoot.shChanging permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.
[root@db193 ~]# /u01/app/19.3.0/grid/root.shPerforming root user operation.
The following environment variables are set as:ORACLE_OWNER= gridORACLE_HOME=  /u01/app/19.3.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]: Copying dbhome to /usr/local/bin ...Copying oraenv to /usr/local/bin ...Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed byDatabase Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on optionUsing configuration parameter file: /u01/app/19.3.0/grid/crs/install/crsconfig_params
The log of current session can be found at:/u01/app/grid/crsdata/db193/crsconfig/rootcrs_db193_2019-07-31_07-27-23AM.log2019/07/31 07:28:07 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.2019/07/31 07:28:08 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.2019/07/31 07:28:08 CLSRSC-363: User ignored prerequisites during installation2019/07/31 07:28:08 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.2019/07/31 07:28:17 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.2019/07/31 07:28:21 CLSRSC-594: Executing installation step 5 of 19: 'SetupOSD'.2019/07/31 07:28:21 CLSRSC-594: Executing installation step 6 of 19: 'CheckCRSConfig'.2019/07/31 07:28:21 CLSRSC-594: Executing installation step 7 of 19: 'SetupLocalGPNP'.2019/07/31 07:31:54 CLSRSC-594: Executing installation step 8 of 19: 'CreateRootCert'.2019/07/31 07:32:24 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.2019/07/31 07:32:37 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.2019/07/31 07:33:17 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.2019/07/31 07:33:18 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.2019/07/31 07:33:42 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.2019/07/31 07:33:43 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'2019/07/31 07:34:48 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.2019/07/31 07:35:21 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.2019/07/31 07:35:50 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.2019/07/31 07:36:14 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.ASM has been created and started successfully.
[DBT-30001] Disk groups created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-190731AM073727.log for details.2019/07/31 07:40:00 CLSRSC-482: Running command: '/u01/app/19.3.0/grid/bin/ocrconfig -upgrade grid oinstall'CRS-4256: Updating the profile
Successful addition of voting disk b789e47e76d74f06bf5f8b5cb4d62b88.
Successful addition of voting disk 3bc8119dfafe4fbebf7e1bf11aec8b9a.
Successful addition of voting disk bccdf28694a54ffcbf41354c7e4f133d.
Successfully replaced voting disk group with +CRS.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced##  STATE    File Universal Id                File Name Disk group--  -----    -----------------                --------- --------- 1. ONLINE   b789e47e76d74f06bf5f8b5cb4d62b88 (/dev/asm-ocr1) [CRS] 2. ONLINE   3bc8119dfafe4fbebf7e1bf11aec8b9a (/dev/asm-ocr2) [CRS] 3. ONLINE   bccdf28694a54ffcbf41354c7e4f133d (/dev/asm-ocr3) [CRS]
Located 3 voting disk(s).2019/07/31 07:48:20 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.2019/07/31 07:50:48 CLSRSC-343: Successfully started Oracle Clusterware stack2019/07/31 07:50:49 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.2019/07/31 08:04:16 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.2019/07/31 08:08:26 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
[root@db193 ~]#

执行成功后,在第二个节点root执行脚本:

[root@db195 ~]# /u01/app/oraInventory/orainstRoot.shChanging permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.
[root@db195 ~]# /u01/app/19.3.0/grid/root.shPerforming root user operation.
The following environment variables are set as:ORACLE_OWNER= gridORACLE_HOME=  /u01/app/19.3.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]: Copying dbhome to /usr/local/bin ...Copying oraenv to /usr/local/bin ...Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed byDatabase Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on optionUsing configuration parameter file: /u01/app/19.3.0/grid/crs/install/crsconfig_params
The log of current session can be found at:/u01/app/grid/crsdata/db195/crsconfig/rootcrs_db195_2019-07-31_08-10-55AM.log2019/07/31 08:11:34 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.2019/07/31 08:11:34 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.2019/07/31 08:11:34 CLSRSC-363: User ignored prerequisites during installation2019/07/31 08:11:34 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.2019/07/31 08:11:44 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.2019/07/31 08:11:44 CLSRSC-594: Executing installation step 5 of 19: 'SetupOSD'.2019/07/31 08:11:45 CLSRSC-594: Executing installation step 6 of 19: 'CheckCRSConfig'.2019/07/31 08:11:51 CLSRSC-594: Executing installation step 7 of 19: 'SetupLocalGPNP'.2019/07/31 08:12:03 CLSRSC-594: Executing installation step 8 of 19: 'CreateRootCert'.2019/07/31 08:12:04 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.2019/07/31 08:12:32 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.2019/07/31 08:12:33 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.2019/07/31 08:12:56 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.2019/07/31 08:12:59 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'2019/07/31 08:13:59 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.2019/07/31 08:14:31 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.2019/07/31 08:14:42 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.2019/07/31 08:14:51 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.2019/07/31 08:14:58 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.2019/07/31 08:15:32 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.2019/07/31 08:17:41 CLSRSC-343: Successfully started Oracle Clusterware stack2019/07/31 08:17:42 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.2019/07/31 08:22:03 CLSRSC-594: 
Executing installation step 19 of 19: 'PostConfig'.2019/07/31 08:23:05 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
[root@db195 ~]#

这段时间从打印的日志来看,虽然耗时也比以前11g时代长,但实际相比12c的上一个版本18c来说却缩短了不少。
root脚本成功执行完后继续安装:

注:最后这个报错提示,查看日志发现是因为使用了一个scan ip的提示,可以忽略。

至此GI配置完成。

3.5 验证crsctl的状态

crsctl stat res -t查看集群资源状态信息,看到19c实际相对18c来说,有做很多的精简,更趋于稳定性因素考虑~

[grid@db193 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------Name           Target  State        Server                   State details
--------------------------------------------------------------------------------Local Resources
--------------------------------------------------------------------------------ora.LISTENER.lsnr               ONLINE  ONLINE       db193                    STABLE               ONLINE  ONLINE       db195                    STABLEora.chad               ONLINE  ONLINE       db193                    STABLE               ONLINE  ONLINE       db195                    STABLEora.net1.network               ONLINE  ONLINE       db193                    STABLE               ONLINE  ONLINE       db195                    STABLEora.ons               ONLINE  ONLINE       db193                    STABLE               ONLINE  ONLINE       db195                    STABLE
--------------------------------------------------------------------------------Cluster Resources
--------------------------------------------------------------------------------ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)      1        ONLINE  ONLINE       db193                    STABLE      2        ONLINE  ONLINE       db195                    STABLE      3        OFFLINE OFFLINE                               STABLEora.CRS.dg(ora.asmgroup)      1        ONLINE  ONLINE       db193                    STABLE      2        ONLINE  ONLINE       db195                    STABLE      3        OFFLINE OFFLINE                               STABLEora.LISTENER_SCAN1.lsnr      1        ONLINE  ONLINE       db193                    STABLEora.asm(ora.asmgroup)      1        ONLINE  ONLINE       db193                    Started,STABLE      2        ONLINE  ONLINE       db195                    Started,STABLE      3        OFFLINE OFFLINE                               STABLEora.asmnet1.asmnetwork(ora.asmgroup)      1        ONLINE  ONLINE       db193                    STABLE      2        ONLINE  ONLINE       db195                    STABLE      3        OFFLINE OFFLINE                               STABLEora.cvu      1        ONLINE  ONLINE       db193                    STABLEora.db193.vip      1        ONLINE  ONLINE       db193                    STABLEora.db195.vip      1        ONLINE  ONLINE       db195                    STABLEora.qosmserver      1        ONLINE  ONLINE       db193                    STABLEora.scan1.vip      1        ONLINE  ONLINE       db193                    STABLE
--------------------------------------------------------------------------------

crsctl stat res -t -init

[grid@db193 ~]$ crsctl stat res -t -init
--------------------------------------------------------------------------------Name           Target  State        Server                   State details
--------------------------------------------------------------------------------Cluster Resources
--------------------------------------------------------------------------------ora.asm      1        ONLINE  ONLINE       db193                    Started,STABLEora.cluster_interconnect.haip      1        ONLINE  ONLINE       db193                    STABLEora.crf      1        ONLINE  ONLINE       db193                    STABLEora.crsd      1        ONLINE  ONLINE       db193                    STABLEora.cssd      1        ONLINE  ONLINE       db193                    STABLEora.cssdmonitor      1        ONLINE  ONLINE       db193                    STABLEora.ctssd      1        ONLINE  ONLINE       db193                    ACTIVE:0,STABLEora.diskmon      1        OFFLINE OFFLINE                               STABLEora.evmd      1        ONLINE  ONLINE       db193                    STABLEora.gipcd      1        ONLINE  ONLINE       db193                    STABLEora.gpnpd      1        ONLINE  ONLINE       db193                    STABLEora.mdnsd      1        ONLINE  ONLINE       db193                    STABLEora.storage      1        ONLINE  ONLINE       db193                    STABLE
--------------------------------------------------------------------------------

3.6 测试集群的FAILED OVER功能

节点2被重启,查看节点1状态:

[grid@db193 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------Name           Target  State        Server                   State details
--------------------------------------------------------------------------------Local Resources
--------------------------------------------------------------------------------ora.LISTENER.lsnr               ONLINE  ONLINE       db193                    STABLEora.chad               ONLINE  ONLINE       db193                    STABLEora.net1.network               ONLINE  ONLINE       db193                    STABLEora.ons               ONLINE  ONLINE       db193                    STABLE
--------------------------------------------------------------------------------Cluster Resources
--------------------------------------------------------------------------------ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)      1        ONLINE  ONLINE       db193                    STABLE      2        ONLINE  OFFLINE                               STABLE      3        OFFLINE OFFLINE                               STABLEora.CRS.dg(ora.asmgroup)      1        ONLINE  ONLINE       db193                    STABLE      2        ONLINE  OFFLINE                               STABLE      3        OFFLINE OFFLINE                               STABLEora.LISTENER_SCAN1.lsnr      1        ONLINE  ONLINE       db193                    STABLEora.asm(ora.asmgroup)      1        ONLINE  ONLINE       db193                    Started,STABLE      2        ONLINE  OFFLINE                               STABLE      3        OFFLINE OFFLINE                               STABLEora.asmnet1.asmnetwork(ora.asmgroup)      1        ONLINE  ONLINE       db193                    STABLE      2        ONLINE  OFFLINE                               STABLE      3        OFFLINE OFFLINE                               STABLEora.cvu      1        ONLINE  ONLINE       db193                    STABLEora.db193.vip      1        ONLINE  ONLINE       db193                    STABLEora.db195.vip      1        ONLINE  INTERMEDIATE db193                    FAILED OVER,STABLEora.qosmserver      1        ONLINE  ONLINE       db193                    STABLEora.scan1.vip      1        ONLINE  ONLINE       db193                    STABLE
--------------------------------------------------------------------------------

节点1被重启,查看节点2状态:

[grid@db195 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       db195                    STABLE
ora.chad
               ONLINE  ONLINE       db195                    STABLE
ora.net1.network
               ONLINE  ONLINE       db195                    STABLE
ora.ons
               ONLINE  ONLINE       db195                    STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
      1        ONLINE  OFFLINE                               STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.CRS.dg(ora.asmgroup)
      1        ONLINE  OFFLINE                               STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       db195                    STABLE
ora.asm(ora.asmgroup)
      1        ONLINE  OFFLINE                               STABLE
      2        ONLINE  ONLINE       db195                    Started,STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
      1        ONLINE  OFFLINE                               STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.cvu
      1        ONLINE  ONLINE       db195                    STABLE
ora.db193.vip
      1        ONLINE  INTERMEDIATE db195                    FAILED OVER,STABLE
ora.db195.vip
      1        ONLINE  ONLINE       db195                    STABLE
ora.qosmserver
      1        ONLINE  ONLINE       db195                    STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       db195                    STABLE
--------------------------------------------------------------------------------

附:集群日志位置:

--如果忘记,可以使用adrci查看日志位置
[grid@db195 trace]$ pwd
/u01/app/grid/diag/crs/db195/crs/trace
[grid@db195 trace]$ tail -20f alert.log
2019-07-31 09:13:53.813 [CRSD(9156)]CRS-8500: Oracle Clusterware CRSD process is starting with operating system process ID 9156
2019-07-31 09:14:05.943 [CRSD(9156)]CRS-1012: The OCR service started on node db195.
2019-07-31 09:14:06.585 [CRSD(9156)]CRS-1201: CRSD started on node db195.
2019-07-31 09:14:15.787 [ORAAGENT(9824)]CRS-8500: Oracle Clusterware ORAAGENT process is starting with operating system process ID 9824
2019-07-31 09:14:16.576 [ORAROOTAGENT(9856)]CRS-8500: Oracle Clusterware ORAROOTAGENT process is starting with operating system process ID 9856
2019-07-31 09:14:28.516 [ORAAGENT(10272)]CRS-8500: Oracle Clusterware ORAAGENT process is starting with operating system process ID 10272
2019-07-31 09:21:07.409 [OCTSSD(8378)]CRS-2407: The new Cluster Time Synchronization Service reference node is host db195.
2019-07-31 09:21:10.569 [OCSSD(7062)]CRS-1625: Node db193, number 1, was shut down
2019-07-31 09:21:10.948 [OCSSD(7062)]CRS-1601: CSSD Reconfiguration complete. Active nodes are db195 .
2019-07-31 09:21:11.055 [CRSD(9156)]CRS-5504: Node down event reported for node 'db193'.
2019-07-31 09:21:11.292 [CRSD(9156)]CRS-2773: Server 'db193' has been removed from pool 'Free'.
2019-07-31 09:22:25.944 [OLOGGERD(21377)]CRS-8500: Oracle Clusterware OLOGGERD process is starting with operating system process ID 21377
2019-07-31 09:23:41.207 [OCSSD(7062)]CRS-1601: CSSD Reconfiguration complete. Active nodes are db193 db195 .
[grid@db195 trace]$ tail -5f ocssd.trc
2019-07-31 09:35:40.732 :    CSSD:527664896: [     INFO] clssgmDiscEndpcl: initiating gipcDestroy 0x27553
2019-07-31 09:35:40.732 :    CSSD:527664896: [     INFO] clssgmDiscEndpcl: completed gipcDestroy 0x27553
2019-07-31 09:35:42.136 :    CSSD:527664896: [     INFO]   : Processing member data change type 1, size 4 for group HB+ASM, memberID 17:2:2
2019-07-31 09:35:42.136 :    CSSD:527664896: [     INFO]   : Sending member data change to GMP for group HB+ASM, memberID 17:2:2
2019-07-31 09:35:42.138 :    CSSD:1010091776: [     INFO] clssgmpcMemberDataUpdt: grockName HB+ASM memberID 17:2:2, datatype 1 datasize 4

至此,19c的GI配置已全部完成。

Linux平台 Oracle 19c RAC安装Part3:DB配置

四、 DB(Database)配置

  • 4.1 解压DB的安装包
  • 4.2 DB软件配置
  • 4.3 ASMCA创建磁盘组
  • 4.4 DBCA建库
  • 4.5 验证crsctl的状态

Linux平台 Oracle 19c RAC安装指导:
Part1: Linux平台 Oracle 19c RAC安装Part1:准备工作
Part2: Linux平台 Oracle 19c RAC安装Part2:GI配置
Part3: Linux平台 Oracle 19c RAC安装Part3:DB配置

本文安装环境: OEL 7.6 + Oracle 19.3 GI & RAC

四、DB(Database)安装

4.1 解压DB的安装包

oracle用户登录,在$ORACLE_HOME下解压db包(19c的db也是像18c一样直接解压到$ORACLE_HOME下,免安装):

[oracle@db193 ~]$ mkdir -p /u01/app/oracle/product/19.3.0/db_1
[oracle@db193 ~]$ cd $ORACLE_HOME
[oracle@db193 db_1]$ pwd
/u01/app/oracle/product/19.3.0/db_1
[oracle@db193 db_1]$ ls
[oracle@db193 db_1]$ unzip /u01/media/LINUX.X64_193000_db_home.zip

4.2 DB软件配置

打开Xmanager软件,Oracle用户登录,配置数据库软件。

[oracle@db193 db_1]$ pwd
/u01/app/oracle/product/19.3.0/db_1
[oracle@db193 db_1]$ export DISPLAY=192.168.1.31:0.0
[oracle@db193 db_1]$ ./runInstaller

下面截取DB软件配置的过程如下:

注:这里选择只安装软件,数据库后面创建好ASM磁盘组后再运行dbca创建。


注:配置好ssh等价性。





注:可以进行修复的,按提示执行脚本修复。
我这里还有swap的问题,因为是测试环境资源有限,可以忽略,如果生产环境,强烈建议调整符合要求。
如果还有其他的检查项未通过,则无论是生产还是测试环境,都不建议忽略,而应该整改符合要求为止。


注:最后root用户按安装提示执行1个脚本,需要在各节点分别执行。

至此,已完成DB软件的配置。

4.3 ASMCA创建磁盘组

打开Xmanager软件,grid用户登录,asmca创建ASM磁盘组

[grid@db193 ~]$ export DISPLAY=192.168.1.31:0.0
[grid@db193 ~]$ asmca

使用asmca调用创建磁盘组的图形界面,首先映入眼帘的是鲜艳的19c配色图:

然后正式进入asmca的界面:

这里我先创建一个DATA磁盘组,一个FRA磁盘组,冗余选择external(生产如果选择external,底层存储必须已经做了RAID)。



这里看到新创建的DATA和FRA磁盘组已经创建完成并成功mount。

4.4 DBCA建库

打开Xmanager软件,oracle用户登录,dbca图形创建数据库。
下面是DBCA建库的过程截图:





注:这里选择是否启用CDB,并定义CDB和PDB的名称。我选择启用CDB,CDB名称为jydb,并自动创建2个PDB,前缀名就叫pdb。

注:这里我选择使用OMF,默认也是使用OMF。

注:这里我暂时没有启用FRA,后续调整开归档时将归档文件放在+FRA磁盘组中。


注:这里选择内存分配具体值,选择数据库的字符集,我这里没选择,字符集默认是AL32UTF8。需要根据实际情况修改。

注:这里可以选择是否配置EM,我这里选择配置,如果你不需要,可以选择不配置。CVU一般也不配置,我这里学习目的选择配置。

注:这里设置密码,我实验环境直接oracle,不符合规范,生产环境建议设置复杂密码。

注:这里可以选择将创建数据库的脚本保存下来,根据你的需求,可选可不选。

注:这里如果还有其他的检查未通过,则不能忽略。我这里是因为使用一个scan,对应报错可以忽略。

注:这里是安装信息的概览,建议认真核实,如果有不对的还可以退回去改。确认无误后开始创建数据库。

注:19c建库的时间还是和18c一样,长到让人崩溃,感觉以后DBA安装过程中可以提前下几个电影来边等边看了。

INFO: Aug 01, 2019 4:42:58 AM oracle.assistants.common.base.driver.AssistantConfigDriver updateExitStatus
INFO: Total time taken for DBCA operation in mm:ss is 286:16


至此,Oracle 19.3 RAC数据库已经创建成功,目前如果你的企业想上12c系列的数据库,推荐直接选择19c(12c的最终版本12.2.0.3),19c相对18c来说更趋于稳定,Oracle的支持周期也更长。

4.5 验证crsctl的状态

grid用户登录,crsctl stat res -t 查看集群资源的状态,发现各节点的DB资源已经正常Open。

[grid@db193 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
ora.chad
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
ora.net1.network
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
ora.ons
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.CRS.dg(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.DATA.dg(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        ONLINE  OFFLINE                               STABLE
ora.FRA.dg(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        ONLINE  OFFLINE                               STABLE
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       db193                    STABLE
ora.asm(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    Started,STABLE
      2        ONLINE  ONLINE       db195                    Started,STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.cvu
      1        ONLINE  ONLINE       db193                    STABLE
ora.db193.vip
      1        ONLINE  ONLINE       db193                    STABLE
ora.db195.vip
      1        ONLINE  ONLINE       db195                    STABLE
ora.jydb.db
      1        ONLINE  ONLINE       db193                    Open,HOME=/u01/app/oracle/product/19.3.0/db_1,STABLE
      2        ONLINE  ONLINE       db195                    Open,HOME=/u01/app/oracle/product/19.3.0/db_1,STABLE
ora.qosmserver
      1        ONLINE  ONLINE       db193                    STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       db193                    STABLE
--------------------------------------------------------------------------------

oracle用户登录,sqlplus / as sysdba

[oracle@db193 ~]$ sqlplus / as sysdba
SQL*Plus: Release 19.0.0.0.0 - Production on Thu Aug 1 06:49:29 2019
Version 19.3.0.0.0
Copyright (c) 1982, 2019, Oracle.  All rights reserved.
Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.3.0.0.0

SQL> select inst_id, name, open_mode from gv$database;

   INST_ID NAME      OPEN_MODE
---------- --------- --------------------
         1 JYDB      READ WRITE
         2 JYDB      READ WRITE

SQL> show con_id

CON_ID
------------------------------
1

SQL> show con_name

CON_NAME
------------------------------
CDB$ROOT

SQL> show pdbs

    CON_ID CON_NAME                       OPEN MODE  RESTRICTED
---------- ------------------------------ ---------- ----------
         2 PDB$SEED                       READ ONLY  NO
         3 PDB1                           READ WRITE NO
         4 PDB2                           READ WRITE NO

SQL> alter session set container = pdb2;

Session altered.

SQL> show pdbs

    CON_ID CON_NAME                       OPEN MODE  RESTRICTED
---------- ------------------------------ ---------- ----------
         4 PDB2                           READ WRITE NO

SQL> select name from v$datafile;

NAME
--------------------------------------------------------------------------------
+DATA/JYDB/8F00F4E12A9F708DE053C101A8C0395D/DATAFILE/system.280.1015129671
+DATA/JYDB/8F00F4E12A9F708DE053C101A8C0395D/DATAFILE/sysaux.281.1015129671
+DATA/JYDB/8F00F4E12A9F708DE053C101A8C0395D/DATAFILE/undotbs1.279.1015129669
+DATA/JYDB/8F00F4E12A9F708DE053C101A8C0395D/DATAFILE/undo_2.283.1015129973
+DATA/JYDB/8F00F4E12A9F708DE053C101A8C0395D/DATAFILE/users.284.1015130015

SQL>

可以看到所有的资源均正常,至此,整个在OEL 7.6 上安装 Oracle 19.3 GI & RAC 的工作已经全部结束。

【19c】Oracle 19c rac的搭建相关推荐

  1. oracle 11g rac adg 搭建实施

    oracle 11g rac adg 搭建实施 一.环境说明 1.基础环境 二.ADG配置 1.配置域名解析文件 2.主库开启归档.开启附加日志 3.主库配置归档删除策略 4.主库配置 Standby ...

  2. VMware下Oracle 11g RAC环境搭建

    主机操作系统:windows 10  虚拟机VMware12:两台Oracle Linux R6 U3 x86_64  Oracle Database software: Oracle11gR2  C ...

  3. oracle 11g rac adg搭建,11G RAC ADG 配置

    很高兴又来和大家分享技术知识了,本次分享的技术是11G RAC环境如何配置ADG,ADG是Oracle 11G 的新特性. 1.在主库,备库上配置tnsnames. 1)在主库上tnsnames.or ...

  4. Oracle 19c rac的搭建

    Oracle  19c rac的搭建 基于18c的rac进行删除再搭建: http://blog.itpub.net/26736162/viewspace-2220931/ hostnamectl s ...

  5. 【DG】Oracle 19c使用dbca来搭建物理DG--主rac备rac

    [DG]Oracle 19c使用dbca来搭建物理DG(单实例)参考:http://blog.itpub.net/26736162/viewspace-2638038/ 以下内容为主库是rac,备库也 ...

  6. Oracle 19c使用dbca来搭建物理DG--主rac备rac

    Oracle 19c使用dbca来搭建物理DG--主rac备rac 主备配置rman参数: CONFIGURE ARCHIVELOG DELETION POLICY TO APPLIED ON ALL ...

  7. Oracle 19c rac的搭建

    Oracle 19c rac的搭建 基于18c的rac进行删除再搭建: http://blog.itpub.net/26736162/viewspace-2220931/ hostnamectl se ...

  8. Oracle 19C搭建rac环境

    安装环境oracle linux7.7 + vbox 5.2.30 + Oracle 19C 一.创建用户 groupadd -g 11001 oinstall groupadd -g 11002 d ...

  9. 【DG】Oracle 19c使用dbca来搭建物理DG

    2019独角兽企业重金招聘Python工程师标准>>> [DG]Oracle 19c使用dbca来搭建物理DG(12cR2可以使用但主库必须是单机非CDB的库,18c无限制) Usi ...

最新文章

  1. mailscanner参数
  2. Bootstrap的lia
  3. 使用批处理查看.class文件内容--javap指令
  4. NLP新秀:BERT的优雅解读
  5. 备忘:phalcon的坑
  6. Springboot项目修改html后不需要重启---springboot项目的热部署
  7. 在Java 7中处理文件
  8. Dynamics 365 On-premises和Online 的不同
  9. 排序专题之C++中的sort函数调用
  10. greasyfork脚本怎么取消_更新了js脚本,回答一些常见问题
  11. 清华硕士分享思维导图:机器学习所需的数学基础
  12. 必须声明标量变量是什么意思_机器视觉学习之halcon系列---一文带你理解handle变量是什么意思...
  13. sql sever如何进行英文词频统计_英语语料库及词频表介绍
  14. DY-SV17F运用集—语音IC
  15. WebSocket协议及聊天室的简易实现
  16. OSPF多实例路由防环检测功能介绍
  17. HDU - 1431 素数回文 [ 学到了 ]
  18. excel怎么启用宏_轻便免费的Excel合并工具,支持wps和office全系统
  19. 挂件巡检机器人_最近DIY的室内巡检机器人
  20. MATLAB用imshow画图时更改和显示坐标轴

热门文章

  1. Adding visible gpu devices: 0 每次运行到这里卡很久
  2. 原来order by 中也可以使用子查询
  3. idea 项目结构后面显示0%classes,0% lines covered
  4. PTA——心理阴影面积
  5. WindowsAPI 获取驱动器属性
  6. CF754D Fedor and coupons(堆)
  7. 中标麒麟、linux系统-Sikuli安装操作手册
  8. 如何重置mac上的系统管理控制器smc教程
  9. R语言limma包差异表达分析
  10. 基于MVC轻量级的JWT权限验证