Saturday, March 18, 2017

Initialization parameters:


Cluster-specific parameters:

instance_number
instance_groups
thread
cluster_database
cluster_database_instances
cluster_interconnects
remote_listener
local_listener
parallel_instance_group
max_commit_propagation_delay

Example:

TESTDB1.instance_number=1
TESTDB2.instance_number=2
TESTDB1.thread=1
TESTDB2.thread=2
*.cluster_database_instances=2
*.cluster_database=TRUE
*.remote_listener='rac-tdb-scan:1521'
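
The same sid-prefixed entries can also be maintained online with ALTER SYSTEM and the SID clause. A minimal sketch using values from the example above (static parameters such as instance_number can only be changed with SCOPE=SPFILE):

SQL> ALTER SYSTEM SET remote_listener='rac-tdb-scan:1521' SID='*' SCOPE=BOTH;
SQL> ALTER SYSTEM SET instance_number=2 SID='TESTDB2' SCOPE=SPFILE;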


Parameters that must have the same value on all instances:

active_instance_count
archive_lag_target
compatible
cluster_database
cluster_database_instances
cluster_interconnects      
control_files
db_block_size
db_domain
db_files
db_name
db_recovery_file_dest
db_recovery_file_dest_size
db_unique_name
dml_locks                     -- when 0
instance_type                -- rdbms or asm
max_commit_propagation_delay
parallel_max_servers
remote_login_password_file
trace_enabled
undo_management

Example:

*.cluster_database=true
*.cluster_database_instances=2
*.compatible='11.2.0.2'
*.undo_management='AUTO'
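
A quick way to confirm that such a parameter really is identical everywhere is to compare it across instances with gv$parameter (a sketch, run from any instance):

SQL> SELECT inst_id, name, value
       FROM gv$parameter
      WHERE name IN ('cluster_database', 'compatible', 'undo_management')
      ORDER BY name, inst_id;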


Parameters that must have a unique value on each instance:


instance_number
instance_name
thread
undo_tablespace/rollback_segments

Example:

INST1.instance_number=1
INST1.instance_name=TESTDB1
INST1.thread=1
INST1.undo_tablespace='UNDOTBS1'

INST2.instance_number=2
INST2.instance_name=TESTDB2
INST2.thread=2
INST2.undo_tablespace='UNDOTBS2'
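
The per-instance (SID-prefixed) entries stored in the spfile can be reviewed through v$spparameter, whose SID column shows which instance each value applies to (a sketch):

SQL> SELECT sid, name, value
       FROM v$spparameter
      WHERE name IN ('instance_number', 'instance_name', 'thread', 'undo_tablespace')
      ORDER BY name, sid;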



Example of an initialization parameter file:

TESTDB2.__db_cache_size=111132278784
TESTDB1.__db_cache_size=109521666048
TESTDB1.__oracle_base='/oracle/diag'#ORACLE_BASE set from environment
TESTDB2.__oracle_base='/oracle/diag'#ORACLE_BASE set from environment
TESTDB2.__shared_io_pool_size=0
TESTDB1.__shared_io_pool_size=268435456
TESTDB2.__streams_pool_size=1207959552
TESTDB1.__streams_pool_size=1342177280

TESTDB1.dispatchers='(PROTOCOL=TCP)(SERVICE=TESTDB1XDB)'
TESTDB2.dispatchers='(PROTOCOL=TCP)(SERVICE=TESTDB2XDB)'
TESTDB1.instance_name='TESTDB1'
TESTDB2.instance_name='TESTDB2'
TESTDB1.instance_number=1
TESTDB2.instance_number=2
TESTDB1.thread=1
TESTDB2.thread=2
TESTDB1.undo_tablespace='PSAPUNDO'
TESTDB2.undo_tablespace='PSAPUNDO2'

*._awr_mmon_deep_purge_all_expired=TRUE
*._fix_control='5099019:ON','5705630:ON','6055658:OFF','6120483:OFF','6399597:ON','6430500:ON','6440977:ON','6626018:ON','6972291:ON','7168184:OFF','7658097:ON','8937971:ON','9196440:ON','9495669:ON','13077335:ON','13627489:ON','14255600:ON','14595273:ON','18405517:2','20355502:8','14846352:OFF','22540411:ON','10038517:OFF'#Note 1888485 RECOMMENDED SETTINGS
*._high_priority_processes='LMS*|LM*|LCK0|GCR*|DIAG|CKPT|DBRM|RMS0|LGWR'
*._in_memory_undo=FALSE
*._ktb_debug_flags=8
*._MUTEX_WAIT_SCHEME=1
*._MUTEX_WAIT_TIME=10
*._OPTIM_PEEK_USER_BINDS=false
*._OPTIMIZER_ADAPTIVE_CURSOR_SHARING=false
*._optimizer_aggr_groupby_elim=FALSE
*._optimizer_batch_table_access_by_rowid=FALSE
*._OPTIMIZER_EXTENDED_CURSOR_SHARING_REL='none'
*._optimizer_reduce_groupby_key=FALSE
*._OPTIMIZER_USE_FEEDBACK=false
*._rowsets_enabled=FALSE
*._securefiles_concurrency_estimate=50
*._suppress_identifiers_on_dupkey=TRUE
*._use_single_log_writer='TRUE'

*.audit_file_dest='/oracle/rdbms/12.1.0.2/TESTDB/rdbms/audit'
*.audit_sys_operations=true
*.audit_trail='DB'
*.cluster_database_instances=2
*.cluster_database=TRUE
*.compatible='12.1.0.2.0'
*.control_file_record_keep_time=30
*.control_files='+TST_DATA/TESTDB/controlfile/current.774.882710569'#Set by RMAN
*.db_block_size=8192
*.db_cache_size=111132278784
*.db_create_file_dest='+TST_DATA'
*.db_create_online_log_dest_1='+TS_REDO'
*.db_file_name_convert='+DATA','+TST_DATA'
*.db_files=1024
*.db_name='TESTDB'
*.db_recovery_file_dest_size=30000M
*.db_unique_name='TESTDB'
*.event='10027','10028','10142','10183','10191','10995 level 2','38068 level 100','38085','38087','44951 level 1024'#SAP_121022_201503 RECOMMENDED SETTINGS
*.fal_client='TESTDB'
*.fal_server='TESTDB'
*.fast_start_io_target=0
*.fast_start_mttr_target=0
*.filesystemio_options='SETALL'
*.JAVA_POOL_SIZE=261196308
*.LARGE_POOL_SIZE=261196308
*.log_archive_config='NODG_CONFIG'
*.log_archive_dest_1='LOCATION=+ARCHLOGS/TESTDB'
*.log_archive_dest_2=''
*.log_archive_dest_state_2='ENABLE'
*.log_archive_format='%t_%s_%r.dbf'
*.log_checkpoint_interval=3000000
*.log_checkpoint_timeout=0
*.log_checkpoints_to_alert=true
*.log_file_name_convert='/oracle/TESTDB/','+DATA/TESTDB/'
*.max_dump_file_size='20000'
*.open_cursors=2000
*.optimizer_adaptive_features=FALSE
*.optimizer_capture_sql_plan_baselines=FALSE
*.optimizer_index_cost_adj=20
*.parallel_execution_message_size=16384
*.parallel_max_servers=0
*.parallel_min_servers=0
*.parallel_threads_per_cpu=1
*.pga_aggregate_target=36045090422
*.processes=1000
*.query_rewrite_enabled='false'
*.recyclebin='off'
*.remote_listener='rac-tdb-scan:1521'
*.remote_login_passwordfile='exclusive'
*.replication_dependency_tracking=false
*.resource_manager_plan=''
*.service_names='TESTDB'
*.sessions=2000
*.shared_pool_size=11753833833
*.standby_archive_dest='+ARCHLOGS/TESTDB'
*.standby_file_management='AUTO'
*.undo_retention=86400
*.use_large_pages='only'
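
With the control files and datafiles in ASM, the spfile itself is normally also stored in ASM and shared by all instances; each node then keeps only a one-line pointer pfile. A sketch of that layout (the spfile path and host prompt are illustrative, following the +TST_DATA disk group used above):

[oracle@omega01]$ cat $ORACLE_HOME/dbs/initTESTDB1.ora
SPFILE='+TST_DATA/TESTDB/spfileTESTDB.ora'

SQL> show parameter spfile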




Adding a new node to the cluster:


Check network connectivity and ASM disk status at the OS level

[grid@omega01]$ ssh omega03 date
[grid@omega01]$ nslookup omega03
[root@omega01]# ping omega03.localdomain.ru
[root@omega01]# ping 192.168.2.73

[root@omega03]# /etc/init.d/oracleasm listdisks
DATA1
DATA2
DATA3
OCR1
OCR2
OCR3

[root@omega03]# ls -l  /dev/oracleasm/*
/dev/oracleasm/disks:
total 0
brw-rw---- 1 grid asmadmin 8, 17 Aug 30 09:21 DATA1
brw-rw---- 1 grid asmadmin 8, 33 Aug 29 14:16 DATA2
brw-rw---- 1 grid asmadmin 8, 49 Aug 29 14:16 DATA3
brw-rw---- 1 grid asmadmin 8, 65 Aug 29 14:16 OCR1
brw-rw---- 1 grid asmadmin 8, 81 Aug 29 14:16 OCR2
brw-rw---- 1 grid asmadmin 8, 97 Aug 29 14:16 OCR3


You can also dump the ASM disk header with kfed if addnode.sh failed but its file copy operation succeeded:

[grid@omega03]$ $GRID_HOME/bin/kfed  read /dev/oracleasm/disks/DATA2  | grep name
kfdhdb.dskname:               DATA_0001 ; 0x028: length=9
kfdhdb.grpname:                    DATA ; 0x048: length=4
kfdhdb.fgname:                DATA_0001 ; 0x068: length=9
kfdhdb.capname:                         ; 0x088: length=0

Use kfod to get an idea about the ASM disk status

[grid@omega03]$ kfod asm_diskstring='/dev/oracleasm/disks/*' nohdr=true verbose=true disks=all status=true op=disks
5114 CANDIDATE /dev/oracleasm/disks/DATA1 grid     asmadmin
5114 MEMBER    /dev/oracleasm/disks/DATA2 grid     asmadmin
5114 MEMBER    /dev/oracleasm/disks/DATA3 grid     asmadmin
2047 CANDIDATE /dev/oracleasm/disks/OCR1  grid     asmadmin
2047 CANDIDATE /dev/oracleasm/disks/OCR2  grid     asmadmin
2047 CANDIDATE /dev/oracleasm/disks/OCR3  grid     asmadmin



Verify the current setup with the following cluvfy commands:

[grid@omega01]$ cluvfy stage -post hwos -n omega01,omega03
[grid@omega01]$ cluvfy stage -pre nodeadd -n omega03 | grep PRV
[grid@omega01]$ cluvfy stage -pre crsinst -n omega03
[grid@omega01]$ cluvfy comp peer -refnode omega01 -n omega03 -orainv oinstall -osdba asmdba -verbose
use IGNORE_PREADDNODE_CHECKS if needed


Run addNode.sh for the Clusterware (GRID_HOME)

Use IGNORE_PREADDNODE_CHECKS=Y when there were explainable cluvfy errors such as PRVG-1013, PRVF-5636, PRVF-7617.
For GNS clusters, use CLUSTER_NEW_NODES.
For non-GNS clusters, use both CLUSTER_NEW_NODES and CLUSTER_NEW_VIRTUAL_HOSTNAMES.
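
For a non-GNS cluster the call would look like this (a sketch; the VIP name omega03-vip matches the one used in the 11.2 example further below):

[grid@omega01]$ ./addNode.sh "CLUSTER_NEW_NODES={omega03}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={omega03-vip}"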

Run addnode.sh

[grid@omega01]$ export IGNORE_PREADDNODE_CHECKS=Y
[grid@omega01]$ cd $GRID_HOME/oui/bin
[grid@omega01]$ ./addNode.sh "CLUSTER_NEW_NODES={omega03}"


Run required root scripts

[root@omega03]# /u01/app/oraInventory/orainstRoot.sh
[root@omega03]# /u01/app/11203/grid/root.sh


Run cluvfy and crsctl stat res -t to verify the cluster node addition

[root@omega01]# my_crs_stat


Verify post CRS status with cluvfy

[grid@omega01]$ cluvfy stage -post crsinst -n omega01,omega02,omega03
[grid@omega01]$ cluvfy stage -post nodeadd -n omega03

Ignore well-known errors like:
PRVF-5217 : An error occurred while trying to look up IP address for "omega01.localdomain.ru"


Install RAC database software on omega03

Install RAC database software on omega03 as oracle owner

[oracle@omega01]$ id
uid=54321(oracle) gid=54321(oinstall) groups=54321(oinstall),501(vboxsf),506(asmdba),54322(dba)

[oracle@omega01]$ cd $ORACLE_HOME/oui/bin
[oracle@omega01]$ ./addNode.sh "CLUSTER_NEW_NODES={omega03}"


[root@omega03]# /u01/app/oracle/product/11203/testdb/root.sh


Invoke dbca and add instance TESTDB3 on node OMEGA03

Run dbca as oracle owner:
[oracle@omega03]$ dbca
  Oracle RAC cluster database
    Instance Management
      Add an Instance
        Select your RAC database (TESTDB)
          Select instance: TESTDB3 - Host: OMEGA03
          Accept the default values for:
            Initialization Parameters
              Instance    Name                 Value
              TESTDB3     instance_number      3
              TESTDB3     thread               3
              TESTDB3     undo_tablespace      UNDOTBS3
            Tablespaces
              Name        Type                 Extent Management
              UNDOTBS3    PERMANENT, UNDO      LOCAL
            Data Files
              Name                        Tablespace    Size(M)
              OMF_UNDOTBS3_DATAFILE_0     UNDOTBS3      100
            Redo Log Groups
              Group    Size(K)    Thread
              5        51200      3
              6        51200      3
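
The same instance can also be added without the GUI. A silent-mode sketch mirroring the wizard values above (you will be prompted for the SYS password unless -sysDBAPassword is supplied):

[oracle@omega01]$ dbca -silent -addInstance -nodeList omega03 -gdbName TESTDB -instanceName TESTDB3 -sysDBAUserName sys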


Verify the cluster status using srvctl and cluvfy

[oracle@omega01]$ srvctl status database -d TESTDB
[grid@omega01]$ cluvfy comp admprv -o db_config -d $ORACLE_HOME -n omega01,omega02,omega03 -verbose


--------------------------------------------------------------------------------------------------

Add the new node rac3 to /etc/hosts on all nodes:
cat /etc/hosts

127.0.0.1 localhost localhost.localdomain

192.168.1.231  rac1.localdomain      rac1
10.10.10.231   rac1-priv.localdomain rac1-priv
192.168.1.234  rac1-vip.localdomain  rac1-vip

192.168.1.232  rac2.localdomain      rac2
10.10.10.232   rac2-priv.localdomain rac2-priv
192.168.1.235  rac2-vip.localdomain  rac2-vip

192.168.1.233  rac3.localdomain      rac3
10.10.10.233   rac3-priv.localdomain rac3-priv
192.168.1.236  rac3-vip.localdomain  rac3-vip

192.168.1.237 rac-scan.localdomain


Check that the new node rac3 is reachable over the network:

root@rac3# ping 192.168.1.231
root@rac3# ping 192.168.1.232
root@rac3# hostname

root@rac1# ping rac3

Network checked
DNS updated
/etc/hosts updated


Configure ntpd on the new node
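
A minimal check on RHEL/OEL, assuming time is synchronized with ntpd (Oracle expects the slewing option -x in /etc/sysconfig/ntpd):

root@rac3# grep OPTIONS /etc/sysconfig/ntpd     # the options string should include -x
root@rac3# service ntpd restart
root@rac3# chkconfig ntpd on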


Create the OS users and groups
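
A sketch of the user and group creation (the group names match those removed at the end of this post; numeric UIDs/GIDs are omitted here but must match rac1 and rac2 exactly):

root@rac3# groupadd oinstall
root@rac3# groupadd dba
root@rac3# groupadd oper
root@rac3# groupadd asmadmin
root@rac3# groupadd asmdba
root@rac3# groupadd asmoper
root@rac3# useradd -g oinstall -G dba,oper,asmdba oracle
root@rac3# useradd -g oinstall -G asmadmin,asmdba,asmoper grid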


Create the directory structure on rac3, mirroring rac1:

mkdir -p /u01/app/12.1.0/grid_1
chown -R root:oinstall /u01/app/12.1.0/grid_1
mkdir -p /u01/app/oracle/product/12.1.0/dbhome_1
chown -R oracle:oinstall /u01/app/oracle/product/12.1.0/dbhome_1
chown -R grid:oinstall /u01/app/12.1.0/grid_1

Install the required packages

Configure the kernel parameters
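
On Oracle Linux both steps can usually be covered by the preinstall RPM (a sketch, assuming OL6 with the public yum repository configured; otherwise install the packages and kernel settings from the installation guide manually):

root@rac3# yum install -y oracle-rdbms-server-12cR1-preinstall
root@rac3# sysctl -p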


Verify SSH connectivity to rac1 and rac2 as the grid user

From oracle_home/oui/bin on existing Node:

./runSSHSetup.sh -user oracle -hosts "Existing_Node New_Node" -advanced -exverify

Configure secure shell for oracle user on all nodes

From oracle_home/oui/bin on rac1

./runSSHSetup.sh -user oracle -hosts "rac2 rac3" -advanced -exverify




Check that the shared disks are visible from rac3:

root@rac3# cd /etc/udev
root@rac3# ls
root@rac3# cd rules.d/
root@rac3# cat 99-oracle.rules


Check that rac3 is ready for the software installation


Verify New Node (HWOS)

From grid_home on rac1
$GRID_HOME/bin/cluvfy stage -post hwos -n rac3


Verify Peer (REFNODE)

From grid_home on rac1
$GRID_HOME/bin/cluvfy comp peer -refnode rac1 -n rac3 -orainv oinstall -osdba dba -verbose


Verify New Node (NEW NODE PRE)

From grid_home on rac1
$GRID_HOME/bin/cluvfy stage -pre nodeadd -n rac3 -fixup -verbose

You will be prompted to run a fixup script as root on rac3:
root@rac3# /tmp/CVU_12.1.0.1.0_grid/runfixup.sh


Installing the Grid Infrastructure software on the new node rac3:

First check that the node is ready:

cluvfy stage -pre nodeadd -n rac3 -verbose

cd /u01/app/12.1.0/grid_1
cd addnode

If you are using Grid Naming Service (GNS):

./addNode.sh -silent "CLUSTER_NEW_NODES={rac3}"

If you are NOT using Grid Naming Service (GNS):

./addNode.sh -silent "CLUSTER_NEW_NODES={rac3}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={rac3-vip}"

On the new node rac3, run the following script as root:

root.sh

If you are not using the Oracle Grid Naming Service (GNS),
you must add the name and address of rac3 to DNS.

Oracle Clusterware should now be running on the new node.
To verify the Oracle Clusterware installation on the new node,
run the following command on the newly configured node rac3:

$ cd /u01/app/12.1.0/grid_1/bin
$ ./cluvfy stage -post nodeadd -n rac3 -verbose

Check the clusterware stack:

crsctl check cluster
crsctl check has
crsctl status res -t
olsnodes -n

srvctl status asm -a
ocrcheck
crsctl query css votedisk


Verify that the new grid_home appears in inventory.xml on all nodes:

cd /u01/app/oraInventory/ContentsXML/
cat inventory.xml

Avoid changing host names after the Oracle Clusterware installation is complete,
including adding or removing domain qualifications.
Nodes with changed host names must be removed from the cluster and added back
with the new name.


Installing the RDBMS software on the new node rac3:

Run on the first node:

$ cd /u01/app/oracle/product/12.1.0/dbhome_1/addnode
$ ./addNode.sh -silent "CLUSTER_NEW_NODES={rac3}"

On the new node rac3, run the following script as root:
root.sh


Creating the ORCL3 instance on the new node rac3

You can add the instance to the cluster using the "Instance Management" option in DBCA
or with Enterprise Manager.

It can also be done in silent mode:

dbca -silent -addInstance -nodelist rac3 -gdbname ORCL -instanceName ORCL3 -sysDBAUserName sys -sysDBAPassword oracle

Verify:

srvctl status database -d orcl

Verify that the new oracle_home appears in inventory.xml on all nodes:

cd /u01/app/oraInventory/ContentsXML/
cat inventory.xml

crsctl status res -t

crsctl status res -t | grep -i offline


---------------------------------------------------------------------------------------------------------------------


Added the new server omega03
Configured SSH access


Check that the OS settings are ready for the cluster software installation:

[grid@omega01 ~]$  cluvfy stage -pre crsinst -n omega03 -verbose
Fix any reported problems.


Check the new node's readiness for the Oracle Clusterware installation:

[grid@omega01 ~]$ cd /u01/distrib/grid/
[grid@omega01 grid]$ ./runcluvfy.sh stage -pre nodeadd -n omega03

[grid@omega01 ~]$ crsctl check cluster -all



Install the cluster software on omega03

[grid@omega01 u01]$ cd /u01/app/grid/11.2.0/asm_1/oui/bin/
[grid@omega01 u01]$ export IGNORE_PREADDNODE_CHECKS=Y
[grid@omega01 bin]$ ./addNode.sh -silent "CLUSTER_NEW_NODES={omega03}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={omega03-vip}"

Run the scripts as root:

root@omega03 ~ #  /u01/app/oraInventory/orainstRoot.sh
root@omega03 ~ #  /u01/app/grid/11.2.0/asm_1/root.sh


Verify:

[grid@omega03 ~]$ crsctl check cluster -all


Verify the Oracle Clusterware installation on node omega03:

[grid@omega01 u01]$ cluvfy stage -post crsinst -n all

[grid@omega01 u01]$ crsctl status serverpool



Installing the Oracle RAC database software:

[oracle@omega01 ~]$ cd /u01/app/oracle/11.2.0/db_1/oui/bin/
[oracle@omega01 bin]$ ./addNode.sh -silent "CLUSTER_NEW_NODES={omega03}"


Run the script as root:

root@omega03 ~ #  /u01/app/oracle/11.2.0/db_1/root.sh


Verify:

[oracle@omega01 bin]$  crsctl status serverpool

[grid@omega03 ~]$ crsctl status res -t



Add the instance:

[grid@omega01 ~]$ dbca -silent -addInstance -nodeList omega03 -gdbName TESTDB -instanceName TESTDB3 -sysDBAUserName sys
Enter SYS user password:


Verify:

[grid@omega01 ~]$ crsctl status serverpool                                                                                                                
                                                                                                                                        
[grid@omega01 ~]$ srvctl add serverpool -g mypool -l 1 -u -1 -i 1 -n "omega01,omega02,omega03"
                                                                                                                                                        



Removing a node from the cluster



Verify that all the instances are up and running.

[oracle@omega03 ~]$ srvctl status database -d TESTDB


Check the resources running on node omega03

[grid@omega03 ~]$ crs | egrep 'omega03|STATE|--'



Verify the current OCR backup.

[grid@omega03 ~]$ ocrconfig -showbackup



Ensure that all the instances are registered in the default CRS Listener.
[grid@omega03 ~]$ lsnrctl status LISTENER_SCAN2

Start DBCA from a node other than the one that you are removing and select:
  --> "Real Application Clusters"
    --> "Instance Management"
      --> "Delete Instance"
        --> Accept the alert windows to delete the instance.
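
The same removal can be scripted instead of using the wizard. A silent-mode sketch for this TESTDB example (run from a node that stays in the cluster; the SYS password is prompted for if -sysDBAPassword is not given):

[oracle@omega01]$ dbca -silent -deleteInstance -nodeList omega03 -gdbName TESTDB -instanceName TESTDB3 -sysDBAUserName sys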



Verify that the instance has been deleted and its thread is disabled by querying gv$instance and v$thread:

SQL> select INST_ID, INSTANCE_NUMBER, INSTANCE_NAME, HOST_NAME from gv$instance;
SQL> select THREAD#, STATUS, INSTANCE from v$thread;


Verify that the thread for the deleted instance has been disabled. If it is still enabled, disable it as follows:

SQL> ALTER DATABASE DISABLE THREAD 3;
In this case THREAD# 3 was already disabled.


Delete the Node from the Cluster

$ srvctl disable listener -l <listener_name> -n <NodeToBeDeleted>
$ srvctl stop listener -l <listener_name> -n <NodeToBeDeleted>


Checking listeners:
[grid@omega03 ~]$ ps -elf | grep tns
There is no need to run the above commands here, as all listeners run from the GRID_HOME.



Run the following command from the $ORACLE_HOME/oui/bin directory
on the node that you are deleting to update the inventory on that node:


[oracle@omega03 ~]$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME CLUSTER_NODES=omega03 -local



Remove the Oracle RAC software by running the following command from the $ORACLE_HOME/deinstall directory on the node to be deleted:
[oracle@omega03 ~]$ $ORACLE_HOME/deinstall/deinstall -local



Update the node list on the remaining nodes as in the following example.

On omega01:
 

[root@omega03 Desktop]# ssh omega01
[root@omega01 ~]# su - oracle
Active ORACLE_SID:   TESTDB1

[oracle@omega01 ~]$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME CLUSTER_NODES=omega01,omega02
[oracle@omega01 ~]$ $ORACLE_HOME/OPatch/opatch lsinventory


Verify whether the node to be deleted is still active by using the following command from the $CRS_HOME/bin directory:

[grid@omega01 ~]$ olsnodes -s -t

On omega02:

[root@omega02 ~]# su - oracle
Active ORACLE_SID:   TESTDB2

[oracle@omega02 ~]$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME CLUSTER_NODES=omega01,omega02
[oracle@omega02 ~]$ $ORACLE_HOME/OPatch/opatch lsinventory
[root@omega02 ~]# su - grid
[grid@omega02 ~]$ olsnodes -s -t



Disable the Oracle Clusterware applications and daemons running on the node.
Run the rootcrs.pl script as root from the $CRS_HOME/crs/install directory on the node to be deleted
(if it is the last node, use the -lastnode option) as follows:

[root@omega03 Desktop]# $GRID_HOME/crs/install/rootcrs.pl -deconfig -force



From any node that you are not deleting, run the following command from the $CRS_HOME/bin directory as root to delete the node from the cluster:

[root@omega01 ~]# $GRID_HOME/bin/crsctl delete node -n omega03



Update the node list on the node to be deleted (omega03) by running the following command from the $CRS_HOME/oui/bin directory:

[grid@omega03 ~]$ $GRID_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$GRID_HOME CLUSTER_NODES=omega03 -local



Update the node list on the remaining nodes by running the following command from $CRS_HOME/oui/bin on each of the remaining nodes in the cluster:

on omega01:

[grid@omega01 ~]$ $GRID_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$GRID_HOME CLUSTER_NODES={omega01,omega02} -local
[grid@omega01 ~]$ $GRID_HOME/OPatch/opatch lsinventory 

on omega02:

[grid@omega02 ~]$ $GRID_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$GRID_HOME CLUSTER_NODES={omega01,omega02} -local
[grid@omega02 ~]$ $GRID_HOME/OPatch/opatch lsinventory 



Deinstall the Oracle Clusterware home from the node that you want to delete:

[grid@omega03 ~]$ $GRID_HOME/deinstall/deinstall -local



Check the cluster and resource status of our two-node cluster:

[grid@omega02 ~]$ olsnodes -s -t
[grid@omega02 ~]$ crs


--------------------------------------------------------------------------------------------------------------------

Removing a node (rac3) from the cluster

grid@rac1$ olsnodes
grid@rac1$ crsctl get cluster mode status
grid@rac1$ crsctl config gns
grid@rac1$ oifcfg getif
grid@rac1$ crsctl get node role config
grid@rac1$ asmcmd showclustermode
grid@rac1$ asmcmd showclusterstate
grid@rac1$ srvctl status asm -detail
grid@rac1$ crsctl get node role config -all
grid@rac1$ crsctl get node role status -all
grid@rac1$ crsctl status res -t

Before removing the node, remove it from Grid Control (OEM).

Back up the OCR from rac1:

root@rac1# /u01/app/11.2.0/grid/bin/ocrconfig -manualbackup


root@rac1# /u01/app/12.1.0/grid_1/bin/olsnodes -s
root@rac1# /u01/app/12.1.0/grid_1/bin/crsctl query css votedisk
root@rac1# /u01/app/12.1.0/grid_1/bin/ocrcheck
root@rac1# /u01/app/12.1.0/grid_1/bin/srvctl status database -d orcl
root@rac1# /u01/app/12.1.0/grid_1/bin/srvctl config service -d ORCL
root@rac1# /u01/app/12.1.0/grid_1/bin/srvctl status service -d orcl


Delete the ORCL3 instance

root@rac1# xhost +
access control disabled, clients can connect from any host

root@rac1# su - oracle
oracle@rac1$ which dbca
oracle@rac1$ dbca


Or in silent mode:

oracle@rac1$ dbca -silent -deleteInstance -nodeList rac3 -gdbName ORCL -instanceName ORCL3 -sysDBAUserName sys -sysDBAPassword oracle

Verify:
srvctl status database -d orcl
You should now see two instances.

oracle@rac1$ srvctl config database -d orcl -v
oracle@rac1$ sqlplus '/as sysdba'
SQL> select * from gv$instance
SQL> select * from v$thread
SQL> select * from v$log where thread#=3
SQL> select * from v$logfile

Drop the online redo logs and the undo tablespace of the removed thread, if they are still present.
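
A sketch of that cleanup, assuming the removed thread 3 owned redo log groups 5 and 6 and undo tablespace UNDOTBS3 (check v$log and the queries above first; group numbers and names are illustrative):

SQL> ALTER DATABASE DISABLE THREAD 3;
SQL> ALTER DATABASE DROP LOGFILE GROUP 5;
SQL> ALTER DATABASE DROP LOGFILE GROUP 6;
SQL> DROP TABLESPACE UNDOTBS3 INCLUDING CONTENTS AND DATAFILES;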

oracle@rac1$ srvctl config listener -a



Remove RDBMS Software

oracle@rac3$ cd $ORACLE_HOME/oui/bin
$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac3}" -local

# The "-local" flag is used in the above command so that, when we
# deinstall the Oracle software, only the local installation is removed.
# If this flag is not supplied, the Oracle binaries would be removed
# from all nodes.

Your oratab should not contain any database entries except for ASM entries.

Sample oratab file

root@rac3# cat /etc/oratab
+ASM3:/u01/app/11.2.0/grid:N            # line added by Agent


Deinstall Oracle Software.

root@rac3# su - oracle
oracle@rac3$
oracle@rac3$ cd $ORACLE_HOME/deinstall
oracle@rac3$ ./deinstall -local


On the remaining nodes (rac1,rac2) update the oraInventory.

root@rac1# su - oracle
oracle@rac1$ cd $ORACLE_HOME/oui/bin
oracle@rac1$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac1,rac2}"


Verify that the inventory.xml file on both nodes contains dbhome entries
only for rac1 and rac2.


oracle@rac1$ cat /u01/app/oraInventory/ContentsXML/inventory.xml

.....
<HOME NAME="OraDb11g_home1" LOC="/u01/app/oracle/product/11.2.0/dbhome_1" TYPE="O" IDX="2">
   <NODE_LIST>
      <NODE NAME="rac1"/>
      <NODE NAME="rac2"/>
   </NODE_LIST>
</HOME>


Remove the Clusterware from rac3.

root@rac1# cd /u01/app/11.2.0/grid/bin/
root@rac1# ./olsnodes -s -t
rac1    Active  Unpinned
rac2    Active  Unpinned
rac3    Active  Unpinned

The third node should be unpinned; if it is not, unpin it as follows:

root@rac1# ./crsctl unpin css -n rac3


Deconfigure Oracle Clusterware by executing the following on rac3:

root@rac3# cd /u01/app/11.2.0/grid/crs/install/
root@rac3# ./rootcrs.pl -deconfig -force


Delete the node from the cluster by running the following from rac1, then verify:

root@rac1# cd /u01/app/11.2.0/grid/bin/
root@rac1# ./crsctl delete node -n rac3
CRS-4661: Node rac3 successfully deleted.

root@rac1# ./olsnodes -t -s
rac1    Active  Unpinned
rac2    Active  Unpinned


Update the Oracle inventory from rac3:


grid@rac3$ cd $GRID_HOME/oui/bin
grid@rac3$ ./runInstaller -updateNodeList ORACLE_HOME=$GRID_HOME "CLUSTER_NODES={rac3}" CRS=TRUE -silent -local


Deinstall Grid Infrastructure Software from rac3.

grid@rac3$ cd $GRID_HOME/deinstall
grid@rac3$ ./deinstall -local


Run the following cleanup commands as root on rac3:

root@rac3# rm -rf /etc/oraInst.loc
root@rac3# rm -rf /opt/ORCLfmap
root@rac3# rm -rf /u01/app/11.2.0/
root@rac3# rm -rf /u01/app/oracle/


After the deinstall, make sure that Oracle Clusterware will not start on rac3
by checking the following:


root@rac3# diff /etc/inittab /etc/inittab.no_crs


Update the Oracle inventory on the remaining nodes by running the following command from rac1:

root@rac1# su - grid
grid@rac1$ cd $GRID_HOME/oui/bin
grid@rac1$ ./runInstaller -updateNodeList ORACLE_HOME=$GRID_HOME "CLUSTER_NODES={rac1,rac2}" CRS=TRUE -silent



Run the post-node-removal checks from rac1:

grid@rac1$ cluvfy stage -post nodedel -n rac3 -verbose

olsnodes -s -t
crsctl status res -t | grep -i rac3


Remove the Oracle ASMLib packages from node rac3.

root@rac3# /usr/sbin/oracleasm exit
root@rac3# rpm -qa | grep asm
root@rac3# rpm -ev oracleasmlib-2.0.4-1.el5 oracleasm-2.6.18-164.el5-2.0.5-1.el5 oracleasm-support-2.1.7-1.el5

Remove the leftover oracleasm files:

root@rac3# rm -f /etc/sysconfig/oracleasm.rpmsave
root@rac3# rm -f /etc/sysconfig/oracleasm-_dev_oracleasm
root@rac3# rm -f /etc/rc.d/rc2.d/S29oracleasm
root@rac3# rm -f /etc/rc.d/rc0.d/K20oracleasm
root@rac3# rm -f /etc/rc.d/rc5.d/S29oracleasm
root@rac3# rm -f /etc/rc.d/rc4.d/S29oracleasm
root@rac3# rm -f /etc/rc.d/rc1.d/K20oracleasm
root@rac3# rm -f /etc/rc.d/rc3.d/S29oracleasm
root@rac3# rm -f /etc/rc.d/rc6.d/K20oracleasm


Remove the users and groups:

root@rac3# userdel -r grid
root@rac3# userdel -r oracle
root@rac3# groupdel oinstall
root@rac3# groupdel asmadmin
root@rac3# groupdel asmdba
root@rac3# groupdel asmoper
root@rac3# groupdel dba
root@rac3# groupdel oper




