Monday, March 20, 2017

Add / Remove redo log groups in Oracle RAC

Viewing ONLINE and STANDBY redo logs

set lines 1000
column MEMBER format a50

select lf.group#,
       l_type.log_type as type,
       lf.member,
       l_type.size_mb
 from v$logfile lf
 join (
   select group#,'ONLINE' as log_type, bytes/1024/1024 as size_mb from v$log
   union
   select group#,'STANDBY' as log_type, bytes/1024/1024 as size_mb from v$standby_log) l_type
on lf.group#=l_type.group#
order by lf.group#;



Adding / removing logs

online

alter database add logfile group  1 '+DATA' size 512M;
alter database add logfile group  6 '+DATA' size 512M;

alter database drop logfile group 1;
alter database drop logfile group 6;




standby

alter database add standby logfile group  7 '+DATA' size 512M;
alter database add standby logfile group 13 '+DATA' size 512M;

alter database drop standby logfile group  7;
alter database drop standby logfile group 13;
 



Viewing the current redo log groups:


set lines 1000
column REDOLOG_FILE_NAME format a50

SELECT a.GROUP#,
       a.THREAD#,
       a.SEQUENCE#,
       a.ARCHIVED,
       a.STATUS,
       b.MEMBER AS REDOLOG_FILE_NAME,
       (a.BYTES/1024/1024) AS SIZE_MB
FROM v$log a
JOIN v$logfile b ON a.Group#=b.Group#
ORDER BY a.GROUP#;



Adding:

alter database add logfile thread 1 group 40 ('+REDO_B') size 5120m;
alter database add logfile thread 1 group 42 ('+REDO_B') size 5120m;
alter database add logfile thread 1 group 44 ('+REDO_B') size 5120m;
alter database add logfile thread 1 group 46 ('+REDO_B') size 5120m;
alter database add logfile thread 1 group 48 ('+REDO_B') size 5120m;

alter database add logfile thread 2 group 41 ('+REDO_B') size 5120m;
alter database add logfile thread 2 group 43 ('+REDO_B') size 5120m;
alter database add logfile thread 2 group 45 ('+REDO_B') size 5120m;
alter database add logfile thread 2 group 47 ('+REDO_B') size 5120m;
alter database add logfile thread 2 group 49 ('+REDO_B') size 5120m;

Multiplexing (adding a second member to each group):

ALTER DATABASE ADD LOGFILE MEMBER '+REDO_B' TO GROUP 40;
ALTER DATABASE ADD LOGFILE MEMBER '+REDO_B' TO GROUP 41;
ALTER DATABASE ADD LOGFILE MEMBER '+REDO_B' TO GROUP 42;
ALTER DATABASE ADD LOGFILE MEMBER '+REDO_B' TO GROUP 43;
ALTER DATABASE ADD LOGFILE MEMBER '+REDO_B' TO GROUP 44;
ALTER DATABASE ADD LOGFILE MEMBER '+REDO_B' TO GROUP 45;
ALTER DATABASE ADD LOGFILE MEMBER '+REDO_B' TO GROUP 46;
ALTER DATABASE ADD LOGFILE MEMBER '+REDO_B' TO GROUP 47;
ALTER DATABASE ADD LOGFILE MEMBER '+REDO_B' TO GROUP 48;
ALTER DATABASE ADD LOGFILE MEMBER '+REDO_B' TO GROUP 49;

Removing:

alter database drop logfile group 30;
alter database drop logfile group 31;
alter database drop logfile group 32;
alter database drop logfile group 33;
alter database drop logfile group 34;
alter database drop logfile group 35;
alter database drop logfile group 36;
alter database drop logfile group 37;
alter database drop logfile group 38;
alter database drop logfile group 39;


Examples:

SELECT a.GROUP#,
       a.THREAD#,
       a.SEQUENCE#,
       a.ARCHIVED,
       a.STATUS,
       b.MEMBER AS REDOLOG_FILE_NAME,
       (a.BYTES/1024/1024) AS SIZE_MB
FROM v$log a
JOIN v$logfile b ON a.Group#=b.Group#
ORDER BY a.GROUP#;

GROUP#  THREAD# SEQUENCE#  ARCHIVED STATUS      REDOLOG_FILE_NAME                       SIZE_MB

30    1    82494    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_30_1_1    5120
30    1    82494    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_30_1_2    5120

31    2    62915    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_31_2_1    5120
31    2    62915    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_31_2_2    5120

32    1    82495    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_32_1_1    5120
32    1    82495    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_32_1_2    5120

33    2    62916    YES    ACTIVE            +REDO_FLASH/rt2/onlinelog/group_33_2_1    5120
33    2    62916    YES    ACTIVE            +REDO_FLASH/rt2/onlinelog/group_33_2_2    5120

34    1    82496    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_34_1_1    5120
34    1    82496    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_34_1_2    5120

35    2    62917    NO    CURRENT            +REDO_FLASH/rt2/onlinelog/group_35_2_1    5120
35    2    62917    NO    CURRENT            +REDO_FLASH/rt2/onlinelog/group_35_2_2    5120

36    1    82497    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_36_1_1    5120
36    1    82497    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_36_1_2    5120

37    2    62913    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_37_2_1    5120
37    2    62913    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_37_2_2    5120

38    1    82498    NO    CURRENT            +REDO_FLASH/rt2/onlinelog/group_38_1_1    5120
38    1    82498    NO    CURRENT            +REDO_FLASH/rt2/onlinelog/group_38_1_2    5120

39    2    62914    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_39_2_1    5120
39    2    62914    YES    INACTIVE    +REDO_FLASH/rt2/onlinelog/group_39_2_2    5120


Adding redo log groups:

SQL> ALTER DATABASE ADD LOGFILE THREAD 1
GROUP 40 ('+REDO_FLASH/rt2/onlinelog/group_40_1_1') SIZE 5120m,
GROUP 40 ('+REDO_FLASH/rt2/onlinelog/group_40_1_2') SIZE 5120m;

SQL> ALTER DATABASE ADD LOGFILE THREAD 2
GROUP 41 ('+REDO_FLASH/rt2/onlinelog/group_41_2_1') SIZE 5120m,
GROUP 41 ('+REDO_FLASH/rt2/onlinelog/group_41_2_2') SIZE 5120m;

SQL> ALTER DATABASE ADD LOGFILE THREAD 1
GROUP 42 ('+REDO_FLASH/rt2/onlinelog/group_42_1_1') SIZE 5120m,
GROUP 42 ('+REDO_FLASH/rt2/onlinelog/group_42_1_2') SIZE 5120m;

SQL> ALTER DATABASE ADD LOGFILE THREAD 2
GROUP 43 ('+REDO_FLASH/rt2/onlinelog/group_43_2_1') SIZE 5120m,
GROUP 43 ('+REDO_FLASH/rt2/onlinelog/group_43_2_2') SIZE 5120m;

SQL> ALTER DATABASE ADD LOGFILE THREAD 1
GROUP 44 ('+REDO_FLASH/rt2/onlinelog/group_44_1_1') SIZE 5120m,
GROUP 44 ('+REDO_FLASH/rt2/onlinelog/group_44_1_2') SIZE 5120m;

SQL> ALTER DATABASE ADD LOGFILE THREAD 2
GROUP 45 ('+REDO_FLASH/rt2/onlinelog/group_45_2_1') SIZE 5120m,
GROUP 45 ('+REDO_FLASH/rt2/onlinelog/group_45_2_2') SIZE 5120m;

SQL> ALTER DATABASE ADD LOGFILE THREAD 1
GROUP 46 ('+REDO_FLASH/rt2/onlinelog/group_46_1_1') SIZE 5120m,
GROUP 46 ('+REDO_FLASH/rt2/onlinelog/group_46_1_2') SIZE 5120m;

SQL> ALTER DATABASE ADD LOGFILE THREAD 2
GROUP 47 ('+REDO_FLASH/rt2/onlinelog/group_47_2_1') SIZE 5120m,
GROUP 47 ('+REDO_FLASH/rt2/onlinelog/group_47_2_2') SIZE 5120m;

SQL> ALTER DATABASE ADD LOGFILE THREAD 1
GROUP 48 ('+REDO_FLASH/rt2/onlinelog/group_48_1_1') SIZE 5120m,
GROUP 48 ('+REDO_FLASH/rt2/onlinelog/group_48_1_2') SIZE 5120m;

SQL> ALTER DATABASE ADD LOGFILE THREAD 2
GROUP 49 ('+REDO_FLASH/rt2/onlinelog/group_49_2_1') SIZE 5120m,
GROUP 49 ('+REDO_FLASH/rt2/onlinelog/group_49_2_2') SIZE 5120m;



Or like this:

ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 40 '+REDO_FLASH/rt2/onlinelog/group_40_1_1' SIZE 5120m;
ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 40 '+REDO_FLASH/rt2/onlinelog/group_40_1_2' SIZE 5120m;

ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 41 '+REDO_FLASH/rt2/onlinelog/group_41_2_1' SIZE 5120m;
ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 41 '+REDO_FLASH/rt2/onlinelog/group_41_2_2' SIZE 5120m;

ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 42 '+REDO_FLASH/rt2/onlinelog/group_42_1_1' SIZE 5120m;
ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 42 '+REDO_FLASH/rt2/onlinelog/group_42_1_2' SIZE 5120m;

ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 43 '+REDO_FLASH/rt2/onlinelog/group_43_2_1' SIZE 5120m;
ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 43 '+REDO_FLASH/rt2/onlinelog/group_43_2_2' SIZE 5120m;

ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 44 '+REDO_FLASH/rt2/onlinelog/group_44_1_1' SIZE 5120m;
ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 44 '+REDO_FLASH/rt2/onlinelog/group_44_1_2' SIZE 5120m;

ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 45 '+REDO_FLASH/rt2/onlinelog/group_45_2_1' SIZE 5120m;
ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 45 '+REDO_FLASH/rt2/onlinelog/group_45_2_2' SIZE 5120m;

ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 46 '+REDO_FLASH/rt2/onlinelog/group_46_1_1' SIZE 5120m;
ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 46 '+REDO_FLASH/rt2/onlinelog/group_46_1_2' SIZE 5120m;

ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 47 '+REDO_FLASH/rt2/onlinelog/group_47_2_1' SIZE 5120m;
ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 47 '+REDO_FLASH/rt2/onlinelog/group_47_2_2' SIZE 5120m;

ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 48 '+REDO_FLASH/rt2/onlinelog/group_48_1_1' SIZE 5120m;
ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 48 '+REDO_FLASH/rt2/onlinelog/group_48_1_2' SIZE 5120m;

ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 49 '+REDO_FLASH/rt2/onlinelog/group_49_2_1' SIZE 5120m;
ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 49 '+REDO_FLASH/rt2/onlinelog/group_49_2_2' SIZE 5120m;



Only INACTIVE / UNUSED redo log groups can be dropped:

SQL> select group#,thread#,members,status from v$log;

group#  thread# members status

30    1    2    INACTIVE
31    2    2    INACTIVE
32    1    2    CURRENT
33    2    2    INACTIVE
34    1    2    INACTIVE
35    2    2    INACTIVE
36    1    2    INACTIVE
37    2    2    INACTIVE
38    1    2    INACTIVE
39    2    2    CURRENT


SQL> alter database drop logfile group 30;

SQL> select group#,thread#,members,status from v$log;

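If the group you want to drop is still CURRENT or ACTIVE, the usual workaround (a sketch) is to force a log switch and a global checkpoint and then re-check v$log:

SQL> alter system archive log current;
SQL> alter system checkpoint global;
SQL> select group#,thread#,members,status from v$log;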



Another example:

SQL> column REDOLOG_FILE_NAME format a50;

SQL> set lines 1000
SQL>
SELECT a.GROUP#,
       a.THREAD#,
       a.SEQUENCE#,
       a.ARCHIVED,
       a.STATUS,
       b.MEMBER AS REDOLOG_FILE_NAME,
       (a.BYTES/1024/1024) AS SIZE_MB
FROM v$log a
JOIN v$logfile b ON a.Group#=b.Group#
ORDER BY a.GROUP#;


    GROUP#    THREAD#  SEQUENCE# ARC STATUS           REDOLOG_FILE_NE              SIZE_MB
   ------- ---------- ---------- --- ---------------- --------------------------- ----------
         1          1          3 YES ACTIVE           +REDO/proddb/redo01.log      100
         2          1          4 NO  CURRENT          +REDO/proddb/redo02.log      100
         3          2          1 YES INACTIVE         +REDO/proddb/redo03.log      100
         4          2          2 NO  CURRENT          +REDO/proddb/redo04.log      100



SQL> ALTER DATABASE ADD LOGFILE THREAD 1
GROUP 5 ('+REDO/proddb/redo05.log') SIZE 100m,
GROUP 6 ('+REDO/proddb/redo06.log') SIZE 100m;

SQL>
SQL> ALTER DATABASE ADD LOGFILE THREAD 2
GROUP 7 ('+REDO/proddb/redo07.log') SIZE 100m,
GROUP 8 ('+REDO/proddb/redo08.log') SIZE 100m;


or like this:

ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 5 '+REDO' SIZE 100m;
ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 6 '+REDO' SIZE 100m;

ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 7 '+REDO' SIZE 100m;
ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 8 '+REDO' SIZE 100m;

Checking:

SQL> column REDOLOG_FILE_NAME format a50;

SQL> set lines 1000
SQL> SELECT a.GROUP#, a.THREAD#, a.SEQUENCE#,
 a.ARCHIVED, a.STATUS, b.MEMBER AS REDOLOG_FILE_NAME,
 (a.BYTES/1024/1024) AS SIZE_MB FROM v$log a
JOIN v$logfile b ON a.Group#=b.Group#
ORDER BY a.GROUP#;

    GROUP#    THREAD#  SEQUENCE# ARC STATUS           REDOLOG_FILE_NAME                                     SIZE_MB
---------- ---------- ---------- --- ---------------- -------------------------------------------------- ----------
         1          1          3 YES INACTIVE         +REDO/proddb/redo01.log                                 100
         2          1          4 NO  CURRENT          +REDO/proddb/redo02.log                                 100
         3          2          1 YES INACTIVE         +REDO/proddb/redo03.log                                 100
         4          2          2 NO  CURRENT          +REDO/proddb/redo04.log                                 100
         5          1          0 YES UNUSED           +REDO/proddb/redo05.log                                 100
         6          1          0 YES UNUSED           +REDO/proddb/redo06.log                                 100
         7          2          0 YES UNUSED           +REDO/proddb/redo07.log                                 100
         8          2          0 YES UNUSED           +REDO/proddb/redo08.log                                 100



Drop only INACTIVE or UNUSED groups:

SQL>  select group#,thread#,members,status from v$log;

    GROUP#    THREAD#    MEMBERS     STATUS
---------- ---------- ---------- ----------------
         1          1          2    INACTIVE
         2          1          2    INACTIVE
         3          2          2    CURRENT
         4          2          2    INACTIVE
         5          1          2    INACTIVE
         6          1          2    INACTIVE
         7          2          2    INACTIVE
         8          2          2    CURRENT


SQL> alter database drop logfile group 1;

Checking:

SQL>  select group#,thread#,members,status from v$log;






Sunday, March 19, 2017

asmcmd

$ asmcmd help

showversion
showversion --releasepath --softwarepath
showpatches
showclustermode
showclusterstate

lsdg
(v$asm_diskgroup_stat)

lsdg --discovery
(v$asm_diskgroup)
also shows dismounted disk groups

lsdg DATA

lsdg DATA -g
(gv$asm_diskgroup_stat)
shows the instance numbers in the cluster

lsdg DATA -g --discovery
shows the disk group on all cluster nodes, including dismounted ones


dsget

lsdsk

lsdsk --discovery
(v$asm_disk)
shows all disks

lsdsk -G DATA
lsdsk -G FRA
lsdsk -G REDO

lsdsk -g -G DATA
(gv$asm_disk_stat)
shows the instance_id

lsdsk --member
only disks that are members of ASM disk groups

lsdsk --candidate
free disks, candidates for a disk group

lsdsk -k -G DATA

lsdsk -p
with the disk path column

lsdsk -t
shows when the disks were created and mounted, and their paths

lsdsk --statistics
shows I/O statistics

lsdsk *02* -p --discovery
(wildcard pattern in the disk paths)

lsod
shows which processes are using the disks

lsod -G DATA

iostat
(v$asm_disk_iostat)

iostat -G FRA

iostat -e
also shows read/write errors

iostat -e -t
also shows read_time / write_time

iostat -G DATA --region
statistics by region: cold_reads/writes and hot_reads/writes

iostat -G DATA -e --io
I/O is now reported as a number of operations rather than bytes
(v$asm_disk_iostat)

spget
path to the server parameter file

lsct DATA
which ASM clients are using the disk group (and their ASM versions)

lsct DATA -g
the same, but for the whole cluster


pwd
ls
ls -l
ls -l -g
exit


$ asmcmd -p
the prompt will show the full current path

cd +
cd +DATA/TESTDB/DATAFILE
cd +

ls -l -s
space information in bytes

ls  --permission

du

du  +DATA/TESTDB/ONLINELOG

find  + sysaux*

find +FRA current*

find --type CONTROLFILE + *

lsof
shows all files opened by ASM

lsof -G FRA

lsof --dbname testdb
files opened by the testdb database

lsof -G FRA -C testdb1
for a specific database instance

lsattr -G FRA -l -m
disk group attributes

lsattr -G FRA -l -m size
lsattr -G FRA -l -m cell (for exadata)

exit


Non-interactive mode
$ asmcmd find + OCR*
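
A couple more non-interactive calls, as a sketch:

$ asmcmd lsdg
$ asmcmd ls -l +DATA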


Using sqlplus:
$ sqlplus "/as sysasm"

show parameter power

v$asm_estimate
gv$asm_estimate

v$asm_operation

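For example, a minimal query (sketch) to monitor a running rebalance and its estimated time to completion:

SQL> select inst_id, group_number, operation, state, power, sofar, est_work, est_minutes
       from gv$asm_operation;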


ASMCMD> lsdsk -G sap_data_new -p --discovery

Group_Num  Disk_Num      Incarn  Mount_Stat  Header_Stat  Mode_Stat  State   Path
        6         0  3916017890  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_17-ASM-SAP_DATA34
        6         1  3916017881  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_18-ASM-SAP_DATA35
        6         2  3916017882  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_19-ASM-SAP_DATA36
        6         3  3916017888  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_20-ASM-SAP_DATA37
        6         4  3916017887  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_21-ASM-SAP_DATA38
        6         5  3916017886  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_22-ASM-SAP_DATA39
        6         6  3916017877  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_23-ASM-SAP_DATA40
        6         7  3916017876  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_24-ASM-SAP_DATA41
        6         8  3916017883  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_25-ASM-SAP_DATA42
        6         9  3916017878  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_26-ASM-SAP_DATA43
        6        10  3916017885  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_27-ASM-SAP_DATA44
        6        11  3916017874  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_28-ASM-SAP_DATA45
        6        12  3916017889  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_29-ASM-SAP_DATA46
        6        13  3916017884  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_30-ASM-SAP_DATA47
        6        14  3916017875  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_31-ASM-SAP_DATA48
        6        15  3916017879  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_32-ASM-SAP_DATA49
        6        16  3916017880  CACHED      MEMBER       ONLINE     NORMAL  /dev/oraasm/HP3PARDR-VDI01_33-ASM-SAP_DATA50
ASMCMD>

On all nodes:
SQL> alter diskgroup sap_data_new dismount force;


ASMCMD> lsdg -g --discovery
Inst_ID  State       Type    Rebal  Sector  Block       AU  Total_MB   Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
      1  MOUNTED     EXTERN  N         512   4096  1048576   1048576   1047023                0         1047023              0             N  ARCHLOGS/
      2  MOUNTED     EXTERN  N         512   4096  1048576   1048576   1047023                0         1047023              0             N  ARCHLOGS/
      1  MOUNTED     EXTERN  N         512   4096  1048576    204800    204677                0          204677              0             N  REDO_B/
      2  MOUNTED     EXTERN  N         512   4096  1048576    204800    204677                0          204677              0             N  REDO_B/
      1  MOUNTED     EXTERN  N         512   4096  1048576    204800    102230                0          102230              0             N  REDO_FLASH/
      2  MOUNTED     EXTERN  N         512   4096  1048576    204800    102230                0          102230              0             N  REDO_FLASH/
      1  MOUNTED     EXTERN  N         512   4096  1048576    204800     73471                0           73471              0             N  RT_REDO/
      2  MOUNTED     EXTERN  N         512   4096  1048576    204800     73471                0           73471              0             N  RT_REDO/
      1  MOUNTED     EXTERN  N         512   4096  1048576  18350080  13859093                0        13859093              0             N  SAP_DATA/
      2  MOUNTED     EXTERN  N         512   4096  1048576  18350080  13859093                0        13859093              0             N  SAP_DATA/
      1  MOUNTED     EXTERN  N         512   4096  1048576  17825792   2836162                0         2836162              0             N  SAP_DATA_B/
      2  MOUNTED     EXTERN  N         512   4096  1048576  17825792   2836162                0         2836162              0             N  SAP_DATA_B/
      1  DISMOUNTED          N           0      0        0         0         0                0               0              0             N  SAP_DATA_NEW/
      2  DISMOUNTED          N           0      0        0         0         0                0               0              0             N  SAP_DATA_NEW/
      1  MOUNTED     EXTERN  N         512   4096  1048576     65536     60909                0           60909              0             Y  SAP_OCR/
      2  MOUNTED     EXTERN  N         512   4096  1048576     65536     60909                0           60909              0             Y  SAP_OCR/
ASMCMD>



SQL> drop diskgroup sap_data_new force including contents;

Diskgroup dropped.

SQL>



ASMCMD> lsdg -g --discovery
Inst_ID  State    Type    Rebal  Sector  Block       AU  Total_MB   Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
      2  MOUNTED  EXTERN  N         512   4096  1048576   1048576   1047023                0         1047023              0             N  ARCHLOGS/
      1  MOUNTED  EXTERN  N         512   4096  1048576   1048576   1047023                0         1047023              0             N  ARCHLOGS/
      2  MOUNTED  EXTERN  N         512   4096  1048576    204800    204677                0          204677              0             N  REDO_B/
      1  MOUNTED  EXTERN  N         512   4096  1048576    204800    204677                0          204677              0             N  REDO_B/
      2  MOUNTED  EXTERN  N         512   4096  1048576    204800    102230                0          102230              0             N  REDO_FLASH/
      1  MOUNTED  EXTERN  N         512   4096  1048576    204800    102230                0          102230              0             N  REDO_FLASH/
      2  MOUNTED  EXTERN  N         512   4096  1048576    204800     73471                0           73471              0             N  RT_REDO/
      1  MOUNTED  EXTERN  N         512   4096  1048576    204800     73471                0           73471              0             N  RT_REDO/
      2  MOUNTED  EXTERN  N         512   4096  1048576  18350080  13859093                0        13859093              0             N  SAP_DATA/
      1  MOUNTED  EXTERN  N         512   4096  1048576  18350080  13859093                0        13859093              0             N  SAP_DATA/
      2  MOUNTED  EXTERN  N         512   4096  1048576  17825792   2836162                0         2836162              0             N  SAP_DATA_B/
      1  MOUNTED  EXTERN  N         512   4096  1048576  17825792   2836162                0         2836162              0             N  SAP_DATA_B/
      2  MOUNTED  EXTERN  N         512   4096  1048576     65536     60909                0           60909              0             Y  SAP_OCR/
      1  MOUNTED  EXTERN  N         512   4096  1048576     65536     60909                0           60909              0             Y  SAP_OCR/
ASMCMD>











Saturday, March 18, 2017

Initialization parameters:


Cluster-specific parameters:

instance_number
instance_group
thread
cluster_database
cluster_database_instances
cluster_interconnects
remote_listener
local_listener
parallel_instance
max_commit_propagation_delay

Example:

TESTDB1.instance_number=1
TESTDB2.instance_number=2
TESTDB1.thread=1
TESTDB2.thread=2
*.cluster_database_instances=2
*.cluster_database=TRUE
*.remote_listener='rac-tdb-scan:1521'
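
A quick way (sketch) to see which SPFILE entries are instance-specific (SID-prefixed) and which are common (*):

SQL> select sid, name, value
       from v$spparameter
      where isspecified = 'TRUE'
      order by name, sid;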


Parameters that must have the same value on all instances:

active_instance_count
archive_lag_target
compatible
cluster_database
cluster_database_instances
cluster_interconnects      
control_files
db_block_size
db_domain
db_files
db_name
db_recovery_file_dest
db_recovery_file_dest_size
db_unique_name
dml_locks                     -- when 0
instance_type                -- rdbms or asm
max_commit_propagation_delay
parallel_max_servers
remote_login_password_file
trace_enabled
undo_management

Example:

*.cluster_database=true
*.cluster_database_instances=2
*.compatible='11.2.0.2'
*.undo_management='AUTO'
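
A quick sanity check (sketch) that lists parameters whose current values differ between instances; legitimately instance-specific parameters such as instance_number and thread will also show up here:

SQL> select name
       from gv$parameter
      group by name
     having count(distinct value) > 1;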


Parameters that have a unique value on each instance:


instance_number
instance_name
thread
undo_tablespace/rollback_segments

Example:

INST1.instance_number=1
INST1.instance_name=TESTDB1
INST1.thread=1
INST1.undo_tablespace='UNDOTBS1'

INST2.instance_number=2
INST2.instance_name='TESTDB2'
INST2.thread=2
INST2.undo_tablespace='UNDOTBS2'



Example initialization parameter file:

TESTDB2.__db_cache_size=111132278784
TESTDB1.__db_cache_size=109521666048
TESTDB1.__oracle_base='/oracle/diag'#ORACLE_BASE set from environment
TESTDB2.__oracle_base='/oracle/diag'#ORACLE_BASE set from environment
TESTDB2.__shared_io_pool_size=0
TESTDB1.__shared_io_pool_size=268435456
TESTDB2.__streams_pool_size=1207959552
TESTDB1.__streams_pool_size=1342177280

TESTDB1.dispatchers='(PROTOCOL=TCP)(SERVICE=TESTDB1XDB)'
TESTDB2.dispatchers='(PROTOCOL=TCP)(SERVICE=TESTDB2XDB)'
TESTDB1.instance_name='TESTDB1'
TESTDB2.instance_name='TESTDB2'
TESTDB1.instance_number=1
TESTDB2.instance_number=2
TESTDB1.thread=1
TESTDB2.thread=2
TESTDB1.undo_tablespace='PSAPUNDO'
TESTDB2.undo_tablespace='PSAPUNDO2'

*._awr_mmon_deep_purge_all_expired=TRUE
*._fix_control='5099019:ON','5705630:ON','6055658:OFF','6120483:OFF','6399597:ON','6430500:ON','6440977:ON','6626018:ON','6972291:ON','7168184:OFF','7658097:ON','8937971:ON','9196440:ON','9495669:ON','13077335:ON','13627489:ON','14255600:ON','14595273:ON','18405517:2','20355502:8','14846352:OFF','22540411:ON','10038517:OFF'#Note 1888485 RECOMMENDED SETTINGS
*._high_priority_processes='LMS*|LM*|LCK0|GCR*|DIAG|CKPT|DBRM|RMS0|LGWR'
*._in_memory_undo=FALSE
*._ktb_debug_flags=8
*._MUTEX_WAIT_SCHEME=1
*._MUTEX_WAIT_TIME=10
*._OPTIM_PEEK_USER_BINDS=false
*._OPTIMIZER_ADAPTIVE_CURSOR_SHARING=false
*._optimizer_aggr_groupby_elim=FALSE
*._optimizer_batch_table_access_by_rowid=FALSE
*._OPTIMIZER_EXTENDED_CURSOR_SHARING_REL='none'
*._optimizer_reduce_groupby_key=FALSE
*._OPTIMIZER_USE_FEEDBACK=false
*._rowsets_enabled=FALSE
*._securefiles_concurrency_estimate=50
*._suppress_identifiers_on_dupkey=TRUE
*._use_single_log_writer='TRUE'

*.audit_file_dest='/oracle/rdbms/12.1.0.2/TESTDB/rdbms/audit'
*.audit_sys_operations=true
*.audit_trail='DB'
*.cluster_database_instances=2
*.cluster_database=TRUE
*.compatible='12.1.0.2.0'
*.control_file_record_keep_time=30
*.control_files='+TST_DATA/TESTDB/controlfile/current.774.882710569'#Set by RMAN
*.db_block_size=8192
*.db_cache_size=111132278784
*.db_create_file_dest='+TST_DATA'
*.db_create_online_log_dest_1='+TS_REDO'
*.db_file_name_convert='+DATA','+TST_DATA'
*.db_files=1024
*.db_name='TESTDB'
*.db_recovery_file_dest_size=30000M
*.db_unique_name='TESTDB'
*.event='10027','10028','10142','10183','10191','10995 level 2','38068 level 100','38085','38087','44951 level 1024'#SAP_121022_201503 RECOMMENDED SETTINGS
*.fal_client='TESTDB'
*.fal_server='TESTDB'
*.fast_start_io_target=0
*.fast_start_mttr_target=0
*.filesystemio_options='SETALL'
*.JAVA_POOL_SIZE=261196308
*.LARGE_POOL_SIZE=261196308
*.log_archive_config='NODG_CONFIG'
*.log_archive_dest_1='LOCATION=+ARCHLOGS/TESTDB'
*.log_archive_dest_2=''
*.log_archive_dest_state_2='ENABLE'
*.log_archive_format='%t_%s_%r.dbf'
*.log_checkpoint_interval=3000000
*.log_checkpoint_timeout=0
*.log_checkpoints_to_alert=true
*.log_file_name_convert='/oracle/TESTDB/','+DATA/TESTDB/'
*.max_dump_file_size='20000'
*.open_cursors=2000
*.optimizer_adaptive_features=FALSE
*.optimizer_capture_sql_plan_baselines=FALSE
*.optimizer_index_cost_adj=20
*.parallel_execution_message_size=16384
*.parallel_max_servers=0
*.parallel_min_servers=0
*.parallel_threads_per_cpu=1
*.pga_aggregate_target=36045090422
*.processes=1000
*.query_rewrite_enabled='false'
*.recyclebin='off'
*.remote_listener='rac-tdb-scan:1521'
*.remote_login_passwordfile='exclusive'
*.replication_dependency_tracking=false
*.resource_manager_plan=''
*.service_names='TESTDB'
*.sessions=2000
*.shared_pool_size=11753833833
*.standby_archive_dest='+ARCHLOGS/TESTDB'
*.standby_file_management='AUTO'
*.undo_retention=86400
*.use_large_pages='only'




Adding a new node to the cluster:


Check networking connectivity / ASM disk status at the OS level

[grid@omega01]$ ssh omega03 date
[grid@omega01]$ nslookup omega03
[root@omega01]# ping omega03.localdomain.ru
[root@omega01]# ping 192.168.2.73

[root@omega03]# /etc/init.d/oracleasm listdisks
DATA1
DATA2
DATA3
OCR1
OCR2
OCR3

[root@omega03]# ls -l  /dev/oracleasm/*
/dev/oracleasm/disks:
total 0
brw-rw---- 1 grid asmadmin 8, 17 Aug 30 09:21 DATA1
brw-rw---- 1 grid asmadmin 8, 33 Aug 29 14:16 DATA2
brw-rw---- 1 grid asmadmin 8, 49 Aug 29 14:16 DATA3
brw-rw---- 1 grid asmadmin 8, 65 Aug 29 14:16 OCR1
brw-rw---- 1 grid asmadmin 8, 81 Aug 29 14:16 OCR2
brw-rw---- 1 grid asmadmin 8, 97 Aug 29 14:16 OCR3


You can also dump the ASM header by running kfed if addnode.sh failed but its file copy operation succeeded

[grid@omega03]$ $GRID_HOME/bin/kfed  read /dev/oracleasm/disks/DATA2  | grep name
kfdhdb.dskname:               DATA_0001 ; 0x028: length=9
kfdhdb.grpname:                    DATA ; 0x048: length=4
kfdhdb.fgname:                DATA_0001 ; 0x068: length=9
kfdhdb.capname:                         ; 0x088: length=0

Use kfod to get an idea about the ASM disk status

[grid@omega03]$ kfod asm_diskstring='/dev/oracleasm/disks/*' nohdr=true verbose=true disks=all status=true op=disks
5114 CANDIDATE /dev/oracleasm/disks/DATA1 grid     asmadmin
5114 MEMBER    /dev/oracleasm/disks/DATA2 grid     asmadmin
5114 MEMBER    /dev/oracleasm/disks/DATA3 grid     asmadmin
2047 CANDIDATE /dev/oracleasm/disks/OCR1  grid     asmadmin
2047 CANDIDATE /dev/oracleasm/disks/OCR2  grid     asmadmin
2047 CANDIDATE /dev/oracleasm/disks/OCR3  grid     asmadmin



Verify the current setup with the following cluvfy commands:

[grid@omega01]$ cluvfy stage -post hwos -n omega01,omega03
[grid@omega01]$ cluvfy stage -pre nodeadd -n omega03 | grep PRV
[grid@omega01]$ cluvfy stage -pre crsinst -n omega03
[grid@omega01]$ cluvfy comp peer -refnode omega01 -n omega03 -orainv oinstall -osdba asmdba -verbose
use IGNORE_PREADDNODE_CHECKS if needed


Run addNode.sh for the Clusterware (GRID_HOME)

Set IGNORE_PREADDNODE_CHECKS=Y when there are explainable cluvfy errors such as PRVG-1013, PRVF-5636, PRVF-7617
For GNS clusters use CLUSTER_NEW_NODES
For NON-GNS cluster use CLUSTER_NEW_NODES and CLUSTER_NEW_VIRTUAL_HOSTNAMES

Run addnode.sh

[grid@omega01]$ setenv IGNORE_PREADDNODE_CHECKS Y
[grid@omega01]$ cd $GRID_HOME/oui/bin
[grid@omega01]$ ./addNode.sh "CLUSTER_NEW_NODES={omega03}"


Run required root scripts

[root@omega03]# /u01/app/oraInventory/orainstRoot.sh
[root@omega03]# /u01/app/11203/grid/root.sh


Run cluvfy and crsctl stat res -t   to verify cluster node add installation

[root@omega01]# my_crs_stat


Verify post CRS status with cluvfy

[grid@omega01]$ cluvfy stage -post crsinst -n omega01,omega02,omega03
[grid@omega01]$ cluvfy stage -post nodeadd -n omega03

Ignore well known errors like:
PRVF-5217 : An error occurred while trying to look up IP address for "omega01.localdomain.ru"


Install RAC database software on omega03

Install RAC database software on omega03 as oracle owner

[oracle@omega01]$ id
uid=54321(oracle) gid=54321(oinstall) groups=54321(oinstall),501(vboxsf),506(asmdba),54322(dba)

[oracle@omega01]$ cd $ORACLE_HOME/oui/bin
[oracle@omega01]$ ./addNode.sh "CLUSTER_NEW_NODES={omega03}"


[root@omega03]# /u01/app/oracle/product/11203/testdb/root.sh


Invoke dbca and add instance TESTDB3 on node OMEGA03

Run dbca as oracle owner:
[oracle@omega03]$ dbca
  Oracle RAC cluster database
   Instance Management
     Add an Instance
       Select your RAC database (TESTDB)
         Select instance: TESTDB3 - Host: OMEGA03
          Accept the default values   for
          Initialization Parameters
             Instance     Name                          Value
             TESTDB3     instance_number     3
             TESTDB3     thread                      3
             TESTDB3     undo_tablespace     UNDOTBS3   
           Tablespaces
             Name     Type                     Extent Management
             UNDOTBS3     PERMANENT , UNDO     LOCAL 
           Data Files
             Name                     Tablespace     Size(M)
             OMF_UNDOTBS3_DATAFILE_0    UNDOTBS3     100
           Redo Log Groups
             Group  Size(K)     Thread
              5     51200             3
              6     51200


Verify cluster status using dbca, cluvfy

[oracle@omega01]$ srvctl status database -d TESTDB
[grid@omega01]$ cluvfy comp admprv -o db_config -d $ORACLE_HOME -n omega01,omega02,omega03 -verbose


--------------------------------------------------------------------------------------------------

Add the new node rac3 to /etc/hosts on all nodes:
cat /etc/hosts

127.0.0.1 localhost localhost.localdomain

192.168.1.231  rac1.localdomain      rac1
10.10.10.231   rac1-priv.localdomain rac1-priv
192.168.1.234  rac1-vip.localdomain  rac1-vip

192.168.1.232  rac2.localdomain      rac2
10.10.10.232   rac2-priv.localdomain rac2-priv
192.168.1.235  rac2-vip.localdomain  rac2-vip

192.168.1.233  rac3.localdomain      rac3
10.10.10.233   rac3-priv.localdomain rac3-priv
192.168.1.236  rac3-vip.localdomain  rac3-vip

192.168.1.237 rac-scan.localdomain


Check network reachability of the new node rac3

root@rac3# ping 192.168.1.231
root@rac3# ping 192.168.1.232
root@rac3# hostname

root@rac1# ping rac3

Network checked
DNS updated
/etc/hosts updated


Configure ntpd on the new node


Create the OS users


Create the directory structure on node rac3, the same as on node rac1.

mkdir -p /u01/app/12.1.0/grid_1
chown -R root:oinstall /u01/app/12.1.0/grid_1
mkdir -p /u01/app/oracle/product/12.1.0/dbhome_1
chown -R oracle:oinstall /u01/app/oracle/product/12.1.0/dbhome_1
chown -R grid:oinstall /u01/app/12.1.0/grid_1

Install the required packages

Configure the kernel parameters, for example:

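As a sketch only (typical values from the Oracle installation guide; kernel.shmmax/shmall depend on the amount of RAM and are omitted here, so verify against the documentation for your release), the usual entries in /etc/sysctl.conf are:

fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576

Apply them with:

root@rac3# sysctl -p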

Check ssh connectivity to rac1 and rac2 as the grid user

From oracle_home/oui/bin on existing Node:

./runSSHSetup.sh -user oracle -hosts "Existing_Node New_Node" -advanced -exverify

Configure secure shell for oracle user on all nodes

From oracle_home/oui/bin on rac1

./runSSHSetup.sh -user oracle -hosts "rac2 rac3" -advanced -exverify




Check that the disks are visible from node rac3

root@rac3# cd /etc/udev
root@rac3# ls
root@rac3# cd rules.d/
root@rac3# cat 99-oracle.rules


Check that node rac3 is prepared for the software installation


Verify New Node (HWOS)

From grid_home on rac1
$GRID_HOME/bin/cluvfy stage -post hwos -n rac3


Verify Peer (REFNODE)

From grid_home on rac1
$GRID_HOME/bin/cluvfy comp peer -refnode rac1 -n rac3 -orainv oinstall -osdba dba -verbose


Verify New Node (NEW NODE PRE)

From grid_home on rac1
$GRID_HOME/bin/cluvfy stage -pre nodeadd -n rac3 -fixup -verbose

You will be prompted to run a fixup script as root on rac3
root@rac3# /tmp/CVU_12.1.0.1.0_grid/runfixup.sh


Installing the GRID software on the new node rac3:

verify that the node is ready:

cluvfy stage -pre nodeadd -n rac3 -verbose

cd /u01/app/12.1.0/grid_1
cd addnode

If you are using Grid Naming Service (GNS):

./addNode.sh -silent "CLUSTER_NEW_NODES={rac3}"

If you are not using Grid Naming Service (GNS):

./addNode.sh -silent "CLUSTER_NEW_NODES={rac3}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={rac3-vip}"

On the new node rac3, run the following script as root:

root.sh

If you are not using Oracle Grid Naming Service (GNS),
you must add the name and address for rac3 to DNS.

Now you need to start Oracle Clusterware on the new node.
To verify the Oracle Clusterware installation on the new node,
run the following command on the newly configured node rac3:

$ cd /u01/app/12.1.0/grid/bin
$ ./cluvfy stage -post nodeadd -n rac3 -verbose

Check the stack

crsctl check cluster
crsctl check has
crsctl status res -t
olsnodes -n

srvctl status asm -a
ocrcheck
crsctl query css votedisk


Verify that the new grid_home appears in inventory.xml on all nodes.

cd /u01/app/oraInventory/ContentsXML/
cat inventory.xml

Avoid changing host names after the Oracle Clusterware installation is complete,
including adding or removing domain qualifications.
Nodes with changed host names must be removed from the cluster and added
back with the new name.


Installing the RDBMS software on the new node rac3:

On the first node, run:

$ cd /u01/app/oracle/product/12.1.0/dbhome_1/addnode
$ ./addNode.sh -silent "CLUSTER_NEW_NODES={rac3}"

On the new node rac3, run the following script as root:
root.sh


Creating instance ORCL3 on the new node rac3

You can add the instance to the cluster using the "Instance Management" option in DBCA
or with Enterprise Manager.

But it can also be done in silent mode:

dbca -silent -addInstance -nodelist rac3 -gdbname ORCL -instanceName ORCL3 -sysDBAUserName sys -sysDBAPassword oracle

Check

srvctl status database -d orcl

Verify that the new oracle_home appears in inventory.xml on all nodes.

cd /u01/app/oraInventory/ContentsXML/
cat inventory.xml

crsctl status res -t

crsctl status res -t | grep -i offline


---------------------------------------------------------------------------------------------------------------------


Added the new server omega03
Granted ssh access


Check the OS settings for readiness to install the cluster software:

[grid@omega01 ~]$  cluvfy stage -pre crsinst -n omega03 -verbose
Fix any reported problems:


Check the new node's readiness for the Oracle Clusterware installation:

[grid@omega01 ~]$ cd /u01/distrib/grid/
[grid@omega01 grid]$./runcluvfy.sh stage -pre nodeadd -n omega03

[grid@omega01 ~]$crsctl check cluster -all



Install the cluster software on omega03

[grid@omega01 u01]$ cd /u01/app/grid/11.2.0/asm_1/oui/bin/
[grid@omega01 u01]$ export IGNORE_PREADDNODE_CHECKS=Y
[grid@omega01 bin]$ ./addNode.sh -silent "CLUSTER_NEW_NODES={omega03}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={omega03-vip}"

Run the scripts as root:

root@omega03 ~ #  /u01/app/oraInventory/orainstRoot.sh
root@omega03 ~ #  /u01/app/grid/11.2.0/asm_1/root.sh


Check:

[grid@omega03 ~]$ crsctl check cluster -all


Verify the Oracle Clusterware installation on node omega03

[grid@omega01 u01]$ cluvfy stage -post crsinst -n all

[grid@omega01 u01]$ crsctl status serverpool



Installing the Oracle RAC software:

[oracle@omega01 ~]$ cd /u01/app/oracle/11.2.0/db_1/oui/bin/
[oracle@omega01 bin]$ ./addNode.sh -silent "CLUSTER_NEW_NODES={omega03}"


Run the script as root:

root@omega03 ~ #  /u01/app/oracle/11.2.0/db_1/root.sh


Check:

[oracle@omega01 bin]$  crsctl status serverpool

[grid@omega03 ~]$ crsctl status res -t



Add the instance:

[grid@omega01 ~]$ dbca -silent -addInstance -nodeList omega03 -gdbName TESTDB3_OMEGA03 -instanceName TESTDB3 -sysDBAUserName sys                            
Enter SYS user password:


Check:

[grid@omega01 ~]$ crsctl status serverpool                                                                                                                
                                                                                                                                        
[grid@omega01 ~]$ srvctl add serverpool -g mypool -l 1 -u -1 -i 1 -n "omega01,omega02,omega03"
                                                                                                                                                        



Removing a node from the cluster



Verify that all the instances are up and running.

[oracle@omega03 ~]$ srvctl status database -d TESTDB


Check resources running on node omega03

[grid@omega03 ~]$ crs | egrep 'omega03|STATE|--'



Verify the current ocr backup.

[grid@omega03 ~]$ ocrconfig -showbackup



Ensure that all the instances are registered in the default CRS Listener.
[grid@omega03 ~]$ lsnrctl status LISTENER_SCAN2

Start DBCA from a node other than the one that you are removing and select
  -->"Real Application Clusters"
    --> "Instance Management"
     -->  "Delete Instance".
      --> Accept the alert windows to delete the instance.



Verify that the instance has been deleted and thread is disabled by querying gv$instance and v$thread. 

SQL> select INST_ID, INSTANCE_NUMBER, INSTANCE_NAME, HOST_NAME from gv$instance;
SQL> select THREAD#, STATUS, INSTANCE from v$thread;


Verify that the thread for the deleted instance has been disabled. If it is still enabled, disable it as follows:

SQL> ALTER DATABASE DISABLE THREAD 3;
(in this example THREAD# 3 was already disabled)


Delete the Node from the Cluster

$ srvctl disable listener -l < listener_name > -n < NodeToBeDeleted >
$ srvctl stop listener -l < listener_name > -n < NodeToBeDeleted >


Checking listeners:
[grid@omega03 ~]$ ps -elf | grep tns
No need to run the above commands, as all listeners run from the GRID_HOME



Run the following command from the $ORACLE_HOME/oui/bin directory
on the node that you are deleting to update the inventory on that node:


[oracle@omega03 ~]$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME CLUSTER_NODES=omega03 -local



Remove the Oracle RAC software by running the following command on the node to be deleted, from the $ORACLE_HOME/deinstall directory:
[oracle@omega03 ~]$ $ORACLE_HOME/deinstall/deinstall -local



Update the nodes list on the remaining nodes as in the following example:

omega01:
 

[root@omega03 Desktop]# ssh omega01
[root@omega01 ~]# su - oracle
Active ORACLE_SID:   TESTDB1

[oracle@omega01 ~]$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME CLUSTER_NODES=omega01,omega02
[oracle@omega01 ~]$ $ORACLE_HOME/OPatch/opatch lsinventory


Verify whether the node to be deleted is active or not by using following command from the $CRS_HOME/bin directory:

[grid@omega01 ~]$ olsnodes -s -t

On omega02:

[root@omega02 ~]# su - oracle
Active ORACLE_SID:   TESTDB2

[oracle@omega02 ~]$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME CLUSTER_NODES=omega01,omega02
[oracle@omega02 ~]$ $ORACLE_HOME/OPatch/opatch lsinventory
[root@omega02 ~]# su - grid
[grid@omega02 ~]$ olsnodes -s -t



Disable the Oracle Clusterware applications and daemons running on the node.
Run the rootcrs.pl script as root from $CRS_HOME/crs/install/ on the node to be deleted
(if it is last node use the option -lastnode) as follows:

[root@omega03 Desktop]# $GRID_HOME/crs/install/rootcrs.pl -deconfig -force



From any node that you are not deleting, run the following command from the $CRS_HOME/bin directory as root to delete the node from the cluster:

[root@omega01 ~]# $GRID_HOME/bin/crsctl delete node -n omega03



Update the node list on the node to be deleted ( omega03) , run the following command from the CRS_HOME/oui/bin directory:

[grid@omega03 ~]$ $GRID_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$GRID_HOME CLUSTER_NODES=omega03 -local



Update the node list on the remaining nodes by running the following command from $CRS_HOME/oui/bin on each of the remaining nodes in the cluster:

on omega01:

[grid@omega01 ~]$ $GRID_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$GRID_HOME CLUSTER_NODES={omega01,omega02} -local
[grid@omega01 ~]$ $GRID_HOME/OPatch/opatch lsinventory 

on omega02:

[grid@omega02 ~]$ $GRID_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$GRID_HOME CLUSTER_NODES={omega01,omega02} -local
[grid@omega02 ~]$ $GRID_HOME/OPatch/opatch lsinventory 



Deinstall the Oracle Clusterware home from the node that you want to delete:

[grid@omega03 ~]$ $GRID_HOME/deinstall/deinstall -local



Check the cluster and resource status of our 2-node cluster

[grid@omega02 ~]$ olsnodes -s -t
[grid@omega02 ~]$ crs


--------------------------------------------------------------------------------------------------------------------

Removing a node from the cluster

grid@rac1$ olsnodes
grid@rac1$ crsctl get cluster mode status
grid@rac1$ crsctl config gns
grid@rac1$ oifcfg getif
grid@rac1$ crsctl get node role config
grid@rac1$ asmcmd showclustermode
grid@rac1$ asmcmd showclusterstate
grid@rac1$ srvctl status asm -detail
grid@rac1$ crsctl get node role config -all
grid@rac1$ crsctl get node role status -all
grid@rac1$ crsctl status res -t

Before removing the node, remove it from OEM Grid Control

Backup OCR from rac1

root@rac1# /u01/app/11.2.0/grid/bin/ocrconfig -manualbackup


root@rac1# /u01/app/12.1.0/grid_1/bin/olsnodes -s
root@rac1# /u01/app/12.1.0/grid_1/bin/crsctl query css votedisk
root@rac1# /u01/app/12.1.0/grid_1/bin/ocrcheck
root@rac1# /u01/app/12.1.0/grid_1/bin/srvctl status database -d orcl
root@rac1# /u01/app/12.1.0/grid_1/bin/srvctl config service -d ORCL
root@rac1# /u01/app/12.1.0/grid_1/bin/srvctl status service -d orcl


Remove instance ORCL3

root@rac1# xhost +
access control disabled, clients can connect from any host

root@rac1# su - oracle
oracle@rac1$ which dbca
oracle@rac1$ dbca


Or like this:

oracle@rac1$ dbca -silent -deleteInstance -nodeList rac3 -gdbName ORCL -instanceName ORCL3 -sysDBAUserName sys -sysDBAPassword oracle

Check:
srvctl status database -d orcl
you should now see two instances

oracle@rac1$ srvctl config database -d orcl -v
oracle@rac1$ sqlplus '/as sysdba'
SQL> select * from gv$instance
SQL> select * from v$thread
SQL> select * from v$log where thread#=3
SQL> select * from v$logfile

Remove the online redo logs and undo tablespace of the deleted instance, if they still exist
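
A sketch, assuming (hypothetically) that the deleted thread 3 still owns redo log groups 5 and 6 and undo tablespace UNDOTBS3:

SQL> alter database disable thread 3;
SQL> alter database drop logfile group 5;
SQL> alter database drop logfile group 6;
SQL> drop tablespace UNDOTBS3 including contents and datafiles;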

oracle@rac1$ srvctl config listener -a



Remove RDBMS Software

oracle@rac3$ cd $ORACLE_HOME/oui/bin
$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac3}" -local

# The "-local" flag is used in the command above so that when we
# deinstall the Oracle software, only the local installation is
# removed. If this flag is not supplied, the Oracle binaries
# on all the nodes would be removed.

Your oratab should not contain any database entries except for ASM entries.

Sample oratab file

root@rac3# cat /etc/oratab
+ASM3:/u01/app/11.2.0/grid:N            # line added by Agent


Deinstall Oracle Software.

root@rac3# su - oracle
oracle@rac3$
oracle@rac3$ cd $ORACLE_HOME/deinstall
oracle@rac3$ ./deinstall -local


On the remaining nodes (rac1,rac2) update the oraInventory.

root@rac1# su - oracle
oracle@rac1$ cd $ORACLE_HOME/oui/bin
oracle@rac1$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac1,rac2}"


Verify that the Inventory.xml file on both nodes should only contain
dbhome entries for rac1 and rac2.


oracle@rac1$ cat /u01/app/oraInventory/ContentsXML/inventory.xml

.....
<HOME NAME="OraDb11g_home1" LOC="/u01/app/oracle/product/11.2.0/dbhome_1" TYPE="O" IDX="2">
   <NODE_LIST>
      <NODE NAME="rac1"/>
      <NODE NAME="rac2"/>
   </NODE_LIST>
</HOME>


Remove Clusterware from the rac3.

root@rac1# cd /u01/app/11.2.0/grid/bin/
root@rac1# ./olsnodes -s -t
rac1    Active  Unpinned
rac2    Active  Unpinned
rac3    Active  Unpinned

The third node should be unpinned; if it is not, do the following.

root@rac1# ./crsctl unpin css -n rac3


Disable Oracle clusterware by executing the following from rac3.

root@rac3# cd /u01/app/11.2.0/grid/crs/install/
root@rac3# ./rootcrs.pl -deconfig -force


Run the following from rac1 to verify.

root@rac1# cd /u01/app/11.2.0/grid/bin/
root@rac1# ./crsctl delete node -n rac3
CRS-4661: Node rac3 successfully deleted.

root@rac1# ./olsnodes -t -s
rac1    Active  Unpinned
rac2    Active  Unpinned


Update Oracle inventory from rac3


grid@rac3$ cd $GRID_HOME/oui/bin
grid@rac3$ ./runInstaller -updateNodeList ORACLE_HOME=$GRID_HOME "CLUSTER_NODES={rac3}" CRS=TRUE -silent -local


Deinstall Grid Infrastructure Software from rac3.

grid@rac3$ cd $GRID_HOME/deinstall
grid@rac3$ ./deinstall -local


Run the specified commands as root from rac3.

root@rac3# rm -rf /etc/oraInst.loc
root@rac3# rm -rf /opt/ORCLfmap
root@rac3# rm -rf /u01/app/11.2.0/
root@rac3# rm -rf /u01/app/oracle/


After the de-install make sure that oracle clusterware does not start
by checking the following.


root@rac3# diff /etc/inittab /etc/inittab.no_crs


Update Oracle Inventory on all nodes by running the following command from rac1.

root@rac1# su - grid
grid@rac1$ cd $GRID_HOME/oui/bin
grid@rac1$ ./runInstaller -updateNodeList ORACLE_HOME=$GRID_HOME "CLUSTER_NODES={rac1,rac2}" CRS=TRUE -silent



Do post node removal checks from rac1.

grid@rac1$ cluvfy stage -post nodedel -n rac3 -verbose

olsnodes -s -t
crsctl status res -t | grep -i rac3


Remove Oracle ASM (ASMLib) on node rac3.

root@rac3# /usr/sbin/oracleasm exit
root@rac3# rpm -qa | grep asm
root@rac3# rpm -ev oracleasmlib-2.0.4-1.el5 oracleasm-2.6.18-164.el5-2.0.5-1.el5 oracleasm-support-2.1.7-1.el5

Remove the leftover files

root@rac3# rm -f /etc/sysconfig/oracleasm.rpmsave
root@rac3# rm -f /etc/sysconfig/oracleasm-_dev_oracleasm
root@rac3# rm -f /etc/rc.d/rc2.d/S29oracleasm
root@rac3# rm -f /etc/rc.d/rc0.d/K20oracleasm
root@rac3# rm -f /etc/rc.d/rc5.d/S29oracleasm
root@rac3# rm -f /etc/rc.d/rc4.d/S29oracleasm
root@rac3# rm -f /etc/rc.d/rc1.d/K20oracleasm
root@rac3# rm -f /etc/rc.d/rc3.d/S29oracleasm
root@rac3# rm -f /etc/rc.d/rc6.d/K20oracleasm


Remove the users and groups

root@rac3# userdel -r grid
root@rac3# userdel -r oracle
root@rac3# groupdel oinstall
root@rac3# groupdel asmadmin
root@rac3# groupdel asmdba
root@rac3# groupdel asmoper
root@rac3# groupdel dba
root@rac3# groupdel oper





Sunday, March 5, 2017

The OPatch utility


Set the environment variable:
# export PATH=$PATH:< GI_HOME >/OPatch

Apply the patch to the GI home and all Oracle RAC database homes of the same version:
# opatchauto apply /stage/PSUpatch/22899531 -ocmrf < ocm response file >

Apply the patch to the GI home only:
# opatchauto apply /stage/PSUpatch/22899531 -oh < GI_HOME > -ocmrf < ocm response file >

Apply the patch to one or more Oracle RAC database homes:
# opatchauto apply /stage/PSUpatch/22899531 -oh < oracle_home1_path >, < oracle_home2_path > -ocmrf < ocm response file >

Roll back the patch from the GI home and from every Oracle RAC database home:
# opatchauto rollback /stage/PSUpatch/22899531

Roll back the patch from the GI home only:
# opatchauto rollback /stage/PSUpatch/22899531 -oh < GI_HOME >

Roll back the patch from one or more Oracle RAC database homes:
# opatchauto rollback /stage/PSUpatch/22899531 -oh < oracle_home1_path >, < oracle_home2_path >


Starting with OPatch 12.2.0.1.5, the -ocmrf < ocm response file > option is no longer needed and the < ocm response file > does not have to be created


# export PATH=$PATH:< GI_HOME >/OPatch

To patch the GI home and all Oracle RAC database homes of the same version:
# opatchauto apply < UNZIPPED_PATCH_LOCATION >/27010930

To patch the GI home only:
# opatchauto apply < UNZIPPED_PATCH_LOCATION >/27010930 -oh < GI_HOME >

To patch one or more Oracle RAC database homes:
# opatchauto apply < UNZIPPED_PATCH_LOCATION >/27010930 -oh < oracle_home1_path >, < oracle_home2_path >

Roll back the patch from the GI home and from every Oracle RAC database home:
# opatchauto rollback < UNZIPPED_PATCH_LOCATION >/27010930

Roll back the patch from the GI home:
# opatchauto rollback < UNZIPPED_PATCH_LOCATION >/27010930 -oh < path to GI home > 

Roll back the patch from an Oracle RAC database home:
# opatchauto rollback < UNZIPPED_PATCH_LOCATION >/27010930 -oh < oracle_home1_path >,< oracle_home2_path >

For more information about opatchauto, see the Oracle OPatch User's Guide.


Before installing a PSU, update the OPatch utility in every HOME

$ unzip p6880880_121010_LINUX.zip -d /u01/app/oracle/product/12.1.0/db_1
$ /u01/app/oracle/product/12.1.0/db_1/OPatch/opatch version

$ unzip p6880880_121010_LINUX.zip -d /u01/app/oracle/12.1.0.2/grid
$ /u01/app/oracle/12.1.0.2/grid/OPatch/opatch version


If you want to use opatchauto to install the PSU, you need to create an ocm.rsp file:

$ export ORACLE_HOME=/u01/app/oracle/12.1.0.2/grid
$ $ORACLE_HOME/OPatch/ocm/bin/emocmrsp -no_banner -output /stage/ocm.rsp

Answers: ENTER
        Y

And check that the Oracle Inventory is valid:

$ /u01/app/oracle/product/12.1.0/db_1/OPatch/opatch lsinventory -detail -oh /u01/app/oracle/product/12.1.0/db_1

$ /u01/app/oracle/12.1.0.2/grid/OPatch/opatch lsinventory -detail -oh /u01/app/oracle/12.1.0.2/grid



We want to install PSU /stage/PSUpatch/22899531.
Unpack it:


$ mkdir /stage/PSUpatch
$ cp /stage/p22899531_121020_Linux-x86-64.zip /stage/PSUpatch
$ cd /stage/PSUpatch
$ unzip p22899531_121020_Linux-x86-64.zip


Which patches will it conflict with?

Run the following as root:

/u01/app/oracle/12.1.0.2/grid/OPatch/opatchauto apply /stage/PSUpatch/22899531 -analyze -ocmrf /stage/ocm.rsp

If conflicting patches are already installed, we will be asked to roll them back.

A specific patch can be rolled back from a specific HOME like this:


opatch rollback -id 2143694 -local -oh /u01/app/oracle/12.1.0.2/grid

Before rolling back some patches, the HAS stack must be shut down.

Run as root:


/u01/app/oracle/12.1.0.2/grid/bin/crsctl stop has -f


If the conflict analysis fails, then

run the following as root:


$ORA_GI_HOME/crs/install/roothas.pl -postpatch

and retry:
/u01/app/oracle/12.1.0.2/grid/OPatch/opatchauto apply /stage/PSUpatch/22899531 -analyze -ocmrf /stage/ocm.rsp


Prerequisite checks (conflicts against the Oracle home) can be run for the individual sub-patches like this:
/u01/app/oracle/12.1.0.2/grid/OPatch/opatch prereq CheckConflictAgainstOHWithDetail -phBaseDir /stage/PSUpatch/22899531/22899531/22806133
/u01/app/oracle/12.1.0.2/grid/OPatch/opatch prereq CheckConflictAgainstOHWithDetail -phBaseDir /stage/PSUpatch/22899531/22899531/23006522


The following command installs PSU 22899531 into the GRID_HOME and into every RDBMS_HOME; the list of homes is taken from the Oracle Inventory

# /u01/app/oracle/12.1.0.2/grid/OPatch/opatchauto apply /stage/PSUpatch/22899531 -ocmrf /stage/ocm.rsp


Check the Oracle Inventory:
$ /u01/app/oracle/product/12.1.0/db_1/OPatch/opatch lsinventory -detail -oh /u01/app/oracle/product/12.1.0/db_1

$ /u01/app/oracle/12.1.0.2/grid/OPatch/opatch lsinventory -detail -oh /u01/app/oracle/12.1.0.2/grid


After installing the PSU:
Loading Modified SQL Files into the Database:


$ sqlplus /nolog
SQL> Connect / as sysdba
SQL> startup
SQL> quit

$ cd $ORACLE_HOME/OPatch
$ ./datapatch -verbose


Check for the list of patches applied to the database.

SQL> select action_time, patch_id, patch_uid, version, status, bundle_series, description
     from dba_registry_sqlpatch;



Example of installing a PSU for a Grid Home and DB Home (Oracle Restart configuration)


Patch 22191577 - Oracle Grid Infrastructure Patch Set Update 11.2.0.4.160119 (Jan2016) (Includes Database PSU 11.2.0.4.160119)
21948347 DB_PSU 11.2.0.4.160119 (INCLUDES CPUJAN2016) Both DB Homes and Grid Home
21948348 OCW_PSU 11.2.0.4.160119 Both DB Homes and Grid Home
21948355 ACFS_PSU 11.2.0.4.160119 Only Grid Home


Install the latest version of the OPatch utility (patch 6880880) into every $ORACLE_HOME.


GRID_HOME
su - root
chown oracle:oinstall /u01/app/grid/product/11.2.0.4
su - oracle
$ . oraenv
cd /u01/distrib/11.2.0.4/PSU/160119
cp p6880880_112000_Linux-x86-64.Opatch_11.2.0.3.12.zip $ORACLE_HOME
cd $ORACLE_HOME
mv OPatch OPatch.old
unzip p6880880_112000_Linux-x86-64.Opatch_11.2.0.3.12.zip
$ORACLE_HOME/OPatch/opatch version

DB_HOME
su - oracle
$ . oraenv
cd /u01/distrib/11.2.0.4/PSU/160119
cp p6880880_112000_Linux-x86-64.Opatch_11.2.0.3.12.zip $ORACLE_HOME
cd $ORACLE_HOME
mv OPatch OPatch.old
unzip p6880880_112000_Linux-x86-64.Opatch_11.2.0.3.12.zip
$ORACLE_HOME/OPatch/opatch version

Create the unconfig.rsp file in every $ORACLE_HOME

GRID_HOME
su - oracle
$ . oraenv
$ORACLE_HOME/OPatch/ocm/bin/emocmrsp -no_banner -output $ORACLE_HOME/OPatch/ocm/unconfig.rsp

DB_HOME
su - oracle
$ . oraenv
$ORACLE_HOME/OPatch/ocm/bin/emocmrsp -no_banner -output $ORACLE_HOME/OPatch/ocm/unconfig.rsp

Preparation before installing the PSU patches
It is recommended to stop the running database instance
GI must keep running

su - oracle
. oraenv

$ORACLE_HOME/bin/srvctl stop database -d < db-unique-name >

Installing the latest PSU patch (for GI+DB) in AUTO mode.


The directory containing the unpacked patch must not contain anything other than this patch. Make sure that at least 24 GB is free in /u01.
Copy the patch into the directory /u01/distrib/11.2.0.4/PSU/160119

su - oracle

cd /u01/distrib/11.2.0.4/PSU/160119

unzip p22191577_112040_Linux-x86-64.GI.zip

The installation is performed as the root user

su - root
unset TMOUT
export PATH=$PATH:/usr/ccs/bin:/u01/app/grid/product/11.2.0.4/OPatch

# opatch auto /u01/distrib/11.2.0.4/PSU/160119/22191577 -ocmrf /u01/app/grid/product/11.2.0.4/OPatch/ocm/unconfig.rsp


Checking the installed patches for GRID_HOME + DB_HOME

GRID_HOME
su - oracle
. oraenv
ORACLE_SID = [TEST] ? +ASM

$ORACLE_HOME/OPatch/opatch lspatches
21948355;ACFS Patch Set Update : 11.2.0.4.160119 (21948355)
21948348;OCW Patch Set Update : 11.2.0.4.160119 (21948348)
21948347;Database Patch Set Update : 11.2.0.4.160119 (21948347)
OPatch succeeded.

DB_HOME
su - oracle
. oraenv
ORACLE_SID = [+ASM] ? TEST

$ORACLE_HOME/OPatch/opatch lspatches
21948348;OCW Patch Set Update : 11.2.0.4.160119 (21948348)
21948347;Database Patch Set Update : 11.2.0.4.160119 (21948347)
OPatch succeeded.




Example of installing and rolling back a PSU for the following configuration:

Oracle RAC
GI Home is not shared
Database Home is shared
ACFS is in use.


Patch 27010930 - Database Proactive Bundle Patch 12.1.0.2.180116

26925263 DB_BP 12.1.0.2.180116 Only DB Home for non-Oracle RAC setup. Both DB Homes and Grid Home for Oracle RAC setup.
26925218 OCW_PSU 12.1.0.2.180116 Both DB Homes and Grid Home
26910974 ACFS_PSU 12.1.0.2.180116 (see note) Only Grid Home
26983807 DBWLM_PSU 12.1.0.2.180116 (see note) Only Grid Home
Note: for ACFS and DBWLM these subpatches are not applicable to the HP-UX Itanium and Linux on IBM System z platforms.

Installing the patch:

On the first node:

$ <ORACLE_HOME>/bin/srvctl stop database -d <db-unique-name>
Unmount the ACFS file systems. (1494652.1)
# <GI_HOME>/OPatch/opatchauto apply <UNZIPPED_PATCH_LOCATION>/27010930 -oh <GI_HOME>
A system reboot is recommended before using ACFS.
Remount the ACFS file systems. (1494652.1)
# <GI_HOME>/OPatch/opatchauto apply <UNZIPPED_PATCH_LOCATION>/27010930 -oh <ORACLE_HOME>
$ <ORACLE_HOME>/bin/srvctl start instance -d <db-unique-name> -n <nodename>

On the next node:
Unmount the ACFS file systems. (1494652.1)
# <GI_HOME>/OPatch/opatchauto apply <UNZIPPED_PATCH_LOCATION>/27010930 -oh <GI_HOME>
A system reboot is recommended before using ACFS.
Remount the ACFS file systems. (1494652.1)
$ <ORACLE_HOME>/bin/srvctl start instance -d <db-unique-name> -n <nodename>

On the next node:
Unmount the ACFS file systems. (1494652.1)
# <GI_HOME>/OPatch/opatchauto apply <UNZIPPED_PATCH_LOCATION>/27010930 -oh <GI_HOME>
A system reboot is recommended before using ACFS.
Remount the ACFS file systems. (1494652.1)
$ <ORACLE_HOME>/bin/srvctl start instance -d <db-unique-name> -n <nodename>

and so on for the remaining nodes (the Database Home is shared, so it is patched only once, on the first node).
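Before touching the first node, 12.1 opatchauto can also be run in analyze mode to validate the patch against a home without changing anything. A sketch of that pre-check, using the same placeholders as above:

# <GI_HOME>/OPatch/opatchauto apply <UNZIPPED_PATCH_LOCATION>/27010930 -analyze -oh <GI_HOME>
# <GI_HOME>/OPatch/opatchauto apply <UNZIPPED_PATCH_LOCATION>/27010930 -analyze -oh <ORACLE_HOME>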

Rolling back the patch:

On the first node:

$ <ORACLE_HOME>/bin/srvctl stop database -d <db-unique-name>
Unmount the ACFS file systems. (1494652.1)
# <GI_HOME>/OPatch/opatchauto rollback <UNZIPPED_PATCH_LOCATION>/27010930 -oh <GI_HOME>
A system reboot is recommended before using ACFS.
Remount the ACFS file systems. (1494652.1)
# <GI_HOME>/OPatch/opatchauto rollback <UNZIPPED_PATCH_LOCATION>/27010930 -oh <ORACLE_HOME>
$ <ORACLE_HOME>/bin/srvctl start instance -d <db-unique-name> -n <nodename>

On the next node:
Unmount the ACFS file systems. (1494652.1)
# <GI_HOME>/OPatch/opatchauto rollback <UNZIPPED_PATCH_LOCATION>/27010930 -oh <GI_HOME>
A system reboot is recommended before using ACFS.
Remount the ACFS file systems. (1494652.1)
$ <ORACLE_HOME>/bin/srvctl start instance -d <db-unique-name> -n <nodename>

On the next node:

Unmount the ACFS file systems. (1494652.1)
# <GI_HOME>/OPatch/opatchauto rollback <UNZIPPED_PATCH_LOCATION>/27010930 -oh <GI_HOME>
A system reboot is recommended before using ACFS.
Remount the ACFS file systems. (1494652.1)
$ <ORACLE_HOME>/bin/srvctl start instance -d <db-unique-name> -n <nodename>

and so on for the remaining nodes.
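After the rollback it can be verified on every node that the bundle patch is no longer registered in either home. A minimal check using the sub-patch numbers listed above; no output means nothing is left:

# <GI_HOME>/OPatch/opatch lspatches | grep -E '26925263|26925218|26910974|26983807'
$ <ORACLE_HOME>/OPatch/opatch lspatches | grep -E '26925263|26925218'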


Patch Post-Installation/Post-DeInstallation Instructions



Applying Conflict Resolution Patches

Loading Modified SQL Files into the Database

Standalone DB

% sqlplus /nolog
SQL> Connect / as sysdba
SQL> startup
SQL> quit
% cd $ORACLE_HOME/OPatch
% ./datapatch -verbose

Single/Multitenant (CDB/PDB) DB
% sqlplus /nolog
SQL> Connect / as sysdba
SQL> startup
SQL> alter pluggable database all open;
SQL> quit
% cd $ORACLE_HOME/OPatch
% ./datapatch -verbose
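datapatch records its work in dba_registry_sqlpatch (12c), so a quick check that the bundle patch SQL was applied successfully might look like this (a hedged sketch):

$ sqlplus -S / as sysdba <<'EOF'
set lines 200
col description format a60
select patch_id, action, status, description
  from dba_registry_sqlpatch
 order by action_time;
EOF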

Checking the logs:


Check the following log files in $ORACLE_BASE/cfgtoollogs/sqlpatch/26925263/<unique patch ID> for errors:
26925263_apply_<database SID>_<CDB name>_<timestamp>.log

Check the following log files in $ORACLE_HOME/sqlpatch/26925263/ for errors:
26925263_rollback_<database SID>_<CDB name>_<timestamp>.log
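A simple way to scan these logs for problems (a sketch; the wildcard stands in for the unique patch ID directory):

$ grep -iE 'ORA-|error' $ORACLE_BASE/cfgtoollogs/sqlpatch/26925263/*/26925263_apply_*.log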


Invalid objects:

Run utlrp.sql to revalidate these objects.

cd $ORACLE_HOME/rdbms/admin
sqlplus /nolog
SQL> CONNECT / AS SYSDBA
SQL> @utlrp.sql
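Before and after utlrp.sql the number of invalid objects can be compared (a standard check):

$ sqlplus -S / as sysdba <<'EOF'
select owner, count(*) as invalid_objects
  from dba_objects
 where status = 'INVALID'
 group by owner
 order by owner;
EOF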

RMAN catalog:


Upgrade Oracle Recovery Manager Catalog

$ rman catalog username/password@alias
RMAN> UPGRADE CATALOG;
RMAN> UPGRADE CATALOG;

The UPGRADE CATALOG command is intentionally entered twice: RMAN asks for it to be repeated to confirm the upgrade.
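The catalog schema version can be checked before and after the upgrade by querying the rcver table in the recovery catalog owner's schema (same username/password@alias as above):

$ sqlplus -S username/password@alias <<'EOF'
select * from rcver;
EOF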




Additional information about the OPatch utility can be found in the following documents:

(Doc ID 293369.1)


OPatch User Guides

General

Note 1561792.1 Patching Assistant: Oracle Database/Client
Note 1451669.1 FAQ : Understanding OPatch Tool And Patching
Note 224346.1 OPatch - Where Can I Find the Latest Version of OPatch?
Note 453495.1 FAQs on OPatch Version : 11.1
Note 334108.1 OPatch version 10.2 - FAQ

Note 433218.1 Why Should You Install 32-Bit JDK/JRE When You Have 64-Bit Oracle and Operating System ?
Note 1475077.1 Can the $ORACLE_HOME/jdk and $ORACLE_HOME/jdk/jre Directories Be Removed?
Note 1449674.1 Is It Supported to Update /Upgrade the Default JDK/JRE in Oracle Home?

Note 1446582.1 Frequently Asked Questions (FAQ): Patching Oracle Database Server
Note 865255.1 Best approach for Oracle database patching sequence to latest/required patchset along with CPU/PSU/any-other-one-off patch
Note 1526322.1 11g : Can Examples CD Be Installed After Interim Patches Are Applied On The Base Version?

Note 1530108.1 Oracle Database 12.1 : FAQ on Queryable Patch Inventory
Note 1376691.1 Composite Patches for Oracle Products [Video]
Note 761111.1 RDBMS Online Patching Aka Hot Patching
Note 1463390.1 Limits Of Online Patching

Note 563656.1 Different types of Patch Conflicts - Superset/Subset/Duplicate/Bug/File
Note 1321267.1 Database Patch conflict resolution
Note 1299688.1 Patch conflict resolution
Note 1061295.1 Patch Set Updates - One-off Patch Conflict Resolution

Note 418537.1 Significance of oraInst.loc When Installing Oracle Products and Applying Patches
Note 812874.1 Understanding and Diagnosing MISSING_COMPONENT and/or SKIPPING_COMPONENT messages during opatch apply
Note 551222.1 Why Does OPatch Backup The Same Library Multiple Times?
Note 283367.1 OPatch for Oracle Application Server 10g (10.1.2, 10.1.3, 10.1.4)
Note 403212.1 Location Of Logs For OPatch And OUI
Note 403218.1 Can You Delete $ORACLE_HOME/.patch_storage Directory?

Note 861152.1 Does PSU Released For Oracle 10gR2 (10.2.0.4), 11g R1 (11.1.0.7) and 11g R2 (11.2.0.1) Change The Database Version/Fifth Digit?

Note 1520299.1 Master Note For Oracle Database 12c Release 1 (12.1) Database/Client Installation/Upgrade/Migration Standalone Environment (Non-RAC)
Note 2232156.1 OPatch: Behavior Changes starting in OPatch 12.2.0.1.9 and 11.2.0.3.15 releases

How To

Note 274526.1 How To Download And Install The Latest OPatch Version
Note 298906.1 How To Repair A Corrupted Inventory For RDBMS
Note 556834.1 Steps To Recreate Central Inventory(oraInventory) In RDBMS Homes
Note 406037.1 Using opatch -report option, how to check for potential conflicts during patch apply without Database / Listener shutdown
Note 458485.1 How to find whether the one-off Patches will conflict or not?
Note 550522.1 How To Avoid Disk Full Issues Because OPatch Backups Take Big Amount Of Disk Space.
Note 749787.1 How to run opatch in silent mode?
Note 821263.1 How to confirm that a Critical Patch Update (CPU) has been installed in Linux / UNIX
Note 750350.1 How To Find The Description/Details Of The Bugs Fixed By A Patch Using OPatch?
Note 729384.1 How To Find If a Specific Patch Has Been Applied On Oracle 8i or 9.0.1 (or 9i R1)?
Note 470225.1 How To Use Truss With OPatch?
Note 312767.1 How to Rollback a Failed Interim Patch Installation
Note 966023.1 How to create a ocm response file for opatch silent installation.
Note 459360.1 How To Do The Prerequisite/Conflicts Checks Using OUI(Oracle Universal Installer) And OPatch Before Applying/Rolling Back A Patch
Note 739963.1 Using AIX commands genld, genkld and slibclean to avoid library file locking errors (libjox)
Note 1394537.1 How To Rollback An Online Patch If The Database Is Down Or Removed
Note 453399.1 How To Check if inventory.xml/comps.xml Or Any Oracle XML File Is Correct?
Note 1474666.1 How to apply OC4J patch (13705478) on RDBMS home
Note 1543697.1 How To Use MOS Patch Planner Via Enterprise Manager 12c To Check for and Resolve Patch Conflicts

Diagnostic Tools For OPatch

Note 1430571.1 Script To Collect OPatch Diagnostic Information For Unix Platform (opatchdiag.sh)
Note 1441309.1 Oneoffsdiag.sh:Script To Verify The Existance Of $ORACLE_HOME/inventory/oneoffs/PATCH_ID and also actions.xml and actions.xml inventory.xml in $ORACLE_HOME/inventory/oneoffs/PATCH_ID/etc/config

Known Issues

With Inventory

Note 229773.1 ALERT Applying Multiple Interim Patches Using OPatch Can Cause Inventory Corruption
Note 401454.1 opatch lsinventory Raises An OPatchException While Accessing O2O
Note 413829.1 OPatch Fails With 'Can not get details for given Oracle Home'
Note 417275.1 opatch lsinventory Reports `Exception in thread "main" java.lang.NoClassDefFoundError: javax/xml/transform/Source` When Executed Against 9.2.0.x
Note 417694.1 "OUI-67124:Inventory check failed: Patch ID is not registered in Oracle Home Inventory" while applying one-off patches using opatch
Note 431783.1 opatch lsinventory Result Shows: * Warning: no filemap entries available.
Note 795452.1 OPatch reports Error when using multiple oracle users
Note 864117.1 OPatch error: "Inventory load failed... OPatch cannot load inventory for the given Oracle Home."
Note 821550.1 opatch lsinventory Error: " Unable To Parse The Xml File"
Note 861677.1 OPatch error : "Unable to lock Central Inventory"
Note 864803.1 OPatch error: "Apply Session failed: ApplySession failed to prepare the system. No detail available."
Note 437026.1 ERROR :"OPatch cannot find a valid oraInst.loc file to locate Central Inventory.OPatch failed with error code 104"
Note 728417.1 OPatch Fails With "LsInventorySession failed:OracleHomeInventory gets null oracleHomeInfo"
Note 1086853.1 opatch lsinventory Reports "OUI-67076 : OracleHomeInventory was not able to create a lock file"
Note 876343.1 Patch conflict Error OUI-67301 on 11.1.0.7.0 due to one off in inventory
Note 1185246.1 OPatch Fails With "OUI-67028:Inventory load failed"
Note 1308866.1 OPatch Cannot Load Inventory For The Given Oracle Home
Note 1475147.1 OPatch - Failed to load the patch object. Possible causes are: OPatch failed with error code = 73 LsInventorySession failed:
Note 1456247.1 The opatch apply failing with OPatch failed with error code 39 :This patch requires some components to be installed in the home. Either the Oracle Home doesn't have the components or this patch is not suitable for this Oracle Home.

With Java

Note 332698.1 OPatch was not able to locate executable jar
Note 417372.1 OPatch Error " java.lang.UnsatisfiedLinkError: ... liboraInstaller.so: ... "
Note 808368.1 OPatch: cannot open shared object file.  Possible cause: architecture word width mismatch
Note 974371.1 After Installing The 11.2.0.1 32-bit Client Software On SPARC Solaris, OPatch and OUI Fail With ELFCLASS32 Errors For liboraInstaller.so
Note 417177.1 ERROR : "Java could not be located. OPatch cannot proceed!.OPatch failed with error code = 1"
Note 1474852.1 UTE011: Active tracepoint array length for JVM is 7559; should be 7577 Could not create JVM.
Note 1475894.1 OPatch commands error: JVMJ9VM019E Fatal error: Unable to find and initialize required class java/lang/ClassCircularityError

With Pre-Patching

Note 943436.1 opatch prereq checkapplicable Error: "Unable to create Patch Object. Check if the given patch location is correct"
Note 780858.1 OPatch Fails With `Prerequisite check "CheckActiveFilesAndExecutables" failed` And `Following executables are active : $ORACLE_HOME/lib/libnmemso.so`
Note 882932.1 Prerequisite check "CheckPatchApplicableOnCurrentPlatform" failed When Applied Generic Patch 8282506
Note 861507.1 PSU OPatch conflict check fails with "OPatch failed with error code 73"
Note 292946.1 OPatch Detecting Wrong Platform Id For Install Of Patch
Note 404473.1 OPatch Reports Error "OPatch detects your platform as... this patch...supports platforms: 0 (Generic Platform)"
Note 602334.1 OPatch Not Working, Reporting That It Is Obsolete
Note 729218.1 ERROR: "The Oracle Home does not meet OUI version requirement.OPatch failed with error code 73"
Note 257800.1 OPatch: Interim Patch  is holding the lock from this patch
Note 312926.1 opatch apply Gives: Can't Locate English.Pm In @Inc
Note 1050819.1 OPatch Reports Missing Component and Prerequisite Check "CheckApplicable" Failed
Note 869945.1 Zop-40: The Patch(es) Has Conflicts/Supersets With Other Patches
Note 1231223.1 OPatch Fails With Error Code 255
Note 747049.1 OPatch apply/rollback error: Prerequisite check "CheckActiveFilesAndExecutables" failed
Note 1475206.1 Patch requires OPatch version
Note 1629444.1 Opatch Fails Updating Archives with " No space left on device " Error
Note 2039927.1 Source file / does not exists or is not readable

With Linking

Note 417319.1 Getting Warnings While Applying Patches on Linux x86-64
Note 457126.1 OPatch Aborted After Relink Fails On Target Ioracle.
Note 789053.1 Cannot find or open library file: -l pls10 when applying a patch with 10.2
Note 402945.1 While installing one-off Patch on AIX systems, getting several WARNING messages: OUI-67215 - TOC overflow and/or xlC: not found / xlC: Execute permission denied
Note 1446945.1 "ld: warning: symbol `_start' has differing types:" While Installing Or Patching 11gR2 On Oracle Solaris Platform
Note 1474762.1 WARNING:OUI-67200:Make failed to invoke "/usr/ccs/bin/make -f ins_rdbms.mk ioracle ORACLE_HOME="....'ld: fatal: file //lib/prod/lib/v9/crti.o: open failed: Permission denied
Note 1467060.1 Relinking Oracle Home FAQ ( Frequently Asked Questions)
Note 1525333.1 Relink error on target iKfed while patching Oracle 11.2.0.3 Client with 11.2.0.3.5 PSU
Note 1534583.1 OPatch fails to Rollback Patch due to relink errors.
Note 1539887.1 Undefined first referenced symbol ld: fatal: symbol referencing errors.

With Post-Patching

Note 353150.1 OPatch Failing Validation Phase When Archiving Really Did Not Fail
Note 791187.1 OPatch Failing Validation Phase - OPatch versions 10.2.0.4.6 and 11.1.0.6.6
Note 1361802.1 opatch auto doesn't patch the RDBMS Home if no database exists

With SQLPatching (datapatch)

Note 1599479.1 Datapatch errors at "SYS.DBMS_QOPATCH"
Note 1632818.1 Datapatch may fail while patching a RAC+PDB environment

Miscellaneous

Note 799883.1 OPatch error: OUI-67160 Unexpected end of ZLIB input stream
Note 739815.1 OUI-67294 Warning During opatch apply
Note 553244.1 WARNING MESSAGE "OUI-67078" and "OUI-67620" from OPatch when applying 10.2.X patches
Note 751107.1 ERROR: "Copy Action: Source file does not exist.'oracle.rdbms, Cannot copy file'"
Note 553263.1 OPatch Reports 'libclntsh.so.10.1 not removed. Text file busy'
Note 563356.1 OPatch error 'file not found' message on 11.1.0.6 32-bit
Note 1136432.1 OPatch fails with JVMXM008
Note 1066937.1 Apply Patch Fails with 'Patch ID is null' and Error Code 73
Note 1475147.1 OPatch - Failed to load the patch object. Possible causes are: OPatch failed with error code = 73 LsInventorySession failed:
Note 1108834.1 OUI-67073:ApplySession failed: Patch ID is null occurred when applying patch 9352179
Note 803097.1 OUI-67200:Make failed problem while applying a Patch
Note 730754.1 OPatch Gives Exception In Thread "Main" Unsatisfiedlinkerror: Haltimpl
Note 603465.1 OPatch Fails With "Jar not applied ... to $ORACLE_HOME/jdbc/lib/classes12.zip... 'invalid entry compressed size (expected x but got y bytes)'"
Note 1474874.1 opatch apply Fails - Jar Action: Source File "// '': Cannot update file '///' with '//'
Note 1085753.1 OPatch Failing With Error Code 74 While Applying PSU
Note 402945.1 While installing one-off Patch on AIX systems, getting several WARNING messages: OUI-67215 - TOC overflow and/or xlC: not found / xlC: Execute permission denied
Note 1055397.1 opatch napply Hanging
Note 1244414.1 OPatch throws: Archive Action: Source file "$ORACLE_HOME/.patch_storage/........." does not exist.
Note 1269454.1 OPatch Failed With Error Code 73 Because Session Is Not Able to Create a Directory
Note 1355040.1 Online patching errors: use "OPatch util enableOnlinePatch" to add the SID(s)
Note 1462303.1 OPatch Fails With Error "No such file or directory"
Note 1331900.1 Prerequisite Check "CheckRollbackable" On Auto-rollback Patches Failed
Note 1474805.1 OPatch Log Shows - INFO:Skip patch patch number from list of patches to apply: This patch is not needed.
Note 1476964.1 OPatch Logging "Generic Conflict" Between Sub-Patches Of A Composite Patch
Note 1468097.1 Argument (s) Error ... Illegal File Format For Composite Patch. There Is No Composite.xml File.
Note 1435438.1 Multiple OUI Messages In OPatch Log : OUI-67161, OUI-67085, OUI-67047, OUI-67049, OUI-67048, OUI-67085 and OUI-6705
Rolling Patch - OPatch Support for RAC

Windows Specific

Note 390761.1 Using OPatch On Windows When ORACLE_HOME Contains Space Characters
Note 416318.1 HOW TO Use Process Explorer to obtain a list of DLLs loaded by a Windows process
Note 418479.1 Files in Use errors, when running OPatch to apply patches on Windows
Note 791005.1 OPatch permissions problem on 64-bit windows 2008 and Oracle 10.2.0.3 and 10.2.0.4
Note 1446273.1 OPatch 11.2.0.3.0 LOCKS SRVM.JAR WHEN APPLYING ANY 11.2.0.3 BUNDLE PATCH ON WINDOWS X64
Note 1530132.1 Applied Bundle patch 16 (16167942/16167941) but opatch lsinventory incorrectly showing as Bundle patch 15.
Note 1568694.1 OPatch throwing "Unable to lock Central Inventory" on Windows 7

Using My Oracle Support Effectively

Note 732697.1 What Information Oracle Support Need To Work On OPatch Service Requests?
Note 735257.1 What information Oracle Support needs to work on Critical Patch Update (CPU) Installation Service Requests
Note 868955.1 My Oracle Support Health Checks Catalog
Note 166650.1 Working Effectively With Global Customer Support
Note 1194734.1 Where do I find that on My Oracle Support (MOS) [Video]

Generic Links

Note 1454618.1 Quick Reference To Patch Numbers For Database PSU, CPU And Bundle Patches
Note 1119703.1 Database PSU-CPU Cross-Reference List
Note 854428.1 Patch Set Updates for Oracle Products
Note 268895.1 Oracle Database Server Patchset Information, Versions: 8.1.7 to 11.2.0
Note 161549.1 Oracle Database Server and Networking Patches for Microsoft Platforms
Note 161818.1 Oracle Database (RDBMS) Releases Support Status Summary

Fusion Middle Ware

Note 1587524.1 Using OPatch 13.1 for Oracle Fusion Middleware 12c (12.1.2+)
Note 1591487.1 Using OPatch 11.1 for Oracle Fusion Middleware 11g (11.1.1/11.1.2)
Note 283367.1 OPatch for Oracle Application Server 10g (10.1.2, 10.1.3, 10.1.4)

Information on Security Patch Updates (SPU's)