Friday, September 16, 2011

Clean Standby archive

filename standby_archive_clean.sh

#!/bin/bash
#
# standby_archive_clean.sh
# Purpose: delete all archived logs on the standby via RMAN, e-mail the
# RMAN log as a success/failure report, then remove the empty dated
# directories RMAN leaves behind under the standby archivelog tree.

# Begin environment setup
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/dbhome_1
export ORACLE_SID=TESTDB
export PATH=$PATH:/usr/sbin:$ORACLE_HOME/bin:$HOME/bin
CURRENT_DATE=$(date +%Y%m%d)
logfile="/u01/app/oracle/admin/TESTDB/dba_scripts/standby_archive_clean$CURRENT_DATE.log"

# Begin RMAN script.  Log to $logfile so the mail step below reads the
# exact file RMAN wrote.
"$ORACLE_HOME/bin/rman" log "$logfile" << EOF
CONNECT TARGET /
SET COMMAND ID TO 'DELETE STANDBY ARCHIVE';
DELETE NOPROMPT  ARCHIVELOG ALL;
CROSSCHECK ARCHIVELOG ALL;
EXIT;
EOF

# Validation and e-mail status.
# BUG FIX: the original used "[$status -gt 0]" (no spaces around the
# brackets), which is a command-not-found error; the test always returned
# non-zero, so the SUCCESS branch ran even when RMAN had failed.
status=$?
if [ "$status" -gt 0 ]; then
mailx -s "STANDBY ARCHIVE CLEAN FAILED: TESTDB" DBA@TESTDB.COM << !
$(cat "$logfile")
!
else
mailx -s "SUCCESSFUL STANDBY ARCHIVE CLEAN: TESTDB" DBA@TESTDB.COM << !
$(cat "$logfile")
!
fi

# Remove empty directories left by rman.  -depth visits children before
# parents so nested empty directories are removed in a single pass; this
# also avoids the unquoted "ls | wc -l" test of the original.
find /u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_STANDBY/archivelog \
  -depth -type d -empty -exec rmdir -- {} \; 2>/dev/null


Monitor Invalid Packages in SYS to debug an issue

file name: sys_monitor.sh

#!/bin/bash
#
# sys_monitor.sh
# Purpose: run sys_monitor.sql (which lists ALTER ... COMPILE statements
# for invalid SYS packages), capture the output to a dated log, e-mail it,
# then remove the log.

# Begin environment setup
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/dbhome_1
export ORACLE_SID=TESTDB
export PATH=$PATH:/usr/sbin:$ORACLE_HOME/bin:$HOME/bin
CURRENT_DATE=$(date +%Y%m%d)
logfile="/home/oracle/bin/dba_scripts/sys_monitor$CURRENT_DATE.log"

# Begin sql script; all sqlplus output goes to the log for the mail body.
sqlplus /nolog @/home/oracle/bin/dba_scripts/sys_monitor.sql > "$logfile"

# Validation and e-mail status.
# BUG FIX: the success branch had no space between the subject string and
# the recipient ("...SUCCESSFUL"dba@testdb.com), so mailx received one
# mangled argument and no recipient address.
status=$?
if [ "$status" -gt 0 ]; then
mailx -s "sys monitor info  FAILED" dba@testdb.com << !
$(cat "$logfile")
!
else
mailx -s "sys monitor info SUCCESSFUL" dba@testdb.com << !
$(cat "$logfile")
!
fi

rm -f -- "$logfile"


--------------------
filename: sys_monitor.sql

-- sys_monitor.sql
-- Generates (but does not execute) ALTER ... COMPILE statements for every
-- invalid package spec and package body owned by SYS.  The output is
-- captured by the calling shell script and e-mailed for manual review.
connect / as sysdba
-- Invalid package specifications.
select 'ALTER PACKAGE SYS.' || OBJECT_NAME || ' COMPILE;' from dba_objects
where owner='SYS' and object_type='PACKAGE' AND STATUS='INVALID';


-- Invalid package bodies (note: COMPILE BODY, not COMPILE).
select 'ALTER PACKAGE SYS.' || OBJECT_NAME || ' COMPILE BODY;' from dba_objects
where owner='SYS' and object_type='PACKAGE BODY' AND STATUS='INVALID';

exit;







Primary DB Backup Script

#!/bin/bash
# filename: primarydb_backup.sh
# Purpose: full RMAN backup of the primary database and its archivelogs,
# e-mail the RMAN log, then remove the empty dated directories that
# RMAN's "delete obsolete" leaves under the backupset tree.
# FIX: the shebang must be the very first line of the file; it previously
# sat below the filename comment and was ignored by the kernel.

# Begin environment setup
ORACLE_BASE=/u01/app/oracle
export ORACLE_BASE
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/dbhome_1
export ORACLE_SID=TESTDB
export PATH=$PATH:/usr/sbin:$ORACLE_HOME/bin:$HOME/bin
CURRENT_DATE=$(date +%Y%m%d)

# define variables
logfile="/home/oracle/bin/dba_scripts/primarydb_backup$CURRENT_DATE.log"

# Begin RMAN script.  USING passes $CURRENT_DATE into the .rman file,
# where it is referenced as &1.
"$ORACLE_HOME/bin/rman" @/home/oracle/bin/dba_scripts/primarydb_backup.rman USING "$CURRENT_DATE" log "$logfile"

# Validation and send email
status=$?
if [ "$status" -gt 0 ]; then
mailx -s "testdbbackup FAILED: testdb" dba@testdb.com << !
$(cat "$logfile")
!
else
mailx -s "testdb backup SUCCESSFUL: testdb" dba@testdb.com << !
$(cat "$logfile")
!
fi

rm -f -- "$logfile"

# Remove empty directories: after rman delete obsolete, it leaves some
# empty dated directories.  -depth removes nested empties in one pass and
# avoids the unquoted "ls | wc -l" test of the original.
find /u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/backupset \
  -depth -type d -empty -exec rmdir -- {} \; 2>/dev/null

----------------------
FILENAME: primarydb_backup.rman

# primarydb_backup.rman -- invoked by primarydb_backup.sh as:
#   rman @primarydb_backup.rman USING <yyyymmdd> log <logfile>
# so the substitution variable &1 below expands to the run date.
CONNECT TARGET /
SET COMMAND ID TO 'cron_rman_full_bk_&1';
# NOTE(review): %s is an RMAN FORMAT substitution (backup set number);
# inside a TAG it is likely stored literally -- verify against the logs.
backup device type disk tag 'primary_full_daily%s' database;
backup device type disk tag 'primary_full_daily%s' archivelog all not backed up delete all input;
# Maintenance channel for the crosscheck/delete housekeeping below.
allocate channel for maintenance type disk;
crosscheck backup;
crosscheck archivelog all;
delete noprompt expired backup;
# Deletes backups outside the configured retention policy; this is what
# leaves the empty dated directories cleaned up by the shell script.
delete noprompt obsolete device type disk;
release channel;
EXIT;

-------------------------

Primary DB Scripts

#!/bin/bash
# collecting information from primary db, filename primary_db-info.sh
# Runs primarydb_asm_info.sql under the +ASM instance, then
# primary_db_info.sql under the TESTDB instance.  Both scripts spool into
# prim_db_info<yyyymmdd>.log (relative path), which is e-mailed together
# with df -h output and then removed.

# Begin environment setup (ASM first)

export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/grid
export ORACLE_SID=+ASM
export PATH=$PATH:/usr/sbin:$ORACLE_HOME/bin:$HOME/bin:.

CURRENT_DATE=$(date +%Y%m%d)
logfile="/home/oracle/bin/dba_scripts/prim_db_info$CURRENT_DATE.log"

# The SQL scripts spool with a relative file name, so the cd must succeed
# or the log would be written to the wrong directory (FIX: cd was
# previously unchecked).
cd /home/oracle/bin/dba_scripts/ || exit 1

# Begin sql script, -S turns off the banner
sqlplus -S /nolog @/home/oracle/bin/dba_scripts/primarydb_asm_info.sql

# Switch to the database instance for the Data Guard queries.
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/dbhome_1
export ORACLE_SID=TESTDB

# Begin sql script.
# NOTE: $status reflects only this second sqlplus invocation; a failure
# in the ASM script alone is not detected (preserving original behavior).
sqlplus -S /nolog @/home/oracle/bin/dba_scripts/primary_db_info.sql
status=$?

# Validation and e-mail status
if [ "$status" -gt 0 ]; then
mailx -s "testdb_primary info gathering FAILED" dba@testdb.com << !
$(df -h)
$(cat "$logfile")
!
else
mailx -s "testdb_primary info gathering" dba@testdb.com << !
$(df -h)
$(cat "$logfile")
!
fi

rm -f -- "$logfile"

---------------------------
-- file name primary_db_info.sql
-- Data Guard health report for the PRIMARY database.  Run by
-- primary_db-info.sh; spools (APPEND) into the same prim_db_info<date>.log
-- file that primarydb_asm_info.sql opened, so both reports arrive in one
-- e-mail.
connect / as sysdba

-------------------------------------------------------------------------
-- this script is to collect   data guard related info for primary db  --
-- It is referencing a note in metalink  
-- file name primary_db_info.sql                             --
-------------------------------------------------------------------------
-- new_value copies the selected column values into SQL*Plus substitution
-- variables (&&timestamp, &&suffix) used to build the spool file name.
column timecol new_value timestamp
column spool_extension new_value suffix

--this date format has to match whatever in the standby_db_info.sh so the spooled log can be emailed.
select to_char(sysdate,'YYYYmmdd') timecol,
'.log' spool_extension from sys.dual;
-- APPEND: continue the file started by primarydb_asm_info.sql.
spool prim_db_info&&timestamp&&suffix APPEND

set echo off
set feedback off
set lines 200
set pagesize 35
set trim on
set trims on

alter session set nls_date_format = 'MON-DD-YYYY HH24:MI:SS';

COLUMN time heading "PRIMARY DATABASE INFO GENERATED ON" FORMAT a35
select to_char(sysdate) time from dual;

column pid Format 9999999999
column status  heading STATUS JUS L Format a13 tru
column thread# JUS R Format  99
column sequence# Format 9999999999
column block# Format 9999999999
column delay_mins Format 99999999999

-- Query v$managed_standby to see the status of processes involved in
-- the shipping redo on this system.  Does not include processes needed to
-- apply redo.

select process,status,client_process,sequence#
from v$managed_standby
where PROCESS='LNS' OR (PROCESS='ARCH' AND STATUS='CONNECTED');

-- The following query will determine the current sequence number
-- and the last sequence archived.  If you are remotely archiving
-- using the LGWR process then the archived sequence should be one
-- higher than the current sequence.  If remotely archiving using the
-- ARCH process then the archived sequence should be equal to the
-- current sequence.  The applied sequence information is updated at
-- log switch time.

select ads.dest_id,max(sequence#) "Current Sequence",
           max(log_sequence) "Last Archived"
   from v$archived_log al, v$archive_dest ad, v$archive_dest_status ads
   where ad.dest_id=al.dest_id
   and al.dest_id=ads.dest_id
   and al.resetlogs_change#=(select max(resetlogs_change#) from v$archived_log )
   group by ads.dest_id;

-- In the following the database_role should be primary. If protection_level is different
-- than protection_mode then for some reason the mode listed in
-- protection_mode experienced a need to downgrade.  Once the error
-- condition has been corrected the protection_level should match the
-- protection_mode after the next log switch.

column role format a7 tru
column name format a10 wrap

select name,platform_id,database_role role,log_mode,
       flashback_on flashback,protection_mode,protection_level 
from v$database;

-- ARCHIVER can be (STOPPED | STARTED | FAILED). FAILED means that the
-- archiver failed to archive a log last time, but will try again within 5
-- minutes. LOG_SWITCH_WAIT is the ARCHIVE LOG/CLEAR LOG/CHECKPOINT event log
-- switching is waiting for.  Note that if ALTER SYSTEM SWITCH LOGFILE is
-- hung, but there is room in the current online redo log, then value is
-- NULL

column host_name format a20 tru
column version format a9 tru

select instance_name,host_name,version,archiver,log_switch_wait
from v$instance;
 


-- Force logging is not mandatory but is recommended.
-- During normal operations it is acceptable for
-- SWITCHOVER_STATUS to be SESSIONS ACTIVE or TO STANDBY.

column force_logging format a13 tru
column remote_archive format a14 tru
column dataguard_broker format a16 tru

select force_logging,remote_archive,
       supplemental_log_data_pk,supplemental_log_data_ui,
       switchover_status,dataguard_broker
from v$database; 

-- This query produces a list of all archive destinations.  It shows if
-- they are enabled, what process is servicing that destination, if the
-- destination is local or remote, and if remote what the current mount ID
-- is.

column destination format a35 wrap
column process format a7
column archiver format a8
column ID format 99
column mid format 99

select dest_id "ID",destination,status,target,
       schedule,process,mountid  mid
from v$archive_dest where dest_id=1 or dest_id=2;

-- This select will give further detail on the destinations as to what
-- options have been set.  Register indicates whether or not the archived
-- redo log is registered in the remote destination control file.

set numwidth 8
column ID format 99

select dest_id "ID",archiver,transmit_mode,affirm,async_blocks async,
       net_timeout net_time,delay_mins delay,reopen_secs reopen,
       register,binding
from v$archive_dest where dest_id=1 or dest_id=2;

-- The following select will show any errors that occured the last time
-- an attempt to archive to the destination was attempted.  If ERROR is
-- blank and status is VALID then the archive completed correctly.

column error format a55 wrap

select dest_id,status,error from v$archive_dest
where dest_id=1 or dest_id=2;
-- The query below will determine if any error conditions have been
-- reached by querying the v$dataguard_status view (view only available in
-- 9.2.0 and above):

column message format a80

select message, timestamp
from v$dataguard_status
where severity in ('Error','Fatal')
order by timestamp;



-- The following select will attempt to gather as much information as
-- possible from the standby.  SRLs are not supported with Logical Standby
-- until Version 10.1.

set numwidth 8
column ID format 99
column "SRLs" format 99
column Active format 99

select dest_id id,database_mode db_mode,recovery_mode,
       protection_mode,standby_logfile_count "SRLs",
       standby_logfile_active ACTIVE,
       archived_seq#
from v$archive_dest_status where dest_id=1 or dest_id=2;



-- The following query is run on the primary to see if SRL's have been
-- created in preparation for switchover.

SELECT 'STANDBY LOGS (SRL)' AS "The Following Are:" from dual;
select group#,sequence#,bytes from v$standby_log;

-- The above SRL's should one more in number and match in size with the ORL's
-- returned below:

SELECT 'ONLINE REDO LOGS (ORL)' AS "The Following Are:" from dual;
select group#,thread#,sequence#,bytes,archived,status from v$log;


spool off

exit;

----------------------------
--file name primarydb_asm_info.sql

-- primarydb_asm_info.sql
-- Reports ASM disk group capacity and per-disk I/O statistics.
-- Run first by primary_db-info.sh under the +ASM instance; it opens the
-- spool file that primary_db_info.sql later re-opens with APPEND.
connect / as sysdba
-- new_value copies the selected values into SQL*Plus substitution
-- variables used to build the spool file name below.
column timecol new_value timestamp
column spool_extension new_value suffix

--this date format has to match whatever in the standby_db_info.sh so the spooled log can be emailed.
select to_char(sysdate,'YYYYmmdd') timecol,
'.log' spool_extension from sys.dual;
spool prim_db_info&&timestamp&&suffix

set echo OFF
set feedback off

SET LINESIZE 600
set pagesize 35
set trim on
set trims on

alter session set nls_date_format='DD-MON-YYYY HH24:MI:SS';

COLUMN time heading "ASM DISK GROUP INFO GENERATED ON" FORMAT a35
select  sysdate time from dual;
-- The SMON session's MACHINE column gives the database host name.
select 'HOSTNAME:'AS HOSTNAME, MACHINE " " from v$session where program like '%SMON%';


-- Disk group capacity in GB.
SELECT GROUP_NUMBER, NAME, TOTAL_MB/1024 AS TOTAL_GB, FREE_MB/1024 AS FREE_GB FROM V$ASM_DISKGROUP ORDER BY GROUP_NUMBER;

-- Per-disk cumulative I/O and error counters.
SELECT GROUP_NUMBER, DISK_NUMBER,BYTES_READ/1024/1024/1024 AS GB_READ,BYTES_WRITTEN/1024/1024/1024 GB_WRITTEN, READ_ERRS,WRITE_ERRS,HOT_READS,HOT_WRITES, COLD_READS, COLD_WRITES FROM v$ASM_DISK ORDER BY GROUP_NUMBER,DISK_NUMBER;


spool off

exit;

Tuesday, August 30, 2011

ADDM Reports "Significant virtual memory paging was detected on the host operating system"

Our oracle server is running on Linux x86-64, 24 gb RAM. SGA=10gb
The ADDM report in DB Console displays this message

Significant virtual memory paging was detected on the host operating system.
Host operating system was experiencing significant paging but no particular root cause could be detected.
After searching in Metalink, I found that this is expected behavior for the Windows operating system. But for 64-bit Linux, SGA > 10g tends to cause this problem. Referring to this Metalink note, HugePages on Oracle Linux 64-bit [ID 361468.1], it says if your combined database SGAs are large (more than 8GB), you will need HugePages configured. But another Metalink note, "HugePages and Oracle Database 11g Automatic Memory Management (AMM) on Linux [ID 749851.1]", discusses the interoperability of the Automatic Memory Management (AMM) feature introduced by Oracle 11g and the HugePages feature of the Linux OS kernel. Whenever the 11g AMM feature is enabled by the MEMORY_TARGET/MEMORY_MAX_TARGET parameters, HugePages are not used. The use of AMM is absolutely incompatible with HugePages. If you want to use HugePages, make sure both the MEMORY_TARGET and MEMORY_MAX_TARGET parameters are unset (ALTER SYSTEM RESET)

Thursday, August 18, 2011

Creating Physical Standby Using RMAN Duplicate For ASM Standby and ASM Primary

Creating Physical Standby Using RMAN Duplicate in ASM Standby For ASM Primary

Scenario
The primary and standby were up and running, some archives on standby were not applied for some reason, the disk were 91% full. There was a need to clear the disk and recreate standby from Active Primary database.
References
The steps are a combination of different notes. Mainly, We were following this metalink note
"Step By Step Guide On Creating Physical Standby Using RMAN Duplicate In ASM Filesystem For ASM Primary [ID 837102.1]" But since this note was valid both for 10g and 11R2, it created standby from primary backup not from active database because that option was not available for 10g. We need to modify it to create standby from active primary. So we were also referencing Oracle Data Guard Concepts and Administration for 11R2
http://download.oracle.com/docs/cd/E11882_01/server.112/e17022.pdf
where it says we could use "FROM ACTIVE DATABASE", and we need to also specify
NOFILENAMECHECK option for the DUPLICATE FOR STANDBY command.
Assumptions
The steps also assumes following.
The primary init parameters were configured per Data Guard documentation and is up and running.
A pfile was created from primary spfile and was modified for standby parameters and was copied to standby. Then an spfile was create from this copy of pfile for standby and resides in ASM.
A standby password file was created from primary password file using orapw
ASM instance and diskgroups were configured in Standby server.
Network connectivity between PRIMARY and STANDBY server was configured.
Force logging was enabled in primary, standby redologs was created in primary.
(See [[Setup Standby Database Method 1 (RMAN Backup Copy)]] for details or refer to Oracle Data Guard Concepts and Administration for 11R2)
Brief Info About Log Shipping and Applying
Redo received from redo source database via redo transport is written to the
current standby redo log group by an RFS foreground process. When a log switch
occurs on the source database, incoming redo is then written to the next standby
redo log group, and the previously used standby redo log group is archived by an
ARCn foreground process.
The process of sequentially filling and then archiving redo log file groups at source database is mirrored at each redo transport destination by the sequential filling and archiving of standby redo log groups. Each standby redo log file must be at least as large as the largest redo log file in the
redo log of the redo source database. For administrative ease, Oracle recommends that
all redo log files in the redo log at the redo source database and the standby redo log at
a redo transport destination be of the same size. The standby redo log must have at least one more redo log group than the redo log at the redo source database, for each redo thread at the redo source database. At the redo source database, query the V$LOG view to determine how many redo log groups are in the redo log at the redo source database
Steps
1. First of all, delete all the archive logs shipped from primary so the disk space is cleared. Since those archive logs were not applied, we had to use FORCE option to force delete.
RMAN> DELETE NOPROMPT FORCE ARCHIVELOG ALL;
2. Shutdown standby
--set ORCLE_HOME and ORACLE_SID
. oraenv
TESTDB
SQL>ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;
SQL>SHUTDOWN IMMEDIATE
3. Remove all the database files
[oracle@test-testdb-02]$ export ORACLE_HOME=/u01/app/oracle/product/11.2.0/grid
[oracle@test-testdb-02 archivelog]$ export ORACLE_SID=+ASM
or
. oraenv
+ASM
[oracle@test-testdb-02]$ asmcmd
cd DATA
rm all the control file, datafiles and onlinelog files, just leave spfile there.
4. Startup nomount using the spfile in ASM
SQL> startup nomount
5. Duplicate For Standby From Active Database
ON PRIMARY test -testdb-01
[oracle@test-testdb-02]$ rman target /
connected to target database: TESTDB (DBID=299787364)
RMAN> connect auxiliary sys/XXXXXX@TESTDB_STANDBY
RMAN-00571: ===========================================================
RMAN-00569: =============== ERROR MESSAGE STACK FOLLOWS ===============
RMAN-00571: ===========================================================
RMAN-04006: error from auxiliary database: ORA-12528: TNS:listener: all appropriate instances are blocking new connections
Debugging for the above error.
[oracle@test-testdb-02]$ lsnrctl status
STATUS of the LISTENER
------------------------
Alias LISTENER
Version TNSLSNR for Linux: Version 11.2.0.2.0 - Production
Start Date 14-AUG-2011 00:46:29
Uptime 3 days 14 hr. 1 min. 21 sec
Trace Level off
Security ON: Local OS Authentication
SNMP OFF
Listener Parameter File /u01/app/oracle/product/11.2.0/grid/network/admin/listener.ora
Listener Log File /u01/app/oracle/product/11.2.0/grid/network/log/listener.log
Listening Endpoints Summary...
(DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=EXTPROC1521)))
(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=test-testdb-02.testsystem.edu)(PORT=1521)))
Services Summary...
Service "+ASM" has 1 instance(s).
Instance "+ASM", status READY, has 1 handler(s) for this service...
Service "TESTDB_STANDBY.testsystem.edu" has 1 instance(s).
Instance "TESTDB", status BLOCKED, has 1 handler(s) for this service...
Service "TESTDB_STANDBY_DGMGRL.testsystem.edu" has 1 instance(s).
Instance "TESTDB", status UNKNOWN, has 1 handler(s) for this service...
The command completed successfully
The output showed that TESTDB_STANDBY.testsystem.edu was blocked, probably because the instance was in nomount mode and auto-registration with the listener does not work the same way as having a static entry in the listener configuration.
To resolve this error, need to add an entry in the listener.ora file
set the oraenv first (listener for 11R2 is in grid home)
export ORACLE_HOME=/u01/app/oracle/product/11.2.0/grid
or
[oracle@test-testdb-02] . oraenv
+ASM
or
.oraenv
grid
--stop the listener
[oracle@test-testdb-02] lsnrctl stop
edit /u01/app/oracle/product/11.2.0/grid/network/admin/listener.ora
change this
SID_LIST_LISTENER =
(SID_LIST =
(SID_DESC =
(GLOBAL_DBNAME = TESTDB_STANDBY_DGMGRL.testsystem.edu)
(ORACLE_HOME = /u01/app/oracle/product/11.2.0/dbhome_1)
(SID_NAME = TESTDB)
)
)
to
SID_LIST_LISTENER =
(SID_LIST =
(SID_DESC =
(GLOBAL_DBNAME = TESTDB_STANDBY.testsystem.edu)
(ORACLE_HOME = /u01/app/oracle/product/11.2.0/dbhome_1)
(SID_NAME = TESTDB)
)
(SID_DESC =
(GLOBAL_DBNAME = TESTDB_STANDBY_DGMGRL.testsystem.edu)
(ORACLE_HOME = /u01/app/oracle/product/11.2.0/dbhome_1)
(SID_NAME = TESTDB)
)
)
[oracle@test-testdb-02] lsnrctl start
Continue with Step 5. Duplicate For Standby From Active Database
ON PRIMARY test-testdb-01
[oracle@test-testdb-02]$ rman target /
connected to target database: TESTDB (DBID=299787364)
--setup 2 parallel channels for the rman job that follows
CONFIGURE DEVICE TYPE DISK BACKUP TYPE TO COMPRESSED BACKUPSET PARALLELISM 2;
-- connect to auxiliary standby from primary rman
RMAN> connect auxiliary sys/XXXXXX@TESTDB_STANDBY
--run the rman duplicate for standby from active database command as follows
RMAN>
run {
allocate auxiliary channel C1 device type disk;
duplicate target database for standby from active database nofilenamecheck;
}
This creates a standby control file and ship to standby asm automatically when not specifying any path. This took about 2 hours.
On STANDBY test-testdb-02:
On metalink note, it says standby redo logs (SRL) need to be created after duplicate, but since we already prepared primary and standby redo logs were created on primary, those were duplicated to standby as well. Otherwise do this
SQL> ALTER DATABASE ADD STANDBY LOGFILE Group 17 SIZE 256000K;
SQL> ALTER DATABASE ADD STANDBY LOGFILE Group 18 SIZE 256000K;
...
verify redologs
select * from v$log;
verify standby logs
select * from v$standby_log
--turn on the real time recovery mode for standby
SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT FROM SESSION;
6. Validate that log shipping and applying is working
a. execute on PRIMARY database
SQL> archive log list;
Database log mode Archive Mode
Automatic archival Enabled
Archive destination /u01/app/oracle/oradata/TESTDB/archivelog/
Oldest online log sequence 5112
Next log sequence to archive 5127
Current log sequence 5127
SQL> ALTER SYSTEM SWITCH LOGFILE;
SQL> archive log list;
Database log mode Archive Mode
Automatic archival Enabled
Archive destination /u01/app/oracle/oradata/TESTDB/archivelog/
Oldest online log sequence 5113
Next log sequence to archive 5128
Current log sequence 5128
b. execute on STANDBY database
d. Verify the new redo data was archived on the standby database. On the standby database, query the V$ARCHIVED_LOG view to verify the redo data was received and archived on the standby database: SQL> SELECT SEQUENCE#, FIRST_TIME, NEXT_TIME FROM V$ARCHIVED_LOG ORDER BY SEQUENCE#; NOTE: Verify new archived redo log files were applied. At the standby database, query the V$ARCHIVED_LOG view to verify the archived redo log files were applied. SQL> SELECT SEQUENCE#,APPLIED FROM V$ARCHIVED_LOG ORDER BY SEQUENCE#;
--set some formatting things first.
SQL> alter session set NLS_DATE_FORMAT='yyyy/mm/dd-HH24:mi:ss';
SQL> set linesize 132;
col message for a100;
SQL> SELECT TIMESTAMP,DEST_ID,MESSAGE FROM V$DATAGUARD_STATUS;

TIMESTAMP DEST_ID MESSAGE
------------------- ---------- ----------------------------------------------------------------------------------------------------
2011/08/17-16:37:33 0 MRP0: Background Managed Standby Recovery process started
2011/08/17-16:37:38 0 Managed Standby Recovery starting Real Time Apply
2011/08/17-16:37:39 0 Media Recovery Log /u01/app/oracle/oradata/TESTDB/archivelog/1_4907_757654478.arc
2011/08/17-16:37:39 0 Media Recovery Log /u01/app/oracle/oradata/TESTDB/archivelog/1_4908_757654478.arc
2011/08/17-16:37:39 0 Media Recovery Log /u01/app/oracle/oradata/TESTDB/archivelog/1_4909_757654478.arc
2011/08/17-16:37:41 0 Media Recovery Log /u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_STANDBY/archivelog/2011_08_17/o1_mf_
1_4910_74rdzx01_.arc

2011/08/17-16:37:44 0 Media Recovery Waiting for thread 1 sequence 4911 (in transit)

SQL> SQL> SELECT pid,process,status,thread#,sequence#,block#,blocks FROM v$managed_standby order by sequence#;
PID PROCESS STATUS THREAD# SEQUENCE# BLOCK# BLOCKS
---------- --------- ------------ ---------- ---------- ---------- ----------
27492 ARCH CONNECTED 0 0 0 0
27494 ARCH CONNECTED 0 0 0 0
3567 RFS IDLE 0 0 0 0
3874 RFS IDLE 0 0 0 0
3571 RFS IDLE 0 0 0 0
27498 ARCH CONNECTED 0 0 0 0
3899 RFS IDLE 0 0 0 0
3569 RFS IDLE 0 0 0 0
27496 ARCH CLOSING 1 4910 262144 1715
3835 RFS IDLE 1 4911 34257 2
3934 MRP0 APPLYING_LOG 1 4911 34256 512000
Notice that the status of MRP0 is APPLYING_LOG, RFS is responsible for receiving logs check to see if the sequence number matches the primary number when doing a log switch.
SQL> SELECT SEQUENCE#, APPLIED, DEST_ID, FIRST_TIME, NEXT_TIME FROM V$ARCHIVED_LOG ORDER BY SEQUENCE#;
SEQUENCE# APPLIED DEST_ID FIRST_TIME NEXT_TIME
---------- --------- ---------- ------------------- -------------------
4907 YES 2 2011/08/17-14:53:17 2011/08/17-15:30:18
4908 YES 2 2011/08/17-15:30:18 2011/08/17-15:30:30
4909 YES 2 2011/08/17-15:30:30 2011/08/17-16:29:08
4910 NO 1 2011/08/17-16:29:08 2011/08/17-16:34:50
4910 YES 2 2011/08/17-16:29:08 2011/08/17-16:34:50
4911 IN-MEMORY 1 2011/08/17-16:34:50 2011/08/17-16:40:10
4911 IN-MEMORY 2 2011/08/17-16:34:50 2011/08/17-16:40:10
SQL> /
SEQUENCE# APPLIED DEST_ID FIRST_TIME NEXT_TIME
---------- --------- ---------- ------------------- -------------------
4907 YES 2 2011/08/17-14:53:17 2011/08/17-15:30:18
4908 YES 2 2011/08/17-15:30:18 2011/08/17-15:30:30
4909 YES 2 2011/08/17-15:30:30 2011/08/17-16:29:08
4910 NO 1 2011/08/17-16:29:08 2011/08/17-16:34:50
4910 YES 2 2011/08/17-16:29:08 2011/08/17-16:34:50
4911 IN-MEMORY 1 2011/08/17-16:34:50 2011/08/17-16:40:10
4911 YES 2 2011/08/17-16:34:50 2011/08/17-16:40:10
4912 IN-MEMORY 1 2011/08/17-16:40:10 2011/08/17-16:42:05
4912 IN-MEMORY 2 2011/08/17-16:40:10 2011/08/17-16:42:05
7. Validate ASM files
[oracle@test-testdb-02]$ . oraenv
ORACLE_SID = [TESTDB] ? +ASM
[oracle@test-testdb-02]$ asmcmd
ASMCMD> ls
DATA/
ASMCMD> cd data/TESTDB_STANDBY/
ASMCMD> ls
ASMCMD> ls
CONTROLFILE/
DATAFILE/
ONLINELOG/
PARAMETERFILE/
spfileTESTDB_STANDBY.ora
ASMCMD> cd CONTROLFILE
ASMCMD> ls
Current.335.759425381
Current.353.759425381
ASMCMD> cd ..
ASMCMD> ls
CONTROLFILE/
DATAFILE/
ONLINELOG/
PARAMETERFILE/
spfileTESTDB_STANDBY.ora
ASMCMD> cd datafile
ASMCMD> ls
TEST1_DATA.338.759427589
TEST1_INDEX.261.759428529
TEST2_DATA.260.759428553
TEST3_DATA.342.759427135
TEST3_INDEX.270.759427951
SYSAUX.330.759428759
SYSTEM.264.759428353
UNDOTBS1.349.759425891
USERS.322.759428871
ASMCMD> cd ..
ASMCMD> ls
CONTROLFILE/
DATAFILE/
ONLINELOG/
PARAMETERFILE/
spfileTESTDB_STANDBY.ora
ASMCMD> cd onlinelog
ASMCMD> ls
group_1.304.759428953
group_10.295.759428959
group_11.293.759428961
group_12.292.759428961
group_13.291.759428961
group_14.290.759428963
group_15.289.759428963
group_16.288.759428963
group_17.287.759428965
group_18.286.759428965
group_19.285.759428967
group_2.303.759428953
group_20.284.759428967
group_21.283.759428967
group_22.282.759428969
group_23.281.759428969
group_24.280.759428971
group_25.279.759428971
group_26.278.759428973
group_27.277.759428973
group_28.275.759428975
group_29.274.759428975
group_3.302.759428953
group_30.273.759428975
group_31.294.759428977
group_32.276.759428977
group_33.257.759428979
group_4.301.759428955
group_5.300.759428955
group_6.299.759428957
group_7.298.759428957
group_8.297.759428957
group_9.296.759428959
ASMCMD> cd ..
ASMCMD> ls
CONTROLFILE/
DATAFILE/
ONLINELOG/
PARAMETERFILE/
spfileTESTDB_STANDBY.ora
ASMCMD> cd parameterfile
ASMCMD> ls
spfile.352.757690657

Friday, August 5, 2011

An example Send Mail Function Using UTL_SMTP

-- send_email: send a plain-text message to a single recipient via the
-- site SMTP relay using UTL_SMTP.
--   addr - recipient e-mail address
--   msg  - message body text
-- Returns 'OK' on success, or 'Failed to send mail ...' with SQLERRM on
-- an SMTP transient/permanent error.
CREATE OR REPLACE FUNCTION send_email (addr IN VARCHAR2, msg IN VARCHAR2)
RETURN VARCHAR2
IS
c UTL_SMTP.connection;
BEGIN
-- SMTP envelope: connect, HELO, MAIL FROM, RCPT TO.
c := UTL_SMTP.open_connection ('smtp.test.edu');
UTL_SMTP.helo (c, 'smtp.test.edu');
UTL_SMTP.mail (c, 'TEST@test.edu');
UTL_SMTP.rcpt (c, addr);
UTL_SMTP.open_data (c);
-- Message headers.
UTL_SMTP.write_data (
c,
'From'
|| ': '
|| '"TEST SEND MAIL" '
|| UTL_TCP.CRLF);
UTL_SMTP.write_data (
c,
'To' || ': ' || '"Recipient" <' || addr || '>' || UTL_TCP.CRLF);
UTL_SMTP.write_data (
c,
'Subject' || ': ' || 'TEST FOR SEND MAIL' || UTL_TCP.CRLF);
UTL_SMTP.write_data (
c,
'Reply-To:'
|| '"YOUR NAME" '
|| UTL_TCP.crlf);
-- BUG FIX: RFC 5322 requires an empty line between the header section
-- and the body; without it the body was interpreted as a malformed
-- header continuation by strict mail servers.
UTL_SMTP.write_data (c, UTL_TCP.CRLF);
UTL_SMTP.write_data (c, msg);
UTL_SMTP.close_data (c);
UTL_SMTP.quit (c);
RETURN 'OK';
EXCEPTION
WHEN UTL_SMTP.transient_error OR UTL_SMTP.permanent_error
THEN
BEGIN
UTL_SMTP.quit (c);
EXCEPTION
WHEN UTL_SMTP.transient_error OR UTL_SMTP.permanent_error
THEN
NULL; -- When the SMTP server is down or unavailable, we don't have
-- a connection to the server. The quit call will raise an
-- exception that we can ignore.
END;

RETURN 'Failed to send mail due to the following error: ' || SQLERRM;
END;
/

To enable users to use the UTL_TCP and UTL_SMTP packages to access network services

See the Oracle XML DB Developer's Guide for more information

http://download.oracle.com/docs/cd/E11882_01/appdev.112/e16659/xdb21sec.htm

Oracle allows access to external network services using several PL/SQL APIs (UTL_TCP, UTL_SMTP, UTL_MAIL, UTL_HTTP and UTL_INADDR), all of which are implemented using the TCP protocol. In previous versions of the database, access to external services was effectively an on/off switch based on whether a user was granted execute permissions on a specific package or not. Oracle 11g introduces fine grained access to network services using access control lists (ACL) in the XML DB repository, allowing control over which users access which network resources, regardless of package grants.

To enable all users to use the UTL_SMTP package to access two mail servers

smtp.test.edu; smtp.test2.edu

-- Create the UTL_SMTP access control list, extend the connect privilege
-- to PUBLIC, and assign the ACL to the two permitted mail hosts.
BEGIN

-- Create the ACL; the initial grant goes to user ZENGYH.
DBMS_NETWORK_ACL_ADMIN.CREATE_ACL (
acl => 'UTL_SMTP.xml',
description => 'ACL for utl_smtp package',
principal => 'ZENGYH',
is_grant => TRUE,
privilege => 'connect',
start_date => SYSTIMESTAMP,
end_date => NULL

);

-- Extend the connect privilege to all users (PUBLIC).
DBMS_NETWORK_ACL_ADMIN.ADD_PRIVILEGE(acl => 'UTL_SMTP.xml',principal => 'PUBLIC', is_grant => TRUE,privilege => 'connect');


-- Bind the ACL to each mail server host the database may contact.
DBMS_NETWORK_ACL_ADMIN.ASSIGN_ACL (
acl => 'UTL_SMTP.xml',
host => 'smtp.test.edu'
);

DBMS_NETWORK_ACL_ADMIN.ASSIGN_ACL (
acl => 'UTL_SMTP.xml',
host => 'smtp.test2.edu'
);

END;
/
COMMIT;

If you need to assign more mail servers, just copy the DBMS_NETWORK_ACL_ADMIN.ASSIGN_ACL code from the above and replace the mail host with the new host.

To delete the access control list to start over, drop the access control list and unassign the host

exec dbms_network_acl_admin.drop_acl('UTL_SMTP.xml');

-- Unassign the ACL from the given mail host (used when starting over).
-- FIX: added the trailing "/" -- without it SQL*Plus only buffers the
-- block and never executes it.
BEGIN
DBMS_NETWORK_ACL_ADMIN.UNASSIGN_ACL(acl => 'UTL_SMTP.xml', host => 'smtp.missouri.edu');
END;
/

In addition to the above, run the Oracle mail scripts to install the UTL_MAIL package to enable sending e-mails from inside the database

SQL>@$ORACLE_HOME/rdbms/admin/utlmail.sql

SQL>@$ORACLE_HOME/rdbms/admin/prvtmail.plb

SQL>ALTER SYSTEM SET smtp_out_server='smtp.test.edu;smtp.test2.edu' scope=BOTH;

to test the mail package

exec utl_mail.send(sender => 'oracle@testdb.test.com', recipients => 'test@test', subject => 'Testing UTL_MAIL Option', message => 'blah blah blah');

create a test function send_mail to use the UTL_SMTP package

select send_email('test@test','BALBALBALBA2') from dual;

To check

select * from v$parameter
where name like 'smtp%'

SELECT acl FROM dba_network_acls
WHERE host = 'smtp.missouri.edu' AND lower_port
IS NULL AND upper_port IS NULL;

select DBMS_NETWORK_ACL_ADMIN.CHECK_PRIVILEGE('UTL_SMTP.xml',
'PUBLIC','connect') from dual;

SELECT host, lower_port, upper_port, acl
FROM dba_network_acls;

SELECT acl,
principal,
privilege,
is_grant

FROM dba_network_acl_privileges;


SELECT host, lower_port, upper_port, privilege, status
FROM user_network_acl_privileges;

WRITING to directory different than DATA_PUMP_DIR ORA-15180: could not open dynamic library ASM Library in the alert.log

User wants to run this code to write exports to a oracle external directory TEST_ORAFILE
test_file := UTL_FILE.FOPEN('TEST_ORAFILE','test9002.txt','w');

test_log := UTL_FILE.FOPEN('TEST_ORAFILE','test.log','w');



SQL>CREATE OR REPLACE DIRECTORY TEST_ORAFILE AS '/u01/home/testdba/orafile';
SQL>GRANT READ, WRITE ON DIRECTORY TEST_ORAFILE TO TEST;

when user run the code, in the alert log, there is this error
ORA-15180: could not open dynamic library ASM Library in the alert.log
there is a metalink note on this issue.
ORA-15180: Could Not Open Dynamic Library ASM Library (libasm.so Open err: open) [ID 1314022.1]

https://support.oracle.com/CSP/ui/flash.html#tab=KBHome(page=KBHome&id=()),(page=KBNavigator&id=(bmDocDsrc=KB&bmDocID=1314022.1&viewingMode=1143&bmDocType=PROBLEM&from=BOOKMARK&bmDocTitle=ORA-15180:%20Could%20Not%20Open%20Dynamic%20Library%20ASM%20Library%20(libasm.so%20Open%20err:%20open)))

The solution is
1) /opt/oracle/ directory does not have permissions for read and execute for the "others" OS group:


[root@1 1]# ls -ld /opt/
drwxr-xr-x 4 root root 4096 Mar 30 16:14 /opt/
[root@1 1]# ls -l /opt/
total 8
drwxr-x--- 4 root oinstall 4096 Mar 30 16:14 oracle <(=== here
drwxr-xr-x 3 root root 4096 Mar 30 16:14 ORCLfmap
[root@1 1]# ls -l /opt/oracle/
total 8
drwxr-x--- 2 root oinstall 4096 Mar 30 16:14 bin
drwxr-xr-x 3 root root 4096 Mar 29 11:08 extapi
[root@1 1]# ls -l /opt/oracle/extapi/
total 4
drwxr-xr-x 3 root root 4096 Mar 29 11:08 64
[root@1 1]# ls -l /opt/oracle/extapi/64/
total 4
drwxr-xr-x 3 root root 4096 Mar 29 11:08 asm
[root@1 1]# ls -l /opt/oracle/extapi/64/asm/
total 4
drwxr-xr-x 3 root root 4096 Mar 29 11:08 orcl
[root@1 1]# ls -l /opt/oracle/extapi/64/asm/orcl/
total 4
drwxr-xr-x 2 root root 4096 Mar 29 11:08 1
[root@1 1]# ls -l /opt/oracle/extapi/64/asm/orcl/1
total 20
-rwxr-xr-x 1 root root 18176 Mar 18 2009 libasm.so
[root@1 1]# ls -l /opt/oracle/extapi/64/asm/orcl/1/libasm.so
-rwxr-xr-x 1 root root 18176 Mar 18 2009 /opt/oracle/extapi/64/asm/orcl/1/libasm.so


2) Since the libasm.so is located at /opt/oracle/extapi/64/asm/orcl/1/ directory, then it cannot be read due to the read and execution permissions for the "others" OS group is closed.
Solution
Please set the in the /opt/oracle/ directory the next read and execution permissions for the "others" OS group like this:

$> ls -ld /opt/
drwxr-xr-x 4 root root 4096 Mar 30 16:14 /opt/

$> ls -l /opt/
total 8
drwxr-xr-x 4 root oinstall 4096 Mar 30 16:14 oracle <(=== here
set the /opt/oracle directory read and executeable for others OS group

After setting this readable,
user get another invalid operation error.

ORA-29283: invalid file operation

ORA-06512: at "SYS.UTL_FILE", line 536

ORA-29283: invalid file operation


This is because the os folder for this external directory was not writable by oracle

chmod 777 /u01/home/testdba/


fixed it.
CHANGE ASM DISK GROUP FROM NORMAL REDUNDANCY TO EXTERNAL REDUNDANCY
When the disk group was created with normal redundancy, the only way to change it is to drop the disk group and recreate a new one. Drop the existing diskgroup after a database backup and create a new diskgroup with the desired external redundancy.
Referred to this Metalink Note
How To Move The Database To Different Diskgroup (Change Diskgroup Redundancy) [ID 438580.1]
But since this note was for 10g to 11.1, it did not use asmca for configuring asm disk group. asmca is only available for 11R2. The steps were modified to use asmca and the init+ASM.ora was backup to /u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/asm_backup_pfile
But even it is not backed up and deleted, the asm will startup with a default set of init parameters which usually is the same as the backup.

1.Shutdown(immediate) the database and then startup mount. Take a valid RMAN backup of existing database as:
export ORACLE_HOME=/u01/app/oracle/product/11.2.0/dbhome_1
export ORACLE_SID=TESTDB
sqlplus / as sysdba

shutdown immediate;
startup mount;

rman target /

RMAN> backup device type disk format '/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/backup_redundency/%U' database ;

RMAN> backup device type disk format '/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/backup_redundency/%U' archivelog all;
2. Make copy of spfile to accessible location:

sqlplus / as sysdba
SQL> create pfile='/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/backup_redundency/initTESTDB_PRIMARY.ora' from spfile;

SQL> alter database backup control file to '/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/backup_redundency/control.ctl';

3. Shutdown the RDBMS instance
SQL> shutdown immediate


4. Connect to ASM Instance and Drop the existing Diskgroups
asm alert log is here
/u01/app/oracle/diag/asm/+asm/+ASM/trace

export ORACLE_HOME=/u01/app/oracle/product/11.2.0/grid
export ORACLE_SID=+ASM
sqlplus / as sysasm
backup asm spfile
create pfile='/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/backup_asm_pfile/init+ASM.ora'

(The reason for the force is the spfile is in ASM and is in use)
SQL> alter diskgroup data dismount force
SQL> drop diskgroup data force including contents;

START X-SERVER ON CLIENT
cd $ORACLE_HOME/bin
./asmca
create a new diskgroup with external redundancy
OR
SQL> CREATE DISKGROUP DATA EXTERNAL REDUNDANCY DISK 'ORCL:ASMDISK1' SIZE 307196M ,'ORCL:ASMDISK2' SIZE 307196M ,'ORCL:ASMDISK3' SIZE 307196M ATTRIBUTE 'compatible.asm'='11.2.0.0.0','compatible.rdbms'='10.1.0.0.0','au_size'='1M'
alter diskgroup mount

create spfile='+DATA' from memory;
or
create spfile='+DATA' from pfile='/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/backup_asm_pfile/init+ASM.ora'

shutdown immediate
startup

7. Connect to the RDBMS instance and startup in nomount state using pfile

sqlplus / as sysdba
startup nomount pfile='/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/backup_redundency/initTESTDB_PRIMARY.ora'
SQL> create spfile='+DATA' from pfile='/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/backup_redundency/initTESTDB_PRIMARY.ora'

8. Now restore the control file and backup's using RMAN

RMAN> restore controlfile from '/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/backup_redundency/control.ctl';
RMAN> alter database mount;
RMAN> restore database;
RMAN> recover database;

unable to find archive log
archive log thread=1 sequence=4
RMAN-00571: ===========================================================
RMAN-00569: =============== ERROR MESSAGE STACK FOLLOWS ===============
RMAN-00571: ===========================================================
RMAN-03002: failure of recover command at 07/05/2007 18:24:32
RMAN-06054: media recovery requesting unknown log: thread 1 seq 4 lowscn
570820

While recovery it will give an error for archive log missing, this is expected we need to open the database with resetlogs as:

--for primary database
RMAN> alter database open resetlogs

-We also need to change Flash Recovery Area to newly created diskgroup location.
--ignore this since our db_recoery_file is on OS
SQL> alter system set db_recovery_file_dest='+DG1' scope=both;


SQL> alter database flashback off ;
--SQL> alter database flashback on ;
. oraenv
+ASM
asmcmd
cd DATA/TESTDB_PRIMARY
ls -l PARAMETERFILE

PARAMETERFILE UNPROT COARSE JUL 28 11:00:00 Y spfile.256.757683869
PARAMETERFILE UNPROT COARSE JUL 28 13:00:00 Y spfile.352.757690657

ASMCMD>cd PARAMETERFILE
ASMCMD> rm spfile.256.757683869

ASMCMD>cd TESTDB_PRIMARY

mkalias +DATA/TESTDB_PRIMARY/PARAMETERFILE/spfile.352.757690657 spfileTESTDB_PRIMARY.ora


change the value inside file $ORACLE_HOME/dbs/initTESTDB.ora
from
SPFILE='+DATA/TESTDB/spfileTESTDB.ora'
to
SPFILE='+DATA/TESTDB_PRIMARY/spfileTESTDB_PRIMARY.ora'

shutdown immediate
startup
this time it uses correct spfile




clean up the files in asm in case there are extra copies
select * from dba_data_files

select * from v$tempfile

select * from v$logfile

select * from v$log

select * from v$standby_logfile

select * from v$controlfile

On primary
DGMGRL> edit database 'dbname' set property 'LogShipping'='ON';
SQL>alter system set log_archive_dest_state_2='ENABLE' scope=both



TO FIX THE spfile problem (it was in TESTDB folder, after recover, it ended up in TESTDB_PRIMARY folder)
This is what we did on PRIMARY
ASMCMD> ls -l parameterfile
Type Redund Striped Time Sys Name
PARAMETERFILE UNPROT COARSE JUL 27 14:00:00 Y spfile.256.757607617
ASMCMD> mkalias +DATA/TESTDB_PRIMARY/PARAMETERFILE/spfile.256.757607617 spfileTESTDB_PRIMARY.ora
ASMCMD> ls
CONTROLFILE/
DATAFILE/
ONLINELOG/
PARAMETERFILE/
TEMPFILE/
spfileTESTDB_PRIMARY.ora
ASMCMD>

change the value inside file $ORACLE_HOME/dbs/initTESTDB.ora
from
SPFILE='+DATA/TESTDB/spfileTESTDB.ora'
to
SPFILE='+DATA/TESTDB_PRIMARY/spfileTESTDB_PRIMARY.ora'

Configure Fast Recovery Area and RMAN

Configure Fast Recovery Area and RMAN
1. Enabling the Fast Recovery Area
This used to be called the Flash Recovery Area, but since it is often confused with the Flashback feature, it is now called the Fast Recovery Area.
It is enabled by setting two initialization parameters. Set DB_RECOVERY_FILE_DEST first.
ALTER SYSTEM SET DB_RECOVERY_FILE_DEST='/u01/app/oracle/oradata/TESTDB/archivelog' SCOPE=BOTH;
ALTER SYSTEM SET DB_RECOVERY_FILE_DEST_SIZE=520G SCOPE=BOTH;
The size needs to be larger than at least following files
Size of datafiles: 162 gb
Size of redo logs: 250mb/grp*16 groups = 4 gb
Size of 1 day of archived redo logs: 300 log switches * 250 mb/log = 73 gb
size of backupset: 20 gb/backupset * 4= 80 gb
size of control file 12 mb
After getting fast recovery area almost full in the alert log, did this
ALTER SYSTEM SET DB_RECOVERY_FILE_DEST_SIZE=800G SCOPE=BOTH;
2. Configure the Backup Retention Policy
Specifies how many full or level 0 backups of each datafile and control file RMAN should keep. If the number of backups of a file exceeds the setting, the extra backups are considered obsolete.
RMAN>CONFIGURE RETENTION POLICY TO REDUNDANCY 4;
3. Auto Backup Control File and SPfile when it changes.
RMAN>CONFIGURE CONTROLFILE AUTOBACKUP ON
4. Archive Log Deletion Policy
RMAN>CONFIGURE ARCHIVELOG DELETION POLICY TO APPLIED ON ALL STANDBY;
This policy makes sure the archive logs are not deleted until it is shipped and applied on standby
5. Configure Compression
RMAN>CONFIGURE COMPRESSION ALGORITHM 'BASIC' AS OF RELEASE 'DEFAULT' OPTIMIZE FOR LOAD TRUE ;
--The default OPTIMIZE FOR LOAD TRUE ensures RMAN avoids using additional CPU resources to perform precompression block processing.
6. Configure Parallelism
CONFIGURE DEVICE TYPE DISK BACKUP TYPE TO COMPRESSED BACKUPSET PARALLELISM 4;
7. DBID of primary TESTDBand standby TESTDB: DBID=299787364
To perform a disaster recovery, you must have the following:
Backups of all datafiles
All archived redo logs generated after the creation time of the oldest backup that you intend to restore
At least one control file autobackup
a record of DBID of the database
Setting Up Scripts
The backup scripts are stored in /home/oracle/bin/dba_scripts on both primary and standby
chmod 775 the scripts if they were not executable at the beginning.
On Primary
primarydb_backup.rman -- this script perform a full database backup and then delete the archivelog files every night
On Standby
standby_archive_clean.sh -- this script deletes archive logs every night

primarydb_backup.rman

# primarydb_backup.rman -- nightly full backup of the primary database.
# Invoked from cron by primarydb_back.sh as:
#   rman @primarydb_backup.rman USING <yyyymmdd> log <logfile>
# The &1 placeholder below is replaced with the value passed via USING.
CONNECT TARGET /
# Label this RMAN session so it can be identified while it runs.
SET COMMAND ID TO 'cron_rman_full_bk_&1';
# Full database backup to disk (goes to the fast recovery area).
backup device type disk tag 'primary_full_daily' database;
# Back up archive logs that have no backup yet, then delete them from disk.
backup device type disk tag 'primary_full_daily' archivelog all not backed up delete all input;
run {
# NOTE(review): maxpiecesize 1000 G effectively means a single piece here.
allocate channel oem_backup_disk1 type disk maxpiecesize 1000 G;
backup tag 'primary_full_daily' current controlfile;
release channel oem_backup_disk1;
}
# Housekeeping: reconcile the repository with what is actually on disk,
# then purge backups made obsolete by the retention policy.
allocate channel for maintenance type disk;
crosscheck backupset;
crosscheck archivelog all;
delete noprompt obsolete device type disk;
release channel;
EXIT;

primarydb_back.sh
#!/bin/bash
# primarydb_back.sh -- run the nightly full RMAN backup of TESTDB
# (primarydb_backup.rman) and e-mail the RMAN log to the DBA list.

# Environment setup for the Oracle 11.2 single-instance home.
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME="$ORACLE_BASE/product/11.2.0/dbhome_1"
export ORACLE_SID=TESTDB
export PATH="$ORACLE_HOME/bin:/usr/sbin:$PATH:$HOME/bin"

CURRENT_DATE=$(date +%Y%m%d)
logfile="/home/oracle/bin/dba_scripts/primarydb_backup$CURRENT_DATE.log"

# Run the backup; USING substitutes the date into &1 inside the .rman
# file, and RMAN writes its own output to $logfile via the log option.
"$ORACLE_HOME/bin/rman" @'/home/oracle/bin/dba_scripts/primarydb_backup.rman' \
  USING "$CURRENT_DATE" log "$logfile"
status=$?

# Mail the log; the subject reflects the RMAN exit status.
# (Input redirection replaces the original `cat $logfile` here-doc.)
if [ "$status" -gt 0 ]; then
  mailx -s "BACKUP FAILED: TESTDB" dba_email_here < "$logfile"
else
  mailx -s "BACKUP SUCCESSFUL: TESTDB" dba_email_here < "$logfile"
fi

rm_empty_backup_dir.sh
this script remove empty directories

# Remove directories that are currently empty under the given root
# (RMAN leaves empty date directories behind after deleting backups).
# Rewritten to be safe with paths containing spaces: the original
# iterated over unquoted $(find ...) output and parsed `ls | wc -l`.
remove_empty_dirs() {
  local root=$1
  [ -d "$root" ] || return 0
  # Snapshot the directories that are empty right now, then rmdir them.
  # This keeps the original's one-pass semantics: a directory whose only
  # contents are empty subdirectories is not removed until a later run.
  while IFS= read -r -d '' dir; do
    rmdir -- "$dir" 2>/dev/null
  done < <(find "$root" -type d -empty -print0)
  return 0
}

remove_empty_dirs /u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/backupset


standby_archive_clean.sh
#!/bin/bash
# standby_archive_clean.sh -- nightly RMAN deletion of archive logs on
# the standby, with the RMAN log e-mailed to the DBA list.

# Environment setup for the Oracle 11.2 single-instance home.
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME="$ORACLE_BASE/product/11.2.0/dbhome_1"
export ORACLE_SID=TESTDB
export PATH="$ORACLE_HOME/bin:/usr/sbin:$PATH:$HOME/bin"

CURRENT_DATE=$(date +%Y%m%d)
logfile="/u01/app/oracle/admin/TESTDB/dba_scripts/standby_archive_clean$CURRENT_DATE.log"

# Delete archive logs, then crosscheck to reconcile the repository.
# NOTE(review): assumes the configured archivelog deletion policy
# protects logs that are not yet applied -- confirm on this standby.
"$ORACLE_HOME/bin/rman" log "$logfile" << EOF
CONNECT TARGET /
SET COMMAND ID TO 'DELETE STANDBY ARCHIVE';
DELETE NOPROMPT ARCHIVELOG ALL;
CROSSCHECK ARCHIVELOG ALL;
EXIT;
EOF
status=$?

# BUG FIX: the original test was written "[$status -gt 0]" (no spaces
# around the brackets), which is not a valid command -- it failed with
# "command not found", the condition evaluated false, and the script
# mailed "SUCCESSFUL" even when RMAN had failed.
if [ "$status" -gt 0 ]; then
  mailx -s "STANDBY ARCHIVE CLEAN FAILED: TESTDB" umdoitdwdba@missouri.edu < "$logfile"
else
  mailx -s "SUCCESSFUL STANDBY ARCHIVE CLEAN: TESTDB" umdoitdwdba@missouri.edu < "$logfile"
fi

Thursday, August 4, 2011

Location of Database Files Configured with ASM and Data Guard

For Primary DB
SID=TESTDB
DB_UNIQUE_NAME=TESTDB_PRIMARY

For Standby DB
SID=TESTDB
DB_UNIQUE_NAME=TESTDB_STANDBY

ORACLE_BASE
/u01/app/oracle
ORACLE DATABASE HOME
$ORACLE_HOME=/u01/app/oracle/product/11.2.0/dbhome_1
$ORACLE_SID=TESTDB
ORACLE GRID HOME (Oracle Grid Infrastructure--including ASM and Oracle Restart)
$ORACLE_HOME=/u01/app/oracle/product/11.2.0/grid
$ORACLE_SID=+ASM
Listener Home
The default home for all listeners is the grid home
/u01/app/oracle/product/11.2.0/grid/network/admin
Alert and trace files
$ORACLE_BASE/diag/rdbms/
Data Guard Broker Configuration Datafile
$ORACLE_HOME/dbs
Data Guard Broker Configuration log
$ORACLE_BASE/diag/rdbms/trace
ASM log and trace file
/u01/app/oracle/diag/asm/+asm/+ASM
Oracle Sql Scripts
/u01/app/oracle/product/11.2.0/dbhome_1/rdbms/admin
FAST RECOVERY AREA
/u01/app/oracle/oradata/TESTDB/archivelog
TESTDB_PRIMARY DATABASE FILES
Control File -- in TESTDB_PRIMARY (DB_UNIQUE_NAME) direcotry
select * from v$controlfile;
+DATA/testdb_primary/controlfile/current.256.747592231
/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/controlfile/o1_mf_6sng3qps_.ctl
(oracle save a copy of control file in FAST RECOVERY AREA defined by DB_RECOVERY_FILE_DEST
init parameter)
Parameter File -- in TESTDB_PRIMARY (DB_UNIQUE_NAME) directory
The initSID.ora is in $ORACLE_HOME/dbs
initTESTDB.ora, inside the file it is the pointer to the spfile.
SPFILE='+DATA/TESTDB_PRIMARY/spfileTESTDB_PRIMARY.ora'
select * from V$PARAMETER where name = 'spfile'
Password File
$ORACLE_HOME/dbs
Datafile -- in TESTDB_PRIMARY (DB_UNIQUE_NAME) directory
SELECT NAME FROM V$DATAFILE;
+DATA/TESTDB_PRIMARY/DATAFILE/
Online Redo Log Files --TESTDB_PRIMARY (DB_UNIQUE_NAME) Directory

select * from v$logfile where type='ONLINE'

+DATA/testdb_primary/onlinelog/

/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/onlinelog/

oracle maintains a 2nd copy at Flash Recovery Destination

If a new online redo log is created, a corresponding standby redo log must be created on the standby.

ALTER DATABASE ADD STANDBY LOGFILE Group 17 SIZE 256000K;

Standby Redo Log Files -- TESTDB_PRIMARY (DB_UNIQUE_NAME) Directory

select * from v$logfile where type='STANDBY'

+DATA/testdb_primary/onlinelog/

/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_PRIMARY/onlinelog/

oracle maintains a 2nd copy at Flash Recovery Destination

Archive Log Files -- in TESTDB, the default DB_RECOVERY_FILE_DEST

The archive log file is saved in /u01/app/oracle/oradata/TESTDB/archivelog

show parameter DB_RECOVERY_FILE_DEST;

/u01/app/oracle/oradata/TESTDB/archivelog

show parameter log_archive_dest_1;

LOCATION=/u01/app/oracle/oradata/TESTDB/archivelog/ VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=TESTDB_PRIMARY

TESTDB_STANDBY DATABASE FILES

Control File -- in TESTDB_STANDBY (DB_UNIQUE_NAME) direcotry
select * from v$controlfile;
+DATA/testdb_standby/controlfile/current.257.757684131
/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB/controlfile/o1_mf_6qf8lbgc_.ctl
Parameter File -- in TESTDB_STANDBY(DB_UNIQUE_NAME) directory
The initSID.ora is in $ORACLE_HOME/dbs
initTESTDB.ora, inside the file it is the pointer to the spfile.
SPFILE='+DATA/TESTDB_STANDBY/spfileTESTDB.ora'
select * from V$PARAMETER where name = 'spfile'
Passowrd File
$ORACLE_HOME/dbs
Datafile -- in TESTDB_STANDBY (DB_UNIQUE_NAME) directory
SELECT NAME FROM V$DATAFILE;
+DATA/TESTDB_STANDBY/DATAFILE
Online Redo Log Files --TESTDB_STANDBY (DB_UNIQUE_NAME) Directory

select * from v$logfile where type='ONLINE'

+DATA/testdb_standby/onlinelog/

/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_STANDBY/

oracle maintains a 2nd copy at Flash Recovery Destination

If a new online redo log is created on primary, a correspoindg standby redo log must be created on standby.

ALTER DATABASE ADD STANDBY LOGFILE Group 17 SIZE 256000K;

Standby Redo Log Files -- TESTDB_STANDBY(DB_UNIQUE_NAME) Directory

select * from v$logfile where type='STANDBY'

+DATA/testdb_standby/onlinelog/

/u01/app/oracle/oradata/TESTDB/archivelog/TESTDB_STANDBY/

oracle maintains a 2nd copy at Flash Recovery Destination

Archive Log Files -- shipped archives are in TESTDB folder, standby archives are in TESTDB_STANDBY

The archive log file is saved in /u01/app/oracle/oradata/TESTDB/archivelog

show parameter DB_RECOVERY_FILE_DEST;

/u01/app/oracle/oradata/TESTDB/archivelog

show parameter log_archive_dest_1;

LOCATION=/u01/app/oracle/oradata/TESTDB/archivelog/ VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=TESTDB_STANDBY

when it is in standby role, this destination stores the archive shipped from primary

select * from V$PARAMETER where name = 'log_archive_dest_2'

LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(STANDBY_LOGFILE,STANDBY_ROLE)

This uses DB_RECOVERY_FILE_DEST which is

/u01/app/oracle/oradata/TESTDB/archivelog

when it is in the standby role with real-time log apply, the primary online log is applied to the standby redo log and then archived to this destination.

Backupset and Autobackups

When a fast recovery area is defined, RMAN automatically saves backupsets and autobackups in the fast recovery area; when it is full, RMAN automatically deletes obsolete and expired files to make room available.