一、通用配置
1. 使用Oracle 10.2.0.4版本,并且安装所有关键补丁包
2. 下游捕获需要源和目标库运行在相同平台上
3. 准备源和目标数据库的redo logs
(1) 配置源和目标库的archivelog模式
(2) 配置本地归档目标路径,log_archive_dest_1参数,不要使用flash recovery area。
4. 建立流转用表空间:对于下游捕获,只在下游捕获数据库建立;对于上游捕获,源和目标库都建立
CREATE TABLESPACE streams_ts DATAFILE '/ora_data1/migopt/stream01.dbf' SIZE 100M AUTOEXTEND ON NEXT 10M MAXSIZE UNLIMITED
LOGGING
PERMANENT
EXTENT MANAGEMENT LOCAL AUTOALLOCATE
BLOCKSIZE 8K
SEGMENT SPACE MANAGEMENT AUTO
FLASHBACK ON;
5. 建立流管理员数据库用户
create user streamsadmin identified by streamsadmin default tablespace streams_ts temporary tablespace temp;
6. 授予流管理权限和DBA权限
execute DBMS_STREAMS_AUTH.GRANT_ADMIN_PRIVILEGE('STREAMSADMIN');
grant DBA to streamsadmin;
7. 设置关键的初始化参数
AQ_TM_PROCESSES # 1
DB_NAME # SELECT NAME FROM V$DATABASE;
DB_DOMAIN # com
GLOBAL_NAME # TRUE
COMPATIBLE # 10.2
JOB_QUEUE_PROCESSES # 建议最小值是4
_JOB_QUEUE_INTERVAL # 1
TIMED_STATISTICS # TRUE
STATISTICS_LEVEL # TYPICAL
SHARED_POOL_SIZE # 最小256M
STREAMS_POOL_SIZE # 最小256M
8. 建立源和目标库之间的数据库链
(1) 用streamsadmin登录目标数据库确认全局数据库名
select * from global_name;
(2) 用streamsadmin登录源数据库建立从源到目标的数据库链,数据库链的名字是上一步的global_name
create database link <global_name_d>
connect to streamsadmin identified by streamsadmin
using '<TNSNAMES.ORA alias entry>';
(3) 在源数据库上运行下面的查询来验证数据库链连通
select * from dual@<db_link_name>;
(4) 用streamsadmin登录目标数据库
(5) 建立从目标到源的数据库链:下游捕捉需要建立;上游捕捉可选建立
create database link <global_name_s>
connect to streamsadmin identified by streamsadmin
using '<TNSNAMES.ORA alias entry>';
(6) 在目标数据库上运行下面的查询来验证数据库链连通
select * from dual@<db_link_name>;
9. 用streamsadmin登录在源和目标库上建立目录对象
CREATE DIRECTORY streams_dir as '/ora_data1'; # 目标
CREATE DIRECTORY streams_dir as '/db'; # 源
10. 如果选择复制DDLs,需要处理源和目标库上对象或表空间名字不一致的问题
(1) 避免系统生成约束或索引的名字
(2) 数据库间保持相同的表空间名字或者使用一个DDL handler来显式的处理不一致的名字
二、建议的下游捕获配置
1. 确认步骤一的所有配置
2. 在源和目标库上修改初始化参数
源数据库:
LOG_ARCHIVE_DEST_1=
'LOCATION=+ENG/streamssrc10g/
VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE)
MANDATORY'
LOG_ARCHIVE_DEST_2=
'SERVICE=streamsdest10g_halinux06.us.oracle.com
LGWR ASYNC NOREGISTER VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE)
DB_UNIQUE_NAME=streamsdest10g'
LOG_ARCHIVE_DEST_STATE_1=ENABLE
LOG_ARCHIVE_DEST_STATE_2=DEFER
LOG_ARCHIVE_CONFIG='SEND, DG_CONFIG=(streamssrc10g,streamsdest10g)'
LOG_ARCHIVE_MAX_PROCESSES=4
# SERVICE是TNS服务名
# DG_CONFIG里面写两个库的db_unique_name
目的数据库:
LOG_ARCHIVE_DEST_1=
'LOCATION=use_db_recovery_file_dest
VALID_FOR=(ONLINE_LOGFILE,PRIMARY_ROLE)'
LOG_ARCHIVE_DEST_2=
'LOCATION=+ENG/streamsdest10g/arch_srl_streamss/
VALID_FOR=(STANDBY_LOGFILE,PRIMARY_ROLE)'
LOG_ARCHIVE_DEST_STATE_1=ENABLE
LOG_ARCHIVE_DEST_STATE_2=ENABLE
LOG_ARCHIVE_CONFIG='RECEIVE, DG_CONFIG=(streamssrc10g,streamsdest10g)'
LOG_ARCHIVE_MAX_PROCESSES=4
3. 在下游数据库配置standby redo logs
(1) 在源数据库上查询v$log视图找到redo log groups的个数,用下面的公式计算SRLs的数量
Number of SRLs = sum of all production online redo log groups for each thread + number of threads
# threads即instance
(2) 在下游数据库里添加standby redo logs
ALTER DATABASE ADD STANDBY LOGFILE GROUP 4
('+ENG/streamsdest10g/standbylog/srl_m1.dbf', '+ENG/streamsdest10g/standbylog/srl_m2.dbf')
SIZE 1024M;
(3) 查询v$standby_log视图验证日志组和状态
SELECT GROUP#,SEQUENCE#,STATUS FROM V$STANDBY_LOG;
初始状态下,sequence#为0,status为UNASSIGNED
4. 在源数据库上启用远程归档重做日志目标
(1) 开始传送redo数据
ALTER SYSTEM SET LOG_ARCHIVE_DEST_STATE_2='ENABLE' SCOPE=BOTH;
(2) 切换日志文件
ALTER SYSTEM SWITCH LOGFILE;
(3) 查询v$archive_dest_status验证归档日志目标
SELECT DEST_ID, DEST_NAME, DESTINATION, DATABASE_NAME, SRL, ERROR FROM V$ARCHIVE_DEST_STATUS;
5. 在下游库上复制模式,用streamsadmin在下游数据库上执行:
BEGIN
DBMS_STREAMS_ADM.MAINTAIN_SCHEMAS(
SCHEMA_NAMES => 'TEST1,TEST2,TEST3,TEST4,TEST5,TEST6', # 复制的模式名
SOURCE_DATABASE => 'STREAMSS.US.ORACLE.COM', # 源库的GLOBAL_NAME
SOURCE_DIRECTORY_OBJECT => 'STREAMS_DIR',
DESTINATION_DATABASE => 'STREAMSD.US.ORACLE.COM', # 目标库的GLOBAL_NAME
DESTINATION_DIRECTORY_OBJECT => 'STREAMS_DIR',
CAPTURE_QUEUE_NAME => 'DS_STREAMS_QUEUE',
APPLY_QUEUE_NAME => 'DS_STREAMS_QUEUE',
BI_DIRECTIONAL => FALSE,
INCLUDE_DDL => FALSE );
END;
/
# 可以通过查询dba_recoverable_script视图和两个库上的alert.log文件来监控执行过程,也可以在过程运行时查询下游库上的STREAMS_BUILD_STATUS视图
CREATE OR REPLACE VIEW STREAMS_BUILD_STATUS AS
SELECT TO_CHAR(rs.creation_time,'HH24:Mi:SS MM/DD/YY') CREATE_DATE,
rs.status, rs.done_block_num||' of '||rs.total_blocks||' Steps Completed' PROGRESS,
TO_CHAR(to_number(sysdate-rs.creation_time)*86400,'9999.99') ELAPSED_SECONDS,
substr(to_char(rsb.forward_block),1,100) CURRENT_STEP,
rs.invoking_package||'.'||rs.invoking_procedure PROCEDURE, rs.script_id
FROM DBA_RECOVERABLE_SCRIPT rs,
DBA_RECOVERABLE_SCRIPT_BLOCKS rsb
WHERE rs.script_id = rsb.script_id AND rsb.block_num = rs.done_block_num + 1;
6. 在下游库上为捕获进程配置实时挖掘,用streamsadmin在下游数据库上执行:
BEGIN
DBMS_CAPTURE_ADM.SET_PARAMETER(
capture_name => 'DS_REALTIME_CAPTURE',
parameter => 'downstream_real_time_mine',
value => 'y');
END;
/
7. 验证下游数据库
(1) 在源库上切换日志
ALTER SYSTEM SWITCH LOGFILE;
(2) 查询下游库的v$standby_log视图
SELECT GROUP#,SEQUENCE#, FIRST_TIME, FIRST_CHANGE#, LAST_CHANGE#, ARCHIVED,STATUS FROM V$STANDBY_LOG;
# 如果FIRST_CHANGE#, LAST_CHANGE#变化,则说明实时挖掘下游捕捉正常配置
三、建议的上游捕获配置
1. 确认步骤一的所有配置
2. 在源和目标库上修改初始化参数
源数据库:
LOG_ARCHIVE_DEST_1=
'LOCATION=+ENG/streamssrc10g/archivelog/
VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE)
MANDATORY'
LOG_ARCHIVE_DEST_STATE_1=ENABLE
LOG_ARCHIVE_MAX_PROCESSES=4
目的数据库:
LOG_ARCHIVE_DEST_1=
'LOCATION=+CX700/streamsdestb10g/
VALID_FOR=(ONLINE_LOGFILE,PRIMARY_ROLE)'
LOG_ARCHIVE_DEST_STATE_1=ENABLE
3. 复制模式,用streamsadmin在源数据库上执行:
BEGIN
DBMS_STREAMS_ADM.MAINTAIN_SCHEMAS(
SCHEMA_NAMES => 'TEST1,TEST2,TEST3,TEST4,TEST5,TEST6', # 复制的模式名
SOURCE_DATABASE => 'STREAMSS.US.ORACLE.COM', # 源库的GLOBAL_NAME
SOURCE_DIRECTORY_OBJECT => 'STREAMS_DIR',
DESTINATION_DATABASE => 'STREAMSD.US.ORACLE.COM', # 目标库的GLOBAL_NAME
DESTINATION_DIRECTORY_OBJECT => 'STREAMS_DIR',
BI_DIRECTIONAL => FALSE,
INCLUDE_DDL => FALSE );
END;
/
# 可以通过查询dba_recoverable_script视图和两个库上的alert.log文件来监控执行过程,也可以在过程运行时查询下游库上的STREAMS_BUILD_STATUS视图
CREATE OR REPLACE VIEW STREAMS_BUILD_STATUS AS
SELECT TO_CHAR(rs.creation_time,'HH24:Mi:SS MM/DD/YY') CREATE_DATE,
rs.status, rs.done_block_num||' of '||rs.total_blocks||' Steps Completed' PROGRESS,
TO_CHAR(to_number(sysdate-rs.creation_time)*86400,'9999.99') ELAPSED_SECONDS,
substr(to_char(rsb.forward_block),1,100) CURRENT_STEP,
rs.invoking_package||'.'||rs.invoking_procedure PROCEDURE, rs.script_id
FROM DBA_RECOVERABLE_SCRIPT rs,
DBA_RECOVERABLE_SCRIPT_BLOCKS rsb
WHERE rs.script_id = rsb.script_id AND rsb.block_num = rs.done_block_num + 1;
四、配置后任务
1. 设置捕获进程的CHECKPOINT_RETENTION_TIME参数
BEGIN
DBMS_CAPTURE_ADM.ALTER_CAPTURE(
capture_name => 'DS_REALTIME_CAPTURE',
checkpoint_retention_time => 7 ); # 缺省为60
END;
/
2. 设置应用并行度为4
BEGIN
DBMS_APPLY_ADM.SET_PARAMETER('APPLY$_STREAMSS_36',
'PARALLELISM','4');
END;
/
3. 运行流健康检查脚本
Oracle Metalink Note 273674.1.
五、调整网络配置参数
1. 在所有服务器上设置TCP/IP网络参数
(1) 设置读写缓冲区最大值网络核心参数
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
(2) 设置TCP/IP最小、缺省、最大缓冲区大小
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
#编辑/etc/sysctl.conf文件,执行sysctl -p命令生效
2. 设置Oracle Net Session Data Unit (SDU) 大小
修改源库的SQLNET.ORA文件,例如:
streamsdest10g_halinux06.us.oracle.com=
(DESCRIPTION=
(SDU=32767)
(ADDRESS=(PROTOCOL=tcp)
(HOST=halinux06vip)
(PORT=1521))
(CONNECT_DATA=
(SERVER = DEDICATED)
(SERVICE_NAME = streamsd.us.oracle.com))
)
修改目标库的LISTENER.ORA文件,例如:
SID_LIST_listener_name=
(SID_LIST=
(SID_DESC=
(SDU=32767)
(GLOBAL_DBNAME=streamsd.us.oracle.com)
(SID_NAME=STRM10g6)
(ORACLE_HOME=/usr/oracle)))
3. 设置TCP Socket Buffer 大小
公式(例如 带宽1Gbps,RTT 25毫秒):
BDP = 1,000 Mbps * 25 msec (.025 sec)
    = 1,000,000,000 * .025
    = 25,000,000 bits / 8 = 3,125,000 bytes
socket buffer size = 3 * bandwidth * delay
                   = 3 * 3,125,000
                   = 9,375,000 bytes
修改两个库的SQLNET.ORA文件
streamsdest10g_halinux06.us.oracle.com =
(DESCRIPTION=
(SDU=32767)
(SEND_BUF_SIZE=9375000)
(RECV_BUF_SIZE=9375000)
(ADDRESS=(PROTOCOL=tcp)
(HOST=halinux06vip)(PORT=1521))
(CONNECT_DATA=
(SERVER = DEDICATED)
(SERVICE_NAME = streamsd.us.oracle.com)))
4. 设置网络设备队列大小
例如:
echo 20000 > /proc/sys/net/core/netdev_max_backlog
echo 1 > /proc/sys/net/ipv4/route/flush
ifconfig eth0 txqueuelen 10000