Dynamic Sampling at AskTom
I saw an interesting question on dynamic sampling over at Ask Tom and ended up pretty much dumping a half-finished blog posting in reply. I'll save myself the electrons of publishing it here by just linking to it. Enjoy.
As I said yesterday, I've been fiddling around with loading trace files into the database for analysis, and today I was looking for a good test case to see how the numbers are coming out. The obvious example to choose was the direct path enhancement to materialized view refresh that I also wrote about recently.
drop table master;
drop table master_test_data;
drop materialized view master_mv1;
drop materialized view master_mv2;
create table master
(
location_cd number not null,
tx_timestamp date not null,
product_cd number not null,
tx_qty number not null,
tx_cst number not null
)
pctfree 0
nologging
/
create materialized view log on master
with rowid
(
location_cd,
tx_timestamp,
product_cd,
tx_qty,
tx_cst
)
including new values
/
create materialized view master_mv1
Using No Index
Refresh Fast On Commit
Enable Query Rewrite
As
Select
location_cd,
trunc(tx_timestamp,'MM') tx_month,
sum(tx_qty) s_tx_qty,
sum(tx_cst) s_tx_cst,
count(*) c_star
from
master
group by
location_cd,
trunc(tx_timestamp,'MM')
/
create materialized view master_mv2
Using No Index
Refresh Fast On Commit
Enable Query Rewrite
As
Select
product_cd,
trunc(tx_timestamp,'MM') tx_month,
sum(tx_qty) sum_tx_qty,
sum(tx_cst) sum_tx_cst,
count(*) c_star
from
master
group by
product_cd,
trunc(tx_timestamp,'MM')
/
create table master_test_data
(
location_cd not null,
tx_timestamp not null,
product_cd not null,
tx_qty not null,
tx_cst not null
)
pctfree 0
nologging
as
select
trunc(dbms_random.value(1,10)),
to_date('01-jan-2005','DD-Mon-YYYY')
+ dbms_random.value(1,31),
trunc(dbms_random.value(1,1000)),
trunc(dbms_random.value(1,10)),
trunc(dbms_random.value(1,100),2)
from
dual
connect by
1=1 and
level < 1000001
/
commit
/
select count(*) from master_test_data
/
alter session set timed_statistics = true;
alter session set max_dump_file_size=1000000000;
alter session set events '10046 trace name context forever, level 8';
insert into master
select *
from master_test_data
where rownum < 100001
/
commit
/
exit
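That script gives the conventional path trace. The direct path run is the same load from a second session, and although only one version of the script appears here, the natural way to get a serial direct path insert is a plain APPEND hint; a sketch of that variant:
insert /*+ append */ into master
select *
from master_test_data
where rownum < 100001
/
commit
/
exit
With both trace files on disk they can be loaded into the repository: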
begin
trace.purge; -- purges all data from the trace file repository
trace.load_file
(directory =>'UDUMP',
file_name => 'slimdb_ora_4412.trc',
identify_as => 'direct path',
debug => 0,
log_extents => true,
log_files => true);
trace.load_file
(directory =>'UDUMP',
file_name => 'slimdb_ora_4600.trc',
identify_as => 'conv path',
debug => 0,
log_extents => true,
log_files => true);
end;
/
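Incidentally, the trace file names follow the usual <instance>_ora_<spid>.trc pattern, so if you need to work out which file a given session will write to, the server process id is available from v$process. A sketch, run from the session being traced:
select p.spid
from v$process p,
     v$session s
where p.addr = s.paddr
and s.sid = (select sid from v$mystat where rownum = 1)
/
With the files loaded, a first cut at the numbers: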
SQL> select
2 substr(identify_as,1,12),
3 op_name,
4 sum(op_c),
5 sum(op_e)/1000000 sec,
6 count(*)
7 from
8 (
9 select
10 identify_as,
11 cur_dep,
12 f.file#,
13 f.file_name,
14 c.cursor#,
15 o.op_name,
16 OP_C ,
17 OP_E ,
18 OP_P ,
19 OP_CR ,
20 OP_CU ,
21 OP_TIM
22 from
23 trace_ops o,
24 trace_cursors c,
25 trace_files f
26 where
27 f.file# = c.file# and
28 c.cursor# = o.cursor#
29 )
30 group by
31 substr(identify_as,1,12),
32 op_name
33 order by
34 1,2,4 desc
35 /
SUBSTR(IDENT OP_NAME SUM(OP_C) SEC COUNT(*)
------------ ---------- ---------- ---------- ----------
conv path EXEC 20875000 33.358519 100342
conv path FETCH 2296875 3.026768 304
conv path PARSE 156250 .661319 333
direct path EXEC 2890625 3.476836 350
direct path FETCH 140625 .406403 402
direct path PARSE 859375 1.302354 224
6 rows selected.
Of course all the interesting work is going on behind the scenes in the form of recursive SQL:
SQL> select
2 substr(identify_as,1,12),
3 cur_dep,
4 op_name,
5 sum(op_c),
6 sum(op_e)/1000000 sec,
7 count(*)
8 from
9 (
10 select
11 identify_as,
12 cur_dep,
13 f.file#,
14 f.file_name,
15 c.cursor#,
16 o.op_name,
17 OP_C ,
18 OP_E ,
19 OP_P ,
20 OP_CR ,
21 OP_CU ,
22 OP_TIM
23 from
24 trace_ops o,
25 trace_cursors c,
26 trace_files f
27 where
28 f.file# = c.file# and
29 c.cursor# = o.cursor#
30 )
31 group by
32 substr(identify_as,1,12),
33 cur_dep,
34 op_name
35 order by
36 1,2,3,5 desc
37 /
SUBSTR(IDENT CUR_DEP OP_NAME SUM(OP_C) SEC COUNT(*)
------------ ---------- ---------- ---------- ---------- ----------
conv path 0 EXEC 14484375 22.637736 18
conv path 0 FETCH 2125000 2.342986 15
conv path 0 PARSE 93750 .56999 16
conv path 1 EXEC 6343750 10.694057 100188
conv path 1 FETCH 109375 .619292 147
conv path 1 PARSE 62500 .079386 187
conv path 2 EXEC 46875 .026135 128
conv path 2 FETCH 62500 .062152 134
conv path 2 PARSE 0 .009992 128
conv path 3 EXEC 0 .00006 1
conv path 3 FETCH 0 .002111 1
conv path 3 PARSE 0 .001875 1
conv path 4 EXEC 0 .000531 7
conv path 4 FETCH 0 .000227 7
conv path 4 PARSE 0 .000076 1
direct path 0 EXEC 1718750 2.218695 3
direct path 0 PARSE 31250 .17893 2
direct path 1 EXEC 750000 .841392 138
direct path 1 FETCH 15625 .128181 91
direct path 1 PARSE 343750 .47748 133
direct path 2 EXEC 140625 .14998 124
direct path 2 FETCH 109375 .168264 124
direct path 2 PARSE 421875 .52747 27
direct path 3 EXEC 281250 .260751 78
direct path 3 FETCH 15625 .068693 180
direct path 3 PARSE 46875 .109109 61
direct path 4 EXEC 0 .006018 7
direct path 4 FETCH 0 .041265 7
direct path 4 PARSE 15625 .009365 1
29 rows selected.
SQL> select CUR_SQL_TEXT
2 from trace_cursors
3 where cursor# =
4 (
5 select cursor#
6 from
7 (
8 Select cursor#,count(*)
9 from trace_ops
10 where op_name = 'EXEC'
11 group by cursor#
12 order by 2 desc
13 )
14 where rownum = 1
15 )
16 /
CUR_SQL_TEXT
--------------------------------------------------------------------------------
INSERT /*+ IDX(0) */ INTO "DAVE"."MLOG$_MASTER" (dmltype$$,old_new$$,snaptime$$,
change_vector$$,m_row$$,"LOCATION_CD","TX_TIMESTAMP","PRODUCT_CD","TX_QTY","TX_C
ST") VALUES (:d,:o,to_date('4000-01-01:00:00:00','YYYY-MM-DD:HH24:MI:SS'),:c,:m,
:1,:2,:3,:4,:5)
Hey, at least it's using bind variables! And with one execution of that statement for each of the 100,000 rows loaded (the 100,188 dep=1 EXECs on the conventional path run), it's not hard to see where the extra thirty seconds went.
Funny how easy it is to get side-tracked. Well, not really side-tracked, but just more and more involved in one particular thing, so that you end up burrowing so deeply into it that it loses all relevance to the real world. Just as a simple SQL statement generates deeper and deeper levels of more and more obscure recursive statements, so it can be with the most simple of tasks.
begin
trace.load_file
(directory =>'UDUMP',                -- directory object for the udump location
file_name => 'slimdb_ora_4680.trc', -- the trace file to load
debug => 0,
log_extents => true,
log_files => true);
end;
/
SQL> select * from all_trace_files;
FILE# FILE_NAME READ_DATE
---------- ------------------------------ ---------
132440 slimdb_ora_4680.trc 05-OCT-05
SQL> column SUM_WT_ELA format 999,999,990
SQL> column AVG_WT_ELA format 999,999,990
SQL> column waits format 999,990
SQL> break on report
SQL> compute sum of SUM_WT_ELA on report
SQL> select substr(WT_NAME,1,25) wt_name,
2 sum(WT_ELA) sum_wt_ela,
3 avg(wt_ela) avg_wt_ela,
4 count(*) waits
5 from last_trace_waits
6 group by substr(WT_NAME,1,25)
7 /
WT_NAME SUM_WT_ELA AVG_WT_ELA WAITS
------------------------- ------------ ------------ --------
SQL*Net message from clie 27,372,317 9,124,106 3
SQL*Net message to client 14 5 3
db file scattered read 4,771,858 18,640 256
db file sequential read 1,490,384 3,644 409
------------
sum 33,634,573
SQL> select CUR_DEP,
2 count(*)
3 from last_trace_cursors
4 group by cur_dep
5 order by 1
6 /
CUR_DEP COUNT(*)
---------- ----------
0 2
1 391
2 2189
3 92
4 2
5 1
6 rows selected.
SQL> set pagesize 100
SQL> select substr(WT_NAME,1,23) wt_name,
2 substr(segment_name,1,20) segment_name,
3 sum(WT_ELA) sum_wt_ela,
4 avg(wt_ela) avg_wt_ela,
5 count(*) waits
6 from last_trace_obj_waits
7 group by substr(WT_NAME,1,23),substr(segment_name,1,20)
8 order by 3 desc
9 /
WT_NAME SEGMENT_NAME SUM_WT_ELA AVG_WT_ELA WAITS
----------------------- -------------------- ------------ ------------ --------
db file scattered read FCT_FSA_1 4,698,204 19,908 236
db file sequential read INDSUBPART$ 3,963,720 1,149 3,450
db file sequential read C_OBJ#_INTCOL# 2,355,677 4,403 535
db file sequential read C_FILE#_BLOCK# 2,129,218 1,342 1,587
db file sequential read I_OBJ2 676,269 1,375 492
db file sequential read I_FILE#_BLOCK# 470,275 4,237 111
db file sequential read FCT_FSA_1 417,323 5,962 70
db file sequential read TABSUBPART$ 170,888 4,069 42
db file scattered read INDCOMPART$ 130,647 6,221 21
db file sequential read HIST_HEAD$ 62,906 7,863 8
db file sequential read I_INDSUBPART$ 61,507 1,922 32
db file scattered read TABCOMPART$ 57,753 6,417 9
db file sequential read I_CDEF2 34,272 4,284 8
db file sequential read SNAP$ 30,162 7,541 4
db file sequential read PARTOBJ$ 29,690 7,423 4
db file sequential read SUMAGG$ 24,404 6,101 4
db file sequential read INDCOMPART$ 19,534 9,767 2
db file sequential read SUMKEY$ 19,116 4,779 4
db file sequential read DIMJOINKEY$ 17,296 4,324 4
db file sequential read PARTCOL$ 15,068 3,767 4
db file sequential read TABCOMPART$ 14,988 7,494 2
db file sequential read SUMDELTA$ 12,143 12,143 1
db file sequential read I_TABSUBPART$ 11,146 5,573 2
db file sequential read I_CDEF3 10,627 10,627 1
db file sequential read I_SUBCOLTYPE1 10,131 10,131 1
db file sequential read I_NTAB2 9,696 9,696 1
db file sequential read I_CCOL1 9,366 4,683 2
db file sequential read C_COBJ# 9,322 9,322 1
db file sequential read SUM$ 8,722 4,361 2
db file sequential read I_OBJ#_INTCOL# 7,496 7,496 1
db file sequential read I_SUBPARTCOL$ 7,059 7,059 1
db file sequential read I_DIMATTR$_2 6,601 6,601 1
db file sequential read DEPENDENCY$ 6,432 3,216 2
db file sequential read I_HH_OBJ#_INTCOL# 6,267 6,267 1
db file sequential read I_DEFSUBPART$ 5,595 5,595 1
db file sequential read VIEW$ 5,231 5,231 1
db file sequential read DEFSUBPART$ 4,795 4,795 1
db file sequential read I_DEPENDENCY1 4,649 4,649 1
db file sequential read I_REFCON2 3,544 3,544 1
db file sequential read I_PARTCOL$ 2,705 2,705 1
db file scattered read SUM$ 742 371 2
db file sequential read I_OPQTYPE1 195 195 1
db file sequential read SUBPARTCOL$ 138 138 1
db file sequential read I_DIMLEVELKEY$_2 136 136 1
------------
sum 15,541,655