However, when you initially create the object, the extents are determined by figuring out the space allocated to the newly created object, taking into account the INITIAL, NEXT, PCTINCREASE and MINEXTENTS storage parameters. So the object might start off with 1M extents instead of starting off with 64K extents. The algorithm is similar to the one outlined above, but it is more complicated. NEXT and PCTINCREASE seem to be ignored after the object is created.

e.g.
create table ... tablespace locally_managed_autoallocate
storage (initial 1M next 512K minextents 15 pctincrease 0) ...;

Initial allocation will be 1M + (15 - 1) * 512K = 8M.
When you create the table, you will see eight extents, each of one megabyte.
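As a rough check, a minimal sketch (TS_AUTO and EXTENT_DEMO are placeholder names, assuming a locally managed tablespace with AUTOALLOCATE) to count the extents right after creation:

-- placeholder table in an assumed AUTOALLOCATE tablespace
create table extent_demo (id number)
tablespace ts_auto
storage (initial 1M next 512K minextents 15 pctincrease 0);

-- expect roughly 8M allocated; the individual extent sizes are chosen by the autoallocate algorithm
select count(*) extents, sum(bytes)/1024/1024 mb
  from user_extents
 where segment_name = 'EXTENT_DEMO';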
odshis_table_11.2.0.4.txt
---------------------------------------
(PARTITION "P_18991231" VALUES LESS THAN (TO_DATE(' 1899-12-31 00:00:00', 'SYYYY-MM-DD HH24:MI:SS'
, 'NLS_CALENDAR=GREGORIAN')) SEGMENT CREATION IMMEDIATE
PCTFREE 0 PCTUSED 40 INITRANS 1 MAXTRANS 255
NOCOMPRESS LOGGING <<<<<<<<<<<<<<<<<<<<<<<<<< NOCOMPRESS
STORAGE(INITIAL 8388608 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
ods_table_11.2.0.3.txt
---------------------------------------
...
(PARTITION "P_18991231" VALUES LESS THAN (TO_DATE(' 1899-12-31 00:00:00', 'SYY
YY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIAN')) SEGMENT CREATION DEFERRED
PCTFREE 0 PCTUSED 40 INITRANS 1 MAXTRANS 255
COMPRESS BASIC LOGGING <<<<<<<<<<<<<<<<<<<<<<<<< COMPRESS
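To confirm the difference without extracting the full DDL, the partition-level compression attribute can also be compared directly in both databases (a hedged example; the table name below is a placeholder):

select table_name, partition_name, compression, compress_for
  from dba_tab_partitions
 where table_name = 'ODS_TABLE'          -- placeholder name
   and partition_name = 'P_18991231';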
########spool html format
- SET markup html ON spool ON pre off entmap off
-
- set term off
- set heading on
- set verify off
- set feedback off
-
- set linesize 2000
- set pagesize 30000
- set long 999999999
- set longchunksize 999999
-
--------------------- The above is from suyishuai's CSDN blog; full text: https://blog.csdn.net/suyishuai/article/details/22725543
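For example, with the settings above in place, a table's DDL can be spooled to an HTML file for comparison (a sketch only; the owner and table names are placeholders):

spool ods_table_ddl.html
select dbms_metadata.get_ddl('TABLE', 'ODS_TABLE', 'ODSUSER') from dual;
spool off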
######
------analyze_table_space script------
-----------------------------------------------------------------------------------------------------------
set serveroutput on size 1000000
set feedback off
prompt Input segment name, owner (default current user), type (default table), partition name (default null)
DECLARE
  p_segname            varchar2(500);
  p_owner              varchar2(100);
  p_type               varchar2(100);
  p_partition          varchar2(500);
  l_free_blks          number;
  l_total_blocks       number;
  l_total_bytes        number;
  l_unused_blocks      number;
  l_unused_bytes       number;
  l_lastUsedExtFileId  number;
  l_lastUsedExtBlockId number;
  l_last_used_block    number;
  l_segment_space_mgmt varchar2(255);
  l_unformatted_blocks number;
  l_unformatted_bytes  number;
  l_fs1_blocks number;  l_fs1_bytes number;
  l_fs2_blocks number;  l_fs2_bytes number;
  l_fs3_blocks number;  l_fs3_bytes number;
  l_fs4_blocks number;  l_fs4_bytes number;
  l_full_blocks number; l_full_bytes number;

  procedure p(p_label in varchar2, p_num in number)
  as
  begin
    dbms_output.put_line(rpad(p_label,40,'.') || to_char(p_num,'999,999,999,999'));
  end;
begin
  p_segname := upper('&p_segname');
  p_owner   := upper('&p_owner');
  if p_owner is null then
    p_owner := user;
  end if;
  p_type := upper('&p_type');
  if p_type is null then
    p_type := 'TABLE';
  end if;
  p_partition := upper('&p_partition');

  begin
    execute immediate
      'select ts.segment_space_management
         from dba_segments seg, dba_tablespaces ts
        where seg.segment_name = :p_segname
          and (:p_partition is null or seg.partition_name = :p_partition)
          and seg.owner = :p_owner
          and seg.tablespace_name = ts.tablespace_name'
      into l_segment_space_mgmt
      using p_segname, p_partition, p_partition, p_owner;
  exception
    when too_many_rows then
      dbms_output.put_line('This must be a partitioned table, use p_partition=>');
      return;
  end;

  if l_segment_space_mgmt = 'AUTO' then
    dbms_space.space_usage(p_owner, p_segname, p_type,
                           l_unformatted_blocks, l_unformatted_bytes,
                           l_fs1_blocks, l_fs1_bytes,
                           l_fs2_blocks, l_fs2_bytes,
                           l_fs3_blocks, l_fs3_bytes,
                           l_fs4_blocks, l_fs4_bytes,
                           l_full_blocks, l_full_bytes, p_partition);
    p('Unformatted Blocks  ', l_unformatted_blocks);
    p('FS1 Blocks (0-25)   ', l_fs1_blocks);
    p('FS2 Blocks (25-50)  ', l_fs2_blocks);
    p('FS3 Blocks (50-75)  ', l_fs3_blocks);
    p('FS4 Blocks (75-100) ', l_fs4_blocks);
    p('Full Blocks         ', l_full_blocks);
  else
    dbms_space.free_blocks(segment_owner     => p_owner,
                           segment_name      => p_segname,
                           segment_type      => p_type,
                           freelist_group_id => 0,
                           free_blks         => l_free_blks);
    p('Free Blocks', l_free_blks);
  end if;

  dbms_space.unused_space(segment_owner             => p_owner,
                          segment_name              => p_segname,
                          segment_type              => p_type,
                          total_blocks              => l_total_blocks,
                          total_bytes               => l_total_bytes,
                          unused_blocks             => l_unused_blocks,
                          unused_bytes              => l_unused_bytes,
                          last_used_extent_file_id  => l_lastUsedExtFileId,
                          last_used_extent_block_id => l_lastUsedExtBlockId,
                          last_used_block           => l_last_used_block,
                          partition_name            => p_partition);
  p('Total Blocks',          l_total_blocks);
  p('Total Bytes',           l_total_bytes);
  p('Total MBytes',          trunc(l_total_bytes/1024/1024));
  p('Unused Blocks',         l_unused_blocks);
  p('Unused Bytes',          l_unused_bytes);
  p('Last Used Ext FileId',  l_lastUsedExtFileId);
  p('Last Used Ext BlockId', l_lastUsedExtBlockId);
  p('Last Used Block',       l_last_used_block);
END;  -- Procedure
/
example:
Input segment name, owner (default current user), type (default table), partition name (default null)
Enter value for p_segname: BAK1604_E002_SYM_RB_ACCT
old  35: p_segname := upper('&p_segname');
new  35: p_segname := upper('BAK1604_E002_SYM_RB_ACCT');
Enter value for p_owner: OMLOPR
old  37: p_owner := upper('&p_owner');
new  37: p_owner := upper('OMLOPR');
Enter value for p_type: TABLE PARTITION
old  42: p_type := upper('&p_type');
new  42: p_type := upper('TABLE PARTITION');
Enter value for p_partition: P_29991231
###solution:
Method 1:
The way to compress data after it is inserted via a non-direct operation is to move the table and compress the data:
alter table compressed move compress;
Method 2: DataPump utilities (expdp/impdp) perform direct path operations, so table compression is maintained:
impdp test/test directory=dpu dumpfile=test_compress.dmp tables=compressed,noncompressed
Reference:
Import Bypasses Table Compression or Does Not Compress if the Table is Precreated as Compressed (Doc ID 341805.1)
APPLIES TO:
Oracle Server - Enterprise Edition - Version: 9.2.0.1 to 11.2.0.1 - Release: 9.2 to 11.2
Information in this document applies to any platform.

SYMPTOMS
The original import utility bypasses table compression, or does not compress, if the table is precreated as compressed. The following example demonstrates this.

connect / as sysdba
create tablespace tbs_compress datafile '/tmp/tbs_compress01.dbf' size 100m;
create user test identified by test default tablespace tbs_compress temporary tablespace temp;
grant connect, resource to test;

connect test/test

-- create compressed table
create table compressed
( id   number,
  text varchar2(100)
) pctfree 0 pctused 90 compress;

-- create non-compressed table
create table noncompressed
( id   number,
  text varchar2(100)
) pctfree 0 pctused 90 nocompress;

-- populate compressed table with data
begin
  for i in 1..100000 loop
    insert into compressed values (1, lpad ('1', 100, '0'));
  end loop;
  commit;
end;
/

-- populate non-compressed table with identical data
begin
  for i in 1..100000 loop
    insert into noncompressed values (1, lpad ('1', 100, '0'));
  end loop;
  commit;
end;
/

-- compress the table COMPRESSED (previous insert doesn't use the compression)
alter table compressed move compress;

connect test/test

#> exp test/test file=test_compress.dmp tables=compressed,noncompressed compress=n
...
About to export specified tables via Conventional Path ...
. . exporting table                     COMPRESSED     100000 rows exported
. . exporting table                  NONCOMPRESSED     100000 rows exported
Export terminated successfully without warnings.

and then import them back:

connect test/test
drop table compressed;
drop table noncompressed;

#> imp test/test file=test_compress.dmp tables=compressed,noncompressed
...
. importing TEST's objects into TEST
. . importing table                   "COMPRESSED"     100000 rows imported
. . importing table                "NONCOMPRESSED"     100000 rows imported
Import terminated successfully without warnings.

Verify the extents after original import:

col segment_name format a30
select segment_name, bytes, extents, blocks from user_segments;

SEGMENT_NAME                        BYTES    EXTENTS     BLOCKS
------------------------------ ---------- ---------- ----------
COMPRESSED                       11534336         26       1408
NONCOMPRESSED                    11534336         26       1408

2 rows selected.

CAUSE
This is expected behaviour. Import does not perform a bulk load / direct path operation, so the data is not inserted as compressed. Only direct path operations will compress data; these include CTAS (Create Table As Select) and SQL*Loader direct path.
SOLUTION
The way to compress data after it is inserted via a non-direct operation is to move the table and compress the data:

alter table compressed move compress;

#> expdp test/test directory=dpu dumpfile=test_compress.dmp tables=compressed,noncompressed
...
Processing object type TABLE_EXPORT/TABLE/TABLE
. . exported "TEST"."NONCOMPRESSED"                  10.30 MB  100000 rows
. . exported "TEST"."COMPRESSED"                     10.30 MB  100000 rows
Master table "TEST"."SYS_EXPORT_TABLE_01" successfully loaded/unloaded

and re-import after deletion with:

#> impdp test/test directory=dpu dumpfile=test_compress.dmp tables=compressed,noncompressed
...
Processing object type TABLE_EXPORT/TABLE/TABLE_DATA
. . imported "TEST"."NONCOMPRESSED"                  10.30 MB  100000 rows
. . imported "TEST"."COMPRESSED"                     10.30 MB  100000 rows
Job "TEST"."SYS_IMPORT_TABLE_01" successfully completed at 12:47:51

Verify the extents after DataPump import:

col segment_name format a30
select segment_name, bytes, extents, blocks from user_segments;

SEGMENT_NAME                        BYTES    EXTENTS     BLOCKS
------------------------------ ---------- ---------- ----------
COMPRESSED                        2097152         17        256
NONCOMPRESSED                    11534336         26       1408

2 rows selected.
ps: technical points related to data compression
Which tables are compressed?
To see which tables are compressed, query the following view:
SQL> SELECT table_name, compression, compress_for FROM user_tables;

TABLE_NAME       COMPRESSION  COMPRESS_FOR
---------------- ------------ -----------------
T1               DISABLED
T2               ENABLED      BASIC
T3               ENABLED      OLTP
T4               ENABLED      QUERY HIGH
T5               ENABLED      ARCHIVE LOW
To see which table partitions are compressed, query the following view:
SQL> SELECT table_name, partition_name, compression, compress_for FROM user_tab_partitions;

TABLE_NAME  PARTITION_NAME   COMPRESSION COMPRESS_FOR
----------- ---------------- ----------- ---------------
SALES       Q4_2004          ENABLED     ARCHIVE HIGH
SALES       Q3_2008          ENABLED     QUERY HIGH
SALES       Q4_2008          ENABLED     QUERY HIGH
SALES       Q1_2009          ENABLED     OLTP
SALES       Q2_2009          ENABLED     OLTP
...
How do you change the compression method?
A table's compression method can be changed to match how its data is used. For example, current transaction detail data can use OLTP compression, while data older than six months that is rarely modified can use warehouse (basic) compression.
The following statement converts a table to basic compression:
SQL> alter table emp move compress basic;
The following statement converts a table to OLTP compression:
SQL> alter table emp move compress for oltp;
The following statement converts a single partition to basic compression:
SQL> alter table part_objects move partition p3 compress basic;
The following statement changes the compression method of a partition, but the new setting only applies to newly inserted rows; existing rows are not affected.
SQL> alter table part_objects modify partition p1 compress for oltp;
The MOVE and MODIFY operations above are not online operations: while they run, DML against the affected table is blocked. To change the compression method online, use online table redefinition; a full treatment is beyond the scope of this note, see the Oracle documentation.
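A minimal sketch of such an online redefinition, assuming a table SCOTT.EMP with a primary key and an interim table EMP_INT (all names are placeholders; in practice DBMS_REDEFINITION.COPY_TABLE_DEPENDENTS is also used to carry over indexes, constraints and grants):

-- interim table created with the target compression method
create table emp_int compress for oltp as select * from emp where 1 = 0;

begin
  dbms_redefinition.can_redef_table('SCOTT', 'EMP', dbms_redefinition.cons_use_pk);
  dbms_redefinition.start_redef_table('SCOTT', 'EMP', 'EMP_INT');
  -- copy indexes/constraints/grants here with dbms_redefinition.copy_table_dependents
  dbms_redefinition.finish_redef_table('SCOTT', 'EMP', 'EMP_INT');
end;
/

drop table emp_int;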
Which rows are compressed?
Because a table's compression method can change over time, a single table can contain rows that are uncompressed or compressed with different methods. To check how a given row is compressed, pass its ROWID to the following query:
SELECT DECODE(DBMS_COMPRESSION.GET_COMPRESSION_TYPE(
                ownname => 'HR',
                tabname => 'EMPLOYEES',
                row_id  => 'AAAVEIAAGAAAABTAAD'),
              1,  'No Compression',
              2,  'Basic or OLTP Compression',
              4,  'Hybrid Columnar Compression for Query High',
              8,  'Hybrid Columnar Compression for Query Low',
              16, 'Hybrid Columnar Compression for Archive High',
              32, 'Hybrid Columnar Compression for Archive Low',
              'Unknown Compression Type') compression_type
  FROM DUAL;
Other technical points
- Compressed tables do not support the online shrink operation.
- Table compression does not apply to SecureFiles, the new 11g LOB storage, because SecureFiles uses its own compression.
- When a table uses basic compression, its PCTFREE parameter is automatically set to 0 (see the sketch after this list).
- A table using COMPRESS FOR OLTP or COMPRESS BASIC must not have more than 255 columns; the attribute can still be set on such a table, but the data is not actually compressed.
- Index-organized tables (IOTs) do not support compression.
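A small sketch to verify the PCTFREE point above (COMP_DEMO is a throwaway placeholder table):

create table comp_demo (id number, txt varchar2(100));
alter table comp_demo move compress basic;

-- expect PCT_FREE = 0, COMPRESSION = ENABLED, COMPRESS_FOR = BASIC
select table_name, pct_free, compression, compress_for
  from user_tables
 where table_name = 'COMP_DEMO';

drop table comp_demo;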
######### for a given table and owner: segment and extent sizes
set pause off
prompt /* Create an SQLPLUS variable, containing the block size */
col res new_value block_size noprint
select value res from v$parameter where name = 'db_block_size';

accept ownr  char prompt 'Give owner to display extents of: '
accept oname char prompt 'Give table name to display extents of: '

set verify off
set feed off

col tn     format a10          heading 'Name' trunc
col init   format 9,999,999    heading 'Init'
col next   format 9,999,999    heading 'Next'
col mi     format 999          heading 'Min'
col ma     format 9999999999   heading 'Max'
col pct    format 990          heading 'PctInc'
col s_init format a12          heading 'Init'
col s_next format a12          heading 'Nxt to alloc'
col sn     format a15          heading 'Segment Name'
col tn     format a15          heading 'Tabspace/FileID'
col st     format a10          heading 'Type'
col bytes  format 999,999,999

break on report
compute sum of bytes blocks on report

clear screen

prompt Storage params for the tablespace the object is in:
select distinct d.tablespace_name tn,
       initial_extent / &block_size || ' (' || initial_extent || ')' s_init,
       next_extent    / &block_size || ' (' || next_extent    || ')' s_next,
       pct_increase pct,
       min_extents  mi,
       max_extents  ma
  from sys.dba_tablespaces d, sys.dba_extents u
 where u.segment_name    = upper('&oname')
   and d.tablespace_name = u.tablespace_name
   and u.owner           = upper('&ownr');

prompt
prompt Storage params for the segment the object is in:
select distinct s.segment_name sn,
       t.initial_extent || ' (' || t.initial_extent / &block_size || ')' s_init,
       t.next_extent    || ' (' || t.next_extent    / &block_size || ')' s_next,
       t.pct_increase pct,
       t.min_extents  mi,
       t.max_extents  ma
  from sys.dba_segments s, sys.dba_tables t
 where s.segment_name = upper('&oname')
   and s.owner        = upper('&ownr')
   and t.owner        = upper('&ownr')
   and t.table_name   = s.segment_name(+);

prompt
prompt Overview of all the allocated extents in this segment:
col st  format a8
col bts format 999999999999999999999 heading "Bytes"
col bks format 9999                  heading "Blocks"
col bi  format 9999999999999999999   heading "1st Blck"
select extent_id,
       segment_name sn,
       segment_type st,
       tablespace_name || '/' || file_id tn,
       bytes  bts,
       blocks bks,
       block_id bi
  from sys.dba_extents
 where segment_name = upper('&oname')
   and owner        = upper('&ownr')
 order by bytes, extent_id;

pause Return...
clear screen
set pause on
set pause Return...

col owner format a15
prompt Overview of all extents in same tablespace ordered by size
select segment_name sn,
       segment_type st,
       extent_id,
       owner,
       bytes,
       blocks
  from sys.dba_extents
 where tablespace_name = (select tablespace_name
                            from sys.dba_segments
                           where segment_name = upper('&oname')
                             and owner        = upper('&ownr'))
 order by bytes desc;

set verify on
set feed on
clear columns
clear breaks
clear computes
/
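example (a hypothetical invocation, assuming the script above is saved as extents.sql; the owner and table entered are placeholders):

SQL> @extents.sql
Give owner to display extents of: SCOTT
Give table name to display extents of: EMP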
###########v0: avg row size for all tables of the current user
drop table column_counts;
create table column_counts
( table_name, column_count )
as
( select table_name, max(column_id)
    from user_tab_columns
   where data_type not like 'LONG%'
     and table_name in (select table_name from user_tables)
   group by table_name );

set pages 0
set tab on
set trim on
set verify off
set feedback off
set termout off
set head off
set lines 100
set recsep off
set embedded on

spool getavgcol.sql

prompt column TB format A30
prompt set head off recsep off
prompt set lines 80 feedback off pages 0
prompt spool getavgcol

REM
column select_line format A8
column end_line    format A1
column from_stmt   format A34 word_wrap
column col_nm      format A100
column col_val     format A32
column tnm1        noprint
column tnmprint    format A37
column column_id   noprint
break on tnm1 skip 2
set null ''
clear breaks

select UTC.table_name tnm1,
       decode(column_id, 1, 'select ' || chr(39) || UTC.table_name || chr(39) || ' TB, ', ' ')
       || 'round(avg(nvl(vsize(' || column_name || '),0)),0)'
       || decode(column_id, column_count,
                 ' row_size from ' || UTC.table_name || ';' || chr(10) || chr(10),
                 ' +') col_nm
  from user_tab_columns UTC, column_counts CC
 where UTC.data_type not like 'LONG%'
   and UTC.table_name = CC.table_name
 order by UTC.table_name, UTC.column_id;

prompt spool off
prompt exit

spool off

drop table column_counts;
exit

##########v1: avg row size for a single table
drop table column_counts;
create table column_counts
( table_name, column_count )
as
( select upper('&oname'), max(column_id)
    from dba_tab_columns
   where data_type not like 'LONG%'
     and table_name = upper('&oname') );

set pages 0
set tab on
set trim on
set verify off
set feedback off
set termout off
set head off
set lines 100
set recsep off
set embedded on

spool getavgcol.sql

prompt column TB format A30
prompt set head off recsep off
prompt set lines 80 feedback off pages 0
prompt spool getavgcol

REM
column select_line format A8
column end_line    format A1
column from_stmt   format A34 word_wrap
column col_nm      format A100
column col_val     format A32
column tnm1        noprint
column tnmprint    format A37
column column_id   noprint
break on tnm1 skip 2
set null ''
clear breaks

select UTC.table_name tnm1,
       decode(column_id, 1, 'select ' || chr(39) || UTC.table_name || chr(39) || ' TB, ', ' ')
       || 'round(avg(nvl(vsize(' || column_name || '),0)),0)'
       || decode(column_id, column_count,
                 ' row_size from ' || UTC.owner || '.' || UTC.table_name || ';' || chr(10) || chr(10),
                 ' +') col_nm
  from dba_tab_columns UTC, column_counts CC
 where UTC.data_type not like 'LONG%'
   and UTC.table_name = CC.table_name
 order by UTC.table_name, UTC.column_id;

prompt spool off
prompt exit

spool off

drop table column_counts;
exit
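Hypothetical usage for either version: running the generator spools getavgcol.sql, which must then be executed to produce the actual average-row-size report (spooled to getavgcol.lst by default). The generator file name below is an assumption:

SQL> @avg_row_size.sql
SQL> @getavgcol.sql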