heapscan.sql
--------------------------------------------------------------------------------
-- Test multi user lock interaction of ddl.
--------------------------------------------------------------------------------
run resource 'createTestProcedures.subsql';
autocommit off;

connect 'wombat' as deleter;
-- by default, holdability of ResultSet objects created using this Connection
-- object is true.  Following will set it to false for this connection.
NoHoldForConnection;
connect 'wombat' as scanner;
-- by default, holdability of ResultSet objects created using this Connection
-- object is true.  Following will set it to false for this connection.
NoHoldForConnection;

-- set up
set connection scanner;
set isolation CS;
run resource 'LockTableQuery.subsql';
autocommit off;

drop table data;

-- create a table with 2 rows per page.
create table data (keycol int, data varchar(2000));
insert into data values (0, PADSTRING('0',2000));
insert into data values (10, PADSTRING('100',2000));
insert into data values (20, PADSTRING('200',2000));
insert into data values (30, PADSTRING('300',2000));
insert into data values (40, PADSTRING('400',2000));
insert into data values (50, PADSTRING('100',2000));
insert into data values (60, PADSTRING('200',2000));
insert into data values (70, PADSTRING('300',2000));
insert into data values (80, PADSTRING('400',2000));
commit;

set connection deleter;
set current isolation = cursor stability;
autocommit off;
commit;
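
-- Note: createTestProcedures.subsql supplies the PADSTRING function used in
-- the inserts above (it pads its first argument out to the requested length,
-- which is how the test controls row size and rows per page), and
-- LockTableQuery.subsql defines the lock_table queried below.  Purely as an
-- illustrative sketch (not executed here, and the real resource may use
-- different column names and formatting), lock_table is the kind of view one
-- can build over the org.apache.derby.diag.LockTable diagnostic VTI:
--
--   create view lock_table as
--   select cast(l.type as char(8))       as type,
--          cast(l.lockcount as char(3))  as cnt,
--          cast(l.mode as char(4))       as mode,
--          cast(l.tablename as char(12)) as tabname,
--          cast(l.lockname as char(10))  as lockname,
--          l.state
--   from new org.apache.derby.diag.LockTable() l;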

--------------------------------------------------------------------------------
-- Test 0: position scanner in the middle of the dataset using group commit
-- in a read committed scan which uses zero duration locks, then have
-- deleter remove all the rows in the table except for the last one,
-- and wait long enough for the post commit job to reclaim the page
-- that the scanner is positioned on.  Then do a next on the scanner
-- and verify the scanner goes to the last page.
--------------------------------------------------------------------------------
set connection scanner;
create table just_to_block_on (a int);

CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.language.bulkFetchDefault', '2');
get cursor scan_cursor as 'select keycol from data';
call SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.language.bulkFetchDefault', '16');

next scan_cursor;

select * from lock_table order by tabname, type desc, mode, cnt, lockname;

-- now delete all rows but the last one, space should be reclaimed before
-- the scanner gets a chance to run.
set connection deleter;

select conglomeratename, isindex, numallocatedpages, numfreepages, pagesize, estimspacesaving
    from new org.apache.derby.diag.SpaceTable('DATA') t order by conglomeratename;
commit;

delete from data where keycol < 80;

select * from lock_table order by tabname, type desc, mode, cnt, lockname;
commit;

-- give post commit a chance to run, by hanging on a lock.
drop table just_to_block_on;
commit;

select conglomeratename, isindex, numallocatedpages, numfreepages, pagesize, estimspacesaving
    from new org.apache.derby.diag.SpaceTable('DATA') t order by conglomeratename;
commit;

set connection scanner;
-- this will return 10, from the group buffer (this looks weird as 10 is
-- deleted at this point - but that is what you get with read committed).
next scan_cursor;
-- this will now go through the code which handles jumping over deleted pages.
next scan_cursor;
commit;

--------------------------------------------------------------------------------
-- cleanup
--------------------------------------------------------------------------------
set connection deleter;
commit;
disconnect;
set connection scanner;
drop table data;
drop table just_to_block_on;
commit;
disconnect;
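
-- Note: the pair of SYSCS_SET_DATABASE_PROPERTY calls above lowers
-- derby.language.bulkFetchDefault to 2 while scan_cursor is compiled (so the
-- cursor group-fetches only 2 rows per trip to store) and then restores the
-- default of 16.  Purely as an illustration (not executed as part of this
-- test), the current setting can be read back with the standard getter:
--
--   values SYSCS_UTIL.SYSCS_GET_DATABASE_PROPERTY('derby.language.bulkFetchDefault');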

--------------------------------------------------------------------------------
-- Test 1: position scanner in the middle of the dataset using group commit
-- in a read committed scan which uses zero duration locks.  Now arrange
-- for the row the scan is positioned on to be purged by post commit,
-- but leave a row on the page for scan to reposition to.
--------------------------------------------------------------------------------

---------------
-- setup
---------------
autocommit off;

connect 'wombat' as deleter1;
connect 'wombat' as deleter2;
connect 'wombat' as scanner;
connect 'wombat' as lockholder;

-- set up
set connection scanner;
set isolation to read committed;
autocommit off;

drop table data;

-- create a table with 4 rows per page.
create table data (keycol int, data varchar(900));
insert into data values (0, PADSTRING('0',900));
insert into data values (10, PADSTRING('100',900));
insert into data values (20, PADSTRING('200',900));
insert into data values (30, PADSTRING('300',900));
insert into data values (40, PADSTRING('400',900));
insert into data values (50, PADSTRING('100',900));
insert into data values (60, PADSTRING('200',900));
insert into data values (70, PADSTRING('300',900));
insert into data values (80, PADSTRING('400',900));
create unique index idx on data (keycol);
commit;

set connection deleter1;
set isolation read committed;
autocommit off;
commit;

set connection deleter2;
set isolation READ COMMITTED;
autocommit off;
commit;

set connection lockholder;
set CURRENT isolation TO CS;
autocommit off;
commit;

---------------
-- run the test
---------------
set connection lockholder;
create table just_to_block_on (a int);

set connection scanner;
CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.language.bulkFetchDefault', '2');
get cursor scan_cursor as 'select keycol from data';
call SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.language.bulkFetchDefault', '16');

next scan_cursor;
next scan_cursor;

-- scan is now positioned on row (10, 100), as it group fetched 2 rows.

-- in deleter1 thread delete the last row on the page, but don't commit.
-- in the other deleter thread delete the rest of the rows on the page and
-- commit it, which will result in a post commit to try and reclaim all the
-- rows on the page, but it won't be able to reclaim the one that has not
-- been committed by deleter1.

-- delete in this transaction keycol (30, 300).
set connection deleter1;
delete from data where keycol = 30;

-- delete in this transaction the rest of the rows on the page.
set connection deleter2;
delete from data where keycol = 0;
delete from data where keycol = 10;
delete from data where keycol = 20;
commit;

-- block deleter threads on a lock to give post commit a chance to run.
set connection deleter2;
select * from just_to_block_on;
set connection deleter1;
select * from just_to_block_on;

-- now assume post commit has run, roll back deleter1 so that one non-deleted
-- row remains on the page.
set connection deleter1;
rollback;

-- the scanner gets a chance to run.
set connection scanner;

-- now at this point the scanner will resume and find the row it is positioned
-- on has been purged, and it will reposition automatically to (30, 300) on
-- the same page.
next scan_cursor;
next scan_cursor;
next scan_cursor;
commit;

select * from data;
commit;

--------------------------------------------------------------------------------
-- cleanup
--------------------------------------------------------------------------------
set connection scanner;
disconnect;
set connection deleter1;
disconnect;
set connection deleter2;
disconnect;
set connection lockholder;
disconnect;
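
-- Note: the tests above depend on the background post-commit job to purge
-- committed deleted rows and reclaim pages.  Outside of a timing-sensitive
-- test like this, reclamation can also be requested explicitly.  Illustrative
-- example only (not executed here; this is the standard Derby in-place
-- compress utility, whose availability depends on the Derby version in use):
--
--   -- purge committed deleted rows, defragment remaining rows, and return
--   -- free space at the end of the APP.DATA heap
--   call SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE('APP', 'DATA', 1, 1, 1);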

--------------------------------------------------------------------------------
-- Test 2: position scanner in the middle of the dataset using group commit
-- in a read committed scan which uses zero duration locks.  Now arrange
-- for the row the scan is positioned on to be purged by post commit,
-- but leave a row on the page for scan to reposition to, as in Test 1.
-- This time make the row left on the page be deleted, so when the
-- scan repositions, it should jump over the deleted row.
--------------------------------------------------------------------------------

---------------
-- setup
---------------
connect 'wombat' as deleter1;
connect 'wombat' as deleter2;
connect 'wombat' as scanner;
connect 'wombat' as lockholder;

-- set up
set connection scanner;
set isolation read committed;
autocommit off;

drop table data;

-- create a table with 4 rows per page.
create table data (keycol int, data varchar(900));
insert into data values (0, PADSTRING('0',900));
insert into data values (10, PADSTRING('100',900));
insert into data values (20, PADSTRING('200',900));
insert into data values (30, PADSTRING('300',900));
insert into data values (40, PADSTRING('400',900));
insert into data values (50, PADSTRING('100',900));
insert into data values (60, PADSTRING('200',900));
insert into data values (70, PADSTRING('300',900));
insert into data values (80, PADSTRING('400',900));
create unique index idx on data (keycol);
commit;

set connection deleter1;
set isolation read committed;
autocommit off;
commit;

set connection deleter2;
set isolation read committed;
autocommit off;
commit;

set connection lockholder;
set isolation read committed;
autocommit off;
commit;

---------------
-- run the test
---------------
set connection lockholder;
create table just_to_block_on (a int);

set connection scanner;
CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.language.bulkFetchDefault', '2');
get cursor scan_cursor as 'select keycol from data';
call SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.language.bulkFetchDefault', '16');

next scan_cursor;
next scan_cursor;

-- scan is now positioned on row (10, 100), as it group fetched 2 rows.