
📄 onlinecompresstest.java

📁 Derby database source code.
💻 JAVA
📖 Page 1 of 3
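This page shows part of an Apache Derby store test that exercises online compression. The callCompress helper used throughout is not defined on this page; presumably it wraps Derby's documented SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE system procedure. The following is a minimal sketch of such a wrapper, assuming the test's three boolean phase flags map onto the procedure's SMALLINT arguments (the class and method below are illustrative, not taken from this file):

import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.SQLException;

// Hypothetical helper: maps boolean phase flags onto the SMALLINT arguments
// of Derby's SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE system procedure.
class CompressCallSketch
{
    static void callCompress(
            Connection conn, String schema, String table,
            boolean purge, boolean defragment, boolean truncateEnd,
            boolean commit)
        throws SQLException
    {
        CallableStatement cs = conn.prepareCall(
            "call SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE(?, ?, ?, ?, ?)");
        cs.setString(1, schema);                           // schema name, e.g. "APP"
        cs.setString(2, table);                            // table name
        cs.setShort(3, (short) (purge       ? 1 : 0));     // purge committed deleted rows
        cs.setShort(4, (short) (defragment  ? 1 : 0));     // move rows to unfilled pages
        cs.setShort(5, (short) (truncateEnd ? 1 : 0));     // truncate free pages at end of file
        cs.execute();
        cs.close();

        if (commit)
            conn.commit();
    }
}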
    private void simpleDeleteAllRows(
    Connection  conn,
    boolean     create_table,
    boolean     long_table,
    String      schemaName,
    String      table_name,
    int         num_rows)
        throws SQLException
    {
        testProgress(
            "begin simpleDeleteAllRows," + num_rows + " row test, create = " +
                create_table + ".");

        if (long_table)
            createAndLoadLongTable(conn, create_table, table_name, num_rows);
        else
            createAndLoadTable(conn, create_table, table_name, num_rows, 0);

        if (verbose)
            testProgress("Calling compress.");

        // compress with no deletes should not affect size
        int[] ret_before = getSpaceInfo(conn, "APP", table_name, true);
        callCompress(conn, "APP", table_name, true, true, true, true);
        int[] ret_after  = getSpaceInfo(conn, "APP", table_name, true);

        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
        {
            log_wrong_count(
                "Expected no alloc page change.",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_ALLOC],
                ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        testProgress("no delete case complete.");

        // delete all the rows.
        ret_before = getSpaceInfo(conn, "APP", table_name, true);
        executeQuery(conn, "delete from " + table_name, true);

        if (verbose)
            testProgress("deleted all rows, now calling compress.");

        callCompress(conn, "APP", table_name, true, true, true, true);
        ret_after  = getSpaceInfo(conn, "APP", table_name, true);

        // An empty table has 2 pages, one allocation page and the 1st page
        // which will have a system row in it.  The space vti only reports
        // a count of the user pages so the count is 1.
        if (ret_after[SPACE_INFO_NUM_ALLOC] != 1)
        {
            log_wrong_count(
                "Expected all pages to be truncated.",
                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        testProgress("delete all rows case succeeded.");

        conn.commit();

        testProgress("end simple deleteAllRows," + num_rows + " row test.");
    }

    /**
     * Check/exercise purge pass phase.
     * <p>
     * Assumes that either the test creates the table, or that it is called
     * on an empty table with no committed deleted rows and no free pages in
     * the middle of it.
     * <p>
     *
     * @exception  SQLException  Standard exception policy.
     **/
    private void checkPurgePhase(
    Connection  conn,
    boolean     create_table,
    boolean     long_table,
    String      schemaName,
    String      table_name,
    int         num_rows)
        throws SQLException
    {
        testProgress(
            "begin checkPurgePhase" + num_rows + " row test, create = " +
                create_table + ".");

        if (long_table)
            createAndLoadLongTable(conn, create_table, table_name, num_rows);
        else
            createAndLoadTable(conn, create_table, table_name, num_rows, 0);

        // dump_table(conn, schemaName, table_name, false);

        // delete all the rows, but don't commit the delete
        int[] ret_before = getSpaceInfo(conn, "APP", table_name, false);
        executeQuery(conn, "delete from " + table_name, false);

        // dump_table(conn, schemaName, table_name, false);

        // Purge pass on non-committed deleted rows should do nothing.
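        //
        // (Assumption, not shown on this page: the getSpaceInfo helper used
        // above and below presumably reads Derby's space diagnostic VTI,
        // for example
        //
        //     SELECT NUMALLOCATEDPAGES, NUMFREEPAGES
        //     FROM TABLE(SYSCS_DIAG.SPACE_TABLE('APP', 'T')) T
        //     WHERE ISINDEX = 0
        //
        // with SPACE_INFO_NUM_ALLOC / SPACE_INFO_NUM_FREE indexing those
        // columns in the int[] it returns.)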
        // System.out.println(
        //     "lock info before compress call:\n " + get_lock_info(conn, true));

        // Calling compress with just the "purge" pass option, no commit called.
        callCompress(conn, "APP", table_name, true, false, false, false);

        int[] ret_after  = getSpaceInfo(conn, "APP", table_name, false);

        // expect no change in the number of allocated pages!
        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
        {
            log_wrong_count(
                "Expected no alloc page change(1).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_ALLOC],
                ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        // expect no change in the number of free pages; if there is one,
        // there is a problem with purge locking recognizing committed
        // deleted rows.
        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
        {
            log_wrong_count(
                "Expected no free page change(1).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_FREE],
                ret_after[SPACE_INFO_NUM_FREE],
                ret_before, ret_after);
        }

        // Test that it is ok to call multiple purge passes in a single xact.

        // Calling compress with just the "purge" pass option, no commit called.
        callCompress(conn, "APP", table_name, true, false, false, false);

        ret_after  = getSpaceInfo(conn, "APP", table_name, false);

        // expect no change in the number of allocated pages!
        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
        {
            log_wrong_count(
                "Expected no alloc page change(2).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_ALLOC],
                ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        // expect no change in the number of free pages; if there is one,
        // there is a problem with purge locking recognizing committed
        // deleted rows.
        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
        {
            log_wrong_count(
                "Expected no free page change(2).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_FREE],
                ret_after[SPACE_INFO_NUM_FREE],
                ret_before, ret_after);
        }

        // Since the table was just loaded, a defragment pass also should
        // not find anything to do.

        // Calling compress with just the "defragment" option, no commit called.
        // Currently the defragment option requires a table level lock in
        // the nested user transaction, which will conflict and cause a
        // lock timeout.
        try
        {
            callCompress(conn, "APP", table_name, false, true, false, false);

            logError("Defragment pass did not get a lock timeout.");
        }
        catch (SQLException sqle)
        {
            // ignore exception.
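            //
            // The defragment pass runs in a nested user transaction that
            // needs an exclusive table-level lock; that lock conflicts with
            // the locks still held by this transaction's uncommitted delete,
            // so the expected outcome is a lock timeout (in Derby typically
            // SQLState 40XL1), and reaching this catch block counts as
            // success.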
        }

        ret_after  = getSpaceInfo(conn, "APP", table_name, false);

        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
        {
            log_wrong_count(
                "Expected no alloc page change(3).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_ALLOC],
                ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
        {
            log_wrong_count(
                "Expected no free page change(3).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_FREE],
                ret_after[SPACE_INFO_NUM_FREE],
                ret_before, ret_after);
        }

        // make sure table is back to all-deleted-row state.  lock timeout
        // will abort the transaction.

        // delete all rows and commit.
        executeQuery(conn, "delete from " + table_name, true);

        // compress all space and commit.
        callCompress(conn, "APP", table_name, true, true, true, true);

        // add back all rows and commit.
        if (long_table)
            createAndLoadLongTable(conn, create_table, table_name, num_rows);
        else
            createAndLoadTable(conn, create_table, table_name, num_rows, 0);
        conn.commit();

        // delete all rows, and NO commit.
        executeQuery(conn, "delete from " + table_name, false);

        // Calling compress with just the truncate option may change the
        // allocated and free page counts, as the system may have
        // preallocated pages at the end of the file as part of the load.
        // The file can't shrink any more than the free page count before
        // the compress.

        // running the truncate pass only.  If it compresses anything it is
        // just the preallocated pages at the end of the file.

        // currently the defragment option requires a table level lock in
        // the nested user transaction, which will conflict and cause a
        // lock timeout.
        ret_before = getSpaceInfo(conn, "APP", table_name, false);
        callCompress(conn, "APP", table_name, false, false, true, false);
        ret_after  = getSpaceInfo(conn, "APP", table_name, false);

        // expect no change in the number of allocated pages!
        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
        {
            log_wrong_count(
                "Expected no alloc page change(4).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_ALLOC],
                ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        // The only space the truncate-only pass can free is free pages
        // located at the end of the file, so the free count afterward can be
        // anywhere from what it was before down to 0 pages.
        if (ret_after[SPACE_INFO_NUM_FREE] > ret_before[SPACE_INFO_NUM_FREE])
        {
            log_wrong_count(
                "Expected no increase in free pages(4).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_FREE],
                ret_after[SPACE_INFO_NUM_FREE],
                ret_before, ret_after);
        }

        // now commit the deletes, run all phases and make sure an empty
        // table results.
        conn.commit();

        // check the table.  Note that this will accumulate locks and
        // will commit the transaction.
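        //
        // (Assumption, not shown on this page: the checkConsistency helper
        // below presumably drives Derby's built-in checker, e.g. something
        // like "VALUES SYSCS_UTIL.SYSCS_CHECK_TABLE('APP', <table_name>)",
        // which returns 1 when the base table and its indexes agree and
        // raises an error otherwise.)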
        if (!checkConsistency(conn, schemaName, table_name))
        {
            logError("consistency check failed.");
        }

        // test running each phase in order.
        callCompress(conn, "APP", table_name, true,  false, false, false);
        callCompress(conn, "APP", table_name, false, true,  false, false);
        callCompress(conn, "APP", table_name, false, false, true , false);
        ret_after  = getSpaceInfo(conn, "APP", table_name, false);

        // An empty table has 2 pages, one allocation page and the 1st page
        // which will have a system row in it.  The space vti only reports
        // a count of the user pages so the count is 1.
        if (ret_after[SPACE_INFO_NUM_ALLOC] != 1)
        {
            log_wrong_count(
                "Expected all pages to be truncated.",
                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }
        if (ret_after[SPACE_INFO_NUM_FREE] != 0)
        {
            log_wrong_count(
                "Expected no free page after all pages truncated.",
                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        if (verbose)
            testProgress("calling consistency checker.");

        if (!checkConsistency(conn, schemaName, table_name))
        {
            logError("consistency check failed.");
        }

        testProgress("end checkPurgePhase" + num_rows + " row test.");
    }

    /**
     * Test 1 - various # page tests, regular rows/columns
     * <p>
     * Perform a number of insert/delete/compress operations on a variety
     * of sized tables; use space allocation information to verify that
     * compression is happening, and use the consistency checker to verify
     * that tables and indexes are all valid following the operations.
     * <p>
     * Loop through testing interesting row count cases.  The cases are:
     * 0    rows  - basic edge case, 2 page table: 1 alloc, 1 user page
     * 1    row   - another edge case, 2 page table: 1 alloc, 1 user page
     * 50   rows  - 3 page table case: 1 alloc, 1 user page, 1 user page freed
     * 4000 rows  - reasonable number of pages to test out, still 1 alloc page
     *
     * Note that row counts greater than 4000 may lead to lock escalation
     * issues if queries like "delete from x" are used to delete all the
     * rows.
     *
     * <p>
     *
     **/
    private void test1(
    Connection  conn,
    String      test_name,
    String      table_name)
        throws SQLException
    {
        beginTest(conn, test_name);

        int[] test_cases = {0, 1, 50, 4000};

        for (int i = 0; i < test_cases.length; i++)
        {
            // first create a new table and run the tests.
            deleteAllRows(
                conn, true, false, "APP", table_name, test_cases[i]);

            // now rerun the tests on the existing table, which had all rows
            // deleted and truncated.
            deleteAllRows(
                conn, false, false, "APP", table_name, test_cases[i]);

            checkPurgePhase(
                conn, false, false, "APP", table_name, test_cases[i]);

            executeQuery(conn, "drop table " + table_name, true);
        }

        endTest(conn, test_name);
    }

    /**
     * Test 2 - check repeated delete tests.
     * <p>
     * There was a timing error where test1 would usually pass, but
     * repeated execution of this test found a timing problem with
     * allocation using an "unallocated" page and getting an I/O error.
     *
     **/
    private void test2(
    Connection  conn,
    String      test_name,
    String      table_name)
        throws SQLException
    {
        beginTest(conn, test_name);

        int[] test_cases = {4000};

        for (int i = 0; i < test_cases.length; i++)
        {
            // first create a new table and run the tests.
            simpleDeleteAllRows(
                conn, true, false, "APP", table_name, test_cases[i]);

            for (int j = 0; j < 100; j++)
            {
                // now rerun the tests on the existing table, which had all
                // rows deleted and truncated.
                deleteAllRows(
                    conn, false, false, "APP", table_name, test_cases[i]);
            }

            executeQuery(conn, "drop table " + table_name, true);
        }

        endTest(conn, test_name);
    }

    /**
     * Test 3 - various # page tests, long rows and long columns
     * <p>
     * Perform a number of insert/delete/compress operations on a variety
     * of sized tables; use space allocation information to verify that
     * compression is happening, and use the consistency checker to verify
     * that tables and indexes are all valid following the operations.
     * <p>
     * Loop through testing interesting row count cases.  The cases are:
     * 0    rows  - basic edge case
     * 1    row   - another edge case
     * 100  rows  - ~50 meg table
     * 4000 rows  - ~2 gig table
