⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 onlinecompresstest.java

📁 Derby database source code. Good for you.
💻 JAVA
📖 第 1 页 / 共 3 页
字号:
     *     * note that row numbers greater than 4000 may lead to lock escalation     * issues, if queries like "delete from x" are used to delete all the      * rows.     *     * <p>     *     **/    private void test3(    Connection  conn,    String      test_name,    String      table_name)        throws SQLException     {        beginTest(conn, test_name);        // note that 500 rows took 30 minutes on a ~1.5 ghz laptop        int[] test_cases = {1, 2, 50};        for (int i = 0; i < test_cases.length; i++)        {            // first create new table and run the tests.            deleteAllRows(                conn, true, true, "APP", table_name, test_cases[i]);            // now rerun tests on existing table, which had all rows deleted            // and truncated.            deleteAllRows(                conn, false, true, "APP", table_name, test_cases[i]);            checkPurgePhase(                conn, false, true, "APP", table_name, test_cases[i]);            executeQuery(conn, "drop table " + table_name, true);        }        endTest(conn, test_name);    }    /**     * Test 4 - check repeated delete tests.     * <p>     * There was a timing error where test1 would usually pass, but      * repeated execution of this test found a timing problem with     * allocation using an "unallocated" page and getting an I/O error.     *     **/    private void test4(    Connection  conn,    String      test_name,    String      table_name)        throws SQLException     {        beginTest(conn, test_name);        int[] test_cases = {4000};        for (int i = 0; i < test_cases.length; i++)        {            for (int j = 0; j < 100; j++)            {                // first create new table and run the tests.                simpleDeleteAllRows(                    conn, true, false, "APP", table_name, test_cases[i]);                // now rerun tests on existing table, which had all rows deleted                // and truncated.                
deleteAllRows(                    conn, false, false, "APP", table_name, test_cases[i]);                executeQuery(conn, "drop table " + table_name, true);            }        }        endTest(conn, test_name);    }    /**     * Create and load table for test5.     * <p>     * schema of table:     *     keycol   int,      *     onehalf  int,      *     onethird int,      *     c        varchar(300)     *     * @param conn          Connection to use for sql execution.     * @param create_table  If true, create new table - otherwise load into     *                      existing table.     * @param tblname       table to use.     * @param num_rows      number of rows to add to the table.     *	 * @exception  StandardException  Standard exception policy.     **/    private void test5_load(    Connection  conn,    String      schemaName,    String      table_name,    int         num_rows)        throws SQLException    {        Statement s = conn.createStatement();        s.execute(            "create table " + table_name +             " (keycol integer primary key, onehalf integer, onethird integer, c varchar(300))");        s.close();        PreparedStatement insert_stmt =             conn.prepareStatement(                "insert into " + table_name + " values(?, ?, ?, ?)");        char[]  data1_data = new char[200];        for (int i = 0; i < data1_data.length; i++)        {            data1_data[i] = 'b';        }        String  data1_str = new String(data1_data);        for (int i = 0; i < num_rows; i++)        {            insert_stmt.setInt(1, i);               // keycol            insert_stmt.setInt(2, i % 2);           // onehalf:  0 or 1             insert_stmt.setInt(3, i % 3);           // onethird: 0, 1, or 3            insert_stmt.setString(4, data1_str);    // c            insert_stmt.execute();        }        conn.commit();    }        /**     * Execute test5, simple defragement test.      
* <p>     * o delete every other row, defragment     * o delete every third row, defragment     * o delete last 1000 rows, defragment     * o delete first 512 rows, defragment.     * <p>     * run test with at least 2000 rows.     **/    private void test5_run(    Connection  conn,    String      schemaName,    String      table_name,    int         num_rows)        throws SQLException    {        testProgress("begin test5: " + num_rows + " row test.");        if (verbose)            testProgress("Calling compress.");        // compress with no deletes should not affect size        int[] ret_before = getSpaceInfo(conn, "APP", table_name, true);        callCompress(conn, "APP", table_name, true, true, true, true);        int[] ret_after  = getSpaceInfo(conn, "APP", table_name, true);        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])        {            log_wrong_count(                "Expected no alloc page change.",                 table_name, num_rows,                 ret_before[SPACE_INFO_NUM_ALLOC],                 ret_after[SPACE_INFO_NUM_ALLOC],                ret_before, ret_after);        }        if (verbose)            testProgress("calling consistency checker.");        if (!checkConsistency(conn, schemaName, table_name))        {            logError("conistency check failed.");        }        // DELETE EVERY OTHER ROW, COMPRESS, CHECK        //        //        // delete all the rows every other row.        
ret_before = getSpaceInfo(conn, "APP", table_name, true);        executeQuery(            conn, "delete from " + table_name + " where onehalf = 0", true);        if (verbose)            testProgress("deleted every other row, now calling compress.");        callCompress(conn, "APP", table_name, true, true, true, true);        ret_after  = getSpaceInfo(conn, "APP", table_name, true);        if (total_pages(ret_after) != total_pages(ret_before))        {            // currently deleting every other row does not add free or unfilled            // pages to the container so defragment has nowhere to put the rows.            log_wrong_count(                "Expected no truncation.",                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],                ret_before, ret_after);        }        if (verbose)            testProgress("calling consistency checker.");        if (!checkConsistency(conn, schemaName, table_name))        {            logError("conistency check failed.");        }        // DELETE EVERY THIRD ROW in original dataset, COMPRESS, CHECK        //        //        // delete every third row        ret_before = getSpaceInfo(conn, "APP", table_name, true);        executeQuery(            conn, "delete from " + table_name + " where onethird = 0", true);        if (verbose)            testProgress("deleted every third row, now calling compress.");        callCompress(conn, "APP", table_name, true, true, true, true);        ret_after  = getSpaceInfo(conn, "APP", table_name, true);        if (total_pages(ret_after) != total_pages(ret_before))        {            // currently deleting every third row does not create any free             // or unfilled pages so defragment has no place to move rows.            
log_wrong_count(                "Expected no truncation.",                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],                ret_before, ret_after);        }        if (verbose)            testProgress("calling consistency checker.");        if (!checkConsistency(conn, schemaName, table_name))        {            logError("conistency check failed.");        }        // DELETE top "half" of rows in original dataset, COMPRESS, CHECK        //        //        // delete top "half" of the rows in the original dataset.        ret_before = getSpaceInfo(conn, "APP", table_name, true);        executeQuery(            conn, "delete from " + table_name + " where keycol > " +             (num_rows / 2), true);        if (verbose)            testProgress("deleted top half of the rows, now calling compress.");        callCompress(conn, "APP", table_name, true, true, true, true);        ret_after  = getSpaceInfo(conn, "APP", table_name, true);        // compress should be able to clean up about 1/2 of the pages.        
if (verbose)        {            log_wrong_count(                "deleted top half keys, spaceinfo:",                table_name, num_rows,                 ((total_pages(ret_before) / 2) + 2),                ret_after[SPACE_INFO_NUM_ALLOC],                ret_before, ret_after);        }        if (total_pages(ret_after) > ((total_pages(ret_before) / 2) + 2))        {            log_wrong_count(                "Expected at least " +                 (ret_before[SPACE_INFO_NUM_ALLOC] / 2 + 2) +                " pages to be truncated.",                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],                ret_before, ret_after);        }        if (verbose)            testProgress("calling consistency checker.");        if (!checkConsistency(conn, schemaName, table_name))        {            logError("conistency check failed.");        }        // DELETE 1st 500 rows in original dataset, COMPRESS, CHECK        //        //        // delete keys less than 500        ret_before = getSpaceInfo(conn, "APP", table_name, true);        executeQuery(            conn, "delete from " + table_name + " where keycol < 500 ", true);        if (verbose)            testProgress("deleted keys < 500, now calling compress.");        callCompress(conn, "APP", table_name, true, true, true, true);        ret_after  = getSpaceInfo(conn, "APP", table_name, true);        if (verbose)        {            log_wrong_count(                "deleted bottom 500 keys, spaceinfo:",                table_name, num_rows,                 (total_pages(ret_before) - 33),                ret_after[SPACE_INFO_NUM_ALLOC],                ret_before, ret_after);        }        // The bottom 500 keys, assuming 4k pages, takes about 33 pages        if (total_pages(ret_after) > (total_pages(ret_before) - 33))        {            log_wrong_count(                "Expected at least 33 pages reclaimed.",                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],                ret_before, 
ret_after);        }        if (verbose)            testProgress("calling consistency checker.");        if (!checkConsistency(conn, schemaName, table_name))        {            logError("conistency check failed.");        }        conn.commit();        testProgress("end test5: " + num_rows + " row test.");    }    /**     * Cleanup after test5_run     **/    private void test5_cleanup(    Connection  conn,    String      schemaName,    String      table_name,    int         num_rows)        throws SQLException    {        executeQuery(conn, "drop table " + table_name, true);    }    /**     * Test 5 - simple defragment test.     * <p>     * Create dataset and then:     * o delete every other row, defragment     * o delete every third row, defragment     * o delete last 1000 rows, defragment     * o delete first 512 rows, defragment.     * <p>     * run test with at least 2000 rows.     *     **/    private void test5(    Connection  conn,    String      test_name,    String      table_name)        throws SQLException     {        beginTest(conn, test_name);        int[] test_cases = {2000, 10000};        for (int i = 0; i < test_cases.length; i++)        {            test5_load(conn, "APP", table_name, test_cases[i]);            test5_run(conn, "APP", table_name, test_cases[i]);            test5_cleanup(conn, "APP", table_name, test_cases[i]);        }        endTest(conn, test_name);    }    public void testList(Connection conn)        throws SQLException    {        test1(conn, "test1", "TEST1");        // test2(conn, "test2", "TEST2");        test3(conn, "test3", "TEST3");        // test4(conn, "test4", "TEST4");        test5(conn, "test5", "TEST5");    }    public static void main(String[] argv)         throws Throwable    {        OnlineCompressTest test = new OnlineCompressTest();   		ij.getPropertyArg(argv);         Connection conn = ij.startJBMS();        conn.setAutoCommit(false);        try        {            test.testList(conn);        }        catch 
(SQLException sqle)        {			org.apache.derby.tools.JDBCDisplayUtil.ShowSQLException(                System.out, sqle);			sqle.printStackTrace(System.out);		}    }}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -